From ace9429bb58fd418f0c81d4c2835699bddf6bde6 Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Thu, 11 Apr 2024 10:27:49 +0200 Subject: Adding upstream version 6.6.15. Signed-off-by: Daniel Baumann --- drivers/media/platform/ti/Kconfig | 69 + drivers/media/platform/ti/Makefile | 7 + drivers/media/platform/ti/am437x/Kconfig | 16 + drivers/media/platform/ti/am437x/Makefile | 4 + drivers/media/platform/ti/am437x/am437x-vpfe.c | 2638 +++++++++++++++++++ drivers/media/platform/ti/am437x/am437x-vpfe.h | 288 ++ .../media/platform/ti/am437x/am437x-vpfe_regs.h | 132 + drivers/media/platform/ti/cal/Makefile | 3 + drivers/media/platform/ti/cal/cal-camerarx.c | 895 +++++++ drivers/media/platform/ti/cal/cal-video.c | 1062 ++++++++ drivers/media/platform/ti/cal/cal.c | 1343 ++++++++++ drivers/media/platform/ti/cal/cal.h | 339 +++ drivers/media/platform/ti/cal/cal_regs.h | 463 ++++ drivers/media/platform/ti/davinci/Kconfig | 33 + drivers/media/platform/ti/davinci/Makefile | 9 + drivers/media/platform/ti/davinci/vpif.c | 608 +++++ drivers/media/platform/ti/davinci/vpif.h | 679 +++++ drivers/media/platform/ti/davinci/vpif_capture.c | 1820 +++++++++++++ drivers/media/platform/ti/davinci/vpif_capture.h | 107 + drivers/media/platform/ti/davinci/vpif_display.c | 1403 ++++++++++ drivers/media/platform/ti/davinci/vpif_display.h | 116 + drivers/media/platform/ti/omap/Kconfig | 17 + drivers/media/platform/ti/omap/Makefile | 9 + drivers/media/platform/ti/omap/omap_vout.c | 1741 ++++++++++++ drivers/media/platform/ti/omap/omap_vout_vrfb.c | 419 +++ drivers/media/platform/ti/omap/omap_vout_vrfb.h | 40 + drivers/media/platform/ti/omap/omap_voutdef.h | 219 ++ drivers/media/platform/ti/omap/omap_voutlib.c | 361 +++ drivers/media/platform/ti/omap/omap_voutlib.h | 39 + drivers/media/platform/ti/omap3isp/Kconfig | 21 + drivers/media/platform/ti/omap3isp/Makefile | 12 + .../media/platform/ti/omap3isp/cfa_coef_table.h | 48 + drivers/media/platform/ti/omap3isp/gamma_table.h | 77 + drivers/media/platform/ti/omap3isp/isp.c | 2489 ++++++++++++++++++ drivers/media/platform/ti/omap3isp/isp.h | 369 +++ drivers/media/platform/ti/omap3isp/ispccdc.c | 2770 ++++++++++++++++++++ drivers/media/platform/ti/omap3isp/ispccdc.h | 174 ++ drivers/media/platform/ti/omap3isp/ispccp2.c | 1183 +++++++++ drivers/media/platform/ti/omap3isp/ispccp2.h | 85 + drivers/media/platform/ti/omap3isp/ispcsi2.c | 1324 ++++++++++ drivers/media/platform/ti/omap3isp/ispcsi2.h | 152 ++ drivers/media/platform/ti/omap3isp/ispcsiphy.c | 365 +++ drivers/media/platform/ti/omap3isp/ispcsiphy.h | 43 + drivers/media/platform/ti/omap3isp/isph3a.h | 104 + drivers/media/platform/ti/omap3isp/isph3a_aewb.c | 350 +++ drivers/media/platform/ti/omap3isp/isph3a_af.c | 405 +++ drivers/media/platform/ti/omap3isp/isphist.c | 540 ++++ drivers/media/platform/ti/omap3isp/isphist.h | 27 + drivers/media/platform/ti/omap3isp/isppreview.c | 2363 +++++++++++++++++ drivers/media/platform/ti/omap3isp/isppreview.h | 161 ++ drivers/media/platform/ti/omap3isp/ispreg.h | 1518 +++++++++++ drivers/media/platform/ti/omap3isp/ispresizer.c | 1801 +++++++++++++ drivers/media/platform/ti/omap3isp/ispresizer.h | 136 + drivers/media/platform/ti/omap3isp/ispstat.c | 1083 ++++++++ drivers/media/platform/ti/omap3isp/ispstat.h | 156 ++ drivers/media/platform/ti/omap3isp/ispvideo.c | 1493 +++++++++++ drivers/media/platform/ti/omap3isp/ispvideo.h | 214 ++ .../platform/ti/omap3isp/luma_enhance_table.h | 29 + .../platform/ti/omap3isp/noise_filter_table.h | 17 + drivers/media/platform/ti/omap3isp/omap3isp.h | 
132 + drivers/media/platform/ti/vpe/Makefile | 12 + drivers/media/platform/ti/vpe/csc.c | 279 ++ drivers/media/platform/ti/vpe/csc.h | 65 + drivers/media/platform/ti/vpe/sc.c | 306 +++ drivers/media/platform/ti/vpe/sc.h | 208 ++ drivers/media/platform/ti/vpe/sc_coeff.h | 1339 ++++++++++ drivers/media/platform/ti/vpe/vpdma.c | 1176 +++++++++ drivers/media/platform/ti/vpe/vpdma.h | 284 ++ drivers/media/platform/ti/vpe/vpdma_priv.h | 639 +++++ drivers/media/platform/ti/vpe/vpe.c | 2663 +++++++++++++++++++ drivers/media/platform/ti/vpe/vpe_regs.h | 306 +++ 71 files changed, 41797 insertions(+) create mode 100644 drivers/media/platform/ti/Kconfig create mode 100644 drivers/media/platform/ti/Makefile create mode 100644 drivers/media/platform/ti/am437x/Kconfig create mode 100644 drivers/media/platform/ti/am437x/Makefile create mode 100644 drivers/media/platform/ti/am437x/am437x-vpfe.c create mode 100644 drivers/media/platform/ti/am437x/am437x-vpfe.h create mode 100644 drivers/media/platform/ti/am437x/am437x-vpfe_regs.h create mode 100644 drivers/media/platform/ti/cal/Makefile create mode 100644 drivers/media/platform/ti/cal/cal-camerarx.c create mode 100644 drivers/media/platform/ti/cal/cal-video.c create mode 100644 drivers/media/platform/ti/cal/cal.c create mode 100644 drivers/media/platform/ti/cal/cal.h create mode 100644 drivers/media/platform/ti/cal/cal_regs.h create mode 100644 drivers/media/platform/ti/davinci/Kconfig create mode 100644 drivers/media/platform/ti/davinci/Makefile create mode 100644 drivers/media/platform/ti/davinci/vpif.c create mode 100644 drivers/media/platform/ti/davinci/vpif.h create mode 100644 drivers/media/platform/ti/davinci/vpif_capture.c create mode 100644 drivers/media/platform/ti/davinci/vpif_capture.h create mode 100644 drivers/media/platform/ti/davinci/vpif_display.c create mode 100644 drivers/media/platform/ti/davinci/vpif_display.h create mode 100644 drivers/media/platform/ti/omap/Kconfig create mode 100644 drivers/media/platform/ti/omap/Makefile create mode 100644 drivers/media/platform/ti/omap/omap_vout.c create mode 100644 drivers/media/platform/ti/omap/omap_vout_vrfb.c create mode 100644 drivers/media/platform/ti/omap/omap_vout_vrfb.h create mode 100644 drivers/media/platform/ti/omap/omap_voutdef.h create mode 100644 drivers/media/platform/ti/omap/omap_voutlib.c create mode 100644 drivers/media/platform/ti/omap/omap_voutlib.h create mode 100644 drivers/media/platform/ti/omap3isp/Kconfig create mode 100644 drivers/media/platform/ti/omap3isp/Makefile create mode 100644 drivers/media/platform/ti/omap3isp/cfa_coef_table.h create mode 100644 drivers/media/platform/ti/omap3isp/gamma_table.h create mode 100644 drivers/media/platform/ti/omap3isp/isp.c create mode 100644 drivers/media/platform/ti/omap3isp/isp.h create mode 100644 drivers/media/platform/ti/omap3isp/ispccdc.c create mode 100644 drivers/media/platform/ti/omap3isp/ispccdc.h create mode 100644 drivers/media/platform/ti/omap3isp/ispccp2.c create mode 100644 drivers/media/platform/ti/omap3isp/ispccp2.h create mode 100644 drivers/media/platform/ti/omap3isp/ispcsi2.c create mode 100644 drivers/media/platform/ti/omap3isp/ispcsi2.h create mode 100644 drivers/media/platform/ti/omap3isp/ispcsiphy.c create mode 100644 drivers/media/platform/ti/omap3isp/ispcsiphy.h create mode 100644 drivers/media/platform/ti/omap3isp/isph3a.h create mode 100644 drivers/media/platform/ti/omap3isp/isph3a_aewb.c create mode 100644 drivers/media/platform/ti/omap3isp/isph3a_af.c create mode 100644 
drivers/media/platform/ti/omap3isp/isphist.c create mode 100644 drivers/media/platform/ti/omap3isp/isphist.h create mode 100644 drivers/media/platform/ti/omap3isp/isppreview.c create mode 100644 drivers/media/platform/ti/omap3isp/isppreview.h create mode 100644 drivers/media/platform/ti/omap3isp/ispreg.h create mode 100644 drivers/media/platform/ti/omap3isp/ispresizer.c create mode 100644 drivers/media/platform/ti/omap3isp/ispresizer.h create mode 100644 drivers/media/platform/ti/omap3isp/ispstat.c create mode 100644 drivers/media/platform/ti/omap3isp/ispstat.h create mode 100644 drivers/media/platform/ti/omap3isp/ispvideo.c create mode 100644 drivers/media/platform/ti/omap3isp/ispvideo.h create mode 100644 drivers/media/platform/ti/omap3isp/luma_enhance_table.h create mode 100644 drivers/media/platform/ti/omap3isp/noise_filter_table.h create mode 100644 drivers/media/platform/ti/omap3isp/omap3isp.h create mode 100644 drivers/media/platform/ti/vpe/Makefile create mode 100644 drivers/media/platform/ti/vpe/csc.c create mode 100644 drivers/media/platform/ti/vpe/csc.h create mode 100644 drivers/media/platform/ti/vpe/sc.c create mode 100644 drivers/media/platform/ti/vpe/sc.h create mode 100644 drivers/media/platform/ti/vpe/sc_coeff.h create mode 100644 drivers/media/platform/ti/vpe/vpdma.c create mode 100644 drivers/media/platform/ti/vpe/vpdma.h create mode 100644 drivers/media/platform/ti/vpe/vpdma_priv.h create mode 100644 drivers/media/platform/ti/vpe/vpe.c create mode 100644 drivers/media/platform/ti/vpe/vpe_regs.h (limited to 'drivers/media/platform/ti') diff --git a/drivers/media/platform/ti/Kconfig b/drivers/media/platform/ti/Kconfig new file mode 100644 index 0000000000..e1ab56c3be --- /dev/null +++ b/drivers/media/platform/ti/Kconfig @@ -0,0 +1,69 @@ +# SPDX-License-Identifier: GPL-2.0-only + +comment "Texas Instruments drivers" + +# TI VIDEO PORT Helper Modules +# These will be selected by VPE and VIP +config VIDEO_TI_VPDMA + tristate + +config VIDEO_TI_SC + tristate + +config VIDEO_TI_CSC + tristate + +# V4L drivers + +config VIDEO_TI_CAL + tristate "TI CAL (Camera Adaptation Layer) driver" + depends on VIDEO_DEV + depends on V4L_PLATFORM_DRIVERS + select MEDIA_CONTROLLER + select VIDEO_V4L2_SUBDEV_API + depends on SOC_DRA7XX || ARCH_K3 || COMPILE_TEST + select VIDEOBUF2_DMA_CONTIG + select V4L2_FWNODE + help + Support for the TI CAL (Camera Adaptation Layer) block + found on DRA72X SoC. + In TI Technical Reference Manual this module is referred as + Camera Interface Subsystem (CAMSS). + +config VIDEO_TI_CAL_MC + bool "Media Controller centric mode by default" + depends on VIDEO_TI_CAL + default n + help + Enables Media Controller centric mode by default. + + If set, CAL driver will start in Media Controller mode by + default. Note that this behavior can be overridden via + module parameter 'mc_api'. + +# Mem2mem drivers + +config VIDEO_TI_VPE + tristate "TI VPE (Video Processing Engine) driver" + depends on V4L_MEM2MEM_DRIVERS + depends on VIDEO_DEV + depends on SOC_DRA7XX || COMPILE_TEST + select VIDEOBUF2_DMA_CONTIG + select V4L2_MEM2MEM_DEV + select VIDEO_TI_VPDMA + select VIDEO_TI_SC + select VIDEO_TI_CSC + help + Support for the TI VPE(Video Processing Engine) block + found on DRA7XX SoC. + +config VIDEO_TI_VPE_DEBUG + bool "VPE debug messages" + depends on VIDEO_TI_VPE + help + Enable debug messages on VPE driver. 
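The VIDEO_TI_CAL_MC help text above notes that the Kconfig default can still be overridden at load time through the 'mc_api' module parameter. As an illustration only (cal.c is not shown here, so the variable name and exact definition are assumptions), the usual kernel pattern for a bool parameter whose default tracks a Kconfig option looks like this:

/* Illustrative sketch, not taken from this patch. */
#include <linux/module.h>

static bool mc_api = IS_ENABLED(CONFIG_VIDEO_TI_CAL_MC);
module_param(mc_api, bool, 0444);
MODULE_PARM_DESC(mc_api, "Start in media controller mode (default from CONFIG_VIDEO_TI_CAL_MC)");

Loading the module with mc_api=1 or mc_api=0 then takes precedence over the Kconfig default.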
+ +source "drivers/media/platform/ti/am437x/Kconfig" +source "drivers/media/platform/ti/davinci/Kconfig" +source "drivers/media/platform/ti/omap/Kconfig" +source "drivers/media/platform/ti/omap3isp/Kconfig" diff --git a/drivers/media/platform/ti/Makefile b/drivers/media/platform/ti/Makefile new file mode 100644 index 0000000000..98c5fe5c40 --- /dev/null +++ b/drivers/media/platform/ti/Makefile @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-y += am437x/ +obj-y += cal/ +obj-y += vpe/ +obj-y += davinci/ +obj-y += omap/ +obj-y += omap3isp/ diff --git a/drivers/media/platform/ti/am437x/Kconfig b/drivers/media/platform/ti/am437x/Kconfig new file mode 100644 index 0000000000..2e24fff7e6 --- /dev/null +++ b/drivers/media/platform/ti/am437x/Kconfig @@ -0,0 +1,16 @@ +# SPDX-License-Identifier: GPL-2.0-only +config VIDEO_AM437X_VPFE + tristate "TI AM437x VPFE video capture driver" + depends on V4L_PLATFORM_DRIVERS + depends on VIDEO_DEV + depends on SOC_AM43XX || COMPILE_TEST + select MEDIA_CONTROLLER + select VIDEO_V4L2_SUBDEV_API + select VIDEOBUF2_DMA_CONTIG + select V4L2_FWNODE + help + Support for AM437x Video Processing Front End based Video + Capture Driver. + + To compile this driver as a module, choose M here. The module + will be called am437x-vpfe. diff --git a/drivers/media/platform/ti/am437x/Makefile b/drivers/media/platform/ti/am437x/Makefile new file mode 100644 index 0000000000..5410434872 --- /dev/null +++ b/drivers/media/platform/ti/am437x/Makefile @@ -0,0 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0-only +# Makefile for AM437x VPFE driver + +obj-$(CONFIG_VIDEO_AM437X_VPFE) += am437x-vpfe.o diff --git a/drivers/media/platform/ti/am437x/am437x-vpfe.c b/drivers/media/platform/ti/am437x/am437x-vpfe.c new file mode 100644 index 0000000000..63092013d4 --- /dev/null +++ b/drivers/media/platform/ti/am437x/am437x-vpfe.c @@ -0,0 +1,2638 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * TI VPFE capture Driver + * + * Copyright (C) 2013 - 2014 Texas Instruments, Inc. + * + * Benoit Parrot + * Lad, Prabhakar + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "am437x-vpfe.h" + +#define VPFE_MODULE_NAME "vpfe" +#define VPFE_VERSION "0.1.0" + +static int debug; +module_param(debug, int, 0644); +MODULE_PARM_DESC(debug, "Debug level 0-8"); + +#define vpfe_dbg(level, dev, fmt, arg...) \ + v4l2_dbg(level, debug, &dev->v4l2_dev, fmt, ##arg) +#define vpfe_info(dev, fmt, arg...) \ + v4l2_info(&dev->v4l2_dev, fmt, ##arg) +#define vpfe_err(dev, fmt, arg...) 
\ + v4l2_err(&dev->v4l2_dev, fmt, ##arg) + +/* standard information */ +struct vpfe_standard { + v4l2_std_id std_id; + unsigned int width; + unsigned int height; + struct v4l2_fract pixelaspect; + int frame_format; +}; + +static const struct vpfe_standard vpfe_standards[] = { + {V4L2_STD_525_60, 720, 480, {11, 10}, 1}, + {V4L2_STD_625_50, 720, 576, {54, 59}, 1}, +}; + +static struct vpfe_fmt formats[VPFE_NUM_FORMATS] = { + { + .fourcc = V4L2_PIX_FMT_YUYV, + .code = MEDIA_BUS_FMT_YUYV8_2X8, + .bitsperpixel = 16, + }, { + .fourcc = V4L2_PIX_FMT_UYVY, + .code = MEDIA_BUS_FMT_UYVY8_2X8, + .bitsperpixel = 16, + }, { + .fourcc = V4L2_PIX_FMT_YVYU, + .code = MEDIA_BUS_FMT_YVYU8_2X8, + .bitsperpixel = 16, + }, { + .fourcc = V4L2_PIX_FMT_VYUY, + .code = MEDIA_BUS_FMT_VYUY8_2X8, + .bitsperpixel = 16, + }, { + .fourcc = V4L2_PIX_FMT_SBGGR8, + .code = MEDIA_BUS_FMT_SBGGR8_1X8, + .bitsperpixel = 8, + }, { + .fourcc = V4L2_PIX_FMT_SGBRG8, + .code = MEDIA_BUS_FMT_SGBRG8_1X8, + .bitsperpixel = 8, + }, { + .fourcc = V4L2_PIX_FMT_SGRBG8, + .code = MEDIA_BUS_FMT_SGRBG8_1X8, + .bitsperpixel = 8, + }, { + .fourcc = V4L2_PIX_FMT_SRGGB8, + .code = MEDIA_BUS_FMT_SRGGB8_1X8, + .bitsperpixel = 8, + }, { + .fourcc = V4L2_PIX_FMT_RGB565, + .code = MEDIA_BUS_FMT_RGB565_2X8_LE, + .bitsperpixel = 16, + }, { + .fourcc = V4L2_PIX_FMT_RGB565X, + .code = MEDIA_BUS_FMT_RGB565_2X8_BE, + .bitsperpixel = 16, + }, +}; + +static int __subdev_get_format(struct vpfe_device *vpfe, + struct v4l2_mbus_framefmt *fmt); +static int vpfe_calc_format_size(struct vpfe_device *vpfe, + const struct vpfe_fmt *fmt, + struct v4l2_format *f); + +static struct vpfe_fmt *find_format_by_code(struct vpfe_device *vpfe, + unsigned int code) +{ + struct vpfe_fmt *fmt; + unsigned int k; + + for (k = 0; k < vpfe->num_active_fmt; k++) { + fmt = vpfe->active_fmt[k]; + if (fmt->code == code) + return fmt; + } + + return NULL; +} + +static struct vpfe_fmt *find_format_by_pix(struct vpfe_device *vpfe, + unsigned int pixelformat) +{ + struct vpfe_fmt *fmt; + unsigned int k; + + for (k = 0; k < vpfe->num_active_fmt; k++) { + fmt = vpfe->active_fmt[k]; + if (fmt->fourcc == pixelformat) + return fmt; + } + + return NULL; +} + +static unsigned int __get_bytesperpixel(struct vpfe_device *vpfe, + const struct vpfe_fmt *fmt) +{ + struct vpfe_subdev_info *sdinfo = vpfe->current_subdev; + unsigned int bus_width = sdinfo->vpfe_param.bus_width; + u32 bpp, bus_width_bytes, clocksperpixel; + + bus_width_bytes = ALIGN(bus_width, 8) >> 3; + clocksperpixel = DIV_ROUND_UP(fmt->bitsperpixel, bus_width); + bpp = clocksperpixel * bus_width_bytes; + + return bpp; +} + +/* Print Four-character-code (FOURCC) */ +static char *print_fourcc(u32 fmt) +{ + static char code[5]; + + code[0] = (unsigned char)(fmt & 0xff); + code[1] = (unsigned char)((fmt >> 8) & 0xff); + code[2] = (unsigned char)((fmt >> 16) & 0xff); + code[3] = (unsigned char)((fmt >> 24) & 0xff); + code[4] = '\0'; + + return code; +} + +static inline u32 vpfe_reg_read(struct vpfe_ccdc *ccdc, u32 offset) +{ + return ioread32(ccdc->ccdc_cfg.base_addr + offset); +} + +static inline void vpfe_reg_write(struct vpfe_ccdc *ccdc, u32 val, u32 offset) +{ + iowrite32(val, ccdc->ccdc_cfg.base_addr + offset); +} + +static inline struct vpfe_device *to_vpfe(struct vpfe_ccdc *ccdc) +{ + return container_of(ccdc, struct vpfe_device, ccdc); +} + +static inline +struct vpfe_cap_buffer *to_vpfe_buffer(struct vb2_v4l2_buffer *vb) +{ + return container_of(vb, struct vpfe_cap_buffer, vb); +} + +static inline void vpfe_pcr_enable(struct 
vpfe_ccdc *ccdc, int flag) +{ + vpfe_reg_write(ccdc, !!flag, VPFE_PCR); +} + +static void vpfe_config_enable(struct vpfe_ccdc *ccdc, int flag) +{ + unsigned int cfg; + + if (!flag) { + cfg = vpfe_reg_read(ccdc, VPFE_CONFIG); + cfg &= ~(VPFE_CONFIG_EN_ENABLE << VPFE_CONFIG_EN_SHIFT); + } else { + cfg = VPFE_CONFIG_EN_ENABLE << VPFE_CONFIG_EN_SHIFT; + } + + vpfe_reg_write(ccdc, cfg, VPFE_CONFIG); +} + +static void vpfe_ccdc_setwin(struct vpfe_ccdc *ccdc, + struct v4l2_rect *image_win, + enum ccdc_frmfmt frm_fmt, + int bpp) +{ + int horz_start, horz_nr_pixels; + int vert_start, vert_nr_lines; + int val, mid_img; + + /* + * ppc - per pixel count. indicates how many pixels per cell + * output to SDRAM. example, for ycbcr, it is one y and one c, so 2. + * raw capture this is 1 + */ + horz_start = image_win->left * bpp; + horz_nr_pixels = (image_win->width * bpp) - 1; + vpfe_reg_write(ccdc, (horz_start << VPFE_HORZ_INFO_SPH_SHIFT) | + horz_nr_pixels, VPFE_HORZ_INFO); + + vert_start = image_win->top; + + if (frm_fmt == CCDC_FRMFMT_INTERLACED) { + vert_nr_lines = (image_win->height >> 1) - 1; + vert_start >>= 1; + /* configure VDINT0 */ + val = (vert_start << VPFE_VDINT_VDINT0_SHIFT); + } else { + vert_nr_lines = image_win->height - 1; + /* + * configure VDINT0 and VDINT1. VDINT1 will be at half + * of image height + */ + mid_img = vert_start + (image_win->height / 2); + val = (vert_start << VPFE_VDINT_VDINT0_SHIFT) | + (mid_img & VPFE_VDINT_VDINT1_MASK); + } + + vpfe_reg_write(ccdc, val, VPFE_VDINT); + + vpfe_reg_write(ccdc, (vert_start << VPFE_VERT_START_SLV0_SHIFT) | + vert_start, VPFE_VERT_START); + vpfe_reg_write(ccdc, vert_nr_lines, VPFE_VERT_LINES); +} + +static void vpfe_reg_dump(struct vpfe_ccdc *ccdc) +{ + struct vpfe_device *vpfe = to_vpfe(ccdc); + + vpfe_dbg(3, vpfe, "ALAW: 0x%x\n", vpfe_reg_read(ccdc, VPFE_ALAW)); + vpfe_dbg(3, vpfe, "CLAMP: 0x%x\n", vpfe_reg_read(ccdc, VPFE_CLAMP)); + vpfe_dbg(3, vpfe, "DCSUB: 0x%x\n", vpfe_reg_read(ccdc, VPFE_DCSUB)); + vpfe_dbg(3, vpfe, "BLKCMP: 0x%x\n", vpfe_reg_read(ccdc, VPFE_BLKCMP)); + vpfe_dbg(3, vpfe, "COLPTN: 0x%x\n", vpfe_reg_read(ccdc, VPFE_COLPTN)); + vpfe_dbg(3, vpfe, "SDOFST: 0x%x\n", vpfe_reg_read(ccdc, VPFE_SDOFST)); + vpfe_dbg(3, vpfe, "SYN_MODE: 0x%x\n", + vpfe_reg_read(ccdc, VPFE_SYNMODE)); + vpfe_dbg(3, vpfe, "HSIZE_OFF: 0x%x\n", + vpfe_reg_read(ccdc, VPFE_HSIZE_OFF)); + vpfe_dbg(3, vpfe, "HORZ_INFO: 0x%x\n", + vpfe_reg_read(ccdc, VPFE_HORZ_INFO)); + vpfe_dbg(3, vpfe, "VERT_START: 0x%x\n", + vpfe_reg_read(ccdc, VPFE_VERT_START)); + vpfe_dbg(3, vpfe, "VERT_LINES: 0x%x\n", + vpfe_reg_read(ccdc, VPFE_VERT_LINES)); +} + +static int +vpfe_ccdc_validate_param(struct vpfe_ccdc *ccdc, + struct vpfe_ccdc_config_params_raw *ccdcparam) +{ + struct vpfe_device *vpfe = to_vpfe(ccdc); + u8 max_gamma, max_data; + + if (!ccdcparam->alaw.enable) + return 0; + + max_gamma = ccdc_gamma_width_max_bit(ccdcparam->alaw.gamma_wd); + max_data = ccdc_data_size_max_bit(ccdcparam->data_sz); + + if (ccdcparam->alaw.gamma_wd > VPFE_CCDC_GAMMA_BITS_09_0 || + ccdcparam->data_sz > VPFE_CCDC_DATA_8BITS || + max_gamma > max_data) { + vpfe_dbg(1, vpfe, "Invalid data line select\n"); + return -EINVAL; + } + + return 0; +} + +static void +vpfe_ccdc_update_raw_params(struct vpfe_ccdc *ccdc, + struct vpfe_ccdc_config_params_raw *raw_params) +{ + struct vpfe_ccdc_config_params_raw *config_params = + &ccdc->ccdc_cfg.bayer.config_params; + + *config_params = *raw_params; +} + +/* + * vpfe_ccdc_restore_defaults() + * This function will write defaults to all CCDC registers + 
*/ +static void vpfe_ccdc_restore_defaults(struct vpfe_ccdc *ccdc) +{ + int i; + + /* Disable CCDC */ + vpfe_pcr_enable(ccdc, 0); + + /* set all registers to default value */ + for (i = 4; i <= 0x94; i += 4) + vpfe_reg_write(ccdc, 0, i); + + vpfe_reg_write(ccdc, VPFE_NO_CULLING, VPFE_CULLING); + vpfe_reg_write(ccdc, VPFE_CCDC_GAMMA_BITS_11_2, VPFE_ALAW); +} + +static int vpfe_ccdc_close(struct vpfe_ccdc *ccdc, struct device *dev) +{ + struct vpfe_device *vpfe = to_vpfe(ccdc); + u32 dma_cntl, pcr; + + pcr = vpfe_reg_read(ccdc, VPFE_PCR); + if (pcr) + vpfe_dbg(1, vpfe, "VPFE_PCR is still set (%x)", pcr); + + dma_cntl = vpfe_reg_read(ccdc, VPFE_DMA_CNTL); + if ((dma_cntl & VPFE_DMA_CNTL_OVERFLOW)) + vpfe_dbg(1, vpfe, "VPFE_DMA_CNTL_OVERFLOW is still set (%x)", + dma_cntl); + + /* Disable CCDC by resetting all register to default POR values */ + vpfe_ccdc_restore_defaults(ccdc); + + /* Disabled the module at the CONFIG level */ + vpfe_config_enable(ccdc, 0); + + pm_runtime_put_sync(dev); + return 0; +} + +static int vpfe_ccdc_set_params(struct vpfe_ccdc *ccdc, void __user *params) +{ + struct vpfe_device *vpfe = to_vpfe(ccdc); + struct vpfe_ccdc_config_params_raw raw_params; + int x; + + if (ccdc->ccdc_cfg.if_type != VPFE_RAW_BAYER) + return -EINVAL; + + x = copy_from_user(&raw_params, params, sizeof(raw_params)); + if (x) { + vpfe_dbg(1, vpfe, + "%s: error in copying ccdc params, %d\n", + __func__, x); + return -EFAULT; + } + + if (!vpfe_ccdc_validate_param(ccdc, &raw_params)) { + vpfe_ccdc_update_raw_params(ccdc, &raw_params); + return 0; + } + + return -EINVAL; +} + +/* + * vpfe_ccdc_config_ycbcr() + * This function will configure CCDC for YCbCr video capture + */ +static void vpfe_ccdc_config_ycbcr(struct vpfe_ccdc *ccdc) +{ + struct ccdc_params_ycbcr *params = &ccdc->ccdc_cfg.ycbcr; + u32 syn_mode; + + /* + * first restore the CCDC registers to default values + * This is important since we assume default values to be set in + * a lot of registers that we didn't touch + */ + vpfe_ccdc_restore_defaults(ccdc); + + /* + * configure pixel format, frame format, configure video frame + * format, enable output to SDRAM, enable internal timing generator + * and 8bit pack mode + */ + syn_mode = (((params->pix_fmt & VPFE_SYN_MODE_INPMOD_MASK) << + VPFE_SYN_MODE_INPMOD_SHIFT) | + ((params->frm_fmt & VPFE_SYN_FLDMODE_MASK) << + VPFE_SYN_FLDMODE_SHIFT) | VPFE_VDHDEN_ENABLE | + VPFE_WEN_ENABLE | VPFE_DATA_PACK_ENABLE); + + /* setup BT.656 sync mode */ + if (params->bt656_enable) { + vpfe_reg_write(ccdc, VPFE_REC656IF_BT656_EN, VPFE_REC656IF); + + /* + * configure the FID, VD, HD pin polarity, + * fld,hd pol positive, vd negative, 8-bit data + */ + syn_mode |= VPFE_SYN_MODE_VD_POL_NEGATIVE; + if (ccdc->ccdc_cfg.if_type == VPFE_BT656_10BIT) + syn_mode |= VPFE_SYN_MODE_10BITS; + else + syn_mode |= VPFE_SYN_MODE_8BITS; + } else { + /* y/c external sync mode */ + syn_mode |= (((params->fid_pol & VPFE_FID_POL_MASK) << + VPFE_FID_POL_SHIFT) | + ((params->hd_pol & VPFE_HD_POL_MASK) << + VPFE_HD_POL_SHIFT) | + ((params->vd_pol & VPFE_VD_POL_MASK) << + VPFE_VD_POL_SHIFT)); + } + vpfe_reg_write(ccdc, syn_mode, VPFE_SYNMODE); + + /* configure video window */ + vpfe_ccdc_setwin(ccdc, ¶ms->win, + params->frm_fmt, params->bytesperpixel); + + /* + * configure the order of y cb cr in SDRAM, and disable latch + * internal register on vsync + */ + if (ccdc->ccdc_cfg.if_type == VPFE_BT656_10BIT) + vpfe_reg_write(ccdc, + (params->pix_order << VPFE_CCDCFG_Y8POS_SHIFT) | + VPFE_LATCH_ON_VSYNC_DISABLE | + 
VPFE_CCDCFG_BW656_10BIT, VPFE_CCDCFG); + else + vpfe_reg_write(ccdc, + (params->pix_order << VPFE_CCDCFG_Y8POS_SHIFT) | + VPFE_LATCH_ON_VSYNC_DISABLE, VPFE_CCDCFG); + + /* + * configure the horizontal line offset. This should be a + * on 32 byte boundary. So clear LSB 5 bits + */ + vpfe_reg_write(ccdc, params->bytesperline, VPFE_HSIZE_OFF); + + /* configure the memory line offset */ + if (params->buf_type == CCDC_BUFTYPE_FLD_INTERLEAVED) + /* two fields are interleaved in memory */ + vpfe_reg_write(ccdc, VPFE_SDOFST_FIELD_INTERLEAVED, + VPFE_SDOFST); +} + +static void +vpfe_ccdc_config_black_clamp(struct vpfe_ccdc *ccdc, + struct vpfe_ccdc_black_clamp *bclamp) +{ + u32 val; + + if (!bclamp->enable) { + /* configure DCSub */ + val = (bclamp->dc_sub) & VPFE_BLK_DC_SUB_MASK; + vpfe_reg_write(ccdc, val, VPFE_DCSUB); + vpfe_reg_write(ccdc, VPFE_CLAMP_DEFAULT_VAL, VPFE_CLAMP); + return; + } + /* + * Configure gain, Start pixel, No of line to be avg, + * No of pixel/line to be avg, & Enable the Black clamping + */ + val = ((bclamp->sgain & VPFE_BLK_SGAIN_MASK) | + ((bclamp->start_pixel & VPFE_BLK_ST_PXL_MASK) << + VPFE_BLK_ST_PXL_SHIFT) | + ((bclamp->sample_ln & VPFE_BLK_SAMPLE_LINE_MASK) << + VPFE_BLK_SAMPLE_LINE_SHIFT) | + ((bclamp->sample_pixel & VPFE_BLK_SAMPLE_LN_MASK) << + VPFE_BLK_SAMPLE_LN_SHIFT) | VPFE_BLK_CLAMP_ENABLE); + vpfe_reg_write(ccdc, val, VPFE_CLAMP); + /* If Black clamping is enable then make dcsub 0 */ + vpfe_reg_write(ccdc, VPFE_DCSUB_DEFAULT_VAL, VPFE_DCSUB); +} + +static void +vpfe_ccdc_config_black_compense(struct vpfe_ccdc *ccdc, + struct vpfe_ccdc_black_compensation *bcomp) +{ + u32 val; + + val = ((bcomp->b & VPFE_BLK_COMP_MASK) | + ((bcomp->gb & VPFE_BLK_COMP_MASK) << + VPFE_BLK_COMP_GB_COMP_SHIFT) | + ((bcomp->gr & VPFE_BLK_COMP_MASK) << + VPFE_BLK_COMP_GR_COMP_SHIFT) | + ((bcomp->r & VPFE_BLK_COMP_MASK) << + VPFE_BLK_COMP_R_COMP_SHIFT)); + vpfe_reg_write(ccdc, val, VPFE_BLKCMP); +} + +/* + * vpfe_ccdc_config_raw() + * This function will configure CCDC for Raw capture mode + */ +static void vpfe_ccdc_config_raw(struct vpfe_ccdc *ccdc) +{ + struct vpfe_device *vpfe = to_vpfe(ccdc); + struct vpfe_ccdc_config_params_raw *config_params = + &ccdc->ccdc_cfg.bayer.config_params; + struct ccdc_params_raw *params = &ccdc->ccdc_cfg.bayer; + unsigned int syn_mode; + unsigned int val; + + /* Reset CCDC */ + vpfe_ccdc_restore_defaults(ccdc); + + /* Disable latching function registers on VSYNC */ + vpfe_reg_write(ccdc, VPFE_LATCH_ON_VSYNC_DISABLE, VPFE_CCDCFG); + + /* + * Configure the vertical sync polarity(SYN_MODE.VDPOL), + * horizontal sync polarity (SYN_MODE.HDPOL), frame id polarity + * (SYN_MODE.FLDPOL), frame format(progressive or interlace), + * data size(SYNMODE.DATSIZ), &pixel format (Input mode), output + * SDRAM, enable internal timing generator + */ + syn_mode = (((params->vd_pol & VPFE_VD_POL_MASK) << VPFE_VD_POL_SHIFT) | + ((params->hd_pol & VPFE_HD_POL_MASK) << VPFE_HD_POL_SHIFT) | + ((params->fid_pol & VPFE_FID_POL_MASK) << + VPFE_FID_POL_SHIFT) | ((params->frm_fmt & + VPFE_FRM_FMT_MASK) << VPFE_FRM_FMT_SHIFT) | + ((config_params->data_sz & VPFE_DATA_SZ_MASK) << + VPFE_DATA_SZ_SHIFT) | ((params->pix_fmt & + VPFE_PIX_FMT_MASK) << VPFE_PIX_FMT_SHIFT) | + VPFE_WEN_ENABLE | VPFE_VDHDEN_ENABLE); + + /* Enable and configure aLaw register if needed */ + if (config_params->alaw.enable) { + val = ((config_params->alaw.gamma_wd & + VPFE_ALAW_GAMMA_WD_MASK) | VPFE_ALAW_ENABLE); + vpfe_reg_write(ccdc, val, VPFE_ALAW); + vpfe_dbg(3, vpfe, "\nWriting 0x%x to ALAW...\n", val); 
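/*
 * Editor's illustration, not part of the patch: the SYN_MODE and ALAW values
 * above are assembled by masking each parameter to its field width and
 * shifting it to its bit position. A minimal standalone sketch of that
 * pattern, using made-up FIELD_* macros rather than the real VPFE_* register
 * definitions:
 */
#include <stdio.h>

#define FIELD_MASK	0x3	/* hypothetical 2-bit wide field */
#define FIELD_SHIFT	4	/* hypothetical bit position in the register */

static unsigned int pack_field(unsigned int reg, unsigned int val)
{
	reg &= ~(FIELD_MASK << FIELD_SHIFT);		/* clear the field */
	reg |= (val & FIELD_MASK) << FIELD_SHIFT;	/* insert the masked value */
	return reg;
}

int main(void)
{
	printf("0x%x\n", pack_field(0, 2));	/* prints 0x20 */
	return 0;
}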
+ } + + /* Configure video window */ + vpfe_ccdc_setwin(ccdc, ¶ms->win, params->frm_fmt, + params->bytesperpixel); + + /* Configure Black Clamp */ + vpfe_ccdc_config_black_clamp(ccdc, &config_params->blk_clamp); + + /* Configure Black level compensation */ + vpfe_ccdc_config_black_compense(ccdc, &config_params->blk_comp); + + /* If data size is 8 bit then pack the data */ + if ((config_params->data_sz == VPFE_CCDC_DATA_8BITS) || + config_params->alaw.enable) + syn_mode |= VPFE_DATA_PACK_ENABLE; + + /* + * Configure Horizontal offset register. If pack 8 is enabled then + * 1 pixel will take 1 byte + */ + vpfe_reg_write(ccdc, params->bytesperline, VPFE_HSIZE_OFF); + + vpfe_dbg(3, vpfe, "Writing %d (%x) to HSIZE_OFF\n", + params->bytesperline, params->bytesperline); + + /* Set value for SDOFST */ + if (params->frm_fmt == CCDC_FRMFMT_INTERLACED) { + if (params->image_invert_enable) { + /* For interlace inverse mode */ + vpfe_reg_write(ccdc, VPFE_INTERLACED_IMAGE_INVERT, + VPFE_SDOFST); + } else { + /* For interlace non inverse mode */ + vpfe_reg_write(ccdc, VPFE_INTERLACED_NO_IMAGE_INVERT, + VPFE_SDOFST); + } + } else if (params->frm_fmt == CCDC_FRMFMT_PROGRESSIVE) { + vpfe_reg_write(ccdc, VPFE_PROGRESSIVE_NO_IMAGE_INVERT, + VPFE_SDOFST); + } + + vpfe_reg_write(ccdc, syn_mode, VPFE_SYNMODE); + + vpfe_reg_dump(ccdc); +} + +static inline int +vpfe_ccdc_set_buftype(struct vpfe_ccdc *ccdc, + enum ccdc_buftype buf_type) +{ + if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER) + ccdc->ccdc_cfg.bayer.buf_type = buf_type; + else + ccdc->ccdc_cfg.ycbcr.buf_type = buf_type; + + return 0; +} + +static inline enum ccdc_buftype vpfe_ccdc_get_buftype(struct vpfe_ccdc *ccdc) +{ + if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER) + return ccdc->ccdc_cfg.bayer.buf_type; + + return ccdc->ccdc_cfg.ycbcr.buf_type; +} + +static int vpfe_ccdc_set_pixel_format(struct vpfe_ccdc *ccdc, u32 pixfmt) +{ + struct vpfe_device *vpfe = to_vpfe(ccdc); + + vpfe_dbg(1, vpfe, "%s: if_type: %d, pixfmt:%s\n", + __func__, ccdc->ccdc_cfg.if_type, print_fourcc(pixfmt)); + + if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER) { + ccdc->ccdc_cfg.bayer.pix_fmt = CCDC_PIXFMT_RAW; + /* + * Need to clear it in case it was left on + * after the last capture. 
+ */ + ccdc->ccdc_cfg.bayer.config_params.alaw.enable = 0; + + switch (pixfmt) { + case V4L2_PIX_FMT_SBGGR8: + ccdc->ccdc_cfg.bayer.config_params.alaw.enable = 1; + break; + + case V4L2_PIX_FMT_YUYV: + case V4L2_PIX_FMT_UYVY: + case V4L2_PIX_FMT_YUV420: + case V4L2_PIX_FMT_NV12: + case V4L2_PIX_FMT_RGB565X: + break; + + case V4L2_PIX_FMT_SBGGR16: + default: + return -EINVAL; + } + } else { + switch (pixfmt) { + case V4L2_PIX_FMT_YUYV: + ccdc->ccdc_cfg.ycbcr.pix_order = CCDC_PIXORDER_YCBYCR; + break; + + case V4L2_PIX_FMT_UYVY: + ccdc->ccdc_cfg.ycbcr.pix_order = CCDC_PIXORDER_CBYCRY; + break; + + default: + return -EINVAL; + } + } + + return 0; +} + +static u32 vpfe_ccdc_get_pixel_format(struct vpfe_ccdc *ccdc) +{ + u32 pixfmt; + + if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER) { + pixfmt = V4L2_PIX_FMT_YUYV; + } else { + if (ccdc->ccdc_cfg.ycbcr.pix_order == CCDC_PIXORDER_YCBYCR) + pixfmt = V4L2_PIX_FMT_YUYV; + else + pixfmt = V4L2_PIX_FMT_UYVY; + } + + return pixfmt; +} + +static int +vpfe_ccdc_set_image_window(struct vpfe_ccdc *ccdc, + struct v4l2_rect *win, unsigned int bpp) +{ + if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER) { + ccdc->ccdc_cfg.bayer.win = *win; + ccdc->ccdc_cfg.bayer.bytesperpixel = bpp; + ccdc->ccdc_cfg.bayer.bytesperline = ALIGN(win->width * bpp, 32); + } else { + ccdc->ccdc_cfg.ycbcr.win = *win; + ccdc->ccdc_cfg.ycbcr.bytesperpixel = bpp; + ccdc->ccdc_cfg.ycbcr.bytesperline = ALIGN(win->width * bpp, 32); + } + + return 0; +} + +static inline void +vpfe_ccdc_get_image_window(struct vpfe_ccdc *ccdc, + struct v4l2_rect *win) +{ + if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER) + *win = ccdc->ccdc_cfg.bayer.win; + else + *win = ccdc->ccdc_cfg.ycbcr.win; +} + +static inline unsigned int vpfe_ccdc_get_line_length(struct vpfe_ccdc *ccdc) +{ + if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER) + return ccdc->ccdc_cfg.bayer.bytesperline; + + return ccdc->ccdc_cfg.ycbcr.bytesperline; +} + +static inline int +vpfe_ccdc_set_frame_format(struct vpfe_ccdc *ccdc, + enum ccdc_frmfmt frm_fmt) +{ + if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER) + ccdc->ccdc_cfg.bayer.frm_fmt = frm_fmt; + else + ccdc->ccdc_cfg.ycbcr.frm_fmt = frm_fmt; + + return 0; +} + +static inline enum ccdc_frmfmt +vpfe_ccdc_get_frame_format(struct vpfe_ccdc *ccdc) +{ + if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER) + return ccdc->ccdc_cfg.bayer.frm_fmt; + + return ccdc->ccdc_cfg.ycbcr.frm_fmt; +} + +static inline int vpfe_ccdc_getfid(struct vpfe_ccdc *ccdc) +{ + return (vpfe_reg_read(ccdc, VPFE_SYNMODE) >> 15) & 1; +} + +static inline void vpfe_set_sdr_addr(struct vpfe_ccdc *ccdc, unsigned long addr) +{ + vpfe_reg_write(ccdc, addr & 0xffffffe0, VPFE_SDR_ADDR); +} + +static int vpfe_ccdc_set_hw_if_params(struct vpfe_ccdc *ccdc, + struct vpfe_hw_if_param *params) +{ + struct vpfe_device *vpfe = to_vpfe(ccdc); + + ccdc->ccdc_cfg.if_type = params->if_type; + + switch (params->if_type) { + case VPFE_BT656: + case VPFE_YCBCR_SYNC_16: + case VPFE_YCBCR_SYNC_8: + case VPFE_BT656_10BIT: + ccdc->ccdc_cfg.ycbcr.vd_pol = params->vdpol; + ccdc->ccdc_cfg.ycbcr.hd_pol = params->hdpol; + break; + + case VPFE_RAW_BAYER: + ccdc->ccdc_cfg.bayer.vd_pol = params->vdpol; + ccdc->ccdc_cfg.bayer.hd_pol = params->hdpol; + if (params->bus_width == 10) + ccdc->ccdc_cfg.bayer.config_params.data_sz = + VPFE_CCDC_DATA_10BITS; + else + ccdc->ccdc_cfg.bayer.config_params.data_sz = + VPFE_CCDC_DATA_8BITS; + vpfe_dbg(1, vpfe, "params.bus_width: %d\n", + params->bus_width); + vpfe_dbg(1, vpfe, "config_params.data_sz: %d\n", + 
ccdc->ccdc_cfg.bayer.config_params.data_sz); + break; + + default: + return -EINVAL; + } + + return 0; +} + +static void vpfe_clear_intr(struct vpfe_ccdc *ccdc, int vdint) +{ + unsigned int vpfe_int_status; + + vpfe_int_status = vpfe_reg_read(ccdc, VPFE_IRQ_STS); + + switch (vdint) { + /* VD0 interrupt */ + case VPFE_VDINT0: + vpfe_int_status &= ~VPFE_VDINT0; + vpfe_int_status |= VPFE_VDINT0; + break; + + /* VD1 interrupt */ + case VPFE_VDINT1: + vpfe_int_status &= ~VPFE_VDINT1; + vpfe_int_status |= VPFE_VDINT1; + break; + + /* VD2 interrupt */ + case VPFE_VDINT2: + vpfe_int_status &= ~VPFE_VDINT2; + vpfe_int_status |= VPFE_VDINT2; + break; + + /* Clear all interrupts */ + default: + vpfe_int_status &= ~(VPFE_VDINT0 | + VPFE_VDINT1 | + VPFE_VDINT2); + vpfe_int_status |= (VPFE_VDINT0 | + VPFE_VDINT1 | + VPFE_VDINT2); + break; + } + /* Clear specific VDINT from the status register */ + vpfe_reg_write(ccdc, vpfe_int_status, VPFE_IRQ_STS); + + vpfe_int_status = vpfe_reg_read(ccdc, VPFE_IRQ_STS); + + /* Acknowledge that we are done with all interrupts */ + vpfe_reg_write(ccdc, 1, VPFE_IRQ_EOI); +} + +static void vpfe_ccdc_config_defaults(struct vpfe_ccdc *ccdc) +{ + ccdc->ccdc_cfg.if_type = VPFE_RAW_BAYER; + + ccdc->ccdc_cfg.ycbcr.pix_fmt = CCDC_PIXFMT_YCBCR_8BIT; + ccdc->ccdc_cfg.ycbcr.frm_fmt = CCDC_FRMFMT_INTERLACED; + ccdc->ccdc_cfg.ycbcr.fid_pol = VPFE_PINPOL_POSITIVE; + ccdc->ccdc_cfg.ycbcr.vd_pol = VPFE_PINPOL_POSITIVE; + ccdc->ccdc_cfg.ycbcr.hd_pol = VPFE_PINPOL_POSITIVE; + ccdc->ccdc_cfg.ycbcr.pix_order = CCDC_PIXORDER_CBYCRY; + ccdc->ccdc_cfg.ycbcr.buf_type = CCDC_BUFTYPE_FLD_INTERLEAVED; + + ccdc->ccdc_cfg.ycbcr.win.left = 0; + ccdc->ccdc_cfg.ycbcr.win.top = 0; + ccdc->ccdc_cfg.ycbcr.win.width = 720; + ccdc->ccdc_cfg.ycbcr.win.height = 576; + ccdc->ccdc_cfg.ycbcr.bt656_enable = 1; + + ccdc->ccdc_cfg.bayer.pix_fmt = CCDC_PIXFMT_RAW; + ccdc->ccdc_cfg.bayer.frm_fmt = CCDC_FRMFMT_PROGRESSIVE; + ccdc->ccdc_cfg.bayer.fid_pol = VPFE_PINPOL_POSITIVE; + ccdc->ccdc_cfg.bayer.vd_pol = VPFE_PINPOL_POSITIVE; + ccdc->ccdc_cfg.bayer.hd_pol = VPFE_PINPOL_POSITIVE; + + ccdc->ccdc_cfg.bayer.win.left = 0; + ccdc->ccdc_cfg.bayer.win.top = 0; + ccdc->ccdc_cfg.bayer.win.width = 800; + ccdc->ccdc_cfg.bayer.win.height = 600; + ccdc->ccdc_cfg.bayer.config_params.data_sz = VPFE_CCDC_DATA_8BITS; + ccdc->ccdc_cfg.bayer.config_params.alaw.gamma_wd = + VPFE_CCDC_GAMMA_BITS_09_0; +} + +/* + * vpfe_get_ccdc_image_format - Get image parameters based on CCDC settings + */ +static int vpfe_get_ccdc_image_format(struct vpfe_device *vpfe, + struct v4l2_format *f) +{ + struct v4l2_rect image_win; + enum ccdc_buftype buf_type; + enum ccdc_frmfmt frm_fmt; + + memset(f, 0, sizeof(*f)); + f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + vpfe_ccdc_get_image_window(&vpfe->ccdc, &image_win); + f->fmt.pix.width = image_win.width; + f->fmt.pix.height = image_win.height; + f->fmt.pix.bytesperline = vpfe_ccdc_get_line_length(&vpfe->ccdc); + f->fmt.pix.sizeimage = f->fmt.pix.bytesperline * + f->fmt.pix.height; + buf_type = vpfe_ccdc_get_buftype(&vpfe->ccdc); + f->fmt.pix.pixelformat = vpfe_ccdc_get_pixel_format(&vpfe->ccdc); + frm_fmt = vpfe_ccdc_get_frame_format(&vpfe->ccdc); + + if (frm_fmt == CCDC_FRMFMT_PROGRESSIVE) { + f->fmt.pix.field = V4L2_FIELD_NONE; + } else if (frm_fmt == CCDC_FRMFMT_INTERLACED) { + if (buf_type == CCDC_BUFTYPE_FLD_INTERLEAVED) { + f->fmt.pix.field = V4L2_FIELD_INTERLACED; + } else if (buf_type == CCDC_BUFTYPE_FLD_SEPARATED) { + f->fmt.pix.field = V4L2_FIELD_SEQ_TB; + } else { + vpfe_err(vpfe, "Invalid 
buf_type\n"); + return -EINVAL; + } + } else { + vpfe_err(vpfe, "Invalid frm_fmt\n"); + return -EINVAL; + } + return 0; +} + +static int vpfe_config_ccdc_image_format(struct vpfe_device *vpfe) +{ + enum ccdc_frmfmt frm_fmt = CCDC_FRMFMT_INTERLACED; + u32 bpp; + int ret = 0; + + vpfe_dbg(1, vpfe, "pixelformat: %s\n", + print_fourcc(vpfe->fmt.fmt.pix.pixelformat)); + + if (vpfe_ccdc_set_pixel_format(&vpfe->ccdc, + vpfe->fmt.fmt.pix.pixelformat) < 0) { + vpfe_err(vpfe, "couldn't set pix format in ccdc\n"); + return -EINVAL; + } + + /* configure the image window */ + bpp = __get_bytesperpixel(vpfe, vpfe->current_vpfe_fmt); + vpfe_ccdc_set_image_window(&vpfe->ccdc, &vpfe->crop, bpp); + + switch (vpfe->fmt.fmt.pix.field) { + case V4L2_FIELD_INTERLACED: + /* do nothing, since it is default */ + ret = vpfe_ccdc_set_buftype( + &vpfe->ccdc, + CCDC_BUFTYPE_FLD_INTERLEAVED); + break; + + case V4L2_FIELD_NONE: + frm_fmt = CCDC_FRMFMT_PROGRESSIVE; + /* buffer type only applicable for interlaced scan */ + break; + + case V4L2_FIELD_SEQ_TB: + ret = vpfe_ccdc_set_buftype( + &vpfe->ccdc, + CCDC_BUFTYPE_FLD_SEPARATED); + break; + + default: + return -EINVAL; + } + + if (ret) + return ret; + + return vpfe_ccdc_set_frame_format(&vpfe->ccdc, frm_fmt); +} + +/* + * vpfe_config_image_format() + * For a given standard, this functions sets up the default + * pix format & crop values in the vpfe device and ccdc. It first + * starts with defaults based values from the standard table. + * It then checks if sub device supports get_fmt and then override the + * values based on that.Sets crop values to match with scan resolution + * starting at 0,0. It calls vpfe_config_ccdc_image_format() set the + * values in ccdc + */ +static int vpfe_config_image_format(struct vpfe_device *vpfe, + v4l2_std_id std_id) +{ + struct vpfe_fmt *fmt; + struct v4l2_mbus_framefmt mbus_fmt; + int i, ret; + + for (i = 0; i < ARRAY_SIZE(vpfe_standards); i++) { + if (vpfe_standards[i].std_id & std_id) { + vpfe->std_info.active_pixels = + vpfe_standards[i].width; + vpfe->std_info.active_lines = + vpfe_standards[i].height; + vpfe->std_info.frame_format = + vpfe_standards[i].frame_format; + vpfe->std_index = i; + + break; + } + } + + if (i == ARRAY_SIZE(vpfe_standards)) { + vpfe_err(vpfe, "standard not supported\n"); + return -EINVAL; + } + + ret = __subdev_get_format(vpfe, &mbus_fmt); + if (ret) + return ret; + + fmt = find_format_by_code(vpfe, mbus_fmt.code); + if (!fmt) { + vpfe_dbg(3, vpfe, "mbus code format (0x%08x) not found.\n", + mbus_fmt.code); + return -EINVAL; + } + + /* Save current subdev format */ + v4l2_fill_pix_format(&vpfe->fmt.fmt.pix, &mbus_fmt); + vpfe->fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + vpfe->fmt.fmt.pix.pixelformat = fmt->fourcc; + vpfe_calc_format_size(vpfe, fmt, &vpfe->fmt); + vpfe->current_vpfe_fmt = fmt; + + /* Update the crop window based on found values */ + vpfe->crop.top = 0; + vpfe->crop.left = 0; + vpfe->crop.width = mbus_fmt.width; + vpfe->crop.height = mbus_fmt.height; + + return vpfe_config_ccdc_image_format(vpfe); +} + +static int vpfe_initialize_device(struct vpfe_device *vpfe) +{ + struct vpfe_subdev_info *sdinfo; + int ret; + + sdinfo = &vpfe->cfg->sub_devs[0]; + sdinfo->sd = vpfe->sd[0]; + vpfe->current_input = 0; + vpfe->std_index = 0; + /* Configure the default format information */ + ret = vpfe_config_image_format(vpfe, + vpfe_standards[vpfe->std_index].std_id); + if (ret) + return ret; + + ret = pm_runtime_resume_and_get(vpfe->pdev); + if (ret < 0) + return ret; + + vpfe_config_enable(&vpfe->ccdc, 
1); + + vpfe_ccdc_restore_defaults(&vpfe->ccdc); + + /* Clear all VPFE interrupts */ + vpfe_clear_intr(&vpfe->ccdc, -1); + + return ret; +} + +/* + * vpfe_release : This function is based on the vb2_fop_release + * helper function. + * It has been augmented to handle module power management, + * by disabling/enabling h/w module fcntl clock when necessary. + */ +static int vpfe_release(struct file *file) +{ + struct vpfe_device *vpfe = video_drvdata(file); + bool fh_singular; + int ret; + + mutex_lock(&vpfe->lock); + + /* Save the singular status before we call the clean-up helper */ + fh_singular = v4l2_fh_is_singular_file(file); + + /* the release helper will cleanup any on-going streaming */ + ret = _vb2_fop_release(file, NULL); + + /* + * If this was the last open file. + * Then de-initialize hw module. + */ + if (fh_singular) + vpfe_ccdc_close(&vpfe->ccdc, vpfe->pdev); + + mutex_unlock(&vpfe->lock); + + return ret; +} + +/* + * vpfe_open : This function is based on the v4l2_fh_open helper function. + * It has been augmented to handle module power management, + * by disabling/enabling h/w module fcntl clock when necessary. + */ +static int vpfe_open(struct file *file) +{ + struct vpfe_device *vpfe = video_drvdata(file); + int ret; + + mutex_lock(&vpfe->lock); + + ret = v4l2_fh_open(file); + if (ret) { + vpfe_err(vpfe, "v4l2_fh_open failed\n"); + goto unlock; + } + + if (!v4l2_fh_is_singular_file(file)) + goto unlock; + + if (vpfe_initialize_device(vpfe)) { + v4l2_fh_release(file); + ret = -ENODEV; + } + +unlock: + mutex_unlock(&vpfe->lock); + return ret; +} + +/** + * vpfe_schedule_next_buffer: set next buffer address for capture + * @vpfe : ptr to vpfe device + * + * This function will get next buffer from the dma queue and + * set the buffer address in the vpfe register for capture. + * the buffer is marked active + */ +static void vpfe_schedule_next_buffer(struct vpfe_device *vpfe) +{ + dma_addr_t addr; + + spin_lock(&vpfe->dma_queue_lock); + if (list_empty(&vpfe->dma_queue)) { + spin_unlock(&vpfe->dma_queue_lock); + return; + } + + vpfe->next_frm = list_entry(vpfe->dma_queue.next, + struct vpfe_cap_buffer, list); + list_del(&vpfe->next_frm->list); + spin_unlock(&vpfe->dma_queue_lock); + + addr = vb2_dma_contig_plane_dma_addr(&vpfe->next_frm->vb.vb2_buf, 0); + vpfe_set_sdr_addr(&vpfe->ccdc, addr); +} + +static inline void vpfe_schedule_bottom_field(struct vpfe_device *vpfe) +{ + dma_addr_t addr; + + addr = vb2_dma_contig_plane_dma_addr(&vpfe->next_frm->vb.vb2_buf, 0) + + vpfe->field_off; + + vpfe_set_sdr_addr(&vpfe->ccdc, addr); +} + +/* + * vpfe_process_buffer_complete: process a completed buffer + * @vpfe : ptr to vpfe device + * + * This function time stamp the buffer and mark it as DONE. 
It also + * wake up any process waiting on the QUEUE and set the next buffer + * as current + */ +static inline void vpfe_process_buffer_complete(struct vpfe_device *vpfe) +{ + vpfe->cur_frm->vb.vb2_buf.timestamp = ktime_get_ns(); + vpfe->cur_frm->vb.field = vpfe->fmt.fmt.pix.field; + vpfe->cur_frm->vb.sequence = vpfe->sequence++; + vb2_buffer_done(&vpfe->cur_frm->vb.vb2_buf, VB2_BUF_STATE_DONE); + vpfe->cur_frm = vpfe->next_frm; +} + +static void vpfe_handle_interlaced_irq(struct vpfe_device *vpfe, + enum v4l2_field field) +{ + int fid; + + /* interlaced or TB capture check which field + * we are in hardware + */ + fid = vpfe_ccdc_getfid(&vpfe->ccdc); + + /* switch the software maintained field id */ + vpfe->field ^= 1; + if (fid == vpfe->field) { + /* we are in-sync here,continue */ + if (fid == 0) { + /* + * One frame is just being captured. If the + * next frame is available, release the + * current frame and move on + */ + if (vpfe->cur_frm != vpfe->next_frm) + vpfe_process_buffer_complete(vpfe); + + if (vpfe->stopping) + return; + + /* + * based on whether the two fields are stored + * interleave or separately in memory, + * reconfigure the CCDC memory address + */ + if (field == V4L2_FIELD_SEQ_TB) + vpfe_schedule_bottom_field(vpfe); + } else { + /* + * if one field is just being captured configure + * the next frame get the next frame from the empty + * queue if no frame is available hold on to the + * current buffer + */ + if (vpfe->cur_frm == vpfe->next_frm) + vpfe_schedule_next_buffer(vpfe); + } + } else if (fid == 0) { + /* + * out of sync. Recover from any hardware out-of-sync. + * May loose one frame + */ + vpfe->field = fid; + } +} + +/* + * vpfe_isr : ISR handler for vpfe capture (VINT0) + * @irq: irq number + * @dev_id: dev_id ptr + * + * It changes status of the captured buffer, takes next buffer from the queue + * and sets its address in VPFE registers + */ +static irqreturn_t vpfe_isr(int irq, void *dev) +{ + struct vpfe_device *vpfe = (struct vpfe_device *)dev; + enum v4l2_field field = vpfe->fmt.fmt.pix.field; + int intr_status, stopping = vpfe->stopping; + + intr_status = vpfe_reg_read(&vpfe->ccdc, VPFE_IRQ_STS); + + if (intr_status & VPFE_VDINT0) { + if (field == V4L2_FIELD_NONE) { + if (vpfe->cur_frm != vpfe->next_frm) + vpfe_process_buffer_complete(vpfe); + } else { + vpfe_handle_interlaced_irq(vpfe, field); + } + if (stopping) { + vpfe->stopping = false; + complete(&vpfe->capture_stop); + } + } + + if (intr_status & VPFE_VDINT1 && !stopping) { + if (field == V4L2_FIELD_NONE && + vpfe->cur_frm == vpfe->next_frm) + vpfe_schedule_next_buffer(vpfe); + } + + vpfe_clear_intr(&vpfe->ccdc, intr_status); + + return IRQ_HANDLED; +} + +static inline void vpfe_detach_irq(struct vpfe_device *vpfe) +{ + unsigned int intr = VPFE_VDINT0; + enum ccdc_frmfmt frame_format; + + frame_format = vpfe_ccdc_get_frame_format(&vpfe->ccdc); + if (frame_format == CCDC_FRMFMT_PROGRESSIVE) + intr |= VPFE_VDINT1; + + vpfe_reg_write(&vpfe->ccdc, intr, VPFE_IRQ_EN_CLR); +} + +static inline void vpfe_attach_irq(struct vpfe_device *vpfe) +{ + unsigned int intr = VPFE_VDINT0; + enum ccdc_frmfmt frame_format; + + frame_format = vpfe_ccdc_get_frame_format(&vpfe->ccdc); + if (frame_format == CCDC_FRMFMT_PROGRESSIVE) + intr |= VPFE_VDINT1; + + vpfe_reg_write(&vpfe->ccdc, intr, VPFE_IRQ_EN_SET); +} + +static int vpfe_querycap(struct file *file, void *priv, + struct v4l2_capability *cap) +{ + struct vpfe_device *vpfe = video_drvdata(file); + + strscpy(cap->driver, VPFE_MODULE_NAME, sizeof(cap->driver)); + 
strscpy(cap->card, "TI AM437x VPFE", sizeof(cap->card)); + snprintf(cap->bus_info, sizeof(cap->bus_info), + "platform:%s", vpfe->v4l2_dev.name); + return 0; +} + +/* get the format set at output pad of the adjacent subdev */ +static int __subdev_get_format(struct vpfe_device *vpfe, + struct v4l2_mbus_framefmt *fmt) +{ + struct v4l2_subdev *sd = vpfe->current_subdev->sd; + struct v4l2_subdev_format sd_fmt = { + .which = V4L2_SUBDEV_FORMAT_ACTIVE, + .pad = 0, + }; + struct v4l2_mbus_framefmt *mbus_fmt = &sd_fmt.format; + int ret; + + ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &sd_fmt); + if (ret) + return ret; + + *fmt = *mbus_fmt; + + vpfe_dbg(1, vpfe, "%s: %dx%d code:%04X\n", __func__, + fmt->width, fmt->height, fmt->code); + + return 0; +} + +/* set the format at output pad of the adjacent subdev */ +static int __subdev_set_format(struct vpfe_device *vpfe, + struct v4l2_mbus_framefmt *fmt) +{ + struct v4l2_subdev *sd = vpfe->current_subdev->sd; + struct v4l2_subdev_format sd_fmt = { + .which = V4L2_SUBDEV_FORMAT_ACTIVE, + .pad = 0, + }; + struct v4l2_mbus_framefmt *mbus_fmt = &sd_fmt.format; + int ret; + + *mbus_fmt = *fmt; + + ret = v4l2_subdev_call(sd, pad, set_fmt, NULL, &sd_fmt); + if (ret) + return ret; + + vpfe_dbg(1, vpfe, "%s %dx%d code:%04X\n", __func__, + fmt->width, fmt->height, fmt->code); + + return 0; +} + +static int vpfe_calc_format_size(struct vpfe_device *vpfe, + const struct vpfe_fmt *fmt, + struct v4l2_format *f) +{ + u32 bpp; + + if (!fmt) { + vpfe_dbg(3, vpfe, "No vpfe_fmt provided!\n"); + return -EINVAL; + } + + bpp = __get_bytesperpixel(vpfe, fmt); + + /* pitch should be 32 bytes aligned */ + f->fmt.pix.bytesperline = ALIGN(f->fmt.pix.width * bpp, 32); + f->fmt.pix.sizeimage = f->fmt.pix.bytesperline * + f->fmt.pix.height; + + vpfe_dbg(3, vpfe, "%s: fourcc: %s size: %dx%d bpl:%d img_size:%d\n", + __func__, print_fourcc(f->fmt.pix.pixelformat), + f->fmt.pix.width, f->fmt.pix.height, + f->fmt.pix.bytesperline, f->fmt.pix.sizeimage); + + return 0; +} + +static int vpfe_g_fmt(struct file *file, void *priv, + struct v4l2_format *fmt) +{ + struct vpfe_device *vpfe = video_drvdata(file); + + *fmt = vpfe->fmt; + + return 0; +} + +static int vpfe_enum_fmt(struct file *file, void *priv, + struct v4l2_fmtdesc *f) +{ + struct vpfe_device *vpfe = video_drvdata(file); + struct vpfe_subdev_info *sdinfo; + struct vpfe_fmt *fmt; + + sdinfo = vpfe->current_subdev; + if (!sdinfo->sd) + return -EINVAL; + + if (f->index >= vpfe->num_active_fmt) + return -EINVAL; + + fmt = vpfe->active_fmt[f->index]; + + f->pixelformat = fmt->fourcc; + + vpfe_dbg(1, vpfe, "%s: mbus index: %d code: %x pixelformat: %s\n", + __func__, f->index, fmt->code, print_fourcc(fmt->fourcc)); + + return 0; +} + +static int vpfe_try_fmt(struct file *file, void *priv, + struct v4l2_format *f) +{ + struct vpfe_device *vpfe = video_drvdata(file); + struct v4l2_subdev *sd = vpfe->current_subdev->sd; + const struct vpfe_fmt *fmt; + struct v4l2_subdev_frame_size_enum fse = { + .which = V4L2_SUBDEV_FORMAT_ACTIVE, + }; + int ret, found; + + fmt = find_format_by_pix(vpfe, f->fmt.pix.pixelformat); + if (!fmt) { + /* default to first entry */ + vpfe_dbg(3, vpfe, "Invalid pixel code: %x, default used instead\n", + f->fmt.pix.pixelformat); + fmt = vpfe->active_fmt[0]; + f->fmt.pix.pixelformat = fmt->fourcc; + } + + f->fmt.pix.field = vpfe->fmt.fmt.pix.field; + + /* check for/find a valid width/height */ + ret = 0; + found = false; + fse.pad = 0; + fse.code = fmt->code; + for (fse.index = 0; ; fse.index++) { + ret = 
v4l2_subdev_call(sd, pad, enum_frame_size, + NULL, &fse); + if (ret) + break; + + if (f->fmt.pix.width == fse.max_width && + f->fmt.pix.height == fse.max_height) { + found = true; + break; + } else if (f->fmt.pix.width >= fse.min_width && + f->fmt.pix.width <= fse.max_width && + f->fmt.pix.height >= fse.min_height && + f->fmt.pix.height <= fse.max_height) { + found = true; + break; + } + } + + if (!found) { + /* use existing values as default */ + f->fmt.pix.width = vpfe->fmt.fmt.pix.width; + f->fmt.pix.height = vpfe->fmt.fmt.pix.height; + } + + /* + * Use current colorspace for now, it will get + * updated properly during s_fmt + */ + f->fmt.pix.colorspace = vpfe->fmt.fmt.pix.colorspace; + return vpfe_calc_format_size(vpfe, fmt, f); +} + +static int vpfe_s_fmt(struct file *file, void *priv, + struct v4l2_format *fmt) +{ + struct vpfe_device *vpfe = video_drvdata(file); + struct vpfe_fmt *f; + struct v4l2_mbus_framefmt mbus_fmt; + int ret; + + /* If streaming is started, return error */ + if (vb2_is_busy(&vpfe->buffer_queue)) { + vpfe_err(vpfe, "%s device busy\n", __func__); + return -EBUSY; + } + + ret = vpfe_try_fmt(file, priv, fmt); + if (ret < 0) + return ret; + + f = find_format_by_pix(vpfe, fmt->fmt.pix.pixelformat); + + v4l2_fill_mbus_format(&mbus_fmt, &fmt->fmt.pix, f->code); + + ret = __subdev_set_format(vpfe, &mbus_fmt); + if (ret) + return ret; + + /* Just double check nothing has gone wrong */ + if (mbus_fmt.code != f->code) { + vpfe_dbg(3, vpfe, + "%s subdev changed format on us, this should not happen\n", + __func__); + return -EINVAL; + } + + v4l2_fill_pix_format(&vpfe->fmt.fmt.pix, &mbus_fmt); + vpfe->fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + vpfe->fmt.fmt.pix.pixelformat = f->fourcc; + vpfe_calc_format_size(vpfe, f, &vpfe->fmt); + *fmt = vpfe->fmt; + vpfe->current_vpfe_fmt = f; + + /* Update the crop window based on found values */ + vpfe->crop.width = fmt->fmt.pix.width; + vpfe->crop.height = fmt->fmt.pix.height; + + /* set image capture parameters in the ccdc */ + return vpfe_config_ccdc_image_format(vpfe); +} + +static int vpfe_enum_size(struct file *file, void *priv, + struct v4l2_frmsizeenum *fsize) +{ + struct vpfe_device *vpfe = video_drvdata(file); + struct v4l2_subdev_frame_size_enum fse = { + .which = V4L2_SUBDEV_FORMAT_ACTIVE, + }; + struct v4l2_subdev *sd = vpfe->current_subdev->sd; + struct vpfe_fmt *fmt; + int ret; + + /* check for valid format */ + fmt = find_format_by_pix(vpfe, fsize->pixel_format); + if (!fmt) { + vpfe_dbg(3, vpfe, "Invalid pixel code: %x\n", + fsize->pixel_format); + return -EINVAL; + } + + memset(fsize->reserved, 0x0, sizeof(fsize->reserved)); + + fse.index = fsize->index; + fse.pad = 0; + fse.code = fmt->code; + ret = v4l2_subdev_call(sd, pad, enum_frame_size, NULL, &fse); + if (ret) + return ret; + + vpfe_dbg(1, vpfe, "%s: index: %d code: %x W:[%d,%d] H:[%d,%d]\n", + __func__, fse.index, fse.code, fse.min_width, fse.max_width, + fse.min_height, fse.max_height); + + fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE; + fsize->discrete.width = fse.max_width; + fsize->discrete.height = fse.max_height; + + vpfe_dbg(1, vpfe, "%s: index: %d pixformat: %s size: %dx%d\n", + __func__, fsize->index, print_fourcc(fsize->pixel_format), + fsize->discrete.width, fsize->discrete.height); + + return 0; +} + +/* + * vpfe_get_subdev_input_index - Get subdev index and subdev input index for a + * given app input index + */ +static int +vpfe_get_subdev_input_index(struct vpfe_device *vpfe, + int *subdev_index, + int *subdev_input_index, + int app_input_index) +{ + 
int i, j = 0; + + for (i = 0; i < ARRAY_SIZE(vpfe->cfg->asd); i++) { + if (app_input_index < (j + 1)) { + *subdev_index = i; + *subdev_input_index = app_input_index - j; + return 0; + } + j++; + } + return -EINVAL; +} + +/* + * vpfe_get_app_input - Get app input index for a given subdev input index + * driver stores the input index of the current sub device and translate it + * when application request the current input + */ +static int vpfe_get_app_input_index(struct vpfe_device *vpfe, + int *app_input_index) +{ + struct vpfe_config *cfg = vpfe->cfg; + struct vpfe_subdev_info *sdinfo; + struct i2c_client *client; + struct i2c_client *curr_client; + int i, j = 0; + + curr_client = v4l2_get_subdevdata(vpfe->current_subdev->sd); + for (i = 0; i < ARRAY_SIZE(vpfe->cfg->asd); i++) { + sdinfo = &cfg->sub_devs[i]; + client = v4l2_get_subdevdata(sdinfo->sd); + if (client->addr == curr_client->addr && + client->adapter->nr == curr_client->adapter->nr) { + if (vpfe->current_input >= 1) + return -1; + *app_input_index = j + vpfe->current_input; + return 0; + } + j++; + } + return -EINVAL; +} + +static int vpfe_enum_input(struct file *file, void *priv, + struct v4l2_input *inp) +{ + struct vpfe_device *vpfe = video_drvdata(file); + struct vpfe_subdev_info *sdinfo; + int subdev, index; + + if (vpfe_get_subdev_input_index(vpfe, &subdev, &index, + inp->index) < 0) { + vpfe_dbg(1, vpfe, + "input information not found for the subdev\n"); + return -EINVAL; + } + sdinfo = &vpfe->cfg->sub_devs[subdev]; + *inp = sdinfo->inputs[index]; + + return 0; +} + +static int vpfe_g_input(struct file *file, void *priv, unsigned int *index) +{ + struct vpfe_device *vpfe = video_drvdata(file); + + return vpfe_get_app_input_index(vpfe, index); +} + +/* Assumes caller is holding vpfe_dev->lock */ +static int vpfe_set_input(struct vpfe_device *vpfe, unsigned int index) +{ + int subdev_index = 0, inp_index = 0; + struct vpfe_subdev_info *sdinfo; + struct vpfe_route *route; + u32 input, output; + int ret; + + /* If streaming is started, return error */ + if (vb2_is_busy(&vpfe->buffer_queue)) { + vpfe_err(vpfe, "%s device busy\n", __func__); + return -EBUSY; + } + ret = vpfe_get_subdev_input_index(vpfe, + &subdev_index, + &inp_index, + index); + if (ret < 0) { + vpfe_err(vpfe, "invalid input index: %d\n", index); + goto get_out; + } + + sdinfo = &vpfe->cfg->sub_devs[subdev_index]; + sdinfo->sd = vpfe->sd[subdev_index]; + route = &sdinfo->routes[inp_index]; + if (route && sdinfo->can_route) { + input = route->input; + output = route->output; + if (sdinfo->sd) { + ret = v4l2_subdev_call(sdinfo->sd, video, + s_routing, input, output, 0); + if (ret) { + vpfe_err(vpfe, "s_routing failed\n"); + ret = -EINVAL; + goto get_out; + } + } + + } + + vpfe->current_subdev = sdinfo; + if (sdinfo->sd) + vpfe->v4l2_dev.ctrl_handler = sdinfo->sd->ctrl_handler; + vpfe->current_input = index; + vpfe->std_index = 0; + + /* set the bus/interface parameter for the sub device in ccdc */ + ret = vpfe_ccdc_set_hw_if_params(&vpfe->ccdc, &sdinfo->vpfe_param); + if (ret) + return ret; + + /* set the default image parameters in the device */ + return vpfe_config_image_format(vpfe, + vpfe_standards[vpfe->std_index].std_id); + +get_out: + return ret; +} + +static int vpfe_s_input(struct file *file, void *priv, unsigned int index) +{ + struct vpfe_device *vpfe = video_drvdata(file); + + return vpfe_set_input(vpfe, index); +} + +static int vpfe_querystd(struct file *file, void *priv, v4l2_std_id *std_id) +{ + struct vpfe_device *vpfe = video_drvdata(file); + 
struct vpfe_subdev_info *sdinfo; + + sdinfo = vpfe->current_subdev; + if (!(sdinfo->inputs[0].capabilities & V4L2_IN_CAP_STD)) + return -ENODATA; + + /* Call querystd function of decoder device */ + return v4l2_device_call_until_err(&vpfe->v4l2_dev, sdinfo->grp_id, + video, querystd, std_id); +} + +static int vpfe_s_std(struct file *file, void *priv, v4l2_std_id std_id) +{ + struct vpfe_device *vpfe = video_drvdata(file); + struct vpfe_subdev_info *sdinfo; + int ret; + + sdinfo = vpfe->current_subdev; + if (!(sdinfo->inputs[0].capabilities & V4L2_IN_CAP_STD)) + return -ENODATA; + + /* if trying to set the same std then nothing to do */ + if (vpfe_standards[vpfe->std_index].std_id == std_id) + return 0; + + /* If streaming is started, return error */ + if (vb2_is_busy(&vpfe->buffer_queue)) { + vpfe_err(vpfe, "%s device busy\n", __func__); + ret = -EBUSY; + return ret; + } + + ret = v4l2_device_call_until_err(&vpfe->v4l2_dev, sdinfo->grp_id, + video, s_std, std_id); + if (ret < 0) { + vpfe_err(vpfe, "Failed to set standard\n"); + return ret; + } + ret = vpfe_config_image_format(vpfe, std_id); + + return ret; +} + +static int vpfe_g_std(struct file *file, void *priv, v4l2_std_id *std_id) +{ + struct vpfe_device *vpfe = video_drvdata(file); + struct vpfe_subdev_info *sdinfo; + + sdinfo = vpfe->current_subdev; + if (sdinfo->inputs[0].capabilities != V4L2_IN_CAP_STD) + return -ENODATA; + + *std_id = vpfe_standards[vpfe->std_index].std_id; + + return 0; +} + +/* + * vpfe_calculate_offsets : This function calculates buffers offset + * for top and bottom field + */ +static void vpfe_calculate_offsets(struct vpfe_device *vpfe) +{ + struct v4l2_rect image_win; + + vpfe_ccdc_get_image_window(&vpfe->ccdc, &image_win); + vpfe->field_off = image_win.height * image_win.width; +} + +/* + * vpfe_queue_setup - Callback function for buffer setup. + * @vq: vb2_queue ptr + * @nbuffers: ptr to number of buffers requested by application + * @nplanes:: contains number of distinct video planes needed to hold a frame + * @sizes[]: contains the size (in bytes) of each plane. + * @alloc_devs: ptr to allocation context + * + * This callback function is called when reqbuf() is called to adjust + * the buffer count and buffer size + */ +static int vpfe_queue_setup(struct vb2_queue *vq, + unsigned int *nbuffers, unsigned int *nplanes, + unsigned int sizes[], struct device *alloc_devs[]) +{ + struct vpfe_device *vpfe = vb2_get_drv_priv(vq); + unsigned size = vpfe->fmt.fmt.pix.sizeimage; + + if (vq->num_buffers + *nbuffers < 3) + *nbuffers = 3 - vq->num_buffers; + + if (*nplanes) { + if (sizes[0] < size) + return -EINVAL; + size = sizes[0]; + } + + *nplanes = 1; + sizes[0] = size; + + vpfe_dbg(1, vpfe, + "nbuffers=%d, size=%u\n", *nbuffers, sizes[0]); + + /* Calculate field offset */ + vpfe_calculate_offsets(vpfe); + + return 0; +} + +/* + * vpfe_buffer_prepare : callback function for buffer prepare + * @vb: ptr to vb2_buffer + * + * This is the callback function for buffer prepare when vb2_qbuf() + * function is called. 
The buffer is prepared and user space virtual address + * or user address is converted into physical address + */ +static int vpfe_buffer_prepare(struct vb2_buffer *vb) +{ + struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); + struct vpfe_device *vpfe = vb2_get_drv_priv(vb->vb2_queue); + + vb2_set_plane_payload(vb, 0, vpfe->fmt.fmt.pix.sizeimage); + + if (vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0)) + return -EINVAL; + + vbuf->field = vpfe->fmt.fmt.pix.field; + + return 0; +} + +/* + * vpfe_buffer_queue : Callback function to add buffer to DMA queue + * @vb: ptr to vb2_buffer + */ +static void vpfe_buffer_queue(struct vb2_buffer *vb) +{ + struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); + struct vpfe_device *vpfe = vb2_get_drv_priv(vb->vb2_queue); + struct vpfe_cap_buffer *buf = to_vpfe_buffer(vbuf); + unsigned long flags = 0; + + /* add the buffer to the DMA queue */ + spin_lock_irqsave(&vpfe->dma_queue_lock, flags); + list_add_tail(&buf->list, &vpfe->dma_queue); + spin_unlock_irqrestore(&vpfe->dma_queue_lock, flags); +} + +static void vpfe_return_all_buffers(struct vpfe_device *vpfe, + enum vb2_buffer_state state) +{ + struct vpfe_cap_buffer *buf, *node; + unsigned long flags; + + spin_lock_irqsave(&vpfe->dma_queue_lock, flags); + list_for_each_entry_safe(buf, node, &vpfe->dma_queue, list) { + vb2_buffer_done(&buf->vb.vb2_buf, state); + list_del(&buf->list); + } + + if (vpfe->cur_frm) + vb2_buffer_done(&vpfe->cur_frm->vb.vb2_buf, state); + + if (vpfe->next_frm && vpfe->next_frm != vpfe->cur_frm) + vb2_buffer_done(&vpfe->next_frm->vb.vb2_buf, state); + + vpfe->cur_frm = NULL; + vpfe->next_frm = NULL; + spin_unlock_irqrestore(&vpfe->dma_queue_lock, flags); +} + +/* + * vpfe_start_streaming : Starts the DMA engine for streaming + * @vb: ptr to vb2_buffer + * @count: number of buffers + */ +static int vpfe_start_streaming(struct vb2_queue *vq, unsigned int count) +{ + struct vpfe_device *vpfe = vb2_get_drv_priv(vq); + struct vpfe_subdev_info *sdinfo; + unsigned long flags; + unsigned long addr; + int ret; + + spin_lock_irqsave(&vpfe->dma_queue_lock, flags); + + vpfe->field = 0; + vpfe->sequence = 0; + + sdinfo = vpfe->current_subdev; + + vpfe_attach_irq(vpfe); + + vpfe->stopping = false; + init_completion(&vpfe->capture_stop); + + if (vpfe->ccdc.ccdc_cfg.if_type == VPFE_RAW_BAYER) + vpfe_ccdc_config_raw(&vpfe->ccdc); + else + vpfe_ccdc_config_ycbcr(&vpfe->ccdc); + + /* Get the next frame from the buffer queue */ + vpfe->next_frm = list_entry(vpfe->dma_queue.next, + struct vpfe_cap_buffer, list); + vpfe->cur_frm = vpfe->next_frm; + /* Remove buffer from the buffer queue */ + list_del(&vpfe->cur_frm->list); + spin_unlock_irqrestore(&vpfe->dma_queue_lock, flags); + + addr = vb2_dma_contig_plane_dma_addr(&vpfe->cur_frm->vb.vb2_buf, 0); + + vpfe_set_sdr_addr(&vpfe->ccdc, (unsigned long)(addr)); + + vpfe_pcr_enable(&vpfe->ccdc, 1); + + ret = v4l2_subdev_call(sdinfo->sd, video, s_stream, 1); + if (ret < 0) { + vpfe_err(vpfe, "Error in attaching interrupt handle\n"); + goto err; + } + + return 0; + +err: + vpfe_return_all_buffers(vpfe, VB2_BUF_STATE_QUEUED); + vpfe_pcr_enable(&vpfe->ccdc, 0); + return ret; +} + +/* + * vpfe_stop_streaming : Stop the DMA engine + * @vq: ptr to vb2_queue + * + * This callback stops the DMA engine and any remaining buffers + * in the DMA queue are released. 
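+ * The connected sub device is told to stop streaming as well, and any
+ * buffers still held by the driver are returned to vb2 in the error
+ * state.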
+ */ +static void vpfe_stop_streaming(struct vb2_queue *vq) +{ + struct vpfe_device *vpfe = vb2_get_drv_priv(vq); + struct vpfe_subdev_info *sdinfo; + int ret; + + vpfe_pcr_enable(&vpfe->ccdc, 0); + + /* Wait for the last frame to be captured */ + vpfe->stopping = true; + wait_for_completion_timeout(&vpfe->capture_stop, + msecs_to_jiffies(250)); + + vpfe_detach_irq(vpfe); + + sdinfo = vpfe->current_subdev; + ret = v4l2_subdev_call(sdinfo->sd, video, s_stream, 0); + if (ret && ret != -ENOIOCTLCMD && ret != -ENODEV) + vpfe_dbg(1, vpfe, "stream off failed in subdev\n"); + + /* release all active buffers */ + vpfe_return_all_buffers(vpfe, VB2_BUF_STATE_ERROR); +} + +static int vpfe_g_pixelaspect(struct file *file, void *priv, + int type, struct v4l2_fract *f) +{ + struct vpfe_device *vpfe = video_drvdata(file); + + if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE || + vpfe->std_index >= ARRAY_SIZE(vpfe_standards)) + return -EINVAL; + + *f = vpfe_standards[vpfe->std_index].pixelaspect; + + return 0; +} + +static int +vpfe_g_selection(struct file *file, void *fh, struct v4l2_selection *s) +{ + struct vpfe_device *vpfe = video_drvdata(file); + + if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE || + vpfe->std_index >= ARRAY_SIZE(vpfe_standards)) + return -EINVAL; + + switch (s->target) { + case V4L2_SEL_TGT_CROP_BOUNDS: + case V4L2_SEL_TGT_CROP_DEFAULT: + s->r.left = 0; + s->r.top = 0; + s->r.width = vpfe_standards[vpfe->std_index].width; + s->r.height = vpfe_standards[vpfe->std_index].height; + break; + + case V4L2_SEL_TGT_CROP: + s->r = vpfe->crop; + break; + + default: + return -EINVAL; + } + + return 0; +} + +static int +vpfe_s_selection(struct file *file, void *fh, struct v4l2_selection *s) +{ + struct vpfe_device *vpfe = video_drvdata(file); + struct v4l2_rect cr = vpfe->crop; + struct v4l2_rect r = s->r; + u32 bpp; + + /* If streaming is started, return error */ + if (vb2_is_busy(&vpfe->buffer_queue)) { + vpfe_err(vpfe, "%s device busy\n", __func__); + return -EBUSY; + } + + if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE || + s->target != V4L2_SEL_TGT_CROP) + return -EINVAL; + + v4l_bound_align_image(&r.width, 0, cr.width, 0, + &r.height, 0, cr.height, 0, 0); + + r.left = clamp_t(unsigned int, r.left, 0, cr.width - r.width); + r.top = clamp_t(unsigned int, r.top, 0, cr.height - r.height); + + if (s->flags & V4L2_SEL_FLAG_LE && !v4l2_rect_enclosed(&r, &s->r)) + return -ERANGE; + + if (s->flags & V4L2_SEL_FLAG_GE && !v4l2_rect_enclosed(&s->r, &r)) + return -ERANGE; + + s->r = vpfe->crop = r; + + bpp = __get_bytesperpixel(vpfe, vpfe->current_vpfe_fmt); + vpfe_ccdc_set_image_window(&vpfe->ccdc, &r, bpp); + vpfe->fmt.fmt.pix.width = r.width; + vpfe->fmt.fmt.pix.height = r.height; + vpfe->fmt.fmt.pix.bytesperline = + vpfe_ccdc_get_line_length(&vpfe->ccdc); + vpfe->fmt.fmt.pix.sizeimage = vpfe->fmt.fmt.pix.bytesperline * + vpfe->fmt.fmt.pix.height; + + vpfe_dbg(1, vpfe, "cropped (%d,%d)/%dx%d of %dx%d\n", + r.left, r.top, r.width, r.height, cr.width, cr.height); + + return 0; +} + +static long vpfe_ioctl_default(struct file *file, void *priv, + bool valid_prio, unsigned int cmd, void *param) +{ + struct vpfe_device *vpfe = video_drvdata(file); + int ret; + + if (!valid_prio) { + vpfe_err(vpfe, "%s device busy\n", __func__); + return -EBUSY; + } + + /* If streaming is started, return error */ + if (vb2_is_busy(&vpfe->buffer_queue)) { + vpfe_err(vpfe, "%s device busy\n", __func__); + return -EBUSY; + } + + switch (cmd) { + case VIDIOC_AM437X_CCDC_CFG: + ret = vpfe_ccdc_set_params(&vpfe->ccdc, (void __user *)param); + 
if (ret) { + vpfe_dbg(2, vpfe, + "Error setting parameters in CCDC\n"); + return ret; + } + ret = vpfe_get_ccdc_image_format(vpfe, + &vpfe->fmt); + if (ret < 0) { + vpfe_dbg(2, vpfe, + "Invalid image format at CCDC\n"); + return ret; + } + break; + + default: + ret = -ENOTTY; + break; + } + + return ret; +} + +static const struct vb2_ops vpfe_video_qops = { + .wait_prepare = vb2_ops_wait_prepare, + .wait_finish = vb2_ops_wait_finish, + .queue_setup = vpfe_queue_setup, + .buf_prepare = vpfe_buffer_prepare, + .buf_queue = vpfe_buffer_queue, + .start_streaming = vpfe_start_streaming, + .stop_streaming = vpfe_stop_streaming, +}; + +/* vpfe capture driver file operations */ +static const struct v4l2_file_operations vpfe_fops = { + .owner = THIS_MODULE, + .open = vpfe_open, + .release = vpfe_release, + .read = vb2_fop_read, + .poll = vb2_fop_poll, + .unlocked_ioctl = video_ioctl2, + .mmap = vb2_fop_mmap, +}; + +/* vpfe capture ioctl operations */ +static const struct v4l2_ioctl_ops vpfe_ioctl_ops = { + .vidioc_querycap = vpfe_querycap, + .vidioc_enum_fmt_vid_cap = vpfe_enum_fmt, + .vidioc_g_fmt_vid_cap = vpfe_g_fmt, + .vidioc_s_fmt_vid_cap = vpfe_s_fmt, + .vidioc_try_fmt_vid_cap = vpfe_try_fmt, + + .vidioc_enum_framesizes = vpfe_enum_size, + + .vidioc_enum_input = vpfe_enum_input, + .vidioc_g_input = vpfe_g_input, + .vidioc_s_input = vpfe_s_input, + + .vidioc_querystd = vpfe_querystd, + .vidioc_s_std = vpfe_s_std, + .vidioc_g_std = vpfe_g_std, + + .vidioc_reqbufs = vb2_ioctl_reqbufs, + .vidioc_create_bufs = vb2_ioctl_create_bufs, + .vidioc_prepare_buf = vb2_ioctl_prepare_buf, + .vidioc_querybuf = vb2_ioctl_querybuf, + .vidioc_qbuf = vb2_ioctl_qbuf, + .vidioc_dqbuf = vb2_ioctl_dqbuf, + .vidioc_expbuf = vb2_ioctl_expbuf, + .vidioc_streamon = vb2_ioctl_streamon, + .vidioc_streamoff = vb2_ioctl_streamoff, + + .vidioc_log_status = v4l2_ctrl_log_status, + .vidioc_subscribe_event = v4l2_ctrl_subscribe_event, + .vidioc_unsubscribe_event = v4l2_event_unsubscribe, + + .vidioc_g_pixelaspect = vpfe_g_pixelaspect, + .vidioc_g_selection = vpfe_g_selection, + .vidioc_s_selection = vpfe_s_selection, + + .vidioc_default = vpfe_ioctl_default, +}; + +static int +vpfe_async_bound(struct v4l2_async_notifier *notifier, + struct v4l2_subdev *subdev, + struct v4l2_async_connection *asd) +{ + struct vpfe_device *vpfe = container_of(notifier->v4l2_dev, + struct vpfe_device, v4l2_dev); + struct vpfe_subdev_info *sdinfo; + struct vpfe_fmt *fmt; + int ret = 0; + bool found = false; + int i, j, k; + + for (i = 0; i < ARRAY_SIZE(vpfe->cfg->asd); i++) { + if (vpfe->cfg->asd[i]->match.fwnode == + asd[i].match.fwnode) { + sdinfo = &vpfe->cfg->sub_devs[i]; + vpfe->sd[i] = subdev; + vpfe->sd[i]->grp_id = sdinfo->grp_id; + found = true; + break; + } + } + + if (!found) { + vpfe_info(vpfe, "sub device (%s) not matched\n", subdev->name); + return -EINVAL; + } + + vpfe->video_dev.tvnorms |= sdinfo->inputs[0].std; + + vpfe->num_active_fmt = 0; + for (j = 0, i = 0; (ret != -EINVAL); ++j) { + struct v4l2_subdev_mbus_code_enum mbus_code = { + .index = j, + .which = V4L2_SUBDEV_FORMAT_ACTIVE, + }; + + ret = v4l2_subdev_call(subdev, pad, enum_mbus_code, + NULL, &mbus_code); + if (ret) + continue; + + vpfe_dbg(3, vpfe, + "subdev %s: code: %04x idx: %d\n", + subdev->name, mbus_code.code, j); + + for (k = 0; k < ARRAY_SIZE(formats); k++) { + fmt = &formats[k]; + if (mbus_code.code != fmt->code) + continue; + vpfe->active_fmt[i] = fmt; + vpfe_dbg(3, vpfe, + "matched fourcc: %s code: %04x idx: %d\n", + print_fourcc(fmt->fourcc), mbus_code.code, 
i); + vpfe->num_active_fmt = ++i; + } + } + + if (!i) { + vpfe_err(vpfe, "No suitable format reported by subdev %s\n", + subdev->name); + return -EINVAL; + } + return 0; +} + +static int vpfe_probe_complete(struct vpfe_device *vpfe) +{ + struct video_device *vdev; + struct vb2_queue *q; + int err; + + spin_lock_init(&vpfe->dma_queue_lock); + mutex_init(&vpfe->lock); + + vpfe->fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + + /* set first sub device as current one */ + vpfe->current_subdev = &vpfe->cfg->sub_devs[0]; + vpfe->v4l2_dev.ctrl_handler = vpfe->sd[0]->ctrl_handler; + + err = vpfe_set_input(vpfe, 0); + if (err) + goto probe_out; + + /* Initialize videobuf2 queue as per the buffer type */ + q = &vpfe->buffer_queue; + q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ; + q->drv_priv = vpfe; + q->ops = &vpfe_video_qops; + q->mem_ops = &vb2_dma_contig_memops; + q->buf_struct_size = sizeof(struct vpfe_cap_buffer); + q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; + q->lock = &vpfe->lock; + q->min_buffers_needed = 1; + q->dev = vpfe->pdev; + + err = vb2_queue_init(q); + if (err) { + vpfe_err(vpfe, "vb2_queue_init() failed\n"); + goto probe_out; + } + + INIT_LIST_HEAD(&vpfe->dma_queue); + + vdev = &vpfe->video_dev; + strscpy(vdev->name, VPFE_MODULE_NAME, sizeof(vdev->name)); + vdev->release = video_device_release_empty; + vdev->fops = &vpfe_fops; + vdev->ioctl_ops = &vpfe_ioctl_ops; + vdev->v4l2_dev = &vpfe->v4l2_dev; + vdev->vfl_dir = VFL_DIR_RX; + vdev->queue = q; + vdev->lock = &vpfe->lock; + vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING | + V4L2_CAP_READWRITE; + video_set_drvdata(vdev, vpfe); + err = video_register_device(&vpfe->video_dev, VFL_TYPE_VIDEO, -1); + if (err) { + vpfe_err(vpfe, + "Unable to register video device.\n"); + goto probe_out; + } + + return 0; + +probe_out: + v4l2_device_unregister(&vpfe->v4l2_dev); + return err; +} + +static int vpfe_async_complete(struct v4l2_async_notifier *notifier) +{ + struct vpfe_device *vpfe = container_of(notifier->v4l2_dev, + struct vpfe_device, v4l2_dev); + + return vpfe_probe_complete(vpfe); +} + +static const struct v4l2_async_notifier_operations vpfe_async_ops = { + .bound = vpfe_async_bound, + .complete = vpfe_async_complete, +}; + +static struct vpfe_config * +vpfe_get_pdata(struct vpfe_device *vpfe) +{ + struct device_node *endpoint = NULL; + struct device *dev = vpfe->pdev; + struct vpfe_subdev_info *sdinfo; + struct vpfe_config *pdata; + unsigned int flags; + unsigned int i; + int err; + + dev_dbg(dev, "vpfe_get_pdata\n"); + + v4l2_async_nf_init(&vpfe->notifier, &vpfe->v4l2_dev); + + if (!IS_ENABLED(CONFIG_OF) || !dev->of_node) + return dev->platform_data; + + pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return NULL; + + for (i = 0; ; i++) { + struct v4l2_fwnode_endpoint bus_cfg = { .bus_type = 0 }; + struct device_node *rem; + + endpoint = of_graph_get_next_endpoint(dev->of_node, endpoint); + if (!endpoint) + break; + + sdinfo = &pdata->sub_devs[i]; + sdinfo->grp_id = 0; + + /* we only support camera */ + sdinfo->inputs[0].index = i; + strscpy(sdinfo->inputs[0].name, "Camera", + sizeof(sdinfo->inputs[0].name)); + sdinfo->inputs[0].type = V4L2_INPUT_TYPE_CAMERA; + sdinfo->inputs[0].std = V4L2_STD_ALL; + sdinfo->inputs[0].capabilities = V4L2_IN_CAP_STD; + + sdinfo->can_route = 0; + sdinfo->routes = NULL; + + of_property_read_u32(endpoint, "ti,am437x-vpfe-interface", + &sdinfo->vpfe_param.if_type); + if (sdinfo->vpfe_param.if_type < 0 || + 
sdinfo->vpfe_param.if_type > 4) { + sdinfo->vpfe_param.if_type = VPFE_RAW_BAYER; + } + + err = v4l2_fwnode_endpoint_parse(of_fwnode_handle(endpoint), + &bus_cfg); + if (err) { + dev_err(dev, "Could not parse the endpoint\n"); + goto cleanup; + } + + sdinfo->vpfe_param.bus_width = bus_cfg.bus.parallel.bus_width; + + if (sdinfo->vpfe_param.bus_width < 8 || + sdinfo->vpfe_param.bus_width > 16) { + dev_err(dev, "Invalid bus width.\n"); + goto cleanup; + } + + flags = bus_cfg.bus.parallel.flags; + + if (flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH) + sdinfo->vpfe_param.hdpol = 1; + + if (flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH) + sdinfo->vpfe_param.vdpol = 1; + + rem = of_graph_get_remote_port_parent(endpoint); + if (!rem) { + dev_err(dev, "Remote device at %pOF not found\n", + endpoint); + goto cleanup; + } + + pdata->asd[i] = v4l2_async_nf_add_fwnode(&vpfe->notifier, + of_fwnode_handle(rem), + struct v4l2_async_connection); + of_node_put(rem); + if (IS_ERR(pdata->asd[i])) + goto cleanup; + } + + of_node_put(endpoint); + return pdata; + +cleanup: + v4l2_async_nf_cleanup(&vpfe->notifier); + of_node_put(endpoint); + return NULL; +} + +/* + * vpfe_probe : This function creates device entries by register + * itself to the V4L2 driver and initializes fields of each + * device objects + */ +static int vpfe_probe(struct platform_device *pdev) +{ + struct vpfe_config *vpfe_cfg; + struct vpfe_device *vpfe; + struct vpfe_ccdc *ccdc; + int ret; + + vpfe = devm_kzalloc(&pdev->dev, sizeof(*vpfe), GFP_KERNEL); + if (!vpfe) + return -ENOMEM; + + vpfe->pdev = &pdev->dev; + + ret = v4l2_device_register(&pdev->dev, &vpfe->v4l2_dev); + if (ret) { + vpfe_err(vpfe, "Unable to register v4l2 device.\n"); + return ret; + } + + vpfe_cfg = vpfe_get_pdata(vpfe); + if (!vpfe_cfg) { + dev_err(&pdev->dev, "No platform data\n"); + ret = -EINVAL; + goto probe_out_cleanup; + } + + vpfe->cfg = vpfe_cfg; + ccdc = &vpfe->ccdc; + + ccdc->ccdc_cfg.base_addr = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(ccdc->ccdc_cfg.base_addr)) { + ret = PTR_ERR(ccdc->ccdc_cfg.base_addr); + goto probe_out_cleanup; + } + + ret = platform_get_irq(pdev, 0); + if (ret < 0) + goto probe_out_cleanup; + vpfe->irq = ret; + + ret = devm_request_irq(vpfe->pdev, vpfe->irq, vpfe_isr, 0, + "vpfe_capture0", vpfe); + if (ret) { + dev_err(&pdev->dev, "Unable to request interrupt\n"); + ret = -EINVAL; + goto probe_out_cleanup; + } + + /* set the driver data in platform device */ + platform_set_drvdata(pdev, vpfe); + /* Enabling module functional clock */ + pm_runtime_enable(&pdev->dev); + + /* for now just enable it here instead of waiting for the open */ + ret = pm_runtime_resume_and_get(&pdev->dev); + if (ret < 0) { + vpfe_err(vpfe, "Unable to resume device.\n"); + goto probe_out_cleanup; + } + + vpfe_ccdc_config_defaults(ccdc); + + pm_runtime_put_sync(&pdev->dev); + + vpfe->sd = devm_kcalloc(&pdev->dev, + ARRAY_SIZE(vpfe->cfg->asd), + sizeof(struct v4l2_subdev *), + GFP_KERNEL); + if (!vpfe->sd) { + ret = -ENOMEM; + goto probe_out_cleanup; + } + + vpfe->notifier.ops = &vpfe_async_ops; + ret = v4l2_async_nf_register(&vpfe->notifier); + if (ret) { + vpfe_err(vpfe, "Error registering async notifier\n"); + ret = -EINVAL; + goto probe_out_cleanup; + } + + return 0; + +probe_out_cleanup: + v4l2_async_nf_cleanup(&vpfe->notifier); + v4l2_device_unregister(&vpfe->v4l2_dev); + return ret; +} + +/* + * vpfe_remove : It un-register device from V4L2 driver + */ +static void vpfe_remove(struct platform_device *pdev) +{ + struct vpfe_device *vpfe = platform_get_drvdata(pdev); + 
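+ /*
+ * Undo what probe set up: disable runtime PM, remove the async
+ * notifier and unregister the video and V4L2 devices.
+ */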
+ pm_runtime_disable(&pdev->dev); + + v4l2_async_nf_unregister(&vpfe->notifier); + v4l2_async_nf_cleanup(&vpfe->notifier); + video_unregister_device(&vpfe->video_dev); + v4l2_device_unregister(&vpfe->v4l2_dev); +} + +#ifdef CONFIG_PM_SLEEP + +static void vpfe_save_context(struct vpfe_ccdc *ccdc) +{ + ccdc->ccdc_ctx[VPFE_PCR >> 2] = vpfe_reg_read(ccdc, VPFE_PCR); + ccdc->ccdc_ctx[VPFE_SYNMODE >> 2] = vpfe_reg_read(ccdc, VPFE_SYNMODE); + ccdc->ccdc_ctx[VPFE_SDOFST >> 2] = vpfe_reg_read(ccdc, VPFE_SDOFST); + ccdc->ccdc_ctx[VPFE_SDR_ADDR >> 2] = vpfe_reg_read(ccdc, VPFE_SDR_ADDR); + ccdc->ccdc_ctx[VPFE_CLAMP >> 2] = vpfe_reg_read(ccdc, VPFE_CLAMP); + ccdc->ccdc_ctx[VPFE_DCSUB >> 2] = vpfe_reg_read(ccdc, VPFE_DCSUB); + ccdc->ccdc_ctx[VPFE_COLPTN >> 2] = vpfe_reg_read(ccdc, VPFE_COLPTN); + ccdc->ccdc_ctx[VPFE_BLKCMP >> 2] = vpfe_reg_read(ccdc, VPFE_BLKCMP); + ccdc->ccdc_ctx[VPFE_VDINT >> 2] = vpfe_reg_read(ccdc, VPFE_VDINT); + ccdc->ccdc_ctx[VPFE_ALAW >> 2] = vpfe_reg_read(ccdc, VPFE_ALAW); + ccdc->ccdc_ctx[VPFE_REC656IF >> 2] = vpfe_reg_read(ccdc, VPFE_REC656IF); + ccdc->ccdc_ctx[VPFE_CCDCFG >> 2] = vpfe_reg_read(ccdc, VPFE_CCDCFG); + ccdc->ccdc_ctx[VPFE_CULLING >> 2] = vpfe_reg_read(ccdc, VPFE_CULLING); + ccdc->ccdc_ctx[VPFE_HD_VD_WID >> 2] = vpfe_reg_read(ccdc, + VPFE_HD_VD_WID); + ccdc->ccdc_ctx[VPFE_PIX_LINES >> 2] = vpfe_reg_read(ccdc, + VPFE_PIX_LINES); + ccdc->ccdc_ctx[VPFE_HORZ_INFO >> 2] = vpfe_reg_read(ccdc, + VPFE_HORZ_INFO); + ccdc->ccdc_ctx[VPFE_VERT_START >> 2] = vpfe_reg_read(ccdc, + VPFE_VERT_START); + ccdc->ccdc_ctx[VPFE_VERT_LINES >> 2] = vpfe_reg_read(ccdc, + VPFE_VERT_LINES); + ccdc->ccdc_ctx[VPFE_HSIZE_OFF >> 2] = vpfe_reg_read(ccdc, + VPFE_HSIZE_OFF); +} + +static int vpfe_suspend(struct device *dev) +{ + struct vpfe_device *vpfe = dev_get_drvdata(dev); + struct vpfe_ccdc *ccdc = &vpfe->ccdc; + + /* only do full suspend if streaming has started */ + if (vb2_start_streaming_called(&vpfe->buffer_queue)) { + /* + * ignore RPM resume errors here, as it is already too late. + * A check like that should happen earlier, either at + * open() or just before start streaming. 
+ */ + pm_runtime_get_sync(dev); + vpfe_config_enable(ccdc, 1); + + /* Save VPFE context */ + vpfe_save_context(ccdc); + + /* Disable CCDC */ + vpfe_pcr_enable(ccdc, 0); + vpfe_config_enable(ccdc, 0); + + /* Disable both master and slave clock */ + pm_runtime_put_sync(dev); + } + + /* Select sleep pin state */ + pinctrl_pm_select_sleep_state(dev); + + return 0; +} + +static void vpfe_restore_context(struct vpfe_ccdc *ccdc) +{ + vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_SYNMODE >> 2], VPFE_SYNMODE); + vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_CULLING >> 2], VPFE_CULLING); + vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_SDOFST >> 2], VPFE_SDOFST); + vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_SDR_ADDR >> 2], VPFE_SDR_ADDR); + vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_CLAMP >> 2], VPFE_CLAMP); + vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_DCSUB >> 2], VPFE_DCSUB); + vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_COLPTN >> 2], VPFE_COLPTN); + vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_BLKCMP >> 2], VPFE_BLKCMP); + vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_VDINT >> 2], VPFE_VDINT); + vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_ALAW >> 2], VPFE_ALAW); + vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_REC656IF >> 2], VPFE_REC656IF); + vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_CCDCFG >> 2], VPFE_CCDCFG); + vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_PCR >> 2], VPFE_PCR); + vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_HD_VD_WID >> 2], + VPFE_HD_VD_WID); + vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_PIX_LINES >> 2], + VPFE_PIX_LINES); + vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_HORZ_INFO >> 2], + VPFE_HORZ_INFO); + vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_VERT_START >> 2], + VPFE_VERT_START); + vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_VERT_LINES >> 2], + VPFE_VERT_LINES); + vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_HSIZE_OFF >> 2], + VPFE_HSIZE_OFF); +} + +static int vpfe_resume(struct device *dev) +{ + struct vpfe_device *vpfe = dev_get_drvdata(dev); + struct vpfe_ccdc *ccdc = &vpfe->ccdc; + + /* only do full resume if streaming has started */ + if (vb2_start_streaming_called(&vpfe->buffer_queue)) { + /* Enable both master and slave clock */ + pm_runtime_get_sync(dev); + vpfe_config_enable(ccdc, 1); + + /* Restore VPFE context */ + vpfe_restore_context(ccdc); + + vpfe_config_enable(ccdc, 0); + pm_runtime_put_sync(dev); + } + + /* Select default pin state */ + pinctrl_pm_select_default_state(dev); + + return 0; +} + +#endif + +static SIMPLE_DEV_PM_OPS(vpfe_pm_ops, vpfe_suspend, vpfe_resume); + +static const struct of_device_id vpfe_of_match[] = { + { .compatible = "ti,am437x-vpfe", }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, vpfe_of_match); + +static struct platform_driver vpfe_driver = { + .probe = vpfe_probe, + .remove_new = vpfe_remove, + .driver = { + .name = VPFE_MODULE_NAME, + .pm = &vpfe_pm_ops, + .of_match_table = vpfe_of_match, + }, +}; + +module_platform_driver(vpfe_driver); + +MODULE_AUTHOR("Texas Instruments"); +MODULE_DESCRIPTION("TI AM437x VPFE driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(VPFE_VERSION); diff --git a/drivers/media/platform/ti/am437x/am437x-vpfe.h b/drivers/media/platform/ti/am437x/am437x-vpfe.h new file mode 100644 index 0000000000..50c3c793b3 --- /dev/null +++ b/drivers/media/platform/ti/am437x/am437x-vpfe.h @@ -0,0 +1,288 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2013 - 2014 Texas Instruments, Inc. 
+ * + * Benoit Parrot + * Lad, Prabhakar + */ + +#ifndef AM437X_VPFE_H +#define AM437X_VPFE_H + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "am437x-vpfe_regs.h" + +enum vpfe_pin_pol { + VPFE_PINPOL_POSITIVE = 0, + VPFE_PINPOL_NEGATIVE, +}; + +enum vpfe_hw_if_type { + /* Raw Bayer */ + VPFE_RAW_BAYER = 0, + /* BT656 - 8 bit */ + VPFE_BT656, + /* BT656 - 10 bit */ + VPFE_BT656_10BIT, + /* YCbCr - 8 bit with external sync */ + VPFE_YCBCR_SYNC_8, + /* YCbCr - 16 bit with external sync */ + VPFE_YCBCR_SYNC_16, +}; + +/* interface description */ +struct vpfe_hw_if_param { + enum vpfe_hw_if_type if_type; + enum vpfe_pin_pol hdpol; + enum vpfe_pin_pol vdpol; + unsigned int bus_width; +}; + +#define VPFE_MAX_SUBDEV 1 +#define VPFE_MAX_INPUTS 1 + +struct vpfe_std_info { + int active_pixels; + int active_lines; + /* current frame format */ + int frame_format; +}; + +struct vpfe_route { + u32 input; + u32 output; +}; + +struct vpfe_subdev_info { + /* Sub device group id */ + int grp_id; + /* inputs available at the sub device */ + struct v4l2_input inputs[VPFE_MAX_INPUTS]; + /* Sub dev routing information for each input */ + struct vpfe_route *routes; + /* check if sub dev supports routing */ + int can_route; + /* ccdc bus/interface configuration */ + struct vpfe_hw_if_param vpfe_param; + struct v4l2_subdev *sd; +}; + +struct vpfe_config { + /* information about each subdev */ + struct vpfe_subdev_info sub_devs[VPFE_MAX_SUBDEV]; + /* Flat array, arranged in groups */ + struct v4l2_async_connection *asd[VPFE_MAX_SUBDEV]; +}; + +struct vpfe_cap_buffer { + struct vb2_v4l2_buffer vb; + struct list_head list; +}; + +enum ccdc_pixfmt { + CCDC_PIXFMT_RAW = 0, + CCDC_PIXFMT_YCBCR_16BIT, + CCDC_PIXFMT_YCBCR_8BIT, +}; + +enum ccdc_frmfmt { + CCDC_FRMFMT_PROGRESSIVE = 0, + CCDC_FRMFMT_INTERLACED, +}; + +/* PIXEL ORDER IN MEMORY from LSB to MSB */ +/* only applicable for 8-bit input mode */ +enum ccdc_pixorder { + CCDC_PIXORDER_YCBYCR, + CCDC_PIXORDER_CBYCRY, +}; + +enum ccdc_buftype { + CCDC_BUFTYPE_FLD_INTERLEAVED, + CCDC_BUFTYPE_FLD_SEPARATED +}; + + +/* returns the highest bit used for the gamma */ +static inline u8 ccdc_gamma_width_max_bit(enum vpfe_ccdc_gamma_width width) +{ + return 15 - width; +} + +/* returns the highest bit used for this data size */ +static inline u8 ccdc_data_size_max_bit(enum vpfe_ccdc_data_size sz) +{ + return sz == VPFE_CCDC_DATA_8BITS ? 
7 : 15 - sz; +} + +/* Structure for CCDC configuration parameters for raw capture mode */ +struct ccdc_params_raw { + /* pixel format */ + enum ccdc_pixfmt pix_fmt; + /* progressive or interlaced frame */ + enum ccdc_frmfmt frm_fmt; + struct v4l2_rect win; + /* Current Format Bytes Per Pixels */ + unsigned int bytesperpixel; + /* Current Format Bytes per Lines + * (Aligned to 32 bytes) used for HORZ_INFO + */ + unsigned int bytesperline; + /* field id polarity */ + enum vpfe_pin_pol fid_pol; + /* vertical sync polarity */ + enum vpfe_pin_pol vd_pol; + /* horizontal sync polarity */ + enum vpfe_pin_pol hd_pol; + /* interleaved or separated fields */ + enum ccdc_buftype buf_type; + /* + * enable to store the image in inverse + * order in memory(bottom to top) + */ + unsigned char image_invert_enable; + /* configurable parameters */ + struct vpfe_ccdc_config_params_raw config_params; +}; + +struct ccdc_params_ycbcr { + /* pixel format */ + enum ccdc_pixfmt pix_fmt; + /* progressive or interlaced frame */ + enum ccdc_frmfmt frm_fmt; + struct v4l2_rect win; + /* Current Format Bytes Per Pixels */ + unsigned int bytesperpixel; + /* Current Format Bytes per Lines + * (Aligned to 32 bytes) used for HORZ_INFO + */ + unsigned int bytesperline; + /* field id polarity */ + enum vpfe_pin_pol fid_pol; + /* vertical sync polarity */ + enum vpfe_pin_pol vd_pol; + /* horizontal sync polarity */ + enum vpfe_pin_pol hd_pol; + /* enable BT.656 embedded sync mode */ + int bt656_enable; + /* cb:y:cr:y or y:cb:y:cr in memory */ + enum ccdc_pixorder pix_order; + /* interleaved or separated fields */ + enum ccdc_buftype buf_type; +}; + +/* + * CCDC operational configuration + */ +struct ccdc_config { + /* CCDC interface type */ + enum vpfe_hw_if_type if_type; + /* Raw Bayer configuration */ + struct ccdc_params_raw bayer; + /* YCbCr configuration */ + struct ccdc_params_ycbcr ycbcr; + /* ccdc base address */ + void __iomem *base_addr; +}; + +struct vpfe_ccdc { + struct ccdc_config ccdc_cfg; + u32 ccdc_ctx[VPFE_REG_END / sizeof(u32)]; +}; + +/* + * struct vpfe_fmt - VPFE media bus format information + * fourcc: V4L2 pixel format code + * code: V4L2 media bus format code + * bitsperpixel: Bits per pixel over the bus + */ +struct vpfe_fmt { + u32 fourcc; + u32 code; + u32 bitsperpixel; +}; + +/* + * When formats[] is modified make sure to adjust this value also. + * Expect compile time warnings if VPFE_NUM_FORMATS is smaller then + * the number of elements in formats[]. 
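+ * VPFE_NUM_FORMATS also sizes the active_fmt[] array in struct
+ * vpfe_device below.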
+ */ +#define VPFE_NUM_FORMATS 10 + +struct vpfe_device { + /* V4l2 specific parameters */ + /* Identifies video device for this channel */ + struct video_device video_dev; + /* sub devices */ + struct v4l2_subdev **sd; + /* vpfe cfg */ + struct vpfe_config *cfg; + /* V4l2 device */ + struct v4l2_device v4l2_dev; + /* parent device */ + struct device *pdev; + /* subdevice async Notifier */ + struct v4l2_async_notifier notifier; + /* Indicates id of the field which is being displayed */ + unsigned field; + unsigned sequence; + /* current interface type */ + struct vpfe_hw_if_param vpfe_if_params; + /* ptr to currently selected sub device */ + struct vpfe_subdev_info *current_subdev; + /* current input at the sub device */ + int current_input; + /* Keeps track of the information about the standard */ + struct vpfe_std_info std_info; + /* std index into std table */ + int std_index; + /* IRQs used when CCDC output to SDRAM */ + unsigned int irq; + /* Pointer pointing to current v4l2_buffer */ + struct vpfe_cap_buffer *cur_frm; + /* Pointer pointing to next v4l2_buffer */ + struct vpfe_cap_buffer *next_frm; + /* Used to store pixel format */ + struct v4l2_format fmt; + /* Used to keep a reference to the current vpfe_fmt */ + struct vpfe_fmt *current_vpfe_fmt; + struct vpfe_fmt *active_fmt[VPFE_NUM_FORMATS]; + unsigned int num_active_fmt; + + /* + * used when IMP is chained to store the crop window which + * is different from the image window + */ + struct v4l2_rect crop; + /* Buffer queue used in vb2 */ + struct vb2_queue buffer_queue; + /* Queue of filled frames */ + struct list_head dma_queue; + /* IRQ lock for DMA queue */ + spinlock_t dma_queue_lock; + /* lock used to access this structure */ + struct mutex lock; + /* + * offset where second field starts from the starting of the + * buffer for field separated YCbCr formats + */ + u32 field_off; + struct vpfe_ccdc ccdc; + int stopping; + struct completion capture_stop; +}; + +#endif /* AM437X_VPFE_H */ diff --git a/drivers/media/platform/ti/am437x/am437x-vpfe_regs.h b/drivers/media/platform/ti/am437x/am437x-vpfe_regs.h new file mode 100644 index 0000000000..63ecdca3b9 --- /dev/null +++ b/drivers/media/platform/ti/am437x/am437x-vpfe_regs.h @@ -0,0 +1,132 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * TI AM437x Image Sensor Interface Registers + * + * Copyright (C) 2013 - 2014 Texas Instruments, Inc. 
+ * + * Benoit Parrot + * Lad, Prabhakar + */ + +#ifndef AM437X_VPFE_REGS_H +#define AM437X_VPFE_REGS_H + +/* VPFE module register offset */ +#define VPFE_REVISION 0x0 +#define VPFE_PCR 0x4 +#define VPFE_SYNMODE 0x8 +#define VPFE_HD_VD_WID 0xc +#define VPFE_PIX_LINES 0x10 +#define VPFE_HORZ_INFO 0x14 +#define VPFE_VERT_START 0x18 +#define VPFE_VERT_LINES 0x1c +#define VPFE_CULLING 0x20 +#define VPFE_HSIZE_OFF 0x24 +#define VPFE_SDOFST 0x28 +#define VPFE_SDR_ADDR 0x2c +#define VPFE_CLAMP 0x30 +#define VPFE_DCSUB 0x34 +#define VPFE_COLPTN 0x38 +#define VPFE_BLKCMP 0x3c +#define VPFE_VDINT 0x48 +#define VPFE_ALAW 0x4c +#define VPFE_REC656IF 0x50 +#define VPFE_CCDCFG 0x54 +#define VPFE_DMA_CNTL 0x98 +#define VPFE_SYSCONFIG 0x104 +#define VPFE_CONFIG 0x108 +#define VPFE_IRQ_EOI 0x110 +#define VPFE_IRQ_STS_RAW 0x114 +#define VPFE_IRQ_STS 0x118 +#define VPFE_IRQ_EN_SET 0x11c +#define VPFE_IRQ_EN_CLR 0x120 +#define VPFE_REG_END 0x124 + +/* Define bit fields within selected registers */ +#define VPFE_FID_POL_MASK 1 +#define VPFE_FID_POL_SHIFT 4 +#define VPFE_HD_POL_MASK 1 +#define VPFE_HD_POL_SHIFT 3 +#define VPFE_VD_POL_MASK 1 +#define VPFE_VD_POL_SHIFT 2 +#define VPFE_HSIZE_OFF_MASK 0xffffffe0 +#define VPFE_32BYTE_ALIGN_VAL 31 +#define VPFE_FRM_FMT_MASK 0x1 +#define VPFE_FRM_FMT_SHIFT 7 +#define VPFE_DATA_SZ_MASK 7 +#define VPFE_DATA_SZ_SHIFT 8 +#define VPFE_PIX_FMT_MASK 3 +#define VPFE_PIX_FMT_SHIFT 12 +#define VPFE_VP2SDR_DISABLE 0xfffbffff +#define VPFE_WEN_ENABLE BIT(17) +#define VPFE_SDR2RSZ_DISABLE 0xfff7ffff +#define VPFE_VDHDEN_ENABLE BIT(16) +#define VPFE_LPF_ENABLE BIT(14) +#define VPFE_ALAW_ENABLE BIT(3) +#define VPFE_ALAW_GAMMA_WD_MASK 7 +#define VPFE_BLK_CLAMP_ENABLE BIT(31) +#define VPFE_BLK_SGAIN_MASK 0x1f +#define VPFE_BLK_ST_PXL_MASK 0x7fff +#define VPFE_BLK_ST_PXL_SHIFT 10 +#define VPFE_BLK_SAMPLE_LN_MASK 7 +#define VPFE_BLK_SAMPLE_LN_SHIFT 28 +#define VPFE_BLK_SAMPLE_LINE_MASK 7 +#define VPFE_BLK_SAMPLE_LINE_SHIFT 25 +#define VPFE_BLK_DC_SUB_MASK 0x03fff +#define VPFE_BLK_COMP_MASK 0xff +#define VPFE_BLK_COMP_GB_COMP_SHIFT 8 +#define VPFE_BLK_COMP_GR_COMP_SHIFT 16 +#define VPFE_BLK_COMP_R_COMP_SHIFT 24 +#define VPFE_LATCH_ON_VSYNC_DISABLE BIT(15) +#define VPFE_DATA_PACK_ENABLE BIT(11) +#define VPFE_HORZ_INFO_SPH_SHIFT 16 +#define VPFE_VERT_START_SLV0_SHIFT 16 +#define VPFE_VDINT_VDINT0_SHIFT 16 +#define VPFE_VDINT_VDINT1_MASK 0xffff +#define VPFE_PPC_RAW 1 +#define VPFE_DCSUB_DEFAULT_VAL 0 +#define VPFE_CLAMP_DEFAULT_VAL 0 +#define VPFE_COLPTN_VAL 0xbb11bb11 +#define VPFE_TWO_BYTES_PER_PIXEL 2 +#define VPFE_INTERLACED_IMAGE_INVERT 0x4b6d +#define VPFE_INTERLACED_NO_IMAGE_INVERT 0x0249 +#define VPFE_PROGRESSIVE_IMAGE_INVERT 0x4000 +#define VPFE_PROGRESSIVE_NO_IMAGE_INVERT 0 +#define VPFE_INTERLACED_HEIGHT_SHIFT 1 +#define VPFE_SYN_MODE_INPMOD_SHIFT 12 +#define VPFE_SYN_MODE_INPMOD_MASK 3 +#define VPFE_SYN_MODE_8BITS (7 << 8) +#define VPFE_SYN_MODE_10BITS (6 << 8) +#define VPFE_SYN_MODE_11BITS (5 << 8) +#define VPFE_SYN_MODE_12BITS (4 << 8) +#define VPFE_SYN_MODE_13BITS (3 << 8) +#define VPFE_SYN_MODE_14BITS (2 << 8) +#define VPFE_SYN_MODE_15BITS (1 << 8) +#define VPFE_SYN_MODE_16BITS (0 << 8) +#define VPFE_SYN_FLDMODE_MASK 1 +#define VPFE_SYN_FLDMODE_SHIFT 7 +#define VPFE_REC656IF_BT656_EN 3 +#define VPFE_SYN_MODE_VD_POL_NEGATIVE BIT(2) +#define VPFE_CCDCFG_Y8POS_SHIFT 11 +#define VPFE_CCDCFG_BW656_10BIT BIT(5) +#define VPFE_SDOFST_FIELD_INTERLEAVED 0x249 +#define VPFE_NO_CULLING 0xffff00ff +#define VPFE_VDINT0 BIT(0) +#define VPFE_VDINT1 BIT(1) +#define VPFE_VDINT2 BIT(2) 
+#define VPFE_DMA_CNTL_OVERFLOW BIT(31) + +#define VPFE_CONFIG_PCLK_INV_SHIFT 0 +#define VPFE_CONFIG_PCLK_INV_MASK 1 +#define VPFE_CONFIG_PCLK_INV_NOT_INV 0 +#define VPFE_CONFIG_PCLK_INV_INV 1 +#define VPFE_CONFIG_EN_SHIFT 1 +#define VPFE_CONFIG_EN_MASK 2 +#define VPFE_CONFIG_EN_DISABLE 0 +#define VPFE_CONFIG_EN_ENABLE 1 +#define VPFE_CONFIG_ST_SHIFT 2 +#define VPFE_CONFIG_ST_MASK 4 +#define VPFE_CONFIG_ST_OCP_ACTIVE 0 +#define VPFE_CONFIG_ST_OCP_STANDBY 1 + +#endif /* AM437X_VPFE_REGS_H */ diff --git a/drivers/media/platform/ti/cal/Makefile b/drivers/media/platform/ti/cal/Makefile new file mode 100644 index 0000000000..45ac35585f --- /dev/null +++ b/drivers/media/platform/ti/cal/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_VIDEO_TI_CAL) += ti-cal.o +ti-cal-y := cal.o cal-camerarx.o cal-video.o diff --git a/drivers/media/platform/ti/cal/cal-camerarx.c b/drivers/media/platform/ti/cal/cal-camerarx.c new file mode 100644 index 0000000000..1a4273bbe7 --- /dev/null +++ b/drivers/media/platform/ti/cal/cal-camerarx.c @@ -0,0 +1,895 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * TI Camera Access Layer (CAL) - CAMERARX + * + * Copyright (c) 2015-2020 Texas Instruments Inc. + * + * Authors: + * Benoit Parrot + * Laurent Pinchart + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "cal.h" +#include "cal_regs.h" + +/* ------------------------------------------------------------------ + * I/O Register Accessors + * ------------------------------------------------------------------ + */ + +static inline u32 camerarx_read(struct cal_camerarx *phy, u32 offset) +{ + return ioread32(phy->base + offset); +} + +static inline void camerarx_write(struct cal_camerarx *phy, u32 offset, u32 val) +{ + iowrite32(val, phy->base + offset); +} + +/* ------------------------------------------------------------------ + * CAMERARX Management + * ------------------------------------------------------------------ + */ + +static s64 cal_camerarx_get_ext_link_freq(struct cal_camerarx *phy) +{ + struct v4l2_mbus_config_mipi_csi2 *mipi_csi2 = &phy->endpoint.bus.mipi_csi2; + u32 num_lanes = mipi_csi2->num_data_lanes; + const struct cal_format_info *fmtinfo; + struct v4l2_subdev_state *state; + struct v4l2_mbus_framefmt *fmt; + u32 bpp; + s64 freq; + + state = v4l2_subdev_get_locked_active_state(&phy->subdev); + + fmt = v4l2_subdev_get_pad_format(&phy->subdev, state, CAL_CAMERARX_PAD_SINK); + + fmtinfo = cal_format_by_code(fmt->code); + if (!fmtinfo) + return -EINVAL; + + bpp = fmtinfo->bpp; + + freq = v4l2_get_link_freq(phy->source->ctrl_handler, bpp, 2 * num_lanes); + if (freq < 0) { + phy_err(phy, "failed to get link freq for subdev '%s'\n", + phy->source->name); + return freq; + } + + phy_dbg(3, phy, "Source Link Freq: %llu\n", freq); + + return freq; +} + +static void cal_camerarx_lane_config(struct cal_camerarx *phy) +{ + u32 val = cal_read(phy->cal, CAL_CSI2_COMPLEXIO_CFG(phy->instance)); + u32 lane_mask = CAL_CSI2_COMPLEXIO_CFG_CLOCK_POSITION_MASK; + u32 polarity_mask = CAL_CSI2_COMPLEXIO_CFG_CLOCK_POL_MASK; + struct v4l2_mbus_config_mipi_csi2 *mipi_csi2 = + &phy->endpoint.bus.mipi_csi2; + int lane; + + cal_set_field(&val, mipi_csi2->clock_lane + 1, lane_mask); + cal_set_field(&val, mipi_csi2->lane_polarities[0], polarity_mask); + for (lane = 0; lane < mipi_csi2->num_data_lanes; lane++) { + /* + * Every lane are one nibble apart starting with the + * clock followed by the data lanes so shift masks by 4. 
+ */ + lane_mask <<= 4; + polarity_mask <<= 4; + cal_set_field(&val, mipi_csi2->data_lanes[lane] + 1, lane_mask); + cal_set_field(&val, mipi_csi2->lane_polarities[lane + 1], + polarity_mask); + } + + cal_write(phy->cal, CAL_CSI2_COMPLEXIO_CFG(phy->instance), val); + phy_dbg(3, phy, "CAL_CSI2_COMPLEXIO_CFG(%d) = 0x%08x\n", + phy->instance, val); +} + +static void cal_camerarx_enable(struct cal_camerarx *phy) +{ + u32 num_lanes = phy->cal->data->camerarx[phy->instance].num_lanes; + + regmap_field_write(phy->fields[F_CAMMODE], 0); + /* Always enable all lanes at the phy control level */ + regmap_field_write(phy->fields[F_LANEENABLE], (1 << num_lanes) - 1); + /* F_CSI_MODE is not present on every architecture */ + if (phy->fields[F_CSI_MODE]) + regmap_field_write(phy->fields[F_CSI_MODE], 1); + regmap_field_write(phy->fields[F_CTRLCLKEN], 1); +} + +void cal_camerarx_disable(struct cal_camerarx *phy) +{ + regmap_field_write(phy->fields[F_CTRLCLKEN], 0); +} + +/* + * TCLK values are OK at their reset values + */ +#define TCLK_TERM 0 +#define TCLK_MISS 1 +#define TCLK_SETTLE 14 + +static void cal_camerarx_config(struct cal_camerarx *phy, s64 link_freq) +{ + unsigned int reg0, reg1; + unsigned int ths_term, ths_settle; + + /* DPHY timing configuration */ + + /* THS_TERM: Programmed value = floor(20 ns/DDRClk period) */ + ths_term = div_s64(20 * link_freq, 1000 * 1000 * 1000); + phy_dbg(1, phy, "ths_term: %d (0x%02x)\n", ths_term, ths_term); + + /* THS_SETTLE: Programmed value = floor(105 ns/DDRClk period) + 4 */ + ths_settle = div_s64(105 * link_freq, 1000 * 1000 * 1000) + 4; + phy_dbg(1, phy, "ths_settle: %d (0x%02x)\n", ths_settle, ths_settle); + + reg0 = camerarx_read(phy, CAL_CSI2_PHY_REG0); + cal_set_field(®0, CAL_CSI2_PHY_REG0_HSCLOCKCONFIG_DISABLE, + CAL_CSI2_PHY_REG0_HSCLOCKCONFIG_MASK); + cal_set_field(®0, ths_term, CAL_CSI2_PHY_REG0_THS_TERM_MASK); + cal_set_field(®0, ths_settle, CAL_CSI2_PHY_REG0_THS_SETTLE_MASK); + + phy_dbg(1, phy, "CSI2_%d_REG0 = 0x%08x\n", phy->instance, reg0); + camerarx_write(phy, CAL_CSI2_PHY_REG0, reg0); + + reg1 = camerarx_read(phy, CAL_CSI2_PHY_REG1); + cal_set_field(®1, TCLK_TERM, CAL_CSI2_PHY_REG1_TCLK_TERM_MASK); + cal_set_field(®1, 0xb8, CAL_CSI2_PHY_REG1_DPHY_HS_SYNC_PATTERN_MASK); + cal_set_field(®1, TCLK_MISS, + CAL_CSI2_PHY_REG1_CTRLCLK_DIV_FACTOR_MASK); + cal_set_field(®1, TCLK_SETTLE, CAL_CSI2_PHY_REG1_TCLK_SETTLE_MASK); + + phy_dbg(1, phy, "CSI2_%d_REG1 = 0x%08x\n", phy->instance, reg1); + camerarx_write(phy, CAL_CSI2_PHY_REG1, reg1); +} + +static void cal_camerarx_power(struct cal_camerarx *phy, bool enable) +{ + u32 target_state; + unsigned int i; + + target_state = enable ? CAL_CSI2_COMPLEXIO_CFG_PWR_CMD_STATE_ON : + CAL_CSI2_COMPLEXIO_CFG_PWR_CMD_STATE_OFF; + + cal_write_field(phy->cal, CAL_CSI2_COMPLEXIO_CFG(phy->instance), + target_state, CAL_CSI2_COMPLEXIO_CFG_PWR_CMD_MASK); + + for (i = 0; i < 10; i++) { + u32 current_state; + + current_state = cal_read_field(phy->cal, + CAL_CSI2_COMPLEXIO_CFG(phy->instance), + CAL_CSI2_COMPLEXIO_CFG_PWR_STATUS_MASK); + + if (current_state == target_state) + break; + + usleep_range(1000, 1100); + } + + if (i == 10) + phy_err(phy, "Failed to power %s complexio\n", + enable ? 
"up" : "down"); +} + +static void cal_camerarx_wait_reset(struct cal_camerarx *phy) +{ + unsigned long timeout; + + timeout = jiffies + msecs_to_jiffies(750); + while (time_before(jiffies, timeout)) { + if (cal_read_field(phy->cal, + CAL_CSI2_COMPLEXIO_CFG(phy->instance), + CAL_CSI2_COMPLEXIO_CFG_RESET_DONE_MASK) == + CAL_CSI2_COMPLEXIO_CFG_RESET_DONE_RESETCOMPLETED) + break; + usleep_range(500, 5000); + } + + if (cal_read_field(phy->cal, CAL_CSI2_COMPLEXIO_CFG(phy->instance), + CAL_CSI2_COMPLEXIO_CFG_RESET_DONE_MASK) != + CAL_CSI2_COMPLEXIO_CFG_RESET_DONE_RESETCOMPLETED) + phy_err(phy, "Timeout waiting for Complex IO reset done\n"); +} + +static void cal_camerarx_wait_stop_state(struct cal_camerarx *phy) +{ + unsigned long timeout; + + timeout = jiffies + msecs_to_jiffies(750); + while (time_before(jiffies, timeout)) { + if (cal_read_field(phy->cal, + CAL_CSI2_TIMING(phy->instance), + CAL_CSI2_TIMING_FORCE_RX_MODE_IO1_MASK) == 0) + break; + usleep_range(500, 5000); + } + + if (cal_read_field(phy->cal, CAL_CSI2_TIMING(phy->instance), + CAL_CSI2_TIMING_FORCE_RX_MODE_IO1_MASK) != 0) + phy_err(phy, "Timeout waiting for stop state\n"); +} + +static void cal_camerarx_enable_irqs(struct cal_camerarx *phy) +{ + const u32 cio_err_mask = + CAL_CSI2_COMPLEXIO_IRQ_LANE_ERRORS_MASK | + CAL_CSI2_COMPLEXIO_IRQ_FIFO_OVR_MASK | + CAL_CSI2_COMPLEXIO_IRQ_SHORT_PACKET_MASK | + CAL_CSI2_COMPLEXIO_IRQ_ECC_NO_CORRECTION_MASK; + const u32 vc_err_mask = + CAL_CSI2_VC_IRQ_CS_IRQ_MASK(0) | + CAL_CSI2_VC_IRQ_CS_IRQ_MASK(1) | + CAL_CSI2_VC_IRQ_CS_IRQ_MASK(2) | + CAL_CSI2_VC_IRQ_CS_IRQ_MASK(3) | + CAL_CSI2_VC_IRQ_ECC_CORRECTION_IRQ_MASK(0) | + CAL_CSI2_VC_IRQ_ECC_CORRECTION_IRQ_MASK(1) | + CAL_CSI2_VC_IRQ_ECC_CORRECTION_IRQ_MASK(2) | + CAL_CSI2_VC_IRQ_ECC_CORRECTION_IRQ_MASK(3); + + /* Enable CIO & VC error IRQs. */ + cal_write(phy->cal, CAL_HL_IRQENABLE_SET(0), + CAL_HL_IRQ_CIO_MASK(phy->instance) | + CAL_HL_IRQ_VC_MASK(phy->instance)); + cal_write(phy->cal, CAL_CSI2_COMPLEXIO_IRQENABLE(phy->instance), + cio_err_mask); + cal_write(phy->cal, CAL_CSI2_VC_IRQENABLE(phy->instance), + vc_err_mask); +} + +static void cal_camerarx_disable_irqs(struct cal_camerarx *phy) +{ + /* Disable CIO error irqs */ + cal_write(phy->cal, CAL_HL_IRQENABLE_CLR(0), + CAL_HL_IRQ_CIO_MASK(phy->instance) | + CAL_HL_IRQ_VC_MASK(phy->instance)); + cal_write(phy->cal, CAL_CSI2_COMPLEXIO_IRQENABLE(phy->instance), 0); + cal_write(phy->cal, CAL_CSI2_VC_IRQENABLE(phy->instance), 0); +} + +static void cal_camerarx_ppi_enable(struct cal_camerarx *phy) +{ + cal_write_field(phy->cal, CAL_CSI2_PPI_CTRL(phy->instance), + 1, CAL_CSI2_PPI_CTRL_ECC_EN_MASK); + + cal_write_field(phy->cal, CAL_CSI2_PPI_CTRL(phy->instance), + 1, CAL_CSI2_PPI_CTRL_IF_EN_MASK); +} + +static void cal_camerarx_ppi_disable(struct cal_camerarx *phy) +{ + cal_write_field(phy->cal, CAL_CSI2_PPI_CTRL(phy->instance), + 0, CAL_CSI2_PPI_CTRL_IF_EN_MASK); +} + +static int cal_camerarx_start(struct cal_camerarx *phy) +{ + s64 link_freq; + u32 sscounter; + u32 val; + int ret; + + if (phy->enable_count > 0) { + phy->enable_count++; + return 0; + } + + link_freq = cal_camerarx_get_ext_link_freq(phy); + if (link_freq < 0) + return link_freq; + + ret = v4l2_subdev_call(phy->source, core, s_power, 1); + if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV) { + phy_err(phy, "power on failed in subdev\n"); + return ret; + } + + cal_camerarx_enable_irqs(phy); + + /* + * CSI-2 PHY Link Initialization Sequence, according to the DRA74xP / + * DRA75xP / DRA76xP / DRA77xP TRM. 
The DRA71x / DRA72x and the AM65x / + * DRA80xM TRMs have a slightly simplified sequence. + */ + + /* + * 1. Configure all CSI-2 low level protocol registers to be ready to + * receive signals/data from the CSI-2 PHY. + * + * i.-v. Configure the lanes position and polarity. + */ + cal_camerarx_lane_config(phy); + + /* + * vi.-vii. Configure D-PHY mode, enable the required lanes and + * enable the CAMERARX clock. + */ + cal_camerarx_enable(phy); + + /* + * 2. CSI PHY and link initialization sequence. + * + * a. Deassert the CSI-2 PHY reset. Do not wait for reset completion + * at this point, as it requires the external source to send the + * CSI-2 HS clock. + */ + cal_write_field(phy->cal, CAL_CSI2_COMPLEXIO_CFG(phy->instance), + CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL_OPERATIONAL, + CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL_MASK); + phy_dbg(3, phy, "CAL_CSI2_COMPLEXIO_CFG(%d) = 0x%08x De-assert Complex IO Reset\n", + phy->instance, + cal_read(phy->cal, CAL_CSI2_COMPLEXIO_CFG(phy->instance))); + + /* Dummy read to allow SCP reset to complete. */ + camerarx_read(phy, CAL_CSI2_PHY_REG0); + + /* Program the PHY timing parameters. */ + cal_camerarx_config(phy, link_freq); + + /* + * b. Assert the FORCERXMODE signal. + * + * The stop-state-counter is based on fclk cycles, and we always use + * the x16 and x4 settings, so stop-state-timeout = + * fclk-cycle * 16 * 4 * counter. + * + * Stop-state-timeout must be more than 100us as per CSI-2 spec, so we + * calculate a timeout that's 100us (rounding up). + */ + sscounter = DIV_ROUND_UP(clk_get_rate(phy->cal->fclk), 10000 * 16 * 4); + + val = cal_read(phy->cal, CAL_CSI2_TIMING(phy->instance)); + cal_set_field(&val, 1, CAL_CSI2_TIMING_STOP_STATE_X16_IO1_MASK); + cal_set_field(&val, 1, CAL_CSI2_TIMING_STOP_STATE_X4_IO1_MASK); + cal_set_field(&val, sscounter, + CAL_CSI2_TIMING_STOP_STATE_COUNTER_IO1_MASK); + cal_write(phy->cal, CAL_CSI2_TIMING(phy->instance), val); + phy_dbg(3, phy, "CAL_CSI2_TIMING(%d) = 0x%08x Stop States\n", + phy->instance, + cal_read(phy->cal, CAL_CSI2_TIMING(phy->instance))); + + /* Assert the FORCERXMODE signal. */ + cal_write_field(phy->cal, CAL_CSI2_TIMING(phy->instance), + 1, CAL_CSI2_TIMING_FORCE_RX_MODE_IO1_MASK); + phy_dbg(3, phy, "CAL_CSI2_TIMING(%d) = 0x%08x Force RXMODE\n", + phy->instance, + cal_read(phy->cal, CAL_CSI2_TIMING(phy->instance))); + + /* + * c. Connect pull-down on CSI-2 PHY link (using pad control). + * + * This is not required on DRA71x, DRA72x, AM65x and DRA80xM. Not + * implemented. + */ + + /* + * d. Power up the CSI-2 PHY. + * e. Check whether the state status reaches the ON state. + */ + cal_camerarx_power(phy, true); + + /* + * Start the source to enable the CSI-2 HS clock. We can now wait for + * CSI-2 PHY reset to complete. + */ + ret = v4l2_subdev_call(phy->source, video, s_stream, 1); + if (ret) { + v4l2_subdev_call(phy->source, core, s_power, 0); + cal_camerarx_disable_irqs(phy); + phy_err(phy, "stream on failed in subdev\n"); + return ret; + } + + cal_camerarx_wait_reset(phy); + + /* f. Wait for STOPSTATE=1 for all enabled lane modules. */ + cal_camerarx_wait_stop_state(phy); + + phy_dbg(1, phy, "CSI2_%u_REG1 = 0x%08x (bits 31-28 should be set)\n", + phy->instance, camerarx_read(phy, CAL_CSI2_PHY_REG1)); + + /* + * g. Disable pull-down on CSI-2 PHY link (using pad control). + * + * This is not required on DRA71x, DRA72x, AM65x and DRA80xM. Not + * implemented. + */ + + /* Finally, enable the PHY Protocol Interface (PPI). 
*/ + cal_camerarx_ppi_enable(phy); + + phy->enable_count++; + + return 0; +} + +static void cal_camerarx_stop(struct cal_camerarx *phy) +{ + int ret; + + if (--phy->enable_count > 0) + return; + + cal_camerarx_ppi_disable(phy); + + cal_camerarx_disable_irqs(phy); + + cal_camerarx_power(phy, false); + + /* Assert Complex IO Reset */ + cal_write_field(phy->cal, CAL_CSI2_COMPLEXIO_CFG(phy->instance), + CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL, + CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL_MASK); + + phy_dbg(3, phy, "CAL_CSI2_COMPLEXIO_CFG(%d) = 0x%08x Complex IO in Reset\n", + phy->instance, + cal_read(phy->cal, CAL_CSI2_COMPLEXIO_CFG(phy->instance))); + + /* Disable the phy */ + cal_camerarx_disable(phy); + + if (v4l2_subdev_call(phy->source, video, s_stream, 0)) + phy_err(phy, "stream off failed in subdev\n"); + + ret = v4l2_subdev_call(phy->source, core, s_power, 0); + if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV) + phy_err(phy, "power off failed in subdev\n"); +} + +/* + * Errata i913: CSI2 LDO Needs to be disabled when module is powered on + * + * Enabling CSI2 LDO shorts it to core supply. It is crucial the 2 CSI2 + * LDOs on the device are disabled if CSI-2 module is powered on + * (0x4845 B304 | 0x4845 B384 [28:27] = 0x1) or in ULPS (0x4845 B304 + * | 0x4845 B384 [28:27] = 0x2) mode. Common concerns include: high + * current draw on the module supply in active mode. + * + * Errata does not apply when CSI-2 module is powered off + * (0x4845 B304 | 0x4845 B384 [28:27] = 0x0). + * + * SW Workaround: + * Set the following register bits to disable the LDO, + * which is essentially CSI2 REG10 bit 6: + * + * Core 0: 0x4845 B828 = 0x0000 0040 + * Core 1: 0x4845 B928 = 0x0000 0040 + */ +void cal_camerarx_i913_errata(struct cal_camerarx *phy) +{ + u32 reg10 = camerarx_read(phy, CAL_CSI2_PHY_REG10); + + cal_set_field(®10, 1, CAL_CSI2_PHY_REG10_I933_LDO_DISABLE_MASK); + + phy_dbg(1, phy, "CSI2_%d_REG10 = 0x%08x\n", phy->instance, reg10); + camerarx_write(phy, CAL_CSI2_PHY_REG10, reg10); +} + +static int cal_camerarx_regmap_init(struct cal_dev *cal, + struct cal_camerarx *phy) +{ + const struct cal_camerarx_data *phy_data; + unsigned int i; + + if (!cal->data) + return -EINVAL; + + phy_data = &cal->data->camerarx[phy->instance]; + + for (i = 0; i < F_MAX_FIELDS; i++) { + struct reg_field field = { + .reg = cal->syscon_camerrx_offset, + .lsb = phy_data->fields[i].lsb, + .msb = phy_data->fields[i].msb, + }; + + /* + * Here we update the reg offset with the + * value found in DT + */ + phy->fields[i] = devm_regmap_field_alloc(cal->dev, + cal->syscon_camerrx, + field); + if (IS_ERR(phy->fields[i])) { + cal_err(cal, "Unable to allocate regmap fields\n"); + return PTR_ERR(phy->fields[i]); + } + } + + return 0; +} + +static int cal_camerarx_parse_dt(struct cal_camerarx *phy) +{ + struct v4l2_fwnode_endpoint *endpoint = &phy->endpoint; + char data_lanes[V4L2_MBUS_CSI2_MAX_DATA_LANES * 2]; + struct device_node *ep_node; + unsigned int i; + int ret; + + /* + * Find the endpoint node for the port corresponding to the PHY + * instance, and parse its CSI-2-related properties. + */ + ep_node = of_graph_get_endpoint_by_regs(phy->cal->dev->of_node, + phy->instance, 0); + if (!ep_node) { + /* + * The endpoint is not mandatory, not all PHY instances need to + * be connected in DT. 
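+ * A PHY whose port has no endpoint is simply left without a
+ * connected source.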
+ */ + phy_dbg(3, phy, "Port has no endpoint\n"); + return 0; + } + + endpoint->bus_type = V4L2_MBUS_CSI2_DPHY; + ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(ep_node), endpoint); + if (ret < 0) { + phy_err(phy, "Failed to parse endpoint\n"); + goto done; + } + + for (i = 0; i < endpoint->bus.mipi_csi2.num_data_lanes; i++) { + unsigned int lane = endpoint->bus.mipi_csi2.data_lanes[i]; + + if (lane > 4) { + phy_err(phy, "Invalid position %u for data lane %u\n", + lane, i); + ret = -EINVAL; + goto done; + } + + data_lanes[i*2] = '0' + lane; + data_lanes[i*2+1] = ' '; + } + + data_lanes[i*2-1] = '\0'; + + phy_dbg(3, phy, + "CSI-2 bus: clock lane <%u>, data lanes <%s>, flags 0x%08x\n", + endpoint->bus.mipi_csi2.clock_lane, data_lanes, + endpoint->bus.mipi_csi2.flags); + + /* Retrieve the connected device and store it for later use. */ + phy->source_ep_node = of_graph_get_remote_endpoint(ep_node); + phy->source_node = of_graph_get_port_parent(phy->source_ep_node); + if (!phy->source_node) { + phy_dbg(3, phy, "Can't get remote parent\n"); + of_node_put(phy->source_ep_node); + ret = -EINVAL; + goto done; + } + + phy_dbg(1, phy, "Found connected device %pOFn\n", phy->source_node); + +done: + of_node_put(ep_node); + return ret; +} + +/* ------------------------------------------------------------------ + * V4L2 Subdev Operations + * ------------------------------------------------------------------ + */ + +static inline struct cal_camerarx *to_cal_camerarx(struct v4l2_subdev *sd) +{ + return container_of(sd, struct cal_camerarx, subdev); +} + +static int cal_camerarx_sd_s_stream(struct v4l2_subdev *sd, int enable) +{ + struct cal_camerarx *phy = to_cal_camerarx(sd); + struct v4l2_subdev_state *state; + int ret = 0; + + state = v4l2_subdev_lock_and_get_active_state(sd); + + if (enable) + ret = cal_camerarx_start(phy); + else + cal_camerarx_stop(phy); + + v4l2_subdev_unlock_state(state); + + return ret; +} + +static int cal_camerarx_sd_enum_mbus_code(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_mbus_code_enum *code) +{ + struct cal_camerarx *phy = to_cal_camerarx(sd); + + /* No transcoding, source and sink codes must match. */ + if (cal_rx_pad_is_source(code->pad)) { + struct v4l2_mbus_framefmt *fmt; + + if (code->index > 0) + return -EINVAL; + + fmt = v4l2_subdev_get_pad_format(&phy->subdev, state, + CAL_CAMERARX_PAD_SINK); + code->code = fmt->code; + } else { + if (code->index >= cal_num_formats) + return -EINVAL; + + code->code = cal_formats[code->index].code; + } + + return 0; +} + +static int cal_camerarx_sd_enum_frame_size(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_frame_size_enum *fse) +{ + const struct cal_format_info *fmtinfo; + + if (fse->index > 0) + return -EINVAL; + + /* No transcoding, source and sink formats must match. 
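+ * Source pads therefore only enumerate the exact size currently set
+ * on the sink pad.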
*/ + if (cal_rx_pad_is_source(fse->pad)) { + struct v4l2_mbus_framefmt *fmt; + + fmt = v4l2_subdev_get_pad_format(sd, state, + CAL_CAMERARX_PAD_SINK); + if (fse->code != fmt->code) + return -EINVAL; + + fse->min_width = fmt->width; + fse->max_width = fmt->width; + fse->min_height = fmt->height; + fse->max_height = fmt->height; + } else { + fmtinfo = cal_format_by_code(fse->code); + if (!fmtinfo) + return -EINVAL; + + fse->min_width = CAL_MIN_WIDTH_BYTES * 8 / ALIGN(fmtinfo->bpp, 8); + fse->max_width = CAL_MAX_WIDTH_BYTES * 8 / ALIGN(fmtinfo->bpp, 8); + fse->min_height = CAL_MIN_HEIGHT_LINES; + fse->max_height = CAL_MAX_HEIGHT_LINES; + } + + return 0; +} + +static int cal_camerarx_sd_set_fmt(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_format *format) +{ + const struct cal_format_info *fmtinfo; + struct v4l2_mbus_framefmt *fmt; + unsigned int bpp; + + /* No transcoding, source and sink formats must match. */ + if (cal_rx_pad_is_source(format->pad)) + return v4l2_subdev_get_fmt(sd, state, format); + + /* + * Default to the first format if the requested media bus code isn't + * supported. + */ + fmtinfo = cal_format_by_code(format->format.code); + if (!fmtinfo) + fmtinfo = &cal_formats[0]; + + /* Clamp the size, update the code. The colorspace is accepted as-is. */ + bpp = ALIGN(fmtinfo->bpp, 8); + + format->format.width = clamp_t(unsigned int, format->format.width, + CAL_MIN_WIDTH_BYTES * 8 / bpp, + CAL_MAX_WIDTH_BYTES * 8 / bpp); + format->format.height = clamp_t(unsigned int, format->format.height, + CAL_MIN_HEIGHT_LINES, + CAL_MAX_HEIGHT_LINES); + format->format.code = fmtinfo->code; + format->format.field = V4L2_FIELD_NONE; + + /* Store the format and propagate it to the source pad. */ + + fmt = v4l2_subdev_get_pad_format(sd, state, CAL_CAMERARX_PAD_SINK); + *fmt = format->format; + + fmt = v4l2_subdev_get_pad_format(sd, state, + CAL_CAMERARX_PAD_FIRST_SOURCE); + *fmt = format->format; + + return 0; +} + +static int cal_camerarx_sd_init_cfg(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state) +{ + struct v4l2_subdev_format format = { + .which = state ? 
V4L2_SUBDEV_FORMAT_TRY + : V4L2_SUBDEV_FORMAT_ACTIVE, + .pad = CAL_CAMERARX_PAD_SINK, + .format = { + .width = 640, + .height = 480, + .code = MEDIA_BUS_FMT_UYVY8_1X16, + .field = V4L2_FIELD_NONE, + .colorspace = V4L2_COLORSPACE_SRGB, + .ycbcr_enc = V4L2_YCBCR_ENC_601, + .quantization = V4L2_QUANTIZATION_LIM_RANGE, + .xfer_func = V4L2_XFER_FUNC_SRGB, + }, + }; + + return cal_camerarx_sd_set_fmt(sd, state, &format); +} + +static int cal_camerarx_get_frame_desc(struct v4l2_subdev *sd, unsigned int pad, + struct v4l2_mbus_frame_desc *fd) +{ + struct cal_camerarx *phy = to_cal_camerarx(sd); + struct v4l2_mbus_frame_desc remote_desc; + const struct media_pad *remote_pad; + int ret; + + remote_pad = media_pad_remote_pad_first(&phy->pads[CAL_CAMERARX_PAD_SINK]); + if (!remote_pad) + return -EPIPE; + + ret = v4l2_subdev_call(phy->source, pad, get_frame_desc, + remote_pad->index, &remote_desc); + if (ret) + return ret; + + if (remote_desc.type != V4L2_MBUS_FRAME_DESC_TYPE_CSI2) { + cal_err(phy->cal, + "Frame descriptor does not describe CSI-2 link"); + return -EINVAL; + } + + if (remote_desc.num_entries > 1) + cal_err(phy->cal, + "Multiple streams not supported in remote frame descriptor, using the first one\n"); + + fd->type = V4L2_MBUS_FRAME_DESC_TYPE_CSI2; + fd->num_entries = 1; + fd->entry[0] = remote_desc.entry[0]; + + return 0; +} + +static const struct v4l2_subdev_video_ops cal_camerarx_video_ops = { + .s_stream = cal_camerarx_sd_s_stream, +}; + +static const struct v4l2_subdev_pad_ops cal_camerarx_pad_ops = { + .init_cfg = cal_camerarx_sd_init_cfg, + .enum_mbus_code = cal_camerarx_sd_enum_mbus_code, + .enum_frame_size = cal_camerarx_sd_enum_frame_size, + .get_fmt = v4l2_subdev_get_fmt, + .set_fmt = cal_camerarx_sd_set_fmt, + .get_frame_desc = cal_camerarx_get_frame_desc, +}; + +static const struct v4l2_subdev_ops cal_camerarx_subdev_ops = { + .video = &cal_camerarx_video_ops, + .pad = &cal_camerarx_pad_ops, +}; + +static struct media_entity_operations cal_camerarx_media_ops = { + .link_validate = v4l2_subdev_link_validate, +}; + +/* ------------------------------------------------------------------ + * Create and Destroy + * ------------------------------------------------------------------ + */ + +struct cal_camerarx *cal_camerarx_create(struct cal_dev *cal, + unsigned int instance) +{ + struct platform_device *pdev = to_platform_device(cal->dev); + struct cal_camerarx *phy; + struct v4l2_subdev *sd; + unsigned int i; + int ret; + + phy = devm_kzalloc(cal->dev, sizeof(*phy), GFP_KERNEL); + if (!phy) + return ERR_PTR(-ENOMEM); + + phy->cal = cal; + phy->instance = instance; + + spin_lock_init(&phy->vc_lock); + + phy->res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + (instance == 0) ? + "cal_rx_core0" : + "cal_rx_core1"); + phy->base = devm_ioremap_resource(cal->dev, phy->res); + if (IS_ERR(phy->base)) { + cal_err(cal, "failed to ioremap\n"); + return ERR_CAST(phy->base); + } + + cal_dbg(1, cal, "ioresource %s at %pa - %pa\n", + phy->res->name, &phy->res->start, &phy->res->end); + + ret = cal_camerarx_regmap_init(cal, phy); + if (ret) + return ERR_PTR(ret); + + ret = cal_camerarx_parse_dt(phy); + if (ret) + return ERR_PTR(ret); + + /* Initialize the V4L2 subdev and media entity. 
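+ * Pad 0 (CAL_CAMERARX_PAD_SINK) receives the CSI-2 stream from the
+ * sensor, while the remaining pads are sources feeding the CAL DMA
+ * contexts.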
*/ + sd = &phy->subdev; + v4l2_subdev_init(sd, &cal_camerarx_subdev_ops); + sd->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE; + sd->flags = V4L2_SUBDEV_FL_HAS_DEVNODE; + snprintf(sd->name, sizeof(sd->name), "CAMERARX%u", instance); + sd->dev = cal->dev; + + phy->pads[CAL_CAMERARX_PAD_SINK].flags = MEDIA_PAD_FL_SINK; + for (i = CAL_CAMERARX_PAD_FIRST_SOURCE; i < CAL_CAMERARX_NUM_PADS; ++i) + phy->pads[i].flags = MEDIA_PAD_FL_SOURCE; + sd->entity.ops = &cal_camerarx_media_ops; + ret = media_entity_pads_init(&sd->entity, ARRAY_SIZE(phy->pads), + phy->pads); + if (ret) + goto err_node_put; + + ret = v4l2_subdev_init_finalize(sd); + if (ret) + goto err_entity_cleanup; + + ret = v4l2_device_register_subdev(&cal->v4l2_dev, sd); + if (ret) + goto err_free_state; + + return phy; + +err_free_state: + v4l2_subdev_cleanup(sd); +err_entity_cleanup: + media_entity_cleanup(&phy->subdev.entity); +err_node_put: + of_node_put(phy->source_ep_node); + of_node_put(phy->source_node); + return ERR_PTR(ret); +} + +void cal_camerarx_destroy(struct cal_camerarx *phy) +{ + if (!phy) + return; + + v4l2_device_unregister_subdev(&phy->subdev); + v4l2_subdev_cleanup(&phy->subdev); + media_entity_cleanup(&phy->subdev.entity); + of_node_put(phy->source_ep_node); + of_node_put(phy->source_node); +} diff --git a/drivers/media/platform/ti/cal/cal-video.c b/drivers/media/platform/ti/cal/cal-video.c new file mode 100644 index 0000000000..a8abcd0fee --- /dev/null +++ b/drivers/media/platform/ti/cal/cal-video.c @@ -0,0 +1,1062 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * TI Camera Access Layer (CAL) - Video Device + * + * Copyright (c) 2015-2020 Texas Instruments Inc. + * + * Authors: + * Benoit Parrot + * Laurent Pinchart + */ + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "cal.h" + +/* Print Four-character-code (FOURCC) */ +static char *fourcc_to_str(u32 fmt) +{ + static char code[5]; + + code[0] = (unsigned char)(fmt & 0xff); + code[1] = (unsigned char)((fmt >> 8) & 0xff); + code[2] = (unsigned char)((fmt >> 16) & 0xff); + code[3] = (unsigned char)((fmt >> 24) & 0xff); + code[4] = '\0'; + + return code; +} + +/* ------------------------------------------------------------------ + * V4L2 Common IOCTLs + * ------------------------------------------------------------------ + */ + +static int cal_querycap(struct file *file, void *priv, + struct v4l2_capability *cap) +{ + strscpy(cap->driver, CAL_MODULE_NAME, sizeof(cap->driver)); + strscpy(cap->card, CAL_MODULE_NAME, sizeof(cap->card)); + + return 0; +} + +static int cal_g_fmt_vid_cap(struct file *file, void *priv, + struct v4l2_format *f) +{ + struct cal_ctx *ctx = video_drvdata(file); + + *f = ctx->v_fmt; + + return 0; +} + +/* ------------------------------------------------------------------ + * V4L2 Video Node Centric IOCTLs + * ------------------------------------------------------------------ + */ + +static const struct cal_format_info *find_format_by_pix(struct cal_ctx *ctx, + u32 pixelformat) +{ + const struct cal_format_info *fmtinfo; + unsigned int k; + + for (k = 0; k < ctx->num_active_fmt; k++) { + fmtinfo = ctx->active_fmt[k]; + if (fmtinfo->fourcc == pixelformat) + return fmtinfo; + } + + return NULL; +} + +static const struct cal_format_info *find_format_by_code(struct cal_ctx *ctx, + u32 code) +{ + const struct cal_format_info *fmtinfo; + unsigned int k; + + for (k = 0; k < ctx->num_active_fmt; k++) { + fmtinfo = ctx->active_fmt[k]; + if (fmtinfo->code == code) + 
return fmtinfo; + } + + return NULL; +} + +static int cal_legacy_enum_fmt_vid_cap(struct file *file, void *priv, + struct v4l2_fmtdesc *f) +{ + struct cal_ctx *ctx = video_drvdata(file); + const struct cal_format_info *fmtinfo; + + if (f->index >= ctx->num_active_fmt) + return -EINVAL; + + fmtinfo = ctx->active_fmt[f->index]; + + f->pixelformat = fmtinfo->fourcc; + f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + return 0; +} + +static int __subdev_get_format(struct cal_ctx *ctx, + struct v4l2_mbus_framefmt *fmt) +{ + struct v4l2_subdev_format sd_fmt = { + .which = V4L2_SUBDEV_FORMAT_ACTIVE, + .pad = 0, + }; + struct v4l2_mbus_framefmt *mbus_fmt = &sd_fmt.format; + int ret; + + ret = v4l2_subdev_call(ctx->phy->source, pad, get_fmt, NULL, &sd_fmt); + if (ret) + return ret; + + *fmt = *mbus_fmt; + + ctx_dbg(1, ctx, "%s %dx%d code:%04X\n", __func__, + fmt->width, fmt->height, fmt->code); + + return 0; +} + +static int __subdev_set_format(struct cal_ctx *ctx, + struct v4l2_mbus_framefmt *fmt) +{ + struct v4l2_subdev_format sd_fmt = { + .which = V4L2_SUBDEV_FORMAT_ACTIVE, + .pad = 0, + }; + struct v4l2_mbus_framefmt *mbus_fmt = &sd_fmt.format; + int ret; + + *mbus_fmt = *fmt; + + ret = v4l2_subdev_call(ctx->phy->source, pad, set_fmt, NULL, &sd_fmt); + if (ret) + return ret; + + ctx_dbg(1, ctx, "%s %dx%d code:%04X\n", __func__, + fmt->width, fmt->height, fmt->code); + + return 0; +} + +static void cal_calc_format_size(struct cal_ctx *ctx, + const struct cal_format_info *fmtinfo, + struct v4l2_format *f) +{ + u32 bpl, max_width; + + /* + * Maximum width is bound by the DMA max width in bytes. + * We need to recalculate the actual maxi width depending on the + * number of bytes per pixels required. + */ + max_width = CAL_MAX_WIDTH_BYTES / (ALIGN(fmtinfo->bpp, 8) >> 3); + v4l_bound_align_image(&f->fmt.pix.width, 48, max_width, 2, + &f->fmt.pix.height, 32, CAL_MAX_HEIGHT_LINES, + 0, 0); + + bpl = (f->fmt.pix.width * ALIGN(fmtinfo->bpp, 8)) >> 3; + f->fmt.pix.bytesperline = ALIGN(bpl, 16); + + f->fmt.pix.sizeimage = f->fmt.pix.height * + f->fmt.pix.bytesperline; + + ctx_dbg(3, ctx, "%s: fourcc: %s size: %dx%d bpl:%d img_size:%d\n", + __func__, fourcc_to_str(f->fmt.pix.pixelformat), + f->fmt.pix.width, f->fmt.pix.height, + f->fmt.pix.bytesperline, f->fmt.pix.sizeimage); +} + +static int cal_legacy_try_fmt_vid_cap(struct file *file, void *priv, + struct v4l2_format *f) +{ + struct cal_ctx *ctx = video_drvdata(file); + const struct cal_format_info *fmtinfo; + struct v4l2_subdev_frame_size_enum fse = { + .which = V4L2_SUBDEV_FORMAT_ACTIVE, + }; + int found; + + fmtinfo = find_format_by_pix(ctx, f->fmt.pix.pixelformat); + if (!fmtinfo) { + ctx_dbg(3, ctx, "Fourcc format (0x%08x) not found.\n", + f->fmt.pix.pixelformat); + + /* Just get the first one enumerated */ + fmtinfo = ctx->active_fmt[0]; + f->fmt.pix.pixelformat = fmtinfo->fourcc; + } + + f->fmt.pix.field = ctx->v_fmt.fmt.pix.field; + + /* check for/find a valid width/height */ + found = false; + fse.pad = 0; + fse.code = fmtinfo->code; + for (fse.index = 0; ; fse.index++) { + int ret; + + ret = v4l2_subdev_call(ctx->phy->source, pad, enum_frame_size, + NULL, &fse); + if (ret) + break; + + if ((f->fmt.pix.width == fse.max_width) && + (f->fmt.pix.height == fse.max_height)) { + found = true; + break; + } else if ((f->fmt.pix.width >= fse.min_width) && + (f->fmt.pix.width <= fse.max_width) && + (f->fmt.pix.height >= fse.min_height) && + (f->fmt.pix.height <= fse.max_height)) { + found = true; + break; + } + } + + if (!found) { + /* use existing values as 
default */ + f->fmt.pix.width = ctx->v_fmt.fmt.pix.width; + f->fmt.pix.height = ctx->v_fmt.fmt.pix.height; + } + + /* + * Use current colorspace for now, it will get + * updated properly during s_fmt + */ + f->fmt.pix.colorspace = ctx->v_fmt.fmt.pix.colorspace; + cal_calc_format_size(ctx, fmtinfo, f); + return 0; +} + +static int cal_legacy_s_fmt_vid_cap(struct file *file, void *priv, + struct v4l2_format *f) +{ + struct cal_ctx *ctx = video_drvdata(file); + struct vb2_queue *q = &ctx->vb_vidq; + struct v4l2_subdev_format sd_fmt = { + .which = V4L2_SUBDEV_FORMAT_ACTIVE, + .pad = CAL_CAMERARX_PAD_SINK, + }; + const struct cal_format_info *fmtinfo; + int ret; + + if (vb2_is_busy(q)) { + ctx_dbg(3, ctx, "%s device busy\n", __func__); + return -EBUSY; + } + + ret = cal_legacy_try_fmt_vid_cap(file, priv, f); + if (ret < 0) + return ret; + + fmtinfo = find_format_by_pix(ctx, f->fmt.pix.pixelformat); + + v4l2_fill_mbus_format(&sd_fmt.format, &f->fmt.pix, fmtinfo->code); + + ret = __subdev_set_format(ctx, &sd_fmt.format); + if (ret) + return ret; + + /* Just double check nothing has gone wrong */ + if (sd_fmt.format.code != fmtinfo->code) { + ctx_dbg(3, ctx, + "%s subdev changed format on us, this should not happen\n", + __func__); + return -EINVAL; + } + + v4l2_fill_pix_format(&ctx->v_fmt.fmt.pix, &sd_fmt.format); + ctx->v_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + ctx->v_fmt.fmt.pix.pixelformat = fmtinfo->fourcc; + ctx->v_fmt.fmt.pix.field = sd_fmt.format.field; + cal_calc_format_size(ctx, fmtinfo, &ctx->v_fmt); + + v4l2_subdev_call(&ctx->phy->subdev, pad, set_fmt, NULL, &sd_fmt); + + ctx->fmtinfo = fmtinfo; + *f = ctx->v_fmt; + + return 0; +} + +static int cal_legacy_enum_framesizes(struct file *file, void *fh, + struct v4l2_frmsizeenum *fsize) +{ + struct cal_ctx *ctx = video_drvdata(file); + const struct cal_format_info *fmtinfo; + struct v4l2_subdev_frame_size_enum fse = { + .index = fsize->index, + .pad = 0, + .which = V4L2_SUBDEV_FORMAT_ACTIVE, + }; + int ret; + + /* check for valid format */ + fmtinfo = find_format_by_pix(ctx, fsize->pixel_format); + if (!fmtinfo) { + ctx_dbg(3, ctx, "Invalid pixel code: %x\n", + fsize->pixel_format); + return -EINVAL; + } + + fse.code = fmtinfo->code; + + ret = v4l2_subdev_call(ctx->phy->source, pad, enum_frame_size, NULL, + &fse); + if (ret) + return ret; + + ctx_dbg(1, ctx, "%s: index: %d code: %x W:[%d,%d] H:[%d,%d]\n", + __func__, fse.index, fse.code, fse.min_width, fse.max_width, + fse.min_height, fse.max_height); + + fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE; + fsize->discrete.width = fse.max_width; + fsize->discrete.height = fse.max_height; + + return 0; +} + +static int cal_legacy_enum_input(struct file *file, void *priv, + struct v4l2_input *inp) +{ + if (inp->index > 0) + return -EINVAL; + + inp->type = V4L2_INPUT_TYPE_CAMERA; + sprintf(inp->name, "Camera %u", inp->index); + return 0; +} + +static int cal_legacy_g_input(struct file *file, void *priv, unsigned int *i) +{ + *i = 0; + return 0; +} + +static int cal_legacy_s_input(struct file *file, void *priv, unsigned int i) +{ + return i > 0 ? 
-EINVAL : 0; +} + +/* timeperframe is arbitrary and continuous */ +static int cal_legacy_enum_frameintervals(struct file *file, void *priv, + struct v4l2_frmivalenum *fival) +{ + struct cal_ctx *ctx = video_drvdata(file); + const struct cal_format_info *fmtinfo; + struct v4l2_subdev_frame_interval_enum fie = { + .index = fival->index, + .width = fival->width, + .height = fival->height, + .which = V4L2_SUBDEV_FORMAT_ACTIVE, + }; + int ret; + + fmtinfo = find_format_by_pix(ctx, fival->pixel_format); + if (!fmtinfo) + return -EINVAL; + + fie.code = fmtinfo->code; + ret = v4l2_subdev_call(ctx->phy->source, pad, enum_frame_interval, + NULL, &fie); + if (ret) + return ret; + fival->type = V4L2_FRMIVAL_TYPE_DISCRETE; + fival->discrete = fie.interval; + + return 0; +} + +static int cal_legacy_g_parm(struct file *file, void *fh, struct v4l2_streamparm *a) +{ + struct cal_ctx *ctx = video_drvdata(file); + + return v4l2_g_parm_cap(video_devdata(file), ctx->phy->source, a); +} + +static int cal_legacy_s_parm(struct file *file, void *fh, struct v4l2_streamparm *a) +{ + struct cal_ctx *ctx = video_drvdata(file); + + return v4l2_s_parm_cap(video_devdata(file), ctx->phy->source, a); +} + +static const struct v4l2_ioctl_ops cal_ioctl_legacy_ops = { + .vidioc_querycap = cal_querycap, + .vidioc_enum_fmt_vid_cap = cal_legacy_enum_fmt_vid_cap, + .vidioc_g_fmt_vid_cap = cal_g_fmt_vid_cap, + .vidioc_try_fmt_vid_cap = cal_legacy_try_fmt_vid_cap, + .vidioc_s_fmt_vid_cap = cal_legacy_s_fmt_vid_cap, + .vidioc_enum_framesizes = cal_legacy_enum_framesizes, + .vidioc_reqbufs = vb2_ioctl_reqbufs, + .vidioc_create_bufs = vb2_ioctl_create_bufs, + .vidioc_prepare_buf = vb2_ioctl_prepare_buf, + .vidioc_querybuf = vb2_ioctl_querybuf, + .vidioc_qbuf = vb2_ioctl_qbuf, + .vidioc_dqbuf = vb2_ioctl_dqbuf, + .vidioc_expbuf = vb2_ioctl_expbuf, + .vidioc_enum_input = cal_legacy_enum_input, + .vidioc_g_input = cal_legacy_g_input, + .vidioc_s_input = cal_legacy_s_input, + .vidioc_enum_frameintervals = cal_legacy_enum_frameintervals, + .vidioc_streamon = vb2_ioctl_streamon, + .vidioc_streamoff = vb2_ioctl_streamoff, + .vidioc_log_status = v4l2_ctrl_log_status, + .vidioc_subscribe_event = v4l2_ctrl_subscribe_event, + .vidioc_unsubscribe_event = v4l2_event_unsubscribe, + .vidioc_g_parm = cal_legacy_g_parm, + .vidioc_s_parm = cal_legacy_s_parm, +}; + +/* ------------------------------------------------------------------ + * V4L2 Media Controller Centric IOCTLs + * ------------------------------------------------------------------ + */ + +static int cal_mc_enum_fmt_vid_cap(struct file *file, void *priv, + struct v4l2_fmtdesc *f) +{ + unsigned int i; + unsigned int idx; + + if (f->index >= cal_num_formats) + return -EINVAL; + + idx = 0; + + for (i = 0; i < cal_num_formats; ++i) { + if (f->mbus_code && cal_formats[i].code != f->mbus_code) + continue; + + if (idx == f->index) { + f->pixelformat = cal_formats[i].fourcc; + f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + return 0; + } + + idx++; + } + + return -EINVAL; +} + +static void cal_mc_try_fmt(struct cal_ctx *ctx, struct v4l2_format *f, + const struct cal_format_info **info) +{ + struct v4l2_pix_format *format = &f->fmt.pix; + const struct cal_format_info *fmtinfo; + unsigned int bpp; + + /* + * Default to the first format if the requested pixel format code isn't + * supported. + */ + fmtinfo = cal_format_by_fourcc(f->fmt.pix.pixelformat); + if (!fmtinfo) + fmtinfo = &cal_formats[0]; + + /* + * Clamp the size, update the pixel format. 
The field and colorspace are + * accepted as-is, except for V4L2_FIELD_ANY that is turned into + * V4L2_FIELD_NONE. + */ + bpp = ALIGN(fmtinfo->bpp, 8); + + format->width = clamp_t(unsigned int, format->width, + CAL_MIN_WIDTH_BYTES * 8 / bpp, + CAL_MAX_WIDTH_BYTES * 8 / bpp); + format->height = clamp_t(unsigned int, format->height, + CAL_MIN_HEIGHT_LINES, CAL_MAX_HEIGHT_LINES); + format->pixelformat = fmtinfo->fourcc; + + if (format->field == V4L2_FIELD_ANY) + format->field = V4L2_FIELD_NONE; + + /* + * Calculate the number of bytes per line and the image size. The + * hardware stores the stride as a number of 16 bytes words, in a + * signed 15-bit value. Only 14 bits are thus usable. + */ + format->bytesperline = ALIGN(clamp(format->bytesperline, + format->width * bpp / 8, + ((1U << 14) - 1) * 16), 16); + + format->sizeimage = format->height * format->bytesperline; + + format->colorspace = ctx->v_fmt.fmt.pix.colorspace; + + if (info) + *info = fmtinfo; + + ctx_dbg(3, ctx, "%s: %s %ux%u (bytesperline %u sizeimage %u)\n", + __func__, fourcc_to_str(format->pixelformat), + format->width, format->height, + format->bytesperline, format->sizeimage); +} + +static int cal_mc_try_fmt_vid_cap(struct file *file, void *priv, + struct v4l2_format *f) +{ + struct cal_ctx *ctx = video_drvdata(file); + + cal_mc_try_fmt(ctx, f, NULL); + return 0; +} + +static int cal_mc_s_fmt_vid_cap(struct file *file, void *priv, + struct v4l2_format *f) +{ + struct cal_ctx *ctx = video_drvdata(file); + const struct cal_format_info *fmtinfo; + + if (vb2_is_busy(&ctx->vb_vidq)) { + ctx_dbg(3, ctx, "%s device busy\n", __func__); + return -EBUSY; + } + + cal_mc_try_fmt(ctx, f, &fmtinfo); + + ctx->v_fmt = *f; + ctx->fmtinfo = fmtinfo; + + return 0; +} + +static int cal_mc_enum_framesizes(struct file *file, void *fh, + struct v4l2_frmsizeenum *fsize) +{ + struct cal_ctx *ctx = video_drvdata(file); + const struct cal_format_info *fmtinfo; + unsigned int bpp; + + if (fsize->index > 0) + return -EINVAL; + + fmtinfo = cal_format_by_fourcc(fsize->pixel_format); + if (!fmtinfo) { + ctx_dbg(3, ctx, "Invalid pixel format 0x%08x\n", + fsize->pixel_format); + return -EINVAL; + } + + bpp = ALIGN(fmtinfo->bpp, 8); + + fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE; + fsize->stepwise.min_width = CAL_MIN_WIDTH_BYTES * 8 / bpp; + fsize->stepwise.max_width = CAL_MAX_WIDTH_BYTES * 8 / bpp; + fsize->stepwise.step_width = 64 / bpp; + fsize->stepwise.min_height = CAL_MIN_HEIGHT_LINES; + fsize->stepwise.max_height = CAL_MAX_HEIGHT_LINES; + fsize->stepwise.step_height = 1; + + return 0; +} + +static const struct v4l2_ioctl_ops cal_ioctl_mc_ops = { + .vidioc_querycap = cal_querycap, + .vidioc_enum_fmt_vid_cap = cal_mc_enum_fmt_vid_cap, + .vidioc_g_fmt_vid_cap = cal_g_fmt_vid_cap, + .vidioc_try_fmt_vid_cap = cal_mc_try_fmt_vid_cap, + .vidioc_s_fmt_vid_cap = cal_mc_s_fmt_vid_cap, + .vidioc_enum_framesizes = cal_mc_enum_framesizes, + .vidioc_reqbufs = vb2_ioctl_reqbufs, + .vidioc_create_bufs = vb2_ioctl_create_bufs, + .vidioc_prepare_buf = vb2_ioctl_prepare_buf, + .vidioc_querybuf = vb2_ioctl_querybuf, + .vidioc_qbuf = vb2_ioctl_qbuf, + .vidioc_dqbuf = vb2_ioctl_dqbuf, + .vidioc_expbuf = vb2_ioctl_expbuf, + .vidioc_streamon = vb2_ioctl_streamon, + .vidioc_streamoff = vb2_ioctl_streamoff, + .vidioc_log_status = v4l2_ctrl_log_status, +}; + +/* ------------------------------------------------------------------ + * videobuf2 Common Operations + * ------------------------------------------------------------------ + */ + +static int cal_queue_setup(struct 
vb2_queue *vq, + unsigned int *nbuffers, unsigned int *nplanes, + unsigned int sizes[], struct device *alloc_devs[]) +{ + struct cal_ctx *ctx = vb2_get_drv_priv(vq); + unsigned int size = ctx->v_fmt.fmt.pix.sizeimage; + + if (vq->num_buffers + *nbuffers < 3) + *nbuffers = 3 - vq->num_buffers; + + if (*nplanes) { + if (sizes[0] < size) + return -EINVAL; + size = sizes[0]; + } + + *nplanes = 1; + sizes[0] = size; + + ctx_dbg(3, ctx, "nbuffers=%d, size=%d\n", *nbuffers, sizes[0]); + + return 0; +} + +static int cal_buffer_prepare(struct vb2_buffer *vb) +{ + struct cal_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); + struct cal_buffer *buf = container_of(vb, struct cal_buffer, + vb.vb2_buf); + unsigned long size; + + size = ctx->v_fmt.fmt.pix.sizeimage; + if (vb2_plane_size(vb, 0) < size) { + ctx_err(ctx, + "data will not fit into plane (%lu < %lu)\n", + vb2_plane_size(vb, 0), size); + return -EINVAL; + } + + vb2_set_plane_payload(&buf->vb.vb2_buf, 0, size); + return 0; +} + +static void cal_buffer_queue(struct vb2_buffer *vb) +{ + struct cal_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); + struct cal_buffer *buf = container_of(vb, struct cal_buffer, + vb.vb2_buf); + unsigned long flags; + + /* recheck locking */ + spin_lock_irqsave(&ctx->dma.lock, flags); + list_add_tail(&buf->list, &ctx->dma.queue); + spin_unlock_irqrestore(&ctx->dma.lock, flags); +} + +static void cal_release_buffers(struct cal_ctx *ctx, + enum vb2_buffer_state state) +{ + struct cal_buffer *buf, *tmp; + + /* Release all queued buffers. */ + spin_lock_irq(&ctx->dma.lock); + + list_for_each_entry_safe(buf, tmp, &ctx->dma.queue, list) { + list_del(&buf->list); + vb2_buffer_done(&buf->vb.vb2_buf, state); + } + + if (ctx->dma.pending) { + vb2_buffer_done(&ctx->dma.pending->vb.vb2_buf, state); + ctx->dma.pending = NULL; + } + + if (ctx->dma.active) { + vb2_buffer_done(&ctx->dma.active->vb.vb2_buf, state); + ctx->dma.active = NULL; + } + + spin_unlock_irq(&ctx->dma.lock); +} + +/* ------------------------------------------------------------------ + * videobuf2 Operations + * ------------------------------------------------------------------ + */ + +static int cal_video_check_format(struct cal_ctx *ctx) +{ + const struct v4l2_mbus_framefmt *format; + struct v4l2_subdev_state *state; + struct media_pad *remote_pad; + int ret = 0; + + remote_pad = media_pad_remote_pad_first(&ctx->pad); + if (!remote_pad) + return -ENODEV; + + state = v4l2_subdev_lock_and_get_active_state(&ctx->phy->subdev); + + format = v4l2_subdev_get_pad_format(&ctx->phy->subdev, state, remote_pad->index); + if (!format) { + ret = -EINVAL; + goto out; + } + + if (ctx->fmtinfo->code != format->code || + ctx->v_fmt.fmt.pix.height != format->height || + ctx->v_fmt.fmt.pix.width != format->width || + ctx->v_fmt.fmt.pix.field != format->field) { + ret = -EPIPE; + goto out; + } + +out: + v4l2_subdev_unlock_state(state); + + return ret; +} + +static int cal_start_streaming(struct vb2_queue *vq, unsigned int count) +{ + struct cal_ctx *ctx = vb2_get_drv_priv(vq); + struct cal_buffer *buf; + dma_addr_t addr; + int ret; + + ret = video_device_pipeline_alloc_start(&ctx->vdev); + if (ret < 0) { + ctx_err(ctx, "Failed to start media pipeline: %d\n", ret); + goto error_release_buffers; + } + + /* + * Verify that the currently configured format matches the output of + * the connected CAMERARX. 
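+ * cal_video_check_format() compares the media bus code, width, height
+ * and field against the CAMERARX source pad and returns -EPIPE on any
+ * mismatch, so streaming is refused before the context is prepared and
+ * the hardware started.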
+ */ + ret = cal_video_check_format(ctx); + if (ret < 0) { + ctx_dbg(3, ctx, + "Format mismatch between CAMERARX and video node\n"); + goto error_pipeline; + } + + ret = cal_ctx_prepare(ctx); + if (ret) { + ctx_err(ctx, "Failed to prepare context: %d\n", ret); + goto error_pipeline; + } + + spin_lock_irq(&ctx->dma.lock); + buf = list_first_entry(&ctx->dma.queue, struct cal_buffer, list); + ctx->dma.active = buf; + list_del(&buf->list); + spin_unlock_irq(&ctx->dma.lock); + + addr = vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0); + + ret = pm_runtime_resume_and_get(ctx->cal->dev); + if (ret < 0) + goto error_pipeline; + + cal_ctx_set_dma_addr(ctx, addr); + cal_ctx_start(ctx); + + ret = v4l2_subdev_call(&ctx->phy->subdev, video, s_stream, 1); + if (ret) + goto error_stop; + + if (cal_debug >= 4) + cal_quickdump_regs(ctx->cal); + + return 0; + +error_stop: + cal_ctx_stop(ctx); + pm_runtime_put_sync(ctx->cal->dev); + cal_ctx_unprepare(ctx); + +error_pipeline: + video_device_pipeline_stop(&ctx->vdev); +error_release_buffers: + cal_release_buffers(ctx, VB2_BUF_STATE_QUEUED); + + return ret; +} + +static void cal_stop_streaming(struct vb2_queue *vq) +{ + struct cal_ctx *ctx = vb2_get_drv_priv(vq); + + cal_ctx_stop(ctx); + + v4l2_subdev_call(&ctx->phy->subdev, video, s_stream, 0); + + pm_runtime_put_sync(ctx->cal->dev); + + cal_ctx_unprepare(ctx); + + cal_release_buffers(ctx, VB2_BUF_STATE_ERROR); + + video_device_pipeline_stop(&ctx->vdev); +} + +static const struct vb2_ops cal_video_qops = { + .queue_setup = cal_queue_setup, + .buf_prepare = cal_buffer_prepare, + .buf_queue = cal_buffer_queue, + .start_streaming = cal_start_streaming, + .stop_streaming = cal_stop_streaming, + .wait_prepare = vb2_ops_wait_prepare, + .wait_finish = vb2_ops_wait_finish, +}; + +/* ------------------------------------------------------------------ + * V4L2 Initialization and Registration + * ------------------------------------------------------------------ + */ + +static const struct v4l2_file_operations cal_fops = { + .owner = THIS_MODULE, + .open = v4l2_fh_open, + .release = vb2_fop_release, + .poll = vb2_fop_poll, + .unlocked_ioctl = video_ioctl2, /* V4L2 ioctl handler */ + .mmap = vb2_fop_mmap, +}; + +static int cal_ctx_v4l2_init_formats(struct cal_ctx *ctx) +{ + struct v4l2_mbus_framefmt mbus_fmt; + const struct cal_format_info *fmtinfo; + unsigned int i, j, k; + int ret = 0; + + /* Enumerate sub device formats and enable all matching local formats */ + ctx->active_fmt = devm_kcalloc(ctx->cal->dev, cal_num_formats, + sizeof(*ctx->active_fmt), GFP_KERNEL); + if (!ctx->active_fmt) + return -ENOMEM; + + ctx->num_active_fmt = 0; + + for (j = 0, i = 0; ; ++j) { + struct v4l2_subdev_mbus_code_enum mbus_code = { + .index = j, + .which = V4L2_SUBDEV_FORMAT_ACTIVE, + }; + + ret = v4l2_subdev_call(ctx->phy->source, pad, enum_mbus_code, + NULL, &mbus_code); + if (ret == -EINVAL) + break; + + if (ret) { + ctx_err(ctx, "Error enumerating mbus codes in subdev %s: %d\n", + ctx->phy->source->name, ret); + return ret; + } + + ctx_dbg(2, ctx, + "subdev %s: code: %04x idx: %u\n", + ctx->phy->source->name, mbus_code.code, j); + + for (k = 0; k < cal_num_formats; k++) { + fmtinfo = &cal_formats[k]; + + if (mbus_code.code == fmtinfo->code) { + ctx->active_fmt[i] = fmtinfo; + ctx_dbg(2, ctx, + "matched fourcc: %s: code: %04x idx: %u\n", + fourcc_to_str(fmtinfo->fourcc), + fmtinfo->code, i); + ctx->num_active_fmt = ++i; + } + } + } + + if (i == 0) { + ctx_err(ctx, "No suitable format reported by subdev %s\n", + 
ctx->phy->source->name); + return -EINVAL; + } + + ret = __subdev_get_format(ctx, &mbus_fmt); + if (ret) + return ret; + + fmtinfo = find_format_by_code(ctx, mbus_fmt.code); + if (!fmtinfo) { + ctx_dbg(3, ctx, "mbus code format (0x%08x) not found.\n", + mbus_fmt.code); + return -EINVAL; + } + + /* Save current format */ + v4l2_fill_pix_format(&ctx->v_fmt.fmt.pix, &mbus_fmt); + ctx->v_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + ctx->v_fmt.fmt.pix.pixelformat = fmtinfo->fourcc; + cal_calc_format_size(ctx, fmtinfo, &ctx->v_fmt); + ctx->fmtinfo = fmtinfo; + + return 0; +} + +static int cal_ctx_v4l2_init_mc_format(struct cal_ctx *ctx) +{ + const struct cal_format_info *fmtinfo; + struct v4l2_pix_format *pix_fmt = &ctx->v_fmt.fmt.pix; + + fmtinfo = cal_format_by_code(MEDIA_BUS_FMT_UYVY8_1X16); + if (!fmtinfo) + return -EINVAL; + + pix_fmt->width = 640; + pix_fmt->height = 480; + pix_fmt->field = V4L2_FIELD_NONE; + pix_fmt->colorspace = V4L2_COLORSPACE_SRGB; + pix_fmt->ycbcr_enc = V4L2_YCBCR_ENC_601; + pix_fmt->quantization = V4L2_QUANTIZATION_LIM_RANGE; + pix_fmt->xfer_func = V4L2_XFER_FUNC_SRGB; + pix_fmt->pixelformat = fmtinfo->fourcc; + + ctx->v_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + + /* Save current format */ + cal_calc_format_size(ctx, fmtinfo, &ctx->v_fmt); + ctx->fmtinfo = fmtinfo; + + return 0; +} + +int cal_ctx_v4l2_register(struct cal_ctx *ctx) +{ + struct video_device *vfd = &ctx->vdev; + int ret; + + if (!cal_mc_api) { + struct v4l2_ctrl_handler *hdl = &ctx->ctrl_handler; + + ret = cal_ctx_v4l2_init_formats(ctx); + if (ret) { + ctx_err(ctx, "Failed to init formats: %d\n", ret); + return ret; + } + + ret = v4l2_ctrl_add_handler(hdl, ctx->phy->source->ctrl_handler, + NULL, true); + if (ret < 0) { + ctx_err(ctx, "Failed to add source ctrl handler\n"); + return ret; + } + } else { + ret = cal_ctx_v4l2_init_mc_format(ctx); + if (ret) { + ctx_err(ctx, "Failed to init format: %d\n", ret); + return ret; + } + } + + ret = video_register_device(vfd, VFL_TYPE_VIDEO, cal_video_nr); + if (ret < 0) { + ctx_err(ctx, "Failed to register video device\n"); + return ret; + } + + ret = media_create_pad_link(&ctx->phy->subdev.entity, + CAL_CAMERARX_PAD_FIRST_SOURCE, + &vfd->entity, 0, + MEDIA_LNK_FL_IMMUTABLE | + MEDIA_LNK_FL_ENABLED); + if (ret) { + ctx_err(ctx, "Failed to create media link for context %u\n", + ctx->dma_ctx); + video_unregister_device(vfd); + return ret; + } + + ctx_info(ctx, "V4L2 device registered as %s\n", + video_device_node_name(vfd)); + + return 0; +} + +void cal_ctx_v4l2_unregister(struct cal_ctx *ctx) +{ + ctx_dbg(1, ctx, "unregistering %s\n", + video_device_node_name(&ctx->vdev)); + + video_unregister_device(&ctx->vdev); +} + +int cal_ctx_v4l2_init(struct cal_ctx *ctx) +{ + struct video_device *vfd = &ctx->vdev; + struct vb2_queue *q = &ctx->vb_vidq; + int ret; + + INIT_LIST_HEAD(&ctx->dma.queue); + spin_lock_init(&ctx->dma.lock); + mutex_init(&ctx->mutex); + init_waitqueue_head(&ctx->dma.wait); + + /* Initialize the vb2 queue. */ + q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + q->io_modes = VB2_MMAP | VB2_DMABUF; + q->drv_priv = ctx; + q->buf_struct_size = sizeof(struct cal_buffer); + q->ops = &cal_video_qops; + q->mem_ops = &vb2_dma_contig_memops; + q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; + q->lock = &ctx->mutex; + q->min_buffers_needed = 3; + q->dev = ctx->cal->dev; + + ret = vb2_queue_init(q); + if (ret) + return ret; + + /* Initialize the video device and media entity. 
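+ * The video node exposes a single sink pad; cal_ctx_v4l2_register()
+ * later links it to the first CAMERARX source pad with an immutable,
+ * enabled media link.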
*/ + vfd->fops = &cal_fops; + vfd->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING + | (cal_mc_api ? V4L2_CAP_IO_MC : 0); + vfd->v4l2_dev = &ctx->cal->v4l2_dev; + vfd->queue = q; + snprintf(vfd->name, sizeof(vfd->name), "CAL output %u", ctx->dma_ctx); + vfd->release = video_device_release_empty; + vfd->ioctl_ops = cal_mc_api ? &cal_ioctl_mc_ops : &cal_ioctl_legacy_ops; + vfd->lock = &ctx->mutex; + video_set_drvdata(vfd, ctx); + + ctx->pad.flags = MEDIA_PAD_FL_SINK; + ret = media_entity_pads_init(&vfd->entity, 1, &ctx->pad); + if (ret < 0) + return ret; + + if (!cal_mc_api) { + /* Initialize the control handler. */ + struct v4l2_ctrl_handler *hdl = &ctx->ctrl_handler; + + ret = v4l2_ctrl_handler_init(hdl, 11); + if (ret < 0) { + ctx_err(ctx, "Failed to init ctrl handler\n"); + goto error; + } + + vfd->ctrl_handler = hdl; + } + + return 0; + +error: + media_entity_cleanup(&vfd->entity); + return ret; +} + +void cal_ctx_v4l2_cleanup(struct cal_ctx *ctx) +{ + if (!cal_mc_api) + v4l2_ctrl_handler_free(&ctx->ctrl_handler); + + media_entity_cleanup(&ctx->vdev.entity); +} diff --git a/drivers/media/platform/ti/cal/cal.c b/drivers/media/platform/ti/cal/cal.c new file mode 100644 index 0000000000..528909ae4b --- /dev/null +++ b/drivers/media/platform/ti/cal/cal.c @@ -0,0 +1,1343 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * TI Camera Access Layer (CAL) - Driver + * + * Copyright (c) 2015-2020 Texas Instruments Inc. + * + * Authors: + * Benoit Parrot + * Laurent Pinchart + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "cal.h" +#include "cal_regs.h" + +MODULE_DESCRIPTION("TI CAL driver"); +MODULE_AUTHOR("Benoit Parrot, "); +MODULE_LICENSE("GPL v2"); +MODULE_VERSION("0.1.0"); + +int cal_video_nr = -1; +module_param_named(video_nr, cal_video_nr, uint, 0644); +MODULE_PARM_DESC(video_nr, "videoX start number, -1 is autodetect"); + +unsigned int cal_debug; +module_param_named(debug, cal_debug, uint, 0644); +MODULE_PARM_DESC(debug, "activates debug info"); + +#ifdef CONFIG_VIDEO_TI_CAL_MC +#define CAL_MC_API_DEFAULT 1 +#else +#define CAL_MC_API_DEFAULT 0 +#endif + +bool cal_mc_api = CAL_MC_API_DEFAULT; +module_param_named(mc_api, cal_mc_api, bool, 0444); +MODULE_PARM_DESC(mc_api, "activates the MC API"); + +/* ------------------------------------------------------------------ + * Format Handling + * ------------------------------------------------------------------ + */ + +const struct cal_format_info cal_formats[] = { + { + .fourcc = V4L2_PIX_FMT_YUYV, + .code = MEDIA_BUS_FMT_YUYV8_1X16, + .bpp = 16, + }, { + .fourcc = V4L2_PIX_FMT_UYVY, + .code = MEDIA_BUS_FMT_UYVY8_1X16, + .bpp = 16, + }, { + .fourcc = V4L2_PIX_FMT_YVYU, + .code = MEDIA_BUS_FMT_YVYU8_1X16, + .bpp = 16, + }, { + .fourcc = V4L2_PIX_FMT_VYUY, + .code = MEDIA_BUS_FMT_VYUY8_1X16, + .bpp = 16, + }, { + .fourcc = V4L2_PIX_FMT_RGB565, + .code = MEDIA_BUS_FMT_RGB565_1X16, + .bpp = 16, + }, { + .fourcc = V4L2_PIX_FMT_SBGGR8, + .code = MEDIA_BUS_FMT_SBGGR8_1X8, + .bpp = 8, + }, { + .fourcc = V4L2_PIX_FMT_SGBRG8, + .code = MEDIA_BUS_FMT_SGBRG8_1X8, + .bpp = 8, + }, { + .fourcc = V4L2_PIX_FMT_SGRBG8, + .code = MEDIA_BUS_FMT_SGRBG8_1X8, + .bpp = 8, + }, { + .fourcc = V4L2_PIX_FMT_SRGGB8, + .code = MEDIA_BUS_FMT_SRGGB8_1X8, + .bpp = 8, + }, { + .fourcc = V4L2_PIX_FMT_SBGGR10, + .code = MEDIA_BUS_FMT_SBGGR10_1X10, + .bpp = 10, + }, { + .fourcc = V4L2_PIX_FMT_SGBRG10, + .code = MEDIA_BUS_FMT_SGBRG10_1X10, + 
.bpp = 10, + }, { + .fourcc = V4L2_PIX_FMT_SGRBG10, + .code = MEDIA_BUS_FMT_SGRBG10_1X10, + .bpp = 10, + }, { + .fourcc = V4L2_PIX_FMT_SRGGB10, + .code = MEDIA_BUS_FMT_SRGGB10_1X10, + .bpp = 10, + }, { + .fourcc = V4L2_PIX_FMT_SBGGR12, + .code = MEDIA_BUS_FMT_SBGGR12_1X12, + .bpp = 12, + }, { + .fourcc = V4L2_PIX_FMT_SGBRG12, + .code = MEDIA_BUS_FMT_SGBRG12_1X12, + .bpp = 12, + }, { + .fourcc = V4L2_PIX_FMT_SGRBG12, + .code = MEDIA_BUS_FMT_SGRBG12_1X12, + .bpp = 12, + }, { + .fourcc = V4L2_PIX_FMT_SRGGB12, + .code = MEDIA_BUS_FMT_SRGGB12_1X12, + .bpp = 12, + }, +}; + +const unsigned int cal_num_formats = ARRAY_SIZE(cal_formats); + +const struct cal_format_info *cal_format_by_fourcc(u32 fourcc) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(cal_formats); ++i) { + if (cal_formats[i].fourcc == fourcc) + return &cal_formats[i]; + } + + return NULL; +} + +const struct cal_format_info *cal_format_by_code(u32 code) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(cal_formats); ++i) { + if (cal_formats[i].code == code) + return &cal_formats[i]; + } + + return NULL; +} + +/* ------------------------------------------------------------------ + * Platform Data + * ------------------------------------------------------------------ + */ + +static const struct cal_camerarx_data dra72x_cal_camerarx[] = { + { + .fields = { + [F_CTRLCLKEN] = { 10, 10 }, + [F_CAMMODE] = { 11, 12 }, + [F_LANEENABLE] = { 13, 16 }, + [F_CSI_MODE] = { 17, 17 }, + }, + .num_lanes = 4, + }, + { + .fields = { + [F_CTRLCLKEN] = { 0, 0 }, + [F_CAMMODE] = { 1, 2 }, + [F_LANEENABLE] = { 3, 4 }, + [F_CSI_MODE] = { 5, 5 }, + }, + .num_lanes = 2, + }, +}; + +static const struct cal_data dra72x_cal_data = { + .camerarx = dra72x_cal_camerarx, + .num_csi2_phy = ARRAY_SIZE(dra72x_cal_camerarx), +}; + +static const struct cal_data dra72x_es1_cal_data = { + .camerarx = dra72x_cal_camerarx, + .num_csi2_phy = ARRAY_SIZE(dra72x_cal_camerarx), + .flags = DRA72_CAL_PRE_ES2_LDO_DISABLE, +}; + +static const struct cal_camerarx_data dra76x_cal_csi_phy[] = { + { + .fields = { + [F_CTRLCLKEN] = { 8, 8 }, + [F_CAMMODE] = { 9, 10 }, + [F_CSI_MODE] = { 11, 11 }, + [F_LANEENABLE] = { 27, 31 }, + }, + .num_lanes = 5, + }, + { + .fields = { + [F_CTRLCLKEN] = { 0, 0 }, + [F_CAMMODE] = { 1, 2 }, + [F_CSI_MODE] = { 3, 3 }, + [F_LANEENABLE] = { 24, 26 }, + }, + .num_lanes = 3, + }, +}; + +static const struct cal_data dra76x_cal_data = { + .camerarx = dra76x_cal_csi_phy, + .num_csi2_phy = ARRAY_SIZE(dra76x_cal_csi_phy), +}; + +static const struct cal_camerarx_data am654_cal_csi_phy[] = { + { + .fields = { + [F_CTRLCLKEN] = { 15, 15 }, + [F_CAMMODE] = { 24, 25 }, + [F_LANEENABLE] = { 0, 4 }, + }, + .num_lanes = 5, + }, +}; + +static const struct cal_data am654_cal_data = { + .camerarx = am654_cal_csi_phy, + .num_csi2_phy = ARRAY_SIZE(am654_cal_csi_phy), +}; + +/* ------------------------------------------------------------------ + * I/O Register Accessors + * ------------------------------------------------------------------ + */ + +void cal_quickdump_regs(struct cal_dev *cal) +{ + unsigned int i; + + cal_info(cal, "CAL Registers @ 0x%pa:\n", &cal->res->start); + print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 4, + (__force const void *)cal->base, + resource_size(cal->res), false); + + for (i = 0; i < cal->data->num_csi2_phy; ++i) { + struct cal_camerarx *phy = cal->phy[i]; + + cal_info(cal, "CSI2 Core %u Registers @ %pa:\n", i, + &phy->res->start); + print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 4, + (__force const void *)phy->base, + 
resource_size(phy->res), + false); + } +} + +/* ------------------------------------------------------------------ + * Context Management + * ------------------------------------------------------------------ + */ + +#define CAL_MAX_PIX_PROC 4 + +static int cal_reserve_pix_proc(struct cal_dev *cal) +{ + unsigned long ret; + + spin_lock(&cal->v4l2_dev.lock); + + ret = find_first_zero_bit(&cal->reserved_pix_proc_mask, CAL_MAX_PIX_PROC); + + if (ret == CAL_MAX_PIX_PROC) { + spin_unlock(&cal->v4l2_dev.lock); + return -ENOSPC; + } + + cal->reserved_pix_proc_mask |= BIT(ret); + + spin_unlock(&cal->v4l2_dev.lock); + + return ret; +} + +static void cal_release_pix_proc(struct cal_dev *cal, unsigned int pix_proc_num) +{ + spin_lock(&cal->v4l2_dev.lock); + + cal->reserved_pix_proc_mask &= ~BIT(pix_proc_num); + + spin_unlock(&cal->v4l2_dev.lock); +} + +static void cal_ctx_csi2_config(struct cal_ctx *ctx) +{ + u32 val; + + val = cal_read(ctx->cal, CAL_CSI2_CTX(ctx->phy->instance, ctx->csi2_ctx)); + cal_set_field(&val, ctx->cport, CAL_CSI2_CTX_CPORT_MASK); + /* + * DT type: MIPI CSI-2 Specs + * 0x1: All - DT filter is disabled + * 0x24: RGB888 1 pixel = 3 bytes + * 0x2B: RAW10 4 pixels = 5 bytes + * 0x2A: RAW8 1 pixel = 1 byte + * 0x1E: YUV422 2 pixels = 4 bytes + */ + cal_set_field(&val, ctx->datatype, CAL_CSI2_CTX_DT_MASK); + cal_set_field(&val, ctx->vc, CAL_CSI2_CTX_VC_MASK); + cal_set_field(&val, ctx->v_fmt.fmt.pix.height, CAL_CSI2_CTX_LINES_MASK); + cal_set_field(&val, CAL_CSI2_CTX_ATT_PIX, CAL_CSI2_CTX_ATT_MASK); + cal_set_field(&val, CAL_CSI2_CTX_PACK_MODE_LINE, + CAL_CSI2_CTX_PACK_MODE_MASK); + cal_write(ctx->cal, CAL_CSI2_CTX(ctx->phy->instance, ctx->csi2_ctx), val); + ctx_dbg(3, ctx, "CAL_CSI2_CTX(%u, %u) = 0x%08x\n", + ctx->phy->instance, ctx->csi2_ctx, + cal_read(ctx->cal, CAL_CSI2_CTX(ctx->phy->instance, ctx->csi2_ctx))); +} + +static void cal_ctx_pix_proc_config(struct cal_ctx *ctx) +{ + u32 val, extract, pack; + + switch (ctx->fmtinfo->bpp) { + case 8: + extract = CAL_PIX_PROC_EXTRACT_B8; + pack = CAL_PIX_PROC_PACK_B8; + break; + case 10: + extract = CAL_PIX_PROC_EXTRACT_B10_MIPI; + pack = CAL_PIX_PROC_PACK_B16; + break; + case 12: + extract = CAL_PIX_PROC_EXTRACT_B12_MIPI; + pack = CAL_PIX_PROC_PACK_B16; + break; + case 16: + extract = CAL_PIX_PROC_EXTRACT_B16_LE; + pack = CAL_PIX_PROC_PACK_B16; + break; + default: + /* + * If you see this warning then it means that you added + * some new entry in the cal_formats[] array with a different + * bit per pixel values then the one supported below. + * Either add support for the new bpp value below or adjust + * the new entry to use one of the value below. + * + * Instead of failing here just use 8 bpp as a default. + */ + dev_warn_once(ctx->cal->dev, + "%s:%d:%s: bpp:%d unsupported! 
Overwritten with 8.\n", + __FILE__, __LINE__, __func__, ctx->fmtinfo->bpp); + extract = CAL_PIX_PROC_EXTRACT_B8; + pack = CAL_PIX_PROC_PACK_B8; + break; + } + + val = cal_read(ctx->cal, CAL_PIX_PROC(ctx->pix_proc)); + cal_set_field(&val, extract, CAL_PIX_PROC_EXTRACT_MASK); + cal_set_field(&val, CAL_PIX_PROC_DPCMD_BYPASS, CAL_PIX_PROC_DPCMD_MASK); + cal_set_field(&val, CAL_PIX_PROC_DPCME_BYPASS, CAL_PIX_PROC_DPCME_MASK); + cal_set_field(&val, pack, CAL_PIX_PROC_PACK_MASK); + cal_set_field(&val, ctx->cport, CAL_PIX_PROC_CPORT_MASK); + cal_set_field(&val, 1, CAL_PIX_PROC_EN_MASK); + cal_write(ctx->cal, CAL_PIX_PROC(ctx->pix_proc), val); + ctx_dbg(3, ctx, "CAL_PIX_PROC(%u) = 0x%08x\n", ctx->pix_proc, + cal_read(ctx->cal, CAL_PIX_PROC(ctx->pix_proc))); +} + +static void cal_ctx_wr_dma_config(struct cal_ctx *ctx) +{ + unsigned int stride = ctx->v_fmt.fmt.pix.bytesperline; + u32 val; + + val = cal_read(ctx->cal, CAL_WR_DMA_CTRL(ctx->dma_ctx)); + cal_set_field(&val, ctx->cport, CAL_WR_DMA_CTRL_CPORT_MASK); + cal_set_field(&val, ctx->v_fmt.fmt.pix.height, + CAL_WR_DMA_CTRL_YSIZE_MASK); + cal_set_field(&val, CAL_WR_DMA_CTRL_DTAG_PIX_DAT, + CAL_WR_DMA_CTRL_DTAG_MASK); + cal_set_field(&val, CAL_WR_DMA_CTRL_PATTERN_LINEAR, + CAL_WR_DMA_CTRL_PATTERN_MASK); + cal_set_field(&val, 1, CAL_WR_DMA_CTRL_STALL_RD_MASK); + cal_write(ctx->cal, CAL_WR_DMA_CTRL(ctx->dma_ctx), val); + ctx_dbg(3, ctx, "CAL_WR_DMA_CTRL(%d) = 0x%08x\n", ctx->dma_ctx, + cal_read(ctx->cal, CAL_WR_DMA_CTRL(ctx->dma_ctx))); + + cal_write_field(ctx->cal, CAL_WR_DMA_OFST(ctx->dma_ctx), + stride / 16, CAL_WR_DMA_OFST_MASK); + ctx_dbg(3, ctx, "CAL_WR_DMA_OFST(%d) = 0x%08x\n", ctx->dma_ctx, + cal_read(ctx->cal, CAL_WR_DMA_OFST(ctx->dma_ctx))); + + val = cal_read(ctx->cal, CAL_WR_DMA_XSIZE(ctx->dma_ctx)); + /* 64 bit word means no skipping */ + cal_set_field(&val, 0, CAL_WR_DMA_XSIZE_XSKIP_MASK); + /* + * The XSIZE field is expressed in 64-bit units and prevents overflows + * in case of synchronization issues by limiting the number of bytes + * written per line. 
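+ *
+ * For example (purely illustrative numbers): a 1280x720 YUYV capture
+ * has a stride of 1280 * 16 / 8 = 2560 bytes, so CAL_WR_DMA_OFST is
+ * programmed with 2560 / 16 = 160 and the XSIZE field below with
+ * 2560 / 8 = 320.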
+ */ + cal_set_field(&val, stride / 8, CAL_WR_DMA_XSIZE_MASK); + cal_write(ctx->cal, CAL_WR_DMA_XSIZE(ctx->dma_ctx), val); + ctx_dbg(3, ctx, "CAL_WR_DMA_XSIZE(%d) = 0x%08x\n", ctx->dma_ctx, + cal_read(ctx->cal, CAL_WR_DMA_XSIZE(ctx->dma_ctx))); +} + +void cal_ctx_set_dma_addr(struct cal_ctx *ctx, dma_addr_t addr) +{ + cal_write(ctx->cal, CAL_WR_DMA_ADDR(ctx->dma_ctx), addr); +} + +static void cal_ctx_wr_dma_enable(struct cal_ctx *ctx) +{ + u32 val = cal_read(ctx->cal, CAL_WR_DMA_CTRL(ctx->dma_ctx)); + + cal_set_field(&val, CAL_WR_DMA_CTRL_MODE_CONST, + CAL_WR_DMA_CTRL_MODE_MASK); + cal_write(ctx->cal, CAL_WR_DMA_CTRL(ctx->dma_ctx), val); +} + +static void cal_ctx_wr_dma_disable(struct cal_ctx *ctx) +{ + u32 val = cal_read(ctx->cal, CAL_WR_DMA_CTRL(ctx->dma_ctx)); + + cal_set_field(&val, CAL_WR_DMA_CTRL_MODE_DIS, + CAL_WR_DMA_CTRL_MODE_MASK); + cal_write(ctx->cal, CAL_WR_DMA_CTRL(ctx->dma_ctx), val); +} + +static bool cal_ctx_wr_dma_stopped(struct cal_ctx *ctx) +{ + bool stopped; + + spin_lock_irq(&ctx->dma.lock); + stopped = ctx->dma.state == CAL_DMA_STOPPED; + spin_unlock_irq(&ctx->dma.lock); + + return stopped; +} + +static int +cal_get_remote_frame_desc_entry(struct cal_ctx *ctx, + struct v4l2_mbus_frame_desc_entry *entry) +{ + struct v4l2_mbus_frame_desc fd; + struct media_pad *phy_source_pad; + int ret; + + phy_source_pad = media_pad_remote_pad_first(&ctx->pad); + if (!phy_source_pad) + return -ENODEV; + + ret = v4l2_subdev_call(&ctx->phy->subdev, pad, get_frame_desc, + phy_source_pad->index, &fd); + if (ret) + return ret; + + if (fd.num_entries != 1) + return -EINVAL; + + *entry = fd.entry[0]; + + return 0; +} + +int cal_ctx_prepare(struct cal_ctx *ctx) +{ + struct v4l2_mbus_frame_desc_entry entry; + int ret; + + ret = cal_get_remote_frame_desc_entry(ctx, &entry); + + if (ret == -ENOIOCTLCMD) { + ctx->vc = 0; + ctx->datatype = CAL_CSI2_CTX_DT_ANY; + } else if (!ret) { + ctx_dbg(2, ctx, "Framedesc: len %u, vc %u, dt %#x\n", + entry.length, entry.bus.csi2.vc, entry.bus.csi2.dt); + + ctx->vc = entry.bus.csi2.vc; + ctx->datatype = entry.bus.csi2.dt; + } else { + return ret; + } + + ctx->use_pix_proc = !ctx->fmtinfo->meta; + + if (ctx->use_pix_proc) { + ret = cal_reserve_pix_proc(ctx->cal); + if (ret < 0) { + ctx_err(ctx, "Failed to reserve pix proc: %d\n", ret); + return ret; + } + + ctx->pix_proc = ret; + } + + return 0; +} + +void cal_ctx_unprepare(struct cal_ctx *ctx) +{ + if (ctx->use_pix_proc) + cal_release_pix_proc(ctx->cal, ctx->pix_proc); +} + +void cal_ctx_start(struct cal_ctx *ctx) +{ + struct cal_camerarx *phy = ctx->phy; + + /* + * Reset the frame number & sequence number, but only if the + * virtual channel is not already in use. + */ + + spin_lock(&phy->vc_lock); + + if (phy->vc_enable_count[ctx->vc]++ == 0) { + phy->vc_frame_number[ctx->vc] = 0; + phy->vc_sequence[ctx->vc] = 0; + } + + spin_unlock(&phy->vc_lock); + + ctx->dma.state = CAL_DMA_RUNNING; + + /* Configure the CSI-2, pixel processing and write DMA contexts. */ + cal_ctx_csi2_config(ctx); + if (ctx->use_pix_proc) + cal_ctx_pix_proc_config(ctx); + cal_ctx_wr_dma_config(ctx); + + /* Enable IRQ_WDMA_END and IRQ_WDMA_START. 
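+ * The WDMA_END bits live in IRQ register bank 1 and the WDMA_START
+ * bits in bank 2, hence the two separate IRQENABLE_SET writes below;
+ * cal_irq() reads back the matching status banks.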
*/ + cal_write(ctx->cal, CAL_HL_IRQENABLE_SET(1), + CAL_HL_IRQ_WDMA_END_MASK(ctx->dma_ctx)); + cal_write(ctx->cal, CAL_HL_IRQENABLE_SET(2), + CAL_HL_IRQ_WDMA_START_MASK(ctx->dma_ctx)); + + cal_ctx_wr_dma_enable(ctx); +} + +void cal_ctx_stop(struct cal_ctx *ctx) +{ + struct cal_camerarx *phy = ctx->phy; + long timeout; + + WARN_ON(phy->vc_enable_count[ctx->vc] == 0); + + spin_lock(&phy->vc_lock); + phy->vc_enable_count[ctx->vc]--; + spin_unlock(&phy->vc_lock); + + /* + * Request DMA stop and wait until it completes. If completion times + * out, forcefully disable the DMA. + */ + spin_lock_irq(&ctx->dma.lock); + ctx->dma.state = CAL_DMA_STOP_REQUESTED; + spin_unlock_irq(&ctx->dma.lock); + + timeout = wait_event_timeout(ctx->dma.wait, cal_ctx_wr_dma_stopped(ctx), + msecs_to_jiffies(500)); + if (!timeout) { + ctx_err(ctx, "failed to disable dma cleanly\n"); + cal_ctx_wr_dma_disable(ctx); + } + + /* Disable IRQ_WDMA_END and IRQ_WDMA_START. */ + cal_write(ctx->cal, CAL_HL_IRQENABLE_CLR(1), + CAL_HL_IRQ_WDMA_END_MASK(ctx->dma_ctx)); + cal_write(ctx->cal, CAL_HL_IRQENABLE_CLR(2), + CAL_HL_IRQ_WDMA_START_MASK(ctx->dma_ctx)); + + ctx->dma.state = CAL_DMA_STOPPED; + + /* Disable CSI2 context */ + cal_write(ctx->cal, CAL_CSI2_CTX(ctx->phy->instance, ctx->csi2_ctx), 0); + + /* Disable pix proc */ + if (ctx->use_pix_proc) + cal_write(ctx->cal, CAL_PIX_PROC(ctx->pix_proc), 0); +} + +/* ------------------------------------------------------------------ + * IRQ Handling + * ------------------------------------------------------------------ + */ + +/* + * Track a sequence number for each virtual channel, which is shared by + * all contexts using the same virtual channel. This is done using the + * CSI-2 frame number as a base. + */ +static void cal_update_seq_number(struct cal_ctx *ctx) +{ + struct cal_dev *cal = ctx->cal; + struct cal_camerarx *phy = ctx->phy; + u16 prev_frame_num, frame_num; + u8 vc = ctx->vc; + + frame_num = + cal_read(cal, CAL_CSI2_STATUS(phy->instance, ctx->csi2_ctx)) & + 0xffff; + + if (phy->vc_frame_number[vc] != frame_num) { + prev_frame_num = phy->vc_frame_number[vc]; + + if (prev_frame_num >= frame_num) + phy->vc_sequence[vc] += 1; + else + phy->vc_sequence[vc] += frame_num - prev_frame_num; + + phy->vc_frame_number[vc] = frame_num; + } +} + +static inline void cal_irq_wdma_start(struct cal_ctx *ctx) +{ + spin_lock(&ctx->dma.lock); + + if (ctx->dma.state == CAL_DMA_STOP_REQUESTED) { + /* + * If a stop is requested, disable the write DMA context + * immediately. The CAL_WR_DMA_CTRL_j.MODE field is shadowed, + * the current frame will complete and the DMA will then stop. + */ + cal_ctx_wr_dma_disable(ctx); + ctx->dma.state = CAL_DMA_STOP_PENDING; + } else if (!list_empty(&ctx->dma.queue) && !ctx->dma.pending) { + /* + * Otherwise, if a new buffer is available, queue it to the + * hardware. + */ + struct cal_buffer *buf; + dma_addr_t addr; + + buf = list_first_entry(&ctx->dma.queue, struct cal_buffer, + list); + addr = vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0); + cal_ctx_set_dma_addr(ctx, addr); + + ctx->dma.pending = buf; + list_del(&buf->list); + } + + spin_unlock(&ctx->dma.lock); + + cal_update_seq_number(ctx); +} + +static inline void cal_irq_wdma_end(struct cal_ctx *ctx) +{ + struct cal_buffer *buf = NULL; + + spin_lock(&ctx->dma.lock); + + /* If the DMA context was stopping, it is now stopped. 
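+ * Waking dma.wait lets cal_ctx_stop(), blocked in wait_event_timeout(),
+ * observe the CAL_DMA_STOPPED state and finish the teardown.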
*/ + if (ctx->dma.state == CAL_DMA_STOP_PENDING) { + ctx->dma.state = CAL_DMA_STOPPED; + wake_up(&ctx->dma.wait); + } + + /* If a new buffer was queued, complete the current buffer. */ + if (ctx->dma.pending) { + buf = ctx->dma.active; + ctx->dma.active = ctx->dma.pending; + ctx->dma.pending = NULL; + } + + spin_unlock(&ctx->dma.lock); + + if (buf) { + buf->vb.vb2_buf.timestamp = ktime_get_ns(); + buf->vb.field = ctx->v_fmt.fmt.pix.field; + buf->vb.sequence = ctx->phy->vc_sequence[ctx->vc]; + + vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE); + } +} + +static void cal_irq_handle_wdma(struct cal_ctx *ctx, bool start, bool end) +{ + /* + * CAL HW interrupts are inherently racy. If we get both start and end + * interrupts, we don't know what has happened: did the DMA for a single + * frame start and end, or did one frame end and a new frame start? + * + * Usually for normal pixel frames we get the interrupts separately. If + * we do get both, we have to guess. The assumption in the code below is + * that the active vertical area is larger than the blanking vertical + * area, and thus it is more likely that we get the end of the old frame + * and the start of a new frame. + * + * However, for embedded data, which is only a few lines high, we always + * get both interrupts. Here the assumption is that we get both for the + * same frame. + */ + if (ctx->v_fmt.fmt.pix.height < 10) { + if (start) + cal_irq_wdma_start(ctx); + + if (end) + cal_irq_wdma_end(ctx); + } else { + if (end) + cal_irq_wdma_end(ctx); + + if (start) + cal_irq_wdma_start(ctx); + } +} + +static irqreturn_t cal_irq(int irq_cal, void *data) +{ + struct cal_dev *cal = data; + u32 status[3]; + unsigned int i; + + for (i = 0; i < 3; ++i) { + status[i] = cal_read(cal, CAL_HL_IRQSTATUS(i)); + if (status[i]) + cal_write(cal, CAL_HL_IRQSTATUS(i), status[i]); + } + + if (status[0]) { + if (status[0] & CAL_HL_IRQ_OCPO_ERR_MASK) + dev_err_ratelimited(cal->dev, "OCPO ERROR\n"); + + for (i = 0; i < cal->data->num_csi2_phy; ++i) { + if (status[0] & CAL_HL_IRQ_CIO_MASK(i)) { + u32 cio_stat = cal_read(cal, + CAL_CSI2_COMPLEXIO_IRQSTATUS(i)); + + dev_err_ratelimited(cal->dev, + "CIO%u error: %#08x\n", i, cio_stat); + + cal_write(cal, CAL_CSI2_COMPLEXIO_IRQSTATUS(i), + cio_stat); + } + + if (status[0] & CAL_HL_IRQ_VC_MASK(i)) { + u32 vc_stat = cal_read(cal, CAL_CSI2_VC_IRQSTATUS(i)); + + dev_err_ratelimited(cal->dev, + "CIO%u VC error: %#08x\n", + i, vc_stat); + + cal_write(cal, CAL_CSI2_VC_IRQSTATUS(i), vc_stat); + } + } + } + + for (i = 0; i < cal->num_contexts; ++i) { + bool end = !!(status[1] & CAL_HL_IRQ_WDMA_END_MASK(i)); + bool start = !!(status[2] & CAL_HL_IRQ_WDMA_START_MASK(i)); + + if (start || end) + cal_irq_handle_wdma(cal->ctx[i], start, end); + } + + return IRQ_HANDLED; +} + +/* ------------------------------------------------------------------ + * Asynchronous V4L2 subdev binding + * ------------------------------------------------------------------ + */ + +struct cal_v4l2_async_subdev { + struct v4l2_async_connection asd; /* Must be first */ + struct cal_camerarx *phy; +}; + +static inline struct cal_v4l2_async_subdev * +to_cal_asd(struct v4l2_async_connection *asd) +{ + return container_of(asd, struct cal_v4l2_async_subdev, asd); +} + +static int cal_async_notifier_bound(struct v4l2_async_notifier *notifier, + struct v4l2_subdev *subdev, + struct v4l2_async_connection *asd) +{ + struct cal_camerarx *phy = to_cal_asd(asd)->phy; + int pad; + int ret; + + if (phy->source) { + phy_info(phy, "Rejecting subdev %s (Already 
set!!)", + subdev->name); + return 0; + } + + phy->source = subdev; + phy_dbg(1, phy, "Using source %s for capture\n", subdev->name); + + pad = media_entity_get_fwnode_pad(&subdev->entity, + of_fwnode_handle(phy->source_ep_node), + MEDIA_PAD_FL_SOURCE); + if (pad < 0) { + phy_err(phy, "Source %s has no connected source pad\n", + subdev->name); + return pad; + } + + ret = media_create_pad_link(&subdev->entity, pad, + &phy->subdev.entity, CAL_CAMERARX_PAD_SINK, + MEDIA_LNK_FL_IMMUTABLE | + MEDIA_LNK_FL_ENABLED); + if (ret) { + phy_err(phy, "Failed to create media link for source %s\n", + subdev->name); + return ret; + } + + return 0; +} + +static int cal_async_notifier_complete(struct v4l2_async_notifier *notifier) +{ + struct cal_dev *cal = container_of(notifier, struct cal_dev, notifier); + unsigned int i; + int ret; + + for (i = 0; i < cal->num_contexts; ++i) { + ret = cal_ctx_v4l2_register(cal->ctx[i]); + if (ret) + goto err_ctx_unreg; + } + + if (!cal_mc_api) + return 0; + + ret = v4l2_device_register_subdev_nodes(&cal->v4l2_dev); + if (ret) + goto err_ctx_unreg; + + return 0; + +err_ctx_unreg: + for (; i > 0; --i) { + if (!cal->ctx[i - 1]) + continue; + + cal_ctx_v4l2_unregister(cal->ctx[i - 1]); + } + + return ret; +} + +static const struct v4l2_async_notifier_operations cal_async_notifier_ops = { + .bound = cal_async_notifier_bound, + .complete = cal_async_notifier_complete, +}; + +static int cal_async_notifier_register(struct cal_dev *cal) +{ + unsigned int i; + int ret; + + v4l2_async_nf_init(&cal->notifier, &cal->v4l2_dev); + cal->notifier.ops = &cal_async_notifier_ops; + + for (i = 0; i < cal->data->num_csi2_phy; ++i) { + struct cal_camerarx *phy = cal->phy[i]; + struct cal_v4l2_async_subdev *casd; + struct fwnode_handle *fwnode; + + if (!phy->source_node) + continue; + + fwnode = of_fwnode_handle(phy->source_node); + casd = v4l2_async_nf_add_fwnode(&cal->notifier, + fwnode, + struct cal_v4l2_async_subdev); + if (IS_ERR(casd)) { + phy_err(phy, "Failed to add subdev to notifier\n"); + ret = PTR_ERR(casd); + goto error; + } + + casd->phy = phy; + } + + ret = v4l2_async_nf_register(&cal->notifier); + if (ret) { + cal_err(cal, "Error registering async notifier\n"); + goto error; + } + + return 0; + +error: + v4l2_async_nf_cleanup(&cal->notifier); + return ret; +} + +static void cal_async_notifier_unregister(struct cal_dev *cal) +{ + v4l2_async_nf_unregister(&cal->notifier); + v4l2_async_nf_cleanup(&cal->notifier); +} + +/* ------------------------------------------------------------------ + * Media and V4L2 device handling + * ------------------------------------------------------------------ + */ + +/* + * Register user-facing devices. To be called at the end of the probe function + * when all resources are initialized and ready. + */ +static int cal_media_register(struct cal_dev *cal) +{ + int ret; + + ret = media_device_register(&cal->mdev); + if (ret) { + cal_err(cal, "Failed to register media device\n"); + return ret; + } + + /* + * Register the async notifier. This may trigger registration of the + * V4L2 video devices if all subdevs are ready. + */ + ret = cal_async_notifier_register(cal); + if (ret) { + media_device_unregister(&cal->mdev); + return ret; + } + + return 0; +} + +/* + * Unregister the user-facing devices, but don't free memory yet. To be called + * at the beginning of the remove function, to disallow access from userspace. + */ +static void cal_media_unregister(struct cal_dev *cal) +{ + unsigned int i; + + /* Unregister all the V4L2 video devices. 
*/ + for (i = 0; i < cal->num_contexts; i++) + cal_ctx_v4l2_unregister(cal->ctx[i]); + + cal_async_notifier_unregister(cal); + media_device_unregister(&cal->mdev); +} + +/* + * Initialize the in-kernel objects. To be called at the beginning of the probe + * function, before the V4L2 device is used by the driver. + */ +static int cal_media_init(struct cal_dev *cal) +{ + struct media_device *mdev = &cal->mdev; + int ret; + + mdev->dev = cal->dev; + mdev->hw_revision = cal->revision; + strscpy(mdev->model, "CAL", sizeof(mdev->model)); + media_device_init(mdev); + + /* + * Initialize the V4L2 device (despite the function name, this performs + * initialization, not registration). + */ + cal->v4l2_dev.mdev = mdev; + ret = v4l2_device_register(cal->dev, &cal->v4l2_dev); + if (ret) { + cal_err(cal, "Failed to register V4L2 device\n"); + return ret; + } + + vb2_dma_contig_set_max_seg_size(cal->dev, DMA_BIT_MASK(32)); + + return 0; +} + +/* + * Cleanup the in-kernel objects, freeing memory. To be called at the very end + * of the remove sequence, when nothing (including userspace) can access the + * objects anymore. + */ +static void cal_media_cleanup(struct cal_dev *cal) +{ + v4l2_device_unregister(&cal->v4l2_dev); + media_device_cleanup(&cal->mdev); + + vb2_dma_contig_clear_max_seg_size(cal->dev); +} + +/* ------------------------------------------------------------------ + * Initialization and module stuff + * ------------------------------------------------------------------ + */ + +static struct cal_ctx *cal_ctx_create(struct cal_dev *cal, int inst) +{ + struct cal_ctx *ctx; + int ret; + + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return NULL; + + ctx->cal = cal; + ctx->phy = cal->phy[inst]; + ctx->dma_ctx = inst; + ctx->csi2_ctx = inst; + ctx->cport = inst; + + ret = cal_ctx_v4l2_init(ctx); + if (ret) { + kfree(ctx); + return NULL; + } + + return ctx; +} + +static void cal_ctx_destroy(struct cal_ctx *ctx) +{ + cal_ctx_v4l2_cleanup(ctx); + + kfree(ctx); +} + +static const struct of_device_id cal_of_match[] = { + { + .compatible = "ti,dra72-cal", + .data = (void *)&dra72x_cal_data, + }, + { + .compatible = "ti,dra72-pre-es2-cal", + .data = (void *)&dra72x_es1_cal_data, + }, + { + .compatible = "ti,dra76-cal", + .data = (void *)&dra76x_cal_data, + }, + { + .compatible = "ti,am654-cal", + .data = (void *)&am654_cal_data, + }, + {}, +}; +MODULE_DEVICE_TABLE(of, cal_of_match); + +/* Get hardware revision and info. 
*/ + +#define CAL_HL_HWINFO_VALUE 0xa3c90469 + +static void cal_get_hwinfo(struct cal_dev *cal) +{ + u32 hwinfo; + + cal->revision = cal_read(cal, CAL_HL_REVISION); + switch (FIELD_GET(CAL_HL_REVISION_SCHEME_MASK, cal->revision)) { + case CAL_HL_REVISION_SCHEME_H08: + cal_dbg(3, cal, "CAL HW revision %lu.%lu.%lu (0x%08x)\n", + FIELD_GET(CAL_HL_REVISION_MAJOR_MASK, cal->revision), + FIELD_GET(CAL_HL_REVISION_MINOR_MASK, cal->revision), + FIELD_GET(CAL_HL_REVISION_RTL_MASK, cal->revision), + cal->revision); + break; + + case CAL_HL_REVISION_SCHEME_LEGACY: + default: + cal_info(cal, "Unexpected CAL HW revision 0x%08x\n", + cal->revision); + break; + } + + hwinfo = cal_read(cal, CAL_HL_HWINFO); + if (hwinfo != CAL_HL_HWINFO_VALUE) + cal_info(cal, "CAL_HL_HWINFO = 0x%08x, expected 0x%08x\n", + hwinfo, CAL_HL_HWINFO_VALUE); +} + +static int cal_init_camerarx_regmap(struct cal_dev *cal) +{ + struct platform_device *pdev = to_platform_device(cal->dev); + struct device_node *np = cal->dev->of_node; + struct regmap_config config = { }; + struct regmap *syscon; + struct resource *res; + unsigned int offset; + void __iomem *base; + + syscon = syscon_regmap_lookup_by_phandle_args(np, "ti,camerrx-control", + 1, &offset); + if (!IS_ERR(syscon)) { + cal->syscon_camerrx = syscon; + cal->syscon_camerrx_offset = offset; + return 0; + } + + dev_warn(cal->dev, "failed to get ti,camerrx-control: %ld\n", + PTR_ERR(syscon)); + + /* + * Backward DTS compatibility. If syscon entry is not present then + * check if the camerrx_control resource is present. + */ + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "camerrx_control"); + base = devm_ioremap_resource(cal->dev, res); + if (IS_ERR(base)) { + cal_err(cal, "failed to ioremap camerrx_control\n"); + return PTR_ERR(base); + } + + cal_dbg(1, cal, "ioresource %s at %pa - %pa\n", + res->name, &res->start, &res->end); + + config.reg_bits = 32; + config.reg_stride = 4; + config.val_bits = 32; + config.max_register = resource_size(res) - 4; + + syscon = regmap_init_mmio(NULL, base, &config); + if (IS_ERR(syscon)) { + pr_err("regmap init failed\n"); + return PTR_ERR(syscon); + } + + /* + * In this case the base already point to the direct CM register so no + * need for an offset. + */ + cal->syscon_camerrx = syscon; + cal->syscon_camerrx_offset = 0; + + return 0; +} + +static int cal_probe(struct platform_device *pdev) +{ + struct cal_dev *cal; + bool connected = false; + unsigned int i; + int ret; + int irq; + + cal = devm_kzalloc(&pdev->dev, sizeof(*cal), GFP_KERNEL); + if (!cal) + return -ENOMEM; + + cal->data = of_device_get_match_data(&pdev->dev); + if (!cal->data) { + dev_err(&pdev->dev, "Could not get feature data based on compatible version\n"); + return -ENODEV; + } + + cal->dev = &pdev->dev; + platform_set_drvdata(pdev, cal); + + /* Acquire resources: clocks, CAMERARX regmap, I/O memory and IRQ. 
*/ + cal->fclk = devm_clk_get(&pdev->dev, "fck"); + if (IS_ERR(cal->fclk)) { + dev_err(&pdev->dev, "cannot get CAL fclk\n"); + return PTR_ERR(cal->fclk); + } + + ret = cal_init_camerarx_regmap(cal); + if (ret < 0) + return ret; + + cal->res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "cal_top"); + cal->base = devm_ioremap_resource(&pdev->dev, cal->res); + if (IS_ERR(cal->base)) + return PTR_ERR(cal->base); + + cal_dbg(1, cal, "ioresource %s at %pa - %pa\n", + cal->res->name, &cal->res->start, &cal->res->end); + + irq = platform_get_irq(pdev, 0); + cal_dbg(1, cal, "got irq# %d\n", irq); + ret = devm_request_irq(&pdev->dev, irq, cal_irq, 0, CAL_MODULE_NAME, + cal); + if (ret) + return ret; + + /* Read the revision and hardware info to verify hardware access. */ + pm_runtime_enable(&pdev->dev); + ret = pm_runtime_resume_and_get(&pdev->dev); + if (ret) + goto error_pm_runtime; + + cal_get_hwinfo(cal); + pm_runtime_put_sync(&pdev->dev); + + /* Initialize the media device. */ + ret = cal_media_init(cal); + if (ret < 0) + goto error_pm_runtime; + + /* Create CAMERARX PHYs. */ + for (i = 0; i < cal->data->num_csi2_phy; ++i) { + cal->phy[i] = cal_camerarx_create(cal, i); + if (IS_ERR(cal->phy[i])) { + ret = PTR_ERR(cal->phy[i]); + cal->phy[i] = NULL; + goto error_camerarx; + } + + if (cal->phy[i]->source_node) + connected = true; + } + + if (!connected) { + cal_err(cal, "Neither port is configured, no point in staying up\n"); + ret = -ENODEV; + goto error_camerarx; + } + + /* Create contexts. */ + for (i = 0; i < cal->data->num_csi2_phy; ++i) { + if (!cal->phy[i]->source_node) + continue; + + cal->ctx[cal->num_contexts] = cal_ctx_create(cal, i); + if (!cal->ctx[cal->num_contexts]) { + cal_err(cal, "Failed to create context %u\n", cal->num_contexts); + ret = -ENODEV; + goto error_context; + } + + cal->num_contexts++; + } + + /* Register the media device. */ + ret = cal_media_register(cal); + if (ret) + goto error_context; + + return 0; + +error_context: + for (i = 0; i < cal->num_contexts; i++) + cal_ctx_destroy(cal->ctx[i]); + +error_camerarx: + for (i = 0; i < cal->data->num_csi2_phy; i++) + cal_camerarx_destroy(cal->phy[i]); + + cal_media_cleanup(cal); + +error_pm_runtime: + pm_runtime_disable(&pdev->dev); + + return ret; +} + +static void cal_remove(struct platform_device *pdev) +{ + struct cal_dev *cal = platform_get_drvdata(pdev); + unsigned int i; + int ret; + + cal_dbg(1, cal, "Removing %s\n", CAL_MODULE_NAME); + + ret = pm_runtime_resume_and_get(&pdev->dev); + + cal_media_unregister(cal); + + for (i = 0; i < cal->data->num_csi2_phy; i++) + cal_camerarx_disable(cal->phy[i]); + + for (i = 0; i < cal->num_contexts; i++) + cal_ctx_destroy(cal->ctx[i]); + + for (i = 0; i < cal->data->num_csi2_phy; i++) + cal_camerarx_destroy(cal->phy[i]); + + cal_media_cleanup(cal); + + if (ret >= 0) + pm_runtime_put_sync(&pdev->dev); + pm_runtime_disable(&pdev->dev); +} + +static int cal_runtime_resume(struct device *dev) +{ + struct cal_dev *cal = dev_get_drvdata(dev); + unsigned int i; + u32 val; + + if (cal->data->flags & DRA72_CAL_PRE_ES2_LDO_DISABLE) { + /* + * Apply the errata on both ports every time we (re-)enable + * the clock. + */ + for (i = 0; i < cal->data->num_csi2_phy; i++) + cal_camerarx_i913_errata(cal->phy[i]); + } + + /* + * Enable global interrupts that are not related to a particular + * CAMERARX or context. 
+ */ + cal_write(cal, CAL_HL_IRQENABLE_SET(0), CAL_HL_IRQ_OCPO_ERR_MASK); + + val = cal_read(cal, CAL_CTRL); + cal_set_field(&val, CAL_CTRL_BURSTSIZE_BURST128, + CAL_CTRL_BURSTSIZE_MASK); + cal_set_field(&val, 0xf, CAL_CTRL_TAGCNT_MASK); + cal_set_field(&val, CAL_CTRL_POSTED_WRITES_NONPOSTED, + CAL_CTRL_POSTED_WRITES_MASK); + cal_set_field(&val, 0xff, CAL_CTRL_MFLAGL_MASK); + cal_set_field(&val, 0xff, CAL_CTRL_MFLAGH_MASK); + cal_write(cal, CAL_CTRL, val); + cal_dbg(3, cal, "CAL_CTRL = 0x%08x\n", cal_read(cal, CAL_CTRL)); + + return 0; +} + +static const struct dev_pm_ops cal_pm_ops = { + .runtime_resume = cal_runtime_resume, +}; + +static struct platform_driver cal_pdrv = { + .probe = cal_probe, + .remove_new = cal_remove, + .driver = { + .name = CAL_MODULE_NAME, + .pm = &cal_pm_ops, + .of_match_table = cal_of_match, + }, +}; + +module_platform_driver(cal_pdrv); diff --git a/drivers/media/platform/ti/cal/cal.h b/drivers/media/platform/ti/cal/cal.h new file mode 100644 index 0000000000..0856297adc --- /dev/null +++ b/drivers/media/platform/ti/cal/cal.h @@ -0,0 +1,339 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * TI Camera Access Layer (CAL) + * + * Copyright (c) 2015-2020 Texas Instruments Inc. + * + * Authors: + * Benoit Parrot + * Laurent Pinchart + */ +#ifndef __TI_CAL_H__ +#define __TI_CAL_H__ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#define CAL_MODULE_NAME "cal" +#define CAL_MAX_NUM_CONTEXT 8 +#define CAL_NUM_CSI2_PORTS 2 + +/* + * The width is limited by the size of the CAL_WR_DMA_XSIZE_j.XSIZE field, + * expressed in multiples of 64 bits. The height is limited by the size of the + * CAL_CSI2_CTXi_j.CTXi_LINES and CAL_WR_DMA_CTRL_j.YSIZE fields, expressed in + * lines. + */ +#define CAL_MIN_WIDTH_BYTES 16 +#define CAL_MAX_WIDTH_BYTES (8192 * 8) +#define CAL_MIN_HEIGHT_LINES 1 +#define CAL_MAX_HEIGHT_LINES 16383 + +#define CAL_CAMERARX_PAD_SINK 0 +#define CAL_CAMERARX_PAD_FIRST_SOURCE 1 +#define CAL_CAMERARX_NUM_SOURCE_PADS 1 +#define CAL_CAMERARX_NUM_PADS (1 + CAL_CAMERARX_NUM_SOURCE_PADS) + +static inline bool cal_rx_pad_is_sink(u32 pad) +{ + /* Camera RX has 1 sink pad, and N source pads */ + return pad == 0; +} + +static inline bool cal_rx_pad_is_source(u32 pad) +{ + /* Camera RX has 1 sink pad, and N source pads */ + return pad >= CAL_CAMERARX_PAD_FIRST_SOURCE && + pad <= CAL_CAMERARX_NUM_SOURCE_PADS; +} + +struct device; +struct device_node; +struct resource; +struct regmap; +struct regmap_field; + +/* CTRL_CORE_CAMERRX_CONTROL register field id */ +enum cal_camerarx_field { + F_CTRLCLKEN, + F_CAMMODE, + F_LANEENABLE, + F_CSI_MODE, + F_MAX_FIELDS, +}; + +enum cal_dma_state { + CAL_DMA_RUNNING, + CAL_DMA_STOP_REQUESTED, + CAL_DMA_STOP_PENDING, + CAL_DMA_STOPPED, +}; + +struct cal_format_info { + u32 fourcc; + u32 code; + /* Bits per pixel */ + u8 bpp; + bool meta; +}; + +/* buffer for one video frame */ +struct cal_buffer { + /* common v4l buffer stuff -- must be first */ + struct vb2_v4l2_buffer vb; + struct list_head list; +}; + +/** + * struct cal_dmaqueue - Queue of DMA buffers + */ +struct cal_dmaqueue { + /** + * @lock: Protects all fields in the cal_dmaqueue. + */ + spinlock_t lock; + + /** + * @queue: Buffers queued to the driver and waiting for DMA processing. + * Buffers are added to the list by the vb2 .buffer_queue() operation, + * and move to @pending when they are scheduled for the next frame. 
+ */ + struct list_head queue; + /** + * @pending: Buffer provided to the hardware to DMA the next frame. + * Will move to @active at the end of the current frame. + */ + struct cal_buffer *pending; + /** + * @active: Buffer being DMA'ed to for the current frame. Will be + * retired and given back to vb2 at the end of the current frame if + * a @pending buffer has been scheduled to replace it. + */ + struct cal_buffer *active; + + /** @state: State of the DMA engine. */ + enum cal_dma_state state; + /** @wait: Wait queue to signal a @state transition to CAL_DMA_STOPPED. */ + struct wait_queue_head wait; +}; + +struct cal_camerarx_data { + struct { + unsigned int lsb; + unsigned int msb; + } fields[F_MAX_FIELDS]; + unsigned int num_lanes; +}; + +struct cal_data { + const struct cal_camerarx_data *camerarx; + unsigned int num_csi2_phy; + unsigned int flags; +}; + +/* + * The Camera Adaptation Layer (CAL) module is paired with one or more complex + * I/O PHYs (CAMERARX). It contains multiple instances of CSI-2, processing and + * DMA contexts. + * + * The cal_dev structure represents the whole subsystem, including the CAL and + * the CAMERARX instances. Instances of struct cal_dev are named cal through the + * driver. + * + * The cal_camerarx structure represents one CAMERARX instance. Instances of + * cal_camerarx are named phy through the driver. + * + * The cal_ctx structure represents the combination of one CSI-2 context, one + * processing context and one DMA context. Instance of struct cal_ctx are named + * ctx through the driver. + */ + +struct cal_camerarx { + void __iomem *base; + struct resource *res; + struct regmap_field *fields[F_MAX_FIELDS]; + + struct cal_dev *cal; + unsigned int instance; + + struct v4l2_fwnode_endpoint endpoint; + struct device_node *source_ep_node; + struct device_node *source_node; + struct v4l2_subdev *source; + + struct v4l2_subdev subdev; + struct media_pad pads[CAL_CAMERARX_NUM_PADS]; + + /* protects the vc_* fields below */ + spinlock_t vc_lock; + u8 vc_enable_count[4]; + u16 vc_frame_number[4]; + u32 vc_sequence[4]; + + unsigned int enable_count; +}; + +struct cal_dev { + struct clk *fclk; + int irq; + void __iomem *base; + struct resource *res; + struct device *dev; + + const struct cal_data *data; + u32 revision; + + /* Control Module handle */ + struct regmap *syscon_camerrx; + u32 syscon_camerrx_offset; + + /* Camera Core Module handle */ + struct cal_camerarx *phy[CAL_NUM_CSI2_PORTS]; + + u32 num_contexts; + struct cal_ctx *ctx[CAL_MAX_NUM_CONTEXT]; + + struct media_device mdev; + struct v4l2_device v4l2_dev; + struct v4l2_async_notifier notifier; + + unsigned long reserved_pix_proc_mask; +}; + +/* + * There is one cal_ctx structure for each camera core context. + */ +struct cal_ctx { + struct v4l2_ctrl_handler ctrl_handler; + struct video_device vdev; + struct media_pad pad; + + struct cal_dev *cal; + struct cal_camerarx *phy; + + /* v4l2_ioctl mutex */ + struct mutex mutex; + + struct cal_dmaqueue dma; + + /* video capture */ + const struct cal_format_info *fmtinfo; + /* Used to store current pixel format */ + struct v4l2_format v_fmt; + + /* Current subdev enumerated format (legacy) */ + const struct cal_format_info **active_fmt; + unsigned int num_active_fmt; + + struct vb2_queue vb_vidq; + u8 dma_ctx; + u8 cport; + u8 csi2_ctx; + u8 pix_proc; + u8 vc; + u8 datatype; + + bool use_pix_proc; +}; + +extern unsigned int cal_debug; +extern int cal_video_nr; +extern bool cal_mc_api; + +#define cal_dbg(level, cal, fmt, arg...) 
\ + do { \ + if (cal_debug >= (level)) \ + dev_printk(KERN_DEBUG, (cal)->dev, fmt, ##arg); \ + } while (0) +#define cal_info(cal, fmt, arg...) \ + dev_info((cal)->dev, fmt, ##arg) +#define cal_err(cal, fmt, arg...) \ + dev_err((cal)->dev, fmt, ##arg) + +#define ctx_dbg(level, ctx, fmt, arg...) \ + cal_dbg(level, (ctx)->cal, "ctx%u: " fmt, (ctx)->dma_ctx, ##arg) +#define ctx_info(ctx, fmt, arg...) \ + cal_info((ctx)->cal, "ctx%u: " fmt, (ctx)->dma_ctx, ##arg) +#define ctx_err(ctx, fmt, arg...) \ + cal_err((ctx)->cal, "ctx%u: " fmt, (ctx)->dma_ctx, ##arg) + +#define phy_dbg(level, phy, fmt, arg...) \ + cal_dbg(level, (phy)->cal, "phy%u: " fmt, (phy)->instance, ##arg) +#define phy_info(phy, fmt, arg...) \ + cal_info((phy)->cal, "phy%u: " fmt, (phy)->instance, ##arg) +#define phy_err(phy, fmt, arg...) \ + cal_err((phy)->cal, "phy%u: " fmt, (phy)->instance, ##arg) + +static inline u32 cal_read(struct cal_dev *cal, u32 offset) +{ + return ioread32(cal->base + offset); +} + +static inline void cal_write(struct cal_dev *cal, u32 offset, u32 val) +{ + iowrite32(val, cal->base + offset); +} + +static __always_inline u32 cal_read_field(struct cal_dev *cal, u32 offset, u32 mask) +{ + return FIELD_GET(mask, cal_read(cal, offset)); +} + +static inline void cal_write_field(struct cal_dev *cal, u32 offset, u32 value, + u32 mask) +{ + u32 val = cal_read(cal, offset); + + val &= ~mask; + val |= (value << __ffs(mask)) & mask; + cal_write(cal, offset, val); +} + +static inline void cal_set_field(u32 *valp, u32 field, u32 mask) +{ + u32 val = *valp; + + val &= ~mask; + val |= (field << __ffs(mask)) & mask; + *valp = val; +} + +extern const struct cal_format_info cal_formats[]; +extern const unsigned int cal_num_formats; +const struct cal_format_info *cal_format_by_fourcc(u32 fourcc); +const struct cal_format_info *cal_format_by_code(u32 code); + +void cal_quickdump_regs(struct cal_dev *cal); + +void cal_camerarx_disable(struct cal_camerarx *phy); +void cal_camerarx_i913_errata(struct cal_camerarx *phy); +struct cal_camerarx *cal_camerarx_create(struct cal_dev *cal, + unsigned int instance); +void cal_camerarx_destroy(struct cal_camerarx *phy); + +int cal_ctx_prepare(struct cal_ctx *ctx); +void cal_ctx_unprepare(struct cal_ctx *ctx); +void cal_ctx_set_dma_addr(struct cal_ctx *ctx, dma_addr_t addr); +void cal_ctx_start(struct cal_ctx *ctx); +void cal_ctx_stop(struct cal_ctx *ctx); + +int cal_ctx_v4l2_register(struct cal_ctx *ctx); +void cal_ctx_v4l2_unregister(struct cal_ctx *ctx); +int cal_ctx_v4l2_init(struct cal_ctx *ctx); +void cal_ctx_v4l2_cleanup(struct cal_ctx *ctx); + +#endif /* __TI_CAL_H__ */ diff --git a/drivers/media/platform/ti/cal/cal_regs.h b/drivers/media/platform/ti/cal/cal_regs.h new file mode 100644 index 0000000000..40e4f972fc --- /dev/null +++ b/drivers/media/platform/ti/cal/cal_regs.h @@ -0,0 +1,463 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * TI CAL camera interface driver + * + * Copyright (c) 2015 Texas Instruments Inc. + * + * Benoit Parrot, + */ + +#ifndef __TI_CAL_REGS_H +#define __TI_CAL_REGS_H + +/* + * struct cal_dev.flags possibilities + * + * DRA72_CAL_PRE_ES2_LDO_DISABLE: + * Errata i913: CSI2 LDO Needs to be disabled when module is powered on + * + * Enabling CSI2 LDO shorts it to core supply. It is crucial the 2 CSI2 + * LDOs on the device are disabled if CSI-2 module is powered on + * (0x4845 B304 | 0x4845 B384 [28:27] = 0x1) or in ULPS (0x4845 B304 + * | 0x4845 B384 [28:27] = 0x2) mode. 
Common concerns include: high + * current draw on the module supply in active mode. + * + * Errata does not apply when CSI-2 module is powered off + * (0x4845 B304 | 0x4845 B384 [28:27] = 0x0). + * + * SW Workaround: + * Set the following register bits to disable the LDO, + * which is essentially CSI2 REG10 bit 6: + * + * Core 0: 0x4845 B828 = 0x0000 0040 + * Core 1: 0x4845 B928 = 0x0000 0040 + */ +#define DRA72_CAL_PRE_ES2_LDO_DISABLE BIT(0) + +/* CAL register offsets */ + +#define CAL_HL_REVISION 0x0000 +#define CAL_HL_HWINFO 0x0004 +#define CAL_HL_SYSCONFIG 0x0010 +#define CAL_HL_IRQ_EOI 0x001c +#define CAL_HL_IRQSTATUS_RAW(m) (0x20U + (m) * 0x10U) +#define CAL_HL_IRQSTATUS(m) (0x24U + (m) * 0x10U) +#define CAL_HL_IRQENABLE_SET(m) (0x28U + (m) * 0x10U) +#define CAL_HL_IRQENABLE_CLR(m) (0x2cU + (m) * 0x10U) +#define CAL_PIX_PROC(m) (0xc0U + (m) * 0x4U) +#define CAL_CTRL 0x100 +#define CAL_CTRL1 0x104 +#define CAL_LINE_NUMBER_EVT 0x108 +#define CAL_VPORT_CTRL1 0x120 +#define CAL_VPORT_CTRL2 0x124 +#define CAL_BYS_CTRL1 0x130 +#define CAL_BYS_CTRL2 0x134 +#define CAL_RD_DMA_CTRL 0x140 +#define CAL_RD_DMA_PIX_ADDR 0x144 +#define CAL_RD_DMA_PIX_OFST 0x148 +#define CAL_RD_DMA_XSIZE 0x14c +#define CAL_RD_DMA_YSIZE 0x150 +#define CAL_RD_DMA_INIT_ADDR 0x154 +#define CAL_RD_DMA_INIT_OFST 0x168 +#define CAL_RD_DMA_CTRL2 0x16c +#define CAL_WR_DMA_CTRL(m) (0x200U + (m) * 0x10U) +#define CAL_WR_DMA_ADDR(m) (0x204U + (m) * 0x10U) +#define CAL_WR_DMA_OFST(m) (0x208U + (m) * 0x10U) +#define CAL_WR_DMA_XSIZE(m) (0x20cU + (m) * 0x10U) +#define CAL_CSI2_PPI_CTRL(m) (0x300U + (m) * 0x80U) +#define CAL_CSI2_COMPLEXIO_CFG(m) (0x304U + (m) * 0x80U) +#define CAL_CSI2_COMPLEXIO_IRQSTATUS(m) (0x308U + (m) * 0x80U) +#define CAL_CSI2_SHORT_PACKET(m) (0x30cU + (m) * 0x80U) +#define CAL_CSI2_COMPLEXIO_IRQENABLE(m) (0x310U + (m) * 0x80U) +#define CAL_CSI2_TIMING(m) (0x314U + (m) * 0x80U) +#define CAL_CSI2_VC_IRQENABLE(m) (0x318U + (m) * 0x80U) +#define CAL_CSI2_VC_IRQSTATUS(m) (0x328U + (m) * 0x80U) +#define CAL_CSI2_CTX(phy, csi2_ctx) (0x330U + (phy) * 0x80U + (csi2_ctx) * 4) +#define CAL_CSI2_STATUS(phy, csi2_ctx) (0x350U + (phy) * 0x80U + (csi2_ctx) * 4) + +/* CAL CSI2 PHY register offsets */ +#define CAL_CSI2_PHY_REG0 0x000 +#define CAL_CSI2_PHY_REG1 0x004 +#define CAL_CSI2_PHY_REG2 0x008 +#define CAL_CSI2_PHY_REG10 0x028 + +/* CAL Control Module Core Camerrx Control register offsets */ +#define CM_CTRL_CORE_CAMERRX_CONTROL 0x000 + +/********************************************************************* +* Field Definition Macros +*********************************************************************/ + +#define CAL_HL_REVISION_MINOR_MASK GENMASK(5, 0) +#define CAL_HL_REVISION_CUSTOM_MASK GENMASK(7, 6) +#define CAL_HL_REVISION_MAJOR_MASK GENMASK(10, 8) +#define CAL_HL_REVISION_RTL_MASK GENMASK(15, 11) +#define CAL_HL_REVISION_FUNC_MASK GENMASK(27, 16) +#define CAL_HL_REVISION_SCHEME_MASK GENMASK(31, 30) +#define CAL_HL_REVISION_SCHEME_H08 1 +#define CAL_HL_REVISION_SCHEME_LEGACY 0 + +#define CAL_HL_HWINFO_WFIFO_MASK GENMASK(3, 0) +#define CAL_HL_HWINFO_RFIFO_MASK GENMASK(7, 4) +#define CAL_HL_HWINFO_PCTX_MASK GENMASK(12, 8) +#define CAL_HL_HWINFO_WCTX_MASK GENMASK(18, 13) +#define CAL_HL_HWINFO_VFIFO_MASK GENMASK(22, 19) +#define CAL_HL_HWINFO_NCPORT_MASK GENMASK(27, 23) +#define CAL_HL_HWINFO_NPPI_CTXS0_MASK GENMASK(29, 28) +#define CAL_HL_HWINFO_NPPI_CTXS1_MASK GENMASK(31, 30) +#define CAL_HL_HWINFO_NPPI_CONTEXTS_ZERO 0 +#define CAL_HL_HWINFO_NPPI_CONTEXTS_FOUR 1 +#define CAL_HL_HWINFO_NPPI_CONTEXTS_EIGHT 2 
+#define CAL_HL_HWINFO_NPPI_CONTEXTS_RESERVED 3 + +#define CAL_HL_SYSCONFIG_SOFTRESET_MASK BIT(0) +#define CAL_HL_SYSCONFIG_SOFTRESET_DONE 0x0 +#define CAL_HL_SYSCONFIG_SOFTRESET_PENDING 0x1 +#define CAL_HL_SYSCONFIG_SOFTRESET_NOACTION 0x0 +#define CAL_HL_SYSCONFIG_SOFTRESET_RESET 0x1 +#define CAL_HL_SYSCONFIG_IDLE_MASK GENMASK(3, 2) +#define CAL_HL_SYSCONFIG_IDLEMODE_FORCE 0 +#define CAL_HL_SYSCONFIG_IDLEMODE_NO 1 +#define CAL_HL_SYSCONFIG_IDLEMODE_SMART1 2 +#define CAL_HL_SYSCONFIG_IDLEMODE_SMART2 3 + +#define CAL_HL_IRQ_EOI_LINE_NUMBER_MASK BIT(0) +#define CAL_HL_IRQ_EOI_LINE_NUMBER_READ0 0 +#define CAL_HL_IRQ_EOI_LINE_NUMBER_EOI0 0 + +#define CAL_HL_IRQ_WDMA_END_MASK(m) BIT(m) +#define CAL_HL_IRQ_WDMA_START_MASK(m) BIT(m) + +#define CAL_HL_IRQ_OCPO_ERR_MASK BIT(6) + +#define CAL_HL_IRQ_CIO_MASK(i) BIT(16 + (i) * 8) +#define CAL_HL_IRQ_VC_MASK(i) BIT(17 + (i) * 8) + +#define CAL_PIX_PROC_EN_MASK BIT(0) +#define CAL_PIX_PROC_EXTRACT_MASK GENMASK(4, 1) +#define CAL_PIX_PROC_EXTRACT_B6 0x0 +#define CAL_PIX_PROC_EXTRACT_B7 0x1 +#define CAL_PIX_PROC_EXTRACT_B8 0x2 +#define CAL_PIX_PROC_EXTRACT_B10 0x3 +#define CAL_PIX_PROC_EXTRACT_B10_MIPI 0x4 +#define CAL_PIX_PROC_EXTRACT_B12 0x5 +#define CAL_PIX_PROC_EXTRACT_B12_MIPI 0x6 +#define CAL_PIX_PROC_EXTRACT_B14 0x7 +#define CAL_PIX_PROC_EXTRACT_B14_MIPI 0x8 +#define CAL_PIX_PROC_EXTRACT_B16_BE 0x9 +#define CAL_PIX_PROC_EXTRACT_B16_LE 0xa +#define CAL_PIX_PROC_DPCMD_MASK GENMASK(9, 5) +#define CAL_PIX_PROC_DPCMD_BYPASS 0x0 +#define CAL_PIX_PROC_DPCMD_DPCM_10_8_1 0x2 +#define CAL_PIX_PROC_DPCMD_DPCM_12_8_1 0x8 +#define CAL_PIX_PROC_DPCMD_DPCM_10_7_1 0x4 +#define CAL_PIX_PROC_DPCMD_DPCM_10_7_2 0x5 +#define CAL_PIX_PROC_DPCMD_DPCM_10_6_1 0x6 +#define CAL_PIX_PROC_DPCMD_DPCM_10_6_2 0x7 +#define CAL_PIX_PROC_DPCMD_DPCM_12_7_1 0xa +#define CAL_PIX_PROC_DPCMD_DPCM_12_6_1 0xc +#define CAL_PIX_PROC_DPCMD_DPCM_14_10 0xe +#define CAL_PIX_PROC_DPCMD_DPCM_14_8_1 0x10 +#define CAL_PIX_PROC_DPCMD_DPCM_16_12_1 0x12 +#define CAL_PIX_PROC_DPCMD_DPCM_16_10_1 0x14 +#define CAL_PIX_PROC_DPCMD_DPCM_16_8_1 0x16 +#define CAL_PIX_PROC_DPCME_MASK GENMASK(15, 11) +#define CAL_PIX_PROC_DPCME_BYPASS 0x0 +#define CAL_PIX_PROC_DPCME_DPCM_10_8_1 0x2 +#define CAL_PIX_PROC_DPCME_DPCM_12_8_1 0x8 +#define CAL_PIX_PROC_DPCME_DPCM_14_10 0xe +#define CAL_PIX_PROC_DPCME_DPCM_14_8_1 0x10 +#define CAL_PIX_PROC_DPCME_DPCM_16_12_1 0x12 +#define CAL_PIX_PROC_DPCME_DPCM_16_10_1 0x14 +#define CAL_PIX_PROC_DPCME_DPCM_16_8_1 0x16 +#define CAL_PIX_PROC_PACK_MASK GENMASK(18, 16) +#define CAL_PIX_PROC_PACK_B8 0x0 +#define CAL_PIX_PROC_PACK_B10_MIPI 0x2 +#define CAL_PIX_PROC_PACK_B12 0x3 +#define CAL_PIX_PROC_PACK_B12_MIPI 0x4 +#define CAL_PIX_PROC_PACK_B16 0x5 +#define CAL_PIX_PROC_PACK_ARGB 0x6 +#define CAL_PIX_PROC_CPORT_MASK GENMASK(23, 19) + +#define CAL_CTRL_POSTED_WRITES_MASK BIT(0) +#define CAL_CTRL_POSTED_WRITES_NONPOSTED 0 +#define CAL_CTRL_POSTED_WRITES 1 +#define CAL_CTRL_TAGCNT_MASK GENMASK(4, 1) +#define CAL_CTRL_BURSTSIZE_MASK GENMASK(6, 5) +#define CAL_CTRL_BURSTSIZE_BURST16 0x0 +#define CAL_CTRL_BURSTSIZE_BURST32 0x1 +#define CAL_CTRL_BURSTSIZE_BURST64 0x2 +#define CAL_CTRL_BURSTSIZE_BURST128 0x3 +#define CAL_CTRL_LL_FORCE_STATE_MASK GENMASK(12, 7) +#define CAL_CTRL_MFLAGL_MASK GENMASK(20, 13) +#define CAL_CTRL_PWRSCPCLK_MASK BIT(21) +#define CAL_CTRL_PWRSCPCLK_AUTO 0 +#define CAL_CTRL_PWRSCPCLK_FORCE 1 +#define CAL_CTRL_RD_DMA_STALL_MASK BIT(22) +#define CAL_CTRL_MFLAGH_MASK GENMASK(31, 24) + +#define CAL_CTRL1_PPI_GROUPING_MASK GENMASK(1, 0) +#define 
CAL_CTRL1_PPI_GROUPING_DISABLED 0 +#define CAL_CTRL1_PPI_GROUPING_RESERVED 1 +#define CAL_CTRL1_PPI_GROUPING_0 2 +#define CAL_CTRL1_PPI_GROUPING_1 3 +#define CAL_CTRL1_INTERLEAVE01_MASK GENMASK(3, 2) +#define CAL_CTRL1_INTERLEAVE01_DISABLED 0 +#define CAL_CTRL1_INTERLEAVE01_PIX1 1 +#define CAL_CTRL1_INTERLEAVE01_PIX4 2 +#define CAL_CTRL1_INTERLEAVE01_RESERVED 3 +#define CAL_CTRL1_INTERLEAVE23_MASK GENMASK(5, 4) +#define CAL_CTRL1_INTERLEAVE23_DISABLED 0 +#define CAL_CTRL1_INTERLEAVE23_PIX1 1 +#define CAL_CTRL1_INTERLEAVE23_PIX4 2 +#define CAL_CTRL1_INTERLEAVE23_RESERVED 3 + +#define CAL_LINE_NUMBER_EVT_CPORT_MASK GENMASK(4, 0) +#define CAL_LINE_NUMBER_EVT_MASK GENMASK(29, 16) + +#define CAL_VPORT_CTRL1_PCLK_MASK GENMASK(16, 0) +#define CAL_VPORT_CTRL1_XBLK_MASK GENMASK(24, 17) +#define CAL_VPORT_CTRL1_YBLK_MASK GENMASK(30, 25) +#define CAL_VPORT_CTRL1_WIDTH_MASK BIT(31) +#define CAL_VPORT_CTRL1_WIDTH_ONE 0 +#define CAL_VPORT_CTRL1_WIDTH_TWO 1 + +#define CAL_VPORT_CTRL2_CPORT_MASK GENMASK(4, 0) +#define CAL_VPORT_CTRL2_FREERUNNING_MASK BIT(15) +#define CAL_VPORT_CTRL2_FREERUNNING_GATED 0 +#define CAL_VPORT_CTRL2_FREERUNNING_FREE 1 +#define CAL_VPORT_CTRL2_FS_RESETS_MASK BIT(16) +#define CAL_VPORT_CTRL2_FS_RESETS_NO 0 +#define CAL_VPORT_CTRL2_FS_RESETS_YES 1 +#define CAL_VPORT_CTRL2_FSM_RESET_MASK BIT(17) +#define CAL_VPORT_CTRL2_FSM_RESET_NOEFFECT 0 +#define CAL_VPORT_CTRL2_FSM_RESET 1 +#define CAL_VPORT_CTRL2_RDY_THR_MASK GENMASK(31, 18) + +#define CAL_BYS_CTRL1_PCLK_MASK GENMASK(16, 0) +#define CAL_BYS_CTRL1_XBLK_MASK GENMASK(24, 17) +#define CAL_BYS_CTRL1_YBLK_MASK GENMASK(30, 25) +#define CAL_BYS_CTRL1_BYSINEN_MASK BIT(31) + +#define CAL_BYS_CTRL2_CPORTIN_MASK GENMASK(4, 0) +#define CAL_BYS_CTRL2_CPORTOUT_MASK GENMASK(9, 5) +#define CAL_BYS_CTRL2_DUPLICATEDDATA_MASK BIT(10) +#define CAL_BYS_CTRL2_DUPLICATEDDATA_NO 0 +#define CAL_BYS_CTRL2_DUPLICATEDDATA_YES 1 +#define CAL_BYS_CTRL2_FREERUNNING_MASK BIT(11) +#define CAL_BYS_CTRL2_FREERUNNING_NO 0 +#define CAL_BYS_CTRL2_FREERUNNING_YES 1 + +#define CAL_RD_DMA_CTRL_GO_MASK BIT(0) +#define CAL_RD_DMA_CTRL_GO_DIS 0 +#define CAL_RD_DMA_CTRL_GO_EN 1 +#define CAL_RD_DMA_CTRL_GO_IDLE 0 +#define CAL_RD_DMA_CTRL_GO_BUSY 1 +#define CAL_RD_DMA_CTRL_INIT_MASK BIT(1) +#define CAL_RD_DMA_CTRL_BW_LIMITER_MASK GENMASK(10, 2) +#define CAL_RD_DMA_CTRL_OCP_TAG_CNT_MASK GENMASK(14, 11) +#define CAL_RD_DMA_CTRL_PCLK_MASK GENMASK(31, 15) + +#define CAL_RD_DMA_PIX_ADDR_MASK GENMASK(31, 3) + +#define CAL_RD_DMA_PIX_OFST_MASK GENMASK(31, 4) + +#define CAL_RD_DMA_XSIZE_MASK GENMASK(31, 19) + +#define CAL_RD_DMA_YSIZE_MASK GENMASK(29, 16) + +#define CAL_RD_DMA_INIT_ADDR_MASK GENMASK(31, 3) + +#define CAL_RD_DMA_INIT_OFST_MASK GENMASK(31, 3) + +#define CAL_RD_DMA_CTRL2_CIRC_MODE_MASK GENMASK(2, 0) +#define CAL_RD_DMA_CTRL2_CIRC_MODE_DIS 0 +#define CAL_RD_DMA_CTRL2_CIRC_MODE_ONE 1 +#define CAL_RD_DMA_CTRL2_CIRC_MODE_FOUR 2 +#define CAL_RD_DMA_CTRL2_CIRC_MODE_SIXTEEN 3 +#define CAL_RD_DMA_CTRL2_CIRC_MODE_SIXTYFOUR 4 +#define CAL_RD_DMA_CTRL2_CIRC_MODE_RESERVED 5 +#define CAL_RD_DMA_CTRL2_ICM_CSTART_MASK BIT(3) +#define CAL_RD_DMA_CTRL2_PATTERN_MASK GENMASK(5, 4) +#define CAL_RD_DMA_CTRL2_PATTERN_LINEAR 0 +#define CAL_RD_DMA_CTRL2_PATTERN_YUV420 1 +#define CAL_RD_DMA_CTRL2_PATTERN_RD2SKIP2 2 +#define CAL_RD_DMA_CTRL2_PATTERN_RD2SKIP4 3 +#define CAL_RD_DMA_CTRL2_BYSOUT_LE_WAIT_MASK BIT(6) +#define CAL_RD_DMA_CTRL2_BYSOUT_LE_WAIT_FREERUNNING 0 +#define CAL_RD_DMA_CTRL2_BYSOUT_LE_WAIT_WAITFORBYSOUT 1 +#define CAL_RD_DMA_CTRL2_CIRC_SIZE_MASK GENMASK(29, 16) + +#define 
CAL_WR_DMA_CTRL_MODE_MASK GENMASK(2, 0) +#define CAL_WR_DMA_CTRL_MODE_DIS 0 +#define CAL_WR_DMA_CTRL_MODE_SHD 1 +#define CAL_WR_DMA_CTRL_MODE_CNT 2 +#define CAL_WR_DMA_CTRL_MODE_CNT_INIT 3 +#define CAL_WR_DMA_CTRL_MODE_CONST 4 +#define CAL_WR_DMA_CTRL_MODE_RESERVED 5 +#define CAL_WR_DMA_CTRL_PATTERN_MASK GENMASK(4, 3) +#define CAL_WR_DMA_CTRL_PATTERN_LINEAR 0 +#define CAL_WR_DMA_CTRL_PATTERN_WR2SKIP2 2 +#define CAL_WR_DMA_CTRL_PATTERN_WR2SKIP4 3 +#define CAL_WR_DMA_CTRL_PATTERN_RESERVED 1 +#define CAL_WR_DMA_CTRL_ICM_PSTART_MASK BIT(5) +#define CAL_WR_DMA_CTRL_DTAG_MASK GENMASK(8, 6) +#define CAL_WR_DMA_CTRL_DTAG_ATT_HDR 0 +#define CAL_WR_DMA_CTRL_DTAG_ATT_DAT 1 +#define CAL_WR_DMA_CTRL_DTAG 2 +#define CAL_WR_DMA_CTRL_DTAG_PIX_HDR 3 +#define CAL_WR_DMA_CTRL_DTAG_PIX_DAT 4 +#define CAL_WR_DMA_CTRL_DTAG_D5 5 +#define CAL_WR_DMA_CTRL_DTAG_D6 6 +#define CAL_WR_DMA_CTRL_DTAG_D7 7 +#define CAL_WR_DMA_CTRL_CPORT_MASK GENMASK(13, 9) +#define CAL_WR_DMA_CTRL_STALL_RD_MASK BIT(14) +#define CAL_WR_DMA_CTRL_YSIZE_MASK GENMASK(31, 18) + +#define CAL_WR_DMA_ADDR_MASK GENMASK(31, 4) + +#define CAL_WR_DMA_OFST_MASK GENMASK(18, 4) +#define CAL_WR_DMA_OFST_CIRC_MODE_MASK GENMASK(23, 22) +#define CAL_WR_DMA_OFST_CIRC_MODE_ONE 1 +#define CAL_WR_DMA_OFST_CIRC_MODE_FOUR 2 +#define CAL_WR_DMA_OFST_CIRC_MODE_SIXTYFOUR 3 +#define CAL_WR_DMA_OFST_CIRC_MODE_DISABLED 0 +#define CAL_WR_DMA_OFST_CIRC_SIZE_MASK GENMASK(31, 24) + +#define CAL_WR_DMA_XSIZE_XSKIP_MASK GENMASK(15, 3) +#define CAL_WR_DMA_XSIZE_MASK GENMASK(31, 19) + +#define CAL_CSI2_PPI_CTRL_IF_EN_MASK BIT(0) +#define CAL_CSI2_PPI_CTRL_ECC_EN_MASK BIT(2) +#define CAL_CSI2_PPI_CTRL_FRAME_MASK BIT(3) +#define CAL_CSI2_PPI_CTRL_FRAME_IMMEDIATE 0 +#define CAL_CSI2_PPI_CTRL_FRAME 1 + +#define CAL_CSI2_COMPLEXIO_CFG_CLOCK_POSITION_MASK GENMASK(2, 0) +#define CAL_CSI2_COMPLEXIO_CFG_POSITION_5 5 +#define CAL_CSI2_COMPLEXIO_CFG_POSITION_4 4 +#define CAL_CSI2_COMPLEXIO_CFG_POSITION_3 3 +#define CAL_CSI2_COMPLEXIO_CFG_POSITION_2 2 +#define CAL_CSI2_COMPLEXIO_CFG_POSITION_1 1 +#define CAL_CSI2_COMPLEXIO_CFG_POSITION_NOT_USED 0 +#define CAL_CSI2_COMPLEXIO_CFG_CLOCK_POL_MASK BIT(3) +#define CAL_CSI2_COMPLEXIO_CFG_POL_PLUSMINUS 0 +#define CAL_CSI2_COMPLEXIO_CFG_POL_MINUSPLUS 1 +#define CAL_CSI2_COMPLEXIO_CFG_DATA1_POSITION_MASK GENMASK(6, 4) +#define CAL_CSI2_COMPLEXIO_CFG_DATA1_POL_MASK BIT(7) +#define CAL_CSI2_COMPLEXIO_CFG_DATA2_POSITION_MASK GENMASK(10, 8) +#define CAL_CSI2_COMPLEXIO_CFG_DATA2_POL_MASK BIT(11) +#define CAL_CSI2_COMPLEXIO_CFG_DATA3_POSITION_MASK GENMASK(14, 12) +#define CAL_CSI2_COMPLEXIO_CFG_DATA3_POL_MASK BIT(15) +#define CAL_CSI2_COMPLEXIO_CFG_DATA4_POSITION_MASK GENMASK(18, 16) +#define CAL_CSI2_COMPLEXIO_CFG_DATA4_POL_MASK BIT(19) +#define CAL_CSI2_COMPLEXIO_CFG_PWR_AUTO_MASK BIT(24) +#define CAL_CSI2_COMPLEXIO_CFG_PWR_STATUS_MASK GENMASK(26, 25) +#define CAL_CSI2_COMPLEXIO_CFG_PWR_STATUS_STATE_OFF 0 +#define CAL_CSI2_COMPLEXIO_CFG_PWR_STATUS_STATE_ON 1 +#define CAL_CSI2_COMPLEXIO_CFG_PWR_STATUS_STATE_ULP 2 +#define CAL_CSI2_COMPLEXIO_CFG_PWR_CMD_MASK GENMASK(28, 27) +#define CAL_CSI2_COMPLEXIO_CFG_PWR_CMD_STATE_OFF 0 +#define CAL_CSI2_COMPLEXIO_CFG_PWR_CMD_STATE_ON 1 +#define CAL_CSI2_COMPLEXIO_CFG_PWR_CMD_STATE_ULP 2 +#define CAL_CSI2_COMPLEXIO_CFG_RESET_DONE_MASK BIT(29) +#define CAL_CSI2_COMPLEXIO_CFG_RESET_DONE_RESETCOMPLETED 1 +#define CAL_CSI2_COMPLEXIO_CFG_RESET_DONE_RESETONGOING 0 +#define CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL_MASK BIT(30) +#define CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL 0 +#define CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL_OPERATIONAL 1 + 
+#define CAL_CSI2_SHORT_PACKET_MASK GENMASK(23, 0) + +#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTHS1_MASK BIT(0) +#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTHS2_MASK BIT(1) +#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTHS3_MASK BIT(2) +#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTHS4_MASK BIT(3) +#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTHS5_MASK BIT(4) +#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS1_MASK BIT(5) +#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS2_MASK BIT(6) +#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS3_MASK BIT(7) +#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS4_MASK BIT(8) +#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS5_MASK BIT(9) +#define CAL_CSI2_COMPLEXIO_IRQ_ERRESC1_MASK BIT(10) +#define CAL_CSI2_COMPLEXIO_IRQ_ERRESC2_MASK BIT(11) +#define CAL_CSI2_COMPLEXIO_IRQ_ERRESC3_MASK BIT(12) +#define CAL_CSI2_COMPLEXIO_IRQ_ERRESC4_MASK BIT(13) +#define CAL_CSI2_COMPLEXIO_IRQ_ERRESC5_MASK BIT(14) +#define CAL_CSI2_COMPLEXIO_IRQ_ERRCONTROL1_MASK BIT(15) +#define CAL_CSI2_COMPLEXIO_IRQ_ERRCONTROL2_MASK BIT(16) +#define CAL_CSI2_COMPLEXIO_IRQ_ERRCONTROL3_MASK BIT(17) +#define CAL_CSI2_COMPLEXIO_IRQ_ERRCONTROL4_MASK BIT(18) +#define CAL_CSI2_COMPLEXIO_IRQ_ERRCONTROL5_MASK BIT(19) +#define CAL_CSI2_COMPLEXIO_IRQ_LANE_ERRORS_MASK GENMASK(19, 0) +#define CAL_CSI2_COMPLEXIO_IRQ_STATEULPM1_MASK BIT(20) +#define CAL_CSI2_COMPLEXIO_IRQ_STATEULPM2_MASK BIT(21) +#define CAL_CSI2_COMPLEXIO_IRQ_STATEULPM3_MASK BIT(22) +#define CAL_CSI2_COMPLEXIO_IRQ_STATEULPM4_MASK BIT(23) +#define CAL_CSI2_COMPLEXIO_IRQ_STATEULPM5_MASK BIT(24) +#define CAL_CSI2_COMPLEXIO_IRQ_STATEALLULPMENTER_MASK BIT(25) +#define CAL_CSI2_COMPLEXIO_IRQ_STATEALLULPMEXIT_MASK BIT(26) +#define CAL_CSI2_COMPLEXIO_IRQ_FIFO_OVR_MASK BIT(27) +#define CAL_CSI2_COMPLEXIO_IRQ_SHORT_PACKET_MASK BIT(28) +#define CAL_CSI2_COMPLEXIO_IRQ_ECC_NO_CORRECTION_MASK BIT(30) + +#define CAL_CSI2_TIMING_STOP_STATE_COUNTER_IO1_MASK GENMASK(12, 0) +#define CAL_CSI2_TIMING_STOP_STATE_X4_IO1_MASK BIT(13) +#define CAL_CSI2_TIMING_STOP_STATE_X16_IO1_MASK BIT(14) +#define CAL_CSI2_TIMING_FORCE_RX_MODE_IO1_MASK BIT(15) + +#define CAL_CSI2_VC_IRQ_FS_IRQ_MASK(n) BIT(0 + ((n) * 8)) +#define CAL_CSI2_VC_IRQ_FE_IRQ_MASK(n) BIT(1 + ((n) * 8)) +#define CAL_CSI2_VC_IRQ_LS_IRQ_MASK(n) BIT(2 + ((n) * 8)) +#define CAL_CSI2_VC_IRQ_LE_IRQ_MASK(n) BIT(3 + ((n) * 8)) +#define CAL_CSI2_VC_IRQ_CS_IRQ_MASK(n) BIT(4 + ((n) * 8)) +#define CAL_CSI2_VC_IRQ_ECC_CORRECTION_IRQ_MASK(n) BIT(5 + ((n) * 8)) + +#define CAL_CSI2_CTX_DT_MASK GENMASK(5, 0) +#define CAL_CSI2_CTX_DT_DISABLED 0 +#define CAL_CSI2_CTX_DT_ANY 1 +#define CAL_CSI2_CTX_VC_MASK GENMASK(7, 6) +#define CAL_CSI2_CTX_CPORT_MASK GENMASK(12, 8) +#define CAL_CSI2_CTX_ATT_MASK BIT(13) +#define CAL_CSI2_CTX_ATT_PIX 0 +#define CAL_CSI2_CTX_ATT 1 +#define CAL_CSI2_CTX_PACK_MODE_MASK BIT(14) +#define CAL_CSI2_CTX_PACK_MODE_LINE 0 +#define CAL_CSI2_CTX_PACK_MODE_FRAME 1 +#define CAL_CSI2_CTX_LINES_MASK GENMASK(29, 16) + +#define CAL_CSI2_STATUS_FRAME_MASK GENMASK(15, 0) + +#define CAL_CSI2_PHY_REG0_THS_SETTLE_MASK GENMASK(7, 0) +#define CAL_CSI2_PHY_REG0_THS_TERM_MASK GENMASK(15, 8) +#define CAL_CSI2_PHY_REG0_HSCLOCKCONFIG_MASK BIT(24) +#define CAL_CSI2_PHY_REG0_HSCLOCKCONFIG_DISABLE 1 +#define CAL_CSI2_PHY_REG0_HSCLOCKCONFIG_ENABLE 0 + +#define CAL_CSI2_PHY_REG1_TCLK_SETTLE_MASK GENMASK(7, 0) +#define CAL_CSI2_PHY_REG1_CTRLCLK_DIV_FACTOR_MASK GENMASK(9, 8) +#define CAL_CSI2_PHY_REG1_DPHY_HS_SYNC_PATTERN_MASK GENMASK(17, 10) +#define CAL_CSI2_PHY_REG1_TCLK_TERM_MASK GENMASK(24, 18) +#define CAL_CSI2_PHY_REG1_CLOCK_MISS_DETECTOR_STATUS_MASK BIT(25) +#define 
CAL_CSI2_PHY_REG1_CLOCK_MISS_DETECTOR_STATUS_ERROR 1 +#define CAL_CSI2_PHY_REG1_CLOCK_MISS_DETECTOR_STATUS_SUCCESS 0 +#define CAL_CSI2_PHY_REG1_RESET_DONE_STATUS_MASK GENMASK(29, 28) + +#define CAL_CSI2_PHY_REG10_I933_LDO_DISABLE_MASK BIT(6) + +#define CAL_CSI2_PHY_REG2_CCP2_SYNC_PATTERN_MASK GENMASK(23, 0) +#define CAL_CSI2_PHY_REG2_TRIGGER_CMD_RXTRIGESC3_MASK GENMASK(25, 24) +#define CAL_CSI2_PHY_REG2_TRIGGER_CMD_RXTRIGESC2_MASK GENMASK(27, 26) +#define CAL_CSI2_PHY_REG2_TRIGGER_CMD_RXTRIGESC1_MASK GENMASK(29, 28) +#define CAL_CSI2_PHY_REG2_TRIGGER_CMD_RXTRIGESC0_MASK GENMASK(31, 30) + +#define CM_CAMERRX_CTRL_CSI1_CTRLCLKEN_MASK BIT(0) +#define CM_CAMERRX_CTRL_CSI1_CAMMODE_MASK GENMASK(2, 1) +#define CM_CAMERRX_CTRL_CSI1_LANEENABLE_MASK GENMASK(4, 3) +#define CM_CAMERRX_CTRL_CSI1_MODE_MASK BIT(5) +#define CM_CAMERRX_CTRL_CSI0_CTRLCLKEN_MASK BIT(10) +#define CM_CAMERRX_CTRL_CSI0_CAMMODE_MASK GENMASK(12, 11) +#define CM_CAMERRX_CTRL_CSI0_LANEENABLE_MASK GENMASK(16, 13) +#define CM_CAMERRX_CTRL_CSI0_MODE_MASK BIT(17) + +#endif diff --git a/drivers/media/platform/ti/davinci/Kconfig b/drivers/media/platform/ti/davinci/Kconfig new file mode 100644 index 0000000000..542a602e66 --- /dev/null +++ b/drivers/media/platform/ti/davinci/Kconfig @@ -0,0 +1,33 @@ +# SPDX-License-Identifier: GPL-2.0-only +config VIDEO_DAVINCI_VPIF_DISPLAY + tristate "TI DaVinci VPIF V4L2-Display driver" + depends on V4L_PLATFORM_DRIVERS + depends on VIDEO_DEV + depends on ARCH_DAVINCI || COMPILE_TEST + depends on I2C + select VIDEOBUF2_DMA_CONTIG + select VIDEO_ADV7343 if MEDIA_SUBDRV_AUTOSELECT + select VIDEO_THS7303 if MEDIA_SUBDRV_AUTOSELECT + help + Enables Davinci VPIF module used for display devices. + This module is used for display on TI DM6467/DA850/OMAPL138 + SoCs. + + To compile this driver as a module, choose M here. There will + be two modules called vpif.ko and vpif_display.ko + +config VIDEO_DAVINCI_VPIF_CAPTURE + tristate "TI DaVinci VPIF video capture driver" + depends on V4L_PLATFORM_DRIVERS + depends on VIDEO_DEV + depends on ARCH_DAVINCI || COMPILE_TEST + depends on I2C + select VIDEOBUF2_DMA_CONTIG + select V4L2_FWNODE + help + Enables Davinci VPIF module used for capture devices. + This module is used for capture on TI DM6467/DA850/OMAPL138 + SoCs. + + To compile this driver as a module, choose M here. There will + be two modules called vpif.ko and vpif_capture.ko diff --git a/drivers/media/platform/ti/davinci/Makefile b/drivers/media/platform/ti/davinci/Makefile new file mode 100644 index 0000000000..512f03369b --- /dev/null +++ b/drivers/media/platform/ti/davinci/Makefile @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the davinci video device drivers. +# + +#VPIF Display driver +obj-$(CONFIG_VIDEO_DAVINCI_VPIF_DISPLAY) += vpif.o vpif_display.o +#VPIF Capture driver +obj-$(CONFIG_VIDEO_DAVINCI_VPIF_CAPTURE) += vpif.o vpif_capture.o diff --git a/drivers/media/platform/ti/davinci/vpif.c b/drivers/media/platform/ti/davinci/vpif.c new file mode 100644 index 0000000000..63cdfed37b --- /dev/null +++ b/drivers/media/platform/ti/davinci/vpif.c @@ -0,0 +1,608 @@ +/* + * vpif - Video Port Interface driver + * VPIF is a receiver and transmitter for video data. It has two channels(0, 1) + * that receiving video byte stream and two channels(2, 3) for video output. + * The hardware supports SDTV, HDTV formats, raw data capture. + * Currently, the driver supports NTSC and PAL standards. 
+ * + * Copyright (C) 2009 Texas Instruments Incorporated - https://www.ti.com/ + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed .as is. WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "vpif.h" + +MODULE_DESCRIPTION("TI DaVinci Video Port Interface driver"); +MODULE_LICENSE("GPL"); + +#define VPIF_DRIVER_NAME "vpif" +MODULE_ALIAS("platform:" VPIF_DRIVER_NAME); + +#define VPIF_CH0_MAX_MODES 22 +#define VPIF_CH1_MAX_MODES 2 +#define VPIF_CH2_MAX_MODES 15 +#define VPIF_CH3_MAX_MODES 2 + +struct vpif_data { + struct platform_device *capture; + struct platform_device *display; +}; + +DEFINE_SPINLOCK(vpif_lock); +EXPORT_SYMBOL_GPL(vpif_lock); + +void __iomem *vpif_base; +EXPORT_SYMBOL_GPL(vpif_base); + +/* + * vpif_ch_params: video standard configuration parameters for vpif + * + * The table must include all presets from supported subdevices. + */ +const struct vpif_channel_config_params vpif_ch_params[] = { + /* HDTV formats */ + { + .name = "480p59_94", + .width = 720, + .height = 480, + .frm_fmt = 1, + .ycmux_mode = 0, + .eav2sav = 138-8, + .sav2eav = 720, + .l1 = 1, + .l3 = 43, + .l5 = 523, + .vsize = 525, + .capture_format = 0, + .vbi_supported = 0, + .hd_sd = 1, + .dv_timings = V4L2_DV_BT_CEA_720X480P59_94, + }, + { + .name = "576p50", + .width = 720, + .height = 576, + .frm_fmt = 1, + .ycmux_mode = 0, + .eav2sav = 144-8, + .sav2eav = 720, + .l1 = 1, + .l3 = 45, + .l5 = 621, + .vsize = 625, + .capture_format = 0, + .vbi_supported = 0, + .hd_sd = 1, + .dv_timings = V4L2_DV_BT_CEA_720X576P50, + }, + { + .name = "720p50", + .width = 1280, + .height = 720, + .frm_fmt = 1, + .ycmux_mode = 0, + .eav2sav = 700-8, + .sav2eav = 1280, + .l1 = 1, + .l3 = 26, + .l5 = 746, + .vsize = 750, + .capture_format = 0, + .vbi_supported = 0, + .hd_sd = 1, + .dv_timings = V4L2_DV_BT_CEA_1280X720P50, + }, + { + .name = "720p60", + .width = 1280, + .height = 720, + .frm_fmt = 1, + .ycmux_mode = 0, + .eav2sav = 370 - 8, + .sav2eav = 1280, + .l1 = 1, + .l3 = 26, + .l5 = 746, + .vsize = 750, + .capture_format = 0, + .vbi_supported = 0, + .hd_sd = 1, + .dv_timings = V4L2_DV_BT_CEA_1280X720P60, + }, + { + .name = "1080I50", + .width = 1920, + .height = 1080, + .frm_fmt = 0, + .ycmux_mode = 0, + .eav2sav = 720 - 8, + .sav2eav = 1920, + .l1 = 1, + .l3 = 21, + .l5 = 561, + .l7 = 563, + .l9 = 584, + .l11 = 1124, + .vsize = 1125, + .capture_format = 0, + .vbi_supported = 0, + .hd_sd = 1, + .dv_timings = V4L2_DV_BT_CEA_1920X1080I50, + }, + { + .name = "1080I60", + .width = 1920, + .height = 1080, + .frm_fmt = 0, + .ycmux_mode = 0, + .eav2sav = 280 - 8, + .sav2eav = 1920, + .l1 = 1, + .l3 = 21, + .l5 = 561, + .l7 = 563, + .l9 = 584, + .l11 = 1124, + .vsize = 1125, + .capture_format = 0, + .vbi_supported = 0, + .hd_sd = 1, + .dv_timings = V4L2_DV_BT_CEA_1920X1080I60, + }, + { + .name = "1080p60", + .width = 1920, + .height = 1080, + .frm_fmt = 1, + .ycmux_mode = 0, + .eav2sav = 280 - 8, + .sav2eav = 1920, + .l1 = 1, + .l3 = 42, + .l5 = 1122, + .vsize = 1125, + .capture_format = 0, + .vbi_supported = 0, + .hd_sd = 1, + .dv_timings = 
V4L2_DV_BT_CEA_1920X1080P60, + }, + + /* SDTV formats */ + { + .name = "NTSC_M", + .width = 720, + .height = 480, + .frm_fmt = 0, + .ycmux_mode = 1, + .eav2sav = 268, + .sav2eav = 1440, + .l1 = 1, + .l3 = 23, + .l5 = 263, + .l7 = 266, + .l9 = 286, + .l11 = 525, + .vsize = 525, + .capture_format = 0, + .vbi_supported = 1, + .hd_sd = 0, + .stdid = V4L2_STD_525_60, + }, + { + .name = "PAL_BDGHIK", + .width = 720, + .height = 576, + .frm_fmt = 0, + .ycmux_mode = 1, + .eav2sav = 280, + .sav2eav = 1440, + .l1 = 1, + .l3 = 23, + .l5 = 311, + .l7 = 313, + .l9 = 336, + .l11 = 624, + .vsize = 625, + .capture_format = 0, + .vbi_supported = 1, + .hd_sd = 0, + .stdid = V4L2_STD_625_50, + }, +}; +EXPORT_SYMBOL_GPL(vpif_ch_params); + +const unsigned int vpif_ch_params_count = ARRAY_SIZE(vpif_ch_params); +EXPORT_SYMBOL_GPL(vpif_ch_params_count); + +static inline void vpif_wr_bit(u32 reg, u32 bit, u32 val) +{ + if (val) + vpif_set_bit(reg, bit); + else + vpif_clr_bit(reg, bit); +} + +/* This structure is used to keep track of VPIF size register's offsets */ +struct vpif_registers { + u32 h_cfg, v_cfg_00, v_cfg_01, v_cfg_02, v_cfg, ch_ctrl; + u32 line_offset, vanc0_strt, vanc0_size, vanc1_strt; + u32 vanc1_size, width_mask, len_mask; + u8 max_modes; +}; + +static const struct vpif_registers vpifregs[VPIF_NUM_CHANNELS] = { + /* Channel0 */ + { + VPIF_CH0_H_CFG, VPIF_CH0_V_CFG_00, VPIF_CH0_V_CFG_01, + VPIF_CH0_V_CFG_02, VPIF_CH0_V_CFG_03, VPIF_CH0_CTRL, + VPIF_CH0_IMG_ADD_OFST, 0, 0, 0, 0, 0x1FFF, 0xFFF, + VPIF_CH0_MAX_MODES, + }, + /* Channel1 */ + { + VPIF_CH1_H_CFG, VPIF_CH1_V_CFG_00, VPIF_CH1_V_CFG_01, + VPIF_CH1_V_CFG_02, VPIF_CH1_V_CFG_03, VPIF_CH1_CTRL, + VPIF_CH1_IMG_ADD_OFST, 0, 0, 0, 0, 0x1FFF, 0xFFF, + VPIF_CH1_MAX_MODES, + }, + /* Channel2 */ + { + VPIF_CH2_H_CFG, VPIF_CH2_V_CFG_00, VPIF_CH2_V_CFG_01, + VPIF_CH2_V_CFG_02, VPIF_CH2_V_CFG_03, VPIF_CH2_CTRL, + VPIF_CH2_IMG_ADD_OFST, VPIF_CH2_VANC0_STRT, VPIF_CH2_VANC0_SIZE, + VPIF_CH2_VANC1_STRT, VPIF_CH2_VANC1_SIZE, 0x7FF, 0x7FF, + VPIF_CH2_MAX_MODES + }, + /* Channel3 */ + { + VPIF_CH3_H_CFG, VPIF_CH3_V_CFG_00, VPIF_CH3_V_CFG_01, + VPIF_CH3_V_CFG_02, VPIF_CH3_V_CFG_03, VPIF_CH3_CTRL, + VPIF_CH3_IMG_ADD_OFST, VPIF_CH3_VANC0_STRT, VPIF_CH3_VANC0_SIZE, + VPIF_CH3_VANC1_STRT, VPIF_CH3_VANC1_SIZE, 0x7FF, 0x7FF, + VPIF_CH3_MAX_MODES + }, +}; + +/* vpif_set_mode_info: + * This function is used to set horizontal and vertical config parameters + * As per the standard in the channel, configure the values of L1, L3, + * L5, L7 L9, L11 in VPIF Register , also write width and height + */ +static void vpif_set_mode_info(const struct vpif_channel_config_params *config, + u8 channel_id, u8 config_channel_id) +{ + u32 value; + + value = (config->eav2sav & vpifregs[config_channel_id].width_mask); + value <<= VPIF_CH_LEN_SHIFT; + value |= (config->sav2eav & vpifregs[config_channel_id].width_mask); + regw(value, vpifregs[channel_id].h_cfg); + + value = (config->l1 & vpifregs[config_channel_id].len_mask); + value <<= VPIF_CH_LEN_SHIFT; + value |= (config->l3 & vpifregs[config_channel_id].len_mask); + regw(value, vpifregs[channel_id].v_cfg_00); + + value = (config->l5 & vpifregs[config_channel_id].len_mask); + value <<= VPIF_CH_LEN_SHIFT; + value |= (config->l7 & vpifregs[config_channel_id].len_mask); + regw(value, vpifregs[channel_id].v_cfg_01); + + value = (config->l9 & vpifregs[config_channel_id].len_mask); + value <<= VPIF_CH_LEN_SHIFT; + value |= (config->l11 & vpifregs[config_channel_id].len_mask); + regw(value, vpifregs[channel_id].v_cfg_02); + + value = 
(config->vsize & vpifregs[config_channel_id].len_mask); + regw(value, vpifregs[channel_id].v_cfg); +} + +/* config_vpif_params + * Function to set the parameters of a channel + * Mainly modifies the channel control register + * It sets the frame format and YC mux mode + */ +static void config_vpif_params(struct vpif_params *vpifparams, + u8 channel_id, u8 found) +{ + const struct vpif_channel_config_params *config = &vpifparams->std_info; + u32 value, ch_nip, reg; + u8 start, end; + int i; + + start = channel_id; + end = channel_id + found; + + for (i = start; i < end; i++) { + reg = vpifregs[i].ch_ctrl; + if (channel_id < 2) + ch_nip = VPIF_CAPTURE_CH_NIP; + else + ch_nip = VPIF_DISPLAY_CH_NIP; + + vpif_wr_bit(reg, ch_nip, config->frm_fmt); + vpif_wr_bit(reg, VPIF_CH_YC_MUX_BIT, config->ycmux_mode); + vpif_wr_bit(reg, VPIF_CH_INPUT_FIELD_FRAME_BIT, + vpifparams->video_params.storage_mode); + + /* Set raster scanning SDR Format */ + vpif_clr_bit(reg, VPIF_CH_SDR_FMT_BIT); + vpif_wr_bit(reg, VPIF_CH_DATA_MODE_BIT, config->capture_format); + + if (channel_id > 1) /* Set the Pixel enable bit */ + vpif_set_bit(reg, VPIF_DISPLAY_PIX_EN_BIT); + else if (config->capture_format) { + /* Set the polarity of various pins */ + vpif_wr_bit(reg, VPIF_CH_FID_POLARITY_BIT, + vpifparams->iface.fid_pol); + vpif_wr_bit(reg, VPIF_CH_V_VALID_POLARITY_BIT, + vpifparams->iface.vd_pol); + vpif_wr_bit(reg, VPIF_CH_H_VALID_POLARITY_BIT, + vpifparams->iface.hd_pol); + + value = regr(reg); + /* Set data width */ + value &= ~(0x3u << + VPIF_CH_DATA_WIDTH_BIT); + value |= ((vpifparams->params.data_sz) << + VPIF_CH_DATA_WIDTH_BIT); + regw(value, reg); + } + + /* Write the pitch in the driver */ + regw((vpifparams->video_params.hpitch), + vpifregs[i].line_offset); + } +} + +/* vpif_set_video_params + * This function is used to set video parameters in VPIF register + */ +int vpif_set_video_params(struct vpif_params *vpifparams, u8 channel_id) +{ + const struct vpif_channel_config_params *config = &vpifparams->std_info; + int found = 1; + + vpif_set_mode_info(config, channel_id, channel_id); + if (!config->ycmux_mode) { + /* YC are on separate channels (HDTV formats) */ + vpif_set_mode_info(config, channel_id + 1, channel_id); + found = 2; + } + + config_vpif_params(vpifparams, channel_id, found); + + regw(0x80, VPIF_REQ_SIZE); + regw(0x01, VPIF_EMULATION_CTRL); + + return found; +} +EXPORT_SYMBOL(vpif_set_video_params); + +void vpif_set_vbi_display_params(struct vpif_vbi_params *vbiparams, + u8 channel_id) +{ + u32 value; + + value = 0x3F8 & (vbiparams->hstart0); + value |= 0x3FFFFFF & ((vbiparams->vstart0) << 16); + regw(value, vpifregs[channel_id].vanc0_strt); + + value = 0x3F8 & (vbiparams->hstart1); + value |= 0x3FFFFFF & ((vbiparams->vstart1) << 16); + regw(value, vpifregs[channel_id].vanc1_strt); + + value = 0x3F8 & (vbiparams->hsize0); + value |= 0x3FFFFFF & ((vbiparams->vsize0) << 16); + regw(value, vpifregs[channel_id].vanc0_size); + + value = 0x3F8 & (vbiparams->hsize1); + value |= 0x3FFFFFF & ((vbiparams->vsize1) << 16); + regw(value, vpifregs[channel_id].vanc1_size); + +} +EXPORT_SYMBOL(vpif_set_vbi_display_params); + +int vpif_channel_getfid(u8 channel_id) +{ + return (regr(vpifregs[channel_id].ch_ctrl) & VPIF_CH_FID_MASK) + >> VPIF_CH_FID_SHIFT; +} +EXPORT_SYMBOL(vpif_channel_getfid); + +static void vpif_pdev_release(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + + kfree(pdev); +} + +static int vpif_probe(struct platform_device *pdev) +{ + static struct resource res_irq; + 
struct platform_device *pdev_capture, *pdev_display; + struct device_node *endpoint = NULL; + struct vpif_data *data; + int ret; + int irq; + + vpif_base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(vpif_base)) + return PTR_ERR(vpif_base); + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + platform_set_drvdata(pdev, data); + + pm_runtime_enable(&pdev->dev); + pm_runtime_get(&pdev->dev); + + /* + * If VPIF Node has endpoints, assume "new" DT support, + * where capture and display drivers don't have DT nodes + * so their devices need to be registered manually here + * for their legacy platform_drivers to work. + */ + endpoint = of_graph_get_next_endpoint(pdev->dev.of_node, + endpoint); + if (!endpoint) + return 0; + of_node_put(endpoint); + + /* + * For DT platforms, manually create platform_devices for + * capture/display drivers. + */ + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + ret = irq; + goto err_put_rpm; + } + res_irq = DEFINE_RES_IRQ_NAMED(irq, of_node_full_name(pdev->dev.of_node)); + res_irq.flags |= irq_get_trigger_type(irq); + + pdev_capture = kzalloc(sizeof(*pdev_capture), GFP_KERNEL); + if (!pdev_capture) { + ret = -ENOMEM; + goto err_put_rpm; + } + + pdev_capture->name = "vpif_capture"; + pdev_capture->id = -1; + pdev_capture->resource = &res_irq; + pdev_capture->num_resources = 1; + pdev_capture->dev.dma_mask = pdev->dev.dma_mask; + pdev_capture->dev.coherent_dma_mask = pdev->dev.coherent_dma_mask; + pdev_capture->dev.parent = &pdev->dev; + pdev_capture->dev.release = vpif_pdev_release; + + ret = platform_device_register(pdev_capture); + if (ret) + goto err_put_pdev_capture; + + pdev_display = kzalloc(sizeof(*pdev_display), GFP_KERNEL); + if (!pdev_display) { + ret = -ENOMEM; + goto err_put_pdev_capture; + } + + pdev_display->name = "vpif_display"; + pdev_display->id = -1; + pdev_display->resource = &res_irq; + pdev_display->num_resources = 1; + pdev_display->dev.dma_mask = pdev->dev.dma_mask; + pdev_display->dev.coherent_dma_mask = pdev->dev.coherent_dma_mask; + pdev_display->dev.parent = &pdev->dev; + pdev_display->dev.release = vpif_pdev_release; + + ret = platform_device_register(pdev_display); + if (ret) + goto err_put_pdev_display; + + data->capture = pdev_capture; + data->display = pdev_display; + + return 0; + +err_put_pdev_display: + platform_device_put(pdev_display); +err_put_pdev_capture: + platform_device_put(pdev_capture); +err_put_rpm: + pm_runtime_put(&pdev->dev); + pm_runtime_disable(&pdev->dev); + kfree(data); + + return ret; +} + +static void vpif_remove(struct platform_device *pdev) +{ + struct vpif_data *data = platform_get_drvdata(pdev); + + if (data->capture) + platform_device_unregister(data->capture); + if (data->display) + platform_device_unregister(data->display); + + pm_runtime_put(&pdev->dev); + pm_runtime_disable(&pdev->dev); + + kfree(data); +} + +#ifdef CONFIG_PM +static int vpif_suspend(struct device *dev) +{ + pm_runtime_put(dev); + return 0; +} + +static int vpif_resume(struct device *dev) +{ + pm_runtime_get(dev); + return 0; +} + +static const struct dev_pm_ops vpif_pm = { + .suspend = vpif_suspend, + .resume = vpif_resume, +}; + +#define vpif_pm_ops (&vpif_pm) +#else +#define vpif_pm_ops NULL +#endif + +#if IS_ENABLED(CONFIG_OF) +static const struct of_device_id vpif_of_match[] = { + { .compatible = "ti,da850-vpif", }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, vpif_of_match); +#endif + +static struct platform_driver vpif_driver = { + .driver = { + .of_match_table = 
of_match_ptr(vpif_of_match), + .name = VPIF_DRIVER_NAME, + .pm = vpif_pm_ops, + }, + .remove_new = vpif_remove, + .probe = vpif_probe, +}; + +static void vpif_exit(void) +{ + platform_driver_unregister(&vpif_driver); +} + +static int __init vpif_init(void) +{ + return platform_driver_register(&vpif_driver); +} +subsys_initcall(vpif_init); +module_exit(vpif_exit); + diff --git a/drivers/media/platform/ti/davinci/vpif.h b/drivers/media/platform/ti/davinci/vpif.h new file mode 100644 index 0000000000..52ecc25622 --- /dev/null +++ b/drivers/media/platform/ti/davinci/vpif.h @@ -0,0 +1,679 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * VPIF header file + * + * Copyright (C) 2009 Texas Instruments Incorporated - https://www.ti.com/ + */ + +#ifndef VPIF_H +#define VPIF_H + +#include +#include +#include + +/* Maximum channel allowed */ +#define VPIF_NUM_CHANNELS (4) +#define VPIF_CAPTURE_NUM_CHANNELS (2) +#define VPIF_DISPLAY_NUM_CHANNELS (2) + +/* Macros to read/write registers */ +extern void __iomem *vpif_base; +extern spinlock_t vpif_lock; + +#define regr(reg) readl((reg) + vpif_base) +#define regw(value, reg) writel(value, (reg + vpif_base)) + +/* Register Address Offsets */ +#define VPIF_PID (0x0000) +#define VPIF_CH0_CTRL (0x0004) +#define VPIF_CH1_CTRL (0x0008) +#define VPIF_CH2_CTRL (0x000C) +#define VPIF_CH3_CTRL (0x0010) + +#define VPIF_INTEN (0x0020) +#define VPIF_INTEN_SET (0x0024) +#define VPIF_INTEN_CLR (0x0028) +#define VPIF_STATUS (0x002C) +#define VPIF_STATUS_CLR (0x0030) +#define VPIF_EMULATION_CTRL (0x0034) +#define VPIF_REQ_SIZE (0x0038) + +#define VPIF_CH0_TOP_STRT_ADD_LUMA (0x0040) +#define VPIF_CH0_BTM_STRT_ADD_LUMA (0x0044) +#define VPIF_CH0_TOP_STRT_ADD_CHROMA (0x0048) +#define VPIF_CH0_BTM_STRT_ADD_CHROMA (0x004c) +#define VPIF_CH0_TOP_STRT_ADD_HANC (0x0050) +#define VPIF_CH0_BTM_STRT_ADD_HANC (0x0054) +#define VPIF_CH0_TOP_STRT_ADD_VANC (0x0058) +#define VPIF_CH0_BTM_STRT_ADD_VANC (0x005c) +#define VPIF_CH0_SP_CFG (0x0060) +#define VPIF_CH0_IMG_ADD_OFST (0x0064) +#define VPIF_CH0_HANC_ADD_OFST (0x0068) +#define VPIF_CH0_H_CFG (0x006c) +#define VPIF_CH0_V_CFG_00 (0x0070) +#define VPIF_CH0_V_CFG_01 (0x0074) +#define VPIF_CH0_V_CFG_02 (0x0078) +#define VPIF_CH0_V_CFG_03 (0x007c) + +#define VPIF_CH1_TOP_STRT_ADD_LUMA (0x0080) +#define VPIF_CH1_BTM_STRT_ADD_LUMA (0x0084) +#define VPIF_CH1_TOP_STRT_ADD_CHROMA (0x0088) +#define VPIF_CH1_BTM_STRT_ADD_CHROMA (0x008c) +#define VPIF_CH1_TOP_STRT_ADD_HANC (0x0090) +#define VPIF_CH1_BTM_STRT_ADD_HANC (0x0094) +#define VPIF_CH1_TOP_STRT_ADD_VANC (0x0098) +#define VPIF_CH1_BTM_STRT_ADD_VANC (0x009c) +#define VPIF_CH1_SP_CFG (0x00a0) +#define VPIF_CH1_IMG_ADD_OFST (0x00a4) +#define VPIF_CH1_HANC_ADD_OFST (0x00a8) +#define VPIF_CH1_H_CFG (0x00ac) +#define VPIF_CH1_V_CFG_00 (0x00b0) +#define VPIF_CH1_V_CFG_01 (0x00b4) +#define VPIF_CH1_V_CFG_02 (0x00b8) +#define VPIF_CH1_V_CFG_03 (0x00bc) + +#define VPIF_CH2_TOP_STRT_ADD_LUMA (0x00c0) +#define VPIF_CH2_BTM_STRT_ADD_LUMA (0x00c4) +#define VPIF_CH2_TOP_STRT_ADD_CHROMA (0x00c8) +#define VPIF_CH2_BTM_STRT_ADD_CHROMA (0x00cc) +#define VPIF_CH2_TOP_STRT_ADD_HANC (0x00d0) +#define VPIF_CH2_BTM_STRT_ADD_HANC (0x00d4) +#define VPIF_CH2_TOP_STRT_ADD_VANC (0x00d8) +#define VPIF_CH2_BTM_STRT_ADD_VANC (0x00dc) +#define VPIF_CH2_SP_CFG (0x00e0) +#define VPIF_CH2_IMG_ADD_OFST (0x00e4) +#define VPIF_CH2_HANC_ADD_OFST (0x00e8) +#define VPIF_CH2_H_CFG (0x00ec) +#define VPIF_CH2_V_CFG_00 (0x00f0) +#define VPIF_CH2_V_CFG_01 (0x00f4) +#define VPIF_CH2_V_CFG_02 (0x00f8) +#define VPIF_CH2_V_CFG_03 
(0x00fc) +#define VPIF_CH2_HANC0_STRT (0x0100) +#define VPIF_CH2_HANC0_SIZE (0x0104) +#define VPIF_CH2_HANC1_STRT (0x0108) +#define VPIF_CH2_HANC1_SIZE (0x010c) +#define VPIF_CH2_VANC0_STRT (0x0110) +#define VPIF_CH2_VANC0_SIZE (0x0114) +#define VPIF_CH2_VANC1_STRT (0x0118) +#define VPIF_CH2_VANC1_SIZE (0x011c) + +#define VPIF_CH3_TOP_STRT_ADD_LUMA (0x0140) +#define VPIF_CH3_BTM_STRT_ADD_LUMA (0x0144) +#define VPIF_CH3_TOP_STRT_ADD_CHROMA (0x0148) +#define VPIF_CH3_BTM_STRT_ADD_CHROMA (0x014c) +#define VPIF_CH3_TOP_STRT_ADD_HANC (0x0150) +#define VPIF_CH3_BTM_STRT_ADD_HANC (0x0154) +#define VPIF_CH3_TOP_STRT_ADD_VANC (0x0158) +#define VPIF_CH3_BTM_STRT_ADD_VANC (0x015c) +#define VPIF_CH3_SP_CFG (0x0160) +#define VPIF_CH3_IMG_ADD_OFST (0x0164) +#define VPIF_CH3_HANC_ADD_OFST (0x0168) +#define VPIF_CH3_H_CFG (0x016c) +#define VPIF_CH3_V_CFG_00 (0x0170) +#define VPIF_CH3_V_CFG_01 (0x0174) +#define VPIF_CH3_V_CFG_02 (0x0178) +#define VPIF_CH3_V_CFG_03 (0x017c) +#define VPIF_CH3_HANC0_STRT (0x0180) +#define VPIF_CH3_HANC0_SIZE (0x0184) +#define VPIF_CH3_HANC1_STRT (0x0188) +#define VPIF_CH3_HANC1_SIZE (0x018c) +#define VPIF_CH3_VANC0_STRT (0x0190) +#define VPIF_CH3_VANC0_SIZE (0x0194) +#define VPIF_CH3_VANC1_STRT (0x0198) +#define VPIF_CH3_VANC1_SIZE (0x019c) + +#define VPIF_IODFT_CTRL (0x01c0) + +/* Functions for bit Manipulation */ +static inline void vpif_set_bit(u32 reg, u32 bit) +{ + regw((regr(reg)) | (0x01 << bit), reg); +} + +static inline void vpif_clr_bit(u32 reg, u32 bit) +{ + regw(((regr(reg)) & ~(0x01 << bit)), reg); +} + +/* Macro for Generating mask */ +#ifdef GENERATE_MASK +#undef GENERATE_MASK +#endif + +#define GENERATE_MASK(bits, pos) \ + ((((0xFFFFFFFF) << (32 - bits)) >> (32 - bits)) << pos) + +/* Bit positions in the channel control registers */ +#define VPIF_CH_DATA_MODE_BIT (2) +#define VPIF_CH_YC_MUX_BIT (3) +#define VPIF_CH_SDR_FMT_BIT (4) +#define VPIF_CH_HANC_EN_BIT (8) +#define VPIF_CH_VANC_EN_BIT (9) + +#define VPIF_CAPTURE_CH_NIP (10) +#define VPIF_DISPLAY_CH_NIP (11) + +#define VPIF_DISPLAY_PIX_EN_BIT (10) + +#define VPIF_CH_INPUT_FIELD_FRAME_BIT (12) + +#define VPIF_CH_FID_POLARITY_BIT (15) +#define VPIF_CH_V_VALID_POLARITY_BIT (14) +#define VPIF_CH_H_VALID_POLARITY_BIT (13) +#define VPIF_CH_DATA_WIDTH_BIT (28) + +#define VPIF_CH_CLK_EDGE_CTRL_BIT (31) + +/* Mask various length */ +#define VPIF_CH_EAVSAV_MASK GENERATE_MASK(13, 0) +#define VPIF_CH_LEN_MASK GENERATE_MASK(12, 0) +#define VPIF_CH_WIDTH_MASK GENERATE_MASK(13, 0) +#define VPIF_CH_LEN_SHIFT (16) + +/* VPIF masks for registers */ +#define VPIF_REQ_SIZE_MASK (0x1ff) + +/* bit posotion of interrupt vpif_ch_intr register */ +#define VPIF_INTEN_FRAME_CH0 (0x00000001) +#define VPIF_INTEN_FRAME_CH1 (0x00000002) +#define VPIF_INTEN_FRAME_CH2 (0x00000004) +#define VPIF_INTEN_FRAME_CH3 (0x00000008) + +/* bit position of clock and channel enable in vpif_chn_ctrl register */ + +#define VPIF_CH0_CLK_EN (0x00000002) +#define VPIF_CH0_EN (0x00000001) +#define VPIF_CH1_CLK_EN (0x00000002) +#define VPIF_CH1_EN (0x00000001) +#define VPIF_CH2_CLK_EN (0x00000002) +#define VPIF_CH2_EN (0x00000001) +#define VPIF_CH3_CLK_EN (0x00000002) +#define VPIF_CH3_EN (0x00000001) +#define VPIF_CH_CLK_EN (0x00000002) +#define VPIF_CH_EN (0x00000001) + +#define VPIF_INT_TOP (0x00) +#define VPIF_INT_BOTTOM (0x01) +#define VPIF_INT_BOTH (0x02) + +#define VPIF_CH0_INT_CTRL_SHIFT (6) +#define VPIF_CH1_INT_CTRL_SHIFT (6) +#define VPIF_CH2_INT_CTRL_SHIFT (6) +#define VPIF_CH3_INT_CTRL_SHIFT (6) +#define VPIF_CH_INT_CTRL_SHIFT (6) + +#define 
VPIF_CH2_CLIP_ANC_EN 14
+#define VPIF_CH2_CLIP_ACTIVE_EN 13
+
+#define VPIF_CH3_CLIP_ANC_EN 14
+#define VPIF_CH3_CLIP_ACTIVE_EN 13
+
+/* enable interrupt on both fields in the vpif_ch0_ctrl register */
+#define channel0_intr_assert() (regw((regr(VPIF_CH0_CTRL)|\
+ (VPIF_INT_BOTH << VPIF_CH0_INT_CTRL_SHIFT)), VPIF_CH0_CTRL))
+
+/* enable interrupt on both fields in the vpif_ch1_ctrl register */
+#define channel1_intr_assert() (regw((regr(VPIF_CH1_CTRL)|\
+ (VPIF_INT_BOTH << VPIF_CH1_INT_CTRL_SHIFT)), VPIF_CH1_CTRL))
+
+/* enable interrupt on both fields in the vpif_ch2_ctrl register */
+#define channel2_intr_assert() (regw((regr(VPIF_CH2_CTRL)|\
+ (VPIF_INT_BOTH << VPIF_CH2_INT_CTRL_SHIFT)), VPIF_CH2_CTRL))
+
+/* enable interrupt on both fields in the vpif_ch3_ctrl register */
+#define channel3_intr_assert() (regw((regr(VPIF_CH3_CTRL)|\
+ (VPIF_INT_BOTH << VPIF_CH3_INT_CTRL_SHIFT)), VPIF_CH3_CTRL))
+
+#define VPIF_CH_FID_MASK (0x20)
+#define VPIF_CH_FID_SHIFT (5)
+
+#define VPIF_NTSC_VBI_START_FIELD0 (1)
+#define VPIF_NTSC_VBI_START_FIELD1 (263)
+#define VPIF_PAL_VBI_START_FIELD0 (624)
+#define VPIF_PAL_VBI_START_FIELD1 (311)
+
+#define VPIF_NTSC_HBI_START_FIELD0 (1)
+#define VPIF_NTSC_HBI_START_FIELD1 (263)
+#define VPIF_PAL_HBI_START_FIELD0 (624)
+#define VPIF_PAL_HBI_START_FIELD1 (311)
+
+#define VPIF_NTSC_VBI_COUNT_FIELD0 (20)
+#define VPIF_NTSC_VBI_COUNT_FIELD1 (19)
+#define VPIF_PAL_VBI_COUNT_FIELD0 (24)
+#define VPIF_PAL_VBI_COUNT_FIELD1 (25)
+
+#define VPIF_NTSC_HBI_COUNT_FIELD0 (263)
+#define VPIF_NTSC_HBI_COUNT_FIELD1 (262)
+#define VPIF_PAL_HBI_COUNT_FIELD0 (312)
+#define VPIF_PAL_HBI_COUNT_FIELD1 (313)
+
+#define VPIF_NTSC_VBI_SAMPLES_PER_LINE (720)
+#define VPIF_PAL_VBI_SAMPLES_PER_LINE (720)
+#define VPIF_NTSC_HBI_SAMPLES_PER_LINE (268)
+#define VPIF_PAL_HBI_SAMPLES_PER_LINE (280)
+
+#define VPIF_CH_VANC_EN (0x20)
+#define VPIF_DMA_REQ_SIZE (0x080)
+#define VPIF_EMULATION_DISABLE (0x01)
+
+extern u8 irq_vpif_capture_channel[VPIF_NUM_CHANNELS];
+
+/* inline function to enable/disable channel0 */
+static inline void enable_channel0(int enable)
+{
+ if (enable)
+ regw((regr(VPIF_CH0_CTRL) | (VPIF_CH0_EN)), VPIF_CH0_CTRL);
+ else
+ regw((regr(VPIF_CH0_CTRL) & (~VPIF_CH0_EN)), VPIF_CH0_CTRL);
+}
+
+/* inline function to enable/disable channel1 */
+static inline void enable_channel1(int enable)
+{
+ if (enable)
+ regw((regr(VPIF_CH1_CTRL) | (VPIF_CH1_EN)), VPIF_CH1_CTRL);
+ else
+ regw((regr(VPIF_CH1_CTRL) & (~VPIF_CH1_EN)), VPIF_CH1_CTRL);
+}
+
+/* inline function to enable interrupt for channel0 */
+static inline void channel0_intr_enable(int enable)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&vpif_lock, flags);
+
+ if (enable) {
+ regw((regr(VPIF_INTEN) | 0x10), VPIF_INTEN);
+ regw((regr(VPIF_INTEN_SET) | 0x10), VPIF_INTEN_SET);
+
+ regw((regr(VPIF_INTEN) | VPIF_INTEN_FRAME_CH0), VPIF_INTEN);
+ regw((regr(VPIF_INTEN_SET) | VPIF_INTEN_FRAME_CH0),
+ VPIF_INTEN_SET);
+ } else {
+ regw((regr(VPIF_INTEN) & (~VPIF_INTEN_FRAME_CH0)), VPIF_INTEN);
+ regw((regr(VPIF_INTEN_SET) | VPIF_INTEN_FRAME_CH0),
+ VPIF_INTEN_SET);
+ }
+ spin_unlock_irqrestore(&vpif_lock, flags);
+}
+
+/* inline function to enable interrupt for channel1 */
+static inline void channel1_intr_enable(int enable)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&vpif_lock, flags);
+
+ if (enable) {
+ regw((regr(VPIF_INTEN) | 0x10), VPIF_INTEN);
+ regw((regr(VPIF_INTEN_SET) | 0x10), VPIF_INTEN_SET);
+
+ regw((regr(VPIF_INTEN) | VPIF_INTEN_FRAME_CH1), VPIF_INTEN);
+ regw((regr(VPIF_INTEN_SET) | VPIF_INTEN_FRAME_CH1),
+ VPIF_INTEN_SET);
+ } else {
+ regw((regr(VPIF_INTEN) & (~VPIF_INTEN_FRAME_CH1)), VPIF_INTEN);
+ regw((regr(VPIF_INTEN_SET) | VPIF_INTEN_FRAME_CH1),
+ VPIF_INTEN_SET);
+ }
+ spin_unlock_irqrestore(&vpif_lock, flags);
+}
+
+/* inline function to set buffer addresses in case of Y/C non mux mode */
+static inline void ch0_set_video_buf_addr_yc_nmux(unsigned long top_strt_luma,
+ unsigned long btm_strt_luma,
+ unsigned long top_strt_chroma,
+ unsigned long btm_strt_chroma)
+{
+ regw(top_strt_luma, VPIF_CH0_TOP_STRT_ADD_LUMA);
+ regw(btm_strt_luma, VPIF_CH0_BTM_STRT_ADD_LUMA);
+ regw(top_strt_chroma, VPIF_CH1_TOP_STRT_ADD_CHROMA);
+ regw(btm_strt_chroma, VPIF_CH1_BTM_STRT_ADD_CHROMA);
+}
+
+/* inline function to set buffer addresses in VPIF registers for video data */
+static inline void ch0_set_video_buf_addr(unsigned long top_strt_luma,
+ unsigned long btm_strt_luma,
+ unsigned long top_strt_chroma,
+ unsigned long btm_strt_chroma)
+{
+ regw(top_strt_luma, VPIF_CH0_TOP_STRT_ADD_LUMA);
+ regw(btm_strt_luma, VPIF_CH0_BTM_STRT_ADD_LUMA);
+ regw(top_strt_chroma, VPIF_CH0_TOP_STRT_ADD_CHROMA);
+ regw(btm_strt_chroma, VPIF_CH0_BTM_STRT_ADD_CHROMA);
+}
+
+static inline void ch1_set_video_buf_addr(unsigned long top_strt_luma,
+ unsigned long btm_strt_luma,
+ unsigned long top_strt_chroma,
+ unsigned long btm_strt_chroma)
+{
+
+ regw(top_strt_luma, VPIF_CH1_TOP_STRT_ADD_LUMA);
+ regw(btm_strt_luma, VPIF_CH1_BTM_STRT_ADD_LUMA);
+ regw(top_strt_chroma, VPIF_CH1_TOP_STRT_ADD_CHROMA);
+ regw(btm_strt_chroma, VPIF_CH1_BTM_STRT_ADD_CHROMA);
+}
+
+static inline void ch0_set_vbi_addr(unsigned long top_vbi,
+ unsigned long btm_vbi, unsigned long a, unsigned long b)
+{
+ regw(top_vbi, VPIF_CH0_TOP_STRT_ADD_VANC);
+ regw(btm_vbi, VPIF_CH0_BTM_STRT_ADD_VANC);
+}
+
+static inline void ch0_set_hbi_addr(unsigned long top_vbi,
+ unsigned long btm_vbi, unsigned long a, unsigned long b)
+{
+ regw(top_vbi, VPIF_CH0_TOP_STRT_ADD_HANC);
+ regw(btm_vbi, VPIF_CH0_BTM_STRT_ADD_HANC);
+}
+
+static inline void ch1_set_vbi_addr(unsigned long top_vbi,
+ unsigned long btm_vbi, unsigned long a, unsigned long b)
+{
+ regw(top_vbi, VPIF_CH1_TOP_STRT_ADD_VANC);
+ regw(btm_vbi, VPIF_CH1_BTM_STRT_ADD_VANC);
+}
+
+static inline void ch1_set_hbi_addr(unsigned long top_vbi,
+ unsigned long btm_vbi, unsigned long a, unsigned long b)
+{
+ regw(top_vbi, VPIF_CH1_TOP_STRT_ADD_HANC);
+ regw(btm_vbi, VPIF_CH1_BTM_STRT_ADD_HANC);
+}
+
+/* Inline function to disable raw vbi in the given channel */
+static inline void disable_raw_feature(u8 channel_id, u8 index)
+{
+ u32 ctrl_reg;
+ if (0 == channel_id)
+ ctrl_reg = VPIF_CH0_CTRL;
+ else
+ ctrl_reg = VPIF_CH1_CTRL;
+
+ if (1 == index)
+ vpif_clr_bit(ctrl_reg, VPIF_CH_VANC_EN_BIT);
+ else
+ vpif_clr_bit(ctrl_reg, VPIF_CH_HANC_EN_BIT);
+}
+
+static inline void enable_raw_feature(u8 channel_id, u8 index)
+{
+ u32 ctrl_reg;
+ if (0 == channel_id)
+ ctrl_reg = VPIF_CH0_CTRL;
+ else
+ ctrl_reg = VPIF_CH1_CTRL;
+
+ if (1 == index)
+ vpif_set_bit(ctrl_reg, VPIF_CH_VANC_EN_BIT);
+ else
+ vpif_set_bit(ctrl_reg, VPIF_CH_HANC_EN_BIT);
+}
+
+/* inline function to enable/disable channel2 */
+static inline void enable_channel2(int enable)
+{
+ if (enable) {
+ regw((regr(VPIF_CH2_CTRL) | (VPIF_CH2_CLK_EN)), VPIF_CH2_CTRL);
+ regw((regr(VPIF_CH2_CTRL) | (VPIF_CH2_EN)), VPIF_CH2_CTRL);
+ } else {
+ regw((regr(VPIF_CH2_CTRL) & (~VPIF_CH2_CLK_EN)), VPIF_CH2_CTRL);
+ regw((regr(VPIF_CH2_CTRL) & (~VPIF_CH2_EN)), VPIF_CH2_CTRL);
+ }
+}
+
+/* inline function to enable/disable channel3 */
+static
inline void enable_channel3(int enable) +{ + if (enable) { + regw((regr(VPIF_CH3_CTRL) | (VPIF_CH3_CLK_EN)), VPIF_CH3_CTRL); + regw((regr(VPIF_CH3_CTRL) | (VPIF_CH3_EN)), VPIF_CH3_CTRL); + } else { + regw((regr(VPIF_CH3_CTRL) & (~VPIF_CH3_CLK_EN)), VPIF_CH3_CTRL); + regw((regr(VPIF_CH3_CTRL) & (~VPIF_CH3_EN)), VPIF_CH3_CTRL); + } +} + +/* inline function to enable interrupt for channel2 */ +static inline void channel2_intr_enable(int enable) +{ + unsigned long flags; + + spin_lock_irqsave(&vpif_lock, flags); + + if (enable) { + regw((regr(VPIF_INTEN) | 0x10), VPIF_INTEN); + regw((regr(VPIF_INTEN_SET) | 0x10), VPIF_INTEN_SET); + regw((regr(VPIF_INTEN) | VPIF_INTEN_FRAME_CH2), VPIF_INTEN); + regw((regr(VPIF_INTEN_SET) | VPIF_INTEN_FRAME_CH2), + VPIF_INTEN_SET); + } else { + regw((regr(VPIF_INTEN) & (~VPIF_INTEN_FRAME_CH2)), VPIF_INTEN); + regw((regr(VPIF_INTEN_SET) | VPIF_INTEN_FRAME_CH2), + VPIF_INTEN_SET); + } + spin_unlock_irqrestore(&vpif_lock, flags); +} + +/* inline function to enable interrupt for channel3 */ +static inline void channel3_intr_enable(int enable) +{ + unsigned long flags; + + spin_lock_irqsave(&vpif_lock, flags); + + if (enable) { + regw((regr(VPIF_INTEN) | 0x10), VPIF_INTEN); + regw((regr(VPIF_INTEN_SET) | 0x10), VPIF_INTEN_SET); + + regw((regr(VPIF_INTEN) | VPIF_INTEN_FRAME_CH3), VPIF_INTEN); + regw((regr(VPIF_INTEN_SET) | VPIF_INTEN_FRAME_CH3), + VPIF_INTEN_SET); + } else { + regw((regr(VPIF_INTEN) & (~VPIF_INTEN_FRAME_CH3)), VPIF_INTEN); + regw((regr(VPIF_INTEN_SET) | VPIF_INTEN_FRAME_CH3), + VPIF_INTEN_SET); + } + spin_unlock_irqrestore(&vpif_lock, flags); +} + +/* inline function to enable raw vbi data for channel2 */ +static inline void channel2_raw_enable(int enable, u8 index) +{ + u32 mask; + + if (1 == index) + mask = VPIF_CH_VANC_EN_BIT; + else + mask = VPIF_CH_HANC_EN_BIT; + + if (enable) + vpif_set_bit(VPIF_CH2_CTRL, mask); + else + vpif_clr_bit(VPIF_CH2_CTRL, mask); +} + +/* inline function to enable raw vbi data for channel3*/ +static inline void channel3_raw_enable(int enable, u8 index) +{ + u32 mask; + + if (1 == index) + mask = VPIF_CH_VANC_EN_BIT; + else + mask = VPIF_CH_HANC_EN_BIT; + + if (enable) + vpif_set_bit(VPIF_CH3_CTRL, mask); + else + vpif_clr_bit(VPIF_CH3_CTRL, mask); +} + +/* function to enable clipping (for both active and blanking regions) on ch 2 */ +static inline void channel2_clipping_enable(int enable) +{ + if (enable) { + vpif_set_bit(VPIF_CH2_CTRL, VPIF_CH2_CLIP_ANC_EN); + vpif_set_bit(VPIF_CH2_CTRL, VPIF_CH2_CLIP_ACTIVE_EN); + } else { + vpif_clr_bit(VPIF_CH2_CTRL, VPIF_CH2_CLIP_ANC_EN); + vpif_clr_bit(VPIF_CH2_CTRL, VPIF_CH2_CLIP_ACTIVE_EN); + } +} + +/* function to enable clipping (for both active and blanking regions) on ch 3 */ +static inline void channel3_clipping_enable(int enable) +{ + if (enable) { + vpif_set_bit(VPIF_CH3_CTRL, VPIF_CH3_CLIP_ANC_EN); + vpif_set_bit(VPIF_CH3_CTRL, VPIF_CH3_CLIP_ACTIVE_EN); + } else { + vpif_clr_bit(VPIF_CH3_CTRL, VPIF_CH3_CLIP_ANC_EN); + vpif_clr_bit(VPIF_CH3_CTRL, VPIF_CH3_CLIP_ACTIVE_EN); + } +} + +/* inline function to set buffer addresses in case of Y/C non mux mode */ +static inline void ch2_set_video_buf_addr_yc_nmux(unsigned long top_strt_luma, + unsigned long btm_strt_luma, + unsigned long top_strt_chroma, + unsigned long btm_strt_chroma) +{ + regw(top_strt_luma, VPIF_CH2_TOP_STRT_ADD_LUMA); + regw(btm_strt_luma, VPIF_CH2_BTM_STRT_ADD_LUMA); + regw(top_strt_chroma, VPIF_CH3_TOP_STRT_ADD_CHROMA); + regw(btm_strt_chroma, VPIF_CH3_BTM_STRT_ADD_CHROMA); +} + +/* inline function to set 
buffer addresses in VPIF registers for video data */ +static inline void ch2_set_video_buf_addr(unsigned long top_strt_luma, + unsigned long btm_strt_luma, + unsigned long top_strt_chroma, + unsigned long btm_strt_chroma) +{ + regw(top_strt_luma, VPIF_CH2_TOP_STRT_ADD_LUMA); + regw(btm_strt_luma, VPIF_CH2_BTM_STRT_ADD_LUMA); + regw(top_strt_chroma, VPIF_CH2_TOP_STRT_ADD_CHROMA); + regw(btm_strt_chroma, VPIF_CH2_BTM_STRT_ADD_CHROMA); +} + +static inline void ch3_set_video_buf_addr(unsigned long top_strt_luma, + unsigned long btm_strt_luma, + unsigned long top_strt_chroma, + unsigned long btm_strt_chroma) +{ + regw(top_strt_luma, VPIF_CH3_TOP_STRT_ADD_LUMA); + regw(btm_strt_luma, VPIF_CH3_BTM_STRT_ADD_LUMA); + regw(top_strt_chroma, VPIF_CH3_TOP_STRT_ADD_CHROMA); + regw(btm_strt_chroma, VPIF_CH3_BTM_STRT_ADD_CHROMA); +} + +/* inline function to set buffer addresses in VPIF registers for vbi data */ +static inline void ch2_set_vbi_addr(unsigned long top_strt_luma, + unsigned long btm_strt_luma, + unsigned long top_strt_chroma, + unsigned long btm_strt_chroma) +{ + regw(top_strt_luma, VPIF_CH2_TOP_STRT_ADD_VANC); + regw(btm_strt_luma, VPIF_CH2_BTM_STRT_ADD_VANC); +} + +static inline void ch3_set_vbi_addr(unsigned long top_strt_luma, + unsigned long btm_strt_luma, + unsigned long top_strt_chroma, + unsigned long btm_strt_chroma) +{ + regw(top_strt_luma, VPIF_CH3_TOP_STRT_ADD_VANC); + regw(btm_strt_luma, VPIF_CH3_BTM_STRT_ADD_VANC); +} + +static inline int vpif_intr_status(int channel) +{ + int status = 0; + int mask; + + if (channel < 0 || channel > 3) + return 0; + + mask = 1 << channel; + status = regr(VPIF_STATUS) & mask; + regw(status, VPIF_STATUS_CLR); + + return status; +} + +#define VPIF_MAX_NAME (30) + +/* This structure will store size parameters as per the mode selected by user */ +struct vpif_channel_config_params { + char name[VPIF_MAX_NAME]; /* Name of the mode */ + u16 width; /* Indicates width of the image */ + u16 height; /* Indicates height of the image */ + u8 frm_fmt; /* Interlaced (0) or progressive (1) */ + u8 ycmux_mode; /* This mode requires one (0) or two (1) + channels */ + u16 eav2sav; /* length of eav 2 sav */ + u16 sav2eav; /* length of sav 2 eav */ + u16 l1, l3, l5, l7, l9, l11; /* Other parameter configurations */ + u16 vsize; /* Vertical size of the image */ + u8 capture_format; /* Indicates whether capture format + * is in BT or in CCD/CMOS */ + u8 vbi_supported; /* Indicates whether this mode + * supports capturing vbi or not */ + u8 hd_sd; /* HDTV (1) or SDTV (0) format */ + v4l2_std_id stdid; /* SDTV format */ + struct v4l2_dv_timings dv_timings; /* HDTV format */ +}; + +extern const unsigned int vpif_ch_params_count; +extern const struct vpif_channel_config_params vpif_ch_params[]; + +struct vpif_video_params; +struct vpif_params; +struct vpif_vbi_params; + +int vpif_set_video_params(struct vpif_params *vpifparams, u8 channel_id); +void vpif_set_vbi_display_params(struct vpif_vbi_params *vbiparams, + u8 channel_id); +int vpif_channel_getfid(u8 channel_id); + +enum data_size { + _8BITS = 0, + _10BITS, + _12BITS, +}; + +/* Structure for vpif parameters for raw vbi data */ +struct vpif_vbi_params { + __u32 hstart0; /* Horizontal start of raw vbi data for first field */ + __u32 vstart0; /* Vertical start of raw vbi data for first field */ + __u32 hsize0; /* Horizontal size of raw vbi data for first field */ + __u32 vsize0; /* Vertical size of raw vbi data for first field */ + __u32 hstart1; /* Horizontal start of raw vbi data for second field */ + __u32 vstart1; 
/* Vertical start of raw vbi data for second field */ + __u32 hsize1; /* Horizontal size of raw vbi data for second field */ + __u32 vsize1; /* Vertical size of raw vbi data for second field */ +}; + +/* structure for vpif parameters */ +struct vpif_video_params { + __u8 storage_mode; /* Indicates field or frame mode */ + unsigned long hpitch; + v4l2_std_id stdid; +}; + +struct vpif_params { + struct vpif_interface iface; + struct vpif_video_params video_params; + struct vpif_channel_config_params std_info; + union param { + struct vpif_vbi_params vbi_params; + enum data_size data_sz; + } params; +}; + +#endif /* End of #ifndef VPIF_H */ diff --git a/drivers/media/platform/ti/davinci/vpif_capture.c b/drivers/media/platform/ti/davinci/vpif_capture.c new file mode 100644 index 0000000000..99fae8830c --- /dev/null +++ b/drivers/media/platform/ti/davinci/vpif_capture.c @@ -0,0 +1,1820 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (C) 2009 Texas Instruments Inc + * Copyright (C) 2014 Lad, Prabhakar + * + * TODO : add support for VBI & HBI data service + * add static buffer allocation + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include + +#include "vpif.h" +#include "vpif_capture.h" + +MODULE_DESCRIPTION("TI DaVinci VPIF Capture driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(VPIF_CAPTURE_VERSION); + +#define vpif_err(fmt, arg...) v4l2_err(&vpif_obj.v4l2_dev, fmt, ## arg) +#define vpif_dbg(level, debug, fmt, arg...) \ + v4l2_dbg(level, debug, &vpif_obj.v4l2_dev, fmt, ## arg) + +static int debug = 1; + +module_param(debug, int, 0644); + +MODULE_PARM_DESC(debug, "Debug level 0-1"); + +#define VPIF_DRIVER_NAME "vpif_capture" +MODULE_ALIAS("platform:" VPIF_DRIVER_NAME); + +/* global variables */ +static struct vpif_device vpif_obj = { {NULL} }; +static struct device *vpif_dev; +static void vpif_calculate_offsets(struct channel_obj *ch); +static void vpif_config_addr(struct channel_obj *ch, int muxmode); + +static u8 channel_first_int[VPIF_NUMBER_OF_OBJECTS][2] = { {1, 1} }; + +/* Is set to 1 in case of SDTV formats, 2 in case of HDTV formats. */ +static int ycmux_mode; + +static inline +struct vpif_cap_buffer *to_vpif_buffer(struct vb2_v4l2_buffer *vb) +{ + return container_of(vb, struct vpif_cap_buffer, vb); +} + +/** + * vpif_buffer_prepare : callback function for buffer prepare + * @vb: ptr to vb2_buffer + * + * This is the callback function for buffer prepare when vb2_qbuf() + * function is called. 
The buffer is prepared and user space virtual address + * or user address is converted into physical address + */ +static int vpif_buffer_prepare(struct vb2_buffer *vb) +{ + struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); + struct vb2_queue *q = vb->vb2_queue; + struct channel_obj *ch = vb2_get_drv_priv(q); + struct common_obj *common; + unsigned long addr; + + vpif_dbg(2, debug, "vpif_buffer_prepare\n"); + + common = &ch->common[VPIF_VIDEO_INDEX]; + + vb2_set_plane_payload(vb, 0, common->fmt.fmt.pix.sizeimage); + if (vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0)) + return -EINVAL; + + vbuf->field = common->fmt.fmt.pix.field; + + addr = vb2_dma_contig_plane_dma_addr(vb, 0); + if (!IS_ALIGNED((addr + common->ytop_off), 8) || + !IS_ALIGNED((addr + common->ybtm_off), 8) || + !IS_ALIGNED((addr + common->ctop_off), 8) || + !IS_ALIGNED((addr + common->cbtm_off), 8)) { + vpif_dbg(1, debug, "offset is not aligned\n"); + return -EINVAL; + } + + return 0; +} + +/** + * vpif_buffer_queue_setup : Callback function for buffer setup. + * @vq: vb2_queue ptr + * @nbuffers: ptr to number of buffers requested by application + * @nplanes: contains number of distinct video planes needed to hold a frame + * @sizes: contains the size (in bytes) of each plane. + * @alloc_devs: ptr to allocation context + * + * This callback function is called when reqbuf() is called to adjust + * the buffer count and buffer size + */ +static int vpif_buffer_queue_setup(struct vb2_queue *vq, + unsigned int *nbuffers, unsigned int *nplanes, + unsigned int sizes[], struct device *alloc_devs[]) +{ + struct channel_obj *ch = vb2_get_drv_priv(vq); + struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; + unsigned size = common->fmt.fmt.pix.sizeimage; + + vpif_dbg(2, debug, "vpif_buffer_setup\n"); + + if (*nplanes) { + if (sizes[0] < size) + return -EINVAL; + size = sizes[0]; + } + + if (vq->num_buffers + *nbuffers < 3) + *nbuffers = 3 - vq->num_buffers; + + *nplanes = 1; + sizes[0] = size; + + /* Calculate the offset for Y and C data in the buffer */ + vpif_calculate_offsets(ch); + + return 0; +} + +/** + * vpif_buffer_queue : Callback function to add buffer to DMA queue + * @vb: ptr to vb2_buffer + */ +static void vpif_buffer_queue(struct vb2_buffer *vb) +{ + struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); + struct channel_obj *ch = vb2_get_drv_priv(vb->vb2_queue); + struct vpif_cap_buffer *buf = to_vpif_buffer(vbuf); + struct common_obj *common; + unsigned long flags; + + common = &ch->common[VPIF_VIDEO_INDEX]; + + vpif_dbg(2, debug, "vpif_buffer_queue\n"); + + spin_lock_irqsave(&common->irqlock, flags); + /* add the buffer to the DMA queue */ + list_add_tail(&buf->list, &common->dma_queue); + spin_unlock_irqrestore(&common->irqlock, flags); +} + +/** + * vpif_start_streaming : Starts the DMA engine for streaming + * @vq: ptr to vb2_buffer + * @count: number of buffers + */ +static int vpif_start_streaming(struct vb2_queue *vq, unsigned int count) +{ + struct vpif_capture_config *vpif_config_data = + vpif_dev->platform_data; + struct channel_obj *ch = vb2_get_drv_priv(vq); + struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; + struct vpif_params *vpif = &ch->vpifparams; + struct vpif_cap_buffer *buf, *tmp; + unsigned long addr, flags; + int ret; + + /* Initialize field_id */ + ch->field_id = 0; + + /* configure 1 or 2 channel mode */ + if (vpif_config_data->setup_input_channel_mode) { + ret = vpif_config_data-> + setup_input_channel_mode(vpif->std_info.ycmux_mode); + if (ret < 0) { + vpif_dbg(1, 
debug, "can't set vpif channel mode\n"); + goto err; + } + } + + ret = v4l2_subdev_call(ch->sd, video, s_stream, 1); + if (ret && ret != -ENOIOCTLCMD && ret != -ENODEV) { + vpif_dbg(1, debug, "stream on failed in subdev\n"); + goto err; + } + + /* Call vpif_set_params function to set the parameters and addresses */ + ret = vpif_set_video_params(vpif, ch->channel_id); + if (ret < 0) { + vpif_dbg(1, debug, "can't set video params\n"); + goto err; + } + + ycmux_mode = ret; + vpif_config_addr(ch, ret); + + /* Get the next frame from the buffer queue */ + spin_lock_irqsave(&common->irqlock, flags); + common->cur_frm = common->next_frm = list_entry(common->dma_queue.next, + struct vpif_cap_buffer, list); + /* Remove buffer from the buffer queue */ + list_del(&common->cur_frm->list); + spin_unlock_irqrestore(&common->irqlock, flags); + + addr = vb2_dma_contig_plane_dma_addr(&common->cur_frm->vb.vb2_buf, 0); + + common->set_addr(addr + common->ytop_off, + addr + common->ybtm_off, + addr + common->ctop_off, + addr + common->cbtm_off); + + /** + * Set interrupt for both the fields in VPIF Register enable channel in + * VPIF register + */ + channel_first_int[VPIF_VIDEO_INDEX][ch->channel_id] = 1; + if (VPIF_CHANNEL0_VIDEO == ch->channel_id) { + channel0_intr_assert(); + channel0_intr_enable(1); + enable_channel0(1); + } + if (VPIF_CHANNEL1_VIDEO == ch->channel_id || + ycmux_mode == 2) { + channel1_intr_assert(); + channel1_intr_enable(1); + enable_channel1(1); + } + + return 0; + +err: + spin_lock_irqsave(&common->irqlock, flags); + list_for_each_entry_safe(buf, tmp, &common->dma_queue, list) { + list_del(&buf->list); + vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED); + } + spin_unlock_irqrestore(&common->irqlock, flags); + + return ret; +} + +/** + * vpif_stop_streaming : Stop the DMA engine + * @vq: ptr to vb2_queue + * + * This callback stops the DMA engine and any remaining buffers + * in the DMA queue are released. 
+ */ +static void vpif_stop_streaming(struct vb2_queue *vq) +{ + struct channel_obj *ch = vb2_get_drv_priv(vq); + struct common_obj *common; + unsigned long flags; + int ret; + + common = &ch->common[VPIF_VIDEO_INDEX]; + + /* Disable channel as per its device type and channel id */ + if (VPIF_CHANNEL0_VIDEO == ch->channel_id) { + enable_channel0(0); + channel0_intr_enable(0); + } + if (VPIF_CHANNEL1_VIDEO == ch->channel_id || + ycmux_mode == 2) { + enable_channel1(0); + channel1_intr_enable(0); + } + + ycmux_mode = 0; + + ret = v4l2_subdev_call(ch->sd, video, s_stream, 0); + if (ret && ret != -ENOIOCTLCMD && ret != -ENODEV) + vpif_dbg(1, debug, "stream off failed in subdev\n"); + + /* release all active buffers */ + if (common->cur_frm == common->next_frm) { + vb2_buffer_done(&common->cur_frm->vb.vb2_buf, + VB2_BUF_STATE_ERROR); + } else { + if (common->cur_frm) + vb2_buffer_done(&common->cur_frm->vb.vb2_buf, + VB2_BUF_STATE_ERROR); + if (common->next_frm) + vb2_buffer_done(&common->next_frm->vb.vb2_buf, + VB2_BUF_STATE_ERROR); + } + + spin_lock_irqsave(&common->irqlock, flags); + while (!list_empty(&common->dma_queue)) { + common->next_frm = list_entry(common->dma_queue.next, + struct vpif_cap_buffer, list); + list_del(&common->next_frm->list); + vb2_buffer_done(&common->next_frm->vb.vb2_buf, + VB2_BUF_STATE_ERROR); + } + spin_unlock_irqrestore(&common->irqlock, flags); +} + +static const struct vb2_ops video_qops = { + .queue_setup = vpif_buffer_queue_setup, + .buf_prepare = vpif_buffer_prepare, + .start_streaming = vpif_start_streaming, + .stop_streaming = vpif_stop_streaming, + .buf_queue = vpif_buffer_queue, + .wait_prepare = vb2_ops_wait_prepare, + .wait_finish = vb2_ops_wait_finish, +}; + +/** + * vpif_process_buffer_complete: process a completed buffer + * @common: ptr to common channel object + * + * This function time stamp the buffer and mark it as DONE. It also + * wake up any process waiting on the QUEUE and set the next buffer + * as current + */ +static void vpif_process_buffer_complete(struct common_obj *common) +{ + common->cur_frm->vb.vb2_buf.timestamp = ktime_get_ns(); + vb2_buffer_done(&common->cur_frm->vb.vb2_buf, VB2_BUF_STATE_DONE); + /* Make curFrm pointing to nextFrm */ + common->cur_frm = common->next_frm; +} + +/** + * vpif_schedule_next_buffer: set next buffer address for capture + * @common : ptr to common channel object + * + * This function will get next buffer from the dma queue and + * set the buffer address in the vpif register for capture. 
+ * the buffer is marked active + */ +static void vpif_schedule_next_buffer(struct common_obj *common) +{ + unsigned long addr = 0; + + spin_lock(&common->irqlock); + common->next_frm = list_entry(common->dma_queue.next, + struct vpif_cap_buffer, list); + /* Remove that buffer from the buffer queue */ + list_del(&common->next_frm->list); + spin_unlock(&common->irqlock); + addr = vb2_dma_contig_plane_dma_addr(&common->next_frm->vb.vb2_buf, 0); + + /* Set top and bottom field addresses in VPIF registers */ + common->set_addr(addr + common->ytop_off, + addr + common->ybtm_off, + addr + common->ctop_off, + addr + common->cbtm_off); +} + +/** + * vpif_channel_isr : ISR handler for vpif capture + * @irq: irq number + * @dev_id: dev_id ptr + * + * It changes status of the captured buffer, takes next buffer from the queue + * and sets its address in VPIF registers + */ +static irqreturn_t vpif_channel_isr(int irq, void *dev_id) +{ + struct vpif_device *dev = &vpif_obj; + struct common_obj *common; + struct channel_obj *ch; + int channel_id; + int fid = -1, i; + + channel_id = *(int *)(dev_id); + if (!vpif_intr_status(channel_id)) + return IRQ_NONE; + + ch = dev->dev[channel_id]; + + for (i = 0; i < VPIF_NUMBER_OF_OBJECTS; i++) { + common = &ch->common[i]; + /* skip If streaming is not started in this channel */ + /* Check the field format */ + if (1 == ch->vpifparams.std_info.frm_fmt || + common->fmt.fmt.pix.field == V4L2_FIELD_NONE) { + /* Progressive mode */ + spin_lock(&common->irqlock); + if (list_empty(&common->dma_queue)) { + spin_unlock(&common->irqlock); + continue; + } + spin_unlock(&common->irqlock); + + if (!channel_first_int[i][channel_id]) + vpif_process_buffer_complete(common); + + channel_first_int[i][channel_id] = 0; + + vpif_schedule_next_buffer(common); + + + channel_first_int[i][channel_id] = 0; + } else { + /** + * Interlaced mode. 
If it is first interrupt, ignore + * it + */ + if (channel_first_int[i][channel_id]) { + channel_first_int[i][channel_id] = 0; + continue; + } + if (0 == i) { + ch->field_id ^= 1; + /* Get field id from VPIF registers */ + fid = vpif_channel_getfid(ch->channel_id); + if (fid != ch->field_id) { + /** + * If field id does not match stored + * field id, make them in sync + */ + if (0 == fid) + ch->field_id = fid; + return IRQ_HANDLED; + } + } + /* device field id and local field id are in sync */ + if (0 == fid) { + /* this is even field */ + if (common->cur_frm == common->next_frm) + continue; + + /* mark the current buffer as done */ + vpif_process_buffer_complete(common); + } else if (1 == fid) { + /* odd field */ + spin_lock(&common->irqlock); + if (list_empty(&common->dma_queue) || + (common->cur_frm != common->next_frm)) { + spin_unlock(&common->irqlock); + continue; + } + spin_unlock(&common->irqlock); + + vpif_schedule_next_buffer(common); + } + } + } + return IRQ_HANDLED; +} + +/** + * vpif_update_std_info() - update standard related info + * @ch: ptr to channel object + * + * For a given standard selected by application, update values + * in the device data structures + */ +static int vpif_update_std_info(struct channel_obj *ch) +{ + struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; + struct vpif_params *vpifparams = &ch->vpifparams; + const struct vpif_channel_config_params *config; + struct vpif_channel_config_params *std_info = &vpifparams->std_info; + struct video_obj *vid_ch = &ch->video; + int index; + struct v4l2_pix_format *pixfmt = &common->fmt.fmt.pix; + + vpif_dbg(2, debug, "vpif_update_std_info\n"); + + /* + * if called after try_fmt or g_fmt, there will already be a size + * so use that by default. + */ + if (pixfmt->width && pixfmt->height) { + if (pixfmt->field == V4L2_FIELD_ANY || + pixfmt->field == V4L2_FIELD_NONE) + pixfmt->field = V4L2_FIELD_NONE; + + vpifparams->iface.if_type = VPIF_IF_BT656; + if (pixfmt->pixelformat == V4L2_PIX_FMT_SGRBG10 || + pixfmt->pixelformat == V4L2_PIX_FMT_SBGGR8) + vpifparams->iface.if_type = VPIF_IF_RAW_BAYER; + + if (pixfmt->pixelformat == V4L2_PIX_FMT_SGRBG10) + vpifparams->params.data_sz = 1; /* 10 bits/pixel. */ + + /* + * For raw formats from camera sensors, we don't need + * the std_info from table lookup, so nothing else to do here. 
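+ * std_info is simply cleared below and capture_format is flagged
+ * as raw (CCD/CMOS) mode.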
+ */ + if (vpifparams->iface.if_type == VPIF_IF_RAW_BAYER) { + memset(std_info, 0, sizeof(struct vpif_channel_config_params)); + vpifparams->std_info.capture_format = 1; /* CCD/raw mode */ + return 0; + } + } + + for (index = 0; index < vpif_ch_params_count; index++) { + config = &vpif_ch_params[index]; + if (config->hd_sd == 0) { + vpif_dbg(2, debug, "SD format\n"); + if (config->stdid & vid_ch->stdid) { + memcpy(std_info, config, sizeof(*config)); + break; + } + } else { + vpif_dbg(2, debug, "HD format\n"); + if (!memcmp(&config->dv_timings, &vid_ch->dv_timings, + sizeof(vid_ch->dv_timings))) { + memcpy(std_info, config, sizeof(*config)); + break; + } + } + } + + /* standard not found */ + if (index == vpif_ch_params_count) + return -EINVAL; + + common->fmt.fmt.pix.width = std_info->width; + common->width = std_info->width; + common->fmt.fmt.pix.height = std_info->height; + common->height = std_info->height; + common->fmt.fmt.pix.sizeimage = common->height * common->width * 2; + common->fmt.fmt.pix.bytesperline = std_info->width; + vpifparams->video_params.hpitch = std_info->width; + vpifparams->video_params.storage_mode = std_info->frm_fmt; + + if (vid_ch->stdid) + common->fmt.fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M; + else + common->fmt.fmt.pix.colorspace = V4L2_COLORSPACE_REC709; + + if (ch->vpifparams.std_info.frm_fmt) + common->fmt.fmt.pix.field = V4L2_FIELD_NONE; + else + common->fmt.fmt.pix.field = V4L2_FIELD_INTERLACED; + + if (ch->vpifparams.iface.if_type == VPIF_IF_RAW_BAYER) + common->fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_SBGGR8; + else + common->fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_NV16; + + common->fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + + return 0; +} + +/** + * vpif_calculate_offsets : This function calculates buffers offsets + * @ch : ptr to channel object + * + * This function calculates buffer offsets for Y and C in the top and + * bottom field + */ +static void vpif_calculate_offsets(struct channel_obj *ch) +{ + unsigned int hpitch, sizeimage; + struct video_obj *vid_ch = &(ch->video); + struct vpif_params *vpifparams = &ch->vpifparams; + struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; + enum v4l2_field field = common->fmt.fmt.pix.field; + + vpif_dbg(2, debug, "vpif_calculate_offsets\n"); + + if (V4L2_FIELD_ANY == field) { + if (vpifparams->std_info.frm_fmt) + vid_ch->buf_field = V4L2_FIELD_NONE; + else + vid_ch->buf_field = V4L2_FIELD_INTERLACED; + } else + vid_ch->buf_field = common->fmt.fmt.pix.field; + + sizeimage = common->fmt.fmt.pix.sizeimage; + + hpitch = common->fmt.fmt.pix.bytesperline; + + if ((V4L2_FIELD_NONE == vid_ch->buf_field) || + (V4L2_FIELD_INTERLACED == vid_ch->buf_field)) { + /* Calculate offsets for Y top, Y Bottom, C top and C Bottom */ + common->ytop_off = 0; + common->ybtm_off = hpitch; + common->ctop_off = sizeimage / 2; + common->cbtm_off = sizeimage / 2 + hpitch; + } else if (V4L2_FIELD_SEQ_TB == vid_ch->buf_field) { + /* Calculate offsets for Y top, Y Bottom, C top and C Bottom */ + common->ytop_off = 0; + common->ybtm_off = sizeimage / 4; + common->ctop_off = sizeimage / 2; + common->cbtm_off = common->ctop_off + sizeimage / 4; + } else if (V4L2_FIELD_SEQ_BT == vid_ch->buf_field) { + /* Calculate offsets for Y top, Y Bottom, C top and C Bottom */ + common->ybtm_off = 0; + common->ytop_off = sizeimage / 4; + common->cbtm_off = sizeimage / 2; + common->ctop_off = common->cbtm_off + sizeimage / 4; + } + if ((V4L2_FIELD_NONE == vid_ch->buf_field) || + (V4L2_FIELD_INTERLACED == vid_ch->buf_field)) + 
vpifparams->video_params.storage_mode = 1; + else + vpifparams->video_params.storage_mode = 0; + + if (1 == vpifparams->std_info.frm_fmt) + vpifparams->video_params.hpitch = + common->fmt.fmt.pix.bytesperline; + else { + if ((field == V4L2_FIELD_ANY) + || (field == V4L2_FIELD_INTERLACED)) + vpifparams->video_params.hpitch = + common->fmt.fmt.pix.bytesperline * 2; + else + vpifparams->video_params.hpitch = + common->fmt.fmt.pix.bytesperline; + } + + ch->vpifparams.video_params.stdid = vpifparams->std_info.stdid; +} + +/** + * vpif_config_addr() - function to configure buffer address in vpif + * @ch: channel ptr + * @muxmode: channel mux mode + */ +static void vpif_config_addr(struct channel_obj *ch, int muxmode) +{ + struct common_obj *common; + + vpif_dbg(2, debug, "vpif_config_addr\n"); + + common = &(ch->common[VPIF_VIDEO_INDEX]); + + if (VPIF_CHANNEL1_VIDEO == ch->channel_id) + common->set_addr = ch1_set_video_buf_addr; + else if (2 == muxmode) + common->set_addr = ch0_set_video_buf_addr_yc_nmux; + else + common->set_addr = ch0_set_video_buf_addr; +} + +/** + * vpif_input_to_subdev() - Maps input to sub device + * @vpif_cfg: global config ptr + * @chan_cfg: channel config ptr + * @input_index: Given input index from application + * + * lookup the sub device information for a given input index. + * we report all the inputs to application. inputs table also + * has sub device name for the each input + */ +static int vpif_input_to_subdev( + struct vpif_capture_config *vpif_cfg, + struct vpif_capture_chan_config *chan_cfg, + int input_index) +{ + struct vpif_subdev_info *subdev_info; + const char *subdev_name; + int i; + + vpif_dbg(2, debug, "vpif_input_to_subdev\n"); + + if (!chan_cfg) + return -1; + if (input_index >= chan_cfg->input_count) + return -1; + subdev_name = chan_cfg->inputs[input_index].subdev_name; + if (!subdev_name) + return -1; + + /* loop through the sub device list to get the sub device info */ + for (i = 0; i < vpif_cfg->subdev_count; i++) { + subdev_info = &vpif_cfg->subdev_info[i]; + if (subdev_info && !strcmp(subdev_info->name, subdev_name)) + return i; + } + return -1; +} + +/** + * vpif_set_input() - Select an input + * @vpif_cfg: global config ptr + * @ch: channel + * @index: Given input index from application + * + * Select the given input. 
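+ * The matching sub device, if any, is looked up, the board input path
+ * and subdev routing are configured, and the channel interface
+ * parameters and tvnorms are taken from the input table.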
+ */ +static int vpif_set_input( + struct vpif_capture_config *vpif_cfg, + struct channel_obj *ch, + int index) +{ + struct vpif_capture_chan_config *chan_cfg = + &vpif_cfg->chan_config[ch->channel_id]; + struct vpif_subdev_info *subdev_info = NULL; + struct v4l2_subdev *sd = NULL; + u32 input = 0, output = 0; + int sd_index; + int ret; + + sd_index = vpif_input_to_subdev(vpif_cfg, chan_cfg, index); + if (sd_index >= 0) { + sd = vpif_obj.sd[sd_index]; + subdev_info = &vpif_cfg->subdev_info[sd_index]; + } else { + /* no subdevice, no input to setup */ + return 0; + } + + /* first setup input path from sub device to vpif */ + if (sd && vpif_cfg->setup_input_path) { + ret = vpif_cfg->setup_input_path(ch->channel_id, + subdev_info->name); + if (ret < 0) { + vpif_dbg(1, debug, "couldn't setup input path for the" \ + " sub device %s, for input index %d\n", + subdev_info->name, index); + return ret; + } + } + + if (sd) { + input = chan_cfg->inputs[index].input_route; + output = chan_cfg->inputs[index].output_route; + ret = v4l2_subdev_call(sd, video, s_routing, + input, output, 0); + if (ret < 0 && ret != -ENOIOCTLCMD) { + vpif_dbg(1, debug, "Failed to set input\n"); + return ret; + } + } + ch->input_idx = index; + ch->sd = sd; + /* copy interface parameters to vpif */ + ch->vpifparams.iface = chan_cfg->vpif_if; + + /* update tvnorms from the sub device input info */ + ch->video_dev.tvnorms = chan_cfg->inputs[index].input.std; + return 0; +} + +/** + * vpif_querystd() - querystd handler + * @file: file ptr + * @priv: file handle + * @std_id: ptr to std id + * + * This function is called to detect standard at the selected input + */ +static int vpif_querystd(struct file *file, void *priv, v4l2_std_id *std_id) +{ + struct video_device *vdev = video_devdata(file); + struct channel_obj *ch = video_get_drvdata(vdev); + int ret; + + vpif_dbg(2, debug, "vpif_querystd\n"); + + /* Call querystd function of decoder device */ + ret = v4l2_subdev_call(ch->sd, video, querystd, std_id); + + if (ret == -ENOIOCTLCMD || ret == -ENODEV) + return -ENODATA; + if (ret) { + vpif_dbg(1, debug, "Failed to query standard for sub devices\n"); + return ret; + } + + return 0; +} + +/** + * vpif_g_std() - get STD handler + * @file: file ptr + * @priv: file handle + * @std: ptr to std id + */ +static int vpif_g_std(struct file *file, void *priv, v4l2_std_id *std) +{ + struct vpif_capture_config *config = vpif_dev->platform_data; + struct video_device *vdev = video_devdata(file); + struct channel_obj *ch = video_get_drvdata(vdev); + struct vpif_capture_chan_config *chan_cfg; + struct v4l2_input input; + + vpif_dbg(2, debug, "vpif_g_std\n"); + + if (!config->chan_config[ch->channel_id].inputs) + return -ENODATA; + + chan_cfg = &config->chan_config[ch->channel_id]; + input = chan_cfg->inputs[ch->input_idx].input; + if (input.capabilities != V4L2_IN_CAP_STD) + return -ENODATA; + + *std = ch->video.stdid; + return 0; +} + +/** + * vpif_s_std() - set STD handler + * @file: file ptr + * @priv: file handle + * @std_id: ptr to std id + */ +static int vpif_s_std(struct file *file, void *priv, v4l2_std_id std_id) +{ + struct vpif_capture_config *config = vpif_dev->platform_data; + struct video_device *vdev = video_devdata(file); + struct channel_obj *ch = video_get_drvdata(vdev); + struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; + struct vpif_capture_chan_config *chan_cfg; + struct v4l2_input input; + int ret; + + vpif_dbg(2, debug, "vpif_s_std\n"); + + if (!config->chan_config[ch->channel_id].inputs) + return -ENODATA; + + 
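+ /* S_STD is only supported on inputs that advertise V4L2_IN_CAP_STD. */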
chan_cfg = &config->chan_config[ch->channel_id]; + input = chan_cfg->inputs[ch->input_idx].input; + if (input.capabilities != V4L2_IN_CAP_STD) + return -ENODATA; + + if (vb2_is_busy(&common->buffer_queue)) + return -EBUSY; + + /* Call encoder subdevice function to set the standard */ + ch->video.stdid = std_id; + memset(&ch->video.dv_timings, 0, sizeof(ch->video.dv_timings)); + + /* Get the information about the standard */ + if (vpif_update_std_info(ch)) { + vpif_err("Error getting the standard info\n"); + return -EINVAL; + } + + /* set standard in the sub device */ + ret = v4l2_subdev_call(ch->sd, video, s_std, std_id); + if (ret && ret != -ENOIOCTLCMD && ret != -ENODEV) { + vpif_dbg(1, debug, "Failed to set standard for sub devices\n"); + return ret; + } + return 0; +} + +/** + * vpif_enum_input() - ENUMINPUT handler + * @file: file ptr + * @priv: file handle + * @input: ptr to input structure + */ +static int vpif_enum_input(struct file *file, void *priv, + struct v4l2_input *input) +{ + + struct vpif_capture_config *config = vpif_dev->platform_data; + struct video_device *vdev = video_devdata(file); + struct channel_obj *ch = video_get_drvdata(vdev); + struct vpif_capture_chan_config *chan_cfg; + + chan_cfg = &config->chan_config[ch->channel_id]; + + if (input->index >= chan_cfg->input_count) + return -EINVAL; + + memcpy(input, &chan_cfg->inputs[input->index].input, + sizeof(*input)); + return 0; +} + +/** + * vpif_g_input() - Get INPUT handler + * @file: file ptr + * @priv: file handle + * @index: ptr to input index + */ +static int vpif_g_input(struct file *file, void *priv, unsigned int *index) +{ + struct video_device *vdev = video_devdata(file); + struct channel_obj *ch = video_get_drvdata(vdev); + + *index = ch->input_idx; + return 0; +} + +/** + * vpif_s_input() - Set INPUT handler + * @file: file ptr + * @priv: file handle + * @index: input index + */ +static int vpif_s_input(struct file *file, void *priv, unsigned int index) +{ + struct vpif_capture_config *config = vpif_dev->platform_data; + struct video_device *vdev = video_devdata(file); + struct channel_obj *ch = video_get_drvdata(vdev); + struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; + struct vpif_capture_chan_config *chan_cfg; + + chan_cfg = &config->chan_config[ch->channel_id]; + + if (index >= chan_cfg->input_count) + return -EINVAL; + + if (vb2_is_busy(&common->buffer_queue)) + return -EBUSY; + + return vpif_set_input(config, ch, index); +} + +/** + * vpif_enum_fmt_vid_cap() - ENUM_FMT handler + * @file: file ptr + * @priv: file handle + * @fmt: ptr to V4L2 format descriptor + */ +static int vpif_enum_fmt_vid_cap(struct file *file, void *priv, + struct v4l2_fmtdesc *fmt) +{ + struct video_device *vdev = video_devdata(file); + struct channel_obj *ch = video_get_drvdata(vdev); + + if (fmt->index != 0) { + vpif_dbg(1, debug, "Invalid format index\n"); + return -EINVAL; + } + + /* Fill in the information about format */ + if (ch->vpifparams.iface.if_type == VPIF_IF_RAW_BAYER) + fmt->pixelformat = V4L2_PIX_FMT_SBGGR8; + else + fmt->pixelformat = V4L2_PIX_FMT_NV16; + return 0; +} + +/** + * vpif_try_fmt_vid_cap() - TRY_FMT handler + * @file: file ptr + * @priv: file handle + * @fmt: ptr to v4l2 format structure + */ +static int vpif_try_fmt_vid_cap(struct file *file, void *priv, + struct v4l2_format *fmt) +{ + struct video_device *vdev = video_devdata(file); + struct channel_obj *ch = video_get_drvdata(vdev); + struct v4l2_pix_format *pixfmt = &fmt->fmt.pix; + struct common_obj *common = 
&(ch->common[VPIF_VIDEO_INDEX]); + + common->fmt = *fmt; + vpif_update_std_info(ch); + + pixfmt->field = common->fmt.fmt.pix.field; + pixfmt->colorspace = common->fmt.fmt.pix.colorspace; + pixfmt->bytesperline = common->fmt.fmt.pix.width; + pixfmt->width = common->fmt.fmt.pix.width; + pixfmt->height = common->fmt.fmt.pix.height; + pixfmt->sizeimage = pixfmt->bytesperline * pixfmt->height * 2; + if (pixfmt->pixelformat == V4L2_PIX_FMT_SGRBG10) { + pixfmt->bytesperline = common->fmt.fmt.pix.width * 2; + pixfmt->sizeimage = pixfmt->bytesperline * pixfmt->height; + } + + dev_dbg(vpif_dev, "%s: %d x %d; pitch=%d pixelformat=0x%08x, field=%d, size=%d\n", __func__, + pixfmt->width, pixfmt->height, + pixfmt->bytesperline, pixfmt->pixelformat, + pixfmt->field, pixfmt->sizeimage); + + return 0; +} + + +/** + * vpif_g_fmt_vid_cap() - Set INPUT handler + * @file: file ptr + * @priv: file handle + * @fmt: ptr to v4l2 format structure + */ +static int vpif_g_fmt_vid_cap(struct file *file, void *priv, + struct v4l2_format *fmt) +{ + struct video_device *vdev = video_devdata(file); + struct channel_obj *ch = video_get_drvdata(vdev); + struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; + struct v4l2_pix_format *pix_fmt = &fmt->fmt.pix; + struct v4l2_subdev_format format = { + .which = V4L2_SUBDEV_FORMAT_ACTIVE, + }; + struct v4l2_mbus_framefmt *mbus_fmt = &format.format; + int ret; + + /* Check the validity of the buffer type */ + if (common->fmt.type != fmt->type) + return -EINVAL; + + /* By default, use currently set fmt */ + *fmt = common->fmt; + + /* If subdev has get_fmt, use that to override */ + ret = v4l2_subdev_call(ch->sd, pad, get_fmt, NULL, &format); + if (!ret && mbus_fmt->code) { + v4l2_fill_pix_format(pix_fmt, mbus_fmt); + pix_fmt->bytesperline = pix_fmt->width; + if (mbus_fmt->code == MEDIA_BUS_FMT_SGRBG10_1X10) { + /* e.g. mt9v032 */ + pix_fmt->pixelformat = V4L2_PIX_FMT_SGRBG10; + pix_fmt->bytesperline = pix_fmt->width * 2; + } else if (mbus_fmt->code == MEDIA_BUS_FMT_UYVY8_2X8) { + /* e.g. 
tvp514x */ + pix_fmt->pixelformat = V4L2_PIX_FMT_NV16; + pix_fmt->bytesperline = pix_fmt->width * 2; + } else { + dev_warn(vpif_dev, "%s: Unhandled media-bus format 0x%x\n", + __func__, mbus_fmt->code); + } + pix_fmt->sizeimage = pix_fmt->bytesperline * pix_fmt->height; + dev_dbg(vpif_dev, "%s: %d x %d; pitch=%d, pixelformat=0x%08x, code=0x%x, field=%d, size=%d\n", __func__, + pix_fmt->width, pix_fmt->height, + pix_fmt->bytesperline, pix_fmt->pixelformat, + mbus_fmt->code, pix_fmt->field, pix_fmt->sizeimage); + + common->fmt = *fmt; + vpif_update_std_info(ch); + } + + return 0; +} + +/** + * vpif_s_fmt_vid_cap() - Set FMT handler + * @file: file ptr + * @priv: file handle + * @fmt: ptr to v4l2 format structure + */ +static int vpif_s_fmt_vid_cap(struct file *file, void *priv, + struct v4l2_format *fmt) +{ + struct video_device *vdev = video_devdata(file); + struct channel_obj *ch = video_get_drvdata(vdev); + struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; + int ret; + + vpif_dbg(2, debug, "%s\n", __func__); + + if (vb2_is_busy(&common->buffer_queue)) + return -EBUSY; + + ret = vpif_try_fmt_vid_cap(file, priv, fmt); + if (ret) + return ret; + + /* store the format in the channel object */ + common->fmt = *fmt; + return 0; +} + +/** + * vpif_querycap() - QUERYCAP handler + * @file: file ptr + * @priv: file handle + * @cap: ptr to v4l2_capability structure + */ +static int vpif_querycap(struct file *file, void *priv, + struct v4l2_capability *cap) +{ + struct vpif_capture_config *config = vpif_dev->platform_data; + + strscpy(cap->driver, VPIF_DRIVER_NAME, sizeof(cap->driver)); + strscpy(cap->card, config->card_name, sizeof(cap->card)); + + return 0; +} + +/** + * vpif_enum_dv_timings() - ENUM_DV_TIMINGS handler + * @file: file ptr + * @priv: file handle + * @timings: input timings + */ +static int +vpif_enum_dv_timings(struct file *file, void *priv, + struct v4l2_enum_dv_timings *timings) +{ + struct vpif_capture_config *config = vpif_dev->platform_data; + struct video_device *vdev = video_devdata(file); + struct channel_obj *ch = video_get_drvdata(vdev); + struct vpif_capture_chan_config *chan_cfg; + struct v4l2_input input; + int ret; + + if (!config->chan_config[ch->channel_id].inputs) + return -ENODATA; + + chan_cfg = &config->chan_config[ch->channel_id]; + input = chan_cfg->inputs[ch->input_idx].input; + if (input.capabilities != V4L2_IN_CAP_DV_TIMINGS) + return -ENODATA; + + timings->pad = 0; + + ret = v4l2_subdev_call(ch->sd, pad, enum_dv_timings, timings); + if (ret == -ENOIOCTLCMD || ret == -ENODEV) + return -EINVAL; + + return ret; +} + +/** + * vpif_query_dv_timings() - QUERY_DV_TIMINGS handler + * @file: file ptr + * @priv: file handle + * @timings: input timings + */ +static int +vpif_query_dv_timings(struct file *file, void *priv, + struct v4l2_dv_timings *timings) +{ + struct vpif_capture_config *config = vpif_dev->platform_data; + struct video_device *vdev = video_devdata(file); + struct channel_obj *ch = video_get_drvdata(vdev); + struct vpif_capture_chan_config *chan_cfg; + struct v4l2_input input; + int ret; + + if (!config->chan_config[ch->channel_id].inputs) + return -ENODATA; + + chan_cfg = &config->chan_config[ch->channel_id]; + input = chan_cfg->inputs[ch->input_idx].input; + if (input.capabilities != V4L2_IN_CAP_DV_TIMINGS) + return -ENODATA; + + ret = v4l2_subdev_call(ch->sd, video, query_dv_timings, timings); + if (ret == -ENOIOCTLCMD || ret == -ENODEV) + return -ENODATA; + + return ret; +} + +/** + * vpif_s_dv_timings() - S_DV_TIMINGS handler + * @file: 
file ptr + * @priv: file handle + * @timings: digital video timings + */ +static int vpif_s_dv_timings(struct file *file, void *priv, + struct v4l2_dv_timings *timings) +{ + struct vpif_capture_config *config = vpif_dev->platform_data; + struct video_device *vdev = video_devdata(file); + struct channel_obj *ch = video_get_drvdata(vdev); + struct vpif_params *vpifparams = &ch->vpifparams; + struct vpif_channel_config_params *std_info = &vpifparams->std_info; + struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; + struct video_obj *vid_ch = &ch->video; + struct v4l2_bt_timings *bt = &vid_ch->dv_timings.bt; + struct vpif_capture_chan_config *chan_cfg; + struct v4l2_input input; + int ret; + + if (!config->chan_config[ch->channel_id].inputs) + return -ENODATA; + + chan_cfg = &config->chan_config[ch->channel_id]; + input = chan_cfg->inputs[ch->input_idx].input; + if (input.capabilities != V4L2_IN_CAP_DV_TIMINGS) + return -ENODATA; + + if (timings->type != V4L2_DV_BT_656_1120) { + vpif_dbg(2, debug, "Timing type not defined\n"); + return -EINVAL; + } + + if (vb2_is_busy(&common->buffer_queue)) + return -EBUSY; + + /* Configure subdevice timings, if any */ + ret = v4l2_subdev_call(ch->sd, video, s_dv_timings, timings); + if (ret == -ENOIOCTLCMD || ret == -ENODEV) + ret = 0; + if (ret < 0) { + vpif_dbg(2, debug, "Error setting custom DV timings\n"); + return ret; + } + + if (!(timings->bt.width && timings->bt.height && + (timings->bt.hbackporch || + timings->bt.hfrontporch || + timings->bt.hsync) && + timings->bt.vfrontporch && + (timings->bt.vbackporch || + timings->bt.vsync))) { + vpif_dbg(2, debug, "Timings for width, height, horizontal back porch, horizontal sync, horizontal front porch, vertical back porch, vertical sync and vertical back porch must be defined\n"); + return -EINVAL; + } + + vid_ch->dv_timings = *timings; + + /* Configure video port timings */ + + std_info->eav2sav = V4L2_DV_BT_BLANKING_WIDTH(bt) - 8; + std_info->sav2eav = bt->width; + + std_info->l1 = 1; + std_info->l3 = bt->vsync + bt->vbackporch + 1; + + std_info->vsize = V4L2_DV_BT_FRAME_HEIGHT(bt); + if (bt->interlaced) { + if (bt->il_vbackporch || bt->il_vfrontporch || bt->il_vsync) { + std_info->l5 = std_info->vsize/2 - + (bt->vfrontporch - 1); + std_info->l7 = std_info->vsize/2 + 1; + std_info->l9 = std_info->l7 + bt->il_vsync + + bt->il_vbackporch + 1; + std_info->l11 = std_info->vsize - + (bt->il_vfrontporch - 1); + } else { + vpif_dbg(2, debug, "Required timing values for interlaced BT format missing\n"); + return -EINVAL; + } + } else { + std_info->l5 = std_info->vsize - (bt->vfrontporch - 1); + } + strscpy(std_info->name, "Custom timings BT656/1120", + sizeof(std_info->name)); + std_info->width = bt->width; + std_info->height = bt->height; + std_info->frm_fmt = bt->interlaced ? 
0 : 1; + std_info->ycmux_mode = 0; + std_info->capture_format = 0; + std_info->vbi_supported = 0; + std_info->hd_sd = 1; + std_info->stdid = 0; + + vid_ch->stdid = 0; + return 0; +} + +/** + * vpif_g_dv_timings() - G_DV_TIMINGS handler + * @file: file ptr + * @priv: file handle + * @timings: digital video timings + */ +static int vpif_g_dv_timings(struct file *file, void *priv, + struct v4l2_dv_timings *timings) +{ + struct vpif_capture_config *config = vpif_dev->platform_data; + struct video_device *vdev = video_devdata(file); + struct channel_obj *ch = video_get_drvdata(vdev); + struct video_obj *vid_ch = &ch->video; + struct vpif_capture_chan_config *chan_cfg; + struct v4l2_input input; + + if (!config->chan_config[ch->channel_id].inputs) + return -ENODATA; + + chan_cfg = &config->chan_config[ch->channel_id]; + input = chan_cfg->inputs[ch->input_idx].input; + if (input.capabilities != V4L2_IN_CAP_DV_TIMINGS) + return -ENODATA; + + *timings = vid_ch->dv_timings; + + return 0; +} + +/* + * vpif_log_status() - Status information + * @file: file ptr + * @priv: file handle + * + * Returns zero. + */ +static int vpif_log_status(struct file *filep, void *priv) +{ + /* status for sub devices */ + v4l2_device_call_all(&vpif_obj.v4l2_dev, 0, core, log_status); + + return 0; +} + +/* vpif capture ioctl operations */ +static const struct v4l2_ioctl_ops vpif_ioctl_ops = { + .vidioc_querycap = vpif_querycap, + .vidioc_enum_fmt_vid_cap = vpif_enum_fmt_vid_cap, + .vidioc_g_fmt_vid_cap = vpif_g_fmt_vid_cap, + .vidioc_s_fmt_vid_cap = vpif_s_fmt_vid_cap, + .vidioc_try_fmt_vid_cap = vpif_try_fmt_vid_cap, + + .vidioc_enum_input = vpif_enum_input, + .vidioc_s_input = vpif_s_input, + .vidioc_g_input = vpif_g_input, + + .vidioc_reqbufs = vb2_ioctl_reqbufs, + .vidioc_create_bufs = vb2_ioctl_create_bufs, + .vidioc_querybuf = vb2_ioctl_querybuf, + .vidioc_qbuf = vb2_ioctl_qbuf, + .vidioc_dqbuf = vb2_ioctl_dqbuf, + .vidioc_expbuf = vb2_ioctl_expbuf, + .vidioc_streamon = vb2_ioctl_streamon, + .vidioc_streamoff = vb2_ioctl_streamoff, + + .vidioc_querystd = vpif_querystd, + .vidioc_s_std = vpif_s_std, + .vidioc_g_std = vpif_g_std, + + .vidioc_enum_dv_timings = vpif_enum_dv_timings, + .vidioc_query_dv_timings = vpif_query_dv_timings, + .vidioc_s_dv_timings = vpif_s_dv_timings, + .vidioc_g_dv_timings = vpif_g_dv_timings, + + .vidioc_log_status = vpif_log_status, +}; + +/* vpif file operations */ +static const struct v4l2_file_operations vpif_fops = { + .owner = THIS_MODULE, + .open = v4l2_fh_open, + .release = vb2_fop_release, + .unlocked_ioctl = video_ioctl2, + .mmap = vb2_fop_mmap, + .poll = vb2_fop_poll +}; + +/** + * initialize_vpif() - Initialize vpif data structures + * + * Allocate memory for data structures and initialize them + */ +static int initialize_vpif(void) +{ + int err, i, j; + int free_channel_objects_index; + + /* Allocate memory for six channel objects */ + for (i = 0; i < VPIF_CAPTURE_MAX_DEVICES; i++) { + vpif_obj.dev[i] = + kzalloc(sizeof(*vpif_obj.dev[i]), GFP_KERNEL); + /* If memory allocation fails, return error */ + if (!vpif_obj.dev[i]) { + free_channel_objects_index = i; + err = -ENOMEM; + goto vpif_init_free_channel_objects; + } + } + return 0; + +vpif_init_free_channel_objects: + for (j = 0; j < free_channel_objects_index; j++) + kfree(vpif_obj.dev[j]); + return err; +} + +static inline void free_vpif_objs(void) +{ + int i; + + for (i = 0; i < VPIF_CAPTURE_MAX_DEVICES; i++) + kfree(vpif_obj.dev[i]); +} + +static int vpif_async_bound(struct v4l2_async_notifier *notifier, + struct 
v4l2_subdev *subdev, + struct v4l2_async_connection *asd) +{ + int i; + + for (i = 0; i < vpif_obj.config->asd_sizes[0]; i++) { + struct v4l2_async_connection *_asd = vpif_obj.config->asd[i]; + const struct fwnode_handle *fwnode = _asd->match.fwnode; + + if (fwnode == subdev->fwnode) { + vpif_obj.sd[i] = subdev; + vpif_obj.config->chan_config->inputs[i].subdev_name = + (char *)to_of_node(subdev->fwnode)->full_name; + vpif_dbg(2, debug, + "%s: setting input %d subdev_name = %s\n", + __func__, i, + vpif_obj.config->chan_config->inputs[i].subdev_name); + return 0; + } + } + + for (i = 0; i < vpif_obj.config->subdev_count; i++) + if (!strcmp(vpif_obj.config->subdev_info[i].name, + subdev->name)) { + vpif_obj.sd[i] = subdev; + return 0; + } + + return -EINVAL; +} + +static int vpif_probe_complete(void) +{ + struct common_obj *common; + struct video_device *vdev; + struct channel_obj *ch; + struct vb2_queue *q; + int j, err, k; + + for (j = 0; j < VPIF_CAPTURE_MAX_DEVICES; j++) { + ch = vpif_obj.dev[j]; + ch->channel_id = j; + common = &(ch->common[VPIF_VIDEO_INDEX]); + spin_lock_init(&common->irqlock); + mutex_init(&common->lock); + + /* select input 0 */ + err = vpif_set_input(vpif_obj.config, ch, 0); + if (err) + goto probe_out; + + /* set initial format */ + ch->video.stdid = V4L2_STD_525_60; + memset(&ch->video.dv_timings, 0, sizeof(ch->video.dv_timings)); + common->fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + vpif_update_std_info(ch); + + /* Initialize vb2 queue */ + q = &common->buffer_queue; + q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF; + q->drv_priv = ch; + q->ops = &video_qops; + q->mem_ops = &vb2_dma_contig_memops; + q->buf_struct_size = sizeof(struct vpif_cap_buffer); + q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; + q->min_buffers_needed = 1; + q->lock = &common->lock; + q->dev = vpif_dev; + + err = vb2_queue_init(q); + if (err) { + vpif_err("vpif_capture: vb2_queue_init() failed\n"); + goto probe_out; + } + + INIT_LIST_HEAD(&common->dma_queue); + + /* Initialize the video_device structure */ + vdev = &ch->video_dev; + strscpy(vdev->name, VPIF_DRIVER_NAME, sizeof(vdev->name)); + vdev->release = video_device_release_empty; + vdev->fops = &vpif_fops; + vdev->ioctl_ops = &vpif_ioctl_ops; + vdev->v4l2_dev = &vpif_obj.v4l2_dev; + vdev->vfl_dir = VFL_DIR_RX; + vdev->queue = q; + vdev->lock = &common->lock; + vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; + video_set_drvdata(&ch->video_dev, ch); + err = video_register_device(vdev, + VFL_TYPE_VIDEO, (j ? 
1 : 0)); + if (err) + goto probe_out; + } + + v4l2_info(&vpif_obj.v4l2_dev, "VPIF capture driver initialized\n"); + return 0; + +probe_out: + for (k = 0; k < j; k++) { + /* Get the pointer to the channel object */ + ch = vpif_obj.dev[k]; + /* Unregister video device */ + video_unregister_device(&ch->video_dev); + } + + return err; +} + +static int vpif_async_complete(struct v4l2_async_notifier *notifier) +{ + return vpif_probe_complete(); +} + +static const struct v4l2_async_notifier_operations vpif_async_ops = { + .bound = vpif_async_bound, + .complete = vpif_async_complete, +}; + +static struct vpif_capture_config * +vpif_capture_get_pdata(struct platform_device *pdev, + struct v4l2_device *v4l2_dev) +{ + struct device_node *endpoint = NULL; + struct device_node *rem = NULL; + struct vpif_capture_config *pdata; + struct vpif_subdev_info *sdinfo; + struct vpif_capture_chan_config *chan; + unsigned int i; + + v4l2_async_nf_init(&vpif_obj.notifier, v4l2_dev); + + /* + * DT boot: OF node from parent device contains + * video ports & endpoints data. + */ + if (pdev->dev.parent && pdev->dev.parent->of_node) + pdev->dev.of_node = pdev->dev.parent->of_node; + if (!IS_ENABLED(CONFIG_OF) || !pdev->dev.of_node) + return pdev->dev.platform_data; + + pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return NULL; + pdata->subdev_info = + devm_kcalloc(&pdev->dev, + VPIF_CAPTURE_NUM_CHANNELS, + sizeof(*pdata->subdev_info), + GFP_KERNEL); + + if (!pdata->subdev_info) + return NULL; + + for (i = 0; i < VPIF_CAPTURE_NUM_CHANNELS; i++) { + struct v4l2_fwnode_endpoint bus_cfg = { .bus_type = 0 }; + unsigned int flags; + int err; + + endpoint = of_graph_get_next_endpoint(pdev->dev.of_node, + endpoint); + if (!endpoint) + break; + + rem = of_graph_get_remote_port_parent(endpoint); + if (!rem) { + dev_dbg(&pdev->dev, "Remote device at %pOF not found\n", + endpoint); + goto done; + } + + sdinfo = &pdata->subdev_info[i]; + chan = &pdata->chan_config[i]; + chan->inputs = devm_kcalloc(&pdev->dev, + VPIF_CAPTURE_NUM_CHANNELS, + sizeof(*chan->inputs), + GFP_KERNEL); + if (!chan->inputs) + goto err_cleanup; + + chan->input_count++; + chan->inputs[i].input.type = V4L2_INPUT_TYPE_CAMERA; + chan->inputs[i].input.std = V4L2_STD_ALL; + chan->inputs[i].input.capabilities = V4L2_IN_CAP_STD; + + err = v4l2_fwnode_endpoint_parse(of_fwnode_handle(endpoint), + &bus_cfg); + if (err) { + dev_err(&pdev->dev, "Could not parse the endpoint\n"); + of_node_put(rem); + goto done; + } + + dev_dbg(&pdev->dev, "Endpoint %pOF, bus_width = %d\n", + endpoint, bus_cfg.bus.parallel.bus_width); + + flags = bus_cfg.bus.parallel.flags; + + if (flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH) + chan->vpif_if.hd_pol = 1; + + if (flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH) + chan->vpif_if.vd_pol = 1; + + dev_dbg(&pdev->dev, "Remote device %pOF found\n", rem); + sdinfo->name = rem->full_name; + + pdata->asd[i] = v4l2_async_nf_add_fwnode(&vpif_obj.notifier, + of_fwnode_handle(rem), + struct v4l2_async_connection); + if (IS_ERR(pdata->asd[i])) + goto err_cleanup; + + of_node_put(rem); + } + +done: + of_node_put(endpoint); + pdata->asd_sizes[0] = i; + pdata->subdev_count = i; + pdata->card_name = "DA850/OMAP-L138 Video Capture"; + + return pdata; + +err_cleanup: + of_node_put(rem); + of_node_put(endpoint); + v4l2_async_nf_cleanup(&vpif_obj.notifier); + + return NULL; +} + +/** + * vpif_probe : This function probes the vpif capture driver + * @pdev: platform device pointer + * + * This creates device entries by register itself to the V4L2 driver 
and + * initializes fields of each channel objects + */ +static __init int vpif_probe(struct platform_device *pdev) +{ + struct vpif_subdev_info *subdevdata; + struct i2c_adapter *i2c_adap; + int subdev_count; + int res_idx = 0; + int i, err; + + vpif_dev = &pdev->dev; + + err = initialize_vpif(); + if (err) { + v4l2_err(vpif_dev->driver, "Error initializing vpif\n"); + return err; + } + + err = v4l2_device_register(vpif_dev, &vpif_obj.v4l2_dev); + if (err) { + v4l2_err(vpif_dev->driver, "Error registering v4l2 device\n"); + goto vpif_free; + } + + do { + int irq; + + err = platform_get_irq_optional(pdev, res_idx); + if (err < 0 && err != -ENXIO) + goto vpif_unregister; + if (err > 0) + irq = err; + else + break; + + err = devm_request_irq(&pdev->dev, irq, vpif_channel_isr, + IRQF_SHARED, VPIF_DRIVER_NAME, + (void *)(&vpif_obj.dev[res_idx]->channel_id)); + if (err) + goto vpif_unregister; + } while (++res_idx); + + pdev->dev.platform_data = + vpif_capture_get_pdata(pdev, &vpif_obj.v4l2_dev); + if (!pdev->dev.platform_data) { + err = -EINVAL; + dev_warn(&pdev->dev, "Missing platform data. Giving up.\n"); + goto vpif_unregister; + } + + vpif_obj.config = pdev->dev.platform_data; + + subdev_count = vpif_obj.config->subdev_count; + vpif_obj.sd = kcalloc(subdev_count, sizeof(*vpif_obj.sd), GFP_KERNEL); + if (!vpif_obj.sd) { + err = -ENOMEM; + goto probe_subdev_out; + } + + if (!vpif_obj.config->asd_sizes[0]) { + int i2c_id = vpif_obj.config->i2c_adapter_id; + + i2c_adap = i2c_get_adapter(i2c_id); + WARN_ON(!i2c_adap); + for (i = 0; i < subdev_count; i++) { + subdevdata = &vpif_obj.config->subdev_info[i]; + vpif_obj.sd[i] = + v4l2_i2c_new_subdev_board(&vpif_obj.v4l2_dev, + i2c_adap, + &subdevdata-> + board_info, + NULL); + + if (!vpif_obj.sd[i]) { + vpif_err("Error registering v4l2 subdevice\n"); + err = -ENODEV; + goto probe_subdev_out; + } + v4l2_info(&vpif_obj.v4l2_dev, + "registered sub device %s\n", + subdevdata->name); + } + err = vpif_probe_complete(); + if (err) + goto probe_subdev_out; + } else { + vpif_obj.notifier.ops = &vpif_async_ops; + err = v4l2_async_nf_register(&vpif_obj.notifier); + if (err) { + vpif_err("Error registering async notifier\n"); + err = -EINVAL; + goto probe_subdev_out; + } + } + + return 0; + +probe_subdev_out: + v4l2_async_nf_cleanup(&vpif_obj.notifier); + /* free sub devices memory */ + kfree(vpif_obj.sd); +vpif_unregister: + v4l2_device_unregister(&vpif_obj.v4l2_dev); +vpif_free: + free_vpif_objs(); + + return err; +} + +/** + * vpif_remove() - driver remove handler + * @device: ptr to platform device structure + * + * The vidoe device is unregistered + */ +static void vpif_remove(struct platform_device *device) +{ + struct channel_obj *ch; + int i; + + v4l2_async_nf_unregister(&vpif_obj.notifier); + v4l2_async_nf_cleanup(&vpif_obj.notifier); + v4l2_device_unregister(&vpif_obj.v4l2_dev); + + kfree(vpif_obj.sd); + /* un-register device */ + for (i = 0; i < VPIF_CAPTURE_MAX_DEVICES; i++) { + /* Get the pointer to the channel object */ + ch = vpif_obj.dev[i]; + /* Unregister video device */ + video_unregister_device(&ch->video_dev); + kfree(vpif_obj.dev[i]); + } +} + +#ifdef CONFIG_PM_SLEEP +/** + * vpif_suspend: vpif device suspend + * @dev: pointer to &struct device + */ +static int vpif_suspend(struct device *dev) +{ + + struct common_obj *common; + struct channel_obj *ch; + int i; + + for (i = 0; i < VPIF_CAPTURE_MAX_DEVICES; i++) { + /* Get the pointer to the channel object */ + ch = vpif_obj.dev[i]; + common = &ch->common[VPIF_VIDEO_INDEX]; + + if 
(!vb2_start_streaming_called(&common->buffer_queue)) + continue; + + mutex_lock(&common->lock); + /* Disable channel */ + if (ch->channel_id == VPIF_CHANNEL0_VIDEO) { + enable_channel0(0); + channel0_intr_enable(0); + } + if (ch->channel_id == VPIF_CHANNEL1_VIDEO || + ycmux_mode == 2) { + enable_channel1(0); + channel1_intr_enable(0); + } + mutex_unlock(&common->lock); + } + + return 0; +} + +/* + * vpif_resume: vpif device suspend + */ +static int vpif_resume(struct device *dev) +{ + struct common_obj *common; + struct channel_obj *ch; + int i; + + for (i = 0; i < VPIF_CAPTURE_MAX_DEVICES; i++) { + /* Get the pointer to the channel object */ + ch = vpif_obj.dev[i]; + common = &ch->common[VPIF_VIDEO_INDEX]; + + if (!vb2_start_streaming_called(&common->buffer_queue)) + continue; + + mutex_lock(&common->lock); + /* Enable channel */ + if (ch->channel_id == VPIF_CHANNEL0_VIDEO) { + enable_channel0(1); + channel0_intr_enable(1); + } + if (ch->channel_id == VPIF_CHANNEL1_VIDEO || + ycmux_mode == 2) { + enable_channel1(1); + channel1_intr_enable(1); + } + mutex_unlock(&common->lock); + } + + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(vpif_pm_ops, vpif_suspend, vpif_resume); + +static __refdata struct platform_driver vpif_driver = { + .driver = { + .name = VPIF_DRIVER_NAME, + .pm = &vpif_pm_ops, + }, + .probe = vpif_probe, + .remove_new = vpif_remove, +}; + +module_platform_driver(vpif_driver); diff --git a/drivers/media/platform/ti/davinci/vpif_capture.h b/drivers/media/platform/ti/davinci/vpif_capture.h new file mode 100644 index 0000000000..6191056500 --- /dev/null +++ b/drivers/media/platform/ti/davinci/vpif_capture.h @@ -0,0 +1,107 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (C) 2009 Texas Instruments Inc + */ + +#ifndef VPIF_CAPTURE_H +#define VPIF_CAPTURE_H + +/* Header files */ +#include +#include + +#include "vpif.h" + +/* Macros */ +#define VPIF_CAPTURE_VERSION "0.0.2" + +#define VPIF_VALID_FIELD(field) (((V4L2_FIELD_ANY == field) || \ + (V4L2_FIELD_NONE == field)) || \ + (((V4L2_FIELD_INTERLACED == field) || \ + (V4L2_FIELD_SEQ_TB == field)) || \ + (V4L2_FIELD_SEQ_BT == field))) + +#define VPIF_CAPTURE_MAX_DEVICES 2 +#define VPIF_VIDEO_INDEX 0 +#define VPIF_NUMBER_OF_OBJECTS 1 + +/* Enumerated data type to give id to each device per channel */ +enum vpif_channel_id { + VPIF_CHANNEL0_VIDEO = 0, + VPIF_CHANNEL1_VIDEO, +}; + +struct video_obj { + enum v4l2_field buf_field; + /* Currently selected or default standard */ + v4l2_std_id stdid; + struct v4l2_dv_timings dv_timings; +}; + +struct vpif_cap_buffer { + struct vb2_v4l2_buffer vb; + struct list_head list; +}; + +struct common_obj { + /* Pointer pointing to current v4l2_buffer */ + struct vpif_cap_buffer *cur_frm; + /* Pointer pointing to current v4l2_buffer */ + struct vpif_cap_buffer *next_frm; + /* Used to store pixel format */ + struct v4l2_format fmt; + /* Buffer queue used in vb2 */ + struct vb2_queue buffer_queue; + /* Queue of filled frames */ + struct list_head dma_queue; + /* Protects the dma_queue field */ + spinlock_t irqlock; + /* lock used to access this structure */ + struct mutex lock; + /* Function pointer to set the addresses */ + void (*set_addr) (unsigned long, unsigned long, unsigned long, + unsigned long); + /* offset where Y top starts from the starting of the buffer */ + u32 ytop_off; + /* offset where Y bottom starts from the starting of the buffer */ + u32 ybtm_off; + /* offset where C top starts from the starting of the buffer */ + u32 ctop_off; + /* offset where C bottom 
starts from the starting of the buffer */ + u32 cbtm_off; + /* Indicates width of the image data */ + u32 width; + /* Indicates height of the image data */ + u32 height; +}; + +struct channel_obj { + /* Identifies video device for this channel */ + struct video_device video_dev; + /* Indicates id of the field which is being displayed */ + u32 field_id; + /* flag to indicate whether decoder is initialized */ + u8 initialized; + /* Identifies channel */ + enum vpif_channel_id channel_id; + /* Current input */ + u32 input_idx; + /* subdev corresponding to the current input, may be NULL */ + struct v4l2_subdev *sd; + /* vpif configuration params */ + struct vpif_params vpifparams; + /* common object array */ + struct common_obj common[VPIF_NUMBER_OF_OBJECTS]; + /* video object */ + struct video_obj video; +}; + +struct vpif_device { + struct v4l2_device v4l2_dev; + struct channel_obj *dev[VPIF_CAPTURE_NUM_CHANNELS]; + struct v4l2_subdev **sd; + struct v4l2_async_notifier notifier; + struct vpif_capture_config *config; +}; + +#endif /* VPIF_CAPTURE_H */ diff --git a/drivers/media/platform/ti/davinci/vpif_display.c b/drivers/media/platform/ti/davinci/vpif_display.c new file mode 100644 index 0000000000..f8ec2991c6 --- /dev/null +++ b/drivers/media/platform/ti/davinci/vpif_display.c @@ -0,0 +1,1403 @@ +/* + * vpif-display - VPIF display driver + * Display driver for TI DaVinci VPIF + * + * Copyright (C) 2009 Texas Instruments Incorporated - https://www.ti.com/ + * Copyright (C) 2014 Lad, Prabhakar + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed .as is. WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include + +#include + +#include "vpif.h" +#include "vpif_display.h" + +MODULE_DESCRIPTION("TI DaVinci VPIF Display driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(VPIF_DISPLAY_VERSION); + +#define VPIF_V4L2_STD (V4L2_STD_525_60 | V4L2_STD_625_50) + +#define vpif_err(fmt, arg...) v4l2_err(&vpif_obj.v4l2_dev, fmt, ## arg) +#define vpif_dbg(level, debug, fmt, arg...) \ + v4l2_dbg(level, debug, &vpif_obj.v4l2_dev, fmt, ## arg) + +static int debug = 1; + +module_param(debug, int, 0644); + +MODULE_PARM_DESC(debug, "Debug level 0-1"); + +#define VPIF_DRIVER_NAME "vpif_display" +MODULE_ALIAS("platform:" VPIF_DRIVER_NAME); + +/* Is set to 1 in case of SDTV formats, 2 in case of HDTV formats. */ +static int ycmux_mode; + +static u8 channel_first_int[VPIF_NUMOBJECTS][2] = { {1, 1} }; + +static struct vpif_device vpif_obj = { {NULL} }; +static struct device *vpif_dev; +static void vpif_calculate_offsets(struct channel_obj *ch); +static void vpif_config_addr(struct channel_obj *ch, int muxmode); + +static inline +struct vpif_disp_buffer *to_vpif_buffer(struct vb2_v4l2_buffer *vb) +{ + return container_of(vb, struct vpif_disp_buffer, vb); +} + +/** + * vpif_buffer_prepare : callback function for buffer prepare + * @vb: ptr to vb2_buffer + * + * This is the callback function for buffer prepare when vb2_qbuf() + * function is called. 
The buffer is prepared and user space virtual address + * or user address is converted into physical address + */ +static int vpif_buffer_prepare(struct vb2_buffer *vb) +{ + struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); + struct channel_obj *ch = vb2_get_drv_priv(vb->vb2_queue); + struct common_obj *common; + + common = &ch->common[VPIF_VIDEO_INDEX]; + + vb2_set_plane_payload(vb, 0, common->fmt.fmt.pix.sizeimage); + if (vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0)) + return -EINVAL; + + vbuf->field = common->fmt.fmt.pix.field; + + if (vb->vb2_queue->type != V4L2_BUF_TYPE_SLICED_VBI_OUTPUT) { + unsigned long addr = vb2_dma_contig_plane_dma_addr(vb, 0); + + if (!ISALIGNED(addr + common->ytop_off) || + !ISALIGNED(addr + common->ybtm_off) || + !ISALIGNED(addr + common->ctop_off) || + !ISALIGNED(addr + common->cbtm_off)) { + vpif_err("buffer offset not aligned to 8 bytes\n"); + return -EINVAL; + } + } + + return 0; +} + +/** + * vpif_buffer_queue_setup : Callback function for buffer setup. + * @vq: vb2_queue ptr + * @nbuffers: ptr to number of buffers requested by application + * @nplanes: contains number of distinct video planes needed to hold a frame + * @sizes: contains the size (in bytes) of each plane. + * @alloc_devs: ptr to allocation context + * + * This callback function is called when reqbuf() is called to adjust + * the buffer count and buffer size + */ +static int vpif_buffer_queue_setup(struct vb2_queue *vq, + unsigned int *nbuffers, unsigned int *nplanes, + unsigned int sizes[], struct device *alloc_devs[]) +{ + struct channel_obj *ch = vb2_get_drv_priv(vq); + struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; + unsigned size = common->fmt.fmt.pix.sizeimage; + + if (*nplanes) { + if (sizes[0] < size) + return -EINVAL; + size = sizes[0]; + } + + if (vq->num_buffers + *nbuffers < 3) + *nbuffers = 3 - vq->num_buffers; + + *nplanes = 1; + sizes[0] = size; + + /* Calculate the offset for Y and C data in the buffer */ + vpif_calculate_offsets(ch); + + return 0; +} + +/** + * vpif_buffer_queue : Callback function to add buffer to DMA queue + * @vb: ptr to vb2_buffer + * + * This callback function queues the buffer to DMA engine + */ +static void vpif_buffer_queue(struct vb2_buffer *vb) +{ + struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); + struct vpif_disp_buffer *buf = to_vpif_buffer(vbuf); + struct channel_obj *ch = vb2_get_drv_priv(vb->vb2_queue); + struct common_obj *common; + unsigned long flags; + + common = &ch->common[VPIF_VIDEO_INDEX]; + + /* add the buffer to the DMA queue */ + spin_lock_irqsave(&common->irqlock, flags); + list_add_tail(&buf->list, &common->dma_queue); + spin_unlock_irqrestore(&common->irqlock, flags); +} + +/** + * vpif_start_streaming : Starts the DMA engine for streaming + * @vq: ptr to vb2_buffer + * @count: number of buffers + */ +static int vpif_start_streaming(struct vb2_queue *vq, unsigned int count) +{ + struct vpif_display_config *vpif_config_data = + vpif_dev->platform_data; + struct channel_obj *ch = vb2_get_drv_priv(vq); + struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; + struct vpif_params *vpif = &ch->vpifparams; + struct vpif_disp_buffer *buf, *tmp; + unsigned long addr, flags; + int ret; + + spin_lock_irqsave(&common->irqlock, flags); + + /* Initialize field_id */ + ch->field_id = 0; + + /* clock settings */ + if (vpif_config_data->set_clock) { + ret = vpif_config_data->set_clock(ch->vpifparams.std_info. 
+ ycmux_mode, ch->vpifparams.std_info.hd_sd); + if (ret < 0) { + vpif_err("can't set clock\n"); + goto err; + } + } + + /* set the parameters and addresses */ + ret = vpif_set_video_params(vpif, ch->channel_id + 2); + if (ret < 0) + goto err; + + ycmux_mode = ret; + vpif_config_addr(ch, ret); + /* Get the next frame from the buffer queue */ + common->next_frm = common->cur_frm = + list_entry(common->dma_queue.next, + struct vpif_disp_buffer, list); + + list_del(&common->cur_frm->list); + spin_unlock_irqrestore(&common->irqlock, flags); + + addr = vb2_dma_contig_plane_dma_addr(&common->cur_frm->vb.vb2_buf, 0); + common->set_addr((addr + common->ytop_off), + (addr + common->ybtm_off), + (addr + common->ctop_off), + (addr + common->cbtm_off)); + + /* + * Set interrupt for both the fields in VPIF + * Register enable channel in VPIF register + */ + channel_first_int[VPIF_VIDEO_INDEX][ch->channel_id] = 1; + if (VPIF_CHANNEL2_VIDEO == ch->channel_id) { + channel2_intr_assert(); + channel2_intr_enable(1); + enable_channel2(1); + if (vpif_config_data->chan_config[VPIF_CHANNEL2_VIDEO].clip_en) + channel2_clipping_enable(1); + } + + if (VPIF_CHANNEL3_VIDEO == ch->channel_id || ycmux_mode == 2) { + channel3_intr_assert(); + channel3_intr_enable(1); + enable_channel3(1); + if (vpif_config_data->chan_config[VPIF_CHANNEL3_VIDEO].clip_en) + channel3_clipping_enable(1); + } + + return 0; + +err: + list_for_each_entry_safe(buf, tmp, &common->dma_queue, list) { + list_del(&buf->list); + vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED); + } + spin_unlock_irqrestore(&common->irqlock, flags); + + return ret; +} + +/** + * vpif_stop_streaming : Stop the DMA engine + * @vq: ptr to vb2_queue + * + * This callback stops the DMA engine and any remaining buffers + * in the DMA queue are released. 
+ */ +static void vpif_stop_streaming(struct vb2_queue *vq) +{ + struct channel_obj *ch = vb2_get_drv_priv(vq); + struct common_obj *common; + unsigned long flags; + + common = &ch->common[VPIF_VIDEO_INDEX]; + + /* Disable channel */ + if (VPIF_CHANNEL2_VIDEO == ch->channel_id) { + enable_channel2(0); + channel2_intr_enable(0); + } + if (VPIF_CHANNEL3_VIDEO == ch->channel_id || ycmux_mode == 2) { + enable_channel3(0); + channel3_intr_enable(0); + } + + /* release all active buffers */ + spin_lock_irqsave(&common->irqlock, flags); + if (common->cur_frm == common->next_frm) { + vb2_buffer_done(&common->cur_frm->vb.vb2_buf, + VB2_BUF_STATE_ERROR); + } else { + if (common->cur_frm) + vb2_buffer_done(&common->cur_frm->vb.vb2_buf, + VB2_BUF_STATE_ERROR); + if (common->next_frm) + vb2_buffer_done(&common->next_frm->vb.vb2_buf, + VB2_BUF_STATE_ERROR); + } + + while (!list_empty(&common->dma_queue)) { + common->next_frm = list_entry(common->dma_queue.next, + struct vpif_disp_buffer, list); + list_del(&common->next_frm->list); + vb2_buffer_done(&common->next_frm->vb.vb2_buf, + VB2_BUF_STATE_ERROR); + } + spin_unlock_irqrestore(&common->irqlock, flags); +} + +static const struct vb2_ops video_qops = { + .queue_setup = vpif_buffer_queue_setup, + .wait_prepare = vb2_ops_wait_prepare, + .wait_finish = vb2_ops_wait_finish, + .buf_prepare = vpif_buffer_prepare, + .start_streaming = vpif_start_streaming, + .stop_streaming = vpif_stop_streaming, + .buf_queue = vpif_buffer_queue, +}; + +static void process_progressive_mode(struct common_obj *common) +{ + unsigned long addr; + + spin_lock(&common->irqlock); + /* Get the next buffer from buffer queue */ + common->next_frm = list_entry(common->dma_queue.next, + struct vpif_disp_buffer, list); + /* Remove that buffer from the buffer queue */ + list_del(&common->next_frm->list); + spin_unlock(&common->irqlock); + + /* Set top and bottom field addrs in VPIF registers */ + addr = vb2_dma_contig_plane_dma_addr(&common->next_frm->vb.vb2_buf, 0); + common->set_addr(addr + common->ytop_off, + addr + common->ybtm_off, + addr + common->ctop_off, + addr + common->cbtm_off); +} + +static void process_interlaced_mode(int fid, struct common_obj *common) +{ + /* device field id and local field id are in sync */ + /* If this is even field */ + if (0 == fid) { + if (common->cur_frm == common->next_frm) + return; + + /* one frame is displayed If next frame is + * available, release cur_frm and move on */ + /* Copy frame display time */ + common->cur_frm->vb.vb2_buf.timestamp = ktime_get_ns(); + /* Change status of the cur_frm */ + vb2_buffer_done(&common->cur_frm->vb.vb2_buf, + VB2_BUF_STATE_DONE); + /* Make cur_frm pointing to next_frm */ + common->cur_frm = common->next_frm; + + } else if (1 == fid) { /* odd field */ + spin_lock(&common->irqlock); + if (list_empty(&common->dma_queue) + || (common->cur_frm != common->next_frm)) { + spin_unlock(&common->irqlock); + return; + } + spin_unlock(&common->irqlock); + /* one field is displayed configure the next + * frame if it is available else hold on current + * frame */ + /* Get next from the buffer queue */ + process_progressive_mode(common); + } +} + +/* + * vpif_channel_isr: It changes status of the displayed buffer, takes next + * buffer from the queue and sets its address in VPIF registers + */ +static irqreturn_t vpif_channel_isr(int irq, void *dev_id) +{ + struct vpif_device *dev = &vpif_obj; + struct channel_obj *ch; + struct common_obj *common; + int fid = -1, i; + int channel_id; + + channel_id = *(int *)(dev_id); + if 
(!vpif_intr_status(channel_id + 2)) + return IRQ_NONE; + + ch = dev->dev[channel_id]; + for (i = 0; i < VPIF_NUMOBJECTS; i++) { + common = &ch->common[i]; + /* If streaming is started in this channel */ + + if (1 == ch->vpifparams.std_info.frm_fmt) { + spin_lock(&common->irqlock); + if (list_empty(&common->dma_queue)) { + spin_unlock(&common->irqlock); + continue; + } + spin_unlock(&common->irqlock); + + /* Progressive mode */ + if (!channel_first_int[i][channel_id]) { + /* Mark status of the cur_frm to + * done and unlock semaphore on it */ + common->cur_frm->vb.vb2_buf.timestamp = + ktime_get_ns(); + vb2_buffer_done(&common->cur_frm->vb.vb2_buf, + VB2_BUF_STATE_DONE); + /* Make cur_frm pointing to next_frm */ + common->cur_frm = common->next_frm; + } + + channel_first_int[i][channel_id] = 0; + process_progressive_mode(common); + } else { + /* Interlaced mode */ + /* If it is first interrupt, ignore it */ + + if (channel_first_int[i][channel_id]) { + channel_first_int[i][channel_id] = 0; + continue; + } + + if (0 == i) { + ch->field_id ^= 1; + /* Get field id from VPIF registers */ + fid = vpif_channel_getfid(ch->channel_id + 2); + /* If fid does not match with stored field id */ + if (fid != ch->field_id) { + /* Make them in sync */ + if (0 == fid) + ch->field_id = fid; + + return IRQ_HANDLED; + } + } + process_interlaced_mode(fid, common); + } + } + + return IRQ_HANDLED; +} + +static int vpif_update_std_info(struct channel_obj *ch) +{ + struct video_obj *vid_ch = &ch->video; + struct vpif_params *vpifparams = &ch->vpifparams; + struct vpif_channel_config_params *std_info = &vpifparams->std_info; + const struct vpif_channel_config_params *config; + + int i; + + for (i = 0; i < vpif_ch_params_count; i++) { + config = &vpif_ch_params[i]; + if (config->hd_sd == 0) { + vpif_dbg(2, debug, "SD format\n"); + if (config->stdid & vid_ch->stdid) { + memcpy(std_info, config, sizeof(*config)); + break; + } + } + } + + if (i == vpif_ch_params_count) { + vpif_dbg(1, debug, "Format not found\n"); + return -EINVAL; + } + + return 0; +} + +static int vpif_update_resolution(struct channel_obj *ch) +{ + struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; + struct video_obj *vid_ch = &ch->video; + struct vpif_params *vpifparams = &ch->vpifparams; + struct vpif_channel_config_params *std_info = &vpifparams->std_info; + + if (!vid_ch->stdid && !vid_ch->dv_timings.bt.height) + return -EINVAL; + + if (vid_ch->stdid) { + if (vpif_update_std_info(ch)) + return -EINVAL; + } + + common->fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUV422P; + common->fmt.fmt.pix.width = std_info->width; + common->fmt.fmt.pix.height = std_info->height; + vpif_dbg(1, debug, "Pixel details: Width = %d,Height = %d\n", + common->fmt.fmt.pix.width, common->fmt.fmt.pix.height); + + /* Set height and width paramateres */ + common->height = std_info->height; + common->width = std_info->width; + common->fmt.fmt.pix.sizeimage = common->height * common->width * 2; + + if (vid_ch->stdid) + common->fmt.fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M; + else + common->fmt.fmt.pix.colorspace = V4L2_COLORSPACE_REC709; + + if (ch->vpifparams.std_info.frm_fmt) + common->fmt.fmt.pix.field = V4L2_FIELD_NONE; + else + common->fmt.fmt.pix.field = V4L2_FIELD_INTERLACED; + + return 0; +} + +/* + * vpif_calculate_offsets: This function calculates buffers offset for Y and C + * in the top and bottom field + */ +static void vpif_calculate_offsets(struct channel_obj *ch) +{ + struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; + struct vpif_params 
*vpifparams = &ch->vpifparams; + enum v4l2_field field = common->fmt.fmt.pix.field; + struct video_obj *vid_ch = &ch->video; + unsigned int hpitch, sizeimage; + + if (V4L2_FIELD_ANY == common->fmt.fmt.pix.field) { + if (ch->vpifparams.std_info.frm_fmt) + vid_ch->buf_field = V4L2_FIELD_NONE; + else + vid_ch->buf_field = V4L2_FIELD_INTERLACED; + } else { + vid_ch->buf_field = common->fmt.fmt.pix.field; + } + + sizeimage = common->fmt.fmt.pix.sizeimage; + + hpitch = common->fmt.fmt.pix.bytesperline; + if ((V4L2_FIELD_NONE == vid_ch->buf_field) || + (V4L2_FIELD_INTERLACED == vid_ch->buf_field)) { + common->ytop_off = 0; + common->ybtm_off = hpitch; + common->ctop_off = sizeimage / 2; + common->cbtm_off = sizeimage / 2 + hpitch; + } else if (V4L2_FIELD_SEQ_TB == vid_ch->buf_field) { + common->ytop_off = 0; + common->ybtm_off = sizeimage / 4; + common->ctop_off = sizeimage / 2; + common->cbtm_off = common->ctop_off + sizeimage / 4; + } else if (V4L2_FIELD_SEQ_BT == vid_ch->buf_field) { + common->ybtm_off = 0; + common->ytop_off = sizeimage / 4; + common->cbtm_off = sizeimage / 2; + common->ctop_off = common->cbtm_off + sizeimage / 4; + } + + if ((V4L2_FIELD_NONE == vid_ch->buf_field) || + (V4L2_FIELD_INTERLACED == vid_ch->buf_field)) { + vpifparams->video_params.storage_mode = 1; + } else { + vpifparams->video_params.storage_mode = 0; + } + + if (ch->vpifparams.std_info.frm_fmt == 1) { + vpifparams->video_params.hpitch = + common->fmt.fmt.pix.bytesperline; + } else { + if ((field == V4L2_FIELD_ANY) || + (field == V4L2_FIELD_INTERLACED)) + vpifparams->video_params.hpitch = + common->fmt.fmt.pix.bytesperline * 2; + else + vpifparams->video_params.hpitch = + common->fmt.fmt.pix.bytesperline; + } + + ch->vpifparams.video_params.stdid = ch->vpifparams.std_info.stdid; +} + +static void vpif_config_addr(struct channel_obj *ch, int muxmode) +{ + struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; + + if (VPIF_CHANNEL3_VIDEO == ch->channel_id) { + common->set_addr = ch3_set_video_buf_addr; + } else { + if (2 == muxmode) + common->set_addr = ch2_set_video_buf_addr_yc_nmux; + else + common->set_addr = ch2_set_video_buf_addr; + } +} + +/* functions implementing ioctls */ +/** + * vpif_querycap() - QUERYCAP handler + * @file: file ptr + * @priv: file handle + * @cap: ptr to v4l2_capability structure + */ +static int vpif_querycap(struct file *file, void *priv, + struct v4l2_capability *cap) +{ + struct vpif_display_config *config = vpif_dev->platform_data; + + strscpy(cap->driver, VPIF_DRIVER_NAME, sizeof(cap->driver)); + strscpy(cap->card, config->card_name, sizeof(cap->card)); + + return 0; +} + +static int vpif_enum_fmt_vid_out(struct file *file, void *priv, + struct v4l2_fmtdesc *fmt) +{ + if (fmt->index != 0) + return -EINVAL; + + /* Fill in the information about format */ + fmt->pixelformat = V4L2_PIX_FMT_YUV422P; + return 0; +} + +static int vpif_g_fmt_vid_out(struct file *file, void *priv, + struct v4l2_format *fmt) +{ + struct video_device *vdev = video_devdata(file); + struct channel_obj *ch = video_get_drvdata(vdev); + struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; + + /* Check the validity of the buffer type */ + if (common->fmt.type != fmt->type) + return -EINVAL; + + if (vpif_update_resolution(ch)) + return -EINVAL; + *fmt = common->fmt; + return 0; +} + +static int vpif_try_fmt_vid_out(struct file *file, void *priv, + struct v4l2_format *fmt) +{ + struct video_device *vdev = video_devdata(file); + struct channel_obj *ch = video_get_drvdata(vdev); + struct common_obj *common 
= &ch->common[VPIF_VIDEO_INDEX]; + struct v4l2_pix_format *pixfmt = &fmt->fmt.pix; + + /* + * to suppress v4l-compliance warnings silently correct + * the pixelformat + */ + if (pixfmt->pixelformat != V4L2_PIX_FMT_YUV422P) + pixfmt->pixelformat = common->fmt.fmt.pix.pixelformat; + + if (vpif_update_resolution(ch)) + return -EINVAL; + + pixfmt->colorspace = common->fmt.fmt.pix.colorspace; + pixfmt->field = common->fmt.fmt.pix.field; + pixfmt->bytesperline = common->fmt.fmt.pix.width; + pixfmt->width = common->fmt.fmt.pix.width; + pixfmt->height = common->fmt.fmt.pix.height; + pixfmt->sizeimage = pixfmt->bytesperline * pixfmt->height * 2; + + return 0; +} + +static int vpif_s_fmt_vid_out(struct file *file, void *priv, + struct v4l2_format *fmt) +{ + struct video_device *vdev = video_devdata(file); + struct channel_obj *ch = video_get_drvdata(vdev); + struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; + struct v4l2_pix_format *pixfmt = &fmt->fmt.pix; + int ret; + + if (vb2_is_busy(&common->buffer_queue)) + return -EBUSY; + + ret = vpif_try_fmt_vid_out(file, priv, fmt); + if (ret) + return ret; + + /* store the pix format in the channel object */ + common->fmt.fmt.pix = *pixfmt; + + /* store the format in the channel object */ + common->fmt = *fmt; + return 0; +} + +static int vpif_s_std(struct file *file, void *priv, v4l2_std_id std_id) +{ + struct vpif_display_config *config = vpif_dev->platform_data; + struct video_device *vdev = video_devdata(file); + struct channel_obj *ch = video_get_drvdata(vdev); + struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; + struct vpif_display_chan_config *chan_cfg; + struct v4l2_output output; + int ret; + + if (!config->chan_config[ch->channel_id].outputs) + return -ENODATA; + + chan_cfg = &config->chan_config[ch->channel_id]; + output = chan_cfg->outputs[ch->output_idx].output; + if (output.capabilities != V4L2_OUT_CAP_STD) + return -ENODATA; + + if (vb2_is_busy(&common->buffer_queue)) + return -EBUSY; + + + if (!(std_id & VPIF_V4L2_STD)) + return -EINVAL; + + /* Call encoder subdevice function to set the standard */ + ch->video.stdid = std_id; + memset(&ch->video.dv_timings, 0, sizeof(ch->video.dv_timings)); + /* Get the information about the standard */ + if (vpif_update_resolution(ch)) + return -EINVAL; + + common->fmt.fmt.pix.bytesperline = common->fmt.fmt.pix.width; + + ret = v4l2_device_call_until_err(&vpif_obj.v4l2_dev, 1, video, + s_std_output, std_id); + if (ret < 0) { + vpif_err("Failed to set output standard\n"); + return ret; + } + + ret = v4l2_device_call_until_err(&vpif_obj.v4l2_dev, 1, video, + s_std, std_id); + if (ret < 0) + vpif_err("Failed to set standard for sub devices\n"); + return ret; +} + +static int vpif_g_std(struct file *file, void *priv, v4l2_std_id *std) +{ + struct vpif_display_config *config = vpif_dev->platform_data; + struct video_device *vdev = video_devdata(file); + struct channel_obj *ch = video_get_drvdata(vdev); + struct vpif_display_chan_config *chan_cfg; + struct v4l2_output output; + + if (!config->chan_config[ch->channel_id].outputs) + return -ENODATA; + + chan_cfg = &config->chan_config[ch->channel_id]; + output = chan_cfg->outputs[ch->output_idx].output; + if (output.capabilities != V4L2_OUT_CAP_STD) + return -ENODATA; + + *std = ch->video.stdid; + return 0; +} + +static int vpif_enum_output(struct file *file, void *fh, + struct v4l2_output *output) +{ + + struct vpif_display_config *config = vpif_dev->platform_data; + struct video_device *vdev = video_devdata(file); + struct channel_obj *ch = 
video_get_drvdata(vdev); + struct vpif_display_chan_config *chan_cfg; + + chan_cfg = &config->chan_config[ch->channel_id]; + if (output->index >= chan_cfg->output_count) { + vpif_dbg(1, debug, "Invalid output index\n"); + return -EINVAL; + } + + *output = chan_cfg->outputs[output->index].output; + return 0; +} + +/** + * vpif_output_to_subdev() - Maps output to sub device + * @vpif_cfg: global config ptr + * @chan_cfg: channel config ptr + * @index: Given output index from application + * + * lookup the sub device information for a given output index. + * we report all the output to application. output table also + * has sub device name for the each output + */ +static int +vpif_output_to_subdev(struct vpif_display_config *vpif_cfg, + struct vpif_display_chan_config *chan_cfg, int index) +{ + struct vpif_subdev_info *subdev_info; + const char *subdev_name; + int i; + + vpif_dbg(2, debug, "vpif_output_to_subdev\n"); + + if (!chan_cfg->outputs) + return -1; + + subdev_name = chan_cfg->outputs[index].subdev_name; + if (!subdev_name) + return -1; + + /* loop through the sub device list to get the sub device info */ + for (i = 0; i < vpif_cfg->subdev_count; i++) { + subdev_info = &vpif_cfg->subdevinfo[i]; + if (!strcmp(subdev_info->name, subdev_name)) + return i; + } + return -1; +} + +/** + * vpif_set_output() - Select an output + * @vpif_cfg: global config ptr + * @ch: channel + * @index: Given output index from application + * + * Select the given output. + */ +static int vpif_set_output(struct vpif_display_config *vpif_cfg, + struct channel_obj *ch, int index) +{ + struct vpif_display_chan_config *chan_cfg = + &vpif_cfg->chan_config[ch->channel_id]; + struct v4l2_subdev *sd = NULL; + u32 input = 0, output = 0; + int sd_index; + int ret; + + sd_index = vpif_output_to_subdev(vpif_cfg, chan_cfg, index); + if (sd_index >= 0) + sd = vpif_obj.sd[sd_index]; + + if (sd) { + input = chan_cfg->outputs[index].input_route; + output = chan_cfg->outputs[index].output_route; + ret = v4l2_subdev_call(sd, video, s_routing, input, output, 0); + if (ret < 0 && ret != -ENOIOCTLCMD) { + vpif_err("Failed to set output\n"); + return ret; + } + + } + ch->output_idx = index; + ch->sd = sd; + if (chan_cfg->outputs) + /* update tvnorms from the sub device output info */ + ch->video_dev.tvnorms = chan_cfg->outputs[index].output.std; + return 0; +} + +static int vpif_s_output(struct file *file, void *priv, unsigned int i) +{ + struct vpif_display_config *config = vpif_dev->platform_data; + struct video_device *vdev = video_devdata(file); + struct channel_obj *ch = video_get_drvdata(vdev); + struct vpif_display_chan_config *chan_cfg; + struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; + + if (vb2_is_busy(&common->buffer_queue)) + return -EBUSY; + + chan_cfg = &config->chan_config[ch->channel_id]; + + if (i >= chan_cfg->output_count) + return -EINVAL; + + return vpif_set_output(config, ch, i); +} + +static int vpif_g_output(struct file *file, void *priv, unsigned int *i) +{ + struct video_device *vdev = video_devdata(file); + struct channel_obj *ch = video_get_drvdata(vdev); + + *i = ch->output_idx; + + return 0; +} + +/** + * vpif_enum_dv_timings() - ENUM_DV_TIMINGS handler + * @file: file ptr + * @priv: file handle + * @timings: input timings + */ +static int +vpif_enum_dv_timings(struct file *file, void *priv, + struct v4l2_enum_dv_timings *timings) +{ + struct vpif_display_config *config = vpif_dev->platform_data; + struct video_device *vdev = video_devdata(file); + struct channel_obj *ch = 
video_get_drvdata(vdev); + struct vpif_display_chan_config *chan_cfg; + struct v4l2_output output; + int ret; + + if (!config->chan_config[ch->channel_id].outputs) + return -ENODATA; + + chan_cfg = &config->chan_config[ch->channel_id]; + output = chan_cfg->outputs[ch->output_idx].output; + if (output.capabilities != V4L2_OUT_CAP_DV_TIMINGS) + return -ENODATA; + + timings->pad = 0; + + ret = v4l2_subdev_call(ch->sd, pad, enum_dv_timings, timings); + if (ret == -ENOIOCTLCMD || ret == -ENODEV) + return -EINVAL; + return ret; +} + +/** + * vpif_s_dv_timings() - S_DV_TIMINGS handler + * @file: file ptr + * @priv: file handle + * @timings: digital video timings + */ +static int vpif_s_dv_timings(struct file *file, void *priv, + struct v4l2_dv_timings *timings) +{ + struct vpif_display_config *config = vpif_dev->platform_data; + struct video_device *vdev = video_devdata(file); + struct channel_obj *ch = video_get_drvdata(vdev); + struct vpif_params *vpifparams = &ch->vpifparams; + struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; + struct vpif_channel_config_params *std_info = &vpifparams->std_info; + struct video_obj *vid_ch = &ch->video; + struct v4l2_bt_timings *bt = &vid_ch->dv_timings.bt; + struct vpif_display_chan_config *chan_cfg; + struct v4l2_output output; + int ret; + + if (!config->chan_config[ch->channel_id].outputs) + return -ENODATA; + + chan_cfg = &config->chan_config[ch->channel_id]; + output = chan_cfg->outputs[ch->output_idx].output; + if (output.capabilities != V4L2_OUT_CAP_DV_TIMINGS) + return -ENODATA; + + if (vb2_is_busy(&common->buffer_queue)) + return -EBUSY; + + if (timings->type != V4L2_DV_BT_656_1120) { + vpif_dbg(2, debug, "Timing type not defined\n"); + return -EINVAL; + } + + /* Configure subdevice timings, if any */ + ret = v4l2_subdev_call(ch->sd, video, s_dv_timings, timings); + if (ret == -ENOIOCTLCMD || ret == -ENODEV) + ret = 0; + if (ret < 0) { + vpif_dbg(2, debug, "Error setting custom DV timings\n"); + return ret; + } + + if (!(timings->bt.width && timings->bt.height && + (timings->bt.hbackporch || + timings->bt.hfrontporch || + timings->bt.hsync) && + timings->bt.vfrontporch && + (timings->bt.vbackporch || + timings->bt.vsync))) { + vpif_dbg(2, debug, "Timings for width, height, horizontal back porch, horizontal sync, horizontal front porch, vertical back porch, vertical sync and vertical back porch must be defined\n"); + return -EINVAL; + } + + vid_ch->dv_timings = *timings; + + /* Configure video port timings */ + + std_info->eav2sav = V4L2_DV_BT_BLANKING_WIDTH(bt) - 8; + std_info->sav2eav = bt->width; + + std_info->l1 = 1; + std_info->l3 = bt->vsync + bt->vbackporch + 1; + + std_info->vsize = V4L2_DV_BT_FRAME_HEIGHT(bt); + if (bt->interlaced) { + if (bt->il_vbackporch || bt->il_vfrontporch || bt->il_vsync) { + std_info->l5 = std_info->vsize/2 - + (bt->vfrontporch - 1); + std_info->l7 = std_info->vsize/2 + 1; + std_info->l9 = std_info->l7 + bt->il_vsync + + bt->il_vbackporch + 1; + std_info->l11 = std_info->vsize - + (bt->il_vfrontporch - 1); + } else { + vpif_dbg(2, debug, "Required timing values for interlaced BT format missing\n"); + return -EINVAL; + } + } else { + std_info->l5 = std_info->vsize - (bt->vfrontporch - 1); + } + strscpy(std_info->name, "Custom timings BT656/1120", + sizeof(std_info->name)); + std_info->width = bt->width; + std_info->height = bt->height; + std_info->frm_fmt = bt->interlaced ? 
0 : 1; + std_info->ycmux_mode = 0; + std_info->capture_format = 0; + std_info->vbi_supported = 0; + std_info->hd_sd = 1; + std_info->stdid = 0; + vid_ch->stdid = 0; + + return 0; +} + +/** + * vpif_g_dv_timings() - G_DV_TIMINGS handler + * @file: file ptr + * @priv: file handle + * @timings: digital video timings + */ +static int vpif_g_dv_timings(struct file *file, void *priv, + struct v4l2_dv_timings *timings) +{ + struct vpif_display_config *config = vpif_dev->platform_data; + struct video_device *vdev = video_devdata(file); + struct channel_obj *ch = video_get_drvdata(vdev); + struct vpif_display_chan_config *chan_cfg; + struct video_obj *vid_ch = &ch->video; + struct v4l2_output output; + + if (!config->chan_config[ch->channel_id].outputs) + goto error; + + chan_cfg = &config->chan_config[ch->channel_id]; + output = chan_cfg->outputs[ch->output_idx].output; + + if (output.capabilities != V4L2_OUT_CAP_DV_TIMINGS) + goto error; + + *timings = vid_ch->dv_timings; + + return 0; +error: + return -ENODATA; +} + +/* + * vpif_log_status() - Status information + * @file: file ptr + * @priv: file handle + * + * Returns zero. + */ +static int vpif_log_status(struct file *filep, void *priv) +{ + /* status for sub devices */ + v4l2_device_call_all(&vpif_obj.v4l2_dev, 0, core, log_status); + + return 0; +} + +/* vpif display ioctl operations */ +static const struct v4l2_ioctl_ops vpif_ioctl_ops = { + .vidioc_querycap = vpif_querycap, + .vidioc_enum_fmt_vid_out = vpif_enum_fmt_vid_out, + .vidioc_g_fmt_vid_out = vpif_g_fmt_vid_out, + .vidioc_s_fmt_vid_out = vpif_s_fmt_vid_out, + .vidioc_try_fmt_vid_out = vpif_try_fmt_vid_out, + + .vidioc_reqbufs = vb2_ioctl_reqbufs, + .vidioc_create_bufs = vb2_ioctl_create_bufs, + .vidioc_querybuf = vb2_ioctl_querybuf, + .vidioc_qbuf = vb2_ioctl_qbuf, + .vidioc_dqbuf = vb2_ioctl_dqbuf, + .vidioc_expbuf = vb2_ioctl_expbuf, + .vidioc_streamon = vb2_ioctl_streamon, + .vidioc_streamoff = vb2_ioctl_streamoff, + + .vidioc_s_std = vpif_s_std, + .vidioc_g_std = vpif_g_std, + + .vidioc_enum_output = vpif_enum_output, + .vidioc_s_output = vpif_s_output, + .vidioc_g_output = vpif_g_output, + + .vidioc_enum_dv_timings = vpif_enum_dv_timings, + .vidioc_s_dv_timings = vpif_s_dv_timings, + .vidioc_g_dv_timings = vpif_g_dv_timings, + + .vidioc_log_status = vpif_log_status, +}; + +static const struct v4l2_file_operations vpif_fops = { + .owner = THIS_MODULE, + .open = v4l2_fh_open, + .release = vb2_fop_release, + .unlocked_ioctl = video_ioctl2, + .mmap = vb2_fop_mmap, + .poll = vb2_fop_poll +}; + +/*Configure the channels, buffer sizei, request irq */ +static int initialize_vpif(void) +{ + int free_channel_objects_index; + int err, i, j; + + /* Allocate memory for six channel objects */ + for (i = 0; i < VPIF_DISPLAY_MAX_DEVICES; i++) { + vpif_obj.dev[i] = + kzalloc(sizeof(struct channel_obj), GFP_KERNEL); + /* If memory allocation fails, return error */ + if (!vpif_obj.dev[i]) { + free_channel_objects_index = i; + err = -ENOMEM; + goto vpif_init_free_channel_objects; + } + } + + return 0; + +vpif_init_free_channel_objects: + for (j = 0; j < free_channel_objects_index; j++) + kfree(vpif_obj.dev[j]); + return err; +} + +static void free_vpif_objs(void) +{ + int i; + + for (i = 0; i < VPIF_DISPLAY_MAX_DEVICES; i++) + kfree(vpif_obj.dev[i]); +} + +static int vpif_probe_complete(void) +{ + struct common_obj *common; + struct video_device *vdev; + struct channel_obj *ch; + struct vb2_queue *q; + int j, err, k; + + for (j = 0; j < VPIF_DISPLAY_MAX_DEVICES; j++) { + ch = vpif_obj.dev[j]; + 
/* Initialize field of the channel objects */ + for (k = 0; k < VPIF_NUMOBJECTS; k++) { + common = &ch->common[k]; + spin_lock_init(&common->irqlock); + mutex_init(&common->lock); + common->set_addr = NULL; + common->ytop_off = 0; + common->ybtm_off = 0; + common->ctop_off = 0; + common->cbtm_off = 0; + common->cur_frm = NULL; + common->next_frm = NULL; + memset(&common->fmt, 0, sizeof(common->fmt)); + } + ch->initialized = 0; + if (vpif_obj.config->subdev_count) + ch->sd = vpif_obj.sd[0]; + ch->channel_id = j; + + memset(&ch->vpifparams, 0, sizeof(ch->vpifparams)); + + ch->common[VPIF_VIDEO_INDEX].fmt.type = + V4L2_BUF_TYPE_VIDEO_OUTPUT; + + /* select output 0 */ + err = vpif_set_output(vpif_obj.config, ch, 0); + if (err) + goto probe_out; + + /* set initial format */ + ch->video.stdid = V4L2_STD_525_60; + memset(&ch->video.dv_timings, 0, sizeof(ch->video.dv_timings)); + vpif_update_resolution(ch); + + /* Initialize vb2 queue */ + q = &common->buffer_queue; + q->type = V4L2_BUF_TYPE_VIDEO_OUTPUT; + q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF; + q->drv_priv = ch; + q->ops = &video_qops; + q->mem_ops = &vb2_dma_contig_memops; + q->buf_struct_size = sizeof(struct vpif_disp_buffer); + q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; + q->min_buffers_needed = 1; + q->lock = &common->lock; + q->dev = vpif_dev; + err = vb2_queue_init(q); + if (err) { + vpif_err("vpif_display: vb2_queue_init() failed\n"); + goto probe_out; + } + + INIT_LIST_HEAD(&common->dma_queue); + + /* register video device */ + vpif_dbg(1, debug, "channel=%p,channel->video_dev=%p\n", + ch, &ch->video_dev); + + /* Initialize the video_device structure */ + vdev = &ch->video_dev; + strscpy(vdev->name, VPIF_DRIVER_NAME, sizeof(vdev->name)); + vdev->release = video_device_release_empty; + vdev->fops = &vpif_fops; + vdev->ioctl_ops = &vpif_ioctl_ops; + vdev->v4l2_dev = &vpif_obj.v4l2_dev; + vdev->vfl_dir = VFL_DIR_TX; + vdev->queue = q; + vdev->lock = &common->lock; + vdev->device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING; + video_set_drvdata(&ch->video_dev, ch); + err = video_register_device(vdev, VFL_TYPE_VIDEO, + (j ? 3 : 2)); + if (err < 0) + goto probe_out; + } + + return 0; + +probe_out: + for (k = 0; k < j; k++) { + ch = vpif_obj.dev[k]; + video_unregister_device(&ch->video_dev); + } + return err; +} + +/* + * vpif_probe: This function creates device entries by register itself to the + * V4L2 driver and initializes fields of each channel objects + */ +static __init int vpif_probe(struct platform_device *pdev) +{ + struct vpif_subdev_info *subdevdata; + struct i2c_adapter *i2c_adap; + int subdev_count; + int res_idx = 0; + int i, err; + + if (!pdev->dev.platform_data) { + dev_warn(&pdev->dev, "Missing platform data. 
Giving up.\n"); + return -EINVAL; + } + + vpif_dev = &pdev->dev; + err = initialize_vpif(); + + if (err) { + v4l2_err(vpif_dev->driver, "Error initializing vpif\n"); + return err; + } + + err = v4l2_device_register(vpif_dev, &vpif_obj.v4l2_dev); + if (err) { + v4l2_err(vpif_dev->driver, "Error registering v4l2 device\n"); + goto vpif_free; + } + + do { + int irq; + + err = platform_get_irq_optional(pdev, res_idx); + if (err < 0 && err != -ENXIO) + goto vpif_unregister; + if (err > 0) + irq = err; + else + break; + + err = devm_request_irq(&pdev->dev, irq, vpif_channel_isr, + IRQF_SHARED, VPIF_DRIVER_NAME, + (void *)(&vpif_obj.dev[res_idx]->channel_id)); + if (err) { + vpif_err("VPIF IRQ request failed\n"); + goto vpif_unregister; + } + } while (++res_idx); + + vpif_obj.config = pdev->dev.platform_data; + subdev_count = vpif_obj.config->subdev_count; + subdevdata = vpif_obj.config->subdevinfo; + vpif_obj.sd = kcalloc(subdev_count, sizeof(*vpif_obj.sd), GFP_KERNEL); + if (!vpif_obj.sd) { + err = -ENOMEM; + goto vpif_unregister; + } + + i2c_adap = i2c_get_adapter(vpif_obj.config->i2c_adapter_id); + for (i = 0; i < subdev_count; i++) { + vpif_obj.sd[i] = + v4l2_i2c_new_subdev_board(&vpif_obj.v4l2_dev, + i2c_adap, + &subdevdata[i].board_info, + NULL); + if (!vpif_obj.sd[i]) { + vpif_err("Error registering v4l2 subdevice\n"); + err = -ENODEV; + goto probe_subdev_out; + } + + vpif_obj.sd[i]->grp_id = 1 << i; + } + err = vpif_probe_complete(); + if (err) + goto probe_subdev_out; + + return 0; + +probe_subdev_out: + kfree(vpif_obj.sd); +vpif_unregister: + v4l2_device_unregister(&vpif_obj.v4l2_dev); +vpif_free: + free_vpif_objs(); + + return err; +} + +/* + * vpif_remove: It un-register channels from V4L2 driver + */ +static void vpif_remove(struct platform_device *device) +{ + struct channel_obj *ch; + int i; + + v4l2_device_unregister(&vpif_obj.v4l2_dev); + + kfree(vpif_obj.sd); + /* un-register device */ + for (i = 0; i < VPIF_DISPLAY_MAX_DEVICES; i++) { + /* Get the pointer to the channel object */ + ch = vpif_obj.dev[i]; + /* Unregister video device */ + video_unregister_device(&ch->video_dev); + } + free_vpif_objs(); +} + +#ifdef CONFIG_PM_SLEEP +static int vpif_suspend(struct device *dev) +{ + struct common_obj *common; + struct channel_obj *ch; + int i; + + for (i = 0; i < VPIF_DISPLAY_MAX_DEVICES; i++) { + /* Get the pointer to the channel object */ + ch = vpif_obj.dev[i]; + common = &ch->common[VPIF_VIDEO_INDEX]; + + if (!vb2_start_streaming_called(&common->buffer_queue)) + continue; + + mutex_lock(&common->lock); + /* Disable channel */ + if (ch->channel_id == VPIF_CHANNEL2_VIDEO) { + enable_channel2(0); + channel2_intr_enable(0); + } + if (ch->channel_id == VPIF_CHANNEL3_VIDEO || + ycmux_mode == 2) { + enable_channel3(0); + channel3_intr_enable(0); + } + mutex_unlock(&common->lock); + } + + return 0; +} + +static int vpif_resume(struct device *dev) +{ + + struct common_obj *common; + struct channel_obj *ch; + int i; + + for (i = 0; i < VPIF_DISPLAY_MAX_DEVICES; i++) { + /* Get the pointer to the channel object */ + ch = vpif_obj.dev[i]; + common = &ch->common[VPIF_VIDEO_INDEX]; + + if (!vb2_start_streaming_called(&common->buffer_queue)) + continue; + + mutex_lock(&common->lock); + /* Enable channel */ + if (ch->channel_id == VPIF_CHANNEL2_VIDEO) { + enable_channel2(1); + channel2_intr_enable(1); + } + if (ch->channel_id == VPIF_CHANNEL3_VIDEO || + ycmux_mode == 2) { + enable_channel3(1); + channel3_intr_enable(1); + } + mutex_unlock(&common->lock); + } + + return 0; +} + +#endif + 
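
As an illustration (not part of the patch itself), here is a minimal userspace sketch of how an application would drive one of the display nodes that vpif_probe_complete() registers above, exercising the S_STD, S_FMT, REQBUFS and STREAMON handlers wired into vpif_ioctl_ops. The "/dev/video2" path is an assumption (the node number depends on which other video devices are present), and buffer mmap()/queueing plus error handling are omitted for brevity.

/*
 * Hypothetical usage sketch only. Assumes the VPIF display node is
 * /dev/video2; adjust for the actual device enumeration on the board.
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_format fmt;
	struct v4l2_requestbuffers req;
	v4l2_std_id std = V4L2_STD_525_60;
	enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
	int fd = open("/dev/video2", O_RDWR);

	if (fd < 0)
		return 1;

	/* Select an SDTV output standard; handled by vpif_s_std() */
	ioctl(fd, VIDIOC_S_STD, &std);

	/* The driver corrects the format to YUV422P at the standard's size */
	memset(&fmt, 0, sizeof(fmt));
	fmt.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
	fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUV422P;
	ioctl(fd, VIDIOC_S_FMT, &fmt);

	/* vpif_buffer_queue_setup() raises the count to at least 3 buffers */
	memset(&req, 0, sizeof(req));
	req.count = 3;
	req.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
	req.memory = V4L2_MEMORY_MMAP;
	ioctl(fd, VIDIOC_REQBUFS, &req);

	/* Frames would be mmap()ed and queued with VIDIOC_QBUF here;
	 * output starts once a buffer is queued and streaming is on. */
	ioctl(fd, VIDIOC_STREAMON, &type);

	ioctl(fd, VIDIOC_STREAMOFF, &type);
	close(fd);
	return 0;
}

The same sequence applies to the capture nodes added earlier in this patch, substituting V4L2_BUF_TYPE_VIDEO_CAPTURE and using VIDIOC_DQBUF to retrieve filled frames.
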
+static SIMPLE_DEV_PM_OPS(vpif_pm_ops, vpif_suspend, vpif_resume); + +static __refdata struct platform_driver vpif_driver = { + .driver = { + .name = VPIF_DRIVER_NAME, + .pm = &vpif_pm_ops, + }, + .probe = vpif_probe, + .remove_new = vpif_remove, +}; + +module_platform_driver(vpif_driver); diff --git a/drivers/media/platform/ti/davinci/vpif_display.h b/drivers/media/platform/ti/davinci/vpif_display.h new file mode 100644 index 0000000000..dae20053dd --- /dev/null +++ b/drivers/media/platform/ti/davinci/vpif_display.h @@ -0,0 +1,116 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * VPIF display header file + * + * Copyright (C) 2009 Texas Instruments Incorporated - https://www.ti.com/ + */ + +#ifndef VPIF_DISPLAY_H +#define VPIF_DISPLAY_H + +/* Header files */ +#include +#include + +#include "vpif.h" + +/* Macros */ +#define VPIF_DISPLAY_VERSION "0.0.2" + +#define VPIF_VALID_FIELD(field) \ + (((V4L2_FIELD_ANY == field) || (V4L2_FIELD_NONE == field)) || \ + (((V4L2_FIELD_INTERLACED == field) || (V4L2_FIELD_SEQ_TB == field)) || \ + (V4L2_FIELD_SEQ_BT == field))) + +#define VPIF_DISPLAY_MAX_DEVICES (2) +#define VPIF_SLICED_BUF_SIZE (256) +#define VPIF_SLICED_MAX_SERVICES (3) +#define VPIF_VIDEO_INDEX (0) +#define VPIF_VBI_INDEX (1) +#define VPIF_HBI_INDEX (2) + +/* Setting it to 1 as HBI/VBI support yet to be added , else 3*/ +#define VPIF_NUMOBJECTS (1) + +/* Macros */ +#define ISALIGNED(a) (0 == ((a) & 7)) + +/* enumerated data types */ +/* Enumerated data type to give id to each device per channel */ +enum vpif_channel_id { + VPIF_CHANNEL2_VIDEO = 0, /* Channel2 Video */ + VPIF_CHANNEL3_VIDEO, /* Channel3 Video */ +}; + +/* structures */ + +struct video_obj { + enum v4l2_field buf_field; + u32 latest_only; /* indicate whether to return + * most recent displayed frame only */ + v4l2_std_id stdid; /* Currently selected or default + * standard */ + struct v4l2_dv_timings dv_timings; +}; + +struct vpif_disp_buffer { + struct vb2_v4l2_buffer vb; + struct list_head list; +}; + +struct common_obj { + struct vpif_disp_buffer *cur_frm; /* Pointer pointing to current + * vb2_buffer */ + struct vpif_disp_buffer *next_frm; /* Pointer pointing to next + * vb2_buffer */ + struct v4l2_format fmt; /* Used to store the format */ + struct vb2_queue buffer_queue; /* Buffer queue used in vb2 */ + + struct list_head dma_queue; /* Queue of filled frames */ + spinlock_t irqlock; /* Used for video buffer + * handling */ + + /* channel specific parameters */ + struct mutex lock; /* lock used to access this + * structure */ + u32 ytop_off; /* offset of Y top from the + * starting of the buffer */ + u32 ybtm_off; /* offset of Y bottom from the + * starting of the buffer */ + u32 ctop_off; /* offset of C top from the + * starting of the buffer */ + u32 cbtm_off; /* offset of C bottom from the + * starting of the buffer */ + /* Function pointer to set the addresses */ + void (*set_addr)(unsigned long, unsigned long, + unsigned long, unsigned long); + u32 height; + u32 width; +}; + +struct channel_obj { + /* V4l2 specific parameters */ + struct video_device video_dev; /* Identifies video device for + * this channel */ + u32 field_id; /* Indicates id of the field + * which is being displayed */ + u8 initialized; /* flag to indicate whether + * encoder is initialized */ + u32 output_idx; /* Current output index */ + struct v4l2_subdev *sd; /* Current output subdev(may be NULL) */ + + enum vpif_channel_id channel_id;/* Identifies channel */ + struct vpif_params vpifparams; + struct common_obj common[VPIF_NUMOBJECTS]; 
+ struct video_obj video; +}; + +/* vpif device structure */ +struct vpif_device { + struct v4l2_device v4l2_dev; + struct channel_obj *dev[VPIF_DISPLAY_NUM_CHANNELS]; + struct v4l2_subdev **sd; + struct vpif_display_config *config; +}; + +#endif /* VPIF_DISPLAY_H */ diff --git a/drivers/media/platform/ti/omap/Kconfig b/drivers/media/platform/ti/omap/Kconfig new file mode 100644 index 0000000000..a9dbe10977 --- /dev/null +++ b/drivers/media/platform/ti/omap/Kconfig @@ -0,0 +1,17 @@ +# SPDX-License-Identifier: GPL-2.0-only +config VIDEO_OMAP2_VOUT_VRFB + bool + default y + depends on VIDEO_OMAP2_VOUT && (OMAP2_VRFB || COMPILE_TEST) + +config VIDEO_OMAP2_VOUT + tristate "OMAP2/OMAP3 V4L2-Display driver" + depends on V4L_PLATFORM_DRIVERS + depends on MMU + depends on FB_OMAP2 || (COMPILE_TEST && FB_OMAP2=n) + depends on ARCH_OMAP2 || ARCH_OMAP3 || COMPILE_TEST + depends on VIDEO_DEV + select VIDEOBUF2_DMA_CONTIG + select OMAP2_VRFB if ARCH_OMAP2 || ARCH_OMAP3 + help + V4L2 Display driver support for OMAP2/3 based boards. diff --git a/drivers/media/platform/ti/omap/Makefile b/drivers/media/platform/ti/omap/Makefile new file mode 100644 index 0000000000..b17a0ac10c --- /dev/null +++ b/drivers/media/platform/ti/omap/Makefile @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Makefile for the omap video device drivers. +# + +# OMAP2/3 Display driver +omap-vout-y += omap_vout.o omap_voutlib.o +omap-vout-$(CONFIG_VIDEO_OMAP2_VOUT_VRFB) += omap_vout_vrfb.o +obj-$(CONFIG_VIDEO_OMAP2_VOUT) += omap-vout.o diff --git a/drivers/media/platform/ti/omap/omap_vout.c b/drivers/media/platform/ti/omap/omap_vout.c new file mode 100644 index 0000000000..4143274089 --- /dev/null +++ b/drivers/media/platform/ti/omap/omap_vout.c @@ -0,0 +1,1741 @@ +/* + * omap_vout.c + * + * Copyright (C) 2005-2010 Texas Instruments. + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. + * + * Leveraged code from the OMAP2 camera driver + * Video-for-Linux (Version 2) camera capture driver for + * the OMAP24xx camera controller. + * + * Author: Andy Lowe (source@mvista.com) + * + * Copyright (C) 2004 MontaVista Software, Inc. + * Copyright (C) 2010 Texas Instruments. + * + * History: + * 20-APR-2006 Khasim Modified VRFB based Rotation, + * The image data is always read from 0 degree + * view and written + * to the virtual space of desired rotation angle + * 4-DEC-2006 Jian Changed to support better memory management + * + * 17-Nov-2008 Hardik Changed driver to use video_ioctl2 + * + * 23-Feb-2010 Vaibhav H Modified to use new DSS2 interface + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include