From ace9429bb58fd418f0c81d4c2835699bddf6bde6 Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Thu, 11 Apr 2024 10:27:49 +0200 Subject: Adding upstream version 6.6.15. Signed-off-by: Daniel Baumann --- drivers/media/platform/ti/cal/Makefile | 3 + drivers/media/platform/ti/cal/cal-camerarx.c | 895 +++++++++++++++++ drivers/media/platform/ti/cal/cal-video.c | 1062 ++++++++++++++++++++ drivers/media/platform/ti/cal/cal.c | 1343 ++++++++++++++++++++++++++ drivers/media/platform/ti/cal/cal.h | 339 +++++++ drivers/media/platform/ti/cal/cal_regs.h | 463 +++++++++ 6 files changed, 4105 insertions(+) create mode 100644 drivers/media/platform/ti/cal/Makefile create mode 100644 drivers/media/platform/ti/cal/cal-camerarx.c create mode 100644 drivers/media/platform/ti/cal/cal-video.c create mode 100644 drivers/media/platform/ti/cal/cal.c create mode 100644 drivers/media/platform/ti/cal/cal.h create mode 100644 drivers/media/platform/ti/cal/cal_regs.h (limited to 'drivers/media/platform/ti/cal') diff --git a/drivers/media/platform/ti/cal/Makefile b/drivers/media/platform/ti/cal/Makefile new file mode 100644 index 0000000000..45ac35585f --- /dev/null +++ b/drivers/media/platform/ti/cal/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_VIDEO_TI_CAL) += ti-cal.o +ti-cal-y := cal.o cal-camerarx.o cal-video.o diff --git a/drivers/media/platform/ti/cal/cal-camerarx.c b/drivers/media/platform/ti/cal/cal-camerarx.c new file mode 100644 index 0000000000..1a4273bbe7 --- /dev/null +++ b/drivers/media/platform/ti/cal/cal-camerarx.c @@ -0,0 +1,895 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * TI Camera Access Layer (CAL) - CAMERARX + * + * Copyright (c) 2015-2020 Texas Instruments Inc. + * + * Authors: + * Benoit Parrot + * Laurent Pinchart + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "cal.h" +#include "cal_regs.h" + +/* ------------------------------------------------------------------ + * I/O Register Accessors + * ------------------------------------------------------------------ + */ + +static inline u32 camerarx_read(struct cal_camerarx *phy, u32 offset) +{ + return ioread32(phy->base + offset); +} + +static inline void camerarx_write(struct cal_camerarx *phy, u32 offset, u32 val) +{ + iowrite32(val, phy->base + offset); +} + +/* ------------------------------------------------------------------ + * CAMERARX Management + * ------------------------------------------------------------------ + */ + +static s64 cal_camerarx_get_ext_link_freq(struct cal_camerarx *phy) +{ + struct v4l2_mbus_config_mipi_csi2 *mipi_csi2 = &phy->endpoint.bus.mipi_csi2; + u32 num_lanes = mipi_csi2->num_data_lanes; + const struct cal_format_info *fmtinfo; + struct v4l2_subdev_state *state; + struct v4l2_mbus_framefmt *fmt; + u32 bpp; + s64 freq; + + state = v4l2_subdev_get_locked_active_state(&phy->subdev); + + fmt = v4l2_subdev_get_pad_format(&phy->subdev, state, CAL_CAMERARX_PAD_SINK); + + fmtinfo = cal_format_by_code(fmt->code); + if (!fmtinfo) + return -EINVAL; + + bpp = fmtinfo->bpp; + + freq = v4l2_get_link_freq(phy->source->ctrl_handler, bpp, 2 * num_lanes); + if (freq < 0) { + phy_err(phy, "failed to get link freq for subdev '%s'\n", + phy->source->name); + return freq; + } + + phy_dbg(3, phy, "Source Link Freq: %llu\n", freq); + + return freq; +} + +static void cal_camerarx_lane_config(struct cal_camerarx *phy) +{ + u32 val = cal_read(phy->cal, CAL_CSI2_COMPLEXIO_CFG(phy->instance)); + u32 lane_mask = 
CAL_CSI2_COMPLEXIO_CFG_CLOCK_POSITION_MASK; + u32 polarity_mask = CAL_CSI2_COMPLEXIO_CFG_CLOCK_POL_MASK; + struct v4l2_mbus_config_mipi_csi2 *mipi_csi2 = + &phy->endpoint.bus.mipi_csi2; + int lane; + + cal_set_field(&val, mipi_csi2->clock_lane + 1, lane_mask); + cal_set_field(&val, mipi_csi2->lane_polarities[0], polarity_mask); + for (lane = 0; lane < mipi_csi2->num_data_lanes; lane++) { + /* + * Every lane are one nibble apart starting with the + * clock followed by the data lanes so shift masks by 4. + */ + lane_mask <<= 4; + polarity_mask <<= 4; + cal_set_field(&val, mipi_csi2->data_lanes[lane] + 1, lane_mask); + cal_set_field(&val, mipi_csi2->lane_polarities[lane + 1], + polarity_mask); + } + + cal_write(phy->cal, CAL_CSI2_COMPLEXIO_CFG(phy->instance), val); + phy_dbg(3, phy, "CAL_CSI2_COMPLEXIO_CFG(%d) = 0x%08x\n", + phy->instance, val); +} + +static void cal_camerarx_enable(struct cal_camerarx *phy) +{ + u32 num_lanes = phy->cal->data->camerarx[phy->instance].num_lanes; + + regmap_field_write(phy->fields[F_CAMMODE], 0); + /* Always enable all lanes at the phy control level */ + regmap_field_write(phy->fields[F_LANEENABLE], (1 << num_lanes) - 1); + /* F_CSI_MODE is not present on every architecture */ + if (phy->fields[F_CSI_MODE]) + regmap_field_write(phy->fields[F_CSI_MODE], 1); + regmap_field_write(phy->fields[F_CTRLCLKEN], 1); +} + +void cal_camerarx_disable(struct cal_camerarx *phy) +{ + regmap_field_write(phy->fields[F_CTRLCLKEN], 0); +} + +/* + * TCLK values are OK at their reset values + */ +#define TCLK_TERM 0 +#define TCLK_MISS 1 +#define TCLK_SETTLE 14 + +static void cal_camerarx_config(struct cal_camerarx *phy, s64 link_freq) +{ + unsigned int reg0, reg1; + unsigned int ths_term, ths_settle; + + /* DPHY timing configuration */ + + /* THS_TERM: Programmed value = floor(20 ns/DDRClk period) */ + ths_term = div_s64(20 * link_freq, 1000 * 1000 * 1000); + phy_dbg(1, phy, "ths_term: %d (0x%02x)\n", ths_term, ths_term); + + /* THS_SETTLE: Programmed value = floor(105 ns/DDRClk period) + 4 */ + ths_settle = div_s64(105 * link_freq, 1000 * 1000 * 1000) + 4; + phy_dbg(1, phy, "ths_settle: %d (0x%02x)\n", ths_settle, ths_settle); + + reg0 = camerarx_read(phy, CAL_CSI2_PHY_REG0); + cal_set_field(®0, CAL_CSI2_PHY_REG0_HSCLOCKCONFIG_DISABLE, + CAL_CSI2_PHY_REG0_HSCLOCKCONFIG_MASK); + cal_set_field(®0, ths_term, CAL_CSI2_PHY_REG0_THS_TERM_MASK); + cal_set_field(®0, ths_settle, CAL_CSI2_PHY_REG0_THS_SETTLE_MASK); + + phy_dbg(1, phy, "CSI2_%d_REG0 = 0x%08x\n", phy->instance, reg0); + camerarx_write(phy, CAL_CSI2_PHY_REG0, reg0); + + reg1 = camerarx_read(phy, CAL_CSI2_PHY_REG1); + cal_set_field(®1, TCLK_TERM, CAL_CSI2_PHY_REG1_TCLK_TERM_MASK); + cal_set_field(®1, 0xb8, CAL_CSI2_PHY_REG1_DPHY_HS_SYNC_PATTERN_MASK); + cal_set_field(®1, TCLK_MISS, + CAL_CSI2_PHY_REG1_CTRLCLK_DIV_FACTOR_MASK); + cal_set_field(®1, TCLK_SETTLE, CAL_CSI2_PHY_REG1_TCLK_SETTLE_MASK); + + phy_dbg(1, phy, "CSI2_%d_REG1 = 0x%08x\n", phy->instance, reg1); + camerarx_write(phy, CAL_CSI2_PHY_REG1, reg1); +} + +static void cal_camerarx_power(struct cal_camerarx *phy, bool enable) +{ + u32 target_state; + unsigned int i; + + target_state = enable ? 
CAL_CSI2_COMPLEXIO_CFG_PWR_CMD_STATE_ON : + CAL_CSI2_COMPLEXIO_CFG_PWR_CMD_STATE_OFF; + + cal_write_field(phy->cal, CAL_CSI2_COMPLEXIO_CFG(phy->instance), + target_state, CAL_CSI2_COMPLEXIO_CFG_PWR_CMD_MASK); + + for (i = 0; i < 10; i++) { + u32 current_state; + + current_state = cal_read_field(phy->cal, + CAL_CSI2_COMPLEXIO_CFG(phy->instance), + CAL_CSI2_COMPLEXIO_CFG_PWR_STATUS_MASK); + + if (current_state == target_state) + break; + + usleep_range(1000, 1100); + } + + if (i == 10) + phy_err(phy, "Failed to power %s complexio\n", + enable ? "up" : "down"); +} + +static void cal_camerarx_wait_reset(struct cal_camerarx *phy) +{ + unsigned long timeout; + + timeout = jiffies + msecs_to_jiffies(750); + while (time_before(jiffies, timeout)) { + if (cal_read_field(phy->cal, + CAL_CSI2_COMPLEXIO_CFG(phy->instance), + CAL_CSI2_COMPLEXIO_CFG_RESET_DONE_MASK) == + CAL_CSI2_COMPLEXIO_CFG_RESET_DONE_RESETCOMPLETED) + break; + usleep_range(500, 5000); + } + + if (cal_read_field(phy->cal, CAL_CSI2_COMPLEXIO_CFG(phy->instance), + CAL_CSI2_COMPLEXIO_CFG_RESET_DONE_MASK) != + CAL_CSI2_COMPLEXIO_CFG_RESET_DONE_RESETCOMPLETED) + phy_err(phy, "Timeout waiting for Complex IO reset done\n"); +} + +static void cal_camerarx_wait_stop_state(struct cal_camerarx *phy) +{ + unsigned long timeout; + + timeout = jiffies + msecs_to_jiffies(750); + while (time_before(jiffies, timeout)) { + if (cal_read_field(phy->cal, + CAL_CSI2_TIMING(phy->instance), + CAL_CSI2_TIMING_FORCE_RX_MODE_IO1_MASK) == 0) + break; + usleep_range(500, 5000); + } + + if (cal_read_field(phy->cal, CAL_CSI2_TIMING(phy->instance), + CAL_CSI2_TIMING_FORCE_RX_MODE_IO1_MASK) != 0) + phy_err(phy, "Timeout waiting for stop state\n"); +} + +static void cal_camerarx_enable_irqs(struct cal_camerarx *phy) +{ + const u32 cio_err_mask = + CAL_CSI2_COMPLEXIO_IRQ_LANE_ERRORS_MASK | + CAL_CSI2_COMPLEXIO_IRQ_FIFO_OVR_MASK | + CAL_CSI2_COMPLEXIO_IRQ_SHORT_PACKET_MASK | + CAL_CSI2_COMPLEXIO_IRQ_ECC_NO_CORRECTION_MASK; + const u32 vc_err_mask = + CAL_CSI2_VC_IRQ_CS_IRQ_MASK(0) | + CAL_CSI2_VC_IRQ_CS_IRQ_MASK(1) | + CAL_CSI2_VC_IRQ_CS_IRQ_MASK(2) | + CAL_CSI2_VC_IRQ_CS_IRQ_MASK(3) | + CAL_CSI2_VC_IRQ_ECC_CORRECTION_IRQ_MASK(0) | + CAL_CSI2_VC_IRQ_ECC_CORRECTION_IRQ_MASK(1) | + CAL_CSI2_VC_IRQ_ECC_CORRECTION_IRQ_MASK(2) | + CAL_CSI2_VC_IRQ_ECC_CORRECTION_IRQ_MASK(3); + + /* Enable CIO & VC error IRQs. 
*/ + cal_write(phy->cal, CAL_HL_IRQENABLE_SET(0), + CAL_HL_IRQ_CIO_MASK(phy->instance) | + CAL_HL_IRQ_VC_MASK(phy->instance)); + cal_write(phy->cal, CAL_CSI2_COMPLEXIO_IRQENABLE(phy->instance), + cio_err_mask); + cal_write(phy->cal, CAL_CSI2_VC_IRQENABLE(phy->instance), + vc_err_mask); +} + +static void cal_camerarx_disable_irqs(struct cal_camerarx *phy) +{ + /* Disable CIO error irqs */ + cal_write(phy->cal, CAL_HL_IRQENABLE_CLR(0), + CAL_HL_IRQ_CIO_MASK(phy->instance) | + CAL_HL_IRQ_VC_MASK(phy->instance)); + cal_write(phy->cal, CAL_CSI2_COMPLEXIO_IRQENABLE(phy->instance), 0); + cal_write(phy->cal, CAL_CSI2_VC_IRQENABLE(phy->instance), 0); +} + +static void cal_camerarx_ppi_enable(struct cal_camerarx *phy) +{ + cal_write_field(phy->cal, CAL_CSI2_PPI_CTRL(phy->instance), + 1, CAL_CSI2_PPI_CTRL_ECC_EN_MASK); + + cal_write_field(phy->cal, CAL_CSI2_PPI_CTRL(phy->instance), + 1, CAL_CSI2_PPI_CTRL_IF_EN_MASK); +} + +static void cal_camerarx_ppi_disable(struct cal_camerarx *phy) +{ + cal_write_field(phy->cal, CAL_CSI2_PPI_CTRL(phy->instance), + 0, CAL_CSI2_PPI_CTRL_IF_EN_MASK); +} + +static int cal_camerarx_start(struct cal_camerarx *phy) +{ + s64 link_freq; + u32 sscounter; + u32 val; + int ret; + + if (phy->enable_count > 0) { + phy->enable_count++; + return 0; + } + + link_freq = cal_camerarx_get_ext_link_freq(phy); + if (link_freq < 0) + return link_freq; + + ret = v4l2_subdev_call(phy->source, core, s_power, 1); + if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV) { + phy_err(phy, "power on failed in subdev\n"); + return ret; + } + + cal_camerarx_enable_irqs(phy); + + /* + * CSI-2 PHY Link Initialization Sequence, according to the DRA74xP / + * DRA75xP / DRA76xP / DRA77xP TRM. The DRA71x / DRA72x and the AM65x / + * DRA80xM TRMs have a slightly simplified sequence. + */ + + /* + * 1. Configure all CSI-2 low level protocol registers to be ready to + * receive signals/data from the CSI-2 PHY. + * + * i.-v. Configure the lanes position and polarity. + */ + cal_camerarx_lane_config(phy); + + /* + * vi.-vii. Configure D-PHY mode, enable the required lanes and + * enable the CAMERARX clock. + */ + cal_camerarx_enable(phy); + + /* + * 2. CSI PHY and link initialization sequence. + * + * a. Deassert the CSI-2 PHY reset. Do not wait for reset completion + * at this point, as it requires the external source to send the + * CSI-2 HS clock. + */ + cal_write_field(phy->cal, CAL_CSI2_COMPLEXIO_CFG(phy->instance), + CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL_OPERATIONAL, + CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL_MASK); + phy_dbg(3, phy, "CAL_CSI2_COMPLEXIO_CFG(%d) = 0x%08x De-assert Complex IO Reset\n", + phy->instance, + cal_read(phy->cal, CAL_CSI2_COMPLEXIO_CFG(phy->instance))); + + /* Dummy read to allow SCP reset to complete. */ + camerarx_read(phy, CAL_CSI2_PHY_REG0); + + /* Program the PHY timing parameters. */ + cal_camerarx_config(phy, link_freq); + + /* + * b. Assert the FORCERXMODE signal. + * + * The stop-state-counter is based on fclk cycles, and we always use + * the x16 and x4 settings, so stop-state-timeout = + * fclk-cycle * 16 * 4 * counter. + * + * Stop-state-timeout must be more than 100us as per CSI-2 spec, so we + * calculate a timeout that's 100us (rounding up). 
+ */ + sscounter = DIV_ROUND_UP(clk_get_rate(phy->cal->fclk), 10000 * 16 * 4); + + val = cal_read(phy->cal, CAL_CSI2_TIMING(phy->instance)); + cal_set_field(&val, 1, CAL_CSI2_TIMING_STOP_STATE_X16_IO1_MASK); + cal_set_field(&val, 1, CAL_CSI2_TIMING_STOP_STATE_X4_IO1_MASK); + cal_set_field(&val, sscounter, + CAL_CSI2_TIMING_STOP_STATE_COUNTER_IO1_MASK); + cal_write(phy->cal, CAL_CSI2_TIMING(phy->instance), val); + phy_dbg(3, phy, "CAL_CSI2_TIMING(%d) = 0x%08x Stop States\n", + phy->instance, + cal_read(phy->cal, CAL_CSI2_TIMING(phy->instance))); + + /* Assert the FORCERXMODE signal. */ + cal_write_field(phy->cal, CAL_CSI2_TIMING(phy->instance), + 1, CAL_CSI2_TIMING_FORCE_RX_MODE_IO1_MASK); + phy_dbg(3, phy, "CAL_CSI2_TIMING(%d) = 0x%08x Force RXMODE\n", + phy->instance, + cal_read(phy->cal, CAL_CSI2_TIMING(phy->instance))); + + /* + * c. Connect pull-down on CSI-2 PHY link (using pad control). + * + * This is not required on DRA71x, DRA72x, AM65x and DRA80xM. Not + * implemented. + */ + + /* + * d. Power up the CSI-2 PHY. + * e. Check whether the state status reaches the ON state. + */ + cal_camerarx_power(phy, true); + + /* + * Start the source to enable the CSI-2 HS clock. We can now wait for + * CSI-2 PHY reset to complete. + */ + ret = v4l2_subdev_call(phy->source, video, s_stream, 1); + if (ret) { + v4l2_subdev_call(phy->source, core, s_power, 0); + cal_camerarx_disable_irqs(phy); + phy_err(phy, "stream on failed in subdev\n"); + return ret; + } + + cal_camerarx_wait_reset(phy); + + /* f. Wait for STOPSTATE=1 for all enabled lane modules. */ + cal_camerarx_wait_stop_state(phy); + + phy_dbg(1, phy, "CSI2_%u_REG1 = 0x%08x (bits 31-28 should be set)\n", + phy->instance, camerarx_read(phy, CAL_CSI2_PHY_REG1)); + + /* + * g. Disable pull-down on CSI-2 PHY link (using pad control). + * + * This is not required on DRA71x, DRA72x, AM65x and DRA80xM. Not + * implemented. + */ + + /* Finally, enable the PHY Protocol Interface (PPI). */ + cal_camerarx_ppi_enable(phy); + + phy->enable_count++; + + return 0; +} + +static void cal_camerarx_stop(struct cal_camerarx *phy) +{ + int ret; + + if (--phy->enable_count > 0) + return; + + cal_camerarx_ppi_disable(phy); + + cal_camerarx_disable_irqs(phy); + + cal_camerarx_power(phy, false); + + /* Assert Complex IO Reset */ + cal_write_field(phy->cal, CAL_CSI2_COMPLEXIO_CFG(phy->instance), + CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL, + CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL_MASK); + + phy_dbg(3, phy, "CAL_CSI2_COMPLEXIO_CFG(%d) = 0x%08x Complex IO in Reset\n", + phy->instance, + cal_read(phy->cal, CAL_CSI2_COMPLEXIO_CFG(phy->instance))); + + /* Disable the phy */ + cal_camerarx_disable(phy); + + if (v4l2_subdev_call(phy->source, video, s_stream, 0)) + phy_err(phy, "stream off failed in subdev\n"); + + ret = v4l2_subdev_call(phy->source, core, s_power, 0); + if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV) + phy_err(phy, "power off failed in subdev\n"); +} + +/* + * Errata i913: CSI2 LDO Needs to be disabled when module is powered on + * + * Enabling CSI2 LDO shorts it to core supply. It is crucial the 2 CSI2 + * LDOs on the device are disabled if CSI-2 module is powered on + * (0x4845 B304 | 0x4845 B384 [28:27] = 0x1) or in ULPS (0x4845 B304 + * | 0x4845 B384 [28:27] = 0x2) mode. Common concerns include: high + * current draw on the module supply in active mode. + * + * Errata does not apply when CSI-2 module is powered off + * (0x4845 B304 | 0x4845 B384 [28:27] = 0x0). 
+ * + * SW Workaround: + * Set the following register bits to disable the LDO, + * which is essentially CSI2 REG10 bit 6: + * + * Core 0: 0x4845 B828 = 0x0000 0040 + * Core 1: 0x4845 B928 = 0x0000 0040 + */ +void cal_camerarx_i913_errata(struct cal_camerarx *phy) +{ + u32 reg10 = camerarx_read(phy, CAL_CSI2_PHY_REG10); + + cal_set_field(®10, 1, CAL_CSI2_PHY_REG10_I933_LDO_DISABLE_MASK); + + phy_dbg(1, phy, "CSI2_%d_REG10 = 0x%08x\n", phy->instance, reg10); + camerarx_write(phy, CAL_CSI2_PHY_REG10, reg10); +} + +static int cal_camerarx_regmap_init(struct cal_dev *cal, + struct cal_camerarx *phy) +{ + const struct cal_camerarx_data *phy_data; + unsigned int i; + + if (!cal->data) + return -EINVAL; + + phy_data = &cal->data->camerarx[phy->instance]; + + for (i = 0; i < F_MAX_FIELDS; i++) { + struct reg_field field = { + .reg = cal->syscon_camerrx_offset, + .lsb = phy_data->fields[i].lsb, + .msb = phy_data->fields[i].msb, + }; + + /* + * Here we update the reg offset with the + * value found in DT + */ + phy->fields[i] = devm_regmap_field_alloc(cal->dev, + cal->syscon_camerrx, + field); + if (IS_ERR(phy->fields[i])) { + cal_err(cal, "Unable to allocate regmap fields\n"); + return PTR_ERR(phy->fields[i]); + } + } + + return 0; +} + +static int cal_camerarx_parse_dt(struct cal_camerarx *phy) +{ + struct v4l2_fwnode_endpoint *endpoint = &phy->endpoint; + char data_lanes[V4L2_MBUS_CSI2_MAX_DATA_LANES * 2]; + struct device_node *ep_node; + unsigned int i; + int ret; + + /* + * Find the endpoint node for the port corresponding to the PHY + * instance, and parse its CSI-2-related properties. + */ + ep_node = of_graph_get_endpoint_by_regs(phy->cal->dev->of_node, + phy->instance, 0); + if (!ep_node) { + /* + * The endpoint is not mandatory, not all PHY instances need to + * be connected in DT. + */ + phy_dbg(3, phy, "Port has no endpoint\n"); + return 0; + } + + endpoint->bus_type = V4L2_MBUS_CSI2_DPHY; + ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(ep_node), endpoint); + if (ret < 0) { + phy_err(phy, "Failed to parse endpoint\n"); + goto done; + } + + for (i = 0; i < endpoint->bus.mipi_csi2.num_data_lanes; i++) { + unsigned int lane = endpoint->bus.mipi_csi2.data_lanes[i]; + + if (lane > 4) { + phy_err(phy, "Invalid position %u for data lane %u\n", + lane, i); + ret = -EINVAL; + goto done; + } + + data_lanes[i*2] = '0' + lane; + data_lanes[i*2+1] = ' '; + } + + data_lanes[i*2-1] = '\0'; + + phy_dbg(3, phy, + "CSI-2 bus: clock lane <%u>, data lanes <%s>, flags 0x%08x\n", + endpoint->bus.mipi_csi2.clock_lane, data_lanes, + endpoint->bus.mipi_csi2.flags); + + /* Retrieve the connected device and store it for later use. 
*/ + phy->source_ep_node = of_graph_get_remote_endpoint(ep_node); + phy->source_node = of_graph_get_port_parent(phy->source_ep_node); + if (!phy->source_node) { + phy_dbg(3, phy, "Can't get remote parent\n"); + of_node_put(phy->source_ep_node); + ret = -EINVAL; + goto done; + } + + phy_dbg(1, phy, "Found connected device %pOFn\n", phy->source_node); + +done: + of_node_put(ep_node); + return ret; +} + +/* ------------------------------------------------------------------ + * V4L2 Subdev Operations + * ------------------------------------------------------------------ + */ + +static inline struct cal_camerarx *to_cal_camerarx(struct v4l2_subdev *sd) +{ + return container_of(sd, struct cal_camerarx, subdev); +} + +static int cal_camerarx_sd_s_stream(struct v4l2_subdev *sd, int enable) +{ + struct cal_camerarx *phy = to_cal_camerarx(sd); + struct v4l2_subdev_state *state; + int ret = 0; + + state = v4l2_subdev_lock_and_get_active_state(sd); + + if (enable) + ret = cal_camerarx_start(phy); + else + cal_camerarx_stop(phy); + + v4l2_subdev_unlock_state(state); + + return ret; +} + +static int cal_camerarx_sd_enum_mbus_code(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_mbus_code_enum *code) +{ + struct cal_camerarx *phy = to_cal_camerarx(sd); + + /* No transcoding, source and sink codes must match. */ + if (cal_rx_pad_is_source(code->pad)) { + struct v4l2_mbus_framefmt *fmt; + + if (code->index > 0) + return -EINVAL; + + fmt = v4l2_subdev_get_pad_format(&phy->subdev, state, + CAL_CAMERARX_PAD_SINK); + code->code = fmt->code; + } else { + if (code->index >= cal_num_formats) + return -EINVAL; + + code->code = cal_formats[code->index].code; + } + + return 0; +} + +static int cal_camerarx_sd_enum_frame_size(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_frame_size_enum *fse) +{ + const struct cal_format_info *fmtinfo; + + if (fse->index > 0) + return -EINVAL; + + /* No transcoding, source and sink formats must match. */ + if (cal_rx_pad_is_source(fse->pad)) { + struct v4l2_mbus_framefmt *fmt; + + fmt = v4l2_subdev_get_pad_format(sd, state, + CAL_CAMERARX_PAD_SINK); + if (fse->code != fmt->code) + return -EINVAL; + + fse->min_width = fmt->width; + fse->max_width = fmt->width; + fse->min_height = fmt->height; + fse->max_height = fmt->height; + } else { + fmtinfo = cal_format_by_code(fse->code); + if (!fmtinfo) + return -EINVAL; + + fse->min_width = CAL_MIN_WIDTH_BYTES * 8 / ALIGN(fmtinfo->bpp, 8); + fse->max_width = CAL_MAX_WIDTH_BYTES * 8 / ALIGN(fmtinfo->bpp, 8); + fse->min_height = CAL_MIN_HEIGHT_LINES; + fse->max_height = CAL_MAX_HEIGHT_LINES; + } + + return 0; +} + +static int cal_camerarx_sd_set_fmt(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + struct v4l2_subdev_format *format) +{ + const struct cal_format_info *fmtinfo; + struct v4l2_mbus_framefmt *fmt; + unsigned int bpp; + + /* No transcoding, source and sink formats must match. */ + if (cal_rx_pad_is_source(format->pad)) + return v4l2_subdev_get_fmt(sd, state, format); + + /* + * Default to the first format if the requested media bus code isn't + * supported. + */ + fmtinfo = cal_format_by_code(format->format.code); + if (!fmtinfo) + fmtinfo = &cal_formats[0]; + + /* Clamp the size, update the code. The colorspace is accepted as-is. 
*/ + bpp = ALIGN(fmtinfo->bpp, 8); + + format->format.width = clamp_t(unsigned int, format->format.width, + CAL_MIN_WIDTH_BYTES * 8 / bpp, + CAL_MAX_WIDTH_BYTES * 8 / bpp); + format->format.height = clamp_t(unsigned int, format->format.height, + CAL_MIN_HEIGHT_LINES, + CAL_MAX_HEIGHT_LINES); + format->format.code = fmtinfo->code; + format->format.field = V4L2_FIELD_NONE; + + /* Store the format and propagate it to the source pad. */ + + fmt = v4l2_subdev_get_pad_format(sd, state, CAL_CAMERARX_PAD_SINK); + *fmt = format->format; + + fmt = v4l2_subdev_get_pad_format(sd, state, + CAL_CAMERARX_PAD_FIRST_SOURCE); + *fmt = format->format; + + return 0; +} + +static int cal_camerarx_sd_init_cfg(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state) +{ + struct v4l2_subdev_format format = { + .which = state ? V4L2_SUBDEV_FORMAT_TRY + : V4L2_SUBDEV_FORMAT_ACTIVE, + .pad = CAL_CAMERARX_PAD_SINK, + .format = { + .width = 640, + .height = 480, + .code = MEDIA_BUS_FMT_UYVY8_1X16, + .field = V4L2_FIELD_NONE, + .colorspace = V4L2_COLORSPACE_SRGB, + .ycbcr_enc = V4L2_YCBCR_ENC_601, + .quantization = V4L2_QUANTIZATION_LIM_RANGE, + .xfer_func = V4L2_XFER_FUNC_SRGB, + }, + }; + + return cal_camerarx_sd_set_fmt(sd, state, &format); +} + +static int cal_camerarx_get_frame_desc(struct v4l2_subdev *sd, unsigned int pad, + struct v4l2_mbus_frame_desc *fd) +{ + struct cal_camerarx *phy = to_cal_camerarx(sd); + struct v4l2_mbus_frame_desc remote_desc; + const struct media_pad *remote_pad; + int ret; + + remote_pad = media_pad_remote_pad_first(&phy->pads[CAL_CAMERARX_PAD_SINK]); + if (!remote_pad) + return -EPIPE; + + ret = v4l2_subdev_call(phy->source, pad, get_frame_desc, + remote_pad->index, &remote_desc); + if (ret) + return ret; + + if (remote_desc.type != V4L2_MBUS_FRAME_DESC_TYPE_CSI2) { + cal_err(phy->cal, + "Frame descriptor does not describe CSI-2 link"); + return -EINVAL; + } + + if (remote_desc.num_entries > 1) + cal_err(phy->cal, + "Multiple streams not supported in remote frame descriptor, using the first one\n"); + + fd->type = V4L2_MBUS_FRAME_DESC_TYPE_CSI2; + fd->num_entries = 1; + fd->entry[0] = remote_desc.entry[0]; + + return 0; +} + +static const struct v4l2_subdev_video_ops cal_camerarx_video_ops = { + .s_stream = cal_camerarx_sd_s_stream, +}; + +static const struct v4l2_subdev_pad_ops cal_camerarx_pad_ops = { + .init_cfg = cal_camerarx_sd_init_cfg, + .enum_mbus_code = cal_camerarx_sd_enum_mbus_code, + .enum_frame_size = cal_camerarx_sd_enum_frame_size, + .get_fmt = v4l2_subdev_get_fmt, + .set_fmt = cal_camerarx_sd_set_fmt, + .get_frame_desc = cal_camerarx_get_frame_desc, +}; + +static const struct v4l2_subdev_ops cal_camerarx_subdev_ops = { + .video = &cal_camerarx_video_ops, + .pad = &cal_camerarx_pad_ops, +}; + +static struct media_entity_operations cal_camerarx_media_ops = { + .link_validate = v4l2_subdev_link_validate, +}; + +/* ------------------------------------------------------------------ + * Create and Destroy + * ------------------------------------------------------------------ + */ + +struct cal_camerarx *cal_camerarx_create(struct cal_dev *cal, + unsigned int instance) +{ + struct platform_device *pdev = to_platform_device(cal->dev); + struct cal_camerarx *phy; + struct v4l2_subdev *sd; + unsigned int i; + int ret; + + phy = devm_kzalloc(cal->dev, sizeof(*phy), GFP_KERNEL); + if (!phy) + return ERR_PTR(-ENOMEM); + + phy->cal = cal; + phy->instance = instance; + + spin_lock_init(&phy->vc_lock); + + phy->res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + (instance == 
0) ? + "cal_rx_core0" : + "cal_rx_core1"); + phy->base = devm_ioremap_resource(cal->dev, phy->res); + if (IS_ERR(phy->base)) { + cal_err(cal, "failed to ioremap\n"); + return ERR_CAST(phy->base); + } + + cal_dbg(1, cal, "ioresource %s at %pa - %pa\n", + phy->res->name, &phy->res->start, &phy->res->end); + + ret = cal_camerarx_regmap_init(cal, phy); + if (ret) + return ERR_PTR(ret); + + ret = cal_camerarx_parse_dt(phy); + if (ret) + return ERR_PTR(ret); + + /* Initialize the V4L2 subdev and media entity. */ + sd = &phy->subdev; + v4l2_subdev_init(sd, &cal_camerarx_subdev_ops); + sd->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE; + sd->flags = V4L2_SUBDEV_FL_HAS_DEVNODE; + snprintf(sd->name, sizeof(sd->name), "CAMERARX%u", instance); + sd->dev = cal->dev; + + phy->pads[CAL_CAMERARX_PAD_SINK].flags = MEDIA_PAD_FL_SINK; + for (i = CAL_CAMERARX_PAD_FIRST_SOURCE; i < CAL_CAMERARX_NUM_PADS; ++i) + phy->pads[i].flags = MEDIA_PAD_FL_SOURCE; + sd->entity.ops = &cal_camerarx_media_ops; + ret = media_entity_pads_init(&sd->entity, ARRAY_SIZE(phy->pads), + phy->pads); + if (ret) + goto err_node_put; + + ret = v4l2_subdev_init_finalize(sd); + if (ret) + goto err_entity_cleanup; + + ret = v4l2_device_register_subdev(&cal->v4l2_dev, sd); + if (ret) + goto err_free_state; + + return phy; + +err_free_state: + v4l2_subdev_cleanup(sd); +err_entity_cleanup: + media_entity_cleanup(&phy->subdev.entity); +err_node_put: + of_node_put(phy->source_ep_node); + of_node_put(phy->source_node); + return ERR_PTR(ret); +} + +void cal_camerarx_destroy(struct cal_camerarx *phy) +{ + if (!phy) + return; + + v4l2_device_unregister_subdev(&phy->subdev); + v4l2_subdev_cleanup(&phy->subdev); + media_entity_cleanup(&phy->subdev.entity); + of_node_put(phy->source_ep_node); + of_node_put(phy->source_node); +} diff --git a/drivers/media/platform/ti/cal/cal-video.c b/drivers/media/platform/ti/cal/cal-video.c new file mode 100644 index 0000000000..a8abcd0fee --- /dev/null +++ b/drivers/media/platform/ti/cal/cal-video.c @@ -0,0 +1,1062 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * TI Camera Access Layer (CAL) - Video Device + * + * Copyright (c) 2015-2020 Texas Instruments Inc. 
+ * + * Authors: + * Benoit Parrot + * Laurent Pinchart + */ + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "cal.h" + +/* Print Four-character-code (FOURCC) */ +static char *fourcc_to_str(u32 fmt) +{ + static char code[5]; + + code[0] = (unsigned char)(fmt & 0xff); + code[1] = (unsigned char)((fmt >> 8) & 0xff); + code[2] = (unsigned char)((fmt >> 16) & 0xff); + code[3] = (unsigned char)((fmt >> 24) & 0xff); + code[4] = '\0'; + + return code; +} + +/* ------------------------------------------------------------------ + * V4L2 Common IOCTLs + * ------------------------------------------------------------------ + */ + +static int cal_querycap(struct file *file, void *priv, + struct v4l2_capability *cap) +{ + strscpy(cap->driver, CAL_MODULE_NAME, sizeof(cap->driver)); + strscpy(cap->card, CAL_MODULE_NAME, sizeof(cap->card)); + + return 0; +} + +static int cal_g_fmt_vid_cap(struct file *file, void *priv, + struct v4l2_format *f) +{ + struct cal_ctx *ctx = video_drvdata(file); + + *f = ctx->v_fmt; + + return 0; +} + +/* ------------------------------------------------------------------ + * V4L2 Video Node Centric IOCTLs + * ------------------------------------------------------------------ + */ + +static const struct cal_format_info *find_format_by_pix(struct cal_ctx *ctx, + u32 pixelformat) +{ + const struct cal_format_info *fmtinfo; + unsigned int k; + + for (k = 0; k < ctx->num_active_fmt; k++) { + fmtinfo = ctx->active_fmt[k]; + if (fmtinfo->fourcc == pixelformat) + return fmtinfo; + } + + return NULL; +} + +static const struct cal_format_info *find_format_by_code(struct cal_ctx *ctx, + u32 code) +{ + const struct cal_format_info *fmtinfo; + unsigned int k; + + for (k = 0; k < ctx->num_active_fmt; k++) { + fmtinfo = ctx->active_fmt[k]; + if (fmtinfo->code == code) + return fmtinfo; + } + + return NULL; +} + +static int cal_legacy_enum_fmt_vid_cap(struct file *file, void *priv, + struct v4l2_fmtdesc *f) +{ + struct cal_ctx *ctx = video_drvdata(file); + const struct cal_format_info *fmtinfo; + + if (f->index >= ctx->num_active_fmt) + return -EINVAL; + + fmtinfo = ctx->active_fmt[f->index]; + + f->pixelformat = fmtinfo->fourcc; + f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + return 0; +} + +static int __subdev_get_format(struct cal_ctx *ctx, + struct v4l2_mbus_framefmt *fmt) +{ + struct v4l2_subdev_format sd_fmt = { + .which = V4L2_SUBDEV_FORMAT_ACTIVE, + .pad = 0, + }; + struct v4l2_mbus_framefmt *mbus_fmt = &sd_fmt.format; + int ret; + + ret = v4l2_subdev_call(ctx->phy->source, pad, get_fmt, NULL, &sd_fmt); + if (ret) + return ret; + + *fmt = *mbus_fmt; + + ctx_dbg(1, ctx, "%s %dx%d code:%04X\n", __func__, + fmt->width, fmt->height, fmt->code); + + return 0; +} + +static int __subdev_set_format(struct cal_ctx *ctx, + struct v4l2_mbus_framefmt *fmt) +{ + struct v4l2_subdev_format sd_fmt = { + .which = V4L2_SUBDEV_FORMAT_ACTIVE, + .pad = 0, + }; + struct v4l2_mbus_framefmt *mbus_fmt = &sd_fmt.format; + int ret; + + *mbus_fmt = *fmt; + + ret = v4l2_subdev_call(ctx->phy->source, pad, set_fmt, NULL, &sd_fmt); + if (ret) + return ret; + + ctx_dbg(1, ctx, "%s %dx%d code:%04X\n", __func__, + fmt->width, fmt->height, fmt->code); + + return 0; +} + +static void cal_calc_format_size(struct cal_ctx *ctx, + const struct cal_format_info *fmtinfo, + struct v4l2_format *f) +{ + u32 bpl, max_width; + + /* + * Maximum width is bound by the DMA max width in bytes. 
+ * We need to recalculate the actual maxi width depending on the + * number of bytes per pixels required. + */ + max_width = CAL_MAX_WIDTH_BYTES / (ALIGN(fmtinfo->bpp, 8) >> 3); + v4l_bound_align_image(&f->fmt.pix.width, 48, max_width, 2, + &f->fmt.pix.height, 32, CAL_MAX_HEIGHT_LINES, + 0, 0); + + bpl = (f->fmt.pix.width * ALIGN(fmtinfo->bpp, 8)) >> 3; + f->fmt.pix.bytesperline = ALIGN(bpl, 16); + + f->fmt.pix.sizeimage = f->fmt.pix.height * + f->fmt.pix.bytesperline; + + ctx_dbg(3, ctx, "%s: fourcc: %s size: %dx%d bpl:%d img_size:%d\n", + __func__, fourcc_to_str(f->fmt.pix.pixelformat), + f->fmt.pix.width, f->fmt.pix.height, + f->fmt.pix.bytesperline, f->fmt.pix.sizeimage); +} + +static int cal_legacy_try_fmt_vid_cap(struct file *file, void *priv, + struct v4l2_format *f) +{ + struct cal_ctx *ctx = video_drvdata(file); + const struct cal_format_info *fmtinfo; + struct v4l2_subdev_frame_size_enum fse = { + .which = V4L2_SUBDEV_FORMAT_ACTIVE, + }; + int found; + + fmtinfo = find_format_by_pix(ctx, f->fmt.pix.pixelformat); + if (!fmtinfo) { + ctx_dbg(3, ctx, "Fourcc format (0x%08x) not found.\n", + f->fmt.pix.pixelformat); + + /* Just get the first one enumerated */ + fmtinfo = ctx->active_fmt[0]; + f->fmt.pix.pixelformat = fmtinfo->fourcc; + } + + f->fmt.pix.field = ctx->v_fmt.fmt.pix.field; + + /* check for/find a valid width/height */ + found = false; + fse.pad = 0; + fse.code = fmtinfo->code; + for (fse.index = 0; ; fse.index++) { + int ret; + + ret = v4l2_subdev_call(ctx->phy->source, pad, enum_frame_size, + NULL, &fse); + if (ret) + break; + + if ((f->fmt.pix.width == fse.max_width) && + (f->fmt.pix.height == fse.max_height)) { + found = true; + break; + } else if ((f->fmt.pix.width >= fse.min_width) && + (f->fmt.pix.width <= fse.max_width) && + (f->fmt.pix.height >= fse.min_height) && + (f->fmt.pix.height <= fse.max_height)) { + found = true; + break; + } + } + + if (!found) { + /* use existing values as default */ + f->fmt.pix.width = ctx->v_fmt.fmt.pix.width; + f->fmt.pix.height = ctx->v_fmt.fmt.pix.height; + } + + /* + * Use current colorspace for now, it will get + * updated properly during s_fmt + */ + f->fmt.pix.colorspace = ctx->v_fmt.fmt.pix.colorspace; + cal_calc_format_size(ctx, fmtinfo, f); + return 0; +} + +static int cal_legacy_s_fmt_vid_cap(struct file *file, void *priv, + struct v4l2_format *f) +{ + struct cal_ctx *ctx = video_drvdata(file); + struct vb2_queue *q = &ctx->vb_vidq; + struct v4l2_subdev_format sd_fmt = { + .which = V4L2_SUBDEV_FORMAT_ACTIVE, + .pad = CAL_CAMERARX_PAD_SINK, + }; + const struct cal_format_info *fmtinfo; + int ret; + + if (vb2_is_busy(q)) { + ctx_dbg(3, ctx, "%s device busy\n", __func__); + return -EBUSY; + } + + ret = cal_legacy_try_fmt_vid_cap(file, priv, f); + if (ret < 0) + return ret; + + fmtinfo = find_format_by_pix(ctx, f->fmt.pix.pixelformat); + + v4l2_fill_mbus_format(&sd_fmt.format, &f->fmt.pix, fmtinfo->code); + + ret = __subdev_set_format(ctx, &sd_fmt.format); + if (ret) + return ret; + + /* Just double check nothing has gone wrong */ + if (sd_fmt.format.code != fmtinfo->code) { + ctx_dbg(3, ctx, + "%s subdev changed format on us, this should not happen\n", + __func__); + return -EINVAL; + } + + v4l2_fill_pix_format(&ctx->v_fmt.fmt.pix, &sd_fmt.format); + ctx->v_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + ctx->v_fmt.fmt.pix.pixelformat = fmtinfo->fourcc; + ctx->v_fmt.fmt.pix.field = sd_fmt.format.field; + cal_calc_format_size(ctx, fmtinfo, &ctx->v_fmt); + + v4l2_subdev_call(&ctx->phy->subdev, pad, set_fmt, NULL, &sd_fmt); + + 
ctx->fmtinfo = fmtinfo; + *f = ctx->v_fmt; + + return 0; +} + +static int cal_legacy_enum_framesizes(struct file *file, void *fh, + struct v4l2_frmsizeenum *fsize) +{ + struct cal_ctx *ctx = video_drvdata(file); + const struct cal_format_info *fmtinfo; + struct v4l2_subdev_frame_size_enum fse = { + .index = fsize->index, + .pad = 0, + .which = V4L2_SUBDEV_FORMAT_ACTIVE, + }; + int ret; + + /* check for valid format */ + fmtinfo = find_format_by_pix(ctx, fsize->pixel_format); + if (!fmtinfo) { + ctx_dbg(3, ctx, "Invalid pixel code: %x\n", + fsize->pixel_format); + return -EINVAL; + } + + fse.code = fmtinfo->code; + + ret = v4l2_subdev_call(ctx->phy->source, pad, enum_frame_size, NULL, + &fse); + if (ret) + return ret; + + ctx_dbg(1, ctx, "%s: index: %d code: %x W:[%d,%d] H:[%d,%d]\n", + __func__, fse.index, fse.code, fse.min_width, fse.max_width, + fse.min_height, fse.max_height); + + fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE; + fsize->discrete.width = fse.max_width; + fsize->discrete.height = fse.max_height; + + return 0; +} + +static int cal_legacy_enum_input(struct file *file, void *priv, + struct v4l2_input *inp) +{ + if (inp->index > 0) + return -EINVAL; + + inp->type = V4L2_INPUT_TYPE_CAMERA; + sprintf(inp->name, "Camera %u", inp->index); + return 0; +} + +static int cal_legacy_g_input(struct file *file, void *priv, unsigned int *i) +{ + *i = 0; + return 0; +} + +static int cal_legacy_s_input(struct file *file, void *priv, unsigned int i) +{ + return i > 0 ? -EINVAL : 0; +} + +/* timeperframe is arbitrary and continuous */ +static int cal_legacy_enum_frameintervals(struct file *file, void *priv, + struct v4l2_frmivalenum *fival) +{ + struct cal_ctx *ctx = video_drvdata(file); + const struct cal_format_info *fmtinfo; + struct v4l2_subdev_frame_interval_enum fie = { + .index = fival->index, + .width = fival->width, + .height = fival->height, + .which = V4L2_SUBDEV_FORMAT_ACTIVE, + }; + int ret; + + fmtinfo = find_format_by_pix(ctx, fival->pixel_format); + if (!fmtinfo) + return -EINVAL; + + fie.code = fmtinfo->code; + ret = v4l2_subdev_call(ctx->phy->source, pad, enum_frame_interval, + NULL, &fie); + if (ret) + return ret; + fival->type = V4L2_FRMIVAL_TYPE_DISCRETE; + fival->discrete = fie.interval; + + return 0; +} + +static int cal_legacy_g_parm(struct file *file, void *fh, struct v4l2_streamparm *a) +{ + struct cal_ctx *ctx = video_drvdata(file); + + return v4l2_g_parm_cap(video_devdata(file), ctx->phy->source, a); +} + +static int cal_legacy_s_parm(struct file *file, void *fh, struct v4l2_streamparm *a) +{ + struct cal_ctx *ctx = video_drvdata(file); + + return v4l2_s_parm_cap(video_devdata(file), ctx->phy->source, a); +} + +static const struct v4l2_ioctl_ops cal_ioctl_legacy_ops = { + .vidioc_querycap = cal_querycap, + .vidioc_enum_fmt_vid_cap = cal_legacy_enum_fmt_vid_cap, + .vidioc_g_fmt_vid_cap = cal_g_fmt_vid_cap, + .vidioc_try_fmt_vid_cap = cal_legacy_try_fmt_vid_cap, + .vidioc_s_fmt_vid_cap = cal_legacy_s_fmt_vid_cap, + .vidioc_enum_framesizes = cal_legacy_enum_framesizes, + .vidioc_reqbufs = vb2_ioctl_reqbufs, + .vidioc_create_bufs = vb2_ioctl_create_bufs, + .vidioc_prepare_buf = vb2_ioctl_prepare_buf, + .vidioc_querybuf = vb2_ioctl_querybuf, + .vidioc_qbuf = vb2_ioctl_qbuf, + .vidioc_dqbuf = vb2_ioctl_dqbuf, + .vidioc_expbuf = vb2_ioctl_expbuf, + .vidioc_enum_input = cal_legacy_enum_input, + .vidioc_g_input = cal_legacy_g_input, + .vidioc_s_input = cal_legacy_s_input, + .vidioc_enum_frameintervals = cal_legacy_enum_frameintervals, + .vidioc_streamon = vb2_ioctl_streamon, + 
.vidioc_streamoff = vb2_ioctl_streamoff, + .vidioc_log_status = v4l2_ctrl_log_status, + .vidioc_subscribe_event = v4l2_ctrl_subscribe_event, + .vidioc_unsubscribe_event = v4l2_event_unsubscribe, + .vidioc_g_parm = cal_legacy_g_parm, + .vidioc_s_parm = cal_legacy_s_parm, +}; + +/* ------------------------------------------------------------------ + * V4L2 Media Controller Centric IOCTLs + * ------------------------------------------------------------------ + */ + +static int cal_mc_enum_fmt_vid_cap(struct file *file, void *priv, + struct v4l2_fmtdesc *f) +{ + unsigned int i; + unsigned int idx; + + if (f->index >= cal_num_formats) + return -EINVAL; + + idx = 0; + + for (i = 0; i < cal_num_formats; ++i) { + if (f->mbus_code && cal_formats[i].code != f->mbus_code) + continue; + + if (idx == f->index) { + f->pixelformat = cal_formats[i].fourcc; + f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + return 0; + } + + idx++; + } + + return -EINVAL; +} + +static void cal_mc_try_fmt(struct cal_ctx *ctx, struct v4l2_format *f, + const struct cal_format_info **info) +{ + struct v4l2_pix_format *format = &f->fmt.pix; + const struct cal_format_info *fmtinfo; + unsigned int bpp; + + /* + * Default to the first format if the requested pixel format code isn't + * supported. + */ + fmtinfo = cal_format_by_fourcc(f->fmt.pix.pixelformat); + if (!fmtinfo) + fmtinfo = &cal_formats[0]; + + /* + * Clamp the size, update the pixel format. The field and colorspace are + * accepted as-is, except for V4L2_FIELD_ANY that is turned into + * V4L2_FIELD_NONE. + */ + bpp = ALIGN(fmtinfo->bpp, 8); + + format->width = clamp_t(unsigned int, format->width, + CAL_MIN_WIDTH_BYTES * 8 / bpp, + CAL_MAX_WIDTH_BYTES * 8 / bpp); + format->height = clamp_t(unsigned int, format->height, + CAL_MIN_HEIGHT_LINES, CAL_MAX_HEIGHT_LINES); + format->pixelformat = fmtinfo->fourcc; + + if (format->field == V4L2_FIELD_ANY) + format->field = V4L2_FIELD_NONE; + + /* + * Calculate the number of bytes per line and the image size. The + * hardware stores the stride as a number of 16 bytes words, in a + * signed 15-bit value. Only 14 bits are thus usable. 
+ */ + format->bytesperline = ALIGN(clamp(format->bytesperline, + format->width * bpp / 8, + ((1U << 14) - 1) * 16), 16); + + format->sizeimage = format->height * format->bytesperline; + + format->colorspace = ctx->v_fmt.fmt.pix.colorspace; + + if (info) + *info = fmtinfo; + + ctx_dbg(3, ctx, "%s: %s %ux%u (bytesperline %u sizeimage %u)\n", + __func__, fourcc_to_str(format->pixelformat), + format->width, format->height, + format->bytesperline, format->sizeimage); +} + +static int cal_mc_try_fmt_vid_cap(struct file *file, void *priv, + struct v4l2_format *f) +{ + struct cal_ctx *ctx = video_drvdata(file); + + cal_mc_try_fmt(ctx, f, NULL); + return 0; +} + +static int cal_mc_s_fmt_vid_cap(struct file *file, void *priv, + struct v4l2_format *f) +{ + struct cal_ctx *ctx = video_drvdata(file); + const struct cal_format_info *fmtinfo; + + if (vb2_is_busy(&ctx->vb_vidq)) { + ctx_dbg(3, ctx, "%s device busy\n", __func__); + return -EBUSY; + } + + cal_mc_try_fmt(ctx, f, &fmtinfo); + + ctx->v_fmt = *f; + ctx->fmtinfo = fmtinfo; + + return 0; +} + +static int cal_mc_enum_framesizes(struct file *file, void *fh, + struct v4l2_frmsizeenum *fsize) +{ + struct cal_ctx *ctx = video_drvdata(file); + const struct cal_format_info *fmtinfo; + unsigned int bpp; + + if (fsize->index > 0) + return -EINVAL; + + fmtinfo = cal_format_by_fourcc(fsize->pixel_format); + if (!fmtinfo) { + ctx_dbg(3, ctx, "Invalid pixel format 0x%08x\n", + fsize->pixel_format); + return -EINVAL; + } + + bpp = ALIGN(fmtinfo->bpp, 8); + + fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE; + fsize->stepwise.min_width = CAL_MIN_WIDTH_BYTES * 8 / bpp; + fsize->stepwise.max_width = CAL_MAX_WIDTH_BYTES * 8 / bpp; + fsize->stepwise.step_width = 64 / bpp; + fsize->stepwise.min_height = CAL_MIN_HEIGHT_LINES; + fsize->stepwise.max_height = CAL_MAX_HEIGHT_LINES; + fsize->stepwise.step_height = 1; + + return 0; +} + +static const struct v4l2_ioctl_ops cal_ioctl_mc_ops = { + .vidioc_querycap = cal_querycap, + .vidioc_enum_fmt_vid_cap = cal_mc_enum_fmt_vid_cap, + .vidioc_g_fmt_vid_cap = cal_g_fmt_vid_cap, + .vidioc_try_fmt_vid_cap = cal_mc_try_fmt_vid_cap, + .vidioc_s_fmt_vid_cap = cal_mc_s_fmt_vid_cap, + .vidioc_enum_framesizes = cal_mc_enum_framesizes, + .vidioc_reqbufs = vb2_ioctl_reqbufs, + .vidioc_create_bufs = vb2_ioctl_create_bufs, + .vidioc_prepare_buf = vb2_ioctl_prepare_buf, + .vidioc_querybuf = vb2_ioctl_querybuf, + .vidioc_qbuf = vb2_ioctl_qbuf, + .vidioc_dqbuf = vb2_ioctl_dqbuf, + .vidioc_expbuf = vb2_ioctl_expbuf, + .vidioc_streamon = vb2_ioctl_streamon, + .vidioc_streamoff = vb2_ioctl_streamoff, + .vidioc_log_status = v4l2_ctrl_log_status, +}; + +/* ------------------------------------------------------------------ + * videobuf2 Common Operations + * ------------------------------------------------------------------ + */ + +static int cal_queue_setup(struct vb2_queue *vq, + unsigned int *nbuffers, unsigned int *nplanes, + unsigned int sizes[], struct device *alloc_devs[]) +{ + struct cal_ctx *ctx = vb2_get_drv_priv(vq); + unsigned int size = ctx->v_fmt.fmt.pix.sizeimage; + + if (vq->num_buffers + *nbuffers < 3) + *nbuffers = 3 - vq->num_buffers; + + if (*nplanes) { + if (sizes[0] < size) + return -EINVAL; + size = sizes[0]; + } + + *nplanes = 1; + sizes[0] = size; + + ctx_dbg(3, ctx, "nbuffers=%d, size=%d\n", *nbuffers, sizes[0]); + + return 0; +} + +static int cal_buffer_prepare(struct vb2_buffer *vb) +{ + struct cal_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); + struct cal_buffer *buf = container_of(vb, struct cal_buffer, + vb.vb2_buf); + 
unsigned long size; + + size = ctx->v_fmt.fmt.pix.sizeimage; + if (vb2_plane_size(vb, 0) < size) { + ctx_err(ctx, + "data will not fit into plane (%lu < %lu)\n", + vb2_plane_size(vb, 0), size); + return -EINVAL; + } + + vb2_set_plane_payload(&buf->vb.vb2_buf, 0, size); + return 0; +} + +static void cal_buffer_queue(struct vb2_buffer *vb) +{ + struct cal_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); + struct cal_buffer *buf = container_of(vb, struct cal_buffer, + vb.vb2_buf); + unsigned long flags; + + /* recheck locking */ + spin_lock_irqsave(&ctx->dma.lock, flags); + list_add_tail(&buf->list, &ctx->dma.queue); + spin_unlock_irqrestore(&ctx->dma.lock, flags); +} + +static void cal_release_buffers(struct cal_ctx *ctx, + enum vb2_buffer_state state) +{ + struct cal_buffer *buf, *tmp; + + /* Release all queued buffers. */ + spin_lock_irq(&ctx->dma.lock); + + list_for_each_entry_safe(buf, tmp, &ctx->dma.queue, list) { + list_del(&buf->list); + vb2_buffer_done(&buf->vb.vb2_buf, state); + } + + if (ctx->dma.pending) { + vb2_buffer_done(&ctx->dma.pending->vb.vb2_buf, state); + ctx->dma.pending = NULL; + } + + if (ctx->dma.active) { + vb2_buffer_done(&ctx->dma.active->vb.vb2_buf, state); + ctx->dma.active = NULL; + } + + spin_unlock_irq(&ctx->dma.lock); +} + +/* ------------------------------------------------------------------ + * videobuf2 Operations + * ------------------------------------------------------------------ + */ + +static int cal_video_check_format(struct cal_ctx *ctx) +{ + const struct v4l2_mbus_framefmt *format; + struct v4l2_subdev_state *state; + struct media_pad *remote_pad; + int ret = 0; + + remote_pad = media_pad_remote_pad_first(&ctx->pad); + if (!remote_pad) + return -ENODEV; + + state = v4l2_subdev_lock_and_get_active_state(&ctx->phy->subdev); + + format = v4l2_subdev_get_pad_format(&ctx->phy->subdev, state, remote_pad->index); + if (!format) { + ret = -EINVAL; + goto out; + } + + if (ctx->fmtinfo->code != format->code || + ctx->v_fmt.fmt.pix.height != format->height || + ctx->v_fmt.fmt.pix.width != format->width || + ctx->v_fmt.fmt.pix.field != format->field) { + ret = -EPIPE; + goto out; + } + +out: + v4l2_subdev_unlock_state(state); + + return ret; +} + +static int cal_start_streaming(struct vb2_queue *vq, unsigned int count) +{ + struct cal_ctx *ctx = vb2_get_drv_priv(vq); + struct cal_buffer *buf; + dma_addr_t addr; + int ret; + + ret = video_device_pipeline_alloc_start(&ctx->vdev); + if (ret < 0) { + ctx_err(ctx, "Failed to start media pipeline: %d\n", ret); + goto error_release_buffers; + } + + /* + * Verify that the currently configured format matches the output of + * the connected CAMERARX. 
+ */ + ret = cal_video_check_format(ctx); + if (ret < 0) { + ctx_dbg(3, ctx, + "Format mismatch between CAMERARX and video node\n"); + goto error_pipeline; + } + + ret = cal_ctx_prepare(ctx); + if (ret) { + ctx_err(ctx, "Failed to prepare context: %d\n", ret); + goto error_pipeline; + } + + spin_lock_irq(&ctx->dma.lock); + buf = list_first_entry(&ctx->dma.queue, struct cal_buffer, list); + ctx->dma.active = buf; + list_del(&buf->list); + spin_unlock_irq(&ctx->dma.lock); + + addr = vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0); + + ret = pm_runtime_resume_and_get(ctx->cal->dev); + if (ret < 0) + goto error_pipeline; + + cal_ctx_set_dma_addr(ctx, addr); + cal_ctx_start(ctx); + + ret = v4l2_subdev_call(&ctx->phy->subdev, video, s_stream, 1); + if (ret) + goto error_stop; + + if (cal_debug >= 4) + cal_quickdump_regs(ctx->cal); + + return 0; + +error_stop: + cal_ctx_stop(ctx); + pm_runtime_put_sync(ctx->cal->dev); + cal_ctx_unprepare(ctx); + +error_pipeline: + video_device_pipeline_stop(&ctx->vdev); +error_release_buffers: + cal_release_buffers(ctx, VB2_BUF_STATE_QUEUED); + + return ret; +} + +static void cal_stop_streaming(struct vb2_queue *vq) +{ + struct cal_ctx *ctx = vb2_get_drv_priv(vq); + + cal_ctx_stop(ctx); + + v4l2_subdev_call(&ctx->phy->subdev, video, s_stream, 0); + + pm_runtime_put_sync(ctx->cal->dev); + + cal_ctx_unprepare(ctx); + + cal_release_buffers(ctx, VB2_BUF_STATE_ERROR); + + video_device_pipeline_stop(&ctx->vdev); +} + +static const struct vb2_ops cal_video_qops = { + .queue_setup = cal_queue_setup, + .buf_prepare = cal_buffer_prepare, + .buf_queue = cal_buffer_queue, + .start_streaming = cal_start_streaming, + .stop_streaming = cal_stop_streaming, + .wait_prepare = vb2_ops_wait_prepare, + .wait_finish = vb2_ops_wait_finish, +}; + +/* ------------------------------------------------------------------ + * V4L2 Initialization and Registration + * ------------------------------------------------------------------ + */ + +static const struct v4l2_file_operations cal_fops = { + .owner = THIS_MODULE, + .open = v4l2_fh_open, + .release = vb2_fop_release, + .poll = vb2_fop_poll, + .unlocked_ioctl = video_ioctl2, /* V4L2 ioctl handler */ + .mmap = vb2_fop_mmap, +}; + +static int cal_ctx_v4l2_init_formats(struct cal_ctx *ctx) +{ + struct v4l2_mbus_framefmt mbus_fmt; + const struct cal_format_info *fmtinfo; + unsigned int i, j, k; + int ret = 0; + + /* Enumerate sub device formats and enable all matching local formats */ + ctx->active_fmt = devm_kcalloc(ctx->cal->dev, cal_num_formats, + sizeof(*ctx->active_fmt), GFP_KERNEL); + if (!ctx->active_fmt) + return -ENOMEM; + + ctx->num_active_fmt = 0; + + for (j = 0, i = 0; ; ++j) { + struct v4l2_subdev_mbus_code_enum mbus_code = { + .index = j, + .which = V4L2_SUBDEV_FORMAT_ACTIVE, + }; + + ret = v4l2_subdev_call(ctx->phy->source, pad, enum_mbus_code, + NULL, &mbus_code); + if (ret == -EINVAL) + break; + + if (ret) { + ctx_err(ctx, "Error enumerating mbus codes in subdev %s: %d\n", + ctx->phy->source->name, ret); + return ret; + } + + ctx_dbg(2, ctx, + "subdev %s: code: %04x idx: %u\n", + ctx->phy->source->name, mbus_code.code, j); + + for (k = 0; k < cal_num_formats; k++) { + fmtinfo = &cal_formats[k]; + + if (mbus_code.code == fmtinfo->code) { + ctx->active_fmt[i] = fmtinfo; + ctx_dbg(2, ctx, + "matched fourcc: %s: code: %04x idx: %u\n", + fourcc_to_str(fmtinfo->fourcc), + fmtinfo->code, i); + ctx->num_active_fmt = ++i; + } + } + } + + if (i == 0) { + ctx_err(ctx, "No suitable format reported by subdev %s\n", + 
ctx->phy->source->name); + return -EINVAL; + } + + ret = __subdev_get_format(ctx, &mbus_fmt); + if (ret) + return ret; + + fmtinfo = find_format_by_code(ctx, mbus_fmt.code); + if (!fmtinfo) { + ctx_dbg(3, ctx, "mbus code format (0x%08x) not found.\n", + mbus_fmt.code); + return -EINVAL; + } + + /* Save current format */ + v4l2_fill_pix_format(&ctx->v_fmt.fmt.pix, &mbus_fmt); + ctx->v_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + ctx->v_fmt.fmt.pix.pixelformat = fmtinfo->fourcc; + cal_calc_format_size(ctx, fmtinfo, &ctx->v_fmt); + ctx->fmtinfo = fmtinfo; + + return 0; +} + +static int cal_ctx_v4l2_init_mc_format(struct cal_ctx *ctx) +{ + const struct cal_format_info *fmtinfo; + struct v4l2_pix_format *pix_fmt = &ctx->v_fmt.fmt.pix; + + fmtinfo = cal_format_by_code(MEDIA_BUS_FMT_UYVY8_1X16); + if (!fmtinfo) + return -EINVAL; + + pix_fmt->width = 640; + pix_fmt->height = 480; + pix_fmt->field = V4L2_FIELD_NONE; + pix_fmt->colorspace = V4L2_COLORSPACE_SRGB; + pix_fmt->ycbcr_enc = V4L2_YCBCR_ENC_601; + pix_fmt->quantization = V4L2_QUANTIZATION_LIM_RANGE; + pix_fmt->xfer_func = V4L2_XFER_FUNC_SRGB; + pix_fmt->pixelformat = fmtinfo->fourcc; + + ctx->v_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + + /* Save current format */ + cal_calc_format_size(ctx, fmtinfo, &ctx->v_fmt); + ctx->fmtinfo = fmtinfo; + + return 0; +} + +int cal_ctx_v4l2_register(struct cal_ctx *ctx) +{ + struct video_device *vfd = &ctx->vdev; + int ret; + + if (!cal_mc_api) { + struct v4l2_ctrl_handler *hdl = &ctx->ctrl_handler; + + ret = cal_ctx_v4l2_init_formats(ctx); + if (ret) { + ctx_err(ctx, "Failed to init formats: %d\n", ret); + return ret; + } + + ret = v4l2_ctrl_add_handler(hdl, ctx->phy->source->ctrl_handler, + NULL, true); + if (ret < 0) { + ctx_err(ctx, "Failed to add source ctrl handler\n"); + return ret; + } + } else { + ret = cal_ctx_v4l2_init_mc_format(ctx); + if (ret) { + ctx_err(ctx, "Failed to init format: %d\n", ret); + return ret; + } + } + + ret = video_register_device(vfd, VFL_TYPE_VIDEO, cal_video_nr); + if (ret < 0) { + ctx_err(ctx, "Failed to register video device\n"); + return ret; + } + + ret = media_create_pad_link(&ctx->phy->subdev.entity, + CAL_CAMERARX_PAD_FIRST_SOURCE, + &vfd->entity, 0, + MEDIA_LNK_FL_IMMUTABLE | + MEDIA_LNK_FL_ENABLED); + if (ret) { + ctx_err(ctx, "Failed to create media link for context %u\n", + ctx->dma_ctx); + video_unregister_device(vfd); + return ret; + } + + ctx_info(ctx, "V4L2 device registered as %s\n", + video_device_node_name(vfd)); + + return 0; +} + +void cal_ctx_v4l2_unregister(struct cal_ctx *ctx) +{ + ctx_dbg(1, ctx, "unregistering %s\n", + video_device_node_name(&ctx->vdev)); + + video_unregister_device(&ctx->vdev); +} + +int cal_ctx_v4l2_init(struct cal_ctx *ctx) +{ + struct video_device *vfd = &ctx->vdev; + struct vb2_queue *q = &ctx->vb_vidq; + int ret; + + INIT_LIST_HEAD(&ctx->dma.queue); + spin_lock_init(&ctx->dma.lock); + mutex_init(&ctx->mutex); + init_waitqueue_head(&ctx->dma.wait); + + /* Initialize the vb2 queue. */ + q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + q->io_modes = VB2_MMAP | VB2_DMABUF; + q->drv_priv = ctx; + q->buf_struct_size = sizeof(struct cal_buffer); + q->ops = &cal_video_qops; + q->mem_ops = &vb2_dma_contig_memops; + q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; + q->lock = &ctx->mutex; + q->min_buffers_needed = 3; + q->dev = ctx->cal->dev; + + ret = vb2_queue_init(q); + if (ret) + return ret; + + /* Initialize the video device and media entity. 
*/ + vfd->fops = &cal_fops; + vfd->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING + | (cal_mc_api ? V4L2_CAP_IO_MC : 0); + vfd->v4l2_dev = &ctx->cal->v4l2_dev; + vfd->queue = q; + snprintf(vfd->name, sizeof(vfd->name), "CAL output %u", ctx->dma_ctx); + vfd->release = video_device_release_empty; + vfd->ioctl_ops = cal_mc_api ? &cal_ioctl_mc_ops : &cal_ioctl_legacy_ops; + vfd->lock = &ctx->mutex; + video_set_drvdata(vfd, ctx); + + ctx->pad.flags = MEDIA_PAD_FL_SINK; + ret = media_entity_pads_init(&vfd->entity, 1, &ctx->pad); + if (ret < 0) + return ret; + + if (!cal_mc_api) { + /* Initialize the control handler. */ + struct v4l2_ctrl_handler *hdl = &ctx->ctrl_handler; + + ret = v4l2_ctrl_handler_init(hdl, 11); + if (ret < 0) { + ctx_err(ctx, "Failed to init ctrl handler\n"); + goto error; + } + + vfd->ctrl_handler = hdl; + } + + return 0; + +error: + media_entity_cleanup(&vfd->entity); + return ret; +} + +void cal_ctx_v4l2_cleanup(struct cal_ctx *ctx) +{ + if (!cal_mc_api) + v4l2_ctrl_handler_free(&ctx->ctrl_handler); + + media_entity_cleanup(&ctx->vdev.entity); +} diff --git a/drivers/media/platform/ti/cal/cal.c b/drivers/media/platform/ti/cal/cal.c new file mode 100644 index 0000000000..528909ae4b --- /dev/null +++ b/drivers/media/platform/ti/cal/cal.c @@ -0,0 +1,1343 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * TI Camera Access Layer (CAL) - Driver + * + * Copyright (c) 2015-2020 Texas Instruments Inc. + * + * Authors: + * Benoit Parrot + * Laurent Pinchart + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "cal.h" +#include "cal_regs.h" + +MODULE_DESCRIPTION("TI CAL driver"); +MODULE_AUTHOR("Benoit Parrot, "); +MODULE_LICENSE("GPL v2"); +MODULE_VERSION("0.1.0"); + +int cal_video_nr = -1; +module_param_named(video_nr, cal_video_nr, uint, 0644); +MODULE_PARM_DESC(video_nr, "videoX start number, -1 is autodetect"); + +unsigned int cal_debug; +module_param_named(debug, cal_debug, uint, 0644); +MODULE_PARM_DESC(debug, "activates debug info"); + +#ifdef CONFIG_VIDEO_TI_CAL_MC +#define CAL_MC_API_DEFAULT 1 +#else +#define CAL_MC_API_DEFAULT 0 +#endif + +bool cal_mc_api = CAL_MC_API_DEFAULT; +module_param_named(mc_api, cal_mc_api, bool, 0444); +MODULE_PARM_DESC(mc_api, "activates the MC API"); + +/* ------------------------------------------------------------------ + * Format Handling + * ------------------------------------------------------------------ + */ + +const struct cal_format_info cal_formats[] = { + { + .fourcc = V4L2_PIX_FMT_YUYV, + .code = MEDIA_BUS_FMT_YUYV8_1X16, + .bpp = 16, + }, { + .fourcc = V4L2_PIX_FMT_UYVY, + .code = MEDIA_BUS_FMT_UYVY8_1X16, + .bpp = 16, + }, { + .fourcc = V4L2_PIX_FMT_YVYU, + .code = MEDIA_BUS_FMT_YVYU8_1X16, + .bpp = 16, + }, { + .fourcc = V4L2_PIX_FMT_VYUY, + .code = MEDIA_BUS_FMT_VYUY8_1X16, + .bpp = 16, + }, { + .fourcc = V4L2_PIX_FMT_RGB565, + .code = MEDIA_BUS_FMT_RGB565_1X16, + .bpp = 16, + }, { + .fourcc = V4L2_PIX_FMT_SBGGR8, + .code = MEDIA_BUS_FMT_SBGGR8_1X8, + .bpp = 8, + }, { + .fourcc = V4L2_PIX_FMT_SGBRG8, + .code = MEDIA_BUS_FMT_SGBRG8_1X8, + .bpp = 8, + }, { + .fourcc = V4L2_PIX_FMT_SGRBG8, + .code = MEDIA_BUS_FMT_SGRBG8_1X8, + .bpp = 8, + }, { + .fourcc = V4L2_PIX_FMT_SRGGB8, + .code = MEDIA_BUS_FMT_SRGGB8_1X8, + .bpp = 8, + }, { + .fourcc = V4L2_PIX_FMT_SBGGR10, + .code = MEDIA_BUS_FMT_SBGGR10_1X10, + .bpp = 10, + }, { + .fourcc = V4L2_PIX_FMT_SGBRG10, + .code = MEDIA_BUS_FMT_SGBRG10_1X10, + 
.bpp = 10, + }, { + .fourcc = V4L2_PIX_FMT_SGRBG10, + .code = MEDIA_BUS_FMT_SGRBG10_1X10, + .bpp = 10, + }, { + .fourcc = V4L2_PIX_FMT_SRGGB10, + .code = MEDIA_BUS_FMT_SRGGB10_1X10, + .bpp = 10, + }, { + .fourcc = V4L2_PIX_FMT_SBGGR12, + .code = MEDIA_BUS_FMT_SBGGR12_1X12, + .bpp = 12, + }, { + .fourcc = V4L2_PIX_FMT_SGBRG12, + .code = MEDIA_BUS_FMT_SGBRG12_1X12, + .bpp = 12, + }, { + .fourcc = V4L2_PIX_FMT_SGRBG12, + .code = MEDIA_BUS_FMT_SGRBG12_1X12, + .bpp = 12, + }, { + .fourcc = V4L2_PIX_FMT_SRGGB12, + .code = MEDIA_BUS_FMT_SRGGB12_1X12, + .bpp = 12, + }, +}; + +const unsigned int cal_num_formats = ARRAY_SIZE(cal_formats); + +const struct cal_format_info *cal_format_by_fourcc(u32 fourcc) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(cal_formats); ++i) { + if (cal_formats[i].fourcc == fourcc) + return &cal_formats[i]; + } + + return NULL; +} + +const struct cal_format_info *cal_format_by_code(u32 code) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(cal_formats); ++i) { + if (cal_formats[i].code == code) + return &cal_formats[i]; + } + + return NULL; +} + +/* ------------------------------------------------------------------ + * Platform Data + * ------------------------------------------------------------------ + */ + +static const struct cal_camerarx_data dra72x_cal_camerarx[] = { + { + .fields = { + [F_CTRLCLKEN] = { 10, 10 }, + [F_CAMMODE] = { 11, 12 }, + [F_LANEENABLE] = { 13, 16 }, + [F_CSI_MODE] = { 17, 17 }, + }, + .num_lanes = 4, + }, + { + .fields = { + [F_CTRLCLKEN] = { 0, 0 }, + [F_CAMMODE] = { 1, 2 }, + [F_LANEENABLE] = { 3, 4 }, + [F_CSI_MODE] = { 5, 5 }, + }, + .num_lanes = 2, + }, +}; + +static const struct cal_data dra72x_cal_data = { + .camerarx = dra72x_cal_camerarx, + .num_csi2_phy = ARRAY_SIZE(dra72x_cal_camerarx), +}; + +static const struct cal_data dra72x_es1_cal_data = { + .camerarx = dra72x_cal_camerarx, + .num_csi2_phy = ARRAY_SIZE(dra72x_cal_camerarx), + .flags = DRA72_CAL_PRE_ES2_LDO_DISABLE, +}; + +static const struct cal_camerarx_data dra76x_cal_csi_phy[] = { + { + .fields = { + [F_CTRLCLKEN] = { 8, 8 }, + [F_CAMMODE] = { 9, 10 }, + [F_CSI_MODE] = { 11, 11 }, + [F_LANEENABLE] = { 27, 31 }, + }, + .num_lanes = 5, + }, + { + .fields = { + [F_CTRLCLKEN] = { 0, 0 }, + [F_CAMMODE] = { 1, 2 }, + [F_CSI_MODE] = { 3, 3 }, + [F_LANEENABLE] = { 24, 26 }, + }, + .num_lanes = 3, + }, +}; + +static const struct cal_data dra76x_cal_data = { + .camerarx = dra76x_cal_csi_phy, + .num_csi2_phy = ARRAY_SIZE(dra76x_cal_csi_phy), +}; + +static const struct cal_camerarx_data am654_cal_csi_phy[] = { + { + .fields = { + [F_CTRLCLKEN] = { 15, 15 }, + [F_CAMMODE] = { 24, 25 }, + [F_LANEENABLE] = { 0, 4 }, + }, + .num_lanes = 5, + }, +}; + +static const struct cal_data am654_cal_data = { + .camerarx = am654_cal_csi_phy, + .num_csi2_phy = ARRAY_SIZE(am654_cal_csi_phy), +}; + +/* ------------------------------------------------------------------ + * I/O Register Accessors + * ------------------------------------------------------------------ + */ + +void cal_quickdump_regs(struct cal_dev *cal) +{ + unsigned int i; + + cal_info(cal, "CAL Registers @ 0x%pa:\n", &cal->res->start); + print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 4, + (__force const void *)cal->base, + resource_size(cal->res), false); + + for (i = 0; i < cal->data->num_csi2_phy; ++i) { + struct cal_camerarx *phy = cal->phy[i]; + + cal_info(cal, "CSI2 Core %u Registers @ %pa:\n", i, + &phy->res->start); + print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 4, + (__force const void *)phy->base, + 
resource_size(phy->res), + false); + } +} + +/* ------------------------------------------------------------------ + * Context Management + * ------------------------------------------------------------------ + */ + +#define CAL_MAX_PIX_PROC 4 + +static int cal_reserve_pix_proc(struct cal_dev *cal) +{ + unsigned long ret; + + spin_lock(&cal->v4l2_dev.lock); + + ret = find_first_zero_bit(&cal->reserved_pix_proc_mask, CAL_MAX_PIX_PROC); + + if (ret == CAL_MAX_PIX_PROC) { + spin_unlock(&cal->v4l2_dev.lock); + return -ENOSPC; + } + + cal->reserved_pix_proc_mask |= BIT(ret); + + spin_unlock(&cal->v4l2_dev.lock); + + return ret; +} + +static void cal_release_pix_proc(struct cal_dev *cal, unsigned int pix_proc_num) +{ + spin_lock(&cal->v4l2_dev.lock); + + cal->reserved_pix_proc_mask &= ~BIT(pix_proc_num); + + spin_unlock(&cal->v4l2_dev.lock); +} + +static void cal_ctx_csi2_config(struct cal_ctx *ctx) +{ + u32 val; + + val = cal_read(ctx->cal, CAL_CSI2_CTX(ctx->phy->instance, ctx->csi2_ctx)); + cal_set_field(&val, ctx->cport, CAL_CSI2_CTX_CPORT_MASK); + /* + * DT type: MIPI CSI-2 Specs + * 0x1: All - DT filter is disabled + * 0x24: RGB888 1 pixel = 3 bytes + * 0x2B: RAW10 4 pixels = 5 bytes + * 0x2A: RAW8 1 pixel = 1 byte + * 0x1E: YUV422 2 pixels = 4 bytes + */ + cal_set_field(&val, ctx->datatype, CAL_CSI2_CTX_DT_MASK); + cal_set_field(&val, ctx->vc, CAL_CSI2_CTX_VC_MASK); + cal_set_field(&val, ctx->v_fmt.fmt.pix.height, CAL_CSI2_CTX_LINES_MASK); + cal_set_field(&val, CAL_CSI2_CTX_ATT_PIX, CAL_CSI2_CTX_ATT_MASK); + cal_set_field(&val, CAL_CSI2_CTX_PACK_MODE_LINE, + CAL_CSI2_CTX_PACK_MODE_MASK); + cal_write(ctx->cal, CAL_CSI2_CTX(ctx->phy->instance, ctx->csi2_ctx), val); + ctx_dbg(3, ctx, "CAL_CSI2_CTX(%u, %u) = 0x%08x\n", + ctx->phy->instance, ctx->csi2_ctx, + cal_read(ctx->cal, CAL_CSI2_CTX(ctx->phy->instance, ctx->csi2_ctx))); +} + +static void cal_ctx_pix_proc_config(struct cal_ctx *ctx) +{ + u32 val, extract, pack; + + switch (ctx->fmtinfo->bpp) { + case 8: + extract = CAL_PIX_PROC_EXTRACT_B8; + pack = CAL_PIX_PROC_PACK_B8; + break; + case 10: + extract = CAL_PIX_PROC_EXTRACT_B10_MIPI; + pack = CAL_PIX_PROC_PACK_B16; + break; + case 12: + extract = CAL_PIX_PROC_EXTRACT_B12_MIPI; + pack = CAL_PIX_PROC_PACK_B16; + break; + case 16: + extract = CAL_PIX_PROC_EXTRACT_B16_LE; + pack = CAL_PIX_PROC_PACK_B16; + break; + default: + /* + * If you see this warning then it means that you added + * some new entry in the cal_formats[] array with a different + * bit per pixel values then the one supported below. + * Either add support for the new bpp value below or adjust + * the new entry to use one of the value below. + * + * Instead of failing here just use 8 bpp as a default. + */ + dev_warn_once(ctx->cal->dev, + "%s:%d:%s: bpp:%d unsupported! 
Overwritten with 8.\n", + __FILE__, __LINE__, __func__, ctx->fmtinfo->bpp); + extract = CAL_PIX_PROC_EXTRACT_B8; + pack = CAL_PIX_PROC_PACK_B8; + break; + } + + val = cal_read(ctx->cal, CAL_PIX_PROC(ctx->pix_proc)); + cal_set_field(&val, extract, CAL_PIX_PROC_EXTRACT_MASK); + cal_set_field(&val, CAL_PIX_PROC_DPCMD_BYPASS, CAL_PIX_PROC_DPCMD_MASK); + cal_set_field(&val, CAL_PIX_PROC_DPCME_BYPASS, CAL_PIX_PROC_DPCME_MASK); + cal_set_field(&val, pack, CAL_PIX_PROC_PACK_MASK); + cal_set_field(&val, ctx->cport, CAL_PIX_PROC_CPORT_MASK); + cal_set_field(&val, 1, CAL_PIX_PROC_EN_MASK); + cal_write(ctx->cal, CAL_PIX_PROC(ctx->pix_proc), val); + ctx_dbg(3, ctx, "CAL_PIX_PROC(%u) = 0x%08x\n", ctx->pix_proc, + cal_read(ctx->cal, CAL_PIX_PROC(ctx->pix_proc))); +} + +static void cal_ctx_wr_dma_config(struct cal_ctx *ctx) +{ + unsigned int stride = ctx->v_fmt.fmt.pix.bytesperline; + u32 val; + + val = cal_read(ctx->cal, CAL_WR_DMA_CTRL(ctx->dma_ctx)); + cal_set_field(&val, ctx->cport, CAL_WR_DMA_CTRL_CPORT_MASK); + cal_set_field(&val, ctx->v_fmt.fmt.pix.height, + CAL_WR_DMA_CTRL_YSIZE_MASK); + cal_set_field(&val, CAL_WR_DMA_CTRL_DTAG_PIX_DAT, + CAL_WR_DMA_CTRL_DTAG_MASK); + cal_set_field(&val, CAL_WR_DMA_CTRL_PATTERN_LINEAR, + CAL_WR_DMA_CTRL_PATTERN_MASK); + cal_set_field(&val, 1, CAL_WR_DMA_CTRL_STALL_RD_MASK); + cal_write(ctx->cal, CAL_WR_DMA_CTRL(ctx->dma_ctx), val); + ctx_dbg(3, ctx, "CAL_WR_DMA_CTRL(%d) = 0x%08x\n", ctx->dma_ctx, + cal_read(ctx->cal, CAL_WR_DMA_CTRL(ctx->dma_ctx))); + + cal_write_field(ctx->cal, CAL_WR_DMA_OFST(ctx->dma_ctx), + stride / 16, CAL_WR_DMA_OFST_MASK); + ctx_dbg(3, ctx, "CAL_WR_DMA_OFST(%d) = 0x%08x\n", ctx->dma_ctx, + cal_read(ctx->cal, CAL_WR_DMA_OFST(ctx->dma_ctx))); + + val = cal_read(ctx->cal, CAL_WR_DMA_XSIZE(ctx->dma_ctx)); + /* 64 bit word means no skipping */ + cal_set_field(&val, 0, CAL_WR_DMA_XSIZE_XSKIP_MASK); + /* + * The XSIZE field is expressed in 64-bit units and prevents overflows + * in case of synchronization issues by limiting the number of bytes + * written per line. 
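+ * For example, a 1280x720 YUYV capture has a 2560-byte stride, which programs an OFST field of 160 (stride / 16) above and an XSIZE field of 320 (stride / 8) 64-bit words here.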
+ */ + cal_set_field(&val, stride / 8, CAL_WR_DMA_XSIZE_MASK); + cal_write(ctx->cal, CAL_WR_DMA_XSIZE(ctx->dma_ctx), val); + ctx_dbg(3, ctx, "CAL_WR_DMA_XSIZE(%d) = 0x%08x\n", ctx->dma_ctx, + cal_read(ctx->cal, CAL_WR_DMA_XSIZE(ctx->dma_ctx))); +} + +void cal_ctx_set_dma_addr(struct cal_ctx *ctx, dma_addr_t addr) +{ + cal_write(ctx->cal, CAL_WR_DMA_ADDR(ctx->dma_ctx), addr); +} + +static void cal_ctx_wr_dma_enable(struct cal_ctx *ctx) +{ + u32 val = cal_read(ctx->cal, CAL_WR_DMA_CTRL(ctx->dma_ctx)); + + cal_set_field(&val, CAL_WR_DMA_CTRL_MODE_CONST, + CAL_WR_DMA_CTRL_MODE_MASK); + cal_write(ctx->cal, CAL_WR_DMA_CTRL(ctx->dma_ctx), val); +} + +static void cal_ctx_wr_dma_disable(struct cal_ctx *ctx) +{ + u32 val = cal_read(ctx->cal, CAL_WR_DMA_CTRL(ctx->dma_ctx)); + + cal_set_field(&val, CAL_WR_DMA_CTRL_MODE_DIS, + CAL_WR_DMA_CTRL_MODE_MASK); + cal_write(ctx->cal, CAL_WR_DMA_CTRL(ctx->dma_ctx), val); +} + +static bool cal_ctx_wr_dma_stopped(struct cal_ctx *ctx) +{ + bool stopped; + + spin_lock_irq(&ctx->dma.lock); + stopped = ctx->dma.state == CAL_DMA_STOPPED; + spin_unlock_irq(&ctx->dma.lock); + + return stopped; +} + +static int +cal_get_remote_frame_desc_entry(struct cal_ctx *ctx, + struct v4l2_mbus_frame_desc_entry *entry) +{ + struct v4l2_mbus_frame_desc fd; + struct media_pad *phy_source_pad; + int ret; + + phy_source_pad = media_pad_remote_pad_first(&ctx->pad); + if (!phy_source_pad) + return -ENODEV; + + ret = v4l2_subdev_call(&ctx->phy->subdev, pad, get_frame_desc, + phy_source_pad->index, &fd); + if (ret) + return ret; + + if (fd.num_entries != 1) + return -EINVAL; + + *entry = fd.entry[0]; + + return 0; +} + +int cal_ctx_prepare(struct cal_ctx *ctx) +{ + struct v4l2_mbus_frame_desc_entry entry; + int ret; + + ret = cal_get_remote_frame_desc_entry(ctx, &entry); + + if (ret == -ENOIOCTLCMD) { + ctx->vc = 0; + ctx->datatype = CAL_CSI2_CTX_DT_ANY; + } else if (!ret) { + ctx_dbg(2, ctx, "Framedesc: len %u, vc %u, dt %#x\n", + entry.length, entry.bus.csi2.vc, entry.bus.csi2.dt); + + ctx->vc = entry.bus.csi2.vc; + ctx->datatype = entry.bus.csi2.dt; + } else { + return ret; + } + + ctx->use_pix_proc = !ctx->fmtinfo->meta; + + if (ctx->use_pix_proc) { + ret = cal_reserve_pix_proc(ctx->cal); + if (ret < 0) { + ctx_err(ctx, "Failed to reserve pix proc: %d\n", ret); + return ret; + } + + ctx->pix_proc = ret; + } + + return 0; +} + +void cal_ctx_unprepare(struct cal_ctx *ctx) +{ + if (ctx->use_pix_proc) + cal_release_pix_proc(ctx->cal, ctx->pix_proc); +} + +void cal_ctx_start(struct cal_ctx *ctx) +{ + struct cal_camerarx *phy = ctx->phy; + + /* + * Reset the frame number & sequence number, but only if the + * virtual channel is not already in use. + */ + + spin_lock(&phy->vc_lock); + + if (phy->vc_enable_count[ctx->vc]++ == 0) { + phy->vc_frame_number[ctx->vc] = 0; + phy->vc_sequence[ctx->vc] = 0; + } + + spin_unlock(&phy->vc_lock); + + ctx->dma.state = CAL_DMA_RUNNING; + + /* Configure the CSI-2, pixel processing and write DMA contexts. */ + cal_ctx_csi2_config(ctx); + if (ctx->use_pix_proc) + cal_ctx_pix_proc_config(ctx); + cal_ctx_wr_dma_config(ctx); + + /* Enable IRQ_WDMA_END and IRQ_WDMA_START. 
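+ * The WDMA_END interrupts live in IRQ register set 1 and the WDMA_START interrupts in register set 2, with one bit per DMA context.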
*/ + cal_write(ctx->cal, CAL_HL_IRQENABLE_SET(1), + CAL_HL_IRQ_WDMA_END_MASK(ctx->dma_ctx)); + cal_write(ctx->cal, CAL_HL_IRQENABLE_SET(2), + CAL_HL_IRQ_WDMA_START_MASK(ctx->dma_ctx)); + + cal_ctx_wr_dma_enable(ctx); +} + +void cal_ctx_stop(struct cal_ctx *ctx) +{ + struct cal_camerarx *phy = ctx->phy; + long timeout; + + WARN_ON(phy->vc_enable_count[ctx->vc] == 0); + + spin_lock(&phy->vc_lock); + phy->vc_enable_count[ctx->vc]--; + spin_unlock(&phy->vc_lock); + + /* + * Request DMA stop and wait until it completes. If completion times + * out, forcefully disable the DMA. + */ + spin_lock_irq(&ctx->dma.lock); + ctx->dma.state = CAL_DMA_STOP_REQUESTED; + spin_unlock_irq(&ctx->dma.lock); + + timeout = wait_event_timeout(ctx->dma.wait, cal_ctx_wr_dma_stopped(ctx), + msecs_to_jiffies(500)); + if (!timeout) { + ctx_err(ctx, "failed to disable dma cleanly\n"); + cal_ctx_wr_dma_disable(ctx); + } + + /* Disable IRQ_WDMA_END and IRQ_WDMA_START. */ + cal_write(ctx->cal, CAL_HL_IRQENABLE_CLR(1), + CAL_HL_IRQ_WDMA_END_MASK(ctx->dma_ctx)); + cal_write(ctx->cal, CAL_HL_IRQENABLE_CLR(2), + CAL_HL_IRQ_WDMA_START_MASK(ctx->dma_ctx)); + + ctx->dma.state = CAL_DMA_STOPPED; + + /* Disable CSI2 context */ + cal_write(ctx->cal, CAL_CSI2_CTX(ctx->phy->instance, ctx->csi2_ctx), 0); + + /* Disable pix proc */ + if (ctx->use_pix_proc) + cal_write(ctx->cal, CAL_PIX_PROC(ctx->pix_proc), 0); +} + +/* ------------------------------------------------------------------ + * IRQ Handling + * ------------------------------------------------------------------ + */ + +/* + * Track a sequence number for each virtual channel, which is shared by + * all contexts using the same virtual channel. This is done using the + * CSI-2 frame number as a base. + */ +static void cal_update_seq_number(struct cal_ctx *ctx) +{ + struct cal_dev *cal = ctx->cal; + struct cal_camerarx *phy = ctx->phy; + u16 prev_frame_num, frame_num; + u8 vc = ctx->vc; + + frame_num = + cal_read(cal, CAL_CSI2_STATUS(phy->instance, ctx->csi2_ctx)) & + 0xffff; + + if (phy->vc_frame_number[vc] != frame_num) { + prev_frame_num = phy->vc_frame_number[vc]; + + if (prev_frame_num >= frame_num) + phy->vc_sequence[vc] += 1; + else + phy->vc_sequence[vc] += frame_num - prev_frame_num; + + phy->vc_frame_number[vc] = frame_num; + } +} + +static inline void cal_irq_wdma_start(struct cal_ctx *ctx) +{ + spin_lock(&ctx->dma.lock); + + if (ctx->dma.state == CAL_DMA_STOP_REQUESTED) { + /* + * If a stop is requested, disable the write DMA context + * immediately. The CAL_WR_DMA_CTRL_j.MODE field is shadowed, + * the current frame will complete and the DMA will then stop. + */ + cal_ctx_wr_dma_disable(ctx); + ctx->dma.state = CAL_DMA_STOP_PENDING; + } else if (!list_empty(&ctx->dma.queue) && !ctx->dma.pending) { + /* + * Otherwise, if a new buffer is available, queue it to the + * hardware. + */ + struct cal_buffer *buf; + dma_addr_t addr; + + buf = list_first_entry(&ctx->dma.queue, struct cal_buffer, + list); + addr = vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0); + cal_ctx_set_dma_addr(ctx, addr); + + ctx->dma.pending = buf; + list_del(&buf->list); + } + + spin_unlock(&ctx->dma.lock); + + cal_update_seq_number(ctx); +} + +static inline void cal_irq_wdma_end(struct cal_ctx *ctx) +{ + struct cal_buffer *buf = NULL; + + spin_lock(&ctx->dma.lock); + + /* If the DMA context was stopping, it is now stopped. 
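+ * The shadowed MODE field was already cleared when the stop request was handled at the WDMA_START interrupt, so this frame end is the last one for the context.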
*/ + if (ctx->dma.state == CAL_DMA_STOP_PENDING) { + ctx->dma.state = CAL_DMA_STOPPED; + wake_up(&ctx->dma.wait); + } + + /* If a new buffer was queued, complete the current buffer. */ + if (ctx->dma.pending) { + buf = ctx->dma.active; + ctx->dma.active = ctx->dma.pending; + ctx->dma.pending = NULL; + } + + spin_unlock(&ctx->dma.lock); + + if (buf) { + buf->vb.vb2_buf.timestamp = ktime_get_ns(); + buf->vb.field = ctx->v_fmt.fmt.pix.field; + buf->vb.sequence = ctx->phy->vc_sequence[ctx->vc]; + + vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE); + } +} + +static void cal_irq_handle_wdma(struct cal_ctx *ctx, bool start, bool end) +{ + /* + * CAL HW interrupts are inherently racy. If we get both start and end + * interrupts, we don't know what has happened: did the DMA for a single + * frame start and end, or did one frame end and a new frame start? + * + * Usually for normal pixel frames we get the interrupts separately. If + * we do get both, we have to guess. The assumption in the code below is + * that the active vertical area is larger than the blanking vertical + * area, and thus it is more likely that we get the end of the old frame + * and the start of a new frame. + * + * However, for embedded data, which is only a few lines high, we always + * get both interrupts. Here the assumption is that we get both for the + * same frame. + */ + if (ctx->v_fmt.fmt.pix.height < 10) { + if (start) + cal_irq_wdma_start(ctx); + + if (end) + cal_irq_wdma_end(ctx); + } else { + if (end) + cal_irq_wdma_end(ctx); + + if (start) + cal_irq_wdma_start(ctx); + } +} + +static irqreturn_t cal_irq(int irq_cal, void *data) +{ + struct cal_dev *cal = data; + u32 status[3]; + unsigned int i; + + for (i = 0; i < 3; ++i) { + status[i] = cal_read(cal, CAL_HL_IRQSTATUS(i)); + if (status[i]) + cal_write(cal, CAL_HL_IRQSTATUS(i), status[i]); + } + + if (status[0]) { + if (status[0] & CAL_HL_IRQ_OCPO_ERR_MASK) + dev_err_ratelimited(cal->dev, "OCPO ERROR\n"); + + for (i = 0; i < cal->data->num_csi2_phy; ++i) { + if (status[0] & CAL_HL_IRQ_CIO_MASK(i)) { + u32 cio_stat = cal_read(cal, + CAL_CSI2_COMPLEXIO_IRQSTATUS(i)); + + dev_err_ratelimited(cal->dev, + "CIO%u error: %#08x\n", i, cio_stat); + + cal_write(cal, CAL_CSI2_COMPLEXIO_IRQSTATUS(i), + cio_stat); + } + + if (status[0] & CAL_HL_IRQ_VC_MASK(i)) { + u32 vc_stat = cal_read(cal, CAL_CSI2_VC_IRQSTATUS(i)); + + dev_err_ratelimited(cal->dev, + "CIO%u VC error: %#08x\n", + i, vc_stat); + + cal_write(cal, CAL_CSI2_VC_IRQSTATUS(i), vc_stat); + } + } + } + + for (i = 0; i < cal->num_contexts; ++i) { + bool end = !!(status[1] & CAL_HL_IRQ_WDMA_END_MASK(i)); + bool start = !!(status[2] & CAL_HL_IRQ_WDMA_START_MASK(i)); + + if (start || end) + cal_irq_handle_wdma(cal->ctx[i], start, end); + } + + return IRQ_HANDLED; +} + +/* ------------------------------------------------------------------ + * Asynchronous V4L2 subdev binding + * ------------------------------------------------------------------ + */ + +struct cal_v4l2_async_subdev { + struct v4l2_async_connection asd; /* Must be first */ + struct cal_camerarx *phy; +}; + +static inline struct cal_v4l2_async_subdev * +to_cal_asd(struct v4l2_async_connection *asd) +{ + return container_of(asd, struct cal_v4l2_async_subdev, asd); +} + +static int cal_async_notifier_bound(struct v4l2_async_notifier *notifier, + struct v4l2_subdev *subdev, + struct v4l2_async_connection *asd) +{ + struct cal_camerarx *phy = to_cal_asd(asd)->phy; + int pad; + int ret; + + if (phy->source) { + phy_info(phy, "Rejecting subdev %s (Already 
set!!)", + subdev->name); + return 0; + } + + phy->source = subdev; + phy_dbg(1, phy, "Using source %s for capture\n", subdev->name); + + pad = media_entity_get_fwnode_pad(&subdev->entity, + of_fwnode_handle(phy->source_ep_node), + MEDIA_PAD_FL_SOURCE); + if (pad < 0) { + phy_err(phy, "Source %s has no connected source pad\n", + subdev->name); + return pad; + } + + ret = media_create_pad_link(&subdev->entity, pad, + &phy->subdev.entity, CAL_CAMERARX_PAD_SINK, + MEDIA_LNK_FL_IMMUTABLE | + MEDIA_LNK_FL_ENABLED); + if (ret) { + phy_err(phy, "Failed to create media link for source %s\n", + subdev->name); + return ret; + } + + return 0; +} + +static int cal_async_notifier_complete(struct v4l2_async_notifier *notifier) +{ + struct cal_dev *cal = container_of(notifier, struct cal_dev, notifier); + unsigned int i; + int ret; + + for (i = 0; i < cal->num_contexts; ++i) { + ret = cal_ctx_v4l2_register(cal->ctx[i]); + if (ret) + goto err_ctx_unreg; + } + + if (!cal_mc_api) + return 0; + + ret = v4l2_device_register_subdev_nodes(&cal->v4l2_dev); + if (ret) + goto err_ctx_unreg; + + return 0; + +err_ctx_unreg: + for (; i > 0; --i) { + if (!cal->ctx[i - 1]) + continue; + + cal_ctx_v4l2_unregister(cal->ctx[i - 1]); + } + + return ret; +} + +static const struct v4l2_async_notifier_operations cal_async_notifier_ops = { + .bound = cal_async_notifier_bound, + .complete = cal_async_notifier_complete, +}; + +static int cal_async_notifier_register(struct cal_dev *cal) +{ + unsigned int i; + int ret; + + v4l2_async_nf_init(&cal->notifier, &cal->v4l2_dev); + cal->notifier.ops = &cal_async_notifier_ops; + + for (i = 0; i < cal->data->num_csi2_phy; ++i) { + struct cal_camerarx *phy = cal->phy[i]; + struct cal_v4l2_async_subdev *casd; + struct fwnode_handle *fwnode; + + if (!phy->source_node) + continue; + + fwnode = of_fwnode_handle(phy->source_node); + casd = v4l2_async_nf_add_fwnode(&cal->notifier, + fwnode, + struct cal_v4l2_async_subdev); + if (IS_ERR(casd)) { + phy_err(phy, "Failed to add subdev to notifier\n"); + ret = PTR_ERR(casd); + goto error; + } + + casd->phy = phy; + } + + ret = v4l2_async_nf_register(&cal->notifier); + if (ret) { + cal_err(cal, "Error registering async notifier\n"); + goto error; + } + + return 0; + +error: + v4l2_async_nf_cleanup(&cal->notifier); + return ret; +} + +static void cal_async_notifier_unregister(struct cal_dev *cal) +{ + v4l2_async_nf_unregister(&cal->notifier); + v4l2_async_nf_cleanup(&cal->notifier); +} + +/* ------------------------------------------------------------------ + * Media and V4L2 device handling + * ------------------------------------------------------------------ + */ + +/* + * Register user-facing devices. To be called at the end of the probe function + * when all resources are initialized and ready. + */ +static int cal_media_register(struct cal_dev *cal) +{ + int ret; + + ret = media_device_register(&cal->mdev); + if (ret) { + cal_err(cal, "Failed to register media device\n"); + return ret; + } + + /* + * Register the async notifier. This may trigger registration of the + * V4L2 video devices if all subdevs are ready. + */ + ret = cal_async_notifier_register(cal); + if (ret) { + media_device_unregister(&cal->mdev); + return ret; + } + + return 0; +} + +/* + * Unregister the user-facing devices, but don't free memory yet. To be called + * at the beginning of the remove function, to disallow access from userspace. + */ +static void cal_media_unregister(struct cal_dev *cal) +{ + unsigned int i; + + /* Unregister all the V4L2 video devices. 
*/ + for (i = 0; i < cal->num_contexts; i++) + cal_ctx_v4l2_unregister(cal->ctx[i]); + + cal_async_notifier_unregister(cal); + media_device_unregister(&cal->mdev); +} + +/* + * Initialize the in-kernel objects. To be called at the beginning of the probe + * function, before the V4L2 device is used by the driver. + */ +static int cal_media_init(struct cal_dev *cal) +{ + struct media_device *mdev = &cal->mdev; + int ret; + + mdev->dev = cal->dev; + mdev->hw_revision = cal->revision; + strscpy(mdev->model, "CAL", sizeof(mdev->model)); + media_device_init(mdev); + + /* + * Initialize the V4L2 device (despite the function name, this performs + * initialization, not registration). + */ + cal->v4l2_dev.mdev = mdev; + ret = v4l2_device_register(cal->dev, &cal->v4l2_dev); + if (ret) { + cal_err(cal, "Failed to register V4L2 device\n"); + return ret; + } + + vb2_dma_contig_set_max_seg_size(cal->dev, DMA_BIT_MASK(32)); + + return 0; +} + +/* + * Cleanup the in-kernel objects, freeing memory. To be called at the very end + * of the remove sequence, when nothing (including userspace) can access the + * objects anymore. + */ +static void cal_media_cleanup(struct cal_dev *cal) +{ + v4l2_device_unregister(&cal->v4l2_dev); + media_device_cleanup(&cal->mdev); + + vb2_dma_contig_clear_max_seg_size(cal->dev); +} + +/* ------------------------------------------------------------------ + * Initialization and module stuff + * ------------------------------------------------------------------ + */ + +static struct cal_ctx *cal_ctx_create(struct cal_dev *cal, int inst) +{ + struct cal_ctx *ctx; + int ret; + + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return NULL; + + ctx->cal = cal; + ctx->phy = cal->phy[inst]; + ctx->dma_ctx = inst; + ctx->csi2_ctx = inst; + ctx->cport = inst; + + ret = cal_ctx_v4l2_init(ctx); + if (ret) { + kfree(ctx); + return NULL; + } + + return ctx; +} + +static void cal_ctx_destroy(struct cal_ctx *ctx) +{ + cal_ctx_v4l2_cleanup(ctx); + + kfree(ctx); +} + +static const struct of_device_id cal_of_match[] = { + { + .compatible = "ti,dra72-cal", + .data = (void *)&dra72x_cal_data, + }, + { + .compatible = "ti,dra72-pre-es2-cal", + .data = (void *)&dra72x_es1_cal_data, + }, + { + .compatible = "ti,dra76-cal", + .data = (void *)&dra76x_cal_data, + }, + { + .compatible = "ti,am654-cal", + .data = (void *)&am654_cal_data, + }, + {}, +}; +MODULE_DEVICE_TABLE(of, cal_of_match); + +/* Get hardware revision and info. 
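+ * Reading these registers doubles as a basic check that the module is powered and accessible; HWINFO is compared against its fixed expected value.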
*/ + +#define CAL_HL_HWINFO_VALUE 0xa3c90469 + +static void cal_get_hwinfo(struct cal_dev *cal) +{ + u32 hwinfo; + + cal->revision = cal_read(cal, CAL_HL_REVISION); + switch (FIELD_GET(CAL_HL_REVISION_SCHEME_MASK, cal->revision)) { + case CAL_HL_REVISION_SCHEME_H08: + cal_dbg(3, cal, "CAL HW revision %lu.%lu.%lu (0x%08x)\n", + FIELD_GET(CAL_HL_REVISION_MAJOR_MASK, cal->revision), + FIELD_GET(CAL_HL_REVISION_MINOR_MASK, cal->revision), + FIELD_GET(CAL_HL_REVISION_RTL_MASK, cal->revision), + cal->revision); + break; + + case CAL_HL_REVISION_SCHEME_LEGACY: + default: + cal_info(cal, "Unexpected CAL HW revision 0x%08x\n", + cal->revision); + break; + } + + hwinfo = cal_read(cal, CAL_HL_HWINFO); + if (hwinfo != CAL_HL_HWINFO_VALUE) + cal_info(cal, "CAL_HL_HWINFO = 0x%08x, expected 0x%08x\n", + hwinfo, CAL_HL_HWINFO_VALUE); +} + +static int cal_init_camerarx_regmap(struct cal_dev *cal) +{ + struct platform_device *pdev = to_platform_device(cal->dev); + struct device_node *np = cal->dev->of_node; + struct regmap_config config = { }; + struct regmap *syscon; + struct resource *res; + unsigned int offset; + void __iomem *base; + + syscon = syscon_regmap_lookup_by_phandle_args(np, "ti,camerrx-control", + 1, &offset); + if (!IS_ERR(syscon)) { + cal->syscon_camerrx = syscon; + cal->syscon_camerrx_offset = offset; + return 0; + } + + dev_warn(cal->dev, "failed to get ti,camerrx-control: %ld\n", + PTR_ERR(syscon)); + + /* + * Backward DTS compatibility. If syscon entry is not present then + * check if the camerrx_control resource is present. + */ + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "camerrx_control"); + base = devm_ioremap_resource(cal->dev, res); + if (IS_ERR(base)) { + cal_err(cal, "failed to ioremap camerrx_control\n"); + return PTR_ERR(base); + } + + cal_dbg(1, cal, "ioresource %s at %pa - %pa\n", + res->name, &res->start, &res->end); + + config.reg_bits = 32; + config.reg_stride = 4; + config.val_bits = 32; + config.max_register = resource_size(res) - 4; + + syscon = regmap_init_mmio(NULL, base, &config); + if (IS_ERR(syscon)) { + pr_err("regmap init failed\n"); + return PTR_ERR(syscon); + } + + /* + * In this case the base already point to the direct CM register so no + * need for an offset. + */ + cal->syscon_camerrx = syscon; + cal->syscon_camerrx_offset = 0; + + return 0; +} + +static int cal_probe(struct platform_device *pdev) +{ + struct cal_dev *cal; + bool connected = false; + unsigned int i; + int ret; + int irq; + + cal = devm_kzalloc(&pdev->dev, sizeof(*cal), GFP_KERNEL); + if (!cal) + return -ENOMEM; + + cal->data = of_device_get_match_data(&pdev->dev); + if (!cal->data) { + dev_err(&pdev->dev, "Could not get feature data based on compatible version\n"); + return -ENODEV; + } + + cal->dev = &pdev->dev; + platform_set_drvdata(pdev, cal); + + /* Acquire resources: clocks, CAMERARX regmap, I/O memory and IRQ. 
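+ * The clock, I/O memory and IRQ are requested with devm_* helpers, so they are released automatically on the error paths below.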
*/ + cal->fclk = devm_clk_get(&pdev->dev, "fck"); + if (IS_ERR(cal->fclk)) { + dev_err(&pdev->dev, "cannot get CAL fclk\n"); + return PTR_ERR(cal->fclk); + } + + ret = cal_init_camerarx_regmap(cal); + if (ret < 0) + return ret; + + cal->res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "cal_top"); + cal->base = devm_ioremap_resource(&pdev->dev, cal->res); + if (IS_ERR(cal->base)) + return PTR_ERR(cal->base); + + cal_dbg(1, cal, "ioresource %s at %pa - %pa\n", + cal->res->name, &cal->res->start, &cal->res->end); + + irq = platform_get_irq(pdev, 0); + cal_dbg(1, cal, "got irq# %d\n", irq); + ret = devm_request_irq(&pdev->dev, irq, cal_irq, 0, CAL_MODULE_NAME, + cal); + if (ret) + return ret; + + /* Read the revision and hardware info to verify hardware access. */ + pm_runtime_enable(&pdev->dev); + ret = pm_runtime_resume_and_get(&pdev->dev); + if (ret) + goto error_pm_runtime; + + cal_get_hwinfo(cal); + pm_runtime_put_sync(&pdev->dev); + + /* Initialize the media device. */ + ret = cal_media_init(cal); + if (ret < 0) + goto error_pm_runtime; + + /* Create CAMERARX PHYs. */ + for (i = 0; i < cal->data->num_csi2_phy; ++i) { + cal->phy[i] = cal_camerarx_create(cal, i); + if (IS_ERR(cal->phy[i])) { + ret = PTR_ERR(cal->phy[i]); + cal->phy[i] = NULL; + goto error_camerarx; + } + + if (cal->phy[i]->source_node) + connected = true; + } + + if (!connected) { + cal_err(cal, "Neither port is configured, no point in staying up\n"); + ret = -ENODEV; + goto error_camerarx; + } + + /* Create contexts. */ + for (i = 0; i < cal->data->num_csi2_phy; ++i) { + if (!cal->phy[i]->source_node) + continue; + + cal->ctx[cal->num_contexts] = cal_ctx_create(cal, i); + if (!cal->ctx[cal->num_contexts]) { + cal_err(cal, "Failed to create context %u\n", cal->num_contexts); + ret = -ENODEV; + goto error_context; + } + + cal->num_contexts++; + } + + /* Register the media device. */ + ret = cal_media_register(cal); + if (ret) + goto error_context; + + return 0; + +error_context: + for (i = 0; i < cal->num_contexts; i++) + cal_ctx_destroy(cal->ctx[i]); + +error_camerarx: + for (i = 0; i < cal->data->num_csi2_phy; i++) + cal_camerarx_destroy(cal->phy[i]); + + cal_media_cleanup(cal); + +error_pm_runtime: + pm_runtime_disable(&pdev->dev); + + return ret; +} + +static void cal_remove(struct platform_device *pdev) +{ + struct cal_dev *cal = platform_get_drvdata(pdev); + unsigned int i; + int ret; + + cal_dbg(1, cal, "Removing %s\n", CAL_MODULE_NAME); + + ret = pm_runtime_resume_and_get(&pdev->dev); + + cal_media_unregister(cal); + + for (i = 0; i < cal->data->num_csi2_phy; i++) + cal_camerarx_disable(cal->phy[i]); + + for (i = 0; i < cal->num_contexts; i++) + cal_ctx_destroy(cal->ctx[i]); + + for (i = 0; i < cal->data->num_csi2_phy; i++) + cal_camerarx_destroy(cal->phy[i]); + + cal_media_cleanup(cal); + + if (ret >= 0) + pm_runtime_put_sync(&pdev->dev); + pm_runtime_disable(&pdev->dev); +} + +static int cal_runtime_resume(struct device *dev) +{ + struct cal_dev *cal = dev_get_drvdata(dev); + unsigned int i; + u32 val; + + if (cal->data->flags & DRA72_CAL_PRE_ES2_LDO_DISABLE) { + /* + * Apply errata on both port everytime we (re-)enable + * the clock + */ + for (i = 0; i < cal->data->num_csi2_phy; i++) + cal_camerarx_i913_errata(cal->phy[i]); + } + + /* + * Enable global interrupts that are not related to a particular + * CAMERARAX or context. 
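+ * Only the OCP output error interrupt is enabled at this point; the per-context WDMA interrupts are enabled later from cal_ctx_start().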
+ */ + cal_write(cal, CAL_HL_IRQENABLE_SET(0), CAL_HL_IRQ_OCPO_ERR_MASK); + + val = cal_read(cal, CAL_CTRL); + cal_set_field(&val, CAL_CTRL_BURSTSIZE_BURST128, + CAL_CTRL_BURSTSIZE_MASK); + cal_set_field(&val, 0xf, CAL_CTRL_TAGCNT_MASK); + cal_set_field(&val, CAL_CTRL_POSTED_WRITES_NONPOSTED, + CAL_CTRL_POSTED_WRITES_MASK); + cal_set_field(&val, 0xff, CAL_CTRL_MFLAGL_MASK); + cal_set_field(&val, 0xff, CAL_CTRL_MFLAGH_MASK); + cal_write(cal, CAL_CTRL, val); + cal_dbg(3, cal, "CAL_CTRL = 0x%08x\n", cal_read(cal, CAL_CTRL)); + + return 0; +} + +static const struct dev_pm_ops cal_pm_ops = { + .runtime_resume = cal_runtime_resume, +}; + +static struct platform_driver cal_pdrv = { + .probe = cal_probe, + .remove_new = cal_remove, + .driver = { + .name = CAL_MODULE_NAME, + .pm = &cal_pm_ops, + .of_match_table = cal_of_match, + }, +}; + +module_platform_driver(cal_pdrv); diff --git a/drivers/media/platform/ti/cal/cal.h b/drivers/media/platform/ti/cal/cal.h new file mode 100644 index 0000000000..0856297adc --- /dev/null +++ b/drivers/media/platform/ti/cal/cal.h @@ -0,0 +1,339 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * TI Camera Access Layer (CAL) + * + * Copyright (c) 2015-2020 Texas Instruments Inc. + * + * Authors: + * Benoit Parrot + * Laurent Pinchart + */ +#ifndef __TI_CAL_H__ +#define __TI_CAL_H__ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#define CAL_MODULE_NAME "cal" +#define CAL_MAX_NUM_CONTEXT 8 +#define CAL_NUM_CSI2_PORTS 2 + +/* + * The width is limited by the size of the CAL_WR_DMA_XSIZE_j.XSIZE field, + * expressed in multiples of 64 bits. The height is limited by the size of the + * CAL_CSI2_CTXi_j.CTXi_LINES and CAL_WR_DMA_CTRL_j.YSIZE fields, expressed in + * lines. + */ +#define CAL_MIN_WIDTH_BYTES 16 +#define CAL_MAX_WIDTH_BYTES (8192 * 8) +#define CAL_MIN_HEIGHT_LINES 1 +#define CAL_MAX_HEIGHT_LINES 16383 + +#define CAL_CAMERARX_PAD_SINK 0 +#define CAL_CAMERARX_PAD_FIRST_SOURCE 1 +#define CAL_CAMERARX_NUM_SOURCE_PADS 1 +#define CAL_CAMERARX_NUM_PADS (1 + CAL_CAMERARX_NUM_SOURCE_PADS) + +static inline bool cal_rx_pad_is_sink(u32 pad) +{ + /* Camera RX has 1 sink pad, and N source pads */ + return pad == 0; +} + +static inline bool cal_rx_pad_is_source(u32 pad) +{ + /* Camera RX has 1 sink pad, and N source pads */ + return pad >= CAL_CAMERARX_PAD_FIRST_SOURCE && + pad <= CAL_CAMERARX_NUM_SOURCE_PADS; +} + +struct device; +struct device_node; +struct resource; +struct regmap; +struct regmap_fied; + +/* CTRL_CORE_CAMERRX_CONTROL register field id */ +enum cal_camerarx_field { + F_CTRLCLKEN, + F_CAMMODE, + F_LANEENABLE, + F_CSI_MODE, + F_MAX_FIELDS, +}; + +enum cal_dma_state { + CAL_DMA_RUNNING, + CAL_DMA_STOP_REQUESTED, + CAL_DMA_STOP_PENDING, + CAL_DMA_STOPPED, +}; + +struct cal_format_info { + u32 fourcc; + u32 code; + /* Bits per pixel */ + u8 bpp; + bool meta; +}; + +/* buffer for one video frame */ +struct cal_buffer { + /* common v4l buffer stuff -- must be first */ + struct vb2_v4l2_buffer vb; + struct list_head list; +}; + +/** + * struct cal_dmaqueue - Queue of DMA buffers + */ +struct cal_dmaqueue { + /** + * @lock: Protects all fields in the cal_dmaqueue. + */ + spinlock_t lock; + + /** + * @queue: Buffers queued to the driver and waiting for DMA processing. + * Buffers are added to the list by the vb2 .buffer_queue() operation, + * and move to @pending when they are scheduled for the next frame. 
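+ * Scheduling happens from the WDMA_START interrupt handler, which moves the head of this list to @pending.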
+ */ + struct list_head queue; + /** + * @pending: Buffer provided to the hardware to DMA the next frame. + * Will move to @active at the end of the current frame. + */ + struct cal_buffer *pending; + /** + * @active: Buffer being DMA'ed to for the current frame. Will be + * retired and given back to vb2 at the end of the current frame if + * a @pending buffer has been scheduled to replace it. + */ + struct cal_buffer *active; + + /** @state: State of the DMA engine. */ + enum cal_dma_state state; + /** @wait: Wait queue to signal a @state transition to CAL_DMA_STOPPED. */ + struct wait_queue_head wait; +}; + +struct cal_camerarx_data { + struct { + unsigned int lsb; + unsigned int msb; + } fields[F_MAX_FIELDS]; + unsigned int num_lanes; +}; + +struct cal_data { + const struct cal_camerarx_data *camerarx; + unsigned int num_csi2_phy; + unsigned int flags; +}; + +/* + * The Camera Adaptation Layer (CAL) module is paired with one or more complex + * I/O PHYs (CAMERARX). It contains multiple instances of CSI-2, processing and + * DMA contexts. + * + * The cal_dev structure represents the whole subsystem, including the CAL and + * the CAMERARX instances. Instances of struct cal_dev are named cal through the + * driver. + * + * The cal_camerarx structure represents one CAMERARX instance. Instances of + * cal_camerarx are named phy through the driver. + * + * The cal_ctx structure represents the combination of one CSI-2 context, one + * processing context and one DMA context. Instance of struct cal_ctx are named + * ctx through the driver. + */ + +struct cal_camerarx { + void __iomem *base; + struct resource *res; + struct regmap_field *fields[F_MAX_FIELDS]; + + struct cal_dev *cal; + unsigned int instance; + + struct v4l2_fwnode_endpoint endpoint; + struct device_node *source_ep_node; + struct device_node *source_node; + struct v4l2_subdev *source; + + struct v4l2_subdev subdev; + struct media_pad pads[CAL_CAMERARX_NUM_PADS]; + + /* protects the vc_* fields below */ + spinlock_t vc_lock; + u8 vc_enable_count[4]; + u16 vc_frame_number[4]; + u32 vc_sequence[4]; + + unsigned int enable_count; +}; + +struct cal_dev { + struct clk *fclk; + int irq; + void __iomem *base; + struct resource *res; + struct device *dev; + + const struct cal_data *data; + u32 revision; + + /* Control Module handle */ + struct regmap *syscon_camerrx; + u32 syscon_camerrx_offset; + + /* Camera Core Module handle */ + struct cal_camerarx *phy[CAL_NUM_CSI2_PORTS]; + + u32 num_contexts; + struct cal_ctx *ctx[CAL_MAX_NUM_CONTEXT]; + + struct media_device mdev; + struct v4l2_device v4l2_dev; + struct v4l2_async_notifier notifier; + + unsigned long reserved_pix_proc_mask; +}; + +/* + * There is one cal_ctx structure for each camera core context. + */ +struct cal_ctx { + struct v4l2_ctrl_handler ctrl_handler; + struct video_device vdev; + struct media_pad pad; + + struct cal_dev *cal; + struct cal_camerarx *phy; + + /* v4l2_ioctl mutex */ + struct mutex mutex; + + struct cal_dmaqueue dma; + + /* video capture */ + const struct cal_format_info *fmtinfo; + /* Used to store current pixel format */ + struct v4l2_format v_fmt; + + /* Current subdev enumerated format (legacy) */ + const struct cal_format_info **active_fmt; + unsigned int num_active_fmt; + + struct vb2_queue vb_vidq; + u8 dma_ctx; + u8 cport; + u8 csi2_ctx; + u8 pix_proc; + u8 vc; + u8 datatype; + + bool use_pix_proc; +}; + +extern unsigned int cal_debug; +extern int cal_video_nr; +extern bool cal_mc_api; + +#define cal_dbg(level, cal, fmt, arg...) 
\ + do { \ + if (cal_debug >= (level)) \ + dev_printk(KERN_DEBUG, (cal)->dev, fmt, ##arg); \ + } while (0) +#define cal_info(cal, fmt, arg...) \ + dev_info((cal)->dev, fmt, ##arg) +#define cal_err(cal, fmt, arg...) \ + dev_err((cal)->dev, fmt, ##arg) + +#define ctx_dbg(level, ctx, fmt, arg...) \ + cal_dbg(level, (ctx)->cal, "ctx%u: " fmt, (ctx)->dma_ctx, ##arg) +#define ctx_info(ctx, fmt, arg...) \ + cal_info((ctx)->cal, "ctx%u: " fmt, (ctx)->dma_ctx, ##arg) +#define ctx_err(ctx, fmt, arg...) \ + cal_err((ctx)->cal, "ctx%u: " fmt, (ctx)->dma_ctx, ##arg) + +#define phy_dbg(level, phy, fmt, arg...) \ + cal_dbg(level, (phy)->cal, "phy%u: " fmt, (phy)->instance, ##arg) +#define phy_info(phy, fmt, arg...) \ + cal_info((phy)->cal, "phy%u: " fmt, (phy)->instance, ##arg) +#define phy_err(phy, fmt, arg...) \ + cal_err((phy)->cal, "phy%u: " fmt, (phy)->instance, ##arg) + +static inline u32 cal_read(struct cal_dev *cal, u32 offset) +{ + return ioread32(cal->base + offset); +} + +static inline void cal_write(struct cal_dev *cal, u32 offset, u32 val) +{ + iowrite32(val, cal->base + offset); +} + +static __always_inline u32 cal_read_field(struct cal_dev *cal, u32 offset, u32 mask) +{ + return FIELD_GET(mask, cal_read(cal, offset)); +} + +static inline void cal_write_field(struct cal_dev *cal, u32 offset, u32 value, + u32 mask) +{ + u32 val = cal_read(cal, offset); + + val &= ~mask; + val |= (value << __ffs(mask)) & mask; + cal_write(cal, offset, val); +} + +static inline void cal_set_field(u32 *valp, u32 field, u32 mask) +{ + u32 val = *valp; + + val &= ~mask; + val |= (field << __ffs(mask)) & mask; + *valp = val; +} + +extern const struct cal_format_info cal_formats[]; +extern const unsigned int cal_num_formats; +const struct cal_format_info *cal_format_by_fourcc(u32 fourcc); +const struct cal_format_info *cal_format_by_code(u32 code); + +void cal_quickdump_regs(struct cal_dev *cal); + +void cal_camerarx_disable(struct cal_camerarx *phy); +void cal_camerarx_i913_errata(struct cal_camerarx *phy); +struct cal_camerarx *cal_camerarx_create(struct cal_dev *cal, + unsigned int instance); +void cal_camerarx_destroy(struct cal_camerarx *phy); + +int cal_ctx_prepare(struct cal_ctx *ctx); +void cal_ctx_unprepare(struct cal_ctx *ctx); +void cal_ctx_set_dma_addr(struct cal_ctx *ctx, dma_addr_t addr); +void cal_ctx_start(struct cal_ctx *ctx); +void cal_ctx_stop(struct cal_ctx *ctx); + +int cal_ctx_v4l2_register(struct cal_ctx *ctx); +void cal_ctx_v4l2_unregister(struct cal_ctx *ctx); +int cal_ctx_v4l2_init(struct cal_ctx *ctx); +void cal_ctx_v4l2_cleanup(struct cal_ctx *ctx); + +#endif /* __TI_CAL_H__ */ diff --git a/drivers/media/platform/ti/cal/cal_regs.h b/drivers/media/platform/ti/cal/cal_regs.h new file mode 100644 index 0000000000..40e4f972fc --- /dev/null +++ b/drivers/media/platform/ti/cal/cal_regs.h @@ -0,0 +1,463 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * TI CAL camera interface driver + * + * Copyright (c) 2015 Texas Instruments Inc. + * + * Benoit Parrot, + */ + +#ifndef __TI_CAL_REGS_H +#define __TI_CAL_REGS_H + +/* + * struct cal_dev.flags possibilities + * + * DRA72_CAL_PRE_ES2_LDO_DISABLE: + * Errata i913: CSI2 LDO Needs to be disabled when module is powered on + * + * Enabling CSI2 LDO shorts it to core supply. It is crucial the 2 CSI2 + * LDOs on the device are disabled if CSI-2 module is powered on + * (0x4845 B304 | 0x4845 B384 [28:27] = 0x1) or in ULPS (0x4845 B304 + * | 0x4845 B384 [28:27] = 0x2) mode. 
Common concerns include: high + * current draw on the module supply in active mode. + * + * Errata does not apply when CSI-2 module is powered off + * (0x4845 B304 | 0x4845 B384 [28:27] = 0x0). + * + * SW Workaround: + * Set the following register bits to disable the LDO, + * which is essentially CSI2 REG10 bit 6: + * + * Core 0: 0x4845 B828 = 0x0000 0040 + * Core 1: 0x4845 B928 = 0x0000 0040 + */ +#define DRA72_CAL_PRE_ES2_LDO_DISABLE BIT(0) + +/* CAL register offsets */ + +#define CAL_HL_REVISION 0x0000 +#define CAL_HL_HWINFO 0x0004 +#define CAL_HL_SYSCONFIG 0x0010 +#define CAL_HL_IRQ_EOI 0x001c +#define CAL_HL_IRQSTATUS_RAW(m) (0x20U + (m) * 0x10U) +#define CAL_HL_IRQSTATUS(m) (0x24U + (m) * 0x10U) +#define CAL_HL_IRQENABLE_SET(m) (0x28U + (m) * 0x10U) +#define CAL_HL_IRQENABLE_CLR(m) (0x2cU + (m) * 0x10U) +#define CAL_PIX_PROC(m) (0xc0U + (m) * 0x4U) +#define CAL_CTRL 0x100 +#define CAL_CTRL1 0x104 +#define CAL_LINE_NUMBER_EVT 0x108 +#define CAL_VPORT_CTRL1 0x120 +#define CAL_VPORT_CTRL2 0x124 +#define CAL_BYS_CTRL1 0x130 +#define CAL_BYS_CTRL2 0x134 +#define CAL_RD_DMA_CTRL 0x140 +#define CAL_RD_DMA_PIX_ADDR 0x144 +#define CAL_RD_DMA_PIX_OFST 0x148 +#define CAL_RD_DMA_XSIZE 0x14c +#define CAL_RD_DMA_YSIZE 0x150 +#define CAL_RD_DMA_INIT_ADDR 0x154 +#define CAL_RD_DMA_INIT_OFST 0x168 +#define CAL_RD_DMA_CTRL2 0x16c +#define CAL_WR_DMA_CTRL(m) (0x200U + (m) * 0x10U) +#define CAL_WR_DMA_ADDR(m) (0x204U + (m) * 0x10U) +#define CAL_WR_DMA_OFST(m) (0x208U + (m) * 0x10U) +#define CAL_WR_DMA_XSIZE(m) (0x20cU + (m) * 0x10U) +#define CAL_CSI2_PPI_CTRL(m) (0x300U + (m) * 0x80U) +#define CAL_CSI2_COMPLEXIO_CFG(m) (0x304U + (m) * 0x80U) +#define CAL_CSI2_COMPLEXIO_IRQSTATUS(m) (0x308U + (m) * 0x80U) +#define CAL_CSI2_SHORT_PACKET(m) (0x30cU + (m) * 0x80U) +#define CAL_CSI2_COMPLEXIO_IRQENABLE(m) (0x310U + (m) * 0x80U) +#define CAL_CSI2_TIMING(m) (0x314U + (m) * 0x80U) +#define CAL_CSI2_VC_IRQENABLE(m) (0x318U + (m) * 0x80U) +#define CAL_CSI2_VC_IRQSTATUS(m) (0x328U + (m) * 0x80U) +#define CAL_CSI2_CTX(phy, csi2_ctx) (0x330U + (phy) * 0x80U + (csi2_ctx) * 4) +#define CAL_CSI2_STATUS(phy, csi2_ctx) (0x350U + (phy) * 0x80U + (csi2_ctx) * 4) + +/* CAL CSI2 PHY register offsets */ +#define CAL_CSI2_PHY_REG0 0x000 +#define CAL_CSI2_PHY_REG1 0x004 +#define CAL_CSI2_PHY_REG2 0x008 +#define CAL_CSI2_PHY_REG10 0x028 + +/* CAL Control Module Core Camerrx Control register offsets */ +#define CM_CTRL_CORE_CAMERRX_CONTROL 0x000 + +/********************************************************************* +* Field Definition Macros +*********************************************************************/ + +#define CAL_HL_REVISION_MINOR_MASK GENMASK(5, 0) +#define CAL_HL_REVISION_CUSTOM_MASK GENMASK(7, 6) +#define CAL_HL_REVISION_MAJOR_MASK GENMASK(10, 8) +#define CAL_HL_REVISION_RTL_MASK GENMASK(15, 11) +#define CAL_HL_REVISION_FUNC_MASK GENMASK(27, 16) +#define CAL_HL_REVISION_SCHEME_MASK GENMASK(31, 30) +#define CAL_HL_REVISION_SCHEME_H08 1 +#define CAL_HL_REVISION_SCHEME_LEGACY 0 + +#define CAL_HL_HWINFO_WFIFO_MASK GENMASK(3, 0) +#define CAL_HL_HWINFO_RFIFO_MASK GENMASK(7, 4) +#define CAL_HL_HWINFO_PCTX_MASK GENMASK(12, 8) +#define CAL_HL_HWINFO_WCTX_MASK GENMASK(18, 13) +#define CAL_HL_HWINFO_VFIFO_MASK GENMASK(22, 19) +#define CAL_HL_HWINFO_NCPORT_MASK GENMASK(27, 23) +#define CAL_HL_HWINFO_NPPI_CTXS0_MASK GENMASK(29, 28) +#define CAL_HL_HWINFO_NPPI_CTXS1_MASK GENMASK(31, 30) +#define CAL_HL_HWINFO_NPPI_CONTEXTS_ZERO 0 +#define CAL_HL_HWINFO_NPPI_CONTEXTS_FOUR 1 +#define CAL_HL_HWINFO_NPPI_CONTEXTS_EIGHT 2 
+#define CAL_HL_HWINFO_NPPI_CONTEXTS_RESERVED 3 + +#define CAL_HL_SYSCONFIG_SOFTRESET_MASK BIT(0) +#define CAL_HL_SYSCONFIG_SOFTRESET_DONE 0x0 +#define CAL_HL_SYSCONFIG_SOFTRESET_PENDING 0x1 +#define CAL_HL_SYSCONFIG_SOFTRESET_NOACTION 0x0 +#define CAL_HL_SYSCONFIG_SOFTRESET_RESET 0x1 +#define CAL_HL_SYSCONFIG_IDLE_MASK GENMASK(3, 2) +#define CAL_HL_SYSCONFIG_IDLEMODE_FORCE 0 +#define CAL_HL_SYSCONFIG_IDLEMODE_NO 1 +#define CAL_HL_SYSCONFIG_IDLEMODE_SMART1 2 +#define CAL_HL_SYSCONFIG_IDLEMODE_SMART2 3 + +#define CAL_HL_IRQ_EOI_LINE_NUMBER_MASK BIT(0) +#define CAL_HL_IRQ_EOI_LINE_NUMBER_READ0 0 +#define CAL_HL_IRQ_EOI_LINE_NUMBER_EOI0 0 + +#define CAL_HL_IRQ_WDMA_END_MASK(m) BIT(m) +#define CAL_HL_IRQ_WDMA_START_MASK(m) BIT(m) + +#define CAL_HL_IRQ_OCPO_ERR_MASK BIT(6) + +#define CAL_HL_IRQ_CIO_MASK(i) BIT(16 + (i) * 8) +#define CAL_HL_IRQ_VC_MASK(i) BIT(17 + (i) * 8) + +#define CAL_PIX_PROC_EN_MASK BIT(0) +#define CAL_PIX_PROC_EXTRACT_MASK GENMASK(4, 1) +#define CAL_PIX_PROC_EXTRACT_B6 0x0 +#define CAL_PIX_PROC_EXTRACT_B7 0x1 +#define CAL_PIX_PROC_EXTRACT_B8 0x2 +#define CAL_PIX_PROC_EXTRACT_B10 0x3 +#define CAL_PIX_PROC_EXTRACT_B10_MIPI 0x4 +#define CAL_PIX_PROC_EXTRACT_B12 0x5 +#define CAL_PIX_PROC_EXTRACT_B12_MIPI 0x6 +#define CAL_PIX_PROC_EXTRACT_B14 0x7 +#define CAL_PIX_PROC_EXTRACT_B14_MIPI 0x8 +#define CAL_PIX_PROC_EXTRACT_B16_BE 0x9 +#define CAL_PIX_PROC_EXTRACT_B16_LE 0xa +#define CAL_PIX_PROC_DPCMD_MASK GENMASK(9, 5) +#define CAL_PIX_PROC_DPCMD_BYPASS 0x0 +#define CAL_PIX_PROC_DPCMD_DPCM_10_8_1 0x2 +#define CAL_PIX_PROC_DPCMD_DPCM_12_8_1 0x8 +#define CAL_PIX_PROC_DPCMD_DPCM_10_7_1 0x4 +#define CAL_PIX_PROC_DPCMD_DPCM_10_7_2 0x5 +#define CAL_PIX_PROC_DPCMD_DPCM_10_6_1 0x6 +#define CAL_PIX_PROC_DPCMD_DPCM_10_6_2 0x7 +#define CAL_PIX_PROC_DPCMD_DPCM_12_7_1 0xa +#define CAL_PIX_PROC_DPCMD_DPCM_12_6_1 0xc +#define CAL_PIX_PROC_DPCMD_DPCM_14_10 0xe +#define CAL_PIX_PROC_DPCMD_DPCM_14_8_1 0x10 +#define CAL_PIX_PROC_DPCMD_DPCM_16_12_1 0x12 +#define CAL_PIX_PROC_DPCMD_DPCM_16_10_1 0x14 +#define CAL_PIX_PROC_DPCMD_DPCM_16_8_1 0x16 +#define CAL_PIX_PROC_DPCME_MASK GENMASK(15, 11) +#define CAL_PIX_PROC_DPCME_BYPASS 0x0 +#define CAL_PIX_PROC_DPCME_DPCM_10_8_1 0x2 +#define CAL_PIX_PROC_DPCME_DPCM_12_8_1 0x8 +#define CAL_PIX_PROC_DPCME_DPCM_14_10 0xe +#define CAL_PIX_PROC_DPCME_DPCM_14_8_1 0x10 +#define CAL_PIX_PROC_DPCME_DPCM_16_12_1 0x12 +#define CAL_PIX_PROC_DPCME_DPCM_16_10_1 0x14 +#define CAL_PIX_PROC_DPCME_DPCM_16_8_1 0x16 +#define CAL_PIX_PROC_PACK_MASK GENMASK(18, 16) +#define CAL_PIX_PROC_PACK_B8 0x0 +#define CAL_PIX_PROC_PACK_B10_MIPI 0x2 +#define CAL_PIX_PROC_PACK_B12 0x3 +#define CAL_PIX_PROC_PACK_B12_MIPI 0x4 +#define CAL_PIX_PROC_PACK_B16 0x5 +#define CAL_PIX_PROC_PACK_ARGB 0x6 +#define CAL_PIX_PROC_CPORT_MASK GENMASK(23, 19) + +#define CAL_CTRL_POSTED_WRITES_MASK BIT(0) +#define CAL_CTRL_POSTED_WRITES_NONPOSTED 0 +#define CAL_CTRL_POSTED_WRITES 1 +#define CAL_CTRL_TAGCNT_MASK GENMASK(4, 1) +#define CAL_CTRL_BURSTSIZE_MASK GENMASK(6, 5) +#define CAL_CTRL_BURSTSIZE_BURST16 0x0 +#define CAL_CTRL_BURSTSIZE_BURST32 0x1 +#define CAL_CTRL_BURSTSIZE_BURST64 0x2 +#define CAL_CTRL_BURSTSIZE_BURST128 0x3 +#define CAL_CTRL_LL_FORCE_STATE_MASK GENMASK(12, 7) +#define CAL_CTRL_MFLAGL_MASK GENMASK(20, 13) +#define CAL_CTRL_PWRSCPCLK_MASK BIT(21) +#define CAL_CTRL_PWRSCPCLK_AUTO 0 +#define CAL_CTRL_PWRSCPCLK_FORCE 1 +#define CAL_CTRL_RD_DMA_STALL_MASK BIT(22) +#define CAL_CTRL_MFLAGH_MASK GENMASK(31, 24) + +#define CAL_CTRL1_PPI_GROUPING_MASK GENMASK(1, 0) +#define 
CAL_CTRL1_PPI_GROUPING_DISABLED 0 +#define CAL_CTRL1_PPI_GROUPING_RESERVED 1 +#define CAL_CTRL1_PPI_GROUPING_0 2 +#define CAL_CTRL1_PPI_GROUPING_1 3 +#define CAL_CTRL1_INTERLEAVE01_MASK GENMASK(3, 2) +#define CAL_CTRL1_INTERLEAVE01_DISABLED 0 +#define CAL_CTRL1_INTERLEAVE01_PIX1 1 +#define CAL_CTRL1_INTERLEAVE01_PIX4 2 +#define CAL_CTRL1_INTERLEAVE01_RESERVED 3 +#define CAL_CTRL1_INTERLEAVE23_MASK GENMASK(5, 4) +#define CAL_CTRL1_INTERLEAVE23_DISABLED 0 +#define CAL_CTRL1_INTERLEAVE23_PIX1 1 +#define CAL_CTRL1_INTERLEAVE23_PIX4 2 +#define CAL_CTRL1_INTERLEAVE23_RESERVED 3 + +#define CAL_LINE_NUMBER_EVT_CPORT_MASK GENMASK(4, 0) +#define CAL_LINE_NUMBER_EVT_MASK GENMASK(29, 16) + +#define CAL_VPORT_CTRL1_PCLK_MASK GENMASK(16, 0) +#define CAL_VPORT_CTRL1_XBLK_MASK GENMASK(24, 17) +#define CAL_VPORT_CTRL1_YBLK_MASK GENMASK(30, 25) +#define CAL_VPORT_CTRL1_WIDTH_MASK BIT(31) +#define CAL_VPORT_CTRL1_WIDTH_ONE 0 +#define CAL_VPORT_CTRL1_WIDTH_TWO 1 + +#define CAL_VPORT_CTRL2_CPORT_MASK GENMASK(4, 0) +#define CAL_VPORT_CTRL2_FREERUNNING_MASK BIT(15) +#define CAL_VPORT_CTRL2_FREERUNNING_GATED 0 +#define CAL_VPORT_CTRL2_FREERUNNING_FREE 1 +#define CAL_VPORT_CTRL2_FS_RESETS_MASK BIT(16) +#define CAL_VPORT_CTRL2_FS_RESETS_NO 0 +#define CAL_VPORT_CTRL2_FS_RESETS_YES 1 +#define CAL_VPORT_CTRL2_FSM_RESET_MASK BIT(17) +#define CAL_VPORT_CTRL2_FSM_RESET_NOEFFECT 0 +#define CAL_VPORT_CTRL2_FSM_RESET 1 +#define CAL_VPORT_CTRL2_RDY_THR_MASK GENMASK(31, 18) + +#define CAL_BYS_CTRL1_PCLK_MASK GENMASK(16, 0) +#define CAL_BYS_CTRL1_XBLK_MASK GENMASK(24, 17) +#define CAL_BYS_CTRL1_YBLK_MASK GENMASK(30, 25) +#define CAL_BYS_CTRL1_BYSINEN_MASK BIT(31) + +#define CAL_BYS_CTRL2_CPORTIN_MASK GENMASK(4, 0) +#define CAL_BYS_CTRL2_CPORTOUT_MASK GENMASK(9, 5) +#define CAL_BYS_CTRL2_DUPLICATEDDATA_MASK BIT(10) +#define CAL_BYS_CTRL2_DUPLICATEDDATA_NO 0 +#define CAL_BYS_CTRL2_DUPLICATEDDATA_YES 1 +#define CAL_BYS_CTRL2_FREERUNNING_MASK BIT(11) +#define CAL_BYS_CTRL2_FREERUNNING_NO 0 +#define CAL_BYS_CTRL2_FREERUNNING_YES 1 + +#define CAL_RD_DMA_CTRL_GO_MASK BIT(0) +#define CAL_RD_DMA_CTRL_GO_DIS 0 +#define CAL_RD_DMA_CTRL_GO_EN 1 +#define CAL_RD_DMA_CTRL_GO_IDLE 0 +#define CAL_RD_DMA_CTRL_GO_BUSY 1 +#define CAL_RD_DMA_CTRL_INIT_MASK BIT(1) +#define CAL_RD_DMA_CTRL_BW_LIMITER_MASK GENMASK(10, 2) +#define CAL_RD_DMA_CTRL_OCP_TAG_CNT_MASK GENMASK(14, 11) +#define CAL_RD_DMA_CTRL_PCLK_MASK GENMASK(31, 15) + +#define CAL_RD_DMA_PIX_ADDR_MASK GENMASK(31, 3) + +#define CAL_RD_DMA_PIX_OFST_MASK GENMASK(31, 4) + +#define CAL_RD_DMA_XSIZE_MASK GENMASK(31, 19) + +#define CAL_RD_DMA_YSIZE_MASK GENMASK(29, 16) + +#define CAL_RD_DMA_INIT_ADDR_MASK GENMASK(31, 3) + +#define CAL_RD_DMA_INIT_OFST_MASK GENMASK(31, 3) + +#define CAL_RD_DMA_CTRL2_CIRC_MODE_MASK GENMASK(2, 0) +#define CAL_RD_DMA_CTRL2_CIRC_MODE_DIS 0 +#define CAL_RD_DMA_CTRL2_CIRC_MODE_ONE 1 +#define CAL_RD_DMA_CTRL2_CIRC_MODE_FOUR 2 +#define CAL_RD_DMA_CTRL2_CIRC_MODE_SIXTEEN 3 +#define CAL_RD_DMA_CTRL2_CIRC_MODE_SIXTYFOUR 4 +#define CAL_RD_DMA_CTRL2_CIRC_MODE_RESERVED 5 +#define CAL_RD_DMA_CTRL2_ICM_CSTART_MASK BIT(3) +#define CAL_RD_DMA_CTRL2_PATTERN_MASK GENMASK(5, 4) +#define CAL_RD_DMA_CTRL2_PATTERN_LINEAR 0 +#define CAL_RD_DMA_CTRL2_PATTERN_YUV420 1 +#define CAL_RD_DMA_CTRL2_PATTERN_RD2SKIP2 2 +#define CAL_RD_DMA_CTRL2_PATTERN_RD2SKIP4 3 +#define CAL_RD_DMA_CTRL2_BYSOUT_LE_WAIT_MASK BIT(6) +#define CAL_RD_DMA_CTRL2_BYSOUT_LE_WAIT_FREERUNNING 0 +#define CAL_RD_DMA_CTRL2_BYSOUT_LE_WAIT_WAITFORBYSOUT 1 +#define CAL_RD_DMA_CTRL2_CIRC_SIZE_MASK GENMASK(29, 16) + +#define 
CAL_WR_DMA_CTRL_MODE_MASK GENMASK(2, 0) +#define CAL_WR_DMA_CTRL_MODE_DIS 0 +#define CAL_WR_DMA_CTRL_MODE_SHD 1 +#define CAL_WR_DMA_CTRL_MODE_CNT 2 +#define CAL_WR_DMA_CTRL_MODE_CNT_INIT 3 +#define CAL_WR_DMA_CTRL_MODE_CONST 4 +#define CAL_WR_DMA_CTRL_MODE_RESERVED 5 +#define CAL_WR_DMA_CTRL_PATTERN_MASK GENMASK(4, 3) +#define CAL_WR_DMA_CTRL_PATTERN_LINEAR 0 +#define CAL_WR_DMA_CTRL_PATTERN_WR2SKIP2 2 +#define CAL_WR_DMA_CTRL_PATTERN_WR2SKIP4 3 +#define CAL_WR_DMA_CTRL_PATTERN_RESERVED 1 +#define CAL_WR_DMA_CTRL_ICM_PSTART_MASK BIT(5) +#define CAL_WR_DMA_CTRL_DTAG_MASK GENMASK(8, 6) +#define CAL_WR_DMA_CTRL_DTAG_ATT_HDR 0 +#define CAL_WR_DMA_CTRL_DTAG_ATT_DAT 1 +#define CAL_WR_DMA_CTRL_DTAG 2 +#define CAL_WR_DMA_CTRL_DTAG_PIX_HDR 3 +#define CAL_WR_DMA_CTRL_DTAG_PIX_DAT 4 +#define CAL_WR_DMA_CTRL_DTAG_D5 5 +#define CAL_WR_DMA_CTRL_DTAG_D6 6 +#define CAL_WR_DMA_CTRL_DTAG_D7 7 +#define CAL_WR_DMA_CTRL_CPORT_MASK GENMASK(13, 9) +#define CAL_WR_DMA_CTRL_STALL_RD_MASK BIT(14) +#define CAL_WR_DMA_CTRL_YSIZE_MASK GENMASK(31, 18) + +#define CAL_WR_DMA_ADDR_MASK GENMASK(31, 4) + +#define CAL_WR_DMA_OFST_MASK GENMASK(18, 4) +#define CAL_WR_DMA_OFST_CIRC_MODE_MASK GENMASK(23, 22) +#define CAL_WR_DMA_OFST_CIRC_MODE_ONE 1 +#define CAL_WR_DMA_OFST_CIRC_MODE_FOUR 2 +#define CAL_WR_DMA_OFST_CIRC_MODE_SIXTYFOUR 3 +#define CAL_WR_DMA_OFST_CIRC_MODE_DISABLED 0 +#define CAL_WR_DMA_OFST_CIRC_SIZE_MASK GENMASK(31, 24) + +#define CAL_WR_DMA_XSIZE_XSKIP_MASK GENMASK(15, 3) +#define CAL_WR_DMA_XSIZE_MASK GENMASK(31, 19) + +#define CAL_CSI2_PPI_CTRL_IF_EN_MASK BIT(0) +#define CAL_CSI2_PPI_CTRL_ECC_EN_MASK BIT(2) +#define CAL_CSI2_PPI_CTRL_FRAME_MASK BIT(3) +#define CAL_CSI2_PPI_CTRL_FRAME_IMMEDIATE 0 +#define CAL_CSI2_PPI_CTRL_FRAME 1 + +#define CAL_CSI2_COMPLEXIO_CFG_CLOCK_POSITION_MASK GENMASK(2, 0) +#define CAL_CSI2_COMPLEXIO_CFG_POSITION_5 5 +#define CAL_CSI2_COMPLEXIO_CFG_POSITION_4 4 +#define CAL_CSI2_COMPLEXIO_CFG_POSITION_3 3 +#define CAL_CSI2_COMPLEXIO_CFG_POSITION_2 2 +#define CAL_CSI2_COMPLEXIO_CFG_POSITION_1 1 +#define CAL_CSI2_COMPLEXIO_CFG_POSITION_NOT_USED 0 +#define CAL_CSI2_COMPLEXIO_CFG_CLOCK_POL_MASK BIT(3) +#define CAL_CSI2_COMPLEXIO_CFG_POL_PLUSMINUS 0 +#define CAL_CSI2_COMPLEXIO_CFG_POL_MINUSPLUS 1 +#define CAL_CSI2_COMPLEXIO_CFG_DATA1_POSITION_MASK GENMASK(6, 4) +#define CAL_CSI2_COMPLEXIO_CFG_DATA1_POL_MASK BIT(7) +#define CAL_CSI2_COMPLEXIO_CFG_DATA2_POSITION_MASK GENMASK(10, 8) +#define CAL_CSI2_COMPLEXIO_CFG_DATA2_POL_MASK BIT(11) +#define CAL_CSI2_COMPLEXIO_CFG_DATA3_POSITION_MASK GENMASK(14, 12) +#define CAL_CSI2_COMPLEXIO_CFG_DATA3_POL_MASK BIT(15) +#define CAL_CSI2_COMPLEXIO_CFG_DATA4_POSITION_MASK GENMASK(18, 16) +#define CAL_CSI2_COMPLEXIO_CFG_DATA4_POL_MASK BIT(19) +#define CAL_CSI2_COMPLEXIO_CFG_PWR_AUTO_MASK BIT(24) +#define CAL_CSI2_COMPLEXIO_CFG_PWR_STATUS_MASK GENMASK(26, 25) +#define CAL_CSI2_COMPLEXIO_CFG_PWR_STATUS_STATE_OFF 0 +#define CAL_CSI2_COMPLEXIO_CFG_PWR_STATUS_STATE_ON 1 +#define CAL_CSI2_COMPLEXIO_CFG_PWR_STATUS_STATE_ULP 2 +#define CAL_CSI2_COMPLEXIO_CFG_PWR_CMD_MASK GENMASK(28, 27) +#define CAL_CSI2_COMPLEXIO_CFG_PWR_CMD_STATE_OFF 0 +#define CAL_CSI2_COMPLEXIO_CFG_PWR_CMD_STATE_ON 1 +#define CAL_CSI2_COMPLEXIO_CFG_PWR_CMD_STATE_ULP 2 +#define CAL_CSI2_COMPLEXIO_CFG_RESET_DONE_MASK BIT(29) +#define CAL_CSI2_COMPLEXIO_CFG_RESET_DONE_RESETCOMPLETED 1 +#define CAL_CSI2_COMPLEXIO_CFG_RESET_DONE_RESETONGOING 0 +#define CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL_MASK BIT(30) +#define CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL 0 +#define CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL_OPERATIONAL 1 + 
+#define CAL_CSI2_SHORT_PACKET_MASK GENMASK(23, 0) + +#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTHS1_MASK BIT(0) +#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTHS2_MASK BIT(1) +#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTHS3_MASK BIT(2) +#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTHS4_MASK BIT(3) +#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTHS5_MASK BIT(4) +#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS1_MASK BIT(5) +#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS2_MASK BIT(6) +#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS3_MASK BIT(7) +#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS4_MASK BIT(8) +#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS5_MASK BIT(9) +#define CAL_CSI2_COMPLEXIO_IRQ_ERRESC1_MASK BIT(10) +#define CAL_CSI2_COMPLEXIO_IRQ_ERRESC2_MASK BIT(11) +#define CAL_CSI2_COMPLEXIO_IRQ_ERRESC3_MASK BIT(12) +#define CAL_CSI2_COMPLEXIO_IRQ_ERRESC4_MASK BIT(13) +#define CAL_CSI2_COMPLEXIO_IRQ_ERRESC5_MASK BIT(14) +#define CAL_CSI2_COMPLEXIO_IRQ_ERRCONTROL1_MASK BIT(15) +#define CAL_CSI2_COMPLEXIO_IRQ_ERRCONTROL2_MASK BIT(16) +#define CAL_CSI2_COMPLEXIO_IRQ_ERRCONTROL3_MASK BIT(17) +#define CAL_CSI2_COMPLEXIO_IRQ_ERRCONTROL4_MASK BIT(18) +#define CAL_CSI2_COMPLEXIO_IRQ_ERRCONTROL5_MASK BIT(19) +#define CAL_CSI2_COMPLEXIO_IRQ_LANE_ERRORS_MASK GENMASK(19, 0) +#define CAL_CSI2_COMPLEXIO_IRQ_STATEULPM1_MASK BIT(20) +#define CAL_CSI2_COMPLEXIO_IRQ_STATEULPM2_MASK BIT(21) +#define CAL_CSI2_COMPLEXIO_IRQ_STATEULPM3_MASK BIT(22) +#define CAL_CSI2_COMPLEXIO_IRQ_STATEULPM4_MASK BIT(23) +#define CAL_CSI2_COMPLEXIO_IRQ_STATEULPM5_MASK BIT(24) +#define CAL_CSI2_COMPLEXIO_IRQ_STATEALLULPMENTER_MASK BIT(25) +#define CAL_CSI2_COMPLEXIO_IRQ_STATEALLULPMEXIT_MASK BIT(26) +#define CAL_CSI2_COMPLEXIO_IRQ_FIFO_OVR_MASK BIT(27) +#define CAL_CSI2_COMPLEXIO_IRQ_SHORT_PACKET_MASK BIT(28) +#define CAL_CSI2_COMPLEXIO_IRQ_ECC_NO_CORRECTION_MASK BIT(30) + +#define CAL_CSI2_TIMING_STOP_STATE_COUNTER_IO1_MASK GENMASK(12, 0) +#define CAL_CSI2_TIMING_STOP_STATE_X4_IO1_MASK BIT(13) +#define CAL_CSI2_TIMING_STOP_STATE_X16_IO1_MASK BIT(14) +#define CAL_CSI2_TIMING_FORCE_RX_MODE_IO1_MASK BIT(15) + +#define CAL_CSI2_VC_IRQ_FS_IRQ_MASK(n) BIT(0 + ((n) * 8)) +#define CAL_CSI2_VC_IRQ_FE_IRQ_MASK(n) BIT(1 + ((n) * 8)) +#define CAL_CSI2_VC_IRQ_LS_IRQ_MASK(n) BIT(2 + ((n) * 8)) +#define CAL_CSI2_VC_IRQ_LE_IRQ_MASK(n) BIT(3 + ((n) * 8)) +#define CAL_CSI2_VC_IRQ_CS_IRQ_MASK(n) BIT(4 + ((n) * 8)) +#define CAL_CSI2_VC_IRQ_ECC_CORRECTION_IRQ_MASK(n) BIT(5 + ((n) * 8)) + +#define CAL_CSI2_CTX_DT_MASK GENMASK(5, 0) +#define CAL_CSI2_CTX_DT_DISABLED 0 +#define CAL_CSI2_CTX_DT_ANY 1 +#define CAL_CSI2_CTX_VC_MASK GENMASK(7, 6) +#define CAL_CSI2_CTX_CPORT_MASK GENMASK(12, 8) +#define CAL_CSI2_CTX_ATT_MASK BIT(13) +#define CAL_CSI2_CTX_ATT_PIX 0 +#define CAL_CSI2_CTX_ATT 1 +#define CAL_CSI2_CTX_PACK_MODE_MASK BIT(14) +#define CAL_CSI2_CTX_PACK_MODE_LINE 0 +#define CAL_CSI2_CTX_PACK_MODE_FRAME 1 +#define CAL_CSI2_CTX_LINES_MASK GENMASK(29, 16) + +#define CAL_CSI2_STATUS_FRAME_MASK GENMASK(15, 0) + +#define CAL_CSI2_PHY_REG0_THS_SETTLE_MASK GENMASK(7, 0) +#define CAL_CSI2_PHY_REG0_THS_TERM_MASK GENMASK(15, 8) +#define CAL_CSI2_PHY_REG0_HSCLOCKCONFIG_MASK BIT(24) +#define CAL_CSI2_PHY_REG0_HSCLOCKCONFIG_DISABLE 1 +#define CAL_CSI2_PHY_REG0_HSCLOCKCONFIG_ENABLE 0 + +#define CAL_CSI2_PHY_REG1_TCLK_SETTLE_MASK GENMASK(7, 0) +#define CAL_CSI2_PHY_REG1_CTRLCLK_DIV_FACTOR_MASK GENMASK(9, 8) +#define CAL_CSI2_PHY_REG1_DPHY_HS_SYNC_PATTERN_MASK GENMASK(17, 10) +#define CAL_CSI2_PHY_REG1_TCLK_TERM_MASK GENMASK(24, 18) +#define CAL_CSI2_PHY_REG1_CLOCK_MISS_DETECTOR_STATUS_MASK BIT(25) +#define 
CAL_CSI2_PHY_REG1_CLOCK_MISS_DETECTOR_STATUS_ERROR 1 +#define CAL_CSI2_PHY_REG1_CLOCK_MISS_DETECTOR_STATUS_SUCCESS 0 +#define CAL_CSI2_PHY_REG1_RESET_DONE_STATUS_MASK GENMASK(29, 28) + +#define CAL_CSI2_PHY_REG10_I933_LDO_DISABLE_MASK BIT(6) + +#define CAL_CSI2_PHY_REG2_CCP2_SYNC_PATTERN_MASK GENMASK(23, 0) +#define CAL_CSI2_PHY_REG2_TRIGGER_CMD_RXTRIGESC3_MASK GENMASK(25, 24) +#define CAL_CSI2_PHY_REG2_TRIGGER_CMD_RXTRIGESC2_MASK GENMASK(27, 26) +#define CAL_CSI2_PHY_REG2_TRIGGER_CMD_RXTRIGESC1_MASK GENMASK(29, 28) +#define CAL_CSI2_PHY_REG2_TRIGGER_CMD_RXTRIGESC0_MASK GENMASK(31, 30) + +#define CM_CAMERRX_CTRL_CSI1_CTRLCLKEN_MASK BIT(0) +#define CM_CAMERRX_CTRL_CSI1_CAMMODE_MASK GENMASK(2, 1) +#define CM_CAMERRX_CTRL_CSI1_LANEENABLE_MASK GENMASK(4, 3) +#define CM_CAMERRX_CTRL_CSI1_MODE_MASK BIT(5) +#define CM_CAMERRX_CTRL_CSI0_CTRLCLKEN_MASK BIT(10) +#define CM_CAMERRX_CTRL_CSI0_CAMMODE_MASK GENMASK(12, 11) +#define CM_CAMERRX_CTRL_CSI0_LANEENABLE_MASK GENMASK(16, 13) +#define CM_CAMERRX_CTRL_CSI0_MODE_MASK BIT(17) + +#endif -- cgit v1.2.3