author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
commit     ace9429bb58fd418f0c81d4c2835699bddf6bde6 (patch)
tree       b2d64bc10158fdd5497876388cd68142ca374ed3 /drivers/media/platform/qcom
parent     Initial commit. (diff)
download   linux-ace9429bb58fd418f0c81d4c2835699bddf6bde6.tar.xz
           linux-ace9429bb58fd418f0c81d4c2835699bddf6bde6.zip
Adding upstream version 6.6.15. (upstream/6.6.15)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/media/platform/qcom')
66 files changed, 35291 insertions, 0 deletions
diff --git a/drivers/media/platform/qcom/Kconfig b/drivers/media/platform/qcom/Kconfig new file mode 100644 index 0000000000..cc5799b9ea --- /dev/null +++ b/drivers/media/platform/qcom/Kconfig @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0-only + +comment "Qualcomm media platform drivers" + +source "drivers/media/platform/qcom/camss/Kconfig" +source "drivers/media/platform/qcom/venus/Kconfig" diff --git a/drivers/media/platform/qcom/Makefile b/drivers/media/platform/qcom/Makefile new file mode 100644 index 0000000000..4f055c396e --- /dev/null +++ b/drivers/media/platform/qcom/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-y += camss/ +obj-y += venus/ diff --git a/drivers/media/platform/qcom/camss/Kconfig b/drivers/media/platform/qcom/camss/Kconfig new file mode 100644 index 0000000000..4eda48cb1a --- /dev/null +++ b/drivers/media/platform/qcom/camss/Kconfig @@ -0,0 +1,9 @@ +config VIDEO_QCOM_CAMSS + tristate "Qualcomm V4L2 Camera Subsystem driver" + depends on V4L_PLATFORM_DRIVERS + depends on VIDEO_DEV + depends on (ARCH_QCOM && IOMMU_DMA) || COMPILE_TEST + select MEDIA_CONTROLLER + select VIDEO_V4L2_SUBDEV_API + select VIDEOBUF2_DMA_SG + select V4L2_FWNODE diff --git a/drivers/media/platform/qcom/camss/Makefile b/drivers/media/platform/qcom/camss/Makefile new file mode 100644 index 0000000000..4e22223589 --- /dev/null +++ b/drivers/media/platform/qcom/camss/Makefile @@ -0,0 +1,23 @@ +# SPDX-License-Identifier: GPL-2.0-only +# Makefile for Qualcomm CAMSS driver + +qcom-camss-objs += \ + camss.o \ + camss-csid.o \ + camss-csid-4-1.o \ + camss-csid-4-7.o \ + camss-csid-gen2.o \ + camss-csiphy-2ph-1-0.o \ + camss-csiphy-3ph-1-0.o \ + camss-csiphy.o \ + camss-ispif.o \ + camss-vfe-4-1.o \ + camss-vfe-4-7.o \ + camss-vfe-4-8.o \ + camss-vfe-170.o \ + camss-vfe-480.o \ + camss-vfe-gen1.o \ + camss-vfe.o \ + camss-video.o \ + +obj-$(CONFIG_VIDEO_QCOM_CAMSS) += qcom-camss.o diff --git a/drivers/media/platform/qcom/camss/camss-csid-4-1.c b/drivers/media/platform/qcom/camss/camss-csid-4-1.c new file mode 100644 index 0000000000..d2aec0679d --- /dev/null +++ b/drivers/media/platform/qcom/camss/camss-csid-4-1.c @@ -0,0 +1,328 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * camss-csid-4-1.c + * + * Qualcomm MSM Camera Subsystem - CSID (CSI Decoder) Module + * + * Copyright (C) 2020 Linaro Ltd. 
+ */ + +#include <linux/completion.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/of.h> + +#include "camss-csid.h" +#include "camss-csid-gen1.h" +#include "camss.h" + +#define CAMSS_CSID_HW_VERSION 0x0 +#define CAMSS_CSID_CORE_CTRL_0 0x004 +#define CAMSS_CSID_CORE_CTRL_1 0x008 +#define CAMSS_CSID_RST_CMD 0x00c +#define CAMSS_CSID_CID_LUT_VC_n(n) (0x010 + 0x4 * (n)) +#define CAMSS_CSID_CID_n_CFG(n) (0x020 + 0x4 * (n)) +#define CAMSS_CSID_CID_n_CFG_ISPIF_EN BIT(0) +#define CAMSS_CSID_CID_n_CFG_RDI_EN BIT(1) +#define CAMSS_CSID_CID_n_CFG_DECODE_FORMAT_SHIFT 4 +#define CAMSS_CSID_CID_n_CFG_PLAIN_FORMAT_8 (PLAIN_FORMAT_PLAIN8 << 8) +#define CAMSS_CSID_CID_n_CFG_PLAIN_FORMAT_16 (PLAIN_FORMAT_PLAIN16 << 8) +#define CAMSS_CSID_CID_n_CFG_PLAIN_ALIGNMENT_LSB (0 << 9) +#define CAMSS_CSID_CID_n_CFG_PLAIN_ALIGNMENT_MSB (1 << 9) +#define CAMSS_CSID_CID_n_CFG_RDI_MODE_RAW_DUMP (0 << 10) +#define CAMSS_CSID_CID_n_CFG_RDI_MODE_PLAIN_PACKING (1 << 10) +#define CAMSS_CSID_IRQ_CLEAR_CMD 0x060 +#define CAMSS_CSID_IRQ_MASK 0x064 +#define CAMSS_CSID_IRQ_STATUS 0x068 +#define CAMSS_CSID_TG_CTRL 0x0a0 +#define CAMSS_CSID_TG_CTRL_DISABLE 0xa06436 +#define CAMSS_CSID_TG_CTRL_ENABLE 0xa06437 +#define CAMSS_CSID_TG_VC_CFG 0x0a4 +#define CAMSS_CSID_TG_VC_CFG_H_BLANKING 0x3ff +#define CAMSS_CSID_TG_VC_CFG_V_BLANKING 0x7f +#define CAMSS_CSID_TG_DT_n_CGG_0(n) (0x0ac + 0xc * (n)) +#define CAMSS_CSID_TG_DT_n_CGG_1(n) (0x0b0 + 0xc * (n)) +#define CAMSS_CSID_TG_DT_n_CGG_2(n) (0x0b4 + 0xc * (n)) + +static const struct csid_format csid_formats[] = { + { + MEDIA_BUS_FMT_UYVY8_2X8, + DATA_TYPE_YUV422_8BIT, + DECODE_FORMAT_UNCOMPRESSED_8_BIT, + 8, + 2, + }, + { + MEDIA_BUS_FMT_VYUY8_2X8, + DATA_TYPE_YUV422_8BIT, + DECODE_FORMAT_UNCOMPRESSED_8_BIT, + 8, + 2, + }, + { + MEDIA_BUS_FMT_YUYV8_2X8, + DATA_TYPE_YUV422_8BIT, + DECODE_FORMAT_UNCOMPRESSED_8_BIT, + 8, + 2, + }, + { + MEDIA_BUS_FMT_YVYU8_2X8, + DATA_TYPE_YUV422_8BIT, + DECODE_FORMAT_UNCOMPRESSED_8_BIT, + 8, + 2, + }, + { + MEDIA_BUS_FMT_SBGGR8_1X8, + DATA_TYPE_RAW_8BIT, + DECODE_FORMAT_UNCOMPRESSED_8_BIT, + 8, + 1, + }, + { + MEDIA_BUS_FMT_SGBRG8_1X8, + DATA_TYPE_RAW_8BIT, + DECODE_FORMAT_UNCOMPRESSED_8_BIT, + 8, + 1, + }, + { + MEDIA_BUS_FMT_SGRBG8_1X8, + DATA_TYPE_RAW_8BIT, + DECODE_FORMAT_UNCOMPRESSED_8_BIT, + 8, + 1, + }, + { + MEDIA_BUS_FMT_SRGGB8_1X8, + DATA_TYPE_RAW_8BIT, + DECODE_FORMAT_UNCOMPRESSED_8_BIT, + 8, + 1, + }, + { + MEDIA_BUS_FMT_SBGGR10_1X10, + DATA_TYPE_RAW_10BIT, + DECODE_FORMAT_UNCOMPRESSED_10_BIT, + 10, + 1, + }, + { + MEDIA_BUS_FMT_SGBRG10_1X10, + DATA_TYPE_RAW_10BIT, + DECODE_FORMAT_UNCOMPRESSED_10_BIT, + 10, + 1, + }, + { + MEDIA_BUS_FMT_SGRBG10_1X10, + DATA_TYPE_RAW_10BIT, + DECODE_FORMAT_UNCOMPRESSED_10_BIT, + 10, + 1, + }, + { + MEDIA_BUS_FMT_SRGGB10_1X10, + DATA_TYPE_RAW_10BIT, + DECODE_FORMAT_UNCOMPRESSED_10_BIT, + 10, + 1, + }, + { + MEDIA_BUS_FMT_SBGGR12_1X12, + DATA_TYPE_RAW_12BIT, + DECODE_FORMAT_UNCOMPRESSED_12_BIT, + 12, + 1, + }, + { + MEDIA_BUS_FMT_SGBRG12_1X12, + DATA_TYPE_RAW_12BIT, + DECODE_FORMAT_UNCOMPRESSED_12_BIT, + 12, + 1, + }, + { + MEDIA_BUS_FMT_SGRBG12_1X12, + DATA_TYPE_RAW_12BIT, + DECODE_FORMAT_UNCOMPRESSED_12_BIT, + 12, + 1, + }, + { + MEDIA_BUS_FMT_SRGGB12_1X12, + DATA_TYPE_RAW_12BIT, + DECODE_FORMAT_UNCOMPRESSED_12_BIT, + 12, + 1, + }, + { + MEDIA_BUS_FMT_Y10_1X10, + DATA_TYPE_RAW_10BIT, + DECODE_FORMAT_UNCOMPRESSED_10_BIT, + 10, + 1, + }, +}; + +static void csid_configure_stream(struct csid_device *csid, u8 enable) +{ + struct csid_testgen_config *tg = 
&csid->testgen; + u32 val; + + if (enable) { + struct v4l2_mbus_framefmt *input_format; + const struct csid_format *format; + u8 vc = 0; /* Virtual Channel 0 */ + u8 cid = vc * 4; /* id of Virtual Channel and Data Type set */ + u8 dt_shift; + + if (tg->enabled) { + /* Config Test Generator */ + u32 num_lines, num_bytes_per_line; + + input_format = &csid->fmt[MSM_CSID_PAD_SRC]; + format = csid_get_fmt_entry(csid->formats, csid->nformats, + input_format->code); + num_bytes_per_line = input_format->width * format->bpp * format->spp / 8; + num_lines = input_format->height; + + /* 31:24 V blank, 23:13 H blank, 3:2 num of active DT */ + /* 1:0 VC */ + val = ((CAMSS_CSID_TG_VC_CFG_V_BLANKING & 0xff) << 24) | + ((CAMSS_CSID_TG_VC_CFG_H_BLANKING & 0x7ff) << 13); + writel_relaxed(val, csid->base + CAMSS_CSID_TG_VC_CFG); + + /* 28:16 bytes per lines, 12:0 num of lines */ + val = ((num_bytes_per_line & 0x1fff) << 16) | + (num_lines & 0x1fff); + writel_relaxed(val, csid->base + CAMSS_CSID_TG_DT_n_CGG_0(0)); + + /* 5:0 data type */ + val = format->data_type; + writel_relaxed(val, csid->base + CAMSS_CSID_TG_DT_n_CGG_1(0)); + + /* 2:0 output test pattern */ + val = tg->mode - 1; + writel_relaxed(val, csid->base + CAMSS_CSID_TG_DT_n_CGG_2(0)); + } else { + struct csid_phy_config *phy = &csid->phy; + + input_format = &csid->fmt[MSM_CSID_PAD_SINK]; + format = csid_get_fmt_entry(csid->formats, csid->nformats, + input_format->code); + + val = phy->lane_cnt - 1; + val |= phy->lane_assign << 4; + + writel_relaxed(val, csid->base + CAMSS_CSID_CORE_CTRL_0); + + val = phy->csiphy_id << 17; + val |= 0x9; + + writel_relaxed(val, csid->base + CAMSS_CSID_CORE_CTRL_1); + } + + /* Config LUT */ + + dt_shift = (cid % 4) * 8; + val = readl_relaxed(csid->base + CAMSS_CSID_CID_LUT_VC_n(vc)); + val &= ~(0xff << dt_shift); + val |= format->data_type << dt_shift; + writel_relaxed(val, csid->base + CAMSS_CSID_CID_LUT_VC_n(vc)); + + val = CAMSS_CSID_CID_n_CFG_ISPIF_EN; + val |= CAMSS_CSID_CID_n_CFG_RDI_EN; + val |= format->decode_format << CAMSS_CSID_CID_n_CFG_DECODE_FORMAT_SHIFT; + val |= CAMSS_CSID_CID_n_CFG_RDI_MODE_RAW_DUMP; + writel_relaxed(val, csid->base + CAMSS_CSID_CID_n_CFG(cid)); + + if (tg->enabled) { + val = CAMSS_CSID_TG_CTRL_ENABLE; + writel_relaxed(val, csid->base + CAMSS_CSID_TG_CTRL); + } + } else { + if (tg->enabled) { + val = CAMSS_CSID_TG_CTRL_DISABLE; + writel_relaxed(val, csid->base + CAMSS_CSID_TG_CTRL); + } + } +} + +static int csid_configure_testgen_pattern(struct csid_device *csid, s32 val) +{ + if (val > 0 && val <= csid->testgen.nmodes) + csid->testgen.mode = val; + + return 0; +} + +static u32 csid_hw_version(struct csid_device *csid) +{ + u32 hw_version = readl_relaxed(csid->base + CAMSS_CSID_HW_VERSION); + + dev_dbg(csid->camss->dev, "CSID HW Version = 0x%08x\n", hw_version); + + return hw_version; +} + +static irqreturn_t csid_isr(int irq, void *dev) +{ + struct csid_device *csid = dev; + u32 value; + + value = readl_relaxed(csid->base + CAMSS_CSID_IRQ_STATUS); + writel_relaxed(value, csid->base + CAMSS_CSID_IRQ_CLEAR_CMD); + + if ((value >> 11) & 0x1) + complete(&csid->reset_complete); + + return IRQ_HANDLED; +} + +static int csid_reset(struct csid_device *csid) +{ + unsigned long time; + + reinit_completion(&csid->reset_complete); + + writel_relaxed(0x7fff, csid->base + CAMSS_CSID_RST_CMD); + + time = wait_for_completion_timeout(&csid->reset_complete, + msecs_to_jiffies(CSID_RESET_TIMEOUT_MS)); + if (!time) { + dev_err(csid->camss->dev, "CSID reset timeout\n"); + return -EIO; + } + + return 0; 
+} + +static u32 csid_src_pad_code(struct csid_device *csid, u32 sink_code, + unsigned int match_format_idx, u32 match_code) +{ + if (match_format_idx > 0) + return 0; + + return sink_code; +} + +static void csid_subdev_init(struct csid_device *csid) +{ + csid->formats = csid_formats; + csid->nformats = ARRAY_SIZE(csid_formats); + csid->testgen.modes = csid_testgen_modes; + csid->testgen.nmodes = CSID_PAYLOAD_MODE_NUM_SUPPORTED_GEN1; +} + +const struct csid_hw_ops csid_ops_4_1 = { + .configure_stream = csid_configure_stream, + .configure_testgen_pattern = csid_configure_testgen_pattern, + .hw_version = csid_hw_version, + .isr = csid_isr, + .reset = csid_reset, + .src_pad_code = csid_src_pad_code, + .subdev_init = csid_subdev_init, +}; diff --git a/drivers/media/platform/qcom/camss/camss-csid-4-7.c b/drivers/media/platform/qcom/camss/camss-csid-4-7.c new file mode 100644 index 0000000000..e7436ec6d0 --- /dev/null +++ b/drivers/media/platform/qcom/camss/camss-csid-4-7.c @@ -0,0 +1,404 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * camss-csid-4-7.c + * + * Qualcomm MSM Camera Subsystem - CSID (CSI Decoder) Module + * + * Copyright (C) 2020 Linaro Ltd. + */ +#include <linux/completion.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/of.h> + +#include "camss-csid.h" +#include "camss-csid-gen1.h" +#include "camss.h" + +#define CAMSS_CSID_HW_VERSION 0x0 +#define CAMSS_CSID_CORE_CTRL_0 0x004 +#define CAMSS_CSID_CORE_CTRL_1 0x008 +#define CAMSS_CSID_RST_CMD 0x010 +#define CAMSS_CSID_CID_LUT_VC_n(n) (0x014 + 0x4 * (n)) +#define CAMSS_CSID_CID_n_CFG(n) (0x024 + 0x4 * (n)) +#define CAMSS_CSID_CID_n_CFG_ISPIF_EN BIT(0) +#define CAMSS_CSID_CID_n_CFG_RDI_EN BIT(1) +#define CAMSS_CSID_CID_n_CFG_DECODE_FORMAT_SHIFT 4 +#define CAMSS_CSID_CID_n_CFG_PLAIN_FORMAT_8 (PLAIN_FORMAT_PLAIN8 << 8) +#define CAMSS_CSID_CID_n_CFG_PLAIN_FORMAT_16 (PLAIN_FORMAT_PLAIN16 << 8) +#define CAMSS_CSID_CID_n_CFG_PLAIN_ALIGNMENT_LSB (0 << 9) +#define CAMSS_CSID_CID_n_CFG_PLAIN_ALIGNMENT_MSB (1 << 9) +#define CAMSS_CSID_CID_n_CFG_RDI_MODE_RAW_DUMP (0 << 10) +#define CAMSS_CSID_CID_n_CFG_RDI_MODE_PLAIN_PACKING (1 << 10) +#define CAMSS_CSID_IRQ_CLEAR_CMD 0x064 +#define CAMSS_CSID_IRQ_MASK 0x068 +#define CAMSS_CSID_IRQ_STATUS 0x06c +#define CAMSS_CSID_TG_CTRL 0x0a8 +#define CAMSS_CSID_TG_CTRL_DISABLE 0xa06436 +#define CAMSS_CSID_TG_CTRL_ENABLE 0xa06437 +#define CAMSS_CSID_TG_VC_CFG 0x0ac +#define CAMSS_CSID_TG_VC_CFG_H_BLANKING 0x3ff +#define CAMSS_CSID_TG_VC_CFG_V_BLANKING 0x7f +#define CAMSS_CSID_TG_DT_n_CGG_0(n) (0x0b4 + 0xc * (n)) +#define CAMSS_CSID_TG_DT_n_CGG_1(n) (0x0b8 + 0xc * (n)) +#define CAMSS_CSID_TG_DT_n_CGG_2(n) (0x0bc + 0xc * (n)) + +static const struct csid_format csid_formats[] = { + { + MEDIA_BUS_FMT_UYVY8_2X8, + DATA_TYPE_YUV422_8BIT, + DECODE_FORMAT_UNCOMPRESSED_8_BIT, + 8, + 2, + }, + { + MEDIA_BUS_FMT_VYUY8_2X8, + DATA_TYPE_YUV422_8BIT, + DECODE_FORMAT_UNCOMPRESSED_8_BIT, + 8, + 2, + }, + { + MEDIA_BUS_FMT_YUYV8_2X8, + DATA_TYPE_YUV422_8BIT, + DECODE_FORMAT_UNCOMPRESSED_8_BIT, + 8, + 2, + }, + { + MEDIA_BUS_FMT_YVYU8_2X8, + DATA_TYPE_YUV422_8BIT, + DECODE_FORMAT_UNCOMPRESSED_8_BIT, + 8, + 2, + }, + { + MEDIA_BUS_FMT_SBGGR8_1X8, + DATA_TYPE_RAW_8BIT, + DECODE_FORMAT_UNCOMPRESSED_8_BIT, + 8, + 1, + }, + { + MEDIA_BUS_FMT_SGBRG8_1X8, + DATA_TYPE_RAW_8BIT, + DECODE_FORMAT_UNCOMPRESSED_8_BIT, + 8, + 1, + }, + { + MEDIA_BUS_FMT_SGRBG8_1X8, + DATA_TYPE_RAW_8BIT, + DECODE_FORMAT_UNCOMPRESSED_8_BIT, + 8, + 1, + }, + { + MEDIA_BUS_FMT_SRGGB8_1X8, + DATA_TYPE_RAW_8BIT, + 
DECODE_FORMAT_UNCOMPRESSED_8_BIT, + 8, + 1, + }, + { + MEDIA_BUS_FMT_SBGGR10_1X10, + DATA_TYPE_RAW_10BIT, + DECODE_FORMAT_UNCOMPRESSED_10_BIT, + 10, + 1, + }, + { + MEDIA_BUS_FMT_SGBRG10_1X10, + DATA_TYPE_RAW_10BIT, + DECODE_FORMAT_UNCOMPRESSED_10_BIT, + 10, + 1, + }, + { + MEDIA_BUS_FMT_SGRBG10_1X10, + DATA_TYPE_RAW_10BIT, + DECODE_FORMAT_UNCOMPRESSED_10_BIT, + 10, + 1, + }, + { + MEDIA_BUS_FMT_SRGGB10_1X10, + DATA_TYPE_RAW_10BIT, + DECODE_FORMAT_UNCOMPRESSED_10_BIT, + 10, + 1, + }, + { + MEDIA_BUS_FMT_SBGGR12_1X12, + DATA_TYPE_RAW_12BIT, + DECODE_FORMAT_UNCOMPRESSED_12_BIT, + 12, + 1, + }, + { + MEDIA_BUS_FMT_SGBRG12_1X12, + DATA_TYPE_RAW_12BIT, + DECODE_FORMAT_UNCOMPRESSED_12_BIT, + 12, + 1, + }, + { + MEDIA_BUS_FMT_SGRBG12_1X12, + DATA_TYPE_RAW_12BIT, + DECODE_FORMAT_UNCOMPRESSED_12_BIT, + 12, + 1, + }, + { + MEDIA_BUS_FMT_SRGGB12_1X12, + DATA_TYPE_RAW_12BIT, + DECODE_FORMAT_UNCOMPRESSED_12_BIT, + 12, + 1, + }, + { + MEDIA_BUS_FMT_SBGGR14_1X14, + DATA_TYPE_RAW_14BIT, + DECODE_FORMAT_UNCOMPRESSED_14_BIT, + 14, + 1, + }, + { + MEDIA_BUS_FMT_SGBRG14_1X14, + DATA_TYPE_RAW_14BIT, + DECODE_FORMAT_UNCOMPRESSED_14_BIT, + 14, + 1, + }, + { + MEDIA_BUS_FMT_SGRBG14_1X14, + DATA_TYPE_RAW_14BIT, + DECODE_FORMAT_UNCOMPRESSED_14_BIT, + 14, + 1, + }, + { + MEDIA_BUS_FMT_SRGGB14_1X14, + DATA_TYPE_RAW_14BIT, + DECODE_FORMAT_UNCOMPRESSED_14_BIT, + 14, + 1, + }, + { + MEDIA_BUS_FMT_Y10_1X10, + DATA_TYPE_RAW_10BIT, + DECODE_FORMAT_UNCOMPRESSED_10_BIT, + 10, + 1, + }, +}; + +static void csid_configure_stream(struct csid_device *csid, u8 enable) +{ + struct csid_testgen_config *tg = &csid->testgen; + u32 sink_code = csid->fmt[MSM_CSID_PAD_SINK].code; + u32 src_code = csid->fmt[MSM_CSID_PAD_SRC].code; + u32 val; + + if (enable) { + struct v4l2_mbus_framefmt *input_format; + const struct csid_format *format; + u8 vc = 0; /* Virtual Channel 0 */ + u8 cid = vc * 4; /* id of Virtual Channel and Data Type set */ + u8 dt_shift; + + if (tg->enabled) { + /* Config Test Generator */ + u32 num_bytes_per_line, num_lines; + + input_format = &csid->fmt[MSM_CSID_PAD_SRC]; + format = csid_get_fmt_entry(csid->formats, csid->nformats, + input_format->code); + num_bytes_per_line = input_format->width * format->bpp * format->spp / 8; + num_lines = input_format->height; + + /* 31:24 V blank, 23:13 H blank, 3:2 num of active DT */ + /* 1:0 VC */ + val = ((CAMSS_CSID_TG_VC_CFG_V_BLANKING & 0xff) << 24) | + ((CAMSS_CSID_TG_VC_CFG_H_BLANKING & 0x7ff) << 13); + writel_relaxed(val, csid->base + CAMSS_CSID_TG_VC_CFG); + + /* 28:16 bytes per lines, 12:0 num of lines */ + val = ((num_bytes_per_line & 0x1fff) << 16) | + (num_lines & 0x1fff); + writel_relaxed(val, csid->base + CAMSS_CSID_TG_DT_n_CGG_0(0)); + + /* 5:0 data type */ + val = format->data_type; + writel_relaxed(val, csid->base + CAMSS_CSID_TG_DT_n_CGG_1(0)); + + /* 2:0 output test pattern */ + val = tg->mode - 1; + writel_relaxed(val, csid->base + CAMSS_CSID_TG_DT_n_CGG_2(0)); + } else { + struct csid_phy_config *phy = &csid->phy; + + input_format = &csid->fmt[MSM_CSID_PAD_SINK]; + format = csid_get_fmt_entry(csid->formats, csid->nformats, + input_format->code); + + val = phy->lane_cnt - 1; + val |= phy->lane_assign << 4; + + writel_relaxed(val, csid->base + CAMSS_CSID_CORE_CTRL_0); + + val = phy->csiphy_id << 17; + val |= 0x9; + + writel_relaxed(val, csid->base + CAMSS_CSID_CORE_CTRL_1); + } + + /* Config LUT */ + + dt_shift = (cid % 4) * 8; + + val = readl_relaxed(csid->base + CAMSS_CSID_CID_LUT_VC_n(vc)); + val &= ~(0xff << dt_shift); + val |= format->data_type << dt_shift; 
+ writel_relaxed(val, csid->base + CAMSS_CSID_CID_LUT_VC_n(vc)); + + val = CAMSS_CSID_CID_n_CFG_ISPIF_EN; + val |= CAMSS_CSID_CID_n_CFG_RDI_EN; + val |= format->decode_format << CAMSS_CSID_CID_n_CFG_DECODE_FORMAT_SHIFT; + val |= CAMSS_CSID_CID_n_CFG_RDI_MODE_RAW_DUMP; + + if ((sink_code == MEDIA_BUS_FMT_SBGGR10_1X10 && + src_code == MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE) || + (sink_code == MEDIA_BUS_FMT_Y10_1X10 && + src_code == MEDIA_BUS_FMT_Y10_2X8_PADHI_LE)) { + val |= CAMSS_CSID_CID_n_CFG_RDI_MODE_PLAIN_PACKING; + val |= CAMSS_CSID_CID_n_CFG_PLAIN_FORMAT_16; + val |= CAMSS_CSID_CID_n_CFG_PLAIN_ALIGNMENT_LSB; + } + + writel_relaxed(val, csid->base + CAMSS_CSID_CID_n_CFG(cid)); + + if (tg->enabled) { + val = CAMSS_CSID_TG_CTRL_ENABLE; + writel_relaxed(val, csid->base + CAMSS_CSID_TG_CTRL); + } + } else { + if (tg->enabled) { + val = CAMSS_CSID_TG_CTRL_DISABLE; + writel_relaxed(val, csid->base + CAMSS_CSID_TG_CTRL); + } + } +} + +static int csid_configure_testgen_pattern(struct csid_device *csid, s32 val) +{ + if (val > 0 && val <= csid->testgen.nmodes) + csid->testgen.mode = val; + + return 0; +} + +static u32 csid_hw_version(struct csid_device *csid) +{ + u32 hw_version = readl_relaxed(csid->base + CAMSS_CSID_HW_VERSION); + + dev_dbg(csid->camss->dev, "CSID HW Version = 0x%08x\n", hw_version); + + return hw_version; +} + +/* + * isr - CSID module interrupt service routine + * @irq: Interrupt line + * @dev: CSID device + * + * Return IRQ_HANDLED on success + */ +static irqreturn_t csid_isr(int irq, void *dev) +{ + struct csid_device *csid = dev; + u32 value; + + value = readl_relaxed(csid->base + CAMSS_CSID_IRQ_STATUS); + writel_relaxed(value, csid->base + CAMSS_CSID_IRQ_CLEAR_CMD); + + if ((value >> 11) & 0x1) + complete(&csid->reset_complete); + + return IRQ_HANDLED; +} + +/* + * csid_reset - Trigger reset on CSID module and wait to complete + * @csid: CSID device + * + * Return 0 on success or a negative error code otherwise + */ +static int csid_reset(struct csid_device *csid) +{ + unsigned long time; + + reinit_completion(&csid->reset_complete); + + writel_relaxed(0x7fff, csid->base + CAMSS_CSID_RST_CMD); + + time = wait_for_completion_timeout(&csid->reset_complete, + msecs_to_jiffies(CSID_RESET_TIMEOUT_MS)); + if (!time) { + dev_err(csid->camss->dev, "CSID reset timeout\n"); + return -EIO; + } + + return 0; +} + +static u32 csid_src_pad_code(struct csid_device *csid, u32 sink_code, + unsigned int match_format_idx, u32 match_code) +{ + switch (sink_code) { + case MEDIA_BUS_FMT_SBGGR10_1X10: + { + u32 src_code[] = { + MEDIA_BUS_FMT_SBGGR10_1X10, + MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE, + }; + + return csid_find_code(src_code, ARRAY_SIZE(src_code), + match_format_idx, match_code); + } + case MEDIA_BUS_FMT_Y10_1X10: + { + u32 src_code[] = { + MEDIA_BUS_FMT_Y10_1X10, + MEDIA_BUS_FMT_Y10_2X8_PADHI_LE, + }; + + return csid_find_code(src_code, ARRAY_SIZE(src_code), + match_format_idx, match_code); + } + default: + if (match_format_idx > 0) + return 0; + + return sink_code; + } +} + +static void csid_subdev_init(struct csid_device *csid) +{ + csid->formats = csid_formats; + csid->nformats = ARRAY_SIZE(csid_formats); + csid->testgen.modes = csid_testgen_modes; + csid->testgen.nmodes = CSID_PAYLOAD_MODE_NUM_SUPPORTED_GEN1; +} + +const struct csid_hw_ops csid_ops_4_7 = { + .configure_stream = csid_configure_stream, + .configure_testgen_pattern = csid_configure_testgen_pattern, + .hw_version = csid_hw_version, + .isr = csid_isr, + .reset = csid_reset, + .src_pad_code = csid_src_pad_code, + 
.subdev_init = csid_subdev_init, +}; diff --git a/drivers/media/platform/qcom/camss/camss-csid-gen1.h b/drivers/media/platform/qcom/camss/camss-csid-gen1.h new file mode 100644 index 0000000000..80a2bc6eff --- /dev/null +++ b/drivers/media/platform/qcom/camss/camss-csid-gen1.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * camss-csid-gen1.h + * + * Qualcomm MSM Camera Subsystem - CSID (CSI Decoder) Module Generation 1 + * + * Copyright (C) 2021 Linaro Ltd. + */ +#ifndef QC_MSM_CAMSS_CSID_GEN1_H +#define QC_MSM_CAMSS_CSID_GEN1_H + +#define DECODE_FORMAT_UNCOMPRESSED_6_BIT 0x0 +#define DECODE_FORMAT_UNCOMPRESSED_8_BIT 0x1 +#define DECODE_FORMAT_UNCOMPRESSED_10_BIT 0x2 +#define DECODE_FORMAT_UNCOMPRESSED_12_BIT 0x3 +#define DECODE_FORMAT_DPCM_10_6_10 0x4 +#define DECODE_FORMAT_DPCM_10_8_10 0x5 +#define DECODE_FORMAT_DPCM_12_6_12 0x6 +#define DECODE_FORMAT_DPCM_12_8_12 0x7 +#define DECODE_FORMAT_UNCOMPRESSED_14_BIT 0x8 +#define DECODE_FORMAT_DPCM_14_8_14 0x9 +#define DECODE_FORMAT_DPCM_14_10_14 0xa + +#define PLAIN_FORMAT_PLAIN8 0x0 /* supports DPCM, UNCOMPRESSED_6/8_BIT */ +#define PLAIN_FORMAT_PLAIN16 0x1 /* supports DPCM, UNCOMPRESSED_10/16_BIT */ + +#endif /* QC_MSM_CAMSS_CSID_GEN1_H */ diff --git a/drivers/media/platform/qcom/camss/camss-csid-gen2.c b/drivers/media/platform/qcom/camss/camss-csid-gen2.c new file mode 100644 index 0000000000..0147cc062e --- /dev/null +++ b/drivers/media/platform/qcom/camss/camss-csid-gen2.c @@ -0,0 +1,630 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * camss-csid-4-7.c + * + * Qualcomm MSM Camera Subsystem - CSID (CSI Decoder) Module + * + * Copyright (C) 2020 Linaro Ltd. + */ +#include <linux/completion.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/of.h> + +#include "camss-csid.h" +#include "camss-csid-gen2.h" +#include "camss.h" + +/* The CSID 2 IP-block is different from the others, + * and is of a bare-bones Lite version, with no PIX + * interface support. As a result of that it has an + * alternate register layout. + */ +#define IS_LITE (csid->id >= 2 ? 1 : 0) + +#define CSID_HW_VERSION 0x0 +#define HW_VERSION_STEPPING 0 +#define HW_VERSION_REVISION 16 +#define HW_VERSION_GENERATION 28 + +#define CSID_RST_STROBES 0x10 +#define RST_STROBES 0 + +#define CSID_CSI2_RX_IRQ_STATUS 0x20 +#define CSID_CSI2_RX_IRQ_MASK 0x24 +#define CSID_CSI2_RX_IRQ_CLEAR 0x28 + +#define CSID_CSI2_RDIN_IRQ_STATUS(rdi) ((IS_LITE ? 0x30 : 0x40) \ + + 0x10 * (rdi)) +#define CSID_CSI2_RDIN_IRQ_MASK(rdi) ((IS_LITE ? 0x34 : 0x44) \ + + 0x10 * (rdi)) +#define CSID_CSI2_RDIN_IRQ_CLEAR(rdi) ((IS_LITE ? 0x38 : 0x48) \ + + 0x10 * (rdi)) +#define CSID_CSI2_RDIN_IRQ_SET(rdi) ((IS_LITE ? 
0x3C : 0x4C) \ + + 0x10 * (rdi)) + +#define CSID_TOP_IRQ_STATUS 0x70 +#define TOP_IRQ_STATUS_RESET_DONE 0 +#define CSID_TOP_IRQ_MASK 0x74 +#define CSID_TOP_IRQ_CLEAR 0x78 +#define CSID_TOP_IRQ_SET 0x7C +#define CSID_IRQ_CMD 0x80 +#define IRQ_CMD_CLEAR 0 +#define IRQ_CMD_SET 4 + +#define CSID_CSI2_RX_CFG0 0x100 +#define CSI2_RX_CFG0_NUM_ACTIVE_LANES 0 +#define CSI2_RX_CFG0_DL0_INPUT_SEL 4 +#define CSI2_RX_CFG0_DL1_INPUT_SEL 8 +#define CSI2_RX_CFG0_DL2_INPUT_SEL 12 +#define CSI2_RX_CFG0_DL3_INPUT_SEL 16 +#define CSI2_RX_CFG0_PHY_NUM_SEL 20 +#define CSI2_RX_CFG0_PHY_TYPE_SEL 24 + +#define CSID_CSI2_RX_CFG1 0x104 +#define CSI2_RX_CFG1_PACKET_ECC_CORRECTION_EN 0 +#define CSI2_RX_CFG1_DE_SCRAMBLE_EN 1 +#define CSI2_RX_CFG1_VC_MODE 2 +#define CSI2_RX_CFG1_COMPLETE_STREAM_EN 4 +#define CSI2_RX_CFG1_COMPLETE_STREAM_FRAME_TIMING 5 +#define CSI2_RX_CFG1_MISR_EN 6 +#define CSI2_RX_CFG1_CGC_MODE 7 +#define CGC_MODE_DYNAMIC_GATING 0 +#define CGC_MODE_ALWAYS_ON 1 + +#define CSID_RDI_CFG0(rdi) ((IS_LITE ? 0x200 : 0x300) \ + + 0x100 * (rdi)) +#define RDI_CFG0_BYTE_CNTR_EN 0 +#define RDI_CFG0_FORMAT_MEASURE_EN 1 +#define RDI_CFG0_TIMESTAMP_EN 2 +#define RDI_CFG0_DROP_H_EN 3 +#define RDI_CFG0_DROP_V_EN 4 +#define RDI_CFG0_CROP_H_EN 5 +#define RDI_CFG0_CROP_V_EN 6 +#define RDI_CFG0_MISR_EN 7 +#define RDI_CFG0_CGC_MODE 8 +#define CGC_MODE_DYNAMIC 0 +#define CGC_MODE_ALWAYS_ON 1 +#define RDI_CFG0_PLAIN_ALIGNMENT 9 +#define PLAIN_ALIGNMENT_LSB 0 +#define PLAIN_ALIGNMENT_MSB 1 +#define RDI_CFG0_PLAIN_FORMAT 10 +#define RDI_CFG0_DECODE_FORMAT 12 +#define RDI_CFG0_DATA_TYPE 16 +#define RDI_CFG0_VIRTUAL_CHANNEL 22 +#define RDI_CFG0_DT_ID 27 +#define RDI_CFG0_EARLY_EOF_EN 29 +#define RDI_CFG0_PACKING_FORMAT 30 +#define RDI_CFG0_ENABLE 31 + +#define CSID_RDI_CFG1(rdi) ((IS_LITE ? 0x204 : 0x304)\ + + 0x100 * (rdi)) +#define RDI_CFG1_TIMESTAMP_STB_SEL 0 + +#define CSID_RDI_CTRL(rdi) ((IS_LITE ? 0x208 : 0x308)\ + + 0x100 * (rdi)) +#define RDI_CTRL_HALT_CMD 0 +#define HALT_CMD_HALT_AT_FRAME_BOUNDARY 0 +#define HALT_CMD_RESUME_AT_FRAME_BOUNDARY 1 +#define RDI_CTRL_HALT_MODE 2 + +#define CSID_RDI_FRM_DROP_PATTERN(rdi) ((IS_LITE ? 0x20C : 0x30C)\ + + 0x100 * (rdi)) +#define CSID_RDI_FRM_DROP_PERIOD(rdi) ((IS_LITE ? 0x210 : 0x310)\ + + 0x100 * (rdi)) +#define CSID_RDI_IRQ_SUBSAMPLE_PATTERN(rdi) ((IS_LITE ? 0x214 : 0x314)\ + + 0x100 * (rdi)) +#define CSID_RDI_IRQ_SUBSAMPLE_PERIOD(rdi) ((IS_LITE ? 0x218 : 0x318)\ + + 0x100 * (rdi)) +#define CSID_RDI_RPP_PIX_DROP_PATTERN(rdi) ((IS_LITE ? 0x224 : 0x324)\ + + 0x100 * (rdi)) +#define CSID_RDI_RPP_PIX_DROP_PERIOD(rdi) ((IS_LITE ? 0x228 : 0x328)\ + + 0x100 * (rdi)) +#define CSID_RDI_RPP_LINE_DROP_PATTERN(rdi) ((IS_LITE ? 0x22C : 0x32C)\ + + 0x100 * (rdi)) +#define CSID_RDI_RPP_LINE_DROP_PERIOD(rdi) ((IS_LITE ? 
0x230 : 0x330)\ + + 0x100 * (rdi)) + +#define CSID_TPG_CTRL 0x600 +#define TPG_CTRL_TEST_EN 0 +#define TPG_CTRL_FS_PKT_EN 1 +#define TPG_CTRL_FE_PKT_EN 2 +#define TPG_CTRL_NUM_ACTIVE_LANES 4 +#define TPG_CTRL_CYCLES_BETWEEN_PKTS 8 +#define TPG_CTRL_NUM_TRAIL_BYTES 20 + +#define CSID_TPG_VC_CFG0 0x604 +#define TPG_VC_CFG0_VC_NUM 0 +#define TPG_VC_CFG0_NUM_ACTIVE_SLOTS 8 +#define NUM_ACTIVE_SLOTS_0_ENABLED 0 +#define NUM_ACTIVE_SLOTS_0_1_ENABLED 1 +#define NUM_ACTIVE_SLOTS_0_1_2_ENABLED 2 +#define NUM_ACTIVE_SLOTS_0_1_3_ENABLED 3 +#define TPG_VC_CFG0_LINE_INTERLEAVING_MODE 10 +#define INTELEAVING_MODE_INTERLEAVED 0 +#define INTELEAVING_MODE_ONE_SHOT 1 +#define TPG_VC_CFG0_NUM_FRAMES 16 + +#define CSID_TPG_VC_CFG1 0x608 +#define TPG_VC_CFG1_H_BLANKING_COUNT 0 +#define TPG_VC_CFG1_V_BLANKING_COUNT 12 +#define TPG_VC_CFG1_V_BLANK_FRAME_WIDTH_SEL 24 + +#define CSID_TPG_LFSR_SEED 0x60C + +#define CSID_TPG_DT_n_CFG_0(n) (0x610 + (n) * 0xC) +#define TPG_DT_n_CFG_0_FRAME_HEIGHT 0 +#define TPG_DT_n_CFG_0_FRAME_WIDTH 16 + +#define CSID_TPG_DT_n_CFG_1(n) (0x614 + (n) * 0xC) +#define TPG_DT_n_CFG_1_DATA_TYPE 0 +#define TPG_DT_n_CFG_1_ECC_XOR_MASK 8 +#define TPG_DT_n_CFG_1_CRC_XOR_MASK 16 + +#define CSID_TPG_DT_n_CFG_2(n) (0x618 + (n) * 0xC) +#define TPG_DT_n_CFG_2_PAYLOAD_MODE 0 +#define TPG_DT_n_CFG_2_USER_SPECIFIED_PAYLOAD 4 +#define TPG_DT_n_CFG_2_ENCODE_FORMAT 16 + +#define CSID_TPG_COLOR_BARS_CFG 0x640 +#define TPG_COLOR_BARS_CFG_UNICOLOR_BAR_EN 0 +#define TPG_COLOR_BARS_CFG_UNICOLOR_BAR_SEL 4 +#define TPG_COLOR_BARS_CFG_SPLIT_EN 5 +#define TPG_COLOR_BARS_CFG_ROTATE_PERIOD 8 + +#define CSID_TPG_COLOR_BOX_CFG 0x644 +#define TPG_COLOR_BOX_CFG_MODE 0 +#define TPG_COLOR_BOX_PATTERN_SEL 2 + +static const struct csid_format csid_formats[] = { + { + MEDIA_BUS_FMT_UYVY8_2X8, + DATA_TYPE_YUV422_8BIT, + DECODE_FORMAT_UNCOMPRESSED_8_BIT, + 8, + 2, + }, + { + MEDIA_BUS_FMT_VYUY8_2X8, + DATA_TYPE_YUV422_8BIT, + DECODE_FORMAT_UNCOMPRESSED_8_BIT, + 8, + 2, + }, + { + MEDIA_BUS_FMT_YUYV8_2X8, + DATA_TYPE_YUV422_8BIT, + DECODE_FORMAT_UNCOMPRESSED_8_BIT, + 8, + 2, + }, + { + MEDIA_BUS_FMT_YVYU8_2X8, + DATA_TYPE_YUV422_8BIT, + DECODE_FORMAT_UNCOMPRESSED_8_BIT, + 8, + 2, + }, + { + MEDIA_BUS_FMT_SBGGR8_1X8, + DATA_TYPE_RAW_8BIT, + DECODE_FORMAT_UNCOMPRESSED_8_BIT, + 8, + 1, + }, + { + MEDIA_BUS_FMT_SGBRG8_1X8, + DATA_TYPE_RAW_8BIT, + DECODE_FORMAT_UNCOMPRESSED_8_BIT, + 8, + 1, + }, + { + MEDIA_BUS_FMT_SGRBG8_1X8, + DATA_TYPE_RAW_8BIT, + DECODE_FORMAT_UNCOMPRESSED_8_BIT, + 8, + 1, + }, + { + MEDIA_BUS_FMT_SRGGB8_1X8, + DATA_TYPE_RAW_8BIT, + DECODE_FORMAT_UNCOMPRESSED_8_BIT, + 8, + 1, + }, + { + MEDIA_BUS_FMT_SBGGR10_1X10, + DATA_TYPE_RAW_10BIT, + DECODE_FORMAT_UNCOMPRESSED_10_BIT, + 10, + 1, + }, + { + MEDIA_BUS_FMT_SGBRG10_1X10, + DATA_TYPE_RAW_10BIT, + DECODE_FORMAT_UNCOMPRESSED_10_BIT, + 10, + 1, + }, + { + MEDIA_BUS_FMT_SGRBG10_1X10, + DATA_TYPE_RAW_10BIT, + DECODE_FORMAT_UNCOMPRESSED_10_BIT, + 10, + 1, + }, + { + MEDIA_BUS_FMT_SRGGB10_1X10, + DATA_TYPE_RAW_10BIT, + DECODE_FORMAT_UNCOMPRESSED_10_BIT, + 10, + 1, + }, + { + MEDIA_BUS_FMT_Y8_1X8, + DATA_TYPE_RAW_8BIT, + DECODE_FORMAT_UNCOMPRESSED_8_BIT, + 8, + 1, + }, + { + MEDIA_BUS_FMT_Y10_1X10, + DATA_TYPE_RAW_10BIT, + DECODE_FORMAT_UNCOMPRESSED_10_BIT, + 10, + 1, + }, + { + MEDIA_BUS_FMT_SBGGR12_1X12, + DATA_TYPE_RAW_12BIT, + DECODE_FORMAT_UNCOMPRESSED_12_BIT, + 12, + 1, + }, + { + MEDIA_BUS_FMT_SGBRG12_1X12, + DATA_TYPE_RAW_12BIT, + DECODE_FORMAT_UNCOMPRESSED_12_BIT, + 12, + 1, + }, + { + MEDIA_BUS_FMT_SGRBG12_1X12, + DATA_TYPE_RAW_12BIT, + 
DECODE_FORMAT_UNCOMPRESSED_12_BIT, + 12, + 1, + }, + { + MEDIA_BUS_FMT_SRGGB12_1X12, + DATA_TYPE_RAW_12BIT, + DECODE_FORMAT_UNCOMPRESSED_12_BIT, + 12, + 1, + }, + { + MEDIA_BUS_FMT_SBGGR14_1X14, + DATA_TYPE_RAW_14BIT, + DECODE_FORMAT_UNCOMPRESSED_14_BIT, + 14, + 1, + }, + { + MEDIA_BUS_FMT_SGBRG14_1X14, + DATA_TYPE_RAW_14BIT, + DECODE_FORMAT_UNCOMPRESSED_14_BIT, + 14, + 1, + }, + { + MEDIA_BUS_FMT_SGRBG14_1X14, + DATA_TYPE_RAW_14BIT, + DECODE_FORMAT_UNCOMPRESSED_14_BIT, + 14, + 1, + }, + { + MEDIA_BUS_FMT_SRGGB14_1X14, + DATA_TYPE_RAW_14BIT, + DECODE_FORMAT_UNCOMPRESSED_14_BIT, + 14, + 1, + }, +}; + +static void __csid_configure_stream(struct csid_device *csid, u8 enable, u8 vc) +{ + struct csid_testgen_config *tg = &csid->testgen; + u32 val; + u32 phy_sel = 0; + u8 lane_cnt = csid->phy.lane_cnt; + /* Source pads matching RDI channels on hardware. Pad 1 -> RDI0, Pad 2 -> RDI1, etc. */ + struct v4l2_mbus_framefmt *input_format = &csid->fmt[MSM_CSID_PAD_FIRST_SRC + vc]; + const struct csid_format *format = csid_get_fmt_entry(csid->formats, csid->nformats, + input_format->code); + + if (!lane_cnt) + lane_cnt = 4; + + if (!tg->enabled) + phy_sel = csid->phy.csiphy_id; + + if (enable) { + /* + * DT_ID is a two bit bitfield that is concatenated with + * the four least significant bits of the five bit VC + * bitfield to generate an internal CID value. + * + * CSID_RDI_CFG0(vc) + * DT_ID : 28:27 + * VC : 26:22 + * DT : 21:16 + * + * CID : VC 3:0 << 2 | DT_ID 1:0 + */ + u8 dt_id = vc & 0x03; + + if (tg->enabled) { + /* configure one DT, infinite frames */ + val = vc << TPG_VC_CFG0_VC_NUM; + val |= INTELEAVING_MODE_ONE_SHOT << TPG_VC_CFG0_LINE_INTERLEAVING_MODE; + val |= 0 << TPG_VC_CFG0_NUM_FRAMES; + writel_relaxed(val, csid->base + CSID_TPG_VC_CFG0); + + val = 0x740 << TPG_VC_CFG1_H_BLANKING_COUNT; + val |= 0x3ff << TPG_VC_CFG1_V_BLANKING_COUNT; + writel_relaxed(val, csid->base + CSID_TPG_VC_CFG1); + + writel_relaxed(0x12345678, csid->base + CSID_TPG_LFSR_SEED); + + val = (input_format->height & 0x1fff) << TPG_DT_n_CFG_0_FRAME_HEIGHT; + val |= (input_format->width & 0x1fff) << TPG_DT_n_CFG_0_FRAME_WIDTH; + writel_relaxed(val, csid->base + CSID_TPG_DT_n_CFG_0(0)); + + val = format->data_type << TPG_DT_n_CFG_1_DATA_TYPE; + writel_relaxed(val, csid->base + CSID_TPG_DT_n_CFG_1(0)); + + val = (tg->mode - 1) << TPG_DT_n_CFG_2_PAYLOAD_MODE; + val |= 0xBE << TPG_DT_n_CFG_2_USER_SPECIFIED_PAYLOAD; + val |= format->decode_format << TPG_DT_n_CFG_2_ENCODE_FORMAT; + writel_relaxed(val, csid->base + CSID_TPG_DT_n_CFG_2(0)); + + writel_relaxed(0, csid->base + CSID_TPG_COLOR_BARS_CFG); + + writel_relaxed(0, csid->base + CSID_TPG_COLOR_BOX_CFG); + } + + val = 1 << RDI_CFG0_BYTE_CNTR_EN; + val |= 1 << RDI_CFG0_FORMAT_MEASURE_EN; + val |= 1 << RDI_CFG0_TIMESTAMP_EN; + /* note: for non-RDI path, this should be format->decode_format */ + val |= DECODE_FORMAT_PAYLOAD_ONLY << RDI_CFG0_DECODE_FORMAT; + val |= format->data_type << RDI_CFG0_DATA_TYPE; + val |= vc << RDI_CFG0_VIRTUAL_CHANNEL; + val |= dt_id << RDI_CFG0_DT_ID; + writel_relaxed(val, csid->base + CSID_RDI_CFG0(vc)); + + /* CSID_TIMESTAMP_STB_POST_IRQ */ + val = 2 << RDI_CFG1_TIMESTAMP_STB_SEL; + writel_relaxed(val, csid->base + CSID_RDI_CFG1(vc)); + + val = 1; + writel_relaxed(val, csid->base + CSID_RDI_FRM_DROP_PERIOD(vc)); + + val = 0; + writel_relaxed(val, csid->base + CSID_RDI_FRM_DROP_PATTERN(vc)); + + val = 1; + writel_relaxed(val, csid->base + CSID_RDI_IRQ_SUBSAMPLE_PERIOD(vc)); + + val = 0; + writel_relaxed(val, csid->base + 
CSID_RDI_IRQ_SUBSAMPLE_PATTERN(vc)); + + val = 1; + writel_relaxed(val, csid->base + CSID_RDI_RPP_PIX_DROP_PERIOD(vc)); + + val = 0; + writel_relaxed(val, csid->base + CSID_RDI_RPP_PIX_DROP_PATTERN(vc)); + + val = 1; + writel_relaxed(val, csid->base + CSID_RDI_RPP_LINE_DROP_PERIOD(vc)); + + val = 0; + writel_relaxed(val, csid->base + CSID_RDI_RPP_LINE_DROP_PATTERN(vc)); + + val = 0; + writel_relaxed(val, csid->base + CSID_RDI_CTRL(vc)); + + val = readl_relaxed(csid->base + CSID_RDI_CFG0(vc)); + val |= 1 << RDI_CFG0_ENABLE; + writel_relaxed(val, csid->base + CSID_RDI_CFG0(vc)); + } + + if (tg->enabled) { + val = enable << TPG_CTRL_TEST_EN; + val |= 1 << TPG_CTRL_FS_PKT_EN; + val |= 1 << TPG_CTRL_FE_PKT_EN; + val |= (lane_cnt - 1) << TPG_CTRL_NUM_ACTIVE_LANES; + val |= 0x64 << TPG_CTRL_CYCLES_BETWEEN_PKTS; + val |= 0xA << TPG_CTRL_NUM_TRAIL_BYTES; + writel_relaxed(val, csid->base + CSID_TPG_CTRL); + } + + val = (lane_cnt - 1) << CSI2_RX_CFG0_NUM_ACTIVE_LANES; + val |= csid->phy.lane_assign << CSI2_RX_CFG0_DL0_INPUT_SEL; + val |= phy_sel << CSI2_RX_CFG0_PHY_NUM_SEL; + writel_relaxed(val, csid->base + CSID_CSI2_RX_CFG0); + + val = 1 << CSI2_RX_CFG1_PACKET_ECC_CORRECTION_EN; + if (vc > 3) + val |= 1 << CSI2_RX_CFG1_VC_MODE; + val |= 1 << CSI2_RX_CFG1_MISR_EN; + writel_relaxed(val, csid->base + CSID_CSI2_RX_CFG1); + + if (enable) + val = HALT_CMD_RESUME_AT_FRAME_BOUNDARY << RDI_CTRL_HALT_CMD; + else + val = HALT_CMD_HALT_AT_FRAME_BOUNDARY << RDI_CTRL_HALT_CMD; + writel_relaxed(val, csid->base + CSID_RDI_CTRL(vc)); +} + +static void csid_configure_stream(struct csid_device *csid, u8 enable) +{ + u8 i; + /* Loop through all enabled VCs and configure stream for each */ + for (i = 0; i < MSM_CSID_MAX_SRC_STREAMS; i++) + if (csid->phy.en_vc & BIT(i)) + __csid_configure_stream(csid, enable, i); +} + +static int csid_configure_testgen_pattern(struct csid_device *csid, s32 val) +{ + if (val > 0 && val <= csid->testgen.nmodes) + csid->testgen.mode = val; + + return 0; +} + +/* + * csid_hw_version - CSID hardware version query + * @csid: CSID device + * + * Return HW version or error + */ +static u32 csid_hw_version(struct csid_device *csid) +{ + u32 hw_version; + u32 hw_gen; + u32 hw_rev; + u32 hw_step; + + hw_version = readl_relaxed(csid->base + CSID_HW_VERSION); + hw_gen = (hw_version >> HW_VERSION_GENERATION) & 0xF; + hw_rev = (hw_version >> HW_VERSION_REVISION) & 0xFFF; + hw_step = (hw_version >> HW_VERSION_STEPPING) & 0xFFFF; + dev_dbg(csid->camss->dev, "CSID HW Version = %u.%u.%u\n", + hw_gen, hw_rev, hw_step); + + return hw_version; +} + +/* + * csid_isr - CSID module interrupt service routine + * @irq: Interrupt line + * @dev: CSID device + * + * Return IRQ_HANDLED on success + */ +static irqreturn_t csid_isr(int irq, void *dev) +{ + struct csid_device *csid = dev; + u32 val; + u8 reset_done; + int i; + + val = readl_relaxed(csid->base + CSID_TOP_IRQ_STATUS); + writel_relaxed(val, csid->base + CSID_TOP_IRQ_CLEAR); + reset_done = val & BIT(TOP_IRQ_STATUS_RESET_DONE); + + val = readl_relaxed(csid->base + CSID_CSI2_RX_IRQ_STATUS); + writel_relaxed(val, csid->base + CSID_CSI2_RX_IRQ_CLEAR); + + /* Read and clear IRQ status for each enabled RDI channel */ + for (i = 0; i < MSM_CSID_MAX_SRC_STREAMS; i++) + if (csid->phy.en_vc & BIT(i)) { + val = readl_relaxed(csid->base + CSID_CSI2_RDIN_IRQ_STATUS(i)); + writel_relaxed(val, csid->base + CSID_CSI2_RDIN_IRQ_CLEAR(i)); + } + + val = 1 << IRQ_CMD_CLEAR; + writel_relaxed(val, csid->base + CSID_IRQ_CMD); + + if (reset_done) + complete(&csid->reset_complete); 
+ + return IRQ_HANDLED; +} + +/* + * csid_reset - Trigger reset on CSID module and wait to complete + * @csid: CSID device + * + * Return 0 on success or a negative error code otherwise + */ +static int csid_reset(struct csid_device *csid) +{ + unsigned long time; + u32 val; + + reinit_completion(&csid->reset_complete); + + writel_relaxed(1, csid->base + CSID_TOP_IRQ_CLEAR); + writel_relaxed(1, csid->base + CSID_IRQ_CMD); + writel_relaxed(1, csid->base + CSID_TOP_IRQ_MASK); + writel_relaxed(1, csid->base + CSID_IRQ_CMD); + + /* preserve registers */ + val = 0x1e << RST_STROBES; + writel_relaxed(val, csid->base + CSID_RST_STROBES); + + time = wait_for_completion_timeout(&csid->reset_complete, + msecs_to_jiffies(CSID_RESET_TIMEOUT_MS)); + if (!time) { + dev_err(csid->camss->dev, "CSID reset timeout\n"); + return -EIO; + } + + return 0; +} + +static u32 csid_src_pad_code(struct csid_device *csid, u32 sink_code, + unsigned int match_format_idx, u32 match_code) +{ + switch (sink_code) { + case MEDIA_BUS_FMT_SBGGR10_1X10: + { + u32 src_code[] = { + MEDIA_BUS_FMT_SBGGR10_1X10, + MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE, + }; + + return csid_find_code(src_code, ARRAY_SIZE(src_code), + match_format_idx, match_code); + } + case MEDIA_BUS_FMT_Y10_1X10: + { + u32 src_code[] = { + MEDIA_BUS_FMT_Y10_1X10, + MEDIA_BUS_FMT_Y10_2X8_PADHI_LE, + }; + + return csid_find_code(src_code, ARRAY_SIZE(src_code), + match_format_idx, match_code); + } + default: + if (match_format_idx > 0) + return 0; + + return sink_code; + } +} + +static void csid_subdev_init(struct csid_device *csid) +{ + csid->formats = csid_formats; + csid->nformats = ARRAY_SIZE(csid_formats); + csid->testgen.modes = csid_testgen_modes; + csid->testgen.nmodes = CSID_PAYLOAD_MODE_NUM_SUPPORTED_GEN2; +} + +const struct csid_hw_ops csid_ops_gen2 = { + .configure_stream = csid_configure_stream, + .configure_testgen_pattern = csid_configure_testgen_pattern, + .hw_version = csid_hw_version, + .isr = csid_isr, + .reset = csid_reset, + .src_pad_code = csid_src_pad_code, + .subdev_init = csid_subdev_init, +}; diff --git a/drivers/media/platform/qcom/camss/camss-csid-gen2.h b/drivers/media/platform/qcom/camss/camss-csid-gen2.h new file mode 100644 index 0000000000..3a8ad001b3 --- /dev/null +++ b/drivers/media/platform/qcom/camss/camss-csid-gen2.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * camss-csid-gen1.h + * + * Qualcomm MSM Camera Subsystem - CSID (CSI Decoder) Module Generation 1 + * + * Copyright (C) 2021 Linaro Ltd. 
+ */ +#ifndef QC_MSM_CAMSS_CSID_GEN2_H +#define QC_MSM_CAMSS_CSID_GEN2_H + +#define DECODE_FORMAT_UNCOMPRESSED_6_BIT 0x0 +#define DECODE_FORMAT_UNCOMPRESSED_8_BIT 0x1 +#define DECODE_FORMAT_UNCOMPRESSED_10_BIT 0x2 +#define DECODE_FORMAT_UNCOMPRESSED_12_BIT 0x3 +#define DECODE_FORMAT_UNCOMPRESSED_14_BIT 0x4 +#define DECODE_FORMAT_UNCOMPRESSED_16_BIT 0x5 +#define DECODE_FORMAT_UNCOMPRESSED_20_BIT 0x6 +#define DECODE_FORMAT_DPCM_10_6_10 0x7 +#define DECODE_FORMAT_DPCM_10_8_10 0x8 +#define DECODE_FORMAT_DPCM_12_6_12 0x9 +#define DECODE_FORMAT_DPCM_12_8_12 0xa +#define DECODE_FORMAT_DPCM_14_8_14 0xb +#define DECODE_FORMAT_DPCM_14_10_14 0xc +#define DECODE_FORMAT_DPCM_12_10_12 0xd +#define DECODE_FORMAT_USER_DEFINED 0xe +#define DECODE_FORMAT_PAYLOAD_ONLY 0xf + +#define ENCODE_FORMAT_RAW_8_BIT 0x1 +#define ENCODE_FORMAT_RAW_10_BIT 0x2 +#define ENCODE_FORMAT_RAW_12_BIT 0x3 +#define ENCODE_FORMAT_RAW_14_BIT 0x4 +#define ENCODE_FORMAT_RAW_16_BIT 0x5 + +#define PLAIN_FORMAT_PLAIN8 0x0 /* supports DPCM, UNCOMPRESSED_6/8_BIT */ +#define PLAIN_FORMAT_PLAIN16 0x1 /* supports DPCM, UNCOMPRESSED_10/16_BIT */ +#define PLAIN_FORMAT_PLAIN32 0x2 /* supports UNCOMPRESSED_20_BIT */ + +#endif /* QC_MSM_CAMSS_CSID_GEN2_H */ diff --git a/drivers/media/platform/qcom/camss/camss-csid.c b/drivers/media/platform/qcom/camss/camss-csid.c new file mode 100644 index 0000000000..6360314f04 --- /dev/null +++ b/drivers/media/platform/qcom/camss/camss-csid.c @@ -0,0 +1,908 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * camss-csid.c + * + * Qualcomm MSM Camera Subsystem - CSID (CSI Decoder) Module + * + * Copyright (c) 2011-2015, The Linux Foundation. All rights reserved. + * Copyright (C) 2015-2018 Linaro Ltd. + */ +#include <linux/clk.h> +#include <linux/completion.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/regulator/consumer.h> +#include <media/media-entity.h> +#include <media/v4l2-device.h> +#include <media/v4l2-event.h> +#include <media/v4l2-subdev.h> + +#include "camss-csid.h" +#include "camss-csid-gen1.h" +#include "camss.h" + +/* offset of CSID registers in VFE region for VFE 480 */ +#define VFE_480_CSID_OFFSET 0x1200 +#define VFE_480_LITE_CSID_OFFSET 0x200 + +#define MSM_CSID_NAME "msm_csid" + +const char * const csid_testgen_modes[] = { + "Disabled", + "Incrementing", + "Alternating 0x55/0xAA", + "All Zeros 0x00", + "All Ones 0xFF", + "Pseudo-random Data", + "User Specified", + "Complex pattern", + "Color box", + "Color bars", + NULL +}; + +u32 csid_find_code(u32 *codes, unsigned int ncodes, + unsigned int match_format_idx, u32 match_code) +{ + int i; + + if (!match_code && (match_format_idx >= ncodes)) + return 0; + + for (i = 0; i < ncodes; i++) + if (match_code) { + if (codes[i] == match_code) + return match_code; + } else { + if (i == match_format_idx) + return codes[i]; + } + + return codes[0]; +} + +const struct csid_format *csid_get_fmt_entry(const struct csid_format *formats, + unsigned int nformats, + u32 code) +{ + unsigned int i; + + for (i = 0; i < nformats; i++) + if (code == formats[i].code) + return &formats[i]; + + WARN(1, "Unknown format\n"); + + return &formats[0]; +} + +/* + * csid_set_clock_rates - Calculate and set clock rates on CSID module + * @csiphy: CSID device + */ +static int csid_set_clock_rates(struct csid_device *csid) +{ + struct device *dev = csid->camss->dev; + const struct csid_format *fmt; + s64 link_freq; + int i, j; + int ret; + + fmt 
= csid_get_fmt_entry(csid->formats, csid->nformats, + csid->fmt[MSM_CSIPHY_PAD_SINK].code); + link_freq = camss_get_link_freq(&csid->subdev.entity, fmt->bpp, + csid->phy.lane_cnt); + if (link_freq < 0) + link_freq = 0; + + for (i = 0; i < csid->nclocks; i++) { + struct camss_clock *clock = &csid->clock[i]; + + if (!strcmp(clock->name, "csi0") || + !strcmp(clock->name, "csi1") || + !strcmp(clock->name, "csi2") || + !strcmp(clock->name, "csi3")) { + u64 min_rate = link_freq / 4; + long rate; + + camss_add_clock_margin(&min_rate); + + for (j = 0; j < clock->nfreqs; j++) + if (min_rate < clock->freq[j]) + break; + + if (j == clock->nfreqs) { + dev_err(dev, + "Pixel clock is too high for CSID\n"); + return -EINVAL; + } + + /* if sensor pixel clock is not available */ + /* set highest possible CSID clock rate */ + if (min_rate == 0) + j = clock->nfreqs - 1; + + rate = clk_round_rate(clock->clk, clock->freq[j]); + if (rate < 0) { + dev_err(dev, "clk round rate failed: %ld\n", + rate); + return -EINVAL; + } + + ret = clk_set_rate(clock->clk, rate); + if (ret < 0) { + dev_err(dev, "clk set rate failed: %d\n", ret); + return ret; + } + } else if (clock->nfreqs) { + clk_set_rate(clock->clk, clock->freq[0]); + } + } + + return 0; +} + +/* + * csid_set_power - Power on/off CSID module + * @sd: CSID V4L2 subdevice + * @on: Requested power state + * + * Return 0 on success or a negative error code otherwise + */ +static int csid_set_power(struct v4l2_subdev *sd, int on) +{ + struct csid_device *csid = v4l2_get_subdevdata(sd); + struct camss *camss = csid->camss; + struct device *dev = camss->dev; + struct vfe_device *vfe = &camss->vfe[csid->id]; + u32 version = camss->version; + int ret = 0; + + if (on) { + if (version == CAMSS_8250 || version == CAMSS_845) { + ret = vfe_get(vfe); + if (ret < 0) + return ret; + } + + ret = pm_runtime_resume_and_get(dev); + if (ret < 0) + return ret; + + ret = regulator_bulk_enable(csid->num_supplies, + csid->supplies); + if (ret < 0) { + pm_runtime_put_sync(dev); + return ret; + } + + ret = csid_set_clock_rates(csid); + if (ret < 0) { + regulator_bulk_disable(csid->num_supplies, + csid->supplies); + pm_runtime_put_sync(dev); + return ret; + } + + ret = camss_enable_clocks(csid->nclocks, csid->clock, dev); + if (ret < 0) { + regulator_bulk_disable(csid->num_supplies, + csid->supplies); + pm_runtime_put_sync(dev); + return ret; + } + + csid->phy.need_vc_update = true; + + enable_irq(csid->irq); + + ret = csid->ops->reset(csid); + if (ret < 0) { + disable_irq(csid->irq); + camss_disable_clocks(csid->nclocks, csid->clock); + regulator_bulk_disable(csid->num_supplies, + csid->supplies); + pm_runtime_put_sync(dev); + return ret; + } + + csid->ops->hw_version(csid); + } else { + disable_irq(csid->irq); + camss_disable_clocks(csid->nclocks, csid->clock); + regulator_bulk_disable(csid->num_supplies, + csid->supplies); + pm_runtime_put_sync(dev); + if (version == CAMSS_8250 || version == CAMSS_845) + vfe_put(vfe); + } + + return ret; +} + +/* + * csid_set_stream - Enable/disable streaming on CSID module + * @sd: CSID V4L2 subdevice + * @enable: Requested streaming state + * + * Main configuration of CSID module is also done here. 
+ * + * Return 0 on success or a negative error code otherwise + */ +static int csid_set_stream(struct v4l2_subdev *sd, int enable) +{ + struct csid_device *csid = v4l2_get_subdevdata(sd); + int ret; + + if (enable) { + ret = v4l2_ctrl_handler_setup(&csid->ctrls); + if (ret < 0) { + dev_err(csid->camss->dev, + "could not sync v4l2 controls: %d\n", ret); + return ret; + } + + if (!csid->testgen.enabled && + !media_pad_remote_pad_first(&csid->pads[MSM_CSID_PAD_SINK])) + return -ENOLINK; + } + + if (csid->phy.need_vc_update) { + csid->ops->configure_stream(csid, enable); + csid->phy.need_vc_update = false; + } + + return 0; +} + +/* + * __csid_get_format - Get pointer to format structure + * @csid: CSID device + * @cfg: V4L2 subdev pad configuration + * @pad: pad from which format is requested + * @which: TRY or ACTIVE format + * + * Return pointer to TRY or ACTIVE format structure + */ +static struct v4l2_mbus_framefmt * +__csid_get_format(struct csid_device *csid, + struct v4l2_subdev_state *sd_state, + unsigned int pad, + enum v4l2_subdev_format_whence which) +{ + if (which == V4L2_SUBDEV_FORMAT_TRY) + return v4l2_subdev_get_try_format(&csid->subdev, sd_state, + pad); + + return &csid->fmt[pad]; +} + +/* + * csid_try_format - Handle try format by pad subdev method + * @csid: CSID device + * @cfg: V4L2 subdev pad configuration + * @pad: pad on which format is requested + * @fmt: pointer to v4l2 format structure + * @which: wanted subdev format + */ +static void csid_try_format(struct csid_device *csid, + struct v4l2_subdev_state *sd_state, + unsigned int pad, + struct v4l2_mbus_framefmt *fmt, + enum v4l2_subdev_format_whence which) +{ + unsigned int i; + + switch (pad) { + case MSM_CSID_PAD_SINK: + /* Set format on sink pad */ + + for (i = 0; i < csid->nformats; i++) + if (fmt->code == csid->formats[i].code) + break; + + /* If not found, use UYVY as default */ + if (i >= csid->nformats) + fmt->code = MEDIA_BUS_FMT_UYVY8_2X8; + + fmt->width = clamp_t(u32, fmt->width, 1, 8191); + fmt->height = clamp_t(u32, fmt->height, 1, 8191); + + fmt->field = V4L2_FIELD_NONE; + fmt->colorspace = V4L2_COLORSPACE_SRGB; + + break; + + case MSM_CSID_PAD_SRC: + if (csid->testgen_mode->cur.val == 0) { + /* Test generator is disabled, */ + /* keep pad formats in sync */ + u32 code = fmt->code; + + *fmt = *__csid_get_format(csid, sd_state, + MSM_CSID_PAD_SINK, which); + fmt->code = csid->ops->src_pad_code(csid, fmt->code, 0, code); + } else { + /* Test generator is enabled, set format on source */ + /* pad to allow test generator usage */ + + for (i = 0; i < csid->nformats; i++) + if (csid->formats[i].code == fmt->code) + break; + + /* If not found, use UYVY as default */ + if (i >= csid->nformats) + fmt->code = MEDIA_BUS_FMT_UYVY8_2X8; + + fmt->width = clamp_t(u32, fmt->width, 1, 8191); + fmt->height = clamp_t(u32, fmt->height, 1, 8191); + + fmt->field = V4L2_FIELD_NONE; + } + break; + } + + fmt->colorspace = V4L2_COLORSPACE_SRGB; +} + +/* + * csid_enum_mbus_code - Handle pixel format enumeration + * @sd: CSID V4L2 subdevice + * @cfg: V4L2 subdev pad configuration + * @code: pointer to v4l2_subdev_mbus_code_enum structure + * return -EINVAL or zero on success + */ +static int csid_enum_mbus_code(struct v4l2_subdev *sd, + struct v4l2_subdev_state *sd_state, + struct v4l2_subdev_mbus_code_enum *code) +{ + struct csid_device *csid = v4l2_get_subdevdata(sd); + + if (code->pad == MSM_CSID_PAD_SINK) { + if (code->index >= csid->nformats) + return -EINVAL; + + code->code = csid->formats[code->index].code; + } else { + 
if (csid->testgen_mode->cur.val == 0) { + struct v4l2_mbus_framefmt *sink_fmt; + + sink_fmt = __csid_get_format(csid, sd_state, + MSM_CSID_PAD_SINK, + code->which); + + code->code = csid->ops->src_pad_code(csid, sink_fmt->code, + code->index, 0); + if (!code->code) + return -EINVAL; + } else { + if (code->index >= csid->nformats) + return -EINVAL; + + code->code = csid->formats[code->index].code; + } + } + + return 0; +} + +/* + * csid_enum_frame_size - Handle frame size enumeration + * @sd: CSID V4L2 subdevice + * @cfg: V4L2 subdev pad configuration + * @fse: pointer to v4l2_subdev_frame_size_enum structure + * return -EINVAL or zero on success + */ +static int csid_enum_frame_size(struct v4l2_subdev *sd, + struct v4l2_subdev_state *sd_state, + struct v4l2_subdev_frame_size_enum *fse) +{ + struct csid_device *csid = v4l2_get_subdevdata(sd); + struct v4l2_mbus_framefmt format; + + if (fse->index != 0) + return -EINVAL; + + format.code = fse->code; + format.width = 1; + format.height = 1; + csid_try_format(csid, sd_state, fse->pad, &format, fse->which); + fse->min_width = format.width; + fse->min_height = format.height; + + if (format.code != fse->code) + return -EINVAL; + + format.code = fse->code; + format.width = -1; + format.height = -1; + csid_try_format(csid, sd_state, fse->pad, &format, fse->which); + fse->max_width = format.width; + fse->max_height = format.height; + + return 0; +} + +/* + * csid_get_format - Handle get format by pads subdev method + * @sd: CSID V4L2 subdevice + * @cfg: V4L2 subdev pad configuration + * @fmt: pointer to v4l2 subdev format structure + * + * Return -EINVAL or zero on success + */ +static int csid_get_format(struct v4l2_subdev *sd, + struct v4l2_subdev_state *sd_state, + struct v4l2_subdev_format *fmt) +{ + struct csid_device *csid = v4l2_get_subdevdata(sd); + struct v4l2_mbus_framefmt *format; + + format = __csid_get_format(csid, sd_state, fmt->pad, fmt->which); + if (format == NULL) + return -EINVAL; + + fmt->format = *format; + + return 0; +} + +/* + * csid_set_format - Handle set format by pads subdev method + * @sd: CSID V4L2 subdevice + * @cfg: V4L2 subdev pad configuration + * @fmt: pointer to v4l2 subdev format structure + * + * Return -EINVAL or zero on success + */ +static int csid_set_format(struct v4l2_subdev *sd, + struct v4l2_subdev_state *sd_state, + struct v4l2_subdev_format *fmt) +{ + struct csid_device *csid = v4l2_get_subdevdata(sd); + struct v4l2_mbus_framefmt *format; + int i; + + format = __csid_get_format(csid, sd_state, fmt->pad, fmt->which); + if (format == NULL) + return -EINVAL; + + csid_try_format(csid, sd_state, fmt->pad, &fmt->format, fmt->which); + *format = fmt->format; + + /* Propagate the format from sink to source pads */ + if (fmt->pad == MSM_CSID_PAD_SINK) { + for (i = MSM_CSID_PAD_FIRST_SRC; i < MSM_CSID_PADS_NUM; ++i) { + format = __csid_get_format(csid, sd_state, i, fmt->which); + + *format = fmt->format; + csid_try_format(csid, sd_state, i, format, fmt->which); + } + } + + return 0; +} + +/* + * csid_init_formats - Initialize formats on all pads + * @sd: CSID V4L2 subdevice + * @fh: V4L2 subdev file handle + * + * Initialize all pad formats with default values. + * + * Return 0 on success or a negative error code otherwise + */ +static int csid_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) +{ + struct v4l2_subdev_format format = { + .pad = MSM_CSID_PAD_SINK, + .which = fh ? 
V4L2_SUBDEV_FORMAT_TRY : + V4L2_SUBDEV_FORMAT_ACTIVE, + .format = { + .code = MEDIA_BUS_FMT_UYVY8_2X8, + .width = 1920, + .height = 1080 + } + }; + + return csid_set_format(sd, fh ? fh->state : NULL, &format); +} + +/* + * csid_set_test_pattern - Set test generator's pattern mode + * @csid: CSID device + * @value: desired test pattern mode + * + * Return 0 on success or a negative error code otherwise + */ +static int csid_set_test_pattern(struct csid_device *csid, s32 value) +{ + struct csid_testgen_config *tg = &csid->testgen; + + /* If CSID is linked to CSIPHY, do not allow to enable test generator */ + if (value && media_pad_remote_pad_first(&csid->pads[MSM_CSID_PAD_SINK])) + return -EBUSY; + + tg->enabled = !!value; + + return csid->ops->configure_testgen_pattern(csid, value); +} + +/* + * csid_s_ctrl - Handle set control subdev method + * @ctrl: pointer to v4l2 control structure + * + * Return 0 on success or a negative error code otherwise + */ +static int csid_s_ctrl(struct v4l2_ctrl *ctrl) +{ + struct csid_device *csid = container_of(ctrl->handler, + struct csid_device, ctrls); + int ret = -EINVAL; + + switch (ctrl->id) { + case V4L2_CID_TEST_PATTERN: + ret = csid_set_test_pattern(csid, ctrl->val); + break; + } + + return ret; +} + +static const struct v4l2_ctrl_ops csid_ctrl_ops = { + .s_ctrl = csid_s_ctrl, +}; + +/* + * msm_csid_subdev_init - Initialize CSID device structure and resources + * @csid: CSID device + * @res: CSID module resources table + * @id: CSID module id + * + * Return 0 on success or a negative error code otherwise + */ +int msm_csid_subdev_init(struct camss *camss, struct csid_device *csid, + const struct resources *res, u8 id) +{ + struct device *dev = camss->dev; + struct platform_device *pdev = to_platform_device(dev); + int i, j; + int ret; + + csid->camss = camss; + csid->id = id; + + if (camss->version == CAMSS_8x16) { + csid->ops = &csid_ops_4_1; + } else if (camss->version == CAMSS_8x96 || + camss->version == CAMSS_660) { + csid->ops = &csid_ops_4_7; + } else if (camss->version == CAMSS_845 || + camss->version == CAMSS_8250) { + csid->ops = &csid_ops_gen2; + } else { + return -EINVAL; + } + csid->ops->subdev_init(csid); + + /* Memory */ + + if (camss->version == CAMSS_8250) { + /* for titan 480, CSID registers are inside the VFE region, + * between the VFE "top" and "bus" registers. 
this requires + * VFE to be initialized before CSID + */ + if (id >= 2) /* VFE/CSID lite */ + csid->base = camss->vfe[id].base + VFE_480_LITE_CSID_OFFSET; + else + csid->base = camss->vfe[id].base + VFE_480_CSID_OFFSET; + } else { + csid->base = devm_platform_ioremap_resource_byname(pdev, res->reg[0]); + if (IS_ERR(csid->base)) + return PTR_ERR(csid->base); + } + + /* Interrupt */ + + ret = platform_get_irq_byname(pdev, res->interrupt[0]); + if (ret < 0) + return ret; + + csid->irq = ret; + snprintf(csid->irq_name, sizeof(csid->irq_name), "%s_%s%d", + dev_name(dev), MSM_CSID_NAME, csid->id); + ret = devm_request_irq(dev, csid->irq, csid->ops->isr, + IRQF_TRIGGER_RISING | IRQF_NO_AUTOEN, + csid->irq_name, csid); + if (ret < 0) { + dev_err(dev, "request_irq failed: %d\n", ret); + return ret; + } + + /* Clocks */ + + csid->nclocks = 0; + while (res->clock[csid->nclocks]) + csid->nclocks++; + + csid->clock = devm_kcalloc(dev, csid->nclocks, sizeof(*csid->clock), + GFP_KERNEL); + if (!csid->clock) + return -ENOMEM; + + for (i = 0; i < csid->nclocks; i++) { + struct camss_clock *clock = &csid->clock[i]; + + clock->clk = devm_clk_get(dev, res->clock[i]); + if (IS_ERR(clock->clk)) + return PTR_ERR(clock->clk); + + clock->name = res->clock[i]; + + clock->nfreqs = 0; + while (res->clock_rate[i][clock->nfreqs]) + clock->nfreqs++; + + if (!clock->nfreqs) { + clock->freq = NULL; + continue; + } + + clock->freq = devm_kcalloc(dev, + clock->nfreqs, + sizeof(*clock->freq), + GFP_KERNEL); + if (!clock->freq) + return -ENOMEM; + + for (j = 0; j < clock->nfreqs; j++) + clock->freq[j] = res->clock_rate[i][j]; + } + + /* Regulator */ + for (i = 0; i < ARRAY_SIZE(res->regulators); i++) { + if (res->regulators[i]) + csid->num_supplies++; + } + + if (csid->num_supplies) { + csid->supplies = devm_kmalloc_array(camss->dev, + csid->num_supplies, + sizeof(*csid->supplies), + GFP_KERNEL); + if (!csid->supplies) + return -ENOMEM; + } + + for (i = 0; i < csid->num_supplies; i++) + csid->supplies[i].supply = res->regulators[i]; + + ret = devm_regulator_bulk_get(camss->dev, csid->num_supplies, + csid->supplies); + if (ret) + return ret; + + init_completion(&csid->reset_complete); + + return 0; +} + +/* + * msm_csid_get_csid_id - Get CSID HW module id + * @entity: Pointer to CSID media entity structure + * @id: Return CSID HW module id here + */ +void msm_csid_get_csid_id(struct media_entity *entity, u8 *id) +{ + struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity); + struct csid_device *csid = v4l2_get_subdevdata(sd); + + *id = csid->id; +} + +/* + * csid_get_lane_assign - Calculate CSI2 lane assign configuration parameter + * @lane_cfg - CSI2 lane configuration + * + * Return lane assign + */ +static u32 csid_get_lane_assign(struct csiphy_lanes_cfg *lane_cfg) +{ + u32 lane_assign = 0; + int i; + + for (i = 0; i < lane_cfg->num_data; i++) + lane_assign |= lane_cfg->data[i].pos << (i * 4); + + return lane_assign; +} + +/* + * csid_link_setup - Setup CSID connections + * @entity: Pointer to media entity structure + * @local: Pointer to local pad + * @remote: Pointer to remote pad + * @flags: Link flags + * + * Return 0 on success + */ +static int csid_link_setup(struct media_entity *entity, + const struct media_pad *local, + const struct media_pad *remote, u32 flags) +{ + if (flags & MEDIA_LNK_FL_ENABLED) + if (media_pad_remote_pad_first(local)) + return -EBUSY; + + if ((local->flags & MEDIA_PAD_FL_SINK) && + (flags & MEDIA_LNK_FL_ENABLED)) { + struct v4l2_subdev *sd; + struct csid_device *csid; + struct 
csiphy_device *csiphy; + struct csiphy_lanes_cfg *lane_cfg; + + sd = media_entity_to_v4l2_subdev(entity); + csid = v4l2_get_subdevdata(sd); + + /* If test generator is enabled */ + /* do not allow a link from CSIPHY to CSID */ + if (csid->testgen_mode->cur.val != 0) + return -EBUSY; + + sd = media_entity_to_v4l2_subdev(remote->entity); + csiphy = v4l2_get_subdevdata(sd); + + /* If a sensor is not linked to CSIPHY */ + /* do not allow a link from CSIPHY to CSID */ + if (!csiphy->cfg.csi2) + return -EPERM; + + csid->phy.csiphy_id = csiphy->id; + + lane_cfg = &csiphy->cfg.csi2->lane_cfg; + csid->phy.lane_cnt = lane_cfg->num_data; + csid->phy.lane_assign = csid_get_lane_assign(lane_cfg); + } + /* Decide which virtual channels to enable based on which source pads are enabled */ + if (local->flags & MEDIA_PAD_FL_SOURCE) { + struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity); + struct csid_device *csid = v4l2_get_subdevdata(sd); + struct device *dev = csid->camss->dev; + + if (flags & MEDIA_LNK_FL_ENABLED) + csid->phy.en_vc |= BIT(local->index - 1); + else + csid->phy.en_vc &= ~BIT(local->index - 1); + + csid->phy.need_vc_update = true; + + dev_dbg(dev, "%s: Enabled CSID virtual channels mask 0x%x\n", + __func__, csid->phy.en_vc); + } + + return 0; +} + +static const struct v4l2_subdev_core_ops csid_core_ops = { + .s_power = csid_set_power, + .subscribe_event = v4l2_ctrl_subdev_subscribe_event, + .unsubscribe_event = v4l2_event_subdev_unsubscribe, +}; + +static const struct v4l2_subdev_video_ops csid_video_ops = { + .s_stream = csid_set_stream, +}; + +static const struct v4l2_subdev_pad_ops csid_pad_ops = { + .enum_mbus_code = csid_enum_mbus_code, + .enum_frame_size = csid_enum_frame_size, + .get_fmt = csid_get_format, + .set_fmt = csid_set_format, +}; + +static const struct v4l2_subdev_ops csid_v4l2_ops = { + .core = &csid_core_ops, + .video = &csid_video_ops, + .pad = &csid_pad_ops, +}; + +static const struct v4l2_subdev_internal_ops csid_v4l2_internal_ops = { + .open = csid_init_formats, +}; + +static const struct media_entity_operations csid_media_ops = { + .link_setup = csid_link_setup, + .link_validate = v4l2_subdev_link_validate, +}; + +/* + * msm_csid_register_entity - Register subdev node for CSID module + * @csid: CSID device + * @v4l2_dev: V4L2 device + * + * Return 0 on success or a negative error code otherwise + */ +int msm_csid_register_entity(struct csid_device *csid, + struct v4l2_device *v4l2_dev) +{ + struct v4l2_subdev *sd = &csid->subdev; + struct media_pad *pads = csid->pads; + struct device *dev = csid->camss->dev; + int i; + int ret; + + v4l2_subdev_init(sd, &csid_v4l2_ops); + sd->internal_ops = &csid_v4l2_internal_ops; + sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | + V4L2_SUBDEV_FL_HAS_EVENTS; + snprintf(sd->name, ARRAY_SIZE(sd->name), "%s%d", + MSM_CSID_NAME, csid->id); + v4l2_set_subdevdata(sd, csid); + + ret = v4l2_ctrl_handler_init(&csid->ctrls, 1); + if (ret < 0) { + dev_err(dev, "Failed to init ctrl handler: %d\n", ret); + return ret; + } + + csid->testgen_mode = v4l2_ctrl_new_std_menu_items(&csid->ctrls, + &csid_ctrl_ops, V4L2_CID_TEST_PATTERN, + csid->testgen.nmodes, 0, 0, + csid->testgen.modes); + + if (csid->ctrls.error) { + dev_err(dev, "Failed to init ctrl: %d\n", csid->ctrls.error); + ret = csid->ctrls.error; + goto free_ctrl; + } + + csid->subdev.ctrl_handler = &csid->ctrls; + + ret = csid_init_formats(sd, NULL); + if (ret < 0) { + dev_err(dev, "Failed to init format: %d\n", ret); + goto free_ctrl; + } + + pads[MSM_CSID_PAD_SINK].flags =
MEDIA_PAD_FL_SINK; + for (i = MSM_CSID_PAD_FIRST_SRC; i < MSM_CSID_PADS_NUM; ++i) + pads[i].flags = MEDIA_PAD_FL_SOURCE; + + sd->entity.function = MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER; + sd->entity.ops = &csid_media_ops; + ret = media_entity_pads_init(&sd->entity, MSM_CSID_PADS_NUM, pads); + if (ret < 0) { + dev_err(dev, "Failed to init media entity: %d\n", ret); + goto free_ctrl; + } + + ret = v4l2_device_register_subdev(v4l2_dev, sd); + if (ret < 0) { + dev_err(dev, "Failed to register subdev: %d\n", ret); + goto media_cleanup; + } + + return 0; + +media_cleanup: + media_entity_cleanup(&sd->entity); +free_ctrl: + v4l2_ctrl_handler_free(&csid->ctrls); + + return ret; +} + +/* + * msm_csid_unregister_entity - Unregister CSID module subdev node + * @csid: CSID device + */ +void msm_csid_unregister_entity(struct csid_device *csid) +{ + v4l2_device_unregister_subdev(&csid->subdev); + media_entity_cleanup(&csid->subdev.entity); + v4l2_ctrl_handler_free(&csid->ctrls); +} diff --git a/drivers/media/platform/qcom/camss/camss-csid.h b/drivers/media/platform/qcom/camss/camss-csid.h new file mode 100644 index 0000000000..d4b48432a0 --- /dev/null +++ b/drivers/media/platform/qcom/camss/camss-csid.h @@ -0,0 +1,219 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * camss-csid.h + * + * Qualcomm MSM Camera Subsystem - CSID (CSI Decoder) Module + * + * Copyright (c) 2011-2014, The Linux Foundation. All rights reserved. + * Copyright (C) 2015-2018 Linaro Ltd. + */ +#ifndef QC_MSM_CAMSS_CSID_H +#define QC_MSM_CAMSS_CSID_H + +#include <linux/clk.h> +#include <linux/interrupt.h> +#include <media/media-entity.h> +#include <media/v4l2-ctrls.h> +#include <media/v4l2-device.h> +#include <media/v4l2-mediabus.h> +#include <media/v4l2-subdev.h> + +#define MSM_CSID_PAD_SINK 0 +#define MSM_CSID_PAD_FIRST_SRC 1 +#define MSM_CSID_PADS_NUM 5 + +#define MSM_CSID_PAD_SRC (MSM_CSID_PAD_FIRST_SRC) + +/* CSID hardware can demultiplex up to 4 outputs */ +#define MSM_CSID_MAX_SRC_STREAMS 4 + +#define DATA_TYPE_EMBEDDED_DATA_8BIT 0x12 +#define DATA_TYPE_YUV420_8BIT 0x18 +#define DATA_TYPE_YUV420_10BIT 0x19 +#define DATA_TYPE_YUV420_8BIT_LEGACY 0x1a +#define DATA_TYPE_YUV420_8BIT_SHIFTED 0x1c /* Chroma Shifted Pixel Sampling */ +#define DATA_TYPE_YUV420_10BIT_SHIFTED 0x1d /* Chroma Shifted Pixel Sampling */ +#define DATA_TYPE_YUV422_8BIT 0x1e +#define DATA_TYPE_YUV422_10BIT 0x1f +#define DATA_TYPE_RGB444 0x20 +#define DATA_TYPE_RGB555 0x21 +#define DATA_TYPE_RGB565 0x22 +#define DATA_TYPE_RGB666 0x23 +#define DATA_TYPE_RGB888 0x24 +#define DATA_TYPE_RAW_24BIT 0x27 +#define DATA_TYPE_RAW_6BIT 0x28 +#define DATA_TYPE_RAW_7BIT 0x29 +#define DATA_TYPE_RAW_8BIT 0x2a +#define DATA_TYPE_RAW_10BIT 0x2b +#define DATA_TYPE_RAW_12BIT 0x2c +#define DATA_TYPE_RAW_14BIT 0x2d +#define DATA_TYPE_RAW_16BIT 0x2e +#define DATA_TYPE_RAW_20BIT 0x2f + +#define CSID_RESET_TIMEOUT_MS 500 + +enum csid_testgen_mode { + CSID_PAYLOAD_MODE_DISABLED = 0, + CSID_PAYLOAD_MODE_INCREMENTING = 1, + CSID_PAYLOAD_MODE_ALTERNATING_55_AA = 2, + CSID_PAYLOAD_MODE_ALL_ZEROES = 3, + CSID_PAYLOAD_MODE_ALL_ONES = 4, + CSID_PAYLOAD_MODE_RANDOM = 5, + CSID_PAYLOAD_MODE_USER_SPECIFIED = 6, + CSID_PAYLOAD_MODE_NUM_SUPPORTED_GEN1 = 6, /* excluding disabled */ + CSID_PAYLOAD_MODE_COMPLEX_PATTERN = 7, + CSID_PAYLOAD_MODE_COLOR_BOX = 8, + CSID_PAYLOAD_MODE_COLOR_BARS = 9, + CSID_PAYLOAD_MODE_NUM_SUPPORTED_GEN2 = 9, /* excluding disabled */ +}; + +struct csid_format { + u32 code; + u8 data_type; + u8 decode_format; + u8 bpp; + u8 spp; /* bus samples per pixel */ +}; + +struct 
csid_testgen_config { + enum csid_testgen_mode mode; + const char * const *modes; + u8 nmodes; + u8 enabled; +}; + +struct csid_phy_config { + u8 csiphy_id; + u8 lane_cnt; + u32 lane_assign; + u32 en_vc; + u8 need_vc_update; +}; + +struct csid_device; + +struct csid_hw_ops { + /* + * configure_stream - Configures and starts CSID input stream + * @csid: CSID device + */ + void (*configure_stream)(struct csid_device *csid, u8 enable); + + /* + * configure_testgen_pattern - Validates and configures output pattern mode + * of test pattern generator + * @csid: CSID device + */ + int (*configure_testgen_pattern)(struct csid_device *csid, s32 val); + + /* + * hw_version - Read hardware version register from hardware + * @csid: CSID device + */ + u32 (*hw_version)(struct csid_device *csid); + + /* + * isr - CSID module interrupt service routine + * @irq: Interrupt line + * @dev: CSID device + * + * Return IRQ_HANDLED on success + */ + irqreturn_t (*isr)(int irq, void *dev); + + /* + * reset - Trigger reset on CSID module and wait to complete + * @csid: CSID device + * + * Return 0 on success or a negative error code otherwise + */ + int (*reset)(struct csid_device *csid); + + /* + * src_pad_code - Pick an output/src format based on the input/sink format + * @csid: CSID device + * @sink_code: The sink format of the input + * @match_format_idx: Request preferred index, as defined by subdevice csid_format. + * Set @match_code to 0 if used. + * @match_code: Request preferred code, set @match_format_idx to 0 if used + * + * Return 0 on failure or src format code otherwise + */ + u32 (*src_pad_code)(struct csid_device *csid, u32 sink_code, + unsigned int match_format_idx, u32 match_code); + + /* + * subdev_init - Initialize CSID device according to the hardware revision + * @csid: CSID device + */ + void (*subdev_init)(struct csid_device *csid); +}; + +struct csid_device { + struct camss *camss; + u8 id; + struct v4l2_subdev subdev; + struct media_pad pads[MSM_CSID_PADS_NUM]; + void __iomem *base; + u32 irq; + char irq_name[30]; + struct camss_clock *clock; + int nclocks; + struct regulator_bulk_data *supplies; + int num_supplies; + struct completion reset_complete; + struct csid_testgen_config testgen; + struct csid_phy_config phy; + struct v4l2_mbus_framefmt fmt[MSM_CSID_PADS_NUM]; + struct v4l2_ctrl_handler ctrls; + struct v4l2_ctrl *testgen_mode; + const struct csid_format *formats; + unsigned int nformats; + const struct csid_hw_ops *ops; +}; + +struct resources; + +/* + * csid_find_code - Find a format code in an array using array index or format code + * @codes: Array of format codes + * @ncodes: Length of @codes array + * @match_format_idx: Request preferred index, as defined by subdevice csid_format. + * Set @match_code to 0 if used.
+ * @match_code: Request preferred code, set @match_format_idx to 0 if used + * + * Return 0 on failure or format code otherwise + */ +u32 csid_find_code(u32 *codes, unsigned int ncodes, + unsigned int match_format_idx, u32 match_code); + +/* + * csid_get_fmt_entry - Find csid_format entry with matching format code + * @formats: Array of format csid_format entries + * @nformats: Length of @formats array + * @code: Desired format code + * + * Return formats[0] on failure to find code + */ +const struct csid_format *csid_get_fmt_entry(const struct csid_format *formats, + unsigned int nformats, + u32 code); + +int msm_csid_subdev_init(struct camss *camss, struct csid_device *csid, + const struct resources *res, u8 id); + +int msm_csid_register_entity(struct csid_device *csid, + struct v4l2_device *v4l2_dev); + +void msm_csid_unregister_entity(struct csid_device *csid); + +void msm_csid_get_csid_id(struct media_entity *entity, u8 *id); + +extern const char * const csid_testgen_modes[]; + +extern const struct csid_hw_ops csid_ops_4_1; +extern const struct csid_hw_ops csid_ops_4_7; +extern const struct csid_hw_ops csid_ops_gen2; + + +#endif /* QC_MSM_CAMSS_CSID_H */ diff --git a/drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c b/drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c new file mode 100644 index 0000000000..cd4a8c3692 --- /dev/null +++ b/drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c @@ -0,0 +1,190 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * camss-csiphy-2ph-1-0.c + * + * Qualcomm MSM Camera Subsystem - CSIPHY Module 2phase v1.0 + * + * Copyright (c) 2011-2015, The Linux Foundation. All rights reserved. + * Copyright (C) 2016-2018 Linaro Ltd. + */ + +#include "camss-csiphy.h" + +#include <linux/delay.h> +#include <linux/interrupt.h> +#include <linux/io.h> + +#define CAMSS_CSI_PHY_LNn_CFG2(n) (0x004 + 0x40 * (n)) +#define CAMSS_CSI_PHY_LNn_CFG3(n) (0x008 + 0x40 * (n)) +#define CAMSS_CSI_PHY_LN_CLK 1 +#define CAMSS_CSI_PHY_GLBL_RESET 0x140 +#define CAMSS_CSI_PHY_GLBL_PWR_CFG 0x144 +#define CAMSS_CSI_PHY_GLBL_IRQ_CMD 0x164 +#define CAMSS_CSI_PHY_HW_VERSION 0x188 +#define CAMSS_CSI_PHY_INTERRUPT_STATUSn(n) (0x18c + 0x4 * (n)) +#define CAMSS_CSI_PHY_INTERRUPT_MASKn(n) (0x1ac + 0x4 * (n)) +#define CAMSS_CSI_PHY_INTERRUPT_CLEARn(n) (0x1cc + 0x4 * (n)) +#define CAMSS_CSI_PHY_GLBL_T_INIT_CFG0 0x1ec +#define CAMSS_CSI_PHY_T_WAKEUP_CFG0 0x1f4 + +static u8 csiphy_get_lane_mask(struct csiphy_lanes_cfg *lane_cfg) +{ + u8 lane_mask; + int i; + + lane_mask = 1 << CAMSS_CSI_PHY_LN_CLK; + + for (i = 0; i < lane_cfg->num_data; i++) + lane_mask |= 1 << lane_cfg->data[i].pos; + + return lane_mask; +} + +static void csiphy_hw_version_read(struct csiphy_device *csiphy, + struct device *dev) +{ + u8 hw_version = readl_relaxed(csiphy->base + + CAMSS_CSI_PHY_HW_VERSION); + + dev_dbg(dev, "CSIPHY HW Version = 0x%02x\n", hw_version); +} + +/* + * csiphy_reset - Perform software reset on CSIPHY module + * @csiphy: CSIPHY device + */ +static void csiphy_reset(struct csiphy_device *csiphy) +{ + writel_relaxed(0x1, csiphy->base + CAMSS_CSI_PHY_GLBL_RESET); + usleep_range(5000, 8000); + writel_relaxed(0x0, csiphy->base + CAMSS_CSI_PHY_GLBL_RESET); +} + +/* + * csiphy_settle_cnt_calc - Calculate settle count value + * + * Helper function to calculate settle count value. This is + * based on the CSI2 T_hs_settle parameter which in turn + * is calculated based on the CSI2 transmitter link frequency.
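+ *
+ * Worked example (illustrative numbers only, not taken from any particular
+ * sensor or board): for a 400 MHz DDR link the unit interval is
+ * 10^12 / 400000000 / 2 = 1250 ps, so T_hs_prepare(max) = 85000 + 6 * 1250
+ * = 92500 ps and T_hs_prepare + T_hs_zero(min) = 145000 + 10 * 1250
+ * = 157500 ps; their average gives T_hs_settle = 125000 ps. With a 200 MHz
+ * timer clock (5000 ps period) the programmed count is 125000 / 5000 - 1 = 24.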
+ * + * Return settle count value or 0 if the CSI2 link frequency + * is not available + */ +static u8 csiphy_settle_cnt_calc(s64 link_freq, u32 timer_clk_rate) +{ + u32 ui; /* ps */ + u32 timer_period; /* ps */ + u32 t_hs_prepare_max; /* ps */ + u32 t_hs_prepare_zero_min; /* ps */ + u32 t_hs_settle; /* ps */ + u8 settle_cnt; + + if (link_freq <= 0) + return 0; + + ui = div_u64(1000000000000LL, link_freq); + ui /= 2; + t_hs_prepare_max = 85000 + 6 * ui; + t_hs_prepare_zero_min = 145000 + 10 * ui; + t_hs_settle = (t_hs_prepare_max + t_hs_prepare_zero_min) / 2; + + timer_period = div_u64(1000000000000LL, timer_clk_rate); + settle_cnt = t_hs_settle / timer_period - 1; + + return settle_cnt; +} + +static void csiphy_lanes_enable(struct csiphy_device *csiphy, + struct csiphy_config *cfg, + s64 link_freq, u8 lane_mask) +{ + struct csiphy_lanes_cfg *c = &cfg->csi2->lane_cfg; + u8 settle_cnt; + u8 val, l = 0; + int i = 0; + + settle_cnt = csiphy_settle_cnt_calc(link_freq, csiphy->timer_clk_rate); + + writel_relaxed(0x1, csiphy->base + + CAMSS_CSI_PHY_GLBL_T_INIT_CFG0); + writel_relaxed(0x1, csiphy->base + + CAMSS_CSI_PHY_T_WAKEUP_CFG0); + + val = 0x1; + val |= lane_mask << 1; + writel_relaxed(val, csiphy->base + CAMSS_CSI_PHY_GLBL_PWR_CFG); + + val = cfg->combo_mode << 4; + writel_relaxed(val, csiphy->base + CAMSS_CSI_PHY_GLBL_RESET); + + for (i = 0; i <= c->num_data; i++) { + if (i == c->num_data) + l = CAMSS_CSI_PHY_LN_CLK; + else + l = c->data[i].pos; + + writel_relaxed(0x10, csiphy->base + + CAMSS_CSI_PHY_LNn_CFG2(l)); + writel_relaxed(settle_cnt, csiphy->base + + CAMSS_CSI_PHY_LNn_CFG3(l)); + writel_relaxed(0x3f, csiphy->base + + CAMSS_CSI_PHY_INTERRUPT_MASKn(l)); + writel_relaxed(0x3f, csiphy->base + + CAMSS_CSI_PHY_INTERRUPT_CLEARn(l)); + } +} + +static void csiphy_lanes_disable(struct csiphy_device *csiphy, + struct csiphy_config *cfg) +{ + struct csiphy_lanes_cfg *c = &cfg->csi2->lane_cfg; + u8 l = 0; + int i = 0; + + for (i = 0; i <= c->num_data; i++) { + if (i == c->num_data) + l = CAMSS_CSI_PHY_LN_CLK; + else + l = c->data[i].pos; + + writel_relaxed(0x0, csiphy->base + + CAMSS_CSI_PHY_LNn_CFG2(l)); + } + + writel_relaxed(0x0, csiphy->base + CAMSS_CSI_PHY_GLBL_PWR_CFG); +} + +/* + * csiphy_isr - CSIPHY module interrupt handler + * @irq: Interrupt line + * @dev: CSIPHY device + * + * Return IRQ_HANDLED on success + */ +static irqreturn_t csiphy_isr(int irq, void *dev) +{ + struct csiphy_device *csiphy = dev; + u8 i; + + for (i = 0; i < 8; i++) { + u8 val = readl_relaxed(csiphy->base + + CAMSS_CSI_PHY_INTERRUPT_STATUSn(i)); + writel_relaxed(val, csiphy->base + + CAMSS_CSI_PHY_INTERRUPT_CLEARn(i)); + writel_relaxed(0x1, csiphy->base + CAMSS_CSI_PHY_GLBL_IRQ_CMD); + writel_relaxed(0x0, csiphy->base + CAMSS_CSI_PHY_GLBL_IRQ_CMD); + writel_relaxed(0x0, csiphy->base + + CAMSS_CSI_PHY_INTERRUPT_CLEARn(i)); + } + + return IRQ_HANDLED; +} + +const struct csiphy_hw_ops csiphy_ops_2ph_1_0 = { + .get_lane_mask = csiphy_get_lane_mask, + .hw_version_read = csiphy_hw_version_read, + .reset = csiphy_reset, + .lanes_enable = csiphy_lanes_enable, + .lanes_disable = csiphy_lanes_disable, + .isr = csiphy_isr, +}; diff --git a/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c b/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c new file mode 100644 index 0000000000..4dba61b8d3 --- /dev/null +++ b/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c @@ -0,0 +1,521 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * camss-csiphy-3ph-1-0.c + * + * Qualcomm MSM Camera Subsystem - CSIPHY Module 
3phase v1.0 + * + * Copyright (c) 2011-2015, The Linux Foundation. All rights reserved. + * Copyright (C) 2016-2018 Linaro Ltd. + */ + +#include "camss.h" +#include "camss-csiphy.h" + +#include <linux/delay.h> +#include <linux/interrupt.h> +#include <linux/io.h> + +#define CSIPHY_3PH_LNn_CFG1(n) (0x000 + 0x100 * (n)) +#define CSIPHY_3PH_LNn_CFG1_SWI_REC_DLY_PRG (BIT(7) | BIT(6)) +#define CSIPHY_3PH_LNn_CFG2(n) (0x004 + 0x100 * (n)) +#define CSIPHY_3PH_LNn_CFG2_LP_REC_EN_INT BIT(3) +#define CSIPHY_3PH_LNn_CFG3(n) (0x008 + 0x100 * (n)) +#define CSIPHY_3PH_LNn_CFG4(n) (0x00c + 0x100 * (n)) +#define CSIPHY_3PH_LNn_CFG4_T_HS_CLK_MISS 0xa4 +#define CSIPHY_3PH_LNn_CFG4_T_HS_CLK_MISS_660 0xa5 +#define CSIPHY_3PH_LNn_CFG5(n) (0x010 + 0x100 * (n)) +#define CSIPHY_3PH_LNn_CFG5_T_HS_DTERM 0x02 +#define CSIPHY_3PH_LNn_CFG5_HS_REC_EQ_FQ_INT 0x50 +#define CSIPHY_3PH_LNn_TEST_IMP(n) (0x01c + 0x100 * (n)) +#define CSIPHY_3PH_LNn_TEST_IMP_HS_TERM_IMP 0xa +#define CSIPHY_3PH_LNn_MISC1(n) (0x028 + 0x100 * (n)) +#define CSIPHY_3PH_LNn_MISC1_IS_CLKLANE BIT(2) +#define CSIPHY_3PH_LNn_CFG6(n) (0x02c + 0x100 * (n)) +#define CSIPHY_3PH_LNn_CFG6_SWI_FORCE_INIT_EXIT BIT(0) +#define CSIPHY_3PH_LNn_CFG7(n) (0x030 + 0x100 * (n)) +#define CSIPHY_3PH_LNn_CFG7_SWI_T_INIT 0x2 +#define CSIPHY_3PH_LNn_CFG8(n) (0x034 + 0x100 * (n)) +#define CSIPHY_3PH_LNn_CFG8_SWI_SKIP_WAKEUP BIT(0) +#define CSIPHY_3PH_LNn_CFG8_SKEW_FILTER_ENABLE BIT(1) +#define CSIPHY_3PH_LNn_CFG9(n) (0x038 + 0x100 * (n)) +#define CSIPHY_3PH_LNn_CFG9_SWI_T_WAKEUP 0x1 +#define CSIPHY_3PH_LNn_CSI_LANE_CTRL15(n) (0x03c + 0x100 * (n)) +#define CSIPHY_3PH_LNn_CSI_LANE_CTRL15_SWI_SOT_SYMBOL 0xb8 + +#define CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(n) (0x800 + 0x4 * (n)) +#define CSIPHY_3PH_CMN_CSI_COMMON_CTRL5_CLK_ENABLE BIT(7) +#define CSIPHY_3PH_CMN_CSI_COMMON_CTRL6_COMMON_PWRDN_B BIT(0) +#define CSIPHY_3PH_CMN_CSI_COMMON_CTRL6_SHOW_REV_ID BIT(1) +#define CSIPHY_3PH_CMN_CSI_COMMON_STATUSn(n) (0x8b0 + 0x4 * (n)) + +#define CSIPHY_DEFAULT_PARAMS 0 +#define CSIPHY_LANE_ENABLE 1 +#define CSIPHY_SETTLE_CNT_LOWER_BYTE 2 +#define CSIPHY_SETTLE_CNT_HIGHER_BYTE 3 +#define CSIPHY_DNP_PARAMS 4 +#define CSIPHY_2PH_REGS 5 +#define CSIPHY_3PH_REGS 6 + +struct csiphy_reg_t { + s32 reg_addr; + s32 reg_data; + s32 delay; + u32 csiphy_param_type; +}; + +/* GEN2 1.0 2PH */ +static const struct +csiphy_reg_t lane_regs_sdm845[5][14] = { + { + {0x0004, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x002C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0034, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x001C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0014, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0028, 0x00, 0x00, CSIPHY_DNP_PARAMS}, + {0x003C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0000, 0x91, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0008, 0x00, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x000c, 0x00, 0x00, CSIPHY_DNP_PARAMS}, + {0x0010, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0038, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0060, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0064, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + }, + { + {0x0704, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x072C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0734, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x071C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0714, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0728, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x073C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0700, 0x80, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0708, 0x14, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x070C, 0xA5, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0710, 0x52, 0x00, 
CSIPHY_DEFAULT_PARAMS}, + {0x0738, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0760, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0764, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + }, + { + {0x0204, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x022C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0234, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x021C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0228, 0x00, 0x00, CSIPHY_DNP_PARAMS}, + {0x023C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0200, 0x91, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x020C, 0x00, 0x00, CSIPHY_DNP_PARAMS}, + {0x0210, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0238, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0260, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0264, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + }, + { + {0x0404, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x042C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0434, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x041C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0414, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0428, 0x00, 0x00, CSIPHY_DNP_PARAMS}, + {0x043C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0400, 0x91, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0408, 0x00, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x040C, 0x00, 0x00, CSIPHY_DNP_PARAMS}, + {0x0410, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0438, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0460, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0464, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + }, + { + {0x0604, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x062C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0634, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x061C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0628, 0x00, 0x00, CSIPHY_DNP_PARAMS}, + {0x063C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0600, 0x91, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x060C, 0x00, 0x00, CSIPHY_DNP_PARAMS}, + {0x0610, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0638, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0660, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0664, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS}, + }, +}; + +/* GEN2 1.2.1 2PH */ +static const struct +csiphy_reg_t lane_regs_sm8250[5][20] = { + { + {0x0030, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0900, 0x05, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0908, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0904, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0904, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0004, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x002C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0034, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0010, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x001C, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x003C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0008, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0000, 0x8D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x000c, 0x00, 0x00, CSIPHY_DNP_PARAMS}, + {0x0038, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0014, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0028, 0x00, 0x00, CSIPHY_DNP_PARAMS}, + {0x0024, 0x00, 0x00, CSIPHY_DNP_PARAMS}, + {0x0800, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0884, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + }, + { + {0x0730, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C80, 0x05, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C88, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C84, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C84, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0704, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x072C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0734, 0x07, 
0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0710, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x071C, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x073C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0708, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0700, 0x80, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x070c, 0xA5, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0738, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0714, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0728, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0724, 0x00, 0x00, CSIPHY_DNP_PARAMS}, + {0x0800, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0884, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + }, + { + {0x0230, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A00, 0x05, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A08, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A04, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0A04, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0204, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x022C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0234, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0210, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x021C, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x023C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0208, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0200, 0x8D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x020c, 0x00, 0x00, CSIPHY_DNP_PARAMS}, + {0x0238, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0214, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0228, 0x00, 0x00, CSIPHY_DNP_PARAMS}, + {0x0224, 0x00, 0x00, CSIPHY_DNP_PARAMS}, + {0x0800, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0884, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + }, + { + {0x0430, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0B00, 0x05, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0B08, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0B04, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0B04, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0404, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x042C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0434, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0410, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x041C, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x043C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0408, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0400, 0x8D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x040c, 0x00, 0x00, CSIPHY_DNP_PARAMS}, + {0x0438, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0414, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0428, 0x00, 0x00, CSIPHY_DNP_PARAMS}, + {0x0424, 0x00, 0x00, CSIPHY_DNP_PARAMS}, + {0x0800, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0884, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + }, + { + {0x0630, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C00, 0x05, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C08, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C04, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0C04, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0604, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x062C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0634, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0610, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x061C, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x063C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0608, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE}, + {0x0600, 0x8D, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x060c, 0x00, 0x00, CSIPHY_DNP_PARAMS}, + {0x0638, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0614, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0628, 0x00, 0x00, CSIPHY_DNP_PARAMS}, + {0x0624, 0x00, 0x00, CSIPHY_DNP_PARAMS}, + {0x0800, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS}, + {0x0884, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS}, + }, +}; + +static void csiphy_hw_version_read(struct csiphy_device *csiphy, + struct device *dev) +{ + u32 hw_version; + + 
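+ /*
+ * Note on the sequence below: writing SHOW_REV_ID to common CTRL6 makes
+ * the PHY expose its revision, which is then assembled one byte at a
+ * time from common STATUS registers 12..15, least significant byte first.
+ */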
writel(CSIPHY_3PH_CMN_CSI_COMMON_CTRL6_SHOW_REV_ID, + csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(6)); + + hw_version = readl_relaxed(csiphy->base + + CSIPHY_3PH_CMN_CSI_COMMON_STATUSn(12)); + hw_version |= readl_relaxed(csiphy->base + + CSIPHY_3PH_CMN_CSI_COMMON_STATUSn(13)) << 8; + hw_version |= readl_relaxed(csiphy->base + + CSIPHY_3PH_CMN_CSI_COMMON_STATUSn(14)) << 16; + hw_version |= readl_relaxed(csiphy->base + + CSIPHY_3PH_CMN_CSI_COMMON_STATUSn(15)) << 24; + + dev_dbg(dev, "CSIPHY 3PH HW Version = 0x%08x\n", hw_version); +} + +/* + * csiphy_reset - Perform software reset on CSIPHY module + * @csiphy: CSIPHY device + */ +static void csiphy_reset(struct csiphy_device *csiphy) +{ + writel_relaxed(0x1, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(0)); + usleep_range(5000, 8000); + writel_relaxed(0x0, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(0)); +} + +static irqreturn_t csiphy_isr(int irq, void *dev) +{ + struct csiphy_device *csiphy = dev; + int i; + + for (i = 0; i < 11; i++) { + int c = i + 22; + u8 val = readl_relaxed(csiphy->base + + CSIPHY_3PH_CMN_CSI_COMMON_STATUSn(i)); + + writel_relaxed(val, csiphy->base + + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(c)); + } + + writel_relaxed(0x1, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(10)); + writel_relaxed(0x0, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(10)); + + for (i = 22; i < 33; i++) + writel_relaxed(0x0, csiphy->base + + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(i)); + + return IRQ_HANDLED; +} + +/* + * csiphy_settle_cnt_calc - Calculate settle count value + * + * Helper function to calculate settle count value. This is + * based on the CSI2 T_hs_settle parameter which in turn + * is calculated based on the CSI2 transmitter link frequency. + * + * Return settle count value or 0 if the CSI2 link frequency + * is not available + */ +static u8 csiphy_settle_cnt_calc(s64 link_freq, u32 timer_clk_rate) +{ + u32 ui; /* ps */ + u32 timer_period; /* ps */ + u32 t_hs_prepare_max; /* ps */ + u32 t_hs_settle; /* ps */ + u8 settle_cnt; + + if (link_freq <= 0) + return 0; + + ui = div_u64(1000000000000LL, link_freq); + ui /= 2; + t_hs_prepare_max = 85000 + 6 * ui; + t_hs_settle = t_hs_prepare_max; + + timer_period = div_u64(1000000000000LL, timer_clk_rate); + settle_cnt = t_hs_settle / timer_period - 6; + + return settle_cnt; +} + +static void csiphy_gen1_config_lanes(struct csiphy_device *csiphy, + struct csiphy_config *cfg, + u8 settle_cnt) +{ + struct csiphy_lanes_cfg *c = &cfg->csi2->lane_cfg; + int i, l = 0; + u8 val; + + for (i = 0; i <= c->num_data; i++) { + if (i == c->num_data) + l = 7; + else + l = c->data[i].pos * 2; + + val = CSIPHY_3PH_LNn_CFG1_SWI_REC_DLY_PRG; + val |= 0x17; + writel_relaxed(val, csiphy->base + CSIPHY_3PH_LNn_CFG1(l)); + + val = CSIPHY_3PH_LNn_CFG2_LP_REC_EN_INT; + writel_relaxed(val, csiphy->base + CSIPHY_3PH_LNn_CFG2(l)); + + val = settle_cnt; + writel_relaxed(val, csiphy->base + CSIPHY_3PH_LNn_CFG3(l)); + + val = CSIPHY_3PH_LNn_CFG5_T_HS_DTERM | + CSIPHY_3PH_LNn_CFG5_HS_REC_EQ_FQ_INT; + writel_relaxed(val, csiphy->base + CSIPHY_3PH_LNn_CFG5(l)); + + val = CSIPHY_3PH_LNn_CFG6_SWI_FORCE_INIT_EXIT; + writel_relaxed(val, csiphy->base + CSIPHY_3PH_LNn_CFG6(l)); + + val = CSIPHY_3PH_LNn_CFG7_SWI_T_INIT; + writel_relaxed(val, csiphy->base + CSIPHY_3PH_LNn_CFG7(l)); + + val = CSIPHY_3PH_LNn_CFG8_SWI_SKIP_WAKEUP | + CSIPHY_3PH_LNn_CFG8_SKEW_FILTER_ENABLE; + writel_relaxed(val, csiphy->base + CSIPHY_3PH_LNn_CFG8(l)); + + val = CSIPHY_3PH_LNn_CFG9_SWI_T_WAKEUP; + writel_relaxed(val, csiphy->base + CSIPHY_3PH_LNn_CFG9(l)); 
+ + val = CSIPHY_3PH_LNn_TEST_IMP_HS_TERM_IMP; + writel_relaxed(val, csiphy->base + CSIPHY_3PH_LNn_TEST_IMP(l)); + + val = CSIPHY_3PH_LNn_CSI_LANE_CTRL15_SWI_SOT_SYMBOL; + writel_relaxed(val, csiphy->base + + CSIPHY_3PH_LNn_CSI_LANE_CTRL15(l)); + } + + val = CSIPHY_3PH_LNn_CFG1_SWI_REC_DLY_PRG; + writel_relaxed(val, csiphy->base + CSIPHY_3PH_LNn_CFG1(l)); + + if (csiphy->camss->version == CAMSS_660) + val = CSIPHY_3PH_LNn_CFG4_T_HS_CLK_MISS_660; + else + val = CSIPHY_3PH_LNn_CFG4_T_HS_CLK_MISS; + writel_relaxed(val, csiphy->base + CSIPHY_3PH_LNn_CFG4(l)); + + val = CSIPHY_3PH_LNn_MISC1_IS_CLKLANE; + writel_relaxed(val, csiphy->base + CSIPHY_3PH_LNn_MISC1(l)); +} + +static void csiphy_gen2_config_lanes(struct csiphy_device *csiphy, + u8 settle_cnt) +{ + const struct csiphy_reg_t *r; + int i, l, array_size; + u32 val; + + switch (csiphy->camss->version) { + case CAMSS_845: + r = &lane_regs_sdm845[0][0]; + array_size = ARRAY_SIZE(lane_regs_sdm845[0]); + break; + case CAMSS_8250: + r = &lane_regs_sm8250[0][0]; + array_size = ARRAY_SIZE(lane_regs_sm8250[0]); + break; + default: + WARN(1, "unknown csiphy version\n"); + return; + } + + for (l = 0; l < 5; l++) { + for (i = 0; i < array_size; i++, r++) { + switch (r->csiphy_param_type) { + case CSIPHY_SETTLE_CNT_LOWER_BYTE: + val = settle_cnt & 0xff; + break; + case CSIPHY_DNP_PARAMS: + continue; + default: + val = r->reg_data; + break; + } + writel_relaxed(val, csiphy->base + r->reg_addr); + } + } +} + +static u8 csiphy_get_lane_mask(struct csiphy_lanes_cfg *lane_cfg) +{ + u8 lane_mask; + int i; + + lane_mask = CSIPHY_3PH_CMN_CSI_COMMON_CTRL5_CLK_ENABLE; + + for (i = 0; i < lane_cfg->num_data; i++) + lane_mask |= 1 << lane_cfg->data[i].pos; + + return lane_mask; +} + +static void csiphy_lanes_enable(struct csiphy_device *csiphy, + struct csiphy_config *cfg, + s64 link_freq, u8 lane_mask) +{ + struct csiphy_lanes_cfg *c = &cfg->csi2->lane_cfg; + bool is_gen2 = (csiphy->camss->version == CAMSS_845 || + csiphy->camss->version == CAMSS_8250); + u8 settle_cnt; + u8 val; + int i; + + settle_cnt = csiphy_settle_cnt_calc(link_freq, csiphy->timer_clk_rate); + + val = CSIPHY_3PH_CMN_CSI_COMMON_CTRL5_CLK_ENABLE; + for (i = 0; i < c->num_data; i++) + val |= BIT(c->data[i].pos * 2); + + writel_relaxed(val, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(5)); + + val = CSIPHY_3PH_CMN_CSI_COMMON_CTRL6_COMMON_PWRDN_B; + writel_relaxed(val, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(6)); + + val = 0x02; + writel_relaxed(val, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(7)); + + val = 0x00; + writel_relaxed(val, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(0)); + + if (is_gen2) + csiphy_gen2_config_lanes(csiphy, settle_cnt); + else + csiphy_gen1_config_lanes(csiphy, cfg, settle_cnt); + + /* IRQ_MASK registers - disable all interrupts */ + for (i = 11; i < 22; i++) + writel_relaxed(0, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(i)); +} + +static void csiphy_lanes_disable(struct csiphy_device *csiphy, + struct csiphy_config *cfg) +{ + writel_relaxed(0, csiphy->base + + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(5)); + + writel_relaxed(0, csiphy->base + + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(6)); +} + +const struct csiphy_hw_ops csiphy_ops_3ph_1_0 = { + .get_lane_mask = csiphy_get_lane_mask, + .hw_version_read = csiphy_hw_version_read, + .reset = csiphy_reset, + .lanes_enable = csiphy_lanes_enable, + .lanes_disable = csiphy_lanes_disable, + .isr = csiphy_isr, +}; diff --git a/drivers/media/platform/qcom/camss/camss-csiphy.c b/drivers/media/platform/qcom/camss/camss-csiphy.c
new file mode 100644 index 0000000000..3f726a7237 --- /dev/null +++ b/drivers/media/platform/qcom/camss/camss-csiphy.c @@ -0,0 +1,797 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * camss-csiphy.c + * + * Qualcomm MSM Camera Subsystem - CSIPHY Module + * + * Copyright (c) 2011-2015, The Linux Foundation. All rights reserved. + * Copyright (C) 2016-2018 Linaro Ltd. + */ +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <media/media-entity.h> +#include <media/v4l2-device.h> +#include <media/v4l2-subdev.h> + +#include "camss-csiphy.h" +#include "camss.h" + +#define MSM_CSIPHY_NAME "msm_csiphy" + +struct csiphy_format { + u32 code; + u8 bpp; +}; + +static const struct csiphy_format csiphy_formats_8x16[] = { + { MEDIA_BUS_FMT_UYVY8_2X8, 8 }, + { MEDIA_BUS_FMT_VYUY8_2X8, 8 }, + { MEDIA_BUS_FMT_YUYV8_2X8, 8 }, + { MEDIA_BUS_FMT_YVYU8_2X8, 8 }, + { MEDIA_BUS_FMT_SBGGR8_1X8, 8 }, + { MEDIA_BUS_FMT_SGBRG8_1X8, 8 }, + { MEDIA_BUS_FMT_SGRBG8_1X8, 8 }, + { MEDIA_BUS_FMT_SRGGB8_1X8, 8 }, + { MEDIA_BUS_FMT_SBGGR10_1X10, 10 }, + { MEDIA_BUS_FMT_SGBRG10_1X10, 10 }, + { MEDIA_BUS_FMT_SGRBG10_1X10, 10 }, + { MEDIA_BUS_FMT_SRGGB10_1X10, 10 }, + { MEDIA_BUS_FMT_SBGGR12_1X12, 12 }, + { MEDIA_BUS_FMT_SGBRG12_1X12, 12 }, + { MEDIA_BUS_FMT_SGRBG12_1X12, 12 }, + { MEDIA_BUS_FMT_SRGGB12_1X12, 12 }, + { MEDIA_BUS_FMT_Y10_1X10, 10 }, +}; + +static const struct csiphy_format csiphy_formats_8x96[] = { + { MEDIA_BUS_FMT_UYVY8_2X8, 8 }, + { MEDIA_BUS_FMT_VYUY8_2X8, 8 }, + { MEDIA_BUS_FMT_YUYV8_2X8, 8 }, + { MEDIA_BUS_FMT_YVYU8_2X8, 8 }, + { MEDIA_BUS_FMT_SBGGR8_1X8, 8 }, + { MEDIA_BUS_FMT_SGBRG8_1X8, 8 }, + { MEDIA_BUS_FMT_SGRBG8_1X8, 8 }, + { MEDIA_BUS_FMT_SRGGB8_1X8, 8 }, + { MEDIA_BUS_FMT_SBGGR10_1X10, 10 }, + { MEDIA_BUS_FMT_SGBRG10_1X10, 10 }, + { MEDIA_BUS_FMT_SGRBG10_1X10, 10 }, + { MEDIA_BUS_FMT_SRGGB10_1X10, 10 }, + { MEDIA_BUS_FMT_SBGGR12_1X12, 12 }, + { MEDIA_BUS_FMT_SGBRG12_1X12, 12 }, + { MEDIA_BUS_FMT_SGRBG12_1X12, 12 }, + { MEDIA_BUS_FMT_SRGGB12_1X12, 12 }, + { MEDIA_BUS_FMT_SBGGR14_1X14, 14 }, + { MEDIA_BUS_FMT_SGBRG14_1X14, 14 }, + { MEDIA_BUS_FMT_SGRBG14_1X14, 14 }, + { MEDIA_BUS_FMT_SRGGB14_1X14, 14 }, + { MEDIA_BUS_FMT_Y10_1X10, 10 }, +}; + +static const struct csiphy_format csiphy_formats_sdm845[] = { + { MEDIA_BUS_FMT_UYVY8_2X8, 8 }, + { MEDIA_BUS_FMT_VYUY8_2X8, 8 }, + { MEDIA_BUS_FMT_YUYV8_2X8, 8 }, + { MEDIA_BUS_FMT_YVYU8_2X8, 8 }, + { MEDIA_BUS_FMT_SBGGR8_1X8, 8 }, + { MEDIA_BUS_FMT_SGBRG8_1X8, 8 }, + { MEDIA_BUS_FMT_SGRBG8_1X8, 8 }, + { MEDIA_BUS_FMT_SRGGB8_1X8, 8 }, + { MEDIA_BUS_FMT_SBGGR10_1X10, 10 }, + { MEDIA_BUS_FMT_SGBRG10_1X10, 10 }, + { MEDIA_BUS_FMT_SGRBG10_1X10, 10 }, + { MEDIA_BUS_FMT_SRGGB10_1X10, 10 }, + { MEDIA_BUS_FMT_SBGGR12_1X12, 12 }, + { MEDIA_BUS_FMT_SGBRG12_1X12, 12 }, + { MEDIA_BUS_FMT_SGRBG12_1X12, 12 }, + { MEDIA_BUS_FMT_SRGGB12_1X12, 12 }, + { MEDIA_BUS_FMT_SBGGR14_1X14, 14 }, + { MEDIA_BUS_FMT_SGBRG14_1X14, 14 }, + { MEDIA_BUS_FMT_SGRBG14_1X14, 14 }, + { MEDIA_BUS_FMT_SRGGB14_1X14, 14 }, + { MEDIA_BUS_FMT_Y8_1X8, 8 }, + { MEDIA_BUS_FMT_Y10_1X10, 10 }, +}; + +/* + * csiphy_get_bpp - map media bus format to bits per pixel + * @formats: supported media bus formats array + * @nformats: size of @formats array + * @code: media bus format code + * + * Return number of bits per pixel + */ +static u8 csiphy_get_bpp(const struct csiphy_format *formats, + unsigned int nformats, u32 code) +{ + unsigned 
int i; + + for (i = 0; i < nformats; i++) + if (code == formats[i].code) + return formats[i].bpp; + + WARN(1, "Unknown format\n"); + + return formats[0].bpp; +} + +/* + * csiphy_set_clock_rates - Calculate and set clock rates on CSIPHY module + * @csiphy: CSIPHY device + */ +static int csiphy_set_clock_rates(struct csiphy_device *csiphy) +{ + struct device *dev = csiphy->camss->dev; + s64 link_freq; + int i, j; + int ret; + + u8 bpp = csiphy_get_bpp(csiphy->formats, csiphy->nformats, + csiphy->fmt[MSM_CSIPHY_PAD_SINK].code); + u8 num_lanes = csiphy->cfg.csi2->lane_cfg.num_data; + + link_freq = camss_get_link_freq(&csiphy->subdev.entity, bpp, num_lanes); + if (link_freq < 0) + link_freq = 0; + + for (i = 0; i < csiphy->nclocks; i++) { + struct camss_clock *clock = &csiphy->clock[i]; + + if (csiphy->rate_set[i]) { + u64 min_rate = link_freq / 4; + long round_rate; + + camss_add_clock_margin(&min_rate); + + for (j = 0; j < clock->nfreqs; j++) + if (min_rate < clock->freq[j]) + break; + + if (j == clock->nfreqs) { + dev_err(dev, + "Pixel clock is too high for CSIPHY\n"); + return -EINVAL; + } + + /* if sensor pixel clock is not available */ + /* set highest possible CSIPHY clock rate */ + if (min_rate == 0) + j = clock->nfreqs - 1; + + round_rate = clk_round_rate(clock->clk, clock->freq[j]); + if (round_rate < 0) { + dev_err(dev, "clk round rate failed: %ld\n", + round_rate); + return -EINVAL; + } + + csiphy->timer_clk_rate = round_rate; + + ret = clk_set_rate(clock->clk, csiphy->timer_clk_rate); + if (ret < 0) { + dev_err(dev, "clk set rate failed: %d\n", ret); + return ret; + } + } + } + + return 0; +} + +/* + * csiphy_set_power - Power on/off CSIPHY module + * @sd: CSIPHY V4L2 subdevice + * @on: Requested power state + * + * Return 0 on success or a negative error code otherwise + */ +static int csiphy_set_power(struct v4l2_subdev *sd, int on) +{ + struct csiphy_device *csiphy = v4l2_get_subdevdata(sd); + struct device *dev = csiphy->camss->dev; + + if (on) { + int ret; + + ret = pm_runtime_resume_and_get(dev); + if (ret < 0) + return ret; + + ret = csiphy_set_clock_rates(csiphy); + if (ret < 0) { + pm_runtime_put_sync(dev); + return ret; + } + + ret = camss_enable_clocks(csiphy->nclocks, csiphy->clock, dev); + if (ret < 0) { + pm_runtime_put_sync(dev); + return ret; + } + + enable_irq(csiphy->irq); + + csiphy->ops->reset(csiphy); + + csiphy->ops->hw_version_read(csiphy, dev); + } else { + disable_irq(csiphy->irq); + + camss_disable_clocks(csiphy->nclocks, csiphy->clock); + + pm_runtime_put_sync(dev); + } + + return 0; +} + +/* + * csiphy_stream_on - Enable streaming on CSIPHY module + * @csiphy: CSIPHY device + * + * Helper function to enable streaming on CSIPHY module. + * Main configuration of CSIPHY module is also done here. 
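+ * The order of operations below is: look up the CSI2 link frequency,
+ * program the CSID id into the clock mux (on platforms that have one)
+ * and finally enable the configured lanes, which also programs the
+ * settle count derived from the link frequency.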
+ * + * Return 0 on success or a negative error code otherwise + */ +static int csiphy_stream_on(struct csiphy_device *csiphy) +{ + struct csiphy_config *cfg = &csiphy->cfg; + s64 link_freq; + u8 lane_mask = csiphy->ops->get_lane_mask(&cfg->csi2->lane_cfg); + u8 bpp = csiphy_get_bpp(csiphy->formats, csiphy->nformats, + csiphy->fmt[MSM_CSIPHY_PAD_SINK].code); + u8 num_lanes = csiphy->cfg.csi2->lane_cfg.num_data; + u8 val; + + link_freq = camss_get_link_freq(&csiphy->subdev.entity, bpp, num_lanes); + + if (link_freq < 0) { + dev_err(csiphy->camss->dev, + "Cannot get CSI2 transmitter's link frequency\n"); + return -EINVAL; + } + + if (csiphy->base_clk_mux) { + val = readl_relaxed(csiphy->base_clk_mux); + if (cfg->combo_mode && (lane_mask & 0x18) == 0x18) { + val &= ~0xf0; + val |= cfg->csid_id << 4; + } else { + val &= ~0xf; + val |= cfg->csid_id; + } + writel_relaxed(val, csiphy->base_clk_mux); + + /* Enforce reg write ordering between clk mux & lane enabling */ + wmb(); + } + + csiphy->ops->lanes_enable(csiphy, cfg, link_freq, lane_mask); + + return 0; +} + +/* + * csiphy_stream_off - Disable streaming on CSIPHY module + * @csiphy: CSIPHY device + * + * Helper function to disable streaming on CSIPHY module + */ +static void csiphy_stream_off(struct csiphy_device *csiphy) +{ + csiphy->ops->lanes_disable(csiphy, &csiphy->cfg); +} + + +/* + * csiphy_set_stream - Enable/disable streaming on CSIPHY module + * @sd: CSIPHY V4L2 subdevice + * @enable: Requested streaming state + * + * Return 0 on success or a negative error code otherwise + */ +static int csiphy_set_stream(struct v4l2_subdev *sd, int enable) +{ + struct csiphy_device *csiphy = v4l2_get_subdevdata(sd); + int ret = 0; + + if (enable) + ret = csiphy_stream_on(csiphy); + else + csiphy_stream_off(csiphy); + + return ret; +} + +/* + * __csiphy_get_format - Get pointer to format structure + * @csiphy: CSIPHY device + * @cfg: V4L2 subdev pad configuration + * @pad: pad from which format is requested + * @which: TRY or ACTIVE format + * + * Return pointer to TRY or ACTIVE format structure + */ +static struct v4l2_mbus_framefmt * +__csiphy_get_format(struct csiphy_device *csiphy, + struct v4l2_subdev_state *sd_state, + unsigned int pad, + enum v4l2_subdev_format_whence which) +{ + if (which == V4L2_SUBDEV_FORMAT_TRY) + return v4l2_subdev_get_try_format(&csiphy->subdev, sd_state, + pad); + + return &csiphy->fmt[pad]; +} + +/* + * csiphy_try_format - Handle try format by pad subdev method + * @csiphy: CSIPHY device + * @cfg: V4L2 subdev pad configuration + * @pad: pad on which format is requested + * @fmt: pointer to v4l2 format structure + * @which: wanted subdev format + */ +static void csiphy_try_format(struct csiphy_device *csiphy, + struct v4l2_subdev_state *sd_state, + unsigned int pad, + struct v4l2_mbus_framefmt *fmt, + enum v4l2_subdev_format_whence which) +{ + unsigned int i; + + switch (pad) { + case MSM_CSIPHY_PAD_SINK: + /* Set format on sink pad */ + + for (i = 0; i < csiphy->nformats; i++) + if (fmt->code == csiphy->formats[i].code) + break; + + /* If not found, use UYVY as default */ + if (i >= csiphy->nformats) + fmt->code = MEDIA_BUS_FMT_UYVY8_2X8; + + fmt->width = clamp_t(u32, fmt->width, 1, 8191); + fmt->height = clamp_t(u32, fmt->height, 1, 8191); + + fmt->field = V4L2_FIELD_NONE; + fmt->colorspace = V4L2_COLORSPACE_SRGB; + + break; + + case MSM_CSIPHY_PAD_SRC: + /* Set and return a format same as sink pad */ + + *fmt = *__csiphy_get_format(csiphy, sd_state, + MSM_CSID_PAD_SINK, + which); + + break; + } +} + +/* + * 
csiphy_enum_mbus_code - Handle pixel format enumeration + * @sd: CSIPHY V4L2 subdevice + * @cfg: V4L2 subdev pad configuration + * @code: pointer to v4l2_subdev_mbus_code_enum structure + * return -EINVAL or zero on success + */ +static int csiphy_enum_mbus_code(struct v4l2_subdev *sd, + struct v4l2_subdev_state *sd_state, + struct v4l2_subdev_mbus_code_enum *code) +{ + struct csiphy_device *csiphy = v4l2_get_subdevdata(sd); + struct v4l2_mbus_framefmt *format; + + if (code->pad == MSM_CSIPHY_PAD_SINK) { + if (code->index >= csiphy->nformats) + return -EINVAL; + + code->code = csiphy->formats[code->index].code; + } else { + if (code->index > 0) + return -EINVAL; + + format = __csiphy_get_format(csiphy, sd_state, + MSM_CSIPHY_PAD_SINK, + code->which); + + code->code = format->code; + } + + return 0; +} + +/* + * csiphy_enum_frame_size - Handle frame size enumeration + * @sd: CSIPHY V4L2 subdevice + * @cfg: V4L2 subdev pad configuration + * @fse: pointer to v4l2_subdev_frame_size_enum structure + * return -EINVAL or zero on success + */ +static int csiphy_enum_frame_size(struct v4l2_subdev *sd, + struct v4l2_subdev_state *sd_state, + struct v4l2_subdev_frame_size_enum *fse) +{ + struct csiphy_device *csiphy = v4l2_get_subdevdata(sd); + struct v4l2_mbus_framefmt format; + + if (fse->index != 0) + return -EINVAL; + + format.code = fse->code; + format.width = 1; + format.height = 1; + csiphy_try_format(csiphy, sd_state, fse->pad, &format, fse->which); + fse->min_width = format.width; + fse->min_height = format.height; + + if (format.code != fse->code) + return -EINVAL; + + format.code = fse->code; + format.width = -1; + format.height = -1; + csiphy_try_format(csiphy, sd_state, fse->pad, &format, fse->which); + fse->max_width = format.width; + fse->max_height = format.height; + + return 0; +} + +/* + * csiphy_get_format - Handle get format by pads subdev method + * @sd: CSIPHY V4L2 subdevice + * @cfg: V4L2 subdev pad configuration + * @fmt: pointer to v4l2 subdev format structure + * + * Return -EINVAL or zero on success + */ +static int csiphy_get_format(struct v4l2_subdev *sd, + struct v4l2_subdev_state *sd_state, + struct v4l2_subdev_format *fmt) +{ + struct csiphy_device *csiphy = v4l2_get_subdevdata(sd); + struct v4l2_mbus_framefmt *format; + + format = __csiphy_get_format(csiphy, sd_state, fmt->pad, fmt->which); + if (format == NULL) + return -EINVAL; + + fmt->format = *format; + + return 0; +} + +/* + * csiphy_set_format - Handle set format by pads subdev method + * @sd: CSIPHY V4L2 subdevice + * @cfg: V4L2 subdev pad configuration + * @fmt: pointer to v4l2 subdev format structure + * + * Return -EINVAL or zero on success + */ +static int csiphy_set_format(struct v4l2_subdev *sd, + struct v4l2_subdev_state *sd_state, + struct v4l2_subdev_format *fmt) +{ + struct csiphy_device *csiphy = v4l2_get_subdevdata(sd); + struct v4l2_mbus_framefmt *format; + + format = __csiphy_get_format(csiphy, sd_state, fmt->pad, fmt->which); + if (format == NULL) + return -EINVAL; + + csiphy_try_format(csiphy, sd_state, fmt->pad, &fmt->format, + fmt->which); + *format = fmt->format; + + /* Propagate the format from sink to source */ + if (fmt->pad == MSM_CSIPHY_PAD_SINK) { + format = __csiphy_get_format(csiphy, sd_state, + MSM_CSIPHY_PAD_SRC, + fmt->which); + + *format = fmt->format; + csiphy_try_format(csiphy, sd_state, MSM_CSIPHY_PAD_SRC, + format, + fmt->which); + } + + return 0; +} + +/* + * csiphy_init_formats - Initialize formats on all pads + * @sd: CSIPHY V4L2 subdevice + * @fh: V4L2 subdev file handle 
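+ *      (may be NULL, in which case the ACTIVE format is initialized
+ *      instead of the file handle's TRY format)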
+ * + * Initialize all pad formats with default values. + * + * Return 0 on success or a negative error code otherwise + */ +static int csiphy_init_formats(struct v4l2_subdev *sd, + struct v4l2_subdev_fh *fh) +{ + struct v4l2_subdev_format format = { + .pad = MSM_CSIPHY_PAD_SINK, + .which = fh ? V4L2_SUBDEV_FORMAT_TRY : + V4L2_SUBDEV_FORMAT_ACTIVE, + .format = { + .code = MEDIA_BUS_FMT_UYVY8_2X8, + .width = 1920, + .height = 1080 + } + }; + + return csiphy_set_format(sd, fh ? fh->state : NULL, &format); +} + +/* + * msm_csiphy_subdev_init - Initialize CSIPHY device structure and resources + * @csiphy: CSIPHY device + * @res: CSIPHY module resources table + * @id: CSIPHY module id + * + * Return 0 on success or a negative error code otherwise + */ +int msm_csiphy_subdev_init(struct camss *camss, + struct csiphy_device *csiphy, + const struct resources *res, u8 id) +{ + struct device *dev = camss->dev; + struct platform_device *pdev = to_platform_device(dev); + int i, j; + int ret; + + csiphy->camss = camss; + csiphy->id = id; + csiphy->cfg.combo_mode = 0; + + if (camss->version == CAMSS_8x16) { + csiphy->ops = &csiphy_ops_2ph_1_0; + csiphy->formats = csiphy_formats_8x16; + csiphy->nformats = ARRAY_SIZE(csiphy_formats_8x16); + } else if (camss->version == CAMSS_8x96 || + camss->version == CAMSS_660) { + csiphy->ops = &csiphy_ops_3ph_1_0; + csiphy->formats = csiphy_formats_8x96; + csiphy->nformats = ARRAY_SIZE(csiphy_formats_8x96); + } else if (camss->version == CAMSS_845 || + camss->version == CAMSS_8250) { + csiphy->ops = &csiphy_ops_3ph_1_0; + csiphy->formats = csiphy_formats_sdm845; + csiphy->nformats = ARRAY_SIZE(csiphy_formats_sdm845); + } else { + return -EINVAL; + } + + /* Memory */ + + csiphy->base = devm_platform_ioremap_resource_byname(pdev, res->reg[0]); + if (IS_ERR(csiphy->base)) + return PTR_ERR(csiphy->base); + + if (camss->version == CAMSS_8x16 || + camss->version == CAMSS_8x96) { + csiphy->base_clk_mux = + devm_platform_ioremap_resource_byname(pdev, res->reg[1]); + if (IS_ERR(csiphy->base_clk_mux)) + return PTR_ERR(csiphy->base_clk_mux); + } else { + csiphy->base_clk_mux = NULL; + } + + /* Interrupt */ + + ret = platform_get_irq_byname(pdev, res->interrupt[0]); + if (ret < 0) + return ret; + + csiphy->irq = ret; + snprintf(csiphy->irq_name, sizeof(csiphy->irq_name), "%s_%s%d", + dev_name(dev), MSM_CSIPHY_NAME, csiphy->id); + + ret = devm_request_irq(dev, csiphy->irq, csiphy->ops->isr, + IRQF_TRIGGER_RISING | IRQF_NO_AUTOEN, + csiphy->irq_name, csiphy); + if (ret < 0) { + dev_err(dev, "request_irq failed: %d\n", ret); + return ret; + } + + /* Clocks */ + + csiphy->nclocks = 0; + while (res->clock[csiphy->nclocks]) + csiphy->nclocks++; + + csiphy->clock = devm_kcalloc(dev, + csiphy->nclocks, sizeof(*csiphy->clock), + GFP_KERNEL); + if (!csiphy->clock) + return -ENOMEM; + + csiphy->rate_set = devm_kcalloc(dev, + csiphy->nclocks, + sizeof(*csiphy->rate_set), + GFP_KERNEL); + if (!csiphy->rate_set) + return -ENOMEM; + + for (i = 0; i < csiphy->nclocks; i++) { + struct camss_clock *clock = &csiphy->clock[i]; + + clock->clk = devm_clk_get(dev, res->clock[i]); + if (IS_ERR(clock->clk)) + return PTR_ERR(clock->clk); + + clock->name = res->clock[i]; + + clock->nfreqs = 0; + while (res->clock_rate[i][clock->nfreqs]) + clock->nfreqs++; + + if (!clock->nfreqs) { + clock->freq = NULL; + continue; + } + + clock->freq = devm_kcalloc(dev, + clock->nfreqs, + sizeof(*clock->freq), + GFP_KERNEL); + if (!clock->freq) + return -ENOMEM; + + for (j = 0; j < clock->nfreqs; j++) + clock->freq[j] = 
res->clock_rate[i][j]; + + if (!strcmp(clock->name, "csiphy0_timer") || + !strcmp(clock->name, "csiphy1_timer") || + !strcmp(clock->name, "csiphy2_timer") || + !strcmp(clock->name, "csiphy3_timer") || + !strcmp(clock->name, "csiphy4_timer") || + !strcmp(clock->name, "csiphy5_timer")) + csiphy->rate_set[i] = true; + + if (camss->version == CAMSS_660 && + (!strcmp(clock->name, "csi0_phy") || + !strcmp(clock->name, "csi1_phy") || + !strcmp(clock->name, "csi2_phy"))) + csiphy->rate_set[i] = true; + } + + return 0; +} + +/* + * csiphy_link_setup - Setup CSIPHY connections + * @entity: Pointer to media entity structure + * @local: Pointer to local pad + * @remote: Pointer to remote pad + * @flags: Link flags + * + * Return 0 on success + */ +static int csiphy_link_setup(struct media_entity *entity, + const struct media_pad *local, + const struct media_pad *remote, u32 flags) +{ + if ((local->flags & MEDIA_PAD_FL_SOURCE) && + (flags & MEDIA_LNK_FL_ENABLED)) { + struct v4l2_subdev *sd; + struct csiphy_device *csiphy; + struct csid_device *csid; + + if (media_pad_remote_pad_first(local)) + return -EBUSY; + + sd = media_entity_to_v4l2_subdev(entity); + csiphy = v4l2_get_subdevdata(sd); + + sd = media_entity_to_v4l2_subdev(remote->entity); + csid = v4l2_get_subdevdata(sd); + + csiphy->cfg.csid_id = csid->id; + } + + return 0; +} + +static const struct v4l2_subdev_core_ops csiphy_core_ops = { + .s_power = csiphy_set_power, +}; + +static const struct v4l2_subdev_video_ops csiphy_video_ops = { + .s_stream = csiphy_set_stream, +}; + +static const struct v4l2_subdev_pad_ops csiphy_pad_ops = { + .enum_mbus_code = csiphy_enum_mbus_code, + .enum_frame_size = csiphy_enum_frame_size, + .get_fmt = csiphy_get_format, + .set_fmt = csiphy_set_format, +}; + +static const struct v4l2_subdev_ops csiphy_v4l2_ops = { + .core = &csiphy_core_ops, + .video = &csiphy_video_ops, + .pad = &csiphy_pad_ops, +}; + +static const struct v4l2_subdev_internal_ops csiphy_v4l2_internal_ops = { + .open = csiphy_init_formats, +}; + +static const struct media_entity_operations csiphy_media_ops = { + .link_setup = csiphy_link_setup, + .link_validate = v4l2_subdev_link_validate, +}; + +/* + * msm_csiphy_register_entity - Register subdev node for CSIPHY module + * @csiphy: CSIPHY device + * @v4l2_dev: V4L2 device + * + * Return 0 on success or a negative error code otherwise + */ +int msm_csiphy_register_entity(struct csiphy_device *csiphy, + struct v4l2_device *v4l2_dev) +{ + struct v4l2_subdev *sd = &csiphy->subdev; + struct media_pad *pads = csiphy->pads; + struct device *dev = csiphy->camss->dev; + int ret; + + v4l2_subdev_init(sd, &csiphy_v4l2_ops); + sd->internal_ops = &csiphy_v4l2_internal_ops; + sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; + snprintf(sd->name, ARRAY_SIZE(sd->name), "%s%d", + MSM_CSIPHY_NAME, csiphy->id); + v4l2_set_subdevdata(sd, csiphy); + + ret = csiphy_init_formats(sd, NULL); + if (ret < 0) { + dev_err(dev, "Failed to init format: %d\n", ret); + return ret; + } + + pads[MSM_CSIPHY_PAD_SINK].flags = MEDIA_PAD_FL_SINK; + pads[MSM_CSIPHY_PAD_SRC].flags = MEDIA_PAD_FL_SOURCE; + + sd->entity.function = MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER; + sd->entity.ops = &csiphy_media_ops; + ret = media_entity_pads_init(&sd->entity, MSM_CSIPHY_PADS_NUM, pads); + if (ret < 0) { + dev_err(dev, "Failed to init media entity: %d\n", ret); + return ret; + } + + ret = v4l2_device_register_subdev(v4l2_dev, sd); + if (ret < 0) { + dev_err(dev, "Failed to register subdev: %d\n", ret); + media_entity_cleanup(&sd->entity); + } + + return ret;
+} + +/* + * msm_csiphy_unregister_entity - Unregister CSIPHY module subdev node + * @csiphy: CSIPHY device + */ +void msm_csiphy_unregister_entity(struct csiphy_device *csiphy) +{ + v4l2_device_unregister_subdev(&csiphy->subdev); + media_entity_cleanup(&csiphy->subdev.entity); +} diff --git a/drivers/media/platform/qcom/camss/camss-csiphy.h b/drivers/media/platform/qcom/camss/camss-csiphy.h new file mode 100644 index 0000000000..1c14947f92 --- /dev/null +++ b/drivers/media/platform/qcom/camss/camss-csiphy.h @@ -0,0 +1,100 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * camss-csiphy.h + * + * Qualcomm MSM Camera Subsystem - CSIPHY Module + * + * Copyright (c) 2011-2015, The Linux Foundation. All rights reserved. + * Copyright (C) 2016-2018 Linaro Ltd. + */ +#ifndef QC_MSM_CAMSS_CSIPHY_H +#define QC_MSM_CAMSS_CSIPHY_H + +#include <linux/clk.h> +#include <linux/interrupt.h> +#include <media/media-entity.h> +#include <media/v4l2-device.h> +#include <media/v4l2-mediabus.h> +#include <media/v4l2-subdev.h> + +#define MSM_CSIPHY_PAD_SINK 0 +#define MSM_CSIPHY_PAD_SRC 1 +#define MSM_CSIPHY_PADS_NUM 2 + +struct csiphy_lane { + u8 pos; + u8 pol; +}; + +struct csiphy_lanes_cfg { + int num_data; + struct csiphy_lane *data; + struct csiphy_lane clk; +}; + +struct csiphy_csi2_cfg { + struct csiphy_lanes_cfg lane_cfg; +}; + +struct csiphy_config { + u8 combo_mode; + u8 csid_id; + struct csiphy_csi2_cfg *csi2; +}; + +struct csiphy_device; + +struct csiphy_hw_ops { + /* + * csiphy_get_lane_mask - Calculate CSI2 lane mask configuration parameter + * @lane_cfg - CSI2 lane configuration + * + * Return lane mask + */ + u8 (*get_lane_mask)(struct csiphy_lanes_cfg *lane_cfg); + void (*hw_version_read)(struct csiphy_device *csiphy, + struct device *dev); + void (*reset)(struct csiphy_device *csiphy); + void (*lanes_enable)(struct csiphy_device *csiphy, + struct csiphy_config *cfg, + s64 link_freq, u8 lane_mask); + void (*lanes_disable)(struct csiphy_device *csiphy, + struct csiphy_config *cfg); + irqreturn_t (*isr)(int irq, void *dev); +}; + +struct csiphy_device { + struct camss *camss; + u8 id; + struct v4l2_subdev subdev; + struct media_pad pads[MSM_CSIPHY_PADS_NUM]; + void __iomem *base; + void __iomem *base_clk_mux; + u32 irq; + char irq_name[30]; + struct camss_clock *clock; + bool *rate_set; + int nclocks; + u32 timer_clk_rate; + struct csiphy_config cfg; + struct v4l2_mbus_framefmt fmt[MSM_CSIPHY_PADS_NUM]; + const struct csiphy_hw_ops *ops; + const struct csiphy_format *formats; + unsigned int nformats; +}; + +struct resources; + +int msm_csiphy_subdev_init(struct camss *camss, + struct csiphy_device *csiphy, + const struct resources *res, u8 id); + +int msm_csiphy_register_entity(struct csiphy_device *csiphy, + struct v4l2_device *v4l2_dev); + +void msm_csiphy_unregister_entity(struct csiphy_device *csiphy); + +extern const struct csiphy_hw_ops csiphy_ops_2ph_1_0; +extern const struct csiphy_hw_ops csiphy_ops_3ph_1_0; + +#endif /* QC_MSM_CAMSS_CSIPHY_H */ diff --git a/drivers/media/platform/qcom/camss/camss-ispif.c b/drivers/media/platform/qcom/camss/camss-ispif.c new file mode 100644 index 0000000000..b713f5b86a --- /dev/null +++ b/drivers/media/platform/qcom/camss/camss-ispif.c @@ -0,0 +1,1453 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * camss-ispif.c + * + * Qualcomm MSM Camera Subsystem - ISPIF (ISP Interface) Module + * + * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved. + * Copyright (C) 2015-2018 Linaro Ltd. 
+ */ +#include <linux/clk.h> +#include <linux/completion.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/kernel.h> +#include <linux/mutex.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <media/media-entity.h> +#include <media/v4l2-device.h> +#include <media/v4l2-subdev.h> + +#include "camss-ispif.h" +#include "camss.h" + +#define MSM_ISPIF_NAME "msm_ispif" + +#define ISPIF_RST_CMD_0 0x008 +#define ISPIF_RST_CMD_1 0x00c +#define ISPIF_RST_CMD_0_STROBED_RST_EN (1 << 0) +#define ISPIF_RST_CMD_0_MISC_LOGIC_RST (1 << 1) +#define ISPIF_RST_CMD_0_SW_REG_RST (1 << 2) +#define ISPIF_RST_CMD_0_PIX_INTF_0_CSID_RST (1 << 3) +#define ISPIF_RST_CMD_0_PIX_INTF_0_VFE_RST (1 << 4) +#define ISPIF_RST_CMD_0_PIX_INTF_1_CSID_RST (1 << 5) +#define ISPIF_RST_CMD_0_PIX_INTF_1_VFE_RST (1 << 6) +#define ISPIF_RST_CMD_0_RDI_INTF_0_CSID_RST (1 << 7) +#define ISPIF_RST_CMD_0_RDI_INTF_0_VFE_RST (1 << 8) +#define ISPIF_RST_CMD_0_RDI_INTF_1_CSID_RST (1 << 9) +#define ISPIF_RST_CMD_0_RDI_INTF_1_VFE_RST (1 << 10) +#define ISPIF_RST_CMD_0_RDI_INTF_2_CSID_RST (1 << 11) +#define ISPIF_RST_CMD_0_RDI_INTF_2_VFE_RST (1 << 12) +#define ISPIF_RST_CMD_0_PIX_OUTPUT_0_MISR_RST (1 << 16) +#define ISPIF_RST_CMD_0_RDI_OUTPUT_0_MISR_RST (1 << 17) +#define ISPIF_RST_CMD_0_RDI_OUTPUT_1_MISR_RST (1 << 18) +#define ISPIF_RST_CMD_0_RDI_OUTPUT_2_MISR_RST (1 << 19) +#define ISPIF_IRQ_GLOBAL_CLEAR_CMD 0x01c +#define ISPIF_VFE_m_CTRL_0(m) (0x200 + 0x200 * (m)) +#define ISPIF_VFE_m_CTRL_0_PIX0_LINE_BUF_EN (1 << 6) +#define ISPIF_VFE_m_IRQ_MASK_0(m) (0x208 + 0x200 * (m)) +#define ISPIF_VFE_m_IRQ_MASK_0_PIX0_ENABLE 0x00001249 +#define ISPIF_VFE_m_IRQ_MASK_0_PIX0_MASK 0x00001fff +#define ISPIF_VFE_m_IRQ_MASK_0_RDI0_ENABLE 0x02492000 +#define ISPIF_VFE_m_IRQ_MASK_0_RDI0_MASK 0x03ffe000 +#define ISPIF_VFE_m_IRQ_MASK_1(m) (0x20c + 0x200 * (m)) +#define ISPIF_VFE_m_IRQ_MASK_1_PIX1_ENABLE 0x00001249 +#define ISPIF_VFE_m_IRQ_MASK_1_PIX1_MASK 0x00001fff +#define ISPIF_VFE_m_IRQ_MASK_1_RDI1_ENABLE 0x02492000 +#define ISPIF_VFE_m_IRQ_MASK_1_RDI1_MASK 0x03ffe000 +#define ISPIF_VFE_m_IRQ_MASK_2(m) (0x210 + 0x200 * (m)) +#define ISPIF_VFE_m_IRQ_MASK_2_RDI2_ENABLE 0x00001249 +#define ISPIF_VFE_m_IRQ_MASK_2_RDI2_MASK 0x00001fff +#define ISPIF_VFE_m_IRQ_STATUS_0(m) (0x21c + 0x200 * (m)) +#define ISPIF_VFE_m_IRQ_STATUS_0_PIX0_OVERFLOW (1 << 12) +#define ISPIF_VFE_m_IRQ_STATUS_0_RDI0_OVERFLOW (1 << 25) +#define ISPIF_VFE_m_IRQ_STATUS_1(m) (0x220 + 0x200 * (m)) +#define ISPIF_VFE_m_IRQ_STATUS_1_PIX1_OVERFLOW (1 << 12) +#define ISPIF_VFE_m_IRQ_STATUS_1_RDI1_OVERFLOW (1 << 25) +#define ISPIF_VFE_m_IRQ_STATUS_2(m) (0x224 + 0x200 * (m)) +#define ISPIF_VFE_m_IRQ_STATUS_2_RDI2_OVERFLOW (1 << 12) +#define ISPIF_VFE_m_IRQ_CLEAR_0(m) (0x230 + 0x200 * (m)) +#define ISPIF_VFE_m_IRQ_CLEAR_1(m) (0x234 + 0x200 * (m)) +#define ISPIF_VFE_m_IRQ_CLEAR_2(m) (0x238 + 0x200 * (m)) +#define ISPIF_VFE_m_INTF_INPUT_SEL(m) (0x244 + 0x200 * (m)) +#define ISPIF_VFE_m_INTF_CMD_0(m) (0x248 + 0x200 * (m)) +#define ISPIF_VFE_m_INTF_CMD_1(m) (0x24c + 0x200 * (m)) +#define ISPIF_VFE_m_PIX_INTF_n_CID_MASK(m, n) \ + (0x254 + 0x200 * (m) + 0x4 * (n)) +#define ISPIF_VFE_m_RDI_INTF_n_CID_MASK(m, n) \ + (0x264 + 0x200 * (m) + 0x4 * (n)) +/* PACK_CFG registers are 8x96 only */ +#define ISPIF_VFE_m_RDI_INTF_n_PACK_CFG_0(m, n) \ + (0x270 + 0x200 * (m) + 0x4 * (n)) +#define ISPIF_VFE_m_RDI_INTF_n_PACK_CFG_1(m, n) \ + (0x27c + 0x200 * (m) + 0x4 * (n)) +#define ISPIF_VFE_m_RDI_INTF_n_PACK_CFG_0_CID_c_PLAIN(c) \ + (1 << ((cid % 8) 
* 4)) +#define ISPIF_VFE_m_PIX_INTF_n_STATUS(m, n) \ + (0x2c0 + 0x200 * (m) + 0x4 * (n)) +#define ISPIF_VFE_m_RDI_INTF_n_STATUS(m, n) \ + (0x2d0 + 0x200 * (m) + 0x4 * (n)) + +#define CSI_PIX_CLK_MUX_SEL 0x000 +#define CSI_RDI_CLK_MUX_SEL 0x008 + +#define ISPIF_TIMEOUT_SLEEP_US 1000 +#define ISPIF_TIMEOUT_ALL_US 1000000 +#define ISPIF_RESET_TIMEOUT_MS 500 + +enum ispif_intf_cmd { + CMD_DISABLE_FRAME_BOUNDARY = 0x0, + CMD_ENABLE_FRAME_BOUNDARY = 0x1, + CMD_DISABLE_IMMEDIATELY = 0x2, + CMD_ALL_DISABLE_IMMEDIATELY = 0xaaaaaaaa, + CMD_ALL_NO_CHANGE = 0xffffffff, +}; + +static const u32 ispif_formats_8x16[] = { + MEDIA_BUS_FMT_UYVY8_2X8, + MEDIA_BUS_FMT_VYUY8_2X8, + MEDIA_BUS_FMT_YUYV8_2X8, + MEDIA_BUS_FMT_YVYU8_2X8, + MEDIA_BUS_FMT_SBGGR8_1X8, + MEDIA_BUS_FMT_SGBRG8_1X8, + MEDIA_BUS_FMT_SGRBG8_1X8, + MEDIA_BUS_FMT_SRGGB8_1X8, + MEDIA_BUS_FMT_SBGGR10_1X10, + MEDIA_BUS_FMT_SGBRG10_1X10, + MEDIA_BUS_FMT_SGRBG10_1X10, + MEDIA_BUS_FMT_SRGGB10_1X10, + MEDIA_BUS_FMT_SBGGR12_1X12, + MEDIA_BUS_FMT_SGBRG12_1X12, + MEDIA_BUS_FMT_SGRBG12_1X12, + MEDIA_BUS_FMT_SRGGB12_1X12, + MEDIA_BUS_FMT_Y10_1X10, +}; + +static const u32 ispif_formats_8x96[] = { + MEDIA_BUS_FMT_UYVY8_2X8, + MEDIA_BUS_FMT_VYUY8_2X8, + MEDIA_BUS_FMT_YUYV8_2X8, + MEDIA_BUS_FMT_YVYU8_2X8, + MEDIA_BUS_FMT_SBGGR8_1X8, + MEDIA_BUS_FMT_SGBRG8_1X8, + MEDIA_BUS_FMT_SGRBG8_1X8, + MEDIA_BUS_FMT_SRGGB8_1X8, + MEDIA_BUS_FMT_SBGGR10_1X10, + MEDIA_BUS_FMT_SGBRG10_1X10, + MEDIA_BUS_FMT_SGRBG10_1X10, + MEDIA_BUS_FMT_SRGGB10_1X10, + MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE, + MEDIA_BUS_FMT_SBGGR12_1X12, + MEDIA_BUS_FMT_SGBRG12_1X12, + MEDIA_BUS_FMT_SGRBG12_1X12, + MEDIA_BUS_FMT_SRGGB12_1X12, + MEDIA_BUS_FMT_SBGGR14_1X14, + MEDIA_BUS_FMT_SGBRG14_1X14, + MEDIA_BUS_FMT_SGRBG14_1X14, + MEDIA_BUS_FMT_SRGGB14_1X14, + MEDIA_BUS_FMT_Y10_1X10, + MEDIA_BUS_FMT_Y10_2X8_PADHI_LE, +}; + +/* + * ispif_isr_8x96 - ISPIF module interrupt handler for 8x96 + * @irq: Interrupt line + * @dev: ISPIF device + * + * Return IRQ_HANDLED on success + */ +static irqreturn_t ispif_isr_8x96(int irq, void *dev) +{ + struct ispif_device *ispif = dev; + struct camss *camss = ispif->camss; + u32 value0, value1, value2, value3, value4, value5; + + value0 = readl_relaxed(ispif->base + ISPIF_VFE_m_IRQ_STATUS_0(0)); + value1 = readl_relaxed(ispif->base + ISPIF_VFE_m_IRQ_STATUS_1(0)); + value2 = readl_relaxed(ispif->base + ISPIF_VFE_m_IRQ_STATUS_2(0)); + value3 = readl_relaxed(ispif->base + ISPIF_VFE_m_IRQ_STATUS_0(1)); + value4 = readl_relaxed(ispif->base + ISPIF_VFE_m_IRQ_STATUS_1(1)); + value5 = readl_relaxed(ispif->base + ISPIF_VFE_m_IRQ_STATUS_2(1)); + + writel_relaxed(value0, ispif->base + ISPIF_VFE_m_IRQ_CLEAR_0(0)); + writel_relaxed(value1, ispif->base + ISPIF_VFE_m_IRQ_CLEAR_1(0)); + writel_relaxed(value2, ispif->base + ISPIF_VFE_m_IRQ_CLEAR_2(0)); + writel_relaxed(value3, ispif->base + ISPIF_VFE_m_IRQ_CLEAR_0(1)); + writel_relaxed(value4, ispif->base + ISPIF_VFE_m_IRQ_CLEAR_1(1)); + writel_relaxed(value5, ispif->base + ISPIF_VFE_m_IRQ_CLEAR_2(1)); + + writel(0x1, ispif->base + ISPIF_IRQ_GLOBAL_CLEAR_CMD); + + if ((value0 >> 27) & 0x1) + complete(&ispif->reset_complete[0]); + + if ((value3 >> 27) & 0x1) + complete(&ispif->reset_complete[1]); + + if (unlikely(value0 & ISPIF_VFE_m_IRQ_STATUS_0_PIX0_OVERFLOW)) + dev_err_ratelimited(camss->dev, "VFE0 pix0 overflow\n"); + + if (unlikely(value0 & ISPIF_VFE_m_IRQ_STATUS_0_RDI0_OVERFLOW)) + dev_err_ratelimited(camss->dev, "VFE0 rdi0 overflow\n"); + + if (unlikely(value1 & ISPIF_VFE_m_IRQ_STATUS_1_PIX1_OVERFLOW)) + dev_err_ratelimited(camss->dev, 
"VFE0 pix1 overflow\n"); + + if (unlikely(value1 & ISPIF_VFE_m_IRQ_STATUS_1_RDI1_OVERFLOW)) + dev_err_ratelimited(camss->dev, "VFE0 rdi1 overflow\n"); + + if (unlikely(value2 & ISPIF_VFE_m_IRQ_STATUS_2_RDI2_OVERFLOW)) + dev_err_ratelimited(camss->dev, "VFE0 rdi2 overflow\n"); + + if (unlikely(value3 & ISPIF_VFE_m_IRQ_STATUS_0_PIX0_OVERFLOW)) + dev_err_ratelimited(camss->dev, "VFE1 pix0 overflow\n"); + + if (unlikely(value3 & ISPIF_VFE_m_IRQ_STATUS_0_RDI0_OVERFLOW)) + dev_err_ratelimited(camss->dev, "VFE1 rdi0 overflow\n"); + + if (unlikely(value4 & ISPIF_VFE_m_IRQ_STATUS_1_PIX1_OVERFLOW)) + dev_err_ratelimited(camss->dev, "VFE1 pix1 overflow\n"); + + if (unlikely(value4 & ISPIF_VFE_m_IRQ_STATUS_1_RDI1_OVERFLOW)) + dev_err_ratelimited(camss->dev, "VFE1 rdi1 overflow\n"); + + if (unlikely(value5 & ISPIF_VFE_m_IRQ_STATUS_2_RDI2_OVERFLOW)) + dev_err_ratelimited(camss->dev, "VFE1 rdi2 overflow\n"); + + return IRQ_HANDLED; +} + +/* + * ispif_isr_8x16 - ISPIF module interrupt handler for 8x16 + * @irq: Interrupt line + * @dev: ISPIF device + * + * Return IRQ_HANDLED on success + */ +static irqreturn_t ispif_isr_8x16(int irq, void *dev) +{ + struct ispif_device *ispif = dev; + struct camss *camss = ispif->camss; + u32 value0, value1, value2; + + value0 = readl_relaxed(ispif->base + ISPIF_VFE_m_IRQ_STATUS_0(0)); + value1 = readl_relaxed(ispif->base + ISPIF_VFE_m_IRQ_STATUS_1(0)); + value2 = readl_relaxed(ispif->base + ISPIF_VFE_m_IRQ_STATUS_2(0)); + + writel_relaxed(value0, ispif->base + ISPIF_VFE_m_IRQ_CLEAR_0(0)); + writel_relaxed(value1, ispif->base + ISPIF_VFE_m_IRQ_CLEAR_1(0)); + writel_relaxed(value2, ispif->base + ISPIF_VFE_m_IRQ_CLEAR_2(0)); + + writel(0x1, ispif->base + ISPIF_IRQ_GLOBAL_CLEAR_CMD); + + if ((value0 >> 27) & 0x1) + complete(&ispif->reset_complete[0]); + + if (unlikely(value0 & ISPIF_VFE_m_IRQ_STATUS_0_PIX0_OVERFLOW)) + dev_err_ratelimited(camss->dev, "VFE0 pix0 overflow\n"); + + if (unlikely(value0 & ISPIF_VFE_m_IRQ_STATUS_0_RDI0_OVERFLOW)) + dev_err_ratelimited(camss->dev, "VFE0 rdi0 overflow\n"); + + if (unlikely(value1 & ISPIF_VFE_m_IRQ_STATUS_1_PIX1_OVERFLOW)) + dev_err_ratelimited(camss->dev, "VFE0 pix1 overflow\n"); + + if (unlikely(value1 & ISPIF_VFE_m_IRQ_STATUS_1_RDI1_OVERFLOW)) + dev_err_ratelimited(camss->dev, "VFE0 rdi1 overflow\n"); + + if (unlikely(value2 & ISPIF_VFE_m_IRQ_STATUS_2_RDI2_OVERFLOW)) + dev_err_ratelimited(camss->dev, "VFE0 rdi2 overflow\n"); + + return IRQ_HANDLED; +} + +static int ispif_vfe_reset(struct ispif_device *ispif, u8 vfe_id) +{ + struct camss *camss = ispif->camss; + + unsigned long time; + u32 val; + + if (vfe_id > (camss->vfe_num - 1)) { + dev_err(camss->dev, + "Error: asked reset for invalid VFE%d\n", vfe_id); + return -ENOENT; + } + + reinit_completion(&ispif->reset_complete[vfe_id]); + + val = ISPIF_RST_CMD_0_STROBED_RST_EN | + ISPIF_RST_CMD_0_MISC_LOGIC_RST | + ISPIF_RST_CMD_0_SW_REG_RST | + ISPIF_RST_CMD_0_PIX_INTF_0_CSID_RST | + ISPIF_RST_CMD_0_PIX_INTF_0_VFE_RST | + ISPIF_RST_CMD_0_PIX_INTF_1_CSID_RST | + ISPIF_RST_CMD_0_PIX_INTF_1_VFE_RST | + ISPIF_RST_CMD_0_RDI_INTF_0_CSID_RST | + ISPIF_RST_CMD_0_RDI_INTF_0_VFE_RST | + ISPIF_RST_CMD_0_RDI_INTF_1_CSID_RST | + ISPIF_RST_CMD_0_RDI_INTF_1_VFE_RST | + ISPIF_RST_CMD_0_RDI_INTF_2_CSID_RST | + ISPIF_RST_CMD_0_RDI_INTF_2_VFE_RST | + ISPIF_RST_CMD_0_PIX_OUTPUT_0_MISR_RST | + ISPIF_RST_CMD_0_RDI_OUTPUT_0_MISR_RST | + ISPIF_RST_CMD_0_RDI_OUTPUT_1_MISR_RST | + ISPIF_RST_CMD_0_RDI_OUTPUT_2_MISR_RST; + + if (vfe_id == 1) + writel_relaxed(val, ispif->base + ISPIF_RST_CMD_1); + else + 
writel_relaxed(val, ispif->base + ISPIF_RST_CMD_0); + + time = wait_for_completion_timeout(&ispif->reset_complete[vfe_id], + msecs_to_jiffies(ISPIF_RESET_TIMEOUT_MS)); + if (!time) { + dev_err(camss->dev, + "ISPIF for VFE%d reset timeout\n", vfe_id); + return -EIO; + } + + return 0; +} + +/* + * ispif_reset - Trigger reset on ISPIF module and wait to complete + * @ispif: ISPIF device + * + * Return 0 on success or a negative error code otherwise + */ +static int ispif_reset(struct ispif_device *ispif, u8 vfe_id) +{ + struct camss *camss = ispif->camss; + int ret; + + ret = camss_pm_domain_on(camss, PM_DOMAIN_VFE0); + if (ret < 0) + return ret; + + ret = camss_pm_domain_on(camss, PM_DOMAIN_VFE1); + if (ret < 0) + return ret; + + ret = camss_enable_clocks(ispif->nclocks_for_reset, + ispif->clock_for_reset, + camss->dev); + if (ret < 0) + return ret; + + ret = ispif_vfe_reset(ispif, vfe_id); + if (ret) + dev_dbg(camss->dev, "ISPIF Reset failed\n"); + + camss_disable_clocks(ispif->nclocks_for_reset, ispif->clock_for_reset); + + camss_pm_domain_off(camss, PM_DOMAIN_VFE0); + camss_pm_domain_off(camss, PM_DOMAIN_VFE1); + + return ret; +} + +/* + * ispif_set_power - Power on/off ISPIF module + * @sd: ISPIF V4L2 subdevice + * @on: Requested power state + * + * Return 0 on success or a negative error code otherwise + */ +static int ispif_set_power(struct v4l2_subdev *sd, int on) +{ + struct ispif_line *line = v4l2_get_subdevdata(sd); + struct ispif_device *ispif = line->ispif; + struct device *dev = ispif->camss->dev; + int ret = 0; + + mutex_lock(&ispif->power_lock); + + if (on) { + if (ispif->power_count) { + /* Power is already on */ + ispif->power_count++; + goto exit; + } + + ret = pm_runtime_resume_and_get(dev); + if (ret < 0) + goto exit; + + ret = camss_enable_clocks(ispif->nclocks, ispif->clock, dev); + if (ret < 0) { + pm_runtime_put_sync(dev); + goto exit; + } + + ret = ispif_reset(ispif, line->vfe_id); + if (ret < 0) { + pm_runtime_put_sync(dev); + camss_disable_clocks(ispif->nclocks, ispif->clock); + goto exit; + } + + ispif->intf_cmd[line->vfe_id].cmd_0 = CMD_ALL_NO_CHANGE; + ispif->intf_cmd[line->vfe_id].cmd_1 = CMD_ALL_NO_CHANGE; + + ispif->power_count++; + } else { + if (ispif->power_count == 0) { + dev_err(dev, "ispif power off on power_count == 0\n"); + goto exit; + } else if (ispif->power_count == 1) { + camss_disable_clocks(ispif->nclocks, ispif->clock); + pm_runtime_put_sync(dev); + } + + ispif->power_count--; + } + +exit: + mutex_unlock(&ispif->power_lock); + + return ret; +} + +/* + * ispif_select_clk_mux - Select clock for PIX/RDI interface + * @ispif: ISPIF device + * @intf: VFE interface + * @csid: CSID HW module id + * @vfe: VFE HW module id + * @enable: enable or disable the selected clock + */ +static void ispif_select_clk_mux(struct ispif_device *ispif, + enum ispif_intf intf, u8 csid, + u8 vfe, u8 enable) +{ + u32 val; + + switch (intf) { + case PIX0: + val = readl_relaxed(ispif->base_clk_mux + CSI_PIX_CLK_MUX_SEL); + val &= ~(0xf << (vfe * 8)); + if (enable) + val |= (csid << (vfe * 8)); + writel_relaxed(val, ispif->base_clk_mux + CSI_PIX_CLK_MUX_SEL); + break; + + case RDI0: + val = readl_relaxed(ispif->base_clk_mux + CSI_RDI_CLK_MUX_SEL); + val &= ~(0xf << (vfe * 12)); + if (enable) + val |= (csid << (vfe * 12)); + writel_relaxed(val, ispif->base_clk_mux + CSI_RDI_CLK_MUX_SEL); + break; + + case PIX1: + val = readl_relaxed(ispif->base_clk_mux + CSI_PIX_CLK_MUX_SEL); + val &= ~(0xf << (4 + (vfe * 8))); + if (enable) + val |= (csid << (4 + (vfe * 8))); + 
writel_relaxed(val, ispif->base_clk_mux + CSI_PIX_CLK_MUX_SEL); + break; + + case RDI1: + val = readl_relaxed(ispif->base_clk_mux + CSI_RDI_CLK_MUX_SEL); + val &= ~(0xf << (4 + (vfe * 12))); + if (enable) + val |= (csid << (4 + (vfe * 12))); + writel_relaxed(val, ispif->base_clk_mux + CSI_RDI_CLK_MUX_SEL); + break; + + case RDI2: + val = readl_relaxed(ispif->base_clk_mux + CSI_RDI_CLK_MUX_SEL); + val &= ~(0xf << (8 + (vfe * 12))); + if (enable) + val |= (csid << (8 + (vfe * 12))); + writel_relaxed(val, ispif->base_clk_mux + CSI_RDI_CLK_MUX_SEL); + break; + } + + mb(); +} + +/* + * ispif_validate_intf_status - Validate current status of PIX/RDI interface + * @ispif: ISPIF device + * @intf: VFE interface + * @vfe: VFE HW module id + * + * Return 0 when interface is idle or -EBUSY otherwise + */ +static int ispif_validate_intf_status(struct ispif_device *ispif, + enum ispif_intf intf, u8 vfe) +{ + int ret = 0; + u32 val = 0; + + switch (intf) { + case PIX0: + val = readl_relaxed(ispif->base + + ISPIF_VFE_m_PIX_INTF_n_STATUS(vfe, 0)); + break; + case RDI0: + val = readl_relaxed(ispif->base + + ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe, 0)); + break; + case PIX1: + val = readl_relaxed(ispif->base + + ISPIF_VFE_m_PIX_INTF_n_STATUS(vfe, 1)); + break; + case RDI1: + val = readl_relaxed(ispif->base + + ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe, 1)); + break; + case RDI2: + val = readl_relaxed(ispif->base + + ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe, 2)); + break; + } + + if ((val & 0xf) != 0xf) { + dev_err(ispif->camss->dev, "%s: ispif is busy: 0x%x\n", + __func__, val); + ret = -EBUSY; + } + + return ret; +} + +/* + * ispif_wait_for_stop - Wait for PIX/RDI interface to stop + * @ispif: ISPIF device + * @intf: VFE interface + * @vfe: VFE HW module id + * + * Return 0 on success or a negative error code otherwise + */ +static int ispif_wait_for_stop(struct ispif_device *ispif, + enum ispif_intf intf, u8 vfe) +{ + u32 addr = 0; + u32 stop_flag = 0; + int ret; + + switch (intf) { + case PIX0: + addr = ISPIF_VFE_m_PIX_INTF_n_STATUS(vfe, 0); + break; + case RDI0: + addr = ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe, 0); + break; + case PIX1: + addr = ISPIF_VFE_m_PIX_INTF_n_STATUS(vfe, 1); + break; + case RDI1: + addr = ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe, 1); + break; + case RDI2: + addr = ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe, 2); + break; + } + + ret = readl_poll_timeout(ispif->base + addr, + stop_flag, + (stop_flag & 0xf) == 0xf, + ISPIF_TIMEOUT_SLEEP_US, + ISPIF_TIMEOUT_ALL_US); + if (ret < 0) + dev_err(ispif->camss->dev, "%s: ispif stop timeout\n", + __func__); + + return ret; +} + +/* + * ispif_select_csid - Select CSID HW module for input from + * @ispif: ISPIF device + * @intf: VFE interface + * @csid: CSID HW module id + * @vfe: VFE HW module id + * @enable: enable or disable the selected input + */ +static void ispif_select_csid(struct ispif_device *ispif, enum ispif_intf intf, + u8 csid, u8 vfe, u8 enable) +{ + u32 val; + + val = readl_relaxed(ispif->base + ISPIF_VFE_m_INTF_INPUT_SEL(vfe)); + switch (intf) { + case PIX0: + val &= ~(BIT(1) | BIT(0)); + if (enable) + val |= csid; + break; + case RDI0: + val &= ~(BIT(5) | BIT(4)); + if (enable) + val |= (csid << 4); + break; + case PIX1: + val &= ~(BIT(9) | BIT(8)); + if (enable) + val |= (csid << 8); + break; + case RDI1: + val &= ~(BIT(13) | BIT(12)); + if (enable) + val |= (csid << 12); + break; + case RDI2: + val &= ~(BIT(21) | BIT(20)); + if (enable) + val |= (csid << 20); + break; + } + + writel(val, ispif->base + ISPIF_VFE_m_INTF_INPUT_SEL(vfe)); +} + +/* + * ispif_select_cid 
- Enable/disable desired CID + * @ispif: ISPIF device + * @intf: VFE interface + * @cid: desired CID to enable/disable + * @vfe: VFE HW module id + * @enable: enable or disable the desired CID + */ +static void ispif_select_cid(struct ispif_device *ispif, enum ispif_intf intf, + u8 cid, u8 vfe, u8 enable) +{ + u32 cid_mask = 1 << cid; + u32 addr = 0; + u32 val; + + switch (intf) { + case PIX0: + addr = ISPIF_VFE_m_PIX_INTF_n_CID_MASK(vfe, 0); + break; + case RDI0: + addr = ISPIF_VFE_m_RDI_INTF_n_CID_MASK(vfe, 0); + break; + case PIX1: + addr = ISPIF_VFE_m_PIX_INTF_n_CID_MASK(vfe, 1); + break; + case RDI1: + addr = ISPIF_VFE_m_RDI_INTF_n_CID_MASK(vfe, 1); + break; + case RDI2: + addr = ISPIF_VFE_m_RDI_INTF_n_CID_MASK(vfe, 2); + break; + } + + val = readl_relaxed(ispif->base + addr); + if (enable) + val |= cid_mask; + else + val &= ~cid_mask; + + writel(val, ispif->base + addr); +} + +/* + * ispif_config_irq - Enable/disable interrupts for PIX/RDI interface + * @ispif: ISPIF device + * @intf: VFE interface + * @vfe: VFE HW module id + * @enable: enable or disable + */ +static void ispif_config_irq(struct ispif_device *ispif, enum ispif_intf intf, + u8 vfe, u8 enable) +{ + u32 val; + + switch (intf) { + case PIX0: + val = readl_relaxed(ispif->base + ISPIF_VFE_m_IRQ_MASK_0(vfe)); + val &= ~ISPIF_VFE_m_IRQ_MASK_0_PIX0_MASK; + if (enable) + val |= ISPIF_VFE_m_IRQ_MASK_0_PIX0_ENABLE; + writel_relaxed(val, ispif->base + ISPIF_VFE_m_IRQ_MASK_0(vfe)); + writel_relaxed(ISPIF_VFE_m_IRQ_MASK_0_PIX0_ENABLE, + ispif->base + ISPIF_VFE_m_IRQ_CLEAR_0(vfe)); + break; + case RDI0: + val = readl_relaxed(ispif->base + ISPIF_VFE_m_IRQ_MASK_0(vfe)); + val &= ~ISPIF_VFE_m_IRQ_MASK_0_RDI0_MASK; + if (enable) + val |= ISPIF_VFE_m_IRQ_MASK_0_RDI0_ENABLE; + writel_relaxed(val, ispif->base + ISPIF_VFE_m_IRQ_MASK_0(vfe)); + writel_relaxed(ISPIF_VFE_m_IRQ_MASK_0_RDI0_ENABLE, + ispif->base + ISPIF_VFE_m_IRQ_CLEAR_0(vfe)); + break; + case PIX1: + val = readl_relaxed(ispif->base + ISPIF_VFE_m_IRQ_MASK_1(vfe)); + val &= ~ISPIF_VFE_m_IRQ_MASK_1_PIX1_MASK; + if (enable) + val |= ISPIF_VFE_m_IRQ_MASK_1_PIX1_ENABLE; + writel_relaxed(val, ispif->base + ISPIF_VFE_m_IRQ_MASK_1(vfe)); + writel_relaxed(ISPIF_VFE_m_IRQ_MASK_1_PIX1_ENABLE, + ispif->base + ISPIF_VFE_m_IRQ_CLEAR_1(vfe)); + break; + case RDI1: + val = readl_relaxed(ispif->base + ISPIF_VFE_m_IRQ_MASK_1(vfe)); + val &= ~ISPIF_VFE_m_IRQ_MASK_1_RDI1_MASK; + if (enable) + val |= ISPIF_VFE_m_IRQ_MASK_1_RDI1_ENABLE; + writel_relaxed(val, ispif->base + ISPIF_VFE_m_IRQ_MASK_1(vfe)); + writel_relaxed(ISPIF_VFE_m_IRQ_MASK_1_RDI1_ENABLE, + ispif->base + ISPIF_VFE_m_IRQ_CLEAR_1(vfe)); + break; + case RDI2: + val = readl_relaxed(ispif->base + ISPIF_VFE_m_IRQ_MASK_2(vfe)); + val &= ~ISPIF_VFE_m_IRQ_MASK_2_RDI2_MASK; + if (enable) + val |= ISPIF_VFE_m_IRQ_MASK_2_RDI2_ENABLE; + writel_relaxed(val, ispif->base + ISPIF_VFE_m_IRQ_MASK_2(vfe)); + writel_relaxed(ISPIF_VFE_m_IRQ_MASK_2_RDI2_ENABLE, + ispif->base + ISPIF_VFE_m_IRQ_CLEAR_2(vfe)); + break; + } + + writel(0x1, ispif->base + ISPIF_IRQ_GLOBAL_CLEAR_CMD); +} + +/* + * ispif_config_pack - Config packing for PRDI mode + * @ispif: ISPIF device + * @code: media bus format code + * @intf: VFE interface + * @cid: desired CID to handle + * @vfe: VFE HW module id + * @enable: enable or disable + */ +static void ispif_config_pack(struct ispif_device *ispif, u32 code, + enum ispif_intf intf, u8 cid, u8 vfe, u8 enable) +{ + u32 addr, val; + + if (code != MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE && + code != MEDIA_BUS_FMT_Y10_2X8_PADHI_LE) + return; + 
+ switch (intf) { + case RDI0: + if (cid < 8) + addr = ISPIF_VFE_m_RDI_INTF_n_PACK_CFG_0(vfe, 0); + else + addr = ISPIF_VFE_m_RDI_INTF_n_PACK_CFG_1(vfe, 0); + break; + case RDI1: + if (cid < 8) + addr = ISPIF_VFE_m_RDI_INTF_n_PACK_CFG_0(vfe, 1); + else + addr = ISPIF_VFE_m_RDI_INTF_n_PACK_CFG_1(vfe, 1); + break; + case RDI2: + if (cid < 8) + addr = ISPIF_VFE_m_RDI_INTF_n_PACK_CFG_0(vfe, 2); + else + addr = ISPIF_VFE_m_RDI_INTF_n_PACK_CFG_1(vfe, 2); + break; + default: + return; + } + + if (enable) + val = ISPIF_VFE_m_RDI_INTF_n_PACK_CFG_0_CID_c_PLAIN(cid); + else + val = 0; + + writel_relaxed(val, ispif->base + addr); +} + +/* + * ispif_set_intf_cmd - Set command to enable/disable interface + * @ispif: ISPIF device + * @cmd: interface command + * @intf: VFE interface + * @vfe: VFE HW module id + * @vc: virtual channel + */ +static void ispif_set_intf_cmd(struct ispif_device *ispif, u8 cmd, + enum ispif_intf intf, u8 vfe, u8 vc) +{ + u32 *val; + + if (intf == RDI2) { + val = &ispif->intf_cmd[vfe].cmd_1; + *val &= ~(0x3 << (vc * 2 + 8)); + *val |= (cmd << (vc * 2 + 8)); + wmb(); + writel_relaxed(*val, ispif->base + ISPIF_VFE_m_INTF_CMD_1(vfe)); + wmb(); + } else { + val = &ispif->intf_cmd[vfe].cmd_0; + *val &= ~(0x3 << (vc * 2 + intf * 8)); + *val |= (cmd << (vc * 2 + intf * 8)); + wmb(); + writel_relaxed(*val, ispif->base + ISPIF_VFE_m_INTF_CMD_0(vfe)); + wmb(); + } +} + +/* + * ispif_set_stream - Enable/disable streaming on ISPIF module + * @sd: ISPIF V4L2 subdevice + * @enable: Requested streaming state + * + * Main configuration of ISPIF module is also done here. + * + * Return 0 on success or a negative error code otherwise + */ +static int ispif_set_stream(struct v4l2_subdev *sd, int enable) +{ + struct ispif_line *line = v4l2_get_subdevdata(sd); + struct ispif_device *ispif = line->ispif; + struct camss *camss = ispif->camss; + enum ispif_intf intf = line->interface; + u8 csid = line->csid_id; + u8 vfe = line->vfe_id; + u8 vc = 0; /* Virtual Channel 0 */ + u8 cid = vc * 4; /* id of Virtual Channel and Data Type set */ + int ret; + + if (enable) { + if (!media_pad_remote_pad_first(&line->pads[MSM_ISPIF_PAD_SINK])) + return -ENOLINK; + + /* Config */ + + mutex_lock(&ispif->config_lock); + ispif_select_clk_mux(ispif, intf, csid, vfe, 1); + + ret = ispif_validate_intf_status(ispif, intf, vfe); + if (ret < 0) { + mutex_unlock(&ispif->config_lock); + return ret; + } + + ispif_select_csid(ispif, intf, csid, vfe, 1); + ispif_select_cid(ispif, intf, cid, vfe, 1); + ispif_config_irq(ispif, intf, vfe, 1); + if (camss->version == CAMSS_8x96 || + camss->version == CAMSS_660) + ispif_config_pack(ispif, + line->fmt[MSM_ISPIF_PAD_SINK].code, + intf, cid, vfe, 1); + ispif_set_intf_cmd(ispif, CMD_ENABLE_FRAME_BOUNDARY, + intf, vfe, vc); + } else { + mutex_lock(&ispif->config_lock); + ispif_set_intf_cmd(ispif, CMD_DISABLE_FRAME_BOUNDARY, + intf, vfe, vc); + mutex_unlock(&ispif->config_lock); + + ret = ispif_wait_for_stop(ispif, intf, vfe); + if (ret < 0) + return ret; + + mutex_lock(&ispif->config_lock); + if (camss->version == CAMSS_8x96 || + camss->version == CAMSS_660) + ispif_config_pack(ispif, + line->fmt[MSM_ISPIF_PAD_SINK].code, + intf, cid, vfe, 0); + ispif_config_irq(ispif, intf, vfe, 0); + ispif_select_cid(ispif, intf, cid, vfe, 0); + ispif_select_csid(ispif, intf, csid, vfe, 0); + ispif_select_clk_mux(ispif, intf, csid, vfe, 0); + } + + mutex_unlock(&ispif->config_lock); + + return 0; +} + +/* + * __ispif_get_format - Get pointer to format structure + * @ispif: ISPIF line + * @cfg: V4L2 
subdev pad configuration + * @pad: pad from which format is requested + * @which: TRY or ACTIVE format + * + * Return pointer to TRY or ACTIVE format structure + */ +static struct v4l2_mbus_framefmt * +__ispif_get_format(struct ispif_line *line, + struct v4l2_subdev_state *sd_state, + unsigned int pad, + enum v4l2_subdev_format_whence which) +{ + if (which == V4L2_SUBDEV_FORMAT_TRY) + return v4l2_subdev_get_try_format(&line->subdev, sd_state, + pad); + + return &line->fmt[pad]; +} + +/* + * ispif_try_format - Handle try format by pad subdev method + * @ispif: ISPIF line + * @cfg: V4L2 subdev pad configuration + * @pad: pad on which format is requested + * @fmt: pointer to v4l2 format structure + * @which: wanted subdev format + */ +static void ispif_try_format(struct ispif_line *line, + struct v4l2_subdev_state *sd_state, + unsigned int pad, + struct v4l2_mbus_framefmt *fmt, + enum v4l2_subdev_format_whence which) +{ + unsigned int i; + + switch (pad) { + case MSM_ISPIF_PAD_SINK: + /* Set format on sink pad */ + + for (i = 0; i < line->nformats; i++) + if (fmt->code == line->formats[i]) + break; + + /* If not found, use UYVY as default */ + if (i >= line->nformats) + fmt->code = MEDIA_BUS_FMT_UYVY8_2X8; + + fmt->width = clamp_t(u32, fmt->width, 1, 8191); + fmt->height = clamp_t(u32, fmt->height, 1, 8191); + + fmt->field = V4L2_FIELD_NONE; + fmt->colorspace = V4L2_COLORSPACE_SRGB; + + break; + + case MSM_ISPIF_PAD_SRC: + /* Set and return a format same as sink pad */ + + *fmt = *__ispif_get_format(line, sd_state, MSM_ISPIF_PAD_SINK, + which); + + break; + } + + fmt->colorspace = V4L2_COLORSPACE_SRGB; +} + +/* + * ispif_enum_mbus_code - Handle pixel format enumeration + * @sd: ISPIF V4L2 subdevice + * @cfg: V4L2 subdev pad configuration + * @code: pointer to v4l2_subdev_mbus_code_enum structure + * return -EINVAL or zero on success + */ +static int ispif_enum_mbus_code(struct v4l2_subdev *sd, + struct v4l2_subdev_state *sd_state, + struct v4l2_subdev_mbus_code_enum *code) +{ + struct ispif_line *line = v4l2_get_subdevdata(sd); + struct v4l2_mbus_framefmt *format; + + if (code->pad == MSM_ISPIF_PAD_SINK) { + if (code->index >= line->nformats) + return -EINVAL; + + code->code = line->formats[code->index]; + } else { + if (code->index > 0) + return -EINVAL; + + format = __ispif_get_format(line, sd_state, + MSM_ISPIF_PAD_SINK, + code->which); + + code->code = format->code; + } + + return 0; +} + +/* + * ispif_enum_frame_size - Handle frame size enumeration + * @sd: ISPIF V4L2 subdevice + * @cfg: V4L2 subdev pad configuration + * @fse: pointer to v4l2_subdev_frame_size_enum structure + * return -EINVAL or zero on success + */ +static int ispif_enum_frame_size(struct v4l2_subdev *sd, + struct v4l2_subdev_state *sd_state, + struct v4l2_subdev_frame_size_enum *fse) +{ + struct ispif_line *line = v4l2_get_subdevdata(sd); + struct v4l2_mbus_framefmt format; + + if (fse->index != 0) + return -EINVAL; + + format.code = fse->code; + format.width = 1; + format.height = 1; + ispif_try_format(line, sd_state, fse->pad, &format, fse->which); + fse->min_width = format.width; + fse->min_height = format.height; + + if (format.code != fse->code) + return -EINVAL; + + format.code = fse->code; + format.width = -1; + format.height = -1; + ispif_try_format(line, sd_state, fse->pad, &format, fse->which); + fse->max_width = format.width; + fse->max_height = format.height; + + return 0; +} + +/* + * ispif_get_format - Handle get format by pads subdev method + * @sd: ISPIF V4L2 subdevice + * @cfg: V4L2 subdev pad 
configuration + * @fmt: pointer to v4l2 subdev format structure + * + * Return -EINVAL or zero on success + */ +static int ispif_get_format(struct v4l2_subdev *sd, + struct v4l2_subdev_state *sd_state, + struct v4l2_subdev_format *fmt) +{ + struct ispif_line *line = v4l2_get_subdevdata(sd); + struct v4l2_mbus_framefmt *format; + + format = __ispif_get_format(line, sd_state, fmt->pad, fmt->which); + if (format == NULL) + return -EINVAL; + + fmt->format = *format; + + return 0; +} + +/* + * ispif_set_format - Handle set format by pads subdev method + * @sd: ISPIF V4L2 subdevice + * @cfg: V4L2 subdev pad configuration + * @fmt: pointer to v4l2 subdev format structure + * + * Return -EINVAL or zero on success + */ +static int ispif_set_format(struct v4l2_subdev *sd, + struct v4l2_subdev_state *sd_state, + struct v4l2_subdev_format *fmt) +{ + struct ispif_line *line = v4l2_get_subdevdata(sd); + struct v4l2_mbus_framefmt *format; + + format = __ispif_get_format(line, sd_state, fmt->pad, fmt->which); + if (format == NULL) + return -EINVAL; + + ispif_try_format(line, sd_state, fmt->pad, &fmt->format, fmt->which); + *format = fmt->format; + + /* Propagate the format from sink to source */ + if (fmt->pad == MSM_ISPIF_PAD_SINK) { + format = __ispif_get_format(line, sd_state, MSM_ISPIF_PAD_SRC, + fmt->which); + + *format = fmt->format; + ispif_try_format(line, sd_state, MSM_ISPIF_PAD_SRC, format, + fmt->which); + } + + return 0; +} + +/* + * ispif_init_formats - Initialize formats on all pads + * @sd: ISPIF V4L2 subdevice + * @fh: V4L2 subdev file handle + * + * Initialize all pad formats with default values. + * + * Return 0 on success or a negative error code otherwise + */ +static int ispif_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) +{ + struct v4l2_subdev_format format = { + .pad = MSM_ISPIF_PAD_SINK, + .which = fh ? V4L2_SUBDEV_FORMAT_TRY : + V4L2_SUBDEV_FORMAT_ACTIVE, + .format = { + .code = MEDIA_BUS_FMT_UYVY8_2X8, + .width = 1920, + .height = 1080 + } + }; + + return ispif_set_format(sd, fh ? 
fh->state : NULL, &format); +} + +/* + * msm_ispif_subdev_init - Initialize ISPIF device structure and resources + * @ispif: ISPIF device + * @res: ISPIF module resources table + * + * Return 0 on success or a negative error code otherwise + */ +int msm_ispif_subdev_init(struct camss *camss, + const struct resources_ispif *res) +{ + struct device *dev = camss->dev; + struct ispif_device *ispif = camss->ispif; + struct platform_device *pdev = to_platform_device(dev); + int i; + int ret; + + if (!camss->ispif) + return 0; + + ispif->camss = camss; + + /* Number of ISPIF lines - same as number of CSID hardware modules */ + if (camss->version == CAMSS_8x16) + ispif->line_num = 2; + else if (camss->version == CAMSS_8x96 || + camss->version == CAMSS_660) + ispif->line_num = 4; + else + return -EINVAL; + + ispif->line = devm_kcalloc(dev, ispif->line_num, + sizeof(*ispif->line), GFP_KERNEL); + if (!ispif->line) + return -ENOMEM; + + for (i = 0; i < ispif->line_num; i++) { + ispif->line[i].ispif = ispif; + ispif->line[i].id = i; + + if (camss->version == CAMSS_8x16) { + ispif->line[i].formats = ispif_formats_8x16; + ispif->line[i].nformats = + ARRAY_SIZE(ispif_formats_8x16); + } else if (camss->version == CAMSS_8x96 || + camss->version == CAMSS_660) { + ispif->line[i].formats = ispif_formats_8x96; + ispif->line[i].nformats = + ARRAY_SIZE(ispif_formats_8x96); + } else { + return -EINVAL; + } + } + + /* Memory */ + + ispif->base = devm_platform_ioremap_resource_byname(pdev, res->reg[0]); + if (IS_ERR(ispif->base)) + return PTR_ERR(ispif->base); + + ispif->base_clk_mux = devm_platform_ioremap_resource_byname(pdev, res->reg[1]); + if (IS_ERR(ispif->base_clk_mux)) + return PTR_ERR(ispif->base_clk_mux); + + /* Interrupt */ + + ret = platform_get_irq_byname(pdev, res->interrupt); + if (ret < 0) + return ret; + + ispif->irq = ret; + snprintf(ispif->irq_name, sizeof(ispif->irq_name), "%s_%s", + dev_name(dev), MSM_ISPIF_NAME); + if (camss->version == CAMSS_8x16) + ret = devm_request_irq(dev, ispif->irq, ispif_isr_8x16, + IRQF_TRIGGER_RISING, ispif->irq_name, ispif); + else if (camss->version == CAMSS_8x96 || + camss->version == CAMSS_660) + ret = devm_request_irq(dev, ispif->irq, ispif_isr_8x96, + IRQF_TRIGGER_RISING, ispif->irq_name, ispif); + else + ret = -EINVAL; + + if (ret < 0) { + dev_err(dev, "request_irq failed: %d\n", ret); + return ret; + } + + /* Clocks */ + + ispif->nclocks = 0; + while (res->clock[ispif->nclocks]) + ispif->nclocks++; + + ispif->clock = devm_kcalloc(dev, + ispif->nclocks, sizeof(*ispif->clock), + GFP_KERNEL); + if (!ispif->clock) + return -ENOMEM; + + for (i = 0; i < ispif->nclocks; i++) { + struct camss_clock *clock = &ispif->clock[i]; + + clock->clk = devm_clk_get(dev, res->clock[i]); + if (IS_ERR(clock->clk)) + return PTR_ERR(clock->clk); + + clock->freq = NULL; + clock->nfreqs = 0; + } + + ispif->nclocks_for_reset = 0; + while (res->clock_for_reset[ispif->nclocks_for_reset]) + ispif->nclocks_for_reset++; + + ispif->clock_for_reset = devm_kcalloc(dev, + ispif->nclocks_for_reset, + sizeof(*ispif->clock_for_reset), + GFP_KERNEL); + if (!ispif->clock_for_reset) + return -ENOMEM; + + for (i = 0; i < ispif->nclocks_for_reset; i++) { + struct camss_clock *clock = &ispif->clock_for_reset[i]; + + clock->clk = devm_clk_get(dev, res->clock_for_reset[i]); + if (IS_ERR(clock->clk)) + return PTR_ERR(clock->clk); + + clock->freq = NULL; + clock->nfreqs = 0; + } + + mutex_init(&ispif->power_lock); + ispif->power_count = 0; + + mutex_init(&ispif->config_lock); + + for (i = 0; i < 
MSM_ISPIF_VFE_NUM; i++) + init_completion(&ispif->reset_complete[i]); + + return 0; +} + +/* + * ispif_get_intf - Get ISPIF interface to use by VFE line id + * @line_id: VFE line id that the ISPIF line is connected to + * + * Return ISPIF interface to use + */ +static enum ispif_intf ispif_get_intf(enum vfe_line_id line_id) +{ + switch (line_id) { + case (VFE_LINE_RDI0): + return RDI0; + case (VFE_LINE_RDI1): + return RDI1; + case (VFE_LINE_RDI2): + return RDI2; + case (VFE_LINE_PIX): + return PIX0; + default: + return RDI0; + } +} + +/* + * ispif_get_vfe_id - Get VFE HW module id + * @entity: Pointer to VFE media entity structure + * @id: Return CSID HW module id here + */ +static void ispif_get_vfe_id(struct media_entity *entity, u8 *id) +{ + struct v4l2_subdev *sd; + struct vfe_line *line; + struct vfe_device *vfe; + + sd = media_entity_to_v4l2_subdev(entity); + line = v4l2_get_subdevdata(sd); + vfe = to_vfe(line); + + *id = vfe->id; +} + +/* + * ispif_get_vfe_line_id - Get VFE line id by media entity + * @entity: Pointer to VFE media entity structure + * @id: Return VFE line id here + */ +static void ispif_get_vfe_line_id(struct media_entity *entity, + enum vfe_line_id *id) +{ + struct v4l2_subdev *sd; + struct vfe_line *line; + + sd = media_entity_to_v4l2_subdev(entity); + line = v4l2_get_subdevdata(sd); + + *id = line->id; +} + +/* + * ispif_link_setup - Setup ISPIF connections + * @entity: Pointer to media entity structure + * @local: Pointer to local pad + * @remote: Pointer to remote pad + * @flags: Link flags + * + * Return 0 on success + */ +static int ispif_link_setup(struct media_entity *entity, + const struct media_pad *local, + const struct media_pad *remote, u32 flags) +{ + if (flags & MEDIA_LNK_FL_ENABLED) { + if (media_pad_remote_pad_first(local)) + return -EBUSY; + + if (local->flags & MEDIA_PAD_FL_SINK) { + struct v4l2_subdev *sd; + struct ispif_line *line; + + sd = media_entity_to_v4l2_subdev(entity); + line = v4l2_get_subdevdata(sd); + + msm_csid_get_csid_id(remote->entity, &line->csid_id); + } else { /* MEDIA_PAD_FL_SOURCE */ + struct v4l2_subdev *sd; + struct ispif_line *line; + enum vfe_line_id id; + + sd = media_entity_to_v4l2_subdev(entity); + line = v4l2_get_subdevdata(sd); + + ispif_get_vfe_id(remote->entity, &line->vfe_id); + ispif_get_vfe_line_id(remote->entity, &id); + line->interface = ispif_get_intf(id); + } + } + + return 0; +} + +static const struct v4l2_subdev_core_ops ispif_core_ops = { + .s_power = ispif_set_power, +}; + +static const struct v4l2_subdev_video_ops ispif_video_ops = { + .s_stream = ispif_set_stream, +}; + +static const struct v4l2_subdev_pad_ops ispif_pad_ops = { + .enum_mbus_code = ispif_enum_mbus_code, + .enum_frame_size = ispif_enum_frame_size, + .get_fmt = ispif_get_format, + .set_fmt = ispif_set_format, +}; + +static const struct v4l2_subdev_ops ispif_v4l2_ops = { + .core = &ispif_core_ops, + .video = &ispif_video_ops, + .pad = &ispif_pad_ops, +}; + +static const struct v4l2_subdev_internal_ops ispif_v4l2_internal_ops = { + .open = ispif_init_formats, +}; + +static const struct media_entity_operations ispif_media_ops = { + .link_setup = ispif_link_setup, + .link_validate = v4l2_subdev_link_validate, +}; + +/* + * msm_ispif_register_entities - Register subdev node for ISPIF module + * @ispif: ISPIF device + * @v4l2_dev: V4L2 device + * + * Return 0 on success or a negative error code otherwise + */ +int msm_ispif_register_entities(struct ispif_device *ispif, + struct v4l2_device *v4l2_dev) +{ + struct camss *camss; + int ret; + int 
i; + + if (!ispif) + return 0; + + camss = ispif->camss; + + for (i = 0; i < ispif->line_num; i++) { + struct v4l2_subdev *sd = &ispif->line[i].subdev; + struct media_pad *pads = ispif->line[i].pads; + + v4l2_subdev_init(sd, &ispif_v4l2_ops); + sd->internal_ops = &ispif_v4l2_internal_ops; + sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; + snprintf(sd->name, ARRAY_SIZE(sd->name), "%s%d", + MSM_ISPIF_NAME, i); + v4l2_set_subdevdata(sd, &ispif->line[i]); + + ret = ispif_init_formats(sd, NULL); + if (ret < 0) { + dev_err(camss->dev, "Failed to init format: %d\n", ret); + goto error; + } + + pads[MSM_ISPIF_PAD_SINK].flags = MEDIA_PAD_FL_SINK; + pads[MSM_ISPIF_PAD_SRC].flags = MEDIA_PAD_FL_SOURCE; + + sd->entity.function = MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER; + sd->entity.ops = &ispif_media_ops; + ret = media_entity_pads_init(&sd->entity, MSM_ISPIF_PADS_NUM, + pads); + if (ret < 0) { + dev_err(camss->dev, "Failed to init media entity: %d\n", + ret); + goto error; + } + + ret = v4l2_device_register_subdev(v4l2_dev, sd); + if (ret < 0) { + dev_err(camss->dev, "Failed to register subdev: %d\n", + ret); + media_entity_cleanup(&sd->entity); + goto error; + } + } + + return 0; + +error: + for (i--; i >= 0; i--) { + struct v4l2_subdev *sd = &ispif->line[i].subdev; + + v4l2_device_unregister_subdev(sd); + media_entity_cleanup(&sd->entity); + } + + return ret; +} + +/* + * msm_ispif_unregister_entities - Unregister ISPIF module subdev node + * @ispif: ISPIF device + */ +void msm_ispif_unregister_entities(struct ispif_device *ispif) +{ + int i; + + if (!ispif) + return; + + mutex_destroy(&ispif->power_lock); + mutex_destroy(&ispif->config_lock); + + for (i = 0; i < ispif->line_num; i++) { + struct v4l2_subdev *sd = &ispif->line[i].subdev; + + v4l2_device_unregister_subdev(sd); + media_entity_cleanup(&sd->entity); + } +} diff --git a/drivers/media/platform/qcom/camss/camss-ispif.h b/drivers/media/platform/qcom/camss/camss-ispif.h new file mode 100644 index 0000000000..fdf28e68cc --- /dev/null +++ b/drivers/media/platform/qcom/camss/camss-ispif.h @@ -0,0 +1,79 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * camss-ispif.h + * + * Qualcomm MSM Camera Subsystem - ISPIF (ISP Interface) Module + * + * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved. + * Copyright (C) 2015-2018 Linaro Ltd. 
+ */ +#ifndef QC_MSM_CAMSS_ISPIF_H +#define QC_MSM_CAMSS_ISPIF_H + +#include <linux/clk.h> +#include <media/media-entity.h> +#include <media/v4l2-device.h> +#include <media/v4l2-subdev.h> + +#define MSM_ISPIF_PAD_SINK 0 +#define MSM_ISPIF_PAD_SRC 1 +#define MSM_ISPIF_PADS_NUM 2 + +#define MSM_ISPIF_VFE_NUM 2 + +enum ispif_intf { + PIX0, + RDI0, + PIX1, + RDI1, + RDI2 +}; + +struct ispif_intf_cmd_reg { + u32 cmd_0; + u32 cmd_1; +}; + +struct ispif_line { + struct ispif_device *ispif; + u8 id; + u8 csid_id; + u8 vfe_id; + enum ispif_intf interface; + struct v4l2_subdev subdev; + struct media_pad pads[MSM_ISPIF_PADS_NUM]; + struct v4l2_mbus_framefmt fmt[MSM_ISPIF_PADS_NUM]; + const u32 *formats; + unsigned int nformats; +}; + +struct ispif_device { + void __iomem *base; + void __iomem *base_clk_mux; + u32 irq; + char irq_name[30]; + struct camss_clock *clock; + int nclocks; + struct camss_clock *clock_for_reset; + int nclocks_for_reset; + struct completion reset_complete[MSM_ISPIF_VFE_NUM]; + int power_count; + struct mutex power_lock; + struct ispif_intf_cmd_reg intf_cmd[MSM_ISPIF_VFE_NUM]; + struct mutex config_lock; + unsigned int line_num; + struct ispif_line *line; + struct camss *camss; +}; + +struct resources_ispif; + +int msm_ispif_subdev_init(struct camss *camss, + const struct resources_ispif *res); + +int msm_ispif_register_entities(struct ispif_device *ispif, + struct v4l2_device *v4l2_dev); + +void msm_ispif_unregister_entities(struct ispif_device *ispif); + +#endif /* QC_MSM_CAMSS_ISPIF_H */ diff --git a/drivers/media/platform/qcom/camss/camss-vfe-170.c b/drivers/media/platform/qcom/camss/camss-vfe-170.c new file mode 100644 index 0000000000..168baaa80d --- /dev/null +++ b/drivers/media/platform/qcom/camss/camss-vfe-170.c @@ -0,0 +1,774 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * camss-vfe-170.c + * + * Qualcomm MSM Camera Subsystem - VFE (Video Front End) Module v170 + * + * Copyright (C) 2020-2021 Linaro Ltd. 
+ */ + +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/iopoll.h> + +#include "camss.h" +#include "camss-vfe.h" + +#define VFE_HW_VERSION (0x000) + +#define VFE_GLOBAL_RESET_CMD (0x018) +#define GLOBAL_RESET_CMD_CORE BIT(0) +#define GLOBAL_RESET_CMD_CAMIF BIT(1) +#define GLOBAL_RESET_CMD_BUS BIT(2) +#define GLOBAL_RESET_CMD_BUS_BDG BIT(3) +#define GLOBAL_RESET_CMD_REGISTER BIT(4) +#define GLOBAL_RESET_CMD_PM BIT(5) +#define GLOBAL_RESET_CMD_BUS_MISR BIT(6) +#define GLOBAL_RESET_CMD_TESTGEN BIT(7) +#define GLOBAL_RESET_CMD_DSP BIT(8) +#define GLOBAL_RESET_CMD_IDLE_CGC BIT(9) +#define GLOBAL_RESET_CMD_RDI0 BIT(10) +#define GLOBAL_RESET_CMD_RDI1 BIT(11) +#define GLOBAL_RESET_CMD_RDI2 BIT(12) +#define GLOBAL_RESET_CMD_RDI3 BIT(13) +#define GLOBAL_RESET_CMD_VFE_DOMAIN BIT(30) +#define GLOBAL_RESET_CMD_RESET_BYPASS BIT(31) + +#define VFE_CORE_CFG (0x050) +#define CFG_PIXEL_PATTERN_YCBYCR (0x4) +#define CFG_PIXEL_PATTERN_YCRYCB (0x5) +#define CFG_PIXEL_PATTERN_CBYCRY (0x6) +#define CFG_PIXEL_PATTERN_CRYCBY (0x7) +#define CFG_COMPOSITE_REG_UPDATE_EN BIT(4) + +#define VFE_IRQ_CMD (0x058) +#define CMD_GLOBAL_CLEAR BIT(0) + +#define VFE_IRQ_MASK_0 (0x05c) +#define MASK_0_CAMIF_SOF BIT(0) +#define MASK_0_CAMIF_EOF BIT(1) +#define MASK_0_RDI_REG_UPDATE(n) BIT((n) + 5) +#define MASK_0_IMAGE_MASTER_n_PING_PONG(n) BIT((n) + 8) +#define MASK_0_IMAGE_COMPOSITE_DONE_n(n) BIT((n) + 25) +#define MASK_0_RESET_ACK BIT(31) + +#define VFE_IRQ_MASK_1 (0x060) +#define MASK_1_CAMIF_ERROR BIT(0) +#define MASK_1_VIOLATION BIT(7) +#define MASK_1_BUS_BDG_HALT_ACK BIT(8) +#define MASK_1_IMAGE_MASTER_n_BUS_OVERFLOW(n) BIT((n) + 9) +#define MASK_1_RDI_SOF(n) BIT((n) + 29) + +#define VFE_IRQ_CLEAR_0 (0x064) +#define VFE_IRQ_CLEAR_1 (0x068) + +#define VFE_IRQ_STATUS_0 (0x06c) +#define STATUS_0_CAMIF_SOF BIT(0) +#define STATUS_0_RDI_REG_UPDATE(n) BIT((n) + 5) +#define STATUS_0_IMAGE_MASTER_PING_PONG(n) BIT((n) + 8) +#define STATUS_0_IMAGE_COMPOSITE_DONE(n) BIT((n) + 25) +#define STATUS_0_RESET_ACK BIT(31) + +#define VFE_IRQ_STATUS_1 (0x070) +#define STATUS_1_VIOLATION BIT(7) +#define STATUS_1_BUS_BDG_HALT_ACK BIT(8) +#define STATUS_1_RDI_SOF(n) BIT((n) + 27) + +#define VFE_VIOLATION_STATUS (0x07c) + +#define VFE_CAMIF_CMD (0x478) +#define CMD_CLEAR_CAMIF_STATUS BIT(2) + +#define VFE_CAMIF_CFG (0x47c) +#define CFG_VSYNC_SYNC_EDGE (0) +#define VSYNC_ACTIVE_HIGH (0) +#define VSYNC_ACTIVE_LOW (1) +#define CFG_HSYNC_SYNC_EDGE (1) +#define HSYNC_ACTIVE_HIGH (0) +#define HSYNC_ACTIVE_LOW (1) +#define CFG_VFE_SUBSAMPLE_ENABLE BIT(4) +#define CFG_BUS_SUBSAMPLE_ENABLE BIT(5) +#define CFG_VFE_OUTPUT_EN BIT(6) +#define CFG_BUS_OUTPUT_EN BIT(7) +#define CFG_BINNING_EN BIT(9) +#define CFG_FRAME_BASED_EN BIT(10) +#define CFG_RAW_CROP_EN BIT(22) + +#define VFE_REG_UPDATE_CMD (0x4ac) +#define REG_UPDATE_RDI(n) BIT(1 + (n)) + +#define VFE_BUS_IRQ_MASK(n) (0x2044 + (n) * 4) +#define VFE_BUS_IRQ_CLEAR(n) (0x2050 + (n) * 4) +#define VFE_BUS_IRQ_STATUS(n) (0x205c + (n) * 4) +#define STATUS0_COMP_RESET_DONE BIT(0) +#define STATUS0_COMP_REG_UPDATE0_DONE BIT(1) +#define STATUS0_COMP_REG_UPDATE1_DONE BIT(2) +#define STATUS0_COMP_REG_UPDATE2_DONE BIT(3) +#define STATUS0_COMP_REG_UPDATE3_DONE BIT(4) +#define STATUS0_COMP_REG_UPDATE_DONE(n) BIT((n) + 1) +#define STATUS0_COMP0_BUF_DONE BIT(5) +#define STATUS0_COMP1_BUF_DONE BIT(6) +#define STATUS0_COMP2_BUF_DONE BIT(7) +#define STATUS0_COMP3_BUF_DONE BIT(8) +#define STATUS0_COMP4_BUF_DONE BIT(9) +#define STATUS0_COMP5_BUF_DONE BIT(10) +#define STATUS0_COMP_BUF_DONE(n) BIT((n) + 5) 
+#define STATUS0_COMP_ERROR BIT(11) +#define STATUS0_COMP_OVERWRITE BIT(12) +#define STATUS0_OVERFLOW BIT(13) +#define STATUS0_VIOLATION BIT(14) +/* WM_CLIENT_BUF_DONE defined for buffers 0:19 */ +#define STATUS1_WM_CLIENT_BUF_DONE(n) BIT(n) +#define STATUS1_EARLY_DONE BIT(24) +#define STATUS2_DUAL_COMP0_BUF_DONE BIT(0) +#define STATUS2_DUAL_COMP1_BUF_DONE BIT(1) +#define STATUS2_DUAL_COMP2_BUF_DONE BIT(2) +#define STATUS2_DUAL_COMP3_BUF_DONE BIT(3) +#define STATUS2_DUAL_COMP4_BUF_DONE BIT(4) +#define STATUS2_DUAL_COMP5_BUF_DONE BIT(5) +#define STATUS2_DUAL_COMP_BUF_DONE(n) BIT(n) +#define STATUS2_DUAL_COMP_ERROR BIT(6) +#define STATUS2_DUAL_COMP_OVERWRITE BIT(7) + +#define VFE_BUS_IRQ_CLEAR_GLOBAL (0x2068) + +#define VFE_BUS_WM_DEBUG_STATUS_CFG (0x226c) +#define DEBUG_STATUS_CFG_STATUS0(n) BIT(n) +#define DEBUG_STATUS_CFG_STATUS1(n) BIT(8 + (n)) + +#define VFE_BUS_WM_ADDR_SYNC_FRAME_HEADER (0x2080) + +#define VFE_BUS_WM_ADDR_SYNC_NO_SYNC (0x2084) +#define BUS_VER2_MAX_CLIENTS (24) +#define WM_ADDR_NO_SYNC_DEFAULT_VAL \ + ((1 << BUS_VER2_MAX_CLIENTS) - 1) + +#define VFE_BUS_WM_CGC_OVERRIDE (0x200c) +#define WM_CGC_OVERRIDE_ALL (0xFFFFF) + +#define VFE_BUS_WM_TEST_BUS_CTRL (0x211c) + +#define VFE_BUS_WM_STATUS0(n) (0x2200 + (n) * 0x100) +#define VFE_BUS_WM_STATUS1(n) (0x2204 + (n) * 0x100) +#define VFE_BUS_WM_CFG(n) (0x2208 + (n) * 0x100) +#define WM_CFG_EN (0) +#define WM_CFG_MODE (1) +#define MODE_QCOM_PLAIN (0) +#define MODE_MIPI_RAW (1) +#define WM_CFG_VIRTUALFRAME (2) +#define VFE_BUS_WM_HEADER_ADDR(n) (0x220c + (n) * 0x100) +#define VFE_BUS_WM_HEADER_CFG(n) (0x2210 + (n) * 0x100) +#define VFE_BUS_WM_IMAGE_ADDR(n) (0x2214 + (n) * 0x100) +#define VFE_BUS_WM_IMAGE_ADDR_OFFSET(n) (0x2218 + (n) * 0x100) +#define VFE_BUS_WM_BUFFER_WIDTH_CFG(n) (0x221c + (n) * 0x100) +#define WM_BUFFER_DEFAULT_WIDTH (0xFF01) + +#define VFE_BUS_WM_BUFFER_HEIGHT_CFG(n) (0x2220 + (n) * 0x100) +#define VFE_BUS_WM_PACKER_CFG(n) (0x2224 + (n) * 0x100) + +#define VFE_BUS_WM_STRIDE(n) (0x2228 + (n) * 0x100) +#define WM_STRIDE_DEFAULT_STRIDE (0xFF01) + +#define VFE_BUS_WM_IRQ_SUBSAMPLE_PERIOD(n) (0x2248 + (n) * 0x100) +#define VFE_BUS_WM_IRQ_SUBSAMPLE_PATTERN(n) (0x224c + (n) * 0x100) +#define VFE_BUS_WM_FRAMEDROP_PERIOD(n) (0x2250 + (n) * 0x100) +#define VFE_BUS_WM_FRAMEDROP_PATTERN(n) (0x2254 + (n) * 0x100) +#define VFE_BUS_WM_FRAME_INC(n) (0x2258 + (n) * 0x100) +#define VFE_BUS_WM_BURST_LIMIT(n) (0x225c + (n) * 0x100) + +static u32 vfe_hw_version(struct vfe_device *vfe) +{ + u32 hw_version = readl_relaxed(vfe->base + VFE_HW_VERSION); + + u32 gen = (hw_version >> 28) & 0xF; + u32 rev = (hw_version >> 16) & 0xFFF; + u32 step = hw_version & 0xFFFF; + + dev_dbg(vfe->camss->dev, "VFE HW Version = %u.%u.%u\n", + gen, rev, step); + + return hw_version; +} + +static inline void vfe_reg_set(struct vfe_device *vfe, u32 reg, u32 set_bits) +{ + u32 bits = readl_relaxed(vfe->base + reg); + + writel_relaxed(bits | set_bits, vfe->base + reg); +} + +static void vfe_global_reset(struct vfe_device *vfe) +{ + u32 reset_bits = GLOBAL_RESET_CMD_CORE | + GLOBAL_RESET_CMD_CAMIF | + GLOBAL_RESET_CMD_BUS | + GLOBAL_RESET_CMD_BUS_BDG | + GLOBAL_RESET_CMD_REGISTER | + GLOBAL_RESET_CMD_TESTGEN | + GLOBAL_RESET_CMD_DSP | + GLOBAL_RESET_CMD_IDLE_CGC | + GLOBAL_RESET_CMD_RDI0 | + GLOBAL_RESET_CMD_RDI1 | + GLOBAL_RESET_CMD_RDI2; + + writel_relaxed(BIT(31), vfe->base + VFE_IRQ_MASK_0); + + /* Make sure IRQ mask has been written before resetting */ + wmb(); + + writel_relaxed(reset_bits, vfe->base + VFE_GLOBAL_RESET_CMD); +} + +static void 
vfe_wm_start(struct vfe_device *vfe, u8 wm, struct vfe_line *line) +{ + u32 val; + + /*Set Debug Registers*/ + val = DEBUG_STATUS_CFG_STATUS0(1) | + DEBUG_STATUS_CFG_STATUS0(7); + writel_relaxed(val, vfe->base + VFE_BUS_WM_DEBUG_STATUS_CFG); + + /* BUS_WM_INPUT_IF_ADDR_SYNC_FRAME_HEADER */ + writel_relaxed(0, vfe->base + VFE_BUS_WM_ADDR_SYNC_FRAME_HEADER); + + /* no clock gating at bus input */ + val = WM_CGC_OVERRIDE_ALL; + writel_relaxed(val, vfe->base + VFE_BUS_WM_CGC_OVERRIDE); + + writel_relaxed(0x0, vfe->base + VFE_BUS_WM_TEST_BUS_CTRL); + + /* if addr_no_sync has default value then config the addr no sync reg */ + val = WM_ADDR_NO_SYNC_DEFAULT_VAL; + writel_relaxed(val, vfe->base + VFE_BUS_WM_ADDR_SYNC_NO_SYNC); + + writel_relaxed(0xf, vfe->base + VFE_BUS_WM_BURST_LIMIT(wm)); + + val = WM_BUFFER_DEFAULT_WIDTH; + writel_relaxed(val, vfe->base + VFE_BUS_WM_BUFFER_WIDTH_CFG(wm)); + + val = 0; + writel_relaxed(val, vfe->base + VFE_BUS_WM_BUFFER_HEIGHT_CFG(wm)); + + val = 0; + writel_relaxed(val, vfe->base + VFE_BUS_WM_PACKER_CFG(wm)); // XXX 1 for PLAIN8? + + /* Configure stride for RDIs */ + val = WM_STRIDE_DEFAULT_STRIDE; + writel_relaxed(val, vfe->base + VFE_BUS_WM_STRIDE(wm)); + + /* Enable WM */ + val = 1 << WM_CFG_EN | + MODE_MIPI_RAW << WM_CFG_MODE; + writel_relaxed(val, vfe->base + VFE_BUS_WM_CFG(wm)); +} + +static void vfe_wm_stop(struct vfe_device *vfe, u8 wm) +{ + /* Disable WM */ + writel_relaxed(0, vfe->base + VFE_BUS_WM_CFG(wm)); +} + +static void vfe_wm_update(struct vfe_device *vfe, u8 wm, u32 addr, + struct vfe_line *line) +{ + struct v4l2_pix_format_mplane *pix = + &line->video_out.active_fmt.fmt.pix_mp; + u32 stride = pix->plane_fmt[0].bytesperline; + + writel_relaxed(addr, vfe->base + VFE_BUS_WM_IMAGE_ADDR(wm)); + writel_relaxed(stride * pix->height, vfe->base + VFE_BUS_WM_FRAME_INC(wm)); +} + +static void vfe_reg_update(struct vfe_device *vfe, enum vfe_line_id line_id) +{ + vfe->reg_update |= REG_UPDATE_RDI(line_id); + + /* Enforce ordering between previous reg writes and reg update */ + wmb(); + + writel_relaxed(vfe->reg_update, vfe->base + VFE_REG_UPDATE_CMD); + + /* Enforce ordering between reg update and subsequent reg writes */ + wmb(); +} + +static inline void vfe_reg_update_clear(struct vfe_device *vfe, + enum vfe_line_id line_id) +{ + vfe->reg_update &= ~REG_UPDATE_RDI(line_id); +} + +static void vfe_enable_irq_common(struct vfe_device *vfe) +{ + vfe_reg_set(vfe, VFE_IRQ_MASK_0, ~0u); + vfe_reg_set(vfe, VFE_IRQ_MASK_1, ~0u); + + writel_relaxed(~0u, vfe->base + VFE_BUS_IRQ_MASK(0)); + writel_relaxed(~0u, vfe->base + VFE_BUS_IRQ_MASK(1)); + writel_relaxed(~0u, vfe->base + VFE_BUS_IRQ_MASK(2)); +} + +static void vfe_isr_halt_ack(struct vfe_device *vfe) +{ + complete(&vfe->halt_complete); +} + +static void vfe_isr_read(struct vfe_device *vfe, u32 *status0, u32 *status1) +{ + *status0 = readl_relaxed(vfe->base + VFE_IRQ_STATUS_0); + *status1 = readl_relaxed(vfe->base + VFE_IRQ_STATUS_1); + + writel_relaxed(*status0, vfe->base + VFE_IRQ_CLEAR_0); + writel_relaxed(*status1, vfe->base + VFE_IRQ_CLEAR_1); + + /* Enforce ordering between IRQ Clear and Global IRQ Clear */ + wmb(); + writel_relaxed(CMD_GLOBAL_CLEAR, vfe->base + VFE_IRQ_CMD); +} + +static void vfe_violation_read(struct vfe_device *vfe) +{ + u32 violation = readl_relaxed(vfe->base + VFE_VIOLATION_STATUS); + + pr_err_ratelimited("VFE: violation = 0x%08x\n", violation); +} + +/* + * vfe_isr - VFE module interrupt handler + * @irq: Interrupt line + * @dev: VFE device + * + * Return IRQ_HANDLED on success + 
*/ +static irqreturn_t vfe_isr(int irq, void *dev) +{ + struct vfe_device *vfe = dev; + u32 status0, status1, vfe_bus_status[3]; + int i, wm; + + status0 = readl_relaxed(vfe->base + VFE_IRQ_STATUS_0); + status1 = readl_relaxed(vfe->base + VFE_IRQ_STATUS_1); + + writel_relaxed(status0, vfe->base + VFE_IRQ_CLEAR_0); + writel_relaxed(status1, vfe->base + VFE_IRQ_CLEAR_1); + + for (i = VFE_LINE_RDI0; i <= VFE_LINE_RDI2; i++) { + vfe_bus_status[i] = readl_relaxed(vfe->base + VFE_BUS_IRQ_STATUS(i)); + writel_relaxed(vfe_bus_status[i], vfe->base + VFE_BUS_IRQ_CLEAR(i)); + } + + /* Enforce ordering between IRQ reading and interpretation */ + wmb(); + + writel_relaxed(CMD_GLOBAL_CLEAR, vfe->base + VFE_IRQ_CMD); + writel_relaxed(1, vfe->base + VFE_BUS_IRQ_CLEAR_GLOBAL); + + if (status0 & STATUS_0_RESET_ACK) + vfe->isr_ops.reset_ack(vfe); + + for (i = VFE_LINE_RDI0; i <= VFE_LINE_RDI2; i++) + if (status0 & STATUS_0_RDI_REG_UPDATE(i)) + vfe->isr_ops.reg_update(vfe, i); + + for (i = VFE_LINE_RDI0; i <= VFE_LINE_RDI2; i++) + if (status0 & STATUS_1_RDI_SOF(i)) + vfe->isr_ops.sof(vfe, i); + + for (i = 0; i < MSM_VFE_COMPOSITE_IRQ_NUM; i++) + if (vfe_bus_status[0] & STATUS0_COMP_BUF_DONE(i)) + vfe->isr_ops.comp_done(vfe, i); + + for (wm = 0; wm < MSM_VFE_IMAGE_MASTERS_NUM; wm++) + if (status0 & BIT(9)) + if (vfe_bus_status[1] & STATUS1_WM_CLIENT_BUF_DONE(wm)) + vfe->isr_ops.wm_done(vfe, wm); + + return IRQ_HANDLED; +} + +/* + * vfe_halt - Trigger halt on VFE module and wait to complete + * @vfe: VFE device + * + * Return 0 on success or a negative error code otherwise + */ +static int vfe_halt(struct vfe_device *vfe) +{ + /* rely on vfe_disable_output() to stop the VFE */ + return 0; +} + +static int vfe_get_output(struct vfe_line *line) +{ + struct vfe_device *vfe = to_vfe(line); + struct vfe_output *output; + unsigned long flags; + int wm_idx; + + spin_lock_irqsave(&vfe->output_lock, flags); + + output = &line->output; + if (output->state > VFE_OUTPUT_RESERVED) { + dev_err(vfe->camss->dev, "Output is running\n"); + goto error; + } + + output->wm_num = 1; + + wm_idx = vfe_reserve_wm(vfe, line->id); + if (wm_idx < 0) { + dev_err(vfe->camss->dev, "Can not reserve wm\n"); + goto error_get_wm; + } + output->wm_idx[0] = wm_idx; + + output->drop_update_idx = 0; + + spin_unlock_irqrestore(&vfe->output_lock, flags); + + return 0; + +error_get_wm: + vfe_release_wm(vfe, output->wm_idx[0]); + output->state = VFE_OUTPUT_OFF; +error: + spin_unlock_irqrestore(&vfe->output_lock, flags); + + return -EINVAL; +} + +static int vfe_enable_output(struct vfe_line *line) +{ + struct vfe_device *vfe = to_vfe(line); + struct vfe_output *output = &line->output; + const struct vfe_hw_ops *ops = vfe->ops; + struct media_entity *sensor; + unsigned long flags; + unsigned int frame_skip = 0; + unsigned int i; + + sensor = camss_find_sensor(&line->subdev.entity); + if (sensor) { + struct v4l2_subdev *subdev = media_entity_to_v4l2_subdev(sensor); + + v4l2_subdev_call(subdev, sensor, g_skip_frames, &frame_skip); + /* Max frame skip is 29 frames */ + if (frame_skip > VFE_FRAME_DROP_VAL - 1) + frame_skip = VFE_FRAME_DROP_VAL - 1; + } + + spin_lock_irqsave(&vfe->output_lock, flags); + + ops->reg_update_clear(vfe, line->id); + + if (output->state > VFE_OUTPUT_RESERVED) { + dev_err(vfe->camss->dev, "Output is not in reserved state %d\n", + output->state); + spin_unlock_irqrestore(&vfe->output_lock, flags); + return -EINVAL; + } + + WARN_ON(output->gen2.active_num); + + output->state = VFE_OUTPUT_ON; + + output->sequence = 0; + 
output->wait_reg_update = 0; + reinit_completion(&output->reg_update); + + vfe_wm_start(vfe, output->wm_idx[0], line); + + for (i = 0; i < 2; i++) { + output->buf[i] = vfe_buf_get_pending(output); + if (!output->buf[i]) + break; + output->gen2.active_num++; + vfe_wm_update(vfe, output->wm_idx[0], output->buf[i]->addr[0], line); + } + + ops->reg_update(vfe, line->id); + + spin_unlock_irqrestore(&vfe->output_lock, flags); + + return 0; +} + +static void vfe_disable_output(struct vfe_line *line) +{ + struct vfe_device *vfe = to_vfe(line); + struct vfe_output *output = &line->output; + unsigned long flags; + unsigned int i; + + spin_lock_irqsave(&vfe->output_lock, flags); + for (i = 0; i < output->wm_num; i++) + vfe_wm_stop(vfe, output->wm_idx[i]); + output->gen2.active_num = 0; + spin_unlock_irqrestore(&vfe->output_lock, flags); + + vfe_reset(vfe); +} + +/* + * vfe_enable - Enable streaming on VFE line + * @line: VFE line + * + * Return 0 on success or a negative error code otherwise + */ +static int vfe_enable(struct vfe_line *line) +{ + struct vfe_device *vfe = to_vfe(line); + int ret; + + mutex_lock(&vfe->stream_lock); + + if (!vfe->stream_count) + vfe_enable_irq_common(vfe); + + vfe->stream_count++; + + mutex_unlock(&vfe->stream_lock); + + ret = vfe_get_output(line); + if (ret < 0) + goto error_get_output; + + ret = vfe_enable_output(line); + if (ret < 0) + goto error_enable_output; + + vfe->was_streaming = 1; + + return 0; + +error_enable_output: + vfe_put_output(line); + +error_get_output: + mutex_lock(&vfe->stream_lock); + + vfe->stream_count--; + + mutex_unlock(&vfe->stream_lock); + + return ret; +} + +/* + * vfe_disable - Disable streaming on VFE line + * @line: VFE line + * + * Return 0 on success or a negative error code otherwise + */ +static int vfe_disable(struct vfe_line *line) +{ + struct vfe_device *vfe = to_vfe(line); + + vfe_disable_output(line); + + vfe_put_output(line); + + mutex_lock(&vfe->stream_lock); + + vfe->stream_count--; + + mutex_unlock(&vfe->stream_lock); + + return 0; +} + +/* + * vfe_isr_sof - Process start of frame interrupt + * @vfe: VFE Device + * @line_id: VFE line + */ +static void vfe_isr_sof(struct vfe_device *vfe, enum vfe_line_id line_id) +{ + /* nop */ +} + +/* + * vfe_isr_reg_update - Process reg update interrupt + * @vfe: VFE Device + * @line_id: VFE line + */ +static void vfe_isr_reg_update(struct vfe_device *vfe, enum vfe_line_id line_id) +{ + struct vfe_output *output; + unsigned long flags; + + spin_lock_irqsave(&vfe->output_lock, flags); + vfe->ops->reg_update_clear(vfe, line_id); + + output = &vfe->line[line_id].output; + + if (output->wait_reg_update) { + output->wait_reg_update = 0; + complete(&output->reg_update); + } + + spin_unlock_irqrestore(&vfe->output_lock, flags); +} + +/* + * vfe_isr_wm_done - Process write master done interrupt + * @vfe: VFE Device + * @wm: Write master id + */ +static void vfe_isr_wm_done(struct vfe_device *vfe, u8 wm) +{ + struct vfe_line *line = &vfe->line[vfe->wm_output_map[wm]]; + struct camss_buffer *ready_buf; + struct vfe_output *output; + unsigned long flags; + u32 index; + u64 ts = ktime_get_ns(); + + spin_lock_irqsave(&vfe->output_lock, flags); + + if (vfe->wm_output_map[wm] == VFE_LINE_NONE) { + dev_err_ratelimited(vfe->camss->dev, + "Received wm done for unmapped index\n"); + goto out_unlock; + } + output = &vfe->line[vfe->wm_output_map[wm]].output; + + ready_buf = output->buf[0]; + if (!ready_buf) { + dev_err_ratelimited(vfe->camss->dev, + "Missing ready buf %d!\n", output->state); + goto out_unlock; 
+ } + + ready_buf->vb.vb2_buf.timestamp = ts; + ready_buf->vb.sequence = output->sequence++; + + index = 0; + output->buf[0] = output->buf[1]; + if (output->buf[0]) + index = 1; + + output->buf[index] = vfe_buf_get_pending(output); + + if (output->buf[index]) + vfe_wm_update(vfe, output->wm_idx[0], output->buf[index]->addr[0], line); + else + output->gen2.active_num--; + + spin_unlock_irqrestore(&vfe->output_lock, flags); + + vb2_buffer_done(&ready_buf->vb.vb2_buf, VB2_BUF_STATE_DONE); + + return; + +out_unlock: + spin_unlock_irqrestore(&vfe->output_lock, flags); +} + +/* + * vfe_pm_domain_off - Disable power domains specific to this VFE. + * @vfe: VFE Device + */ +static void vfe_pm_domain_off(struct vfe_device *vfe) +{ + struct camss *camss = vfe->camss; + + if (vfe->id >= camss->vfe_num) + return; + + device_link_del(camss->genpd_link[vfe->id]); +} + +/* + * vfe_pm_domain_on - Enable power domains specific to this VFE. + * @vfe: VFE Device + */ +static int vfe_pm_domain_on(struct vfe_device *vfe) +{ + struct camss *camss = vfe->camss; + enum vfe_line_id id = vfe->id; + + if (id >= camss->vfe_num) + return 0; + + camss->genpd_link[id] = device_link_add(camss->dev, camss->genpd[id], + DL_FLAG_STATELESS | + DL_FLAG_PM_RUNTIME | + DL_FLAG_RPM_ACTIVE); + if (!camss->genpd_link[id]) + return -EINVAL; + + return 0; +} + +/* + * vfe_queue_buffer - Add empty buffer + * @vid: Video device structure + * @buf: Buffer to be enqueued + * + * Add an empty buffer - depending on the current number of buffers it will be + * put in pending buffer queue or directly given to the hardware to be filled. + * + * Return 0 on success or a negative error code otherwise + */ +static int vfe_queue_buffer(struct camss_video *vid, + struct camss_buffer *buf) +{ + struct vfe_line *line = container_of(vid, struct vfe_line, video_out); + struct vfe_device *vfe = to_vfe(line); + struct vfe_output *output; + unsigned long flags; + + output = &line->output; + + spin_lock_irqsave(&vfe->output_lock, flags); + + if (output->state == VFE_OUTPUT_ON && output->gen2.active_num < 2) { + output->buf[output->gen2.active_num++] = buf; + vfe_wm_update(vfe, output->wm_idx[0], buf->addr[0], line); + } else { + vfe_buf_add_pending(output, buf); + } + + spin_unlock_irqrestore(&vfe->output_lock, flags); + + return 0; +} + +static const struct vfe_isr_ops vfe_isr_ops_170 = { + .reset_ack = vfe_isr_reset_ack, + .halt_ack = vfe_isr_halt_ack, + .reg_update = vfe_isr_reg_update, + .sof = vfe_isr_sof, + .comp_done = vfe_isr_comp_done, + .wm_done = vfe_isr_wm_done, +}; + +static const struct camss_video_ops vfe_video_ops_170 = { + .queue_buffer = vfe_queue_buffer, + .flush_buffers = vfe_flush_buffers, +}; + +static void vfe_subdev_init(struct device *dev, struct vfe_device *vfe) +{ + vfe->isr_ops = vfe_isr_ops_170; + vfe->video_ops = vfe_video_ops_170; + + vfe->line_num = VFE_LINE_NUM_GEN2; +} + +const struct vfe_hw_ops vfe_ops_170 = { + .global_reset = vfe_global_reset, + .hw_version = vfe_hw_version, + .isr_read = vfe_isr_read, + .isr = vfe_isr, + .pm_domain_off = vfe_pm_domain_off, + .pm_domain_on = vfe_pm_domain_on, + .reg_update_clear = vfe_reg_update_clear, + .reg_update = vfe_reg_update, + .subdev_init = vfe_subdev_init, + .vfe_disable = vfe_disable, + .vfe_enable = vfe_enable, + .vfe_halt = vfe_halt, + .violation_read = vfe_violation_read, +}; diff --git a/drivers/media/platform/qcom/camss/camss-vfe-4-1.c b/drivers/media/platform/qcom/camss/camss-vfe-4-1.c new file mode 100644 index 0000000000..42047b11ba --- /dev/null +++ 
b/drivers/media/platform/qcom/camss/camss-vfe-4-1.c @@ -0,0 +1,1013 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * camss-vfe-4-1.c + * + * Qualcomm MSM Camera Subsystem - VFE (Video Front End) Module v4.1 + * + * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved. + * Copyright (C) 2015-2018 Linaro Ltd. + */ + +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/iopoll.h> + +#include "camss.h" +#include "camss-vfe.h" +#include "camss-vfe-gen1.h" + +#define VFE_0_HW_VERSION 0x000 + +#define VFE_0_GLOBAL_RESET_CMD 0x00c +#define VFE_0_GLOBAL_RESET_CMD_CORE BIT(0) +#define VFE_0_GLOBAL_RESET_CMD_CAMIF BIT(1) +#define VFE_0_GLOBAL_RESET_CMD_BUS BIT(2) +#define VFE_0_GLOBAL_RESET_CMD_BUS_BDG BIT(3) +#define VFE_0_GLOBAL_RESET_CMD_REGISTER BIT(4) +#define VFE_0_GLOBAL_RESET_CMD_TIMER BIT(5) +#define VFE_0_GLOBAL_RESET_CMD_PM BIT(6) +#define VFE_0_GLOBAL_RESET_CMD_BUS_MISR BIT(7) +#define VFE_0_GLOBAL_RESET_CMD_TESTGEN BIT(8) + +#define VFE_0_MODULE_CFG 0x018 +#define VFE_0_MODULE_CFG_DEMUX BIT(2) +#define VFE_0_MODULE_CFG_CHROMA_UPSAMPLE BIT(3) +#define VFE_0_MODULE_CFG_SCALE_ENC BIT(23) +#define VFE_0_MODULE_CFG_CROP_ENC BIT(27) + +#define VFE_0_CORE_CFG 0x01c +#define VFE_0_CORE_CFG_PIXEL_PATTERN_YCBYCR 0x4 +#define VFE_0_CORE_CFG_PIXEL_PATTERN_YCRYCB 0x5 +#define VFE_0_CORE_CFG_PIXEL_PATTERN_CBYCRY 0x6 +#define VFE_0_CORE_CFG_PIXEL_PATTERN_CRYCBY 0x7 + +#define VFE_0_IRQ_CMD 0x024 +#define VFE_0_IRQ_CMD_GLOBAL_CLEAR BIT(0) + +#define VFE_0_IRQ_MASK_0 0x028 +#define VFE_0_IRQ_MASK_0_CAMIF_SOF BIT(0) +#define VFE_0_IRQ_MASK_0_CAMIF_EOF BIT(1) +#define VFE_0_IRQ_MASK_0_RDIn_REG_UPDATE(n) BIT((n) + 5) +#define VFE_0_IRQ_MASK_0_line_n_REG_UPDATE(n) \ + ((n) == VFE_LINE_PIX ? BIT(4) : VFE_0_IRQ_MASK_0_RDIn_REG_UPDATE(n)) +#define VFE_0_IRQ_MASK_0_IMAGE_MASTER_n_PING_PONG(n) BIT((n) + 8) +#define VFE_0_IRQ_MASK_0_IMAGE_COMPOSITE_DONE_n(n) BIT((n) + 25) +#define VFE_0_IRQ_MASK_0_RESET_ACK BIT(31) +#define VFE_0_IRQ_MASK_1 0x02c +#define VFE_0_IRQ_MASK_1_CAMIF_ERROR BIT(0) +#define VFE_0_IRQ_MASK_1_VIOLATION BIT(7) +#define VFE_0_IRQ_MASK_1_BUS_BDG_HALT_ACK BIT(8) +#define VFE_0_IRQ_MASK_1_IMAGE_MASTER_n_BUS_OVERFLOW(n) BIT((n) + 9) +#define VFE_0_IRQ_MASK_1_RDIn_SOF(n) BIT((n) + 29) + +#define VFE_0_IRQ_CLEAR_0 0x030 +#define VFE_0_IRQ_CLEAR_1 0x034 + +#define VFE_0_IRQ_STATUS_0 0x038 +#define VFE_0_IRQ_STATUS_0_CAMIF_SOF BIT(0) +#define VFE_0_IRQ_STATUS_0_RDIn_REG_UPDATE(n) BIT((n) + 5) +#define VFE_0_IRQ_STATUS_0_line_n_REG_UPDATE(n) \ + ((n) == VFE_LINE_PIX ? 
BIT(4) : VFE_0_IRQ_STATUS_0_RDIn_REG_UPDATE(n)) +#define VFE_0_IRQ_STATUS_0_IMAGE_MASTER_n_PING_PONG(n) BIT((n) + 8) +#define VFE_0_IRQ_STATUS_0_IMAGE_COMPOSITE_DONE_n(n) BIT((n) + 25) +#define VFE_0_IRQ_STATUS_0_RESET_ACK BIT(31) +#define VFE_0_IRQ_STATUS_1 0x03c +#define VFE_0_IRQ_STATUS_1_VIOLATION BIT(7) +#define VFE_0_IRQ_STATUS_1_BUS_BDG_HALT_ACK BIT(8) +#define VFE_0_IRQ_STATUS_1_RDIn_SOF(n) BIT((n) + 29) + +#define VFE_0_IRQ_COMPOSITE_MASK_0 0x40 +#define VFE_0_VIOLATION_STATUS 0x48 + +#define VFE_0_BUS_CMD 0x4c +#define VFE_0_BUS_CMD_Mx_RLD_CMD(x) BIT(x) + +#define VFE_0_BUS_CFG 0x050 + +#define VFE_0_BUS_XBAR_CFG_x(x) (0x58 + 0x4 * ((x) / 2)) +#define VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_EN BIT(1) +#define VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_SWAP_INTER_INTRA (0x3 << 4) +#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT 8 +#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_LUMA 0 +#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI0 5 +#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI1 6 +#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI2 7 + +#define VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(n) (0x06c + 0x24 * (n)) +#define VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_WR_PATH_SHIFT 0 +#define VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_FRM_BASED_SHIFT 1 +#define VFE_0_BUS_IMAGE_MASTER_n_WR_PING_ADDR(n) (0x070 + 0x24 * (n)) +#define VFE_0_BUS_IMAGE_MASTER_n_WR_PONG_ADDR(n) (0x074 + 0x24 * (n)) +#define VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG(n) (0x078 + 0x24 * (n)) +#define VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_SHIFT 2 +#define VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_MASK (0x1f << 2) + +#define VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG(n) (0x07c + 0x24 * (n)) +#define VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG_OFFSET_SHIFT 16 +#define VFE_0_BUS_IMAGE_MASTER_n_WR_IMAGE_SIZE(n) (0x080 + 0x24 * (n)) +#define VFE_0_BUS_IMAGE_MASTER_n_WR_BUFFER_CFG(n) (0x084 + 0x24 * (n)) +#define VFE_0_BUS_IMAGE_MASTER_n_WR_FRAMEDROP_PATTERN(n) \ + (0x088 + 0x24 * (n)) +#define VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN(n) \ + (0x08c + 0x24 * (n)) +#define VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN_DEF 0xffffffff + +#define VFE_0_BUS_PING_PONG_STATUS 0x268 + +#define VFE_0_BUS_BDG_CMD 0x2c0 +#define VFE_0_BUS_BDG_CMD_HALT_REQ 1 + +#define VFE_0_BUS_BDG_QOS_CFG_0 0x2c4 +#define VFE_0_BUS_BDG_QOS_CFG_0_CFG 0xaaa5aaa5 +#define VFE_0_BUS_BDG_QOS_CFG_1 0x2c8 +#define VFE_0_BUS_BDG_QOS_CFG_2 0x2cc +#define VFE_0_BUS_BDG_QOS_CFG_3 0x2d0 +#define VFE_0_BUS_BDG_QOS_CFG_4 0x2d4 +#define VFE_0_BUS_BDG_QOS_CFG_5 0x2d8 +#define VFE_0_BUS_BDG_QOS_CFG_6 0x2dc +#define VFE_0_BUS_BDG_QOS_CFG_7 0x2e0 +#define VFE_0_BUS_BDG_QOS_CFG_7_CFG 0x0001aaa5 + +#define VFE_0_RDI_CFG_x(x) (0x2e8 + (0x4 * (x))) +#define VFE_0_RDI_CFG_x_RDI_STREAM_SEL_SHIFT 28 +#define VFE_0_RDI_CFG_x_RDI_STREAM_SEL_MASK (0xf << 28) +#define VFE_0_RDI_CFG_x_RDI_M0_SEL_SHIFT 4 +#define VFE_0_RDI_CFG_x_RDI_M0_SEL_MASK (0xf << 4) +#define VFE_0_RDI_CFG_x_RDI_EN_BIT BIT(2) +#define VFE_0_RDI_CFG_x_MIPI_EN_BITS 0x3 +#define VFE_0_RDI_CFG_x_RDI_Mr_FRAME_BASED_EN(r) BIT(16 + (r)) + +#define VFE_0_CAMIF_CMD 0x2f4 +#define VFE_0_CAMIF_CMD_DISABLE_FRAME_BOUNDARY 0 +#define VFE_0_CAMIF_CMD_ENABLE_FRAME_BOUNDARY 1 +#define VFE_0_CAMIF_CMD_NO_CHANGE 3 +#define VFE_0_CAMIF_CMD_CLEAR_CAMIF_STATUS BIT(2) +#define VFE_0_CAMIF_CFG 0x2f8 +#define VFE_0_CAMIF_CFG_VFE_OUTPUT_EN BIT(6) +#define VFE_0_CAMIF_FRAME_CFG 0x300 +#define VFE_0_CAMIF_WINDOW_WIDTH_CFG 0x304 +#define VFE_0_CAMIF_WINDOW_HEIGHT_CFG 0x308 +#define VFE_0_CAMIF_SUBSAMPLE_CFG_0 0x30c +#define 
VFE_0_CAMIF_IRQ_SUBSAMPLE_PATTERN 0x314 +#define VFE_0_CAMIF_STATUS 0x31c +#define VFE_0_CAMIF_STATUS_HALT BIT(31) + +#define VFE_0_REG_UPDATE 0x378 +#define VFE_0_REG_UPDATE_RDIn(n) BIT(1 + (n)) +#define VFE_0_REG_UPDATE_line_n(n) \ + ((n) == VFE_LINE_PIX ? 1 : VFE_0_REG_UPDATE_RDIn(n)) + +#define VFE_0_DEMUX_CFG 0x424 +#define VFE_0_DEMUX_CFG_PERIOD 0x3 +#define VFE_0_DEMUX_GAIN_0 0x428 +#define VFE_0_DEMUX_GAIN_0_CH0_EVEN (0x80 << 0) +#define VFE_0_DEMUX_GAIN_0_CH0_ODD (0x80 << 16) +#define VFE_0_DEMUX_GAIN_1 0x42c +#define VFE_0_DEMUX_GAIN_1_CH1 (0x80 << 0) +#define VFE_0_DEMUX_GAIN_1_CH2 (0x80 << 16) +#define VFE_0_DEMUX_EVEN_CFG 0x438 +#define VFE_0_DEMUX_EVEN_CFG_PATTERN_YUYV 0x9cac +#define VFE_0_DEMUX_EVEN_CFG_PATTERN_YVYU 0xac9c +#define VFE_0_DEMUX_EVEN_CFG_PATTERN_UYVY 0xc9ca +#define VFE_0_DEMUX_EVEN_CFG_PATTERN_VYUY 0xcac9 +#define VFE_0_DEMUX_ODD_CFG 0x43c +#define VFE_0_DEMUX_ODD_CFG_PATTERN_YUYV 0x9cac +#define VFE_0_DEMUX_ODD_CFG_PATTERN_YVYU 0xac9c +#define VFE_0_DEMUX_ODD_CFG_PATTERN_UYVY 0xc9ca +#define VFE_0_DEMUX_ODD_CFG_PATTERN_VYUY 0xcac9 + +#define VFE_0_SCALE_ENC_Y_CFG 0x75c +#define VFE_0_SCALE_ENC_Y_H_IMAGE_SIZE 0x760 +#define VFE_0_SCALE_ENC_Y_H_PHASE 0x764 +#define VFE_0_SCALE_ENC_Y_V_IMAGE_SIZE 0x76c +#define VFE_0_SCALE_ENC_Y_V_PHASE 0x770 +#define VFE_0_SCALE_ENC_CBCR_CFG 0x778 +#define VFE_0_SCALE_ENC_CBCR_H_IMAGE_SIZE 0x77c +#define VFE_0_SCALE_ENC_CBCR_H_PHASE 0x780 +#define VFE_0_SCALE_ENC_CBCR_V_IMAGE_SIZE 0x790 +#define VFE_0_SCALE_ENC_CBCR_V_PHASE 0x794 + +#define VFE_0_CROP_ENC_Y_WIDTH 0x854 +#define VFE_0_CROP_ENC_Y_HEIGHT 0x858 +#define VFE_0_CROP_ENC_CBCR_WIDTH 0x85c +#define VFE_0_CROP_ENC_CBCR_HEIGHT 0x860 + +#define VFE_0_CLAMP_ENC_MAX_CFG 0x874 +#define VFE_0_CLAMP_ENC_MAX_CFG_CH0 (0xff << 0) +#define VFE_0_CLAMP_ENC_MAX_CFG_CH1 (0xff << 8) +#define VFE_0_CLAMP_ENC_MAX_CFG_CH2 (0xff << 16) +#define VFE_0_CLAMP_ENC_MIN_CFG 0x878 +#define VFE_0_CLAMP_ENC_MIN_CFG_CH0 (0x0 << 0) +#define VFE_0_CLAMP_ENC_MIN_CFG_CH1 (0x0 << 8) +#define VFE_0_CLAMP_ENC_MIN_CFG_CH2 (0x0 << 16) + +#define VFE_0_CGC_OVERRIDE_1 0x974 +#define VFE_0_CGC_OVERRIDE_1_IMAGE_Mx_CGC_OVERRIDE(x) BIT(x) + +#define CAMIF_TIMEOUT_SLEEP_US 1000 +#define CAMIF_TIMEOUT_ALL_US 1000000 + +#define MSM_VFE_VFE0_UB_SIZE 1023 +#define MSM_VFE_VFE0_UB_SIZE_RDI (MSM_VFE_VFE0_UB_SIZE / 3) + +static u32 vfe_hw_version(struct vfe_device *vfe) +{ + u32 hw_version = readl_relaxed(vfe->base + VFE_0_HW_VERSION); + + dev_dbg(vfe->camss->dev, "VFE HW Version = 0x%08x\n", hw_version); + + return hw_version; +} + +static u16 vfe_get_ub_size(u8 vfe_id) +{ + if (vfe_id == 0) + return MSM_VFE_VFE0_UB_SIZE_RDI; + + return 0; +} + +static inline void vfe_reg_clr(struct vfe_device *vfe, u32 reg, u32 clr_bits) +{ + u32 bits = readl_relaxed(vfe->base + reg); + + writel_relaxed(bits & ~clr_bits, vfe->base + reg); +} + +static inline void vfe_reg_set(struct vfe_device *vfe, u32 reg, u32 set_bits) +{ + u32 bits = readl_relaxed(vfe->base + reg); + + writel_relaxed(bits | set_bits, vfe->base + reg); +} + +static void vfe_global_reset(struct vfe_device *vfe) +{ + u32 reset_bits = VFE_0_GLOBAL_RESET_CMD_TESTGEN | + VFE_0_GLOBAL_RESET_CMD_BUS_MISR | + VFE_0_GLOBAL_RESET_CMD_PM | + VFE_0_GLOBAL_RESET_CMD_TIMER | + VFE_0_GLOBAL_RESET_CMD_REGISTER | + VFE_0_GLOBAL_RESET_CMD_BUS_BDG | + VFE_0_GLOBAL_RESET_CMD_BUS | + VFE_0_GLOBAL_RESET_CMD_CAMIF | + VFE_0_GLOBAL_RESET_CMD_CORE; + + writel_relaxed(reset_bits, vfe->base + VFE_0_GLOBAL_RESET_CMD); +} + +static void vfe_halt_request(struct vfe_device *vfe) +{ + 
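+	/*
+	 * Writing HALT_REQ to the bus bridge command register asks the
+	 * write masters to stop; completion is signalled asynchronously
+	 * via VFE_0_IRQ_STATUS_1_BUS_BDG_HALT_ACK, which vfe_isr() routes
+	 * to isr_ops.halt_ack, after which vfe_halt_clear() below drops
+	 * the request again.
+	 */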
writel_relaxed(VFE_0_BUS_BDG_CMD_HALT_REQ, + vfe->base + VFE_0_BUS_BDG_CMD); +} + +static void vfe_halt_clear(struct vfe_device *vfe) +{ + writel_relaxed(0x0, vfe->base + VFE_0_BUS_BDG_CMD); +} + +static void vfe_wm_enable(struct vfe_device *vfe, u8 wm, u8 enable) +{ + if (enable) + vfe_reg_set(vfe, VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(wm), + 1 << VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_WR_PATH_SHIFT); + else + vfe_reg_clr(vfe, VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(wm), + 1 << VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_WR_PATH_SHIFT); +} + +static void vfe_wm_frame_based(struct vfe_device *vfe, u8 wm, u8 enable) +{ + if (enable) + vfe_reg_set(vfe, VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(wm), + 1 << VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_FRM_BASED_SHIFT); + else + vfe_reg_clr(vfe, VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(wm), + 1 << VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_FRM_BASED_SHIFT); +} + +static void vfe_get_wm_sizes(struct v4l2_pix_format_mplane *pix, u8 plane, + u16 *width, u16 *height, u16 *bytesperline) +{ + *width = pix->width; + *height = pix->height; + *bytesperline = pix->plane_fmt[0].bytesperline; + + if (pix->pixelformat == V4L2_PIX_FMT_NV12 || + pix->pixelformat == V4L2_PIX_FMT_NV21) + if (plane == 1) + *height /= 2; +} + +static void vfe_wm_line_based(struct vfe_device *vfe, u32 wm, + struct v4l2_pix_format_mplane *pix, + u8 plane, u32 enable) +{ + u32 reg; + + if (enable) { + u16 width = 0, height = 0, bytesperline = 0, wpl; + + vfe_get_wm_sizes(pix, plane, &width, &height, &bytesperline); + + wpl = vfe_word_per_line(pix->pixelformat, width); + + reg = height - 1; + reg |= ((wpl + 1) / 2 - 1) << 16; + + writel_relaxed(reg, vfe->base + + VFE_0_BUS_IMAGE_MASTER_n_WR_IMAGE_SIZE(wm)); + + wpl = vfe_word_per_line(pix->pixelformat, bytesperline); + + reg = 0x3; + reg |= (height - 1) << 4; + reg |= wpl << 16; + + writel_relaxed(reg, vfe->base + + VFE_0_BUS_IMAGE_MASTER_n_WR_BUFFER_CFG(wm)); + } else { + writel_relaxed(0, vfe->base + + VFE_0_BUS_IMAGE_MASTER_n_WR_IMAGE_SIZE(wm)); + writel_relaxed(0, vfe->base + + VFE_0_BUS_IMAGE_MASTER_n_WR_BUFFER_CFG(wm)); + } +} + +static void vfe_wm_set_framedrop_period(struct vfe_device *vfe, u8 wm, u8 per) +{ + u32 reg; + + reg = readl_relaxed(vfe->base + + VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG(wm)); + + reg &= ~(VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_MASK); + + reg |= (per << VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_SHIFT) + & VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_MASK; + + writel_relaxed(reg, + vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG(wm)); +} + +static void vfe_wm_set_framedrop_pattern(struct vfe_device *vfe, u8 wm, + u32 pattern) +{ + writel_relaxed(pattern, + vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_FRAMEDROP_PATTERN(wm)); +} + +static void vfe_wm_set_ub_cfg(struct vfe_device *vfe, u8 wm, + u16 offset, u16 depth) +{ + u32 reg; + + reg = (offset << VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG_OFFSET_SHIFT) | + depth; + writel_relaxed(reg, vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG(wm)); +} + +static void vfe_bus_reload_wm(struct vfe_device *vfe, u8 wm) +{ + wmb(); + writel_relaxed(VFE_0_BUS_CMD_Mx_RLD_CMD(wm), vfe->base + VFE_0_BUS_CMD); + wmb(); +} + +static void vfe_wm_set_ping_addr(struct vfe_device *vfe, u8 wm, u32 addr) +{ + writel_relaxed(addr, + vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_PING_ADDR(wm)); +} + +static void vfe_wm_set_pong_addr(struct vfe_device *vfe, u8 wm, u32 addr) +{ + writel_relaxed(addr, + vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_PONG_ADDR(wm)); +} + +static int vfe_wm_get_ping_pong_status(struct vfe_device *vfe, u8 wm) +{ + u32 reg; + + reg = 
readl_relaxed(vfe->base + VFE_0_BUS_PING_PONG_STATUS); + + return (reg >> wm) & 0x1; +} + +static void vfe_bus_enable_wr_if(struct vfe_device *vfe, u8 enable) +{ + if (enable) + writel_relaxed(0x10000009, vfe->base + VFE_0_BUS_CFG); + else + writel_relaxed(0, vfe->base + VFE_0_BUS_CFG); +} + +static void vfe_bus_connect_wm_to_rdi(struct vfe_device *vfe, u8 wm, + enum vfe_line_id id) +{ + u32 reg; + + reg = VFE_0_RDI_CFG_x_MIPI_EN_BITS; + reg |= VFE_0_RDI_CFG_x_RDI_Mr_FRAME_BASED_EN(id); + vfe_reg_set(vfe, VFE_0_RDI_CFG_x(0), reg); + + reg = VFE_0_RDI_CFG_x_RDI_EN_BIT; + reg |= ((3 * id) << VFE_0_RDI_CFG_x_RDI_STREAM_SEL_SHIFT) & + VFE_0_RDI_CFG_x_RDI_STREAM_SEL_MASK; + vfe_reg_set(vfe, VFE_0_RDI_CFG_x(id), reg); + + switch (id) { + case VFE_LINE_RDI0: + default: + reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI0 << + VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT; + break; + case VFE_LINE_RDI1: + reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI1 << + VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT; + break; + case VFE_LINE_RDI2: + reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI2 << + VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT; + break; + } + + if (wm % 2 == 1) + reg <<= 16; + + vfe_reg_set(vfe, VFE_0_BUS_XBAR_CFG_x(wm), reg); +} + +static void vfe_wm_set_subsample(struct vfe_device *vfe, u8 wm) +{ + writel_relaxed(VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN_DEF, + vfe->base + + VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN(wm)); +} + +static void vfe_bus_disconnect_wm_from_rdi(struct vfe_device *vfe, u8 wm, + enum vfe_line_id id) +{ + u32 reg; + + reg = VFE_0_RDI_CFG_x_RDI_Mr_FRAME_BASED_EN(id); + vfe_reg_clr(vfe, VFE_0_RDI_CFG_x(0), reg); + + reg = VFE_0_RDI_CFG_x_RDI_EN_BIT; + vfe_reg_clr(vfe, VFE_0_RDI_CFG_x(id), reg); + + switch (id) { + case VFE_LINE_RDI0: + default: + reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI0 << + VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT; + break; + case VFE_LINE_RDI1: + reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI1 << + VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT; + break; + case VFE_LINE_RDI2: + reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI2 << + VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT; + break; + } + + if (wm % 2 == 1) + reg <<= 16; + + vfe_reg_clr(vfe, VFE_0_BUS_XBAR_CFG_x(wm), reg); +} + +static void vfe_set_xbar_cfg(struct vfe_device *vfe, struct vfe_output *output, + u8 enable) +{ + struct vfe_line *line = container_of(output, struct vfe_line, output); + u32 p = line->video_out.active_fmt.fmt.pix_mp.pixelformat; + u32 reg; + unsigned int i; + + for (i = 0; i < output->wm_num; i++) { + if (i == 0) { + reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_LUMA << + VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT; + } else if (i == 1) { + reg = VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_EN; + if (p == V4L2_PIX_FMT_NV12 || p == V4L2_PIX_FMT_NV16) + reg |= VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_SWAP_INTER_INTRA; + } else { + /* On current devices output->wm_num is always <= 2 */ + break; + } + + if (output->wm_idx[i] % 2 == 1) + reg <<= 16; + + if (enable) + vfe_reg_set(vfe, + VFE_0_BUS_XBAR_CFG_x(output->wm_idx[i]), + reg); + else + vfe_reg_clr(vfe, + VFE_0_BUS_XBAR_CFG_x(output->wm_idx[i]), + reg); + } +} + +static void vfe_set_realign_cfg(struct vfe_device *vfe, struct vfe_line *line, + u8 enable) +{ + /* empty */ +} +static void vfe_set_rdi_cid(struct vfe_device *vfe, enum vfe_line_id id, u8 cid) +{ + vfe_reg_clr(vfe, VFE_0_RDI_CFG_x(id), + VFE_0_RDI_CFG_x_RDI_M0_SEL_MASK); + + vfe_reg_set(vfe, 
VFE_0_RDI_CFG_x(id), + cid << VFE_0_RDI_CFG_x_RDI_M0_SEL_SHIFT); +} + +static void vfe_reg_update(struct vfe_device *vfe, enum vfe_line_id line_id) +{ + vfe->reg_update |= VFE_0_REG_UPDATE_line_n(line_id); + wmb(); + writel_relaxed(vfe->reg_update, vfe->base + VFE_0_REG_UPDATE); + wmb(); +} + +static inline void vfe_reg_update_clear(struct vfe_device *vfe, + enum vfe_line_id line_id) +{ + vfe->reg_update &= ~VFE_0_REG_UPDATE_line_n(line_id); +} + +static void vfe_enable_irq_wm_line(struct vfe_device *vfe, u8 wm, + enum vfe_line_id line_id, u8 enable) +{ + u32 irq_en0 = VFE_0_IRQ_MASK_0_IMAGE_MASTER_n_PING_PONG(wm) | + VFE_0_IRQ_MASK_0_line_n_REG_UPDATE(line_id); + u32 irq_en1 = VFE_0_IRQ_MASK_1_IMAGE_MASTER_n_BUS_OVERFLOW(wm) | + VFE_0_IRQ_MASK_1_RDIn_SOF(line_id); + + if (enable) { + vfe_reg_set(vfe, VFE_0_IRQ_MASK_0, irq_en0); + vfe_reg_set(vfe, VFE_0_IRQ_MASK_1, irq_en1); + } else { + vfe_reg_clr(vfe, VFE_0_IRQ_MASK_0, irq_en0); + vfe_reg_clr(vfe, VFE_0_IRQ_MASK_1, irq_en1); + } +} + +static void vfe_enable_irq_pix_line(struct vfe_device *vfe, u8 comp, + enum vfe_line_id line_id, u8 enable) +{ + struct vfe_output *output = &vfe->line[line_id].output; + unsigned int i; + u32 irq_en0; + u32 irq_en1; + u32 comp_mask = 0; + + irq_en0 = VFE_0_IRQ_MASK_0_CAMIF_SOF; + irq_en0 |= VFE_0_IRQ_MASK_0_CAMIF_EOF; + irq_en0 |= VFE_0_IRQ_MASK_0_IMAGE_COMPOSITE_DONE_n(comp); + irq_en0 |= VFE_0_IRQ_MASK_0_line_n_REG_UPDATE(line_id); + irq_en1 = VFE_0_IRQ_MASK_1_CAMIF_ERROR; + for (i = 0; i < output->wm_num; i++) { + irq_en1 |= VFE_0_IRQ_MASK_1_IMAGE_MASTER_n_BUS_OVERFLOW( + output->wm_idx[i]); + comp_mask |= (1 << output->wm_idx[i]) << comp * 8; + } + + if (enable) { + vfe_reg_set(vfe, VFE_0_IRQ_MASK_0, irq_en0); + vfe_reg_set(vfe, VFE_0_IRQ_MASK_1, irq_en1); + vfe_reg_set(vfe, VFE_0_IRQ_COMPOSITE_MASK_0, comp_mask); + } else { + vfe_reg_clr(vfe, VFE_0_IRQ_MASK_0, irq_en0); + vfe_reg_clr(vfe, VFE_0_IRQ_MASK_1, irq_en1); + vfe_reg_clr(vfe, VFE_0_IRQ_COMPOSITE_MASK_0, comp_mask); + } +} + +static void vfe_enable_irq_common(struct vfe_device *vfe) +{ + u32 irq_en0 = VFE_0_IRQ_MASK_0_RESET_ACK; + u32 irq_en1 = VFE_0_IRQ_MASK_1_VIOLATION | + VFE_0_IRQ_MASK_1_BUS_BDG_HALT_ACK; + + vfe_reg_set(vfe, VFE_0_IRQ_MASK_0, irq_en0); + vfe_reg_set(vfe, VFE_0_IRQ_MASK_1, irq_en1); +} + +static void vfe_set_demux_cfg(struct vfe_device *vfe, struct vfe_line *line) +{ + u32 val, even_cfg, odd_cfg; + + writel_relaxed(VFE_0_DEMUX_CFG_PERIOD, vfe->base + VFE_0_DEMUX_CFG); + + val = VFE_0_DEMUX_GAIN_0_CH0_EVEN | VFE_0_DEMUX_GAIN_0_CH0_ODD; + writel_relaxed(val, vfe->base + VFE_0_DEMUX_GAIN_0); + + val = VFE_0_DEMUX_GAIN_1_CH1 | VFE_0_DEMUX_GAIN_1_CH2; + writel_relaxed(val, vfe->base + VFE_0_DEMUX_GAIN_1); + + switch (line->fmt[MSM_VFE_PAD_SINK].code) { + case MEDIA_BUS_FMT_YUYV8_2X8: + even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_YUYV; + odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_YUYV; + break; + case MEDIA_BUS_FMT_YVYU8_2X8: + even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_YVYU; + odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_YVYU; + break; + case MEDIA_BUS_FMT_UYVY8_2X8: + default: + even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_UYVY; + odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_UYVY; + break; + case MEDIA_BUS_FMT_VYUY8_2X8: + even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_VYUY; + odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_VYUY; + break; + } + + writel_relaxed(even_cfg, vfe->base + VFE_0_DEMUX_EVEN_CFG); + writel_relaxed(odd_cfg, vfe->base + VFE_0_DEMUX_ODD_CFG); +} + +static void vfe_set_scale_cfg(struct vfe_device *vfe, struct vfe_line *line) +{ + u32 p = 
line->video_out.active_fmt.fmt.pix_mp.pixelformat; + u32 reg; + u16 input, output; + u8 interp_reso; + u32 phase_mult; + + writel_relaxed(0x3, vfe->base + VFE_0_SCALE_ENC_Y_CFG); + + input = line->fmt[MSM_VFE_PAD_SINK].width; + output = line->compose.width; + reg = (output << 16) | input; + writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_Y_H_IMAGE_SIZE); + + interp_reso = vfe_calc_interp_reso(input, output); + phase_mult = input * (1 << (13 + interp_reso)) / output; + reg = (interp_reso << 20) | phase_mult; + writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_Y_H_PHASE); + + input = line->fmt[MSM_VFE_PAD_SINK].height; + output = line->compose.height; + reg = (output << 16) | input; + writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_Y_V_IMAGE_SIZE); + + interp_reso = vfe_calc_interp_reso(input, output); + phase_mult = input * (1 << (13 + interp_reso)) / output; + reg = (interp_reso << 20) | phase_mult; + writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_Y_V_PHASE); + + writel_relaxed(0x3, vfe->base + VFE_0_SCALE_ENC_CBCR_CFG); + + input = line->fmt[MSM_VFE_PAD_SINK].width; + output = line->compose.width / 2; + reg = (output << 16) | input; + writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_CBCR_H_IMAGE_SIZE); + + interp_reso = vfe_calc_interp_reso(input, output); + phase_mult = input * (1 << (13 + interp_reso)) / output; + reg = (interp_reso << 20) | phase_mult; + writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_CBCR_H_PHASE); + + input = line->fmt[MSM_VFE_PAD_SINK].height; + output = line->compose.height; + if (p == V4L2_PIX_FMT_NV12 || p == V4L2_PIX_FMT_NV21) + output = line->compose.height / 2; + reg = (output << 16) | input; + writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_CBCR_V_IMAGE_SIZE); + + interp_reso = vfe_calc_interp_reso(input, output); + phase_mult = input * (1 << (13 + interp_reso)) / output; + reg = (interp_reso << 20) | phase_mult; + writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_CBCR_V_PHASE); +} + +static void vfe_set_crop_cfg(struct vfe_device *vfe, struct vfe_line *line) +{ + u32 p = line->video_out.active_fmt.fmt.pix_mp.pixelformat; + u32 reg; + u16 first, last; + + first = line->crop.left; + last = line->crop.left + line->crop.width - 1; + reg = (first << 16) | last; + writel_relaxed(reg, vfe->base + VFE_0_CROP_ENC_Y_WIDTH); + + first = line->crop.top; + last = line->crop.top + line->crop.height - 1; + reg = (first << 16) | last; + writel_relaxed(reg, vfe->base + VFE_0_CROP_ENC_Y_HEIGHT); + + first = line->crop.left / 2; + last = line->crop.left / 2 + line->crop.width / 2 - 1; + reg = (first << 16) | last; + writel_relaxed(reg, vfe->base + VFE_0_CROP_ENC_CBCR_WIDTH); + + first = line->crop.top; + last = line->crop.top + line->crop.height - 1; + if (p == V4L2_PIX_FMT_NV12 || p == V4L2_PIX_FMT_NV21) { + first = line->crop.top / 2; + last = line->crop.top / 2 + line->crop.height / 2 - 1; + } + reg = (first << 16) | last; + writel_relaxed(reg, vfe->base + VFE_0_CROP_ENC_CBCR_HEIGHT); +} + +static void vfe_set_clamp_cfg(struct vfe_device *vfe) +{ + u32 val = VFE_0_CLAMP_ENC_MAX_CFG_CH0 | + VFE_0_CLAMP_ENC_MAX_CFG_CH1 | + VFE_0_CLAMP_ENC_MAX_CFG_CH2; + + writel_relaxed(val, vfe->base + VFE_0_CLAMP_ENC_MAX_CFG); + + val = VFE_0_CLAMP_ENC_MIN_CFG_CH0 | + VFE_0_CLAMP_ENC_MIN_CFG_CH1 | + VFE_0_CLAMP_ENC_MIN_CFG_CH2; + + writel_relaxed(val, vfe->base + VFE_0_CLAMP_ENC_MIN_CFG); +} + +static void vfe_set_qos(struct vfe_device *vfe) +{ + u32 val = VFE_0_BUS_BDG_QOS_CFG_0_CFG; + u32 val7 = VFE_0_BUS_BDG_QOS_CFG_7_CFG; + + writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_0); + 
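+	/* QOS_CFG_1..QOS_CFG_6 reuse the CFG_0 priority pattern; only CFG_7 differs. */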
writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_1); + writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_2); + writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_3); + writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_4); + writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_5); + writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_6); + writel_relaxed(val7, vfe->base + VFE_0_BUS_BDG_QOS_CFG_7); +} + +static void vfe_set_ds(struct vfe_device *vfe) +{ + /* empty */ +} + +static void vfe_set_cgc_override(struct vfe_device *vfe, u8 wm, u8 enable) +{ + u32 val = VFE_0_CGC_OVERRIDE_1_IMAGE_Mx_CGC_OVERRIDE(wm); + + if (enable) + vfe_reg_set(vfe, VFE_0_CGC_OVERRIDE_1, val); + else + vfe_reg_clr(vfe, VFE_0_CGC_OVERRIDE_1, val); + + wmb(); +} + +static void vfe_set_camif_cfg(struct vfe_device *vfe, struct vfe_line *line) +{ + u32 val; + + switch (line->fmt[MSM_VFE_PAD_SINK].code) { + case MEDIA_BUS_FMT_YUYV8_2X8: + val = VFE_0_CORE_CFG_PIXEL_PATTERN_YCBYCR; + break; + case MEDIA_BUS_FMT_YVYU8_2X8: + val = VFE_0_CORE_CFG_PIXEL_PATTERN_YCRYCB; + break; + case MEDIA_BUS_FMT_UYVY8_2X8: + default: + val = VFE_0_CORE_CFG_PIXEL_PATTERN_CBYCRY; + break; + case MEDIA_BUS_FMT_VYUY8_2X8: + val = VFE_0_CORE_CFG_PIXEL_PATTERN_CRYCBY; + break; + } + + writel_relaxed(val, vfe->base + VFE_0_CORE_CFG); + + val = line->fmt[MSM_VFE_PAD_SINK].width * 2; + val |= line->fmt[MSM_VFE_PAD_SINK].height << 16; + writel_relaxed(val, vfe->base + VFE_0_CAMIF_FRAME_CFG); + + val = line->fmt[MSM_VFE_PAD_SINK].width * 2 - 1; + writel_relaxed(val, vfe->base + VFE_0_CAMIF_WINDOW_WIDTH_CFG); + + val = line->fmt[MSM_VFE_PAD_SINK].height - 1; + writel_relaxed(val, vfe->base + VFE_0_CAMIF_WINDOW_HEIGHT_CFG); + + val = 0xffffffff; + writel_relaxed(val, vfe->base + VFE_0_CAMIF_SUBSAMPLE_CFG_0); + + val = 0xffffffff; + writel_relaxed(val, vfe->base + VFE_0_CAMIF_IRQ_SUBSAMPLE_PATTERN); + + val = VFE_0_RDI_CFG_x_MIPI_EN_BITS; + vfe_reg_set(vfe, VFE_0_RDI_CFG_x(0), val); + + val = VFE_0_CAMIF_CFG_VFE_OUTPUT_EN; + writel_relaxed(val, vfe->base + VFE_0_CAMIF_CFG); +} + +static void vfe_set_camif_cmd(struct vfe_device *vfe, u8 enable) +{ + u32 cmd; + + cmd = VFE_0_CAMIF_CMD_CLEAR_CAMIF_STATUS | VFE_0_CAMIF_CMD_NO_CHANGE; + writel_relaxed(cmd, vfe->base + VFE_0_CAMIF_CMD); + wmb(); + + if (enable) + cmd = VFE_0_CAMIF_CMD_ENABLE_FRAME_BOUNDARY; + else + cmd = VFE_0_CAMIF_CMD_DISABLE_FRAME_BOUNDARY; + + writel_relaxed(cmd, vfe->base + VFE_0_CAMIF_CMD); +} + +static void vfe_set_module_cfg(struct vfe_device *vfe, u8 enable) +{ + u32 val = VFE_0_MODULE_CFG_DEMUX | + VFE_0_MODULE_CFG_CHROMA_UPSAMPLE | + VFE_0_MODULE_CFG_SCALE_ENC | + VFE_0_MODULE_CFG_CROP_ENC; + + if (enable) + writel_relaxed(val, vfe->base + VFE_0_MODULE_CFG); + else + writel_relaxed(0x0, vfe->base + VFE_0_MODULE_CFG); +} + +static int vfe_camif_wait_for_stop(struct vfe_device *vfe, struct device *dev) +{ + u32 val; + int ret; + + ret = readl_poll_timeout(vfe->base + VFE_0_CAMIF_STATUS, + val, + (val & VFE_0_CAMIF_STATUS_HALT), + CAMIF_TIMEOUT_SLEEP_US, + CAMIF_TIMEOUT_ALL_US); + if (ret < 0) + dev_err(dev, "%s: camif stop timeout\n", __func__); + + return ret; +} + +static void vfe_isr_read(struct vfe_device *vfe, u32 *value0, u32 *value1) +{ + *value0 = readl_relaxed(vfe->base + VFE_0_IRQ_STATUS_0); + *value1 = readl_relaxed(vfe->base + VFE_0_IRQ_STATUS_1); + + writel_relaxed(*value0, vfe->base + VFE_0_IRQ_CLEAR_0); + writel_relaxed(*value1, vfe->base + VFE_0_IRQ_CLEAR_1); + + wmb(); + writel_relaxed(VFE_0_IRQ_CMD_GLOBAL_CLEAR, vfe->base + VFE_0_IRQ_CMD); +} + 
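+/*
+ * IRQ handling on VFE 4.1 is a two-step acknowledge: vfe_isr_read() copies
+ * the latched status words into the corresponding IRQ_CLEAR registers and
+ * then issues IRQ_CMD_GLOBAL_CLEAR to commit the clear.  The per-line
+ * reg-update bits decoded by vfe_isr() map as follows:
+ *
+ *   VFE_LINE_PIX  -> BIT(4)
+ *   VFE_LINE_RDIn -> BIT(5 + n)	(n = 0..2)
+ */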
+static void vfe_violation_read(struct vfe_device *vfe) +{ + u32 violation = readl_relaxed(vfe->base + VFE_0_VIOLATION_STATUS); + + pr_err_ratelimited("VFE: violation = 0x%08x\n", violation); +} + +/* + * vfe_isr - VFE module interrupt handler + * @irq: Interrupt line + * @dev: VFE device + * + * Return IRQ_HANDLED on success + */ +static irqreturn_t vfe_isr(int irq, void *dev) +{ + struct vfe_device *vfe = dev; + u32 value0, value1; + int i, j; + + vfe->ops->isr_read(vfe, &value0, &value1); + + dev_dbg(vfe->camss->dev, "VFE: status0 = 0x%08x, status1 = 0x%08x\n", + value0, value1); + + if (value0 & VFE_0_IRQ_STATUS_0_RESET_ACK) + vfe->isr_ops.reset_ack(vfe); + + if (value1 & VFE_0_IRQ_STATUS_1_VIOLATION) + vfe->ops->violation_read(vfe); + + if (value1 & VFE_0_IRQ_STATUS_1_BUS_BDG_HALT_ACK) + vfe->isr_ops.halt_ack(vfe); + + for (i = VFE_LINE_RDI0; i <= VFE_LINE_PIX; i++) + if (value0 & VFE_0_IRQ_STATUS_0_line_n_REG_UPDATE(i)) + vfe->isr_ops.reg_update(vfe, i); + + if (value0 & VFE_0_IRQ_STATUS_0_CAMIF_SOF) + vfe->isr_ops.sof(vfe, VFE_LINE_PIX); + + for (i = VFE_LINE_RDI0; i <= VFE_LINE_RDI2; i++) + if (value1 & VFE_0_IRQ_STATUS_1_RDIn_SOF(i)) + vfe->isr_ops.sof(vfe, i); + + for (i = 0; i < MSM_VFE_COMPOSITE_IRQ_NUM; i++) + if (value0 & VFE_0_IRQ_STATUS_0_IMAGE_COMPOSITE_DONE_n(i)) { + vfe->isr_ops.comp_done(vfe, i); + for (j = 0; j < ARRAY_SIZE(vfe->wm_output_map); j++) + if (vfe->wm_output_map[j] == VFE_LINE_PIX) + value0 &= ~VFE_0_IRQ_MASK_0_IMAGE_MASTER_n_PING_PONG(j); + } + + for (i = 0; i < MSM_VFE_IMAGE_MASTERS_NUM; i++) + if (value0 & VFE_0_IRQ_STATUS_0_IMAGE_MASTER_n_PING_PONG(i)) + vfe->isr_ops.wm_done(vfe, i); + + return IRQ_HANDLED; +} + +/* + * vfe_pm_domain_off - Disable power domains specific to this VFE. + * @vfe: VFE Device + */ +static void vfe_pm_domain_off(struct vfe_device *vfe) +{ + /* nop */ +} + +/* + * vfe_pm_domain_on - Enable power domains specific to this VFE. 
+ * @vfe: VFE Device + */ +static int vfe_pm_domain_on(struct vfe_device *vfe) +{ + return 0; +} + +static const struct vfe_hw_ops_gen1 vfe_ops_gen1_4_1 = { + .bus_connect_wm_to_rdi = vfe_bus_connect_wm_to_rdi, + .bus_disconnect_wm_from_rdi = vfe_bus_disconnect_wm_from_rdi, + .bus_enable_wr_if = vfe_bus_enable_wr_if, + .bus_reload_wm = vfe_bus_reload_wm, + .camif_wait_for_stop = vfe_camif_wait_for_stop, + .enable_irq_common = vfe_enable_irq_common, + .enable_irq_pix_line = vfe_enable_irq_pix_line, + .enable_irq_wm_line = vfe_enable_irq_wm_line, + .get_ub_size = vfe_get_ub_size, + .halt_clear = vfe_halt_clear, + .halt_request = vfe_halt_request, + .set_camif_cfg = vfe_set_camif_cfg, + .set_camif_cmd = vfe_set_camif_cmd, + .set_cgc_override = vfe_set_cgc_override, + .set_clamp_cfg = vfe_set_clamp_cfg, + .set_crop_cfg = vfe_set_crop_cfg, + .set_demux_cfg = vfe_set_demux_cfg, + .set_ds = vfe_set_ds, + .set_module_cfg = vfe_set_module_cfg, + .set_qos = vfe_set_qos, + .set_rdi_cid = vfe_set_rdi_cid, + .set_realign_cfg = vfe_set_realign_cfg, + .set_scale_cfg = vfe_set_scale_cfg, + .set_xbar_cfg = vfe_set_xbar_cfg, + .wm_enable = vfe_wm_enable, + .wm_frame_based = vfe_wm_frame_based, + .wm_get_ping_pong_status = vfe_wm_get_ping_pong_status, + .wm_line_based = vfe_wm_line_based, + .wm_set_framedrop_pattern = vfe_wm_set_framedrop_pattern, + .wm_set_framedrop_period = vfe_wm_set_framedrop_period, + .wm_set_ping_addr = vfe_wm_set_ping_addr, + .wm_set_pong_addr = vfe_wm_set_pong_addr, + .wm_set_subsample = vfe_wm_set_subsample, + .wm_set_ub_cfg = vfe_wm_set_ub_cfg, +}; + +static void vfe_subdev_init(struct device *dev, struct vfe_device *vfe) +{ + vfe->isr_ops = vfe_isr_ops_gen1; + vfe->ops_gen1 = &vfe_ops_gen1_4_1; + vfe->video_ops = vfe_video_ops_gen1; + + vfe->line_num = VFE_LINE_NUM_GEN1; +} + +const struct vfe_hw_ops vfe_ops_4_1 = { + .global_reset = vfe_global_reset, + .hw_version = vfe_hw_version, + .isr_read = vfe_isr_read, + .isr = vfe_isr, + .pm_domain_off = vfe_pm_domain_off, + .pm_domain_on = vfe_pm_domain_on, + .reg_update_clear = vfe_reg_update_clear, + .reg_update = vfe_reg_update, + .subdev_init = vfe_subdev_init, + .vfe_disable = vfe_gen1_disable, + .vfe_enable = vfe_gen1_enable, + .vfe_halt = vfe_gen1_halt, + .violation_read = vfe_violation_read, +}; diff --git a/drivers/media/platform/qcom/camss/camss-vfe-4-7.c b/drivers/media/platform/qcom/camss/camss-vfe-4-7.c new file mode 100644 index 0000000000..ab2d57bdf5 --- /dev/null +++ b/drivers/media/platform/qcom/camss/camss-vfe-4-7.c @@ -0,0 +1,1209 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * camss-vfe-4-7.c + * + * Qualcomm MSM Camera Subsystem - VFE (Video Front End) Module v4.7 + * + * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved. + * Copyright (C) 2015-2018 Linaro Ltd. 
+ */ + +#include <linux/device.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/iopoll.h> + +#include "camss.h" +#include "camss-vfe.h" +#include "camss-vfe-gen1.h" + + +#define VFE_0_HW_VERSION 0x000 + +#define VFE_0_GLOBAL_RESET_CMD 0x018 +#define VFE_0_GLOBAL_RESET_CMD_CORE BIT(0) +#define VFE_0_GLOBAL_RESET_CMD_CAMIF BIT(1) +#define VFE_0_GLOBAL_RESET_CMD_BUS BIT(2) +#define VFE_0_GLOBAL_RESET_CMD_BUS_BDG BIT(3) +#define VFE_0_GLOBAL_RESET_CMD_REGISTER BIT(4) +#define VFE_0_GLOBAL_RESET_CMD_PM BIT(5) +#define VFE_0_GLOBAL_RESET_CMD_BUS_MISR BIT(6) +#define VFE_0_GLOBAL_RESET_CMD_TESTGEN BIT(7) +#define VFE_0_GLOBAL_RESET_CMD_DSP BIT(8) +#define VFE_0_GLOBAL_RESET_CMD_IDLE_CGC BIT(9) + +#define VFE_0_MODULE_LENS_EN 0x040 +#define VFE_0_MODULE_LENS_EN_DEMUX BIT(2) +#define VFE_0_MODULE_LENS_EN_CHROMA_UPSAMPLE BIT(3) + +#define VFE_0_MODULE_ZOOM_EN 0x04c +#define VFE_0_MODULE_ZOOM_EN_SCALE_ENC BIT(1) +#define VFE_0_MODULE_ZOOM_EN_CROP_ENC BIT(2) +#define VFE_0_MODULE_ZOOM_EN_REALIGN_BUF BIT(9) + +#define VFE_0_CORE_CFG 0x050 +#define VFE_0_CORE_CFG_PIXEL_PATTERN_YCBYCR 0x4 +#define VFE_0_CORE_CFG_PIXEL_PATTERN_YCRYCB 0x5 +#define VFE_0_CORE_CFG_PIXEL_PATTERN_CBYCRY 0x6 +#define VFE_0_CORE_CFG_PIXEL_PATTERN_CRYCBY 0x7 +#define VFE_0_CORE_CFG_COMPOSITE_REG_UPDATE_EN BIT(4) + +#define VFE_0_IRQ_CMD 0x058 +#define VFE_0_IRQ_CMD_GLOBAL_CLEAR BIT(0) + +#define VFE_0_IRQ_MASK_0 0x05c +#define VFE_0_IRQ_MASK_0_CAMIF_SOF BIT(0) +#define VFE_0_IRQ_MASK_0_CAMIF_EOF BIT(1) +#define VFE_0_IRQ_MASK_0_RDIn_REG_UPDATE(n) BIT((n) + 5) +#define VFE_0_IRQ_MASK_0_line_n_REG_UPDATE(n) \ + ((n) == VFE_LINE_PIX ? BIT(4) : VFE_0_IRQ_MASK_0_RDIn_REG_UPDATE(n)) +#define VFE_0_IRQ_MASK_0_IMAGE_MASTER_n_PING_PONG(n) BIT((n) + 8) +#define VFE_0_IRQ_MASK_0_IMAGE_COMPOSITE_DONE_n(n) BIT((n) + 25) +#define VFE_0_IRQ_MASK_0_RESET_ACK BIT(31) +#define VFE_0_IRQ_MASK_1 0x060 +#define VFE_0_IRQ_MASK_1_CAMIF_ERROR BIT(0) +#define VFE_0_IRQ_MASK_1_VIOLATION BIT(7) +#define VFE_0_IRQ_MASK_1_BUS_BDG_HALT_ACK BIT(8) +#define VFE_0_IRQ_MASK_1_IMAGE_MASTER_n_BUS_OVERFLOW(n) BIT((n) + 9) +#define VFE_0_IRQ_MASK_1_RDIn_SOF(n) BIT((n) + 29) + +#define VFE_0_IRQ_CLEAR_0 0x064 +#define VFE_0_IRQ_CLEAR_1 0x068 + +#define VFE_0_IRQ_STATUS_0 0x06c +#define VFE_0_IRQ_STATUS_0_CAMIF_SOF BIT(0) +#define VFE_0_IRQ_STATUS_0_RDIn_REG_UPDATE(n) BIT((n) + 5) +#define VFE_0_IRQ_STATUS_0_line_n_REG_UPDATE(n) \ + ((n) == VFE_LINE_PIX ? 
BIT(4) : VFE_0_IRQ_STATUS_0_RDIn_REG_UPDATE(n)) +#define VFE_0_IRQ_STATUS_0_IMAGE_MASTER_n_PING_PONG(n) BIT((n) + 8) +#define VFE_0_IRQ_STATUS_0_IMAGE_COMPOSITE_DONE_n(n) BIT((n) + 25) +#define VFE_0_IRQ_STATUS_0_RESET_ACK BIT(31) +#define VFE_0_IRQ_STATUS_1 0x070 +#define VFE_0_IRQ_STATUS_1_VIOLATION BIT(7) +#define VFE_0_IRQ_STATUS_1_BUS_BDG_HALT_ACK BIT(8) +#define VFE_0_IRQ_STATUS_1_RDIn_SOF(n) BIT((n) + 29) + +#define VFE_0_IRQ_COMPOSITE_MASK_0 0x074 +#define VFE_0_VIOLATION_STATUS 0x07c + +#define VFE_0_BUS_CMD 0x80 +#define VFE_0_BUS_CMD_Mx_RLD_CMD(x) BIT(x) + +#define VFE_0_BUS_CFG 0x084 + +#define VFE_0_BUS_XBAR_CFG_x(x) (0x90 + 0x4 * ((x) / 2)) +#define VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_EN BIT(2) +#define VFE_0_BUS_XBAR_CFG_x_M_REALIGN_BUF_EN BIT(3) +#define VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_SWAP_INTRA (0x1 << 4) +#define VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_SWAP_INTER (0x2 << 4) +#define VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_SWAP_INTER_INTRA (0x3 << 4) +#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT 8 +#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_LUMA 0x0 +#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI0 0xc +#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI1 0xd +#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI2 0xe + +#define VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(n) (0x0a0 + 0x2c * (n)) +#define VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_WR_PATH_SHIFT 0 +#define VFE_0_BUS_IMAGE_MASTER_n_WR_PING_ADDR(n) (0x0a4 + 0x2c * (n)) +#define VFE_0_BUS_IMAGE_MASTER_n_WR_PONG_ADDR(n) (0x0ac + 0x2c * (n)) +#define VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG(n) (0x0b4 + 0x2c * (n)) +#define VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_BASED_SHIFT 1 +#define VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_SHIFT 2 +#define VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_MASK (0x1f << 2) +#define VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG(n) (0x0b8 + 0x2c * (n)) +#define VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG_OFFSET_SHIFT 16 +#define VFE_0_BUS_IMAGE_MASTER_n_WR_IMAGE_SIZE(n) (0x0bc + 0x2c * (n)) +#define VFE_0_BUS_IMAGE_MASTER_n_WR_BUFFER_CFG(n) (0x0c0 + 0x2c * (n)) +#define VFE_0_BUS_IMAGE_MASTER_n_WR_FRAMEDROP_PATTERN(n) \ + (0x0c4 + 0x2c * (n)) +#define VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN(n) \ + (0x0c8 + 0x2c * (n)) +#define VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN_DEF 0xffffffff + +#define VFE_0_BUS_PING_PONG_STATUS 0x338 + +#define VFE_0_BUS_BDG_CMD 0x400 +#define VFE_0_BUS_BDG_CMD_HALT_REQ 1 + +#define VFE_0_BUS_BDG_QOS_CFG_0 0x404 +#define VFE_0_BUS_BDG_QOS_CFG_0_CFG 0xaaa9aaa9 +#define VFE_0_BUS_BDG_QOS_CFG_1 0x408 +#define VFE_0_BUS_BDG_QOS_CFG_2 0x40c +#define VFE_0_BUS_BDG_QOS_CFG_3 0x410 +#define VFE_0_BUS_BDG_QOS_CFG_4 0x414 +#define VFE_0_BUS_BDG_QOS_CFG_5 0x418 +#define VFE_0_BUS_BDG_QOS_CFG_6 0x41c +#define VFE_0_BUS_BDG_QOS_CFG_7 0x420 +#define VFE_0_BUS_BDG_QOS_CFG_7_CFG 0x0001aaa9 + +#define VFE48_0_BUS_BDG_QOS_CFG_0_CFG 0xaaa5aaa5 +#define VFE48_0_BUS_BDG_QOS_CFG_3_CFG 0xaa55aaa5 +#define VFE48_0_BUS_BDG_QOS_CFG_4_CFG 0xaa55aa55 +#define VFE48_0_BUS_BDG_QOS_CFG_7_CFG 0x0005aa55 + +#define VFE_0_BUS_BDG_DS_CFG_0 0x424 +#define VFE_0_BUS_BDG_DS_CFG_0_CFG 0xcccc0011 +#define VFE_0_BUS_BDG_DS_CFG_1 0x428 +#define VFE_0_BUS_BDG_DS_CFG_2 0x42c +#define VFE_0_BUS_BDG_DS_CFG_3 0x430 +#define VFE_0_BUS_BDG_DS_CFG_4 0x434 +#define VFE_0_BUS_BDG_DS_CFG_5 0x438 +#define VFE_0_BUS_BDG_DS_CFG_6 0x43c +#define VFE_0_BUS_BDG_DS_CFG_7 0x440 +#define VFE_0_BUS_BDG_DS_CFG_8 0x444 +#define VFE_0_BUS_BDG_DS_CFG_9 0x448 +#define VFE_0_BUS_BDG_DS_CFG_10 0x44c +#define 
VFE_0_BUS_BDG_DS_CFG_11 0x450 +#define VFE_0_BUS_BDG_DS_CFG_12 0x454 +#define VFE_0_BUS_BDG_DS_CFG_13 0x458 +#define VFE_0_BUS_BDG_DS_CFG_14 0x45c +#define VFE_0_BUS_BDG_DS_CFG_15 0x460 +#define VFE_0_BUS_BDG_DS_CFG_16 0x464 +#define VFE_0_BUS_BDG_DS_CFG_16_CFG 0x40000103 + +#define VFE48_0_BUS_BDG_DS_CFG_0_CFG 0xcccc1111 +#define VFE48_0_BUS_BDG_DS_CFG_16_CFG 0x00000110 + +#define VFE_0_RDI_CFG_x(x) (0x46c + (0x4 * (x))) +#define VFE_0_RDI_CFG_x_RDI_STREAM_SEL_SHIFT 28 +#define VFE_0_RDI_CFG_x_RDI_STREAM_SEL_MASK (0xf << 28) +#define VFE_0_RDI_CFG_x_RDI_M0_SEL_SHIFT 4 +#define VFE_0_RDI_CFG_x_RDI_M0_SEL_MASK (0xf << 4) +#define VFE_0_RDI_CFG_x_RDI_EN_BIT BIT(2) +#define VFE_0_RDI_CFG_x_MIPI_EN_BITS 0x3 + +#define VFE_0_CAMIF_CMD 0x478 +#define VFE_0_CAMIF_CMD_DISABLE_FRAME_BOUNDARY 0 +#define VFE_0_CAMIF_CMD_ENABLE_FRAME_BOUNDARY 1 +#define VFE_0_CAMIF_CMD_NO_CHANGE 3 +#define VFE_0_CAMIF_CMD_CLEAR_CAMIF_STATUS BIT(2) +#define VFE_0_CAMIF_CFG 0x47c +#define VFE_0_CAMIF_CFG_VFE_OUTPUT_EN BIT(6) +#define VFE_0_CAMIF_FRAME_CFG 0x484 +#define VFE_0_CAMIF_WINDOW_WIDTH_CFG 0x488 +#define VFE_0_CAMIF_WINDOW_HEIGHT_CFG 0x48c +#define VFE_0_CAMIF_SUBSAMPLE_CFG 0x490 +#define VFE_0_CAMIF_IRQ_FRAMEDROP_PATTERN 0x498 +#define VFE_0_CAMIF_IRQ_SUBSAMPLE_PATTERN 0x49c +#define VFE_0_CAMIF_STATUS 0x4a4 +#define VFE_0_CAMIF_STATUS_HALT BIT(31) + +#define VFE_0_REG_UPDATE 0x4ac +#define VFE_0_REG_UPDATE_RDIn(n) BIT(1 + (n)) +#define VFE_0_REG_UPDATE_line_n(n) \ + ((n) == VFE_LINE_PIX ? 1 : VFE_0_REG_UPDATE_RDIn(n)) + +#define VFE_0_DEMUX_CFG 0x560 +#define VFE_0_DEMUX_CFG_PERIOD 0x3 +#define VFE_0_DEMUX_GAIN_0 0x564 +#define VFE_0_DEMUX_GAIN_0_CH0_EVEN (0x80 << 0) +#define VFE_0_DEMUX_GAIN_0_CH0_ODD (0x80 << 16) +#define VFE_0_DEMUX_GAIN_1 0x568 +#define VFE_0_DEMUX_GAIN_1_CH1 (0x80 << 0) +#define VFE_0_DEMUX_GAIN_1_CH2 (0x80 << 16) +#define VFE_0_DEMUX_EVEN_CFG 0x574 +#define VFE_0_DEMUX_EVEN_CFG_PATTERN_YUYV 0x9cac +#define VFE_0_DEMUX_EVEN_CFG_PATTERN_YVYU 0xac9c +#define VFE_0_DEMUX_EVEN_CFG_PATTERN_UYVY 0xc9ca +#define VFE_0_DEMUX_EVEN_CFG_PATTERN_VYUY 0xcac9 +#define VFE_0_DEMUX_ODD_CFG 0x578 +#define VFE_0_DEMUX_ODD_CFG_PATTERN_YUYV 0x9cac +#define VFE_0_DEMUX_ODD_CFG_PATTERN_YVYU 0xac9c +#define VFE_0_DEMUX_ODD_CFG_PATTERN_UYVY 0xc9ca +#define VFE_0_DEMUX_ODD_CFG_PATTERN_VYUY 0xcac9 + +#define VFE_0_SCALE_ENC_Y_CFG 0x91c +#define VFE_0_SCALE_ENC_Y_H_IMAGE_SIZE 0x920 +#define VFE_0_SCALE_ENC_Y_H_PHASE 0x924 +#define VFE_0_SCALE_ENC_Y_V_IMAGE_SIZE 0x934 +#define VFE_0_SCALE_ENC_Y_V_PHASE 0x938 +#define VFE_0_SCALE_ENC_CBCR_CFG 0x948 +#define VFE_0_SCALE_ENC_CBCR_H_IMAGE_SIZE 0x94c +#define VFE_0_SCALE_ENC_CBCR_H_PHASE 0x950 +#define VFE_0_SCALE_ENC_CBCR_V_IMAGE_SIZE 0x960 +#define VFE_0_SCALE_ENC_CBCR_V_PHASE 0x964 + +#define VFE_0_CROP_ENC_Y_WIDTH 0x974 +#define VFE_0_CROP_ENC_Y_HEIGHT 0x978 +#define VFE_0_CROP_ENC_CBCR_WIDTH 0x97c +#define VFE_0_CROP_ENC_CBCR_HEIGHT 0x980 + +#define VFE_0_CLAMP_ENC_MAX_CFG 0x984 +#define VFE_0_CLAMP_ENC_MAX_CFG_CH0 (0xff << 0) +#define VFE_0_CLAMP_ENC_MAX_CFG_CH1 (0xff << 8) +#define VFE_0_CLAMP_ENC_MAX_CFG_CH2 (0xff << 16) +#define VFE_0_CLAMP_ENC_MIN_CFG 0x988 +#define VFE_0_CLAMP_ENC_MIN_CFG_CH0 (0x0 << 0) +#define VFE_0_CLAMP_ENC_MIN_CFG_CH1 (0x0 << 8) +#define VFE_0_CLAMP_ENC_MIN_CFG_CH2 (0x0 << 16) + +#define VFE_0_REALIGN_BUF_CFG 0xaac +#define VFE_0_REALIGN_BUF_CFG_CB_ODD_PIXEL BIT(2) +#define VFE_0_REALIGN_BUF_CFG_CR_ODD_PIXEL BIT(3) +#define VFE_0_REALIGN_BUF_CFG_HSUB_ENABLE BIT(4) + +#define VFE48_0_BUS_IMAGE_MASTER_CMD 0xcec +#define 
VFE48_0_BUS_IMAGE_MASTER_n_SHIFT(x) (2 * (x)) + +#define CAMIF_TIMEOUT_SLEEP_US 1000 +#define CAMIF_TIMEOUT_ALL_US 1000000 + +#define MSM_VFE_VFE0_UB_SIZE 2047 +#define MSM_VFE_VFE0_UB_SIZE_RDI (MSM_VFE_VFE0_UB_SIZE / 3) +#define MSM_VFE_VFE1_UB_SIZE 1535 +#define MSM_VFE_VFE1_UB_SIZE_RDI (MSM_VFE_VFE1_UB_SIZE / 3) + +static u32 vfe_hw_version(struct vfe_device *vfe) +{ + u32 hw_version = readl_relaxed(vfe->base + VFE_0_HW_VERSION); + + dev_dbg(vfe->camss->dev, "VFE HW Version = 0x%08x\n", hw_version); + + return hw_version; +} + +static u16 vfe_get_ub_size(u8 vfe_id) +{ + if (vfe_id == 0) + return MSM_VFE_VFE0_UB_SIZE_RDI; + else if (vfe_id == 1) + return MSM_VFE_VFE1_UB_SIZE_RDI; + + return 0; +} + +static inline void vfe_reg_clr(struct vfe_device *vfe, u32 reg, u32 clr_bits) +{ + u32 bits = readl_relaxed(vfe->base + reg); + + writel_relaxed(bits & ~clr_bits, vfe->base + reg); +} + +static inline void vfe_reg_set(struct vfe_device *vfe, u32 reg, u32 set_bits) +{ + u32 bits = readl_relaxed(vfe->base + reg); + + writel_relaxed(bits | set_bits, vfe->base + reg); +} + +static void vfe_global_reset(struct vfe_device *vfe) +{ + u32 reset_bits = VFE_0_GLOBAL_RESET_CMD_IDLE_CGC | + VFE_0_GLOBAL_RESET_CMD_DSP | + VFE_0_GLOBAL_RESET_CMD_TESTGEN | + VFE_0_GLOBAL_RESET_CMD_BUS_MISR | + VFE_0_GLOBAL_RESET_CMD_PM | + VFE_0_GLOBAL_RESET_CMD_REGISTER | + VFE_0_GLOBAL_RESET_CMD_BUS_BDG | + VFE_0_GLOBAL_RESET_CMD_BUS | + VFE_0_GLOBAL_RESET_CMD_CAMIF | + VFE_0_GLOBAL_RESET_CMD_CORE; + + writel_relaxed(BIT(31), vfe->base + VFE_0_IRQ_MASK_0); + + /* Enforce barrier between IRQ mask setup and global reset */ + wmb(); + writel_relaxed(reset_bits, vfe->base + VFE_0_GLOBAL_RESET_CMD); +} + +static void vfe_halt_request(struct vfe_device *vfe) +{ + writel_relaxed(VFE_0_BUS_BDG_CMD_HALT_REQ, + vfe->base + VFE_0_BUS_BDG_CMD); +} + +static void vfe_halt_clear(struct vfe_device *vfe) +{ + writel_relaxed(0x0, vfe->base + VFE_0_BUS_BDG_CMD); +} + +static void vfe_wm_enable(struct vfe_device *vfe, u8 wm, u8 enable) +{ + if (enable) + vfe_reg_set(vfe, VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(wm), + 1 << VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_WR_PATH_SHIFT); + else + vfe_reg_clr(vfe, VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(wm), + 1 << VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_WR_PATH_SHIFT); +} + +static void vfe_wm_frame_based(struct vfe_device *vfe, u8 wm, u8 enable) +{ + if (enable) + vfe_reg_set(vfe, VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG(wm), + 1 << VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_BASED_SHIFT); + else + vfe_reg_clr(vfe, VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG(wm), + 1 << VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_BASED_SHIFT); +} + +#define CALC_WORD(width, M, N) (((width) * (M) + (N) - 1) / (N)) + +static int vfe_word_per_line_by_pixel(u32 format, u32 pixel_per_line) +{ + int val = 0; + + switch (format) { + case V4L2_PIX_FMT_NV12: + case V4L2_PIX_FMT_NV21: + case V4L2_PIX_FMT_NV16: + case V4L2_PIX_FMT_NV61: + val = CALC_WORD(pixel_per_line, 1, 8); + break; + case V4L2_PIX_FMT_YUYV: + case V4L2_PIX_FMT_YVYU: + case V4L2_PIX_FMT_UYVY: + case V4L2_PIX_FMT_VYUY: + val = CALC_WORD(pixel_per_line, 2, 8); + break; + } + + return val; +} + +static int vfe_word_per_line_by_bytes(u32 bytes_per_line) +{ + return CALC_WORD(bytes_per_line, 1, 8); +} + +static void vfe_get_wm_sizes(struct v4l2_pix_format_mplane *pix, u8 plane, + u16 *width, u16 *height, u16 *bytesperline) +{ + *width = pix->width; + *height = pix->height; + + switch (pix->pixelformat) { + case V4L2_PIX_FMT_NV12: + case V4L2_PIX_FMT_NV21: + *bytesperline = pix->plane_fmt[0].bytesperline; + if 
(plane == 1) + *height /= 2; + break; + case V4L2_PIX_FMT_NV16: + case V4L2_PIX_FMT_NV61: + *bytesperline = pix->plane_fmt[0].bytesperline; + break; + case V4L2_PIX_FMT_YUYV: + case V4L2_PIX_FMT_YVYU: + case V4L2_PIX_FMT_VYUY: + case V4L2_PIX_FMT_UYVY: + *bytesperline = pix->plane_fmt[plane].bytesperline; + break; + } +} + +static void vfe_wm_line_based(struct vfe_device *vfe, u32 wm, + struct v4l2_pix_format_mplane *pix, + u8 plane, u32 enable) +{ + u32 reg; + + if (enable) { + u16 width = 0, height = 0, bytesperline = 0, wpl; + + vfe_get_wm_sizes(pix, plane, &width, &height, &bytesperline); + + wpl = vfe_word_per_line_by_pixel(pix->pixelformat, width); + + reg = height - 1; + reg |= ((wpl + 3) / 4 - 1) << 16; + + writel_relaxed(reg, vfe->base + + VFE_0_BUS_IMAGE_MASTER_n_WR_IMAGE_SIZE(wm)); + + wpl = vfe_word_per_line_by_bytes(bytesperline); + + reg = 0x3; + reg |= (height - 1) << 2; + reg |= ((wpl + 1) / 2) << 16; + + writel_relaxed(reg, vfe->base + + VFE_0_BUS_IMAGE_MASTER_n_WR_BUFFER_CFG(wm)); + } else { + writel_relaxed(0, vfe->base + + VFE_0_BUS_IMAGE_MASTER_n_WR_IMAGE_SIZE(wm)); + writel_relaxed(0, vfe->base + + VFE_0_BUS_IMAGE_MASTER_n_WR_BUFFER_CFG(wm)); + } +} + +static void vfe_wm_set_framedrop_period(struct vfe_device *vfe, u8 wm, u8 per) +{ + u32 reg; + + reg = readl_relaxed(vfe->base + + VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG(wm)); + + reg &= ~(VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_MASK); + + reg |= (per << VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_SHIFT) + & VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_MASK; + + writel_relaxed(reg, + vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG(wm)); +} + +static void vfe_wm_set_framedrop_pattern(struct vfe_device *vfe, u8 wm, + u32 pattern) +{ + writel_relaxed(pattern, + vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_FRAMEDROP_PATTERN(wm)); +} + +static void vfe_wm_set_ub_cfg(struct vfe_device *vfe, u8 wm, + u16 offset, u16 depth) +{ + u32 reg; + + reg = (offset << VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG_OFFSET_SHIFT) | + depth; + writel_relaxed(reg, vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG(wm)); +} + +static void vfe_bus_reload_wm(struct vfe_device *vfe, u8 wm) +{ + /* Enforce barrier between any outstanding register write */ + wmb(); + + writel_relaxed(VFE_0_BUS_CMD_Mx_RLD_CMD(wm), vfe->base + VFE_0_BUS_CMD); + + /* Use barrier to make sure bus reload is issued before anything else */ + wmb(); +} + +static void vfe_wm_set_ping_addr(struct vfe_device *vfe, u8 wm, u32 addr) +{ + writel_relaxed(addr, + vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_PING_ADDR(wm)); +} + +static void vfe_wm_set_pong_addr(struct vfe_device *vfe, u8 wm, u32 addr) +{ + writel_relaxed(addr, + vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_PONG_ADDR(wm)); +} + +static int vfe_wm_get_ping_pong_status(struct vfe_device *vfe, u8 wm) +{ + u32 reg; + + reg = readl_relaxed(vfe->base + VFE_0_BUS_PING_PONG_STATUS); + + return (reg >> wm) & 0x1; +} + +static void vfe_bus_enable_wr_if(struct vfe_device *vfe, u8 enable) +{ + if (enable) + writel_relaxed(0x101, vfe->base + VFE_0_BUS_CFG); + else + writel_relaxed(0, vfe->base + VFE_0_BUS_CFG); +} + +static void vfe_bus_connect_wm_to_rdi(struct vfe_device *vfe, u8 wm, + enum vfe_line_id id) +{ + u32 reg; + + reg = VFE_0_RDI_CFG_x_MIPI_EN_BITS; + vfe_reg_set(vfe, VFE_0_RDI_CFG_x(0), reg); + + reg = VFE_0_RDI_CFG_x_RDI_EN_BIT; + reg |= ((3 * id) << VFE_0_RDI_CFG_x_RDI_STREAM_SEL_SHIFT) & + VFE_0_RDI_CFG_x_RDI_STREAM_SEL_MASK; + vfe_reg_set(vfe, VFE_0_RDI_CFG_x(id), reg); + + switch (id) { + case VFE_LINE_RDI0: + default: + 
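+		/*
+		 * XBAR single-stream select: RDI0/1/2 use values 0xc/0xd/0xe
+		 * on VFE 4.7.  Each XBAR_CFG register covers two write
+		 * masters, so odd masters program the upper 16 bits
+		 * (reg <<= 16 below).
+		 */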
reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI0 << + VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT; + break; + case VFE_LINE_RDI1: + reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI1 << + VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT; + break; + case VFE_LINE_RDI2: + reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI2 << + VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT; + break; + } + + if (wm % 2 == 1) + reg <<= 16; + + vfe_reg_set(vfe, VFE_0_BUS_XBAR_CFG_x(wm), reg); +} + +static void vfe_wm_set_subsample(struct vfe_device *vfe, u8 wm) +{ + writel_relaxed(VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN_DEF, + vfe->base + + VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN(wm)); +} + +static void vfe_bus_disconnect_wm_from_rdi(struct vfe_device *vfe, u8 wm, + enum vfe_line_id id) +{ + u32 reg; + + reg = VFE_0_RDI_CFG_x_RDI_EN_BIT; + vfe_reg_clr(vfe, VFE_0_RDI_CFG_x(id), reg); + + switch (id) { + case VFE_LINE_RDI0: + default: + reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI0 << + VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT; + break; + case VFE_LINE_RDI1: + reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI1 << + VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT; + break; + case VFE_LINE_RDI2: + reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI2 << + VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT; + break; + } + + if (wm % 2 == 1) + reg <<= 16; + + vfe_reg_clr(vfe, VFE_0_BUS_XBAR_CFG_x(wm), reg); +} + +static void vfe_set_xbar_cfg(struct vfe_device *vfe, struct vfe_output *output, + u8 enable) +{ + struct vfe_line *line = container_of(output, struct vfe_line, output); + u32 p = line->video_out.active_fmt.fmt.pix_mp.pixelformat; + u32 reg; + + switch (p) { + case V4L2_PIX_FMT_NV12: + case V4L2_PIX_FMT_NV21: + case V4L2_PIX_FMT_NV16: + case V4L2_PIX_FMT_NV61: + reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_LUMA << + VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT; + + if (output->wm_idx[0] % 2 == 1) + reg <<= 16; + + if (enable) + vfe_reg_set(vfe, + VFE_0_BUS_XBAR_CFG_x(output->wm_idx[0]), + reg); + else + vfe_reg_clr(vfe, + VFE_0_BUS_XBAR_CFG_x(output->wm_idx[0]), + reg); + + reg = VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_EN; + if (p == V4L2_PIX_FMT_NV12 || p == V4L2_PIX_FMT_NV16) + reg |= VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_SWAP_INTER_INTRA; + + if (output->wm_idx[1] % 2 == 1) + reg <<= 16; + + if (enable) + vfe_reg_set(vfe, + VFE_0_BUS_XBAR_CFG_x(output->wm_idx[1]), + reg); + else + vfe_reg_clr(vfe, + VFE_0_BUS_XBAR_CFG_x(output->wm_idx[1]), + reg); + break; + case V4L2_PIX_FMT_YUYV: + case V4L2_PIX_FMT_YVYU: + case V4L2_PIX_FMT_VYUY: + case V4L2_PIX_FMT_UYVY: + reg = VFE_0_BUS_XBAR_CFG_x_M_REALIGN_BUF_EN; + reg |= VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_EN; + + if (p == V4L2_PIX_FMT_YUYV || p == V4L2_PIX_FMT_YVYU) + reg |= VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_SWAP_INTER_INTRA; + + if (output->wm_idx[0] % 2 == 1) + reg <<= 16; + + if (enable) + vfe_reg_set(vfe, + VFE_0_BUS_XBAR_CFG_x(output->wm_idx[0]), + reg); + else + vfe_reg_clr(vfe, + VFE_0_BUS_XBAR_CFG_x(output->wm_idx[0]), + reg); + break; + default: + break; + } +} + +static void vfe_set_realign_cfg(struct vfe_device *vfe, struct vfe_line *line, + u8 enable) +{ + u32 p = line->video_out.active_fmt.fmt.pix_mp.pixelformat; + u32 val = VFE_0_MODULE_ZOOM_EN_REALIGN_BUF; + + if (p != V4L2_PIX_FMT_YUYV && p != V4L2_PIX_FMT_YVYU && + p != V4L2_PIX_FMT_VYUY && p != V4L2_PIX_FMT_UYVY) + return; + + if (enable) { + vfe_reg_set(vfe, VFE_0_MODULE_ZOOM_EN, val); + } else { + vfe_reg_clr(vfe, VFE_0_MODULE_ZOOM_EN, val); 
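+		/* Realign buffer disabled: skip the REALIGN_BUF_CFG programming below. */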
+ return; + } + + val = VFE_0_REALIGN_BUF_CFG_HSUB_ENABLE; + + if (p == V4L2_PIX_FMT_UYVY || p == V4L2_PIX_FMT_YUYV) + val |= VFE_0_REALIGN_BUF_CFG_CR_ODD_PIXEL; + else + val |= VFE_0_REALIGN_BUF_CFG_CB_ODD_PIXEL; + + writel_relaxed(val, vfe->base + VFE_0_REALIGN_BUF_CFG); +} + +static void vfe_set_rdi_cid(struct vfe_device *vfe, enum vfe_line_id id, u8 cid) +{ + vfe_reg_clr(vfe, VFE_0_RDI_CFG_x(id), + VFE_0_RDI_CFG_x_RDI_M0_SEL_MASK); + + vfe_reg_set(vfe, VFE_0_RDI_CFG_x(id), + cid << VFE_0_RDI_CFG_x_RDI_M0_SEL_SHIFT); +} + +static void vfe_reg_update(struct vfe_device *vfe, enum vfe_line_id line_id) +{ + vfe->reg_update |= VFE_0_REG_UPDATE_line_n(line_id); + + /* Enforce barrier between line update and commit */ + wmb(); + writel_relaxed(vfe->reg_update, vfe->base + VFE_0_REG_UPDATE); + + /* Make sure register update is issued before further reg writes */ + wmb(); +} + +static inline void vfe_reg_update_clear(struct vfe_device *vfe, + enum vfe_line_id line_id) +{ + vfe->reg_update &= ~VFE_0_REG_UPDATE_line_n(line_id); +} + +static void vfe_enable_irq_wm_line(struct vfe_device *vfe, u8 wm, + enum vfe_line_id line_id, u8 enable) +{ + u32 irq_en0 = VFE_0_IRQ_MASK_0_IMAGE_MASTER_n_PING_PONG(wm) | + VFE_0_IRQ_MASK_0_line_n_REG_UPDATE(line_id); + u32 irq_en1 = VFE_0_IRQ_MASK_1_IMAGE_MASTER_n_BUS_OVERFLOW(wm) | + VFE_0_IRQ_MASK_1_RDIn_SOF(line_id); + + if (enable) { + vfe_reg_set(vfe, VFE_0_IRQ_MASK_0, irq_en0); + vfe_reg_set(vfe, VFE_0_IRQ_MASK_1, irq_en1); + } else { + vfe_reg_clr(vfe, VFE_0_IRQ_MASK_0, irq_en0); + vfe_reg_clr(vfe, VFE_0_IRQ_MASK_1, irq_en1); + } +} + +static void vfe_enable_irq_pix_line(struct vfe_device *vfe, u8 comp, + enum vfe_line_id line_id, u8 enable) +{ + struct vfe_output *output = &vfe->line[line_id].output; + unsigned int i; + u32 irq_en0; + u32 irq_en1; + u32 comp_mask = 0; + + irq_en0 = VFE_0_IRQ_MASK_0_CAMIF_SOF; + irq_en0 |= VFE_0_IRQ_MASK_0_CAMIF_EOF; + irq_en0 |= VFE_0_IRQ_MASK_0_IMAGE_COMPOSITE_DONE_n(comp); + irq_en0 |= VFE_0_IRQ_MASK_0_line_n_REG_UPDATE(line_id); + irq_en1 = VFE_0_IRQ_MASK_1_CAMIF_ERROR; + for (i = 0; i < output->wm_num; i++) { + irq_en1 |= VFE_0_IRQ_MASK_1_IMAGE_MASTER_n_BUS_OVERFLOW( + output->wm_idx[i]); + comp_mask |= (1 << output->wm_idx[i]) << comp * 8; + } + + if (enable) { + vfe_reg_set(vfe, VFE_0_IRQ_MASK_0, irq_en0); + vfe_reg_set(vfe, VFE_0_IRQ_MASK_1, irq_en1); + vfe_reg_set(vfe, VFE_0_IRQ_COMPOSITE_MASK_0, comp_mask); + } else { + vfe_reg_clr(vfe, VFE_0_IRQ_MASK_0, irq_en0); + vfe_reg_clr(vfe, VFE_0_IRQ_MASK_1, irq_en1); + vfe_reg_clr(vfe, VFE_0_IRQ_COMPOSITE_MASK_0, comp_mask); + } +} + +static void vfe_enable_irq_common(struct vfe_device *vfe) +{ + u32 irq_en0 = VFE_0_IRQ_MASK_0_RESET_ACK; + u32 irq_en1 = VFE_0_IRQ_MASK_1_VIOLATION | + VFE_0_IRQ_MASK_1_BUS_BDG_HALT_ACK; + + vfe_reg_set(vfe, VFE_0_IRQ_MASK_0, irq_en0); + vfe_reg_set(vfe, VFE_0_IRQ_MASK_1, irq_en1); +} + +static void vfe_set_demux_cfg(struct vfe_device *vfe, struct vfe_line *line) +{ + u32 val, even_cfg, odd_cfg; + + writel_relaxed(VFE_0_DEMUX_CFG_PERIOD, vfe->base + VFE_0_DEMUX_CFG); + + val = VFE_0_DEMUX_GAIN_0_CH0_EVEN | VFE_0_DEMUX_GAIN_0_CH0_ODD; + writel_relaxed(val, vfe->base + VFE_0_DEMUX_GAIN_0); + + val = VFE_0_DEMUX_GAIN_1_CH1 | VFE_0_DEMUX_GAIN_1_CH2; + writel_relaxed(val, vfe->base + VFE_0_DEMUX_GAIN_1); + + switch (line->fmt[MSM_VFE_PAD_SINK].code) { + case MEDIA_BUS_FMT_YUYV8_2X8: + even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_YUYV; + odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_YUYV; + break; + case MEDIA_BUS_FMT_YVYU8_2X8: + even_cfg = 
VFE_0_DEMUX_EVEN_CFG_PATTERN_YVYU; + odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_YVYU; + break; + case MEDIA_BUS_FMT_UYVY8_2X8: + default: + even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_UYVY; + odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_UYVY; + break; + case MEDIA_BUS_FMT_VYUY8_2X8: + even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_VYUY; + odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_VYUY; + break; + } + + writel_relaxed(even_cfg, vfe->base + VFE_0_DEMUX_EVEN_CFG); + writel_relaxed(odd_cfg, vfe->base + VFE_0_DEMUX_ODD_CFG); +} + +static void vfe_set_scale_cfg(struct vfe_device *vfe, struct vfe_line *line) +{ + u32 p = line->video_out.active_fmt.fmt.pix_mp.pixelformat; + u32 reg; + u16 input, output; + u8 interp_reso; + u32 phase_mult; + + writel_relaxed(0x3, vfe->base + VFE_0_SCALE_ENC_Y_CFG); + + input = line->fmt[MSM_VFE_PAD_SINK].width - 1; + output = line->compose.width - 1; + reg = (output << 16) | input; + writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_Y_H_IMAGE_SIZE); + + interp_reso = vfe_calc_interp_reso(input, output); + phase_mult = input * (1 << (14 + interp_reso)) / output; + reg = (interp_reso << 28) | phase_mult; + writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_Y_H_PHASE); + + input = line->fmt[MSM_VFE_PAD_SINK].height - 1; + output = line->compose.height - 1; + reg = (output << 16) | input; + writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_Y_V_IMAGE_SIZE); + + interp_reso = vfe_calc_interp_reso(input, output); + phase_mult = input * (1 << (14 + interp_reso)) / output; + reg = (interp_reso << 28) | phase_mult; + writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_Y_V_PHASE); + + writel_relaxed(0x3, vfe->base + VFE_0_SCALE_ENC_CBCR_CFG); + + input = line->fmt[MSM_VFE_PAD_SINK].width - 1; + output = line->compose.width / 2 - 1; + reg = (output << 16) | input; + writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_CBCR_H_IMAGE_SIZE); + + interp_reso = vfe_calc_interp_reso(input, output); + phase_mult = input * (1 << (14 + interp_reso)) / output; + reg = (interp_reso << 28) | phase_mult; + writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_CBCR_H_PHASE); + + input = line->fmt[MSM_VFE_PAD_SINK].height - 1; + output = line->compose.height - 1; + if (p == V4L2_PIX_FMT_NV12 || p == V4L2_PIX_FMT_NV21) + output = line->compose.height / 2 - 1; + reg = (output << 16) | input; + writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_CBCR_V_IMAGE_SIZE); + + interp_reso = vfe_calc_interp_reso(input, output); + phase_mult = input * (1 << (14 + interp_reso)) / output; + reg = (interp_reso << 28) | phase_mult; + writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_CBCR_V_PHASE); +} + +static void vfe_set_crop_cfg(struct vfe_device *vfe, struct vfe_line *line) +{ + u32 p = line->video_out.active_fmt.fmt.pix_mp.pixelformat; + u32 reg; + u16 first, last; + + first = line->crop.left; + last = line->crop.left + line->crop.width - 1; + reg = (first << 16) | last; + writel_relaxed(reg, vfe->base + VFE_0_CROP_ENC_Y_WIDTH); + + first = line->crop.top; + last = line->crop.top + line->crop.height - 1; + reg = (first << 16) | last; + writel_relaxed(reg, vfe->base + VFE_0_CROP_ENC_Y_HEIGHT); + + first = line->crop.left / 2; + last = line->crop.left / 2 + line->crop.width / 2 - 1; + reg = (first << 16) | last; + writel_relaxed(reg, vfe->base + VFE_0_CROP_ENC_CBCR_WIDTH); + + first = line->crop.top; + last = line->crop.top + line->crop.height - 1; + if (p == V4L2_PIX_FMT_NV12 || p == V4L2_PIX_FMT_NV21) { + first = line->crop.top / 2; + last = line->crop.top / 2 + line->crop.height / 2 - 1; + } + reg = (first << 16) | last; + writel_relaxed(reg, vfe->base + 
VFE_0_CROP_ENC_CBCR_HEIGHT); +} + +static void vfe_set_clamp_cfg(struct vfe_device *vfe) +{ + u32 val = VFE_0_CLAMP_ENC_MAX_CFG_CH0 | + VFE_0_CLAMP_ENC_MAX_CFG_CH1 | + VFE_0_CLAMP_ENC_MAX_CFG_CH2; + + writel_relaxed(val, vfe->base + VFE_0_CLAMP_ENC_MAX_CFG); + + val = VFE_0_CLAMP_ENC_MIN_CFG_CH0 | + VFE_0_CLAMP_ENC_MIN_CFG_CH1 | + VFE_0_CLAMP_ENC_MIN_CFG_CH2; + + writel_relaxed(val, vfe->base + VFE_0_CLAMP_ENC_MIN_CFG); +} + +static void vfe_set_qos(struct vfe_device *vfe) +{ + u32 val = VFE_0_BUS_BDG_QOS_CFG_0_CFG; + u32 val7 = VFE_0_BUS_BDG_QOS_CFG_7_CFG; + + writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_0); + writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_1); + writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_2); + writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_3); + writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_4); + writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_5); + writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_6); + writel_relaxed(val7, vfe->base + VFE_0_BUS_BDG_QOS_CFG_7); +} + +static void vfe_set_ds(struct vfe_device *vfe) +{ + u32 val = VFE_0_BUS_BDG_DS_CFG_0_CFG; + u32 val16 = VFE_0_BUS_BDG_DS_CFG_16_CFG; + + writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_0); + writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_1); + writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_2); + writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_3); + writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_4); + writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_5); + writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_6); + writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_7); + writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_8); + writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_9); + writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_10); + writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_11); + writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_12); + writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_13); + writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_14); + writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_15); + writel_relaxed(val16, vfe->base + VFE_0_BUS_BDG_DS_CFG_16); +} + +static void vfe_set_cgc_override(struct vfe_device *vfe, u8 wm, u8 enable) +{ + /* empty */ +} + +static void vfe_set_camif_cfg(struct vfe_device *vfe, struct vfe_line *line) +{ + u32 val; + + switch (line->fmt[MSM_VFE_PAD_SINK].code) { + case MEDIA_BUS_FMT_YUYV8_2X8: + val = VFE_0_CORE_CFG_PIXEL_PATTERN_YCBYCR; + break; + case MEDIA_BUS_FMT_YVYU8_2X8: + val = VFE_0_CORE_CFG_PIXEL_PATTERN_YCRYCB; + break; + case MEDIA_BUS_FMT_UYVY8_2X8: + default: + val = VFE_0_CORE_CFG_PIXEL_PATTERN_CBYCRY; + break; + case MEDIA_BUS_FMT_VYUY8_2X8: + val = VFE_0_CORE_CFG_PIXEL_PATTERN_CRYCBY; + break; + } + + val |= VFE_0_CORE_CFG_COMPOSITE_REG_UPDATE_EN; + writel_relaxed(val, vfe->base + VFE_0_CORE_CFG); + + val = line->fmt[MSM_VFE_PAD_SINK].width * 2 - 1; + val |= (line->fmt[MSM_VFE_PAD_SINK].height - 1) << 16; + writel_relaxed(val, vfe->base + VFE_0_CAMIF_FRAME_CFG); + + val = line->fmt[MSM_VFE_PAD_SINK].width * 2 - 1; + writel_relaxed(val, vfe->base + VFE_0_CAMIF_WINDOW_WIDTH_CFG); + + val = line->fmt[MSM_VFE_PAD_SINK].height - 1; + writel_relaxed(val, vfe->base + VFE_0_CAMIF_WINDOW_HEIGHT_CFG); + + val = 0xffffffff; + writel_relaxed(val, vfe->base + VFE_0_CAMIF_SUBSAMPLE_CFG); + + val = 0xffffffff; + writel_relaxed(val, vfe->base + VFE_0_CAMIF_IRQ_FRAMEDROP_PATTERN); + + val = 0xffffffff; + writel_relaxed(val, vfe->base + 
VFE_0_CAMIF_IRQ_SUBSAMPLE_PATTERN); + + val = VFE_0_RDI_CFG_x_MIPI_EN_BITS; + vfe_reg_set(vfe, VFE_0_RDI_CFG_x(0), val); + + val = VFE_0_CAMIF_CFG_VFE_OUTPUT_EN; + writel_relaxed(val, vfe->base + VFE_0_CAMIF_CFG); +} + +static void vfe_set_camif_cmd(struct vfe_device *vfe, u8 enable) +{ + u32 cmd; + + cmd = VFE_0_CAMIF_CMD_CLEAR_CAMIF_STATUS | VFE_0_CAMIF_CMD_NO_CHANGE; + writel_relaxed(cmd, vfe->base + VFE_0_CAMIF_CMD); + + /* Make sure camif command is issued written before it is changed again */ + wmb(); + + if (enable) + cmd = VFE_0_CAMIF_CMD_ENABLE_FRAME_BOUNDARY; + else + cmd = VFE_0_CAMIF_CMD_DISABLE_FRAME_BOUNDARY; + + writel_relaxed(cmd, vfe->base + VFE_0_CAMIF_CMD); +} + +static void vfe_set_module_cfg(struct vfe_device *vfe, u8 enable) +{ + u32 val_lens = VFE_0_MODULE_LENS_EN_DEMUX | + VFE_0_MODULE_LENS_EN_CHROMA_UPSAMPLE; + u32 val_zoom = VFE_0_MODULE_ZOOM_EN_SCALE_ENC | + VFE_0_MODULE_ZOOM_EN_CROP_ENC; + + if (enable) { + vfe_reg_set(vfe, VFE_0_MODULE_LENS_EN, val_lens); + vfe_reg_set(vfe, VFE_0_MODULE_ZOOM_EN, val_zoom); + } else { + vfe_reg_clr(vfe, VFE_0_MODULE_LENS_EN, val_lens); + vfe_reg_clr(vfe, VFE_0_MODULE_ZOOM_EN, val_zoom); + } +} + +static int vfe_camif_wait_for_stop(struct vfe_device *vfe, struct device *dev) +{ + u32 val; + int ret; + + ret = readl_poll_timeout(vfe->base + VFE_0_CAMIF_STATUS, + val, + (val & VFE_0_CAMIF_STATUS_HALT), + CAMIF_TIMEOUT_SLEEP_US, + CAMIF_TIMEOUT_ALL_US); + if (ret < 0) + dev_err(dev, "%s: camif stop timeout\n", __func__); + + return ret; +} + + + +/* + * vfe_isr - VFE module interrupt handler + * @irq: Interrupt line + * @dev: VFE device + * + * Return IRQ_HANDLED on success + */ +static irqreturn_t vfe_isr(int irq, void *dev) +{ + struct vfe_device *vfe = dev; + u32 value0, value1; + int i, j; + + vfe->ops->isr_read(vfe, &value0, &value1); + + dev_dbg(vfe->camss->dev, "VFE: status0 = 0x%08x, status1 = 0x%08x\n", + value0, value1); + + if (value0 & VFE_0_IRQ_STATUS_0_RESET_ACK) + vfe->isr_ops.reset_ack(vfe); + + if (value1 & VFE_0_IRQ_STATUS_1_VIOLATION) + vfe->ops->violation_read(vfe); + + if (value1 & VFE_0_IRQ_STATUS_1_BUS_BDG_HALT_ACK) + vfe->isr_ops.halt_ack(vfe); + + for (i = VFE_LINE_RDI0; i < vfe->line_num; i++) + if (value0 & VFE_0_IRQ_STATUS_0_line_n_REG_UPDATE(i)) + vfe->isr_ops.reg_update(vfe, i); + + if (value0 & VFE_0_IRQ_STATUS_0_CAMIF_SOF) + vfe->isr_ops.sof(vfe, VFE_LINE_PIX); + + for (i = VFE_LINE_RDI0; i <= VFE_LINE_RDI2; i++) + if (value1 & VFE_0_IRQ_STATUS_1_RDIn_SOF(i)) + vfe->isr_ops.sof(vfe, i); + + for (i = 0; i < MSM_VFE_COMPOSITE_IRQ_NUM; i++) + if (value0 & VFE_0_IRQ_STATUS_0_IMAGE_COMPOSITE_DONE_n(i)) { + vfe->isr_ops.comp_done(vfe, i); + for (j = 0; j < ARRAY_SIZE(vfe->wm_output_map); j++) + if (vfe->wm_output_map[j] == VFE_LINE_PIX) + value0 &= ~VFE_0_IRQ_MASK_0_IMAGE_MASTER_n_PING_PONG(j); + } + + for (i = 0; i < MSM_VFE_IMAGE_MASTERS_NUM; i++) + if (value0 & VFE_0_IRQ_STATUS_0_IMAGE_MASTER_n_PING_PONG(i)) + vfe->isr_ops.wm_done(vfe, i); + + return IRQ_HANDLED; +} + +static void vfe_isr_read(struct vfe_device *vfe, u32 *value0, u32 *value1) +{ + *value0 = readl_relaxed(vfe->base + VFE_0_IRQ_STATUS_0); + *value1 = readl_relaxed(vfe->base + VFE_0_IRQ_STATUS_1); + + writel_relaxed(*value0, vfe->base + VFE_0_IRQ_CLEAR_0); + writel_relaxed(*value1, vfe->base + VFE_0_IRQ_CLEAR_1); + + /* Enforce barrier between local & global IRQ clear */ + wmb(); + writel_relaxed(VFE_0_IRQ_CMD_GLOBAL_CLEAR, vfe->base + VFE_0_IRQ_CMD); +} + +/* + * vfe_pm_domain_off - Disable power domains specific to this VFE. 
+ * @vfe: VFE Device + */ +static void vfe_pm_domain_off(struct vfe_device *vfe) +{ + struct camss *camss; + + if (!vfe) + return; + + camss = vfe->camss; + + device_link_del(camss->genpd_link[vfe->id]); +} + +/* + * vfe_pm_domain_on - Enable power domains specific to this VFE. + * @vfe: VFE Device + */ +static int vfe_pm_domain_on(struct vfe_device *vfe) +{ + struct camss *camss = vfe->camss; + enum vfe_line_id id = vfe->id; + + camss->genpd_link[id] = device_link_add(camss->dev, camss->genpd[id], DL_FLAG_STATELESS | + DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE); + + if (!camss->genpd_link[id]) { + dev_err(vfe->camss->dev, "Failed to add VFE#%d to power domain\n", id); + return -EINVAL; + } + + return 0; +} + +static void vfe_violation_read(struct vfe_device *vfe) +{ + u32 violation = readl_relaxed(vfe->base + VFE_0_VIOLATION_STATUS); + + pr_err_ratelimited("VFE: violation = 0x%08x\n", violation); +} + +static const struct vfe_hw_ops_gen1 vfe_ops_gen1_4_7 = { + .bus_connect_wm_to_rdi = vfe_bus_connect_wm_to_rdi, + .bus_disconnect_wm_from_rdi = vfe_bus_disconnect_wm_from_rdi, + .bus_enable_wr_if = vfe_bus_enable_wr_if, + .bus_reload_wm = vfe_bus_reload_wm, + .camif_wait_for_stop = vfe_camif_wait_for_stop, + .enable_irq_common = vfe_enable_irq_common, + .enable_irq_pix_line = vfe_enable_irq_pix_line, + .enable_irq_wm_line = vfe_enable_irq_wm_line, + .get_ub_size = vfe_get_ub_size, + .halt_clear = vfe_halt_clear, + .halt_request = vfe_halt_request, + .set_camif_cfg = vfe_set_camif_cfg, + .set_camif_cmd = vfe_set_camif_cmd, + .set_cgc_override = vfe_set_cgc_override, + .set_clamp_cfg = vfe_set_clamp_cfg, + .set_crop_cfg = vfe_set_crop_cfg, + .set_demux_cfg = vfe_set_demux_cfg, + .set_ds = vfe_set_ds, + .set_module_cfg = vfe_set_module_cfg, + .set_qos = vfe_set_qos, + .set_rdi_cid = vfe_set_rdi_cid, + .set_realign_cfg = vfe_set_realign_cfg, + .set_scale_cfg = vfe_set_scale_cfg, + .set_xbar_cfg = vfe_set_xbar_cfg, + .wm_enable = vfe_wm_enable, + .wm_frame_based = vfe_wm_frame_based, + .wm_get_ping_pong_status = vfe_wm_get_ping_pong_status, + .wm_line_based = vfe_wm_line_based, + .wm_set_framedrop_pattern = vfe_wm_set_framedrop_pattern, + .wm_set_framedrop_period = vfe_wm_set_framedrop_period, + .wm_set_ping_addr = vfe_wm_set_ping_addr, + .wm_set_pong_addr = vfe_wm_set_pong_addr, + .wm_set_subsample = vfe_wm_set_subsample, + .wm_set_ub_cfg = vfe_wm_set_ub_cfg, +}; + +static void vfe_subdev_init(struct device *dev, struct vfe_device *vfe) +{ + vfe->isr_ops = vfe_isr_ops_gen1; + vfe->ops_gen1 = &vfe_ops_gen1_4_7; + vfe->video_ops = vfe_video_ops_gen1; + + vfe->line_num = VFE_LINE_NUM_GEN1; +} + +const struct vfe_hw_ops vfe_ops_4_7 = { + .global_reset = vfe_global_reset, + .hw_version = vfe_hw_version, + .isr_read = vfe_isr_read, + .isr = vfe_isr, + .pm_domain_off = vfe_pm_domain_off, + .pm_domain_on = vfe_pm_domain_on, + .reg_update_clear = vfe_reg_update_clear, + .reg_update = vfe_reg_update, + .subdev_init = vfe_subdev_init, + .vfe_disable = vfe_gen1_disable, + .vfe_enable = vfe_gen1_enable, + .vfe_halt = vfe_gen1_halt, + .violation_read = vfe_violation_read, +}; diff --git a/drivers/media/platform/qcom/camss/camss-vfe-4-8.c b/drivers/media/platform/qcom/camss/camss-vfe-4-8.c new file mode 100644 index 0000000000..7e6b62c930 --- /dev/null +++ b/drivers/media/platform/qcom/camss/camss-vfe-4-8.c @@ -0,0 +1,1194 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * camss-vfe-4-8.c + * + * Qualcomm MSM Camera Subsystem - VFE (Video Front End) Module v4.8 + * + * Copyright (c) 2013-2015, The Linux Foundation. 
All rights reserved. + * Copyright (C) 2015-2021 Linaro Ltd. + */ + +#include <linux/device.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/iopoll.h> + +#include "camss.h" +#include "camss-vfe.h" +#include "camss-vfe-gen1.h" + +#define VFE_0_HW_VERSION 0x000 + +#define VFE_0_GLOBAL_RESET_CMD 0x018 +#define VFE_0_GLOBAL_RESET_CMD_CORE BIT(0) +#define VFE_0_GLOBAL_RESET_CMD_CAMIF BIT(1) +#define VFE_0_GLOBAL_RESET_CMD_BUS BIT(2) +#define VFE_0_GLOBAL_RESET_CMD_BUS_BDG BIT(3) +#define VFE_0_GLOBAL_RESET_CMD_REGISTER BIT(4) +#define VFE_0_GLOBAL_RESET_CMD_PM BIT(5) +#define VFE_0_GLOBAL_RESET_CMD_BUS_MISR BIT(6) +#define VFE_0_GLOBAL_RESET_CMD_TESTGEN BIT(7) +#define VFE_0_GLOBAL_RESET_CMD_DSP BIT(8) +#define VFE_0_GLOBAL_RESET_CMD_IDLE_CGC BIT(9) + +#define VFE_0_MODULE_LENS_EN 0x040 +#define VFE_0_MODULE_LENS_EN_DEMUX BIT(2) +#define VFE_0_MODULE_LENS_EN_CHROMA_UPSAMPLE BIT(3) + +#define VFE_0_MODULE_ZOOM_EN 0x04c +#define VFE_0_MODULE_ZOOM_EN_SCALE_ENC BIT(1) +#define VFE_0_MODULE_ZOOM_EN_CROP_ENC BIT(2) +#define VFE_0_MODULE_ZOOM_EN_REALIGN_BUF BIT(9) + +#define VFE_0_CORE_CFG 0x050 +#define VFE_0_CORE_CFG_PIXEL_PATTERN_YCBYCR 0x4 +#define VFE_0_CORE_CFG_PIXEL_PATTERN_YCRYCB 0x5 +#define VFE_0_CORE_CFG_PIXEL_PATTERN_CBYCRY 0x6 +#define VFE_0_CORE_CFG_PIXEL_PATTERN_CRYCBY 0x7 +#define VFE_0_CORE_CFG_COMPOSITE_REG_UPDATE_EN BIT(4) + +#define VFE_0_IRQ_CMD 0x058 +#define VFE_0_IRQ_CMD_GLOBAL_CLEAR BIT(0) + +#define VFE_0_IRQ_MASK_0 0x05c +#define VFE_0_IRQ_MASK_0_CAMIF_SOF BIT(0) +#define VFE_0_IRQ_MASK_0_CAMIF_EOF BIT(1) +#define VFE_0_IRQ_MASK_0_RDIn_REG_UPDATE(n) BIT((n) + 5) +#define VFE_0_IRQ_MASK_0_line_n_REG_UPDATE(n) \ + ((n) == VFE_LINE_PIX ? BIT(4) : VFE_0_IRQ_MASK_0_RDIn_REG_UPDATE(n)) +#define VFE_0_IRQ_MASK_0_IMAGE_MASTER_n_PING_PONG(n) BIT((n) + 8) +#define VFE_0_IRQ_MASK_0_IMAGE_COMPOSITE_DONE_n(n) BIT((n) + 25) +#define VFE_0_IRQ_MASK_0_RESET_ACK BIT(31) +#define VFE_0_IRQ_MASK_1 0x060 +#define VFE_0_IRQ_MASK_1_CAMIF_ERROR BIT(0) +#define VFE_0_IRQ_MASK_1_VIOLATION BIT(7) +#define VFE_0_IRQ_MASK_1_BUS_BDG_HALT_ACK BIT(8) +#define VFE_0_IRQ_MASK_1_IMAGE_MASTER_n_BUS_OVERFLOW(n) BIT((n) + 9) +#define VFE_0_IRQ_MASK_1_RDIn_SOF(n) BIT((n) + 29) + +#define VFE_0_IRQ_CLEAR_0 0x064 +#define VFE_0_IRQ_CLEAR_1 0x068 + +#define VFE_0_IRQ_STATUS_0 0x06c +#define VFE_0_IRQ_STATUS_0_CAMIF_SOF BIT(0) +#define VFE_0_IRQ_STATUS_0_RDIn_REG_UPDATE(n) BIT((n) + 5) +#define VFE_0_IRQ_STATUS_0_line_n_REG_UPDATE(n) \ + ((n) == VFE_LINE_PIX ? 
BIT(4) : VFE_0_IRQ_STATUS_0_RDIn_REG_UPDATE(n)) +#define VFE_0_IRQ_STATUS_0_IMAGE_MASTER_n_PING_PONG(n) BIT((n) + 8) +#define VFE_0_IRQ_STATUS_0_IMAGE_COMPOSITE_DONE_n(n) BIT((n) + 25) +#define VFE_0_IRQ_STATUS_0_RESET_ACK BIT(31) +#define VFE_0_IRQ_STATUS_1 0x070 +#define VFE_0_IRQ_STATUS_1_VIOLATION BIT(7) +#define VFE_0_IRQ_STATUS_1_BUS_BDG_HALT_ACK BIT(8) +#define VFE_0_IRQ_STATUS_1_RDIn_SOF(n) BIT((n) + 29) + +#define VFE_0_IRQ_COMPOSITE_MASK_0 0x074 +#define VFE_0_VIOLATION_STATUS 0x07c + +#define VFE_0_BUS_CMD 0x80 +#define VFE_0_BUS_CMD_Mx_RLD_CMD(x) BIT(x) + +#define VFE_0_BUS_CFG 0x084 + +#define VFE_0_BUS_XBAR_CFG_x(x) (0x90 + 0x4 * ((x) / 2)) +#define VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_EN BIT(2) +#define VFE_0_BUS_XBAR_CFG_x_M_REALIGN_BUF_EN BIT(3) +#define VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_SWAP_INTRA (0x1 << 4) +#define VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_SWAP_INTER (0x2 << 4) +#define VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_SWAP_INTER_INTRA (0x3 << 4) +#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT 8 +#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_LUMA 0x0 +#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI0 0xc +#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI1 0xd +#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI2 0xe + +#define VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(n) (0x0a0 + 0x2c * (n)) +#define VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_WR_PATH_SHIFT 0 +#define VFE_0_BUS_IMAGE_MASTER_n_WR_PING_ADDR(n) (0x0a4 + 0x2c * (n)) +#define VFE_0_BUS_IMAGE_MASTER_n_WR_PONG_ADDR(n) (0x0ac + 0x2c * (n)) +#define VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG(n) (0x0b4 + 0x2c * (n)) +#define VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_BASED_SHIFT 1 +#define VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_SHIFT 2 +#define VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_MASK (0x1f << 2) +#define VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG(n) (0x0b8 + 0x2c * (n)) +#define VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG_OFFSET_SHIFT 16 +#define VFE_0_BUS_IMAGE_MASTER_n_WR_IMAGE_SIZE(n) (0x0bc + 0x2c * (n)) +#define VFE_0_BUS_IMAGE_MASTER_n_WR_BUFFER_CFG(n) (0x0c0 + 0x2c * (n)) +#define VFE_0_BUS_IMAGE_MASTER_n_WR_FRAMEDROP_PATTERN(n) \ + (0x0c4 + 0x2c * (n)) +#define VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN(n) \ + (0x0c8 + 0x2c * (n)) +#define VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN_DEF 0xffffffff + +#define VFE_0_BUS_PING_PONG_STATUS 0x338 + +#define VFE_0_BUS_BDG_CMD 0x400 +#define VFE_0_BUS_BDG_CMD_HALT_REQ 1 + +#define VFE_0_BUS_BDG_QOS_CFG_0 0x404 +#define VFE_0_BUS_BDG_QOS_CFG_0_CFG 0xaaa5aaa5 +#define VFE_0_BUS_BDG_QOS_CFG_1 0x408 +#define VFE_0_BUS_BDG_QOS_CFG_2 0x40c +#define VFE_0_BUS_BDG_QOS_CFG_3 0x410 +#define VFE_0_BUS_BDG_QOS_CFG_3_CFG 0xaa55aaa5 +#define VFE_0_BUS_BDG_QOS_CFG_4 0x414 +#define VFE_0_BUS_BDG_QOS_CFG_4_CFG 0xaa55aa55 +#define VFE_0_BUS_BDG_QOS_CFG_5 0x418 +#define VFE_0_BUS_BDG_QOS_CFG_6 0x41c +#define VFE_0_BUS_BDG_QOS_CFG_7 0x420 +#define VFE_0_BUS_BDG_QOS_CFG_7_CFG 0x0005aa55 + +#define VFE_0_BUS_BDG_DS_CFG_0 0x424 +#define VFE_0_BUS_BDG_DS_CFG_0_CFG 0xcccc1111 +#define VFE_0_BUS_BDG_DS_CFG_1 0x428 +#define VFE_0_BUS_BDG_DS_CFG_2 0x42c +#define VFE_0_BUS_BDG_DS_CFG_3 0x430 +#define VFE_0_BUS_BDG_DS_CFG_4 0x434 +#define VFE_0_BUS_BDG_DS_CFG_5 0x438 +#define VFE_0_BUS_BDG_DS_CFG_6 0x43c +#define VFE_0_BUS_BDG_DS_CFG_7 0x440 +#define VFE_0_BUS_BDG_DS_CFG_8 0x444 +#define VFE_0_BUS_BDG_DS_CFG_9 0x448 +#define VFE_0_BUS_BDG_DS_CFG_10 0x44c +#define VFE_0_BUS_BDG_DS_CFG_11 0x450 +#define VFE_0_BUS_BDG_DS_CFG_12 0x454 +#define VFE_0_BUS_BDG_DS_CFG_13 0x458 
+#define VFE_0_BUS_BDG_DS_CFG_14 0x45c +#define VFE_0_BUS_BDG_DS_CFG_15 0x460 +#define VFE_0_BUS_BDG_DS_CFG_16 0x464 +#define VFE_0_BUS_BDG_DS_CFG_16_CFG 0x00000110 + +#define VFE_0_RDI_CFG_x(x) (0x46c + (0x4 * (x))) +#define VFE_0_RDI_CFG_x_RDI_STREAM_SEL_SHIFT 28 +#define VFE_0_RDI_CFG_x_RDI_STREAM_SEL_MASK (0xf << 28) +#define VFE_0_RDI_CFG_x_RDI_M0_SEL_SHIFT 4 +#define VFE_0_RDI_CFG_x_RDI_M0_SEL_MASK (0xf << 4) +#define VFE_0_RDI_CFG_x_RDI_EN_BIT BIT(2) +#define VFE_0_RDI_CFG_x_MIPI_EN_BITS 0x3 + +#define VFE_0_CAMIF_CMD 0x478 +#define VFE_0_CAMIF_CMD_DISABLE_FRAME_BOUNDARY 0 +#define VFE_0_CAMIF_CMD_ENABLE_FRAME_BOUNDARY 1 +#define VFE_0_CAMIF_CMD_NO_CHANGE 3 +#define VFE_0_CAMIF_CMD_CLEAR_CAMIF_STATUS BIT(2) +#define VFE_0_CAMIF_CFG 0x47c +#define VFE_0_CAMIF_CFG_VFE_OUTPUT_EN BIT(6) +#define VFE_0_CAMIF_FRAME_CFG 0x484 +#define VFE_0_CAMIF_WINDOW_WIDTH_CFG 0x488 +#define VFE_0_CAMIF_WINDOW_HEIGHT_CFG 0x48c +#define VFE_0_CAMIF_SUBSAMPLE_CFG 0x490 +#define VFE_0_CAMIF_IRQ_FRAMEDROP_PATTERN 0x498 +#define VFE_0_CAMIF_IRQ_SUBSAMPLE_PATTERN 0x49c +#define VFE_0_CAMIF_STATUS 0x4a4 +#define VFE_0_CAMIF_STATUS_HALT BIT(31) + +#define VFE_0_REG_UPDATE 0x4ac +#define VFE_0_REG_UPDATE_RDIn(n) BIT(1 + (n)) +#define VFE_0_REG_UPDATE_line_n(n) \ + ((n) == VFE_LINE_PIX ? 1 : VFE_0_REG_UPDATE_RDIn(n)) + +#define VFE_0_DEMUX_CFG 0x560 +#define VFE_0_DEMUX_CFG_PERIOD 0x3 +#define VFE_0_DEMUX_GAIN_0 0x564 +#define VFE_0_DEMUX_GAIN_0_CH0_EVEN (0x80 << 0) +#define VFE_0_DEMUX_GAIN_0_CH0_ODD (0x80 << 16) +#define VFE_0_DEMUX_GAIN_1 0x568 +#define VFE_0_DEMUX_GAIN_1_CH1 (0x80 << 0) +#define VFE_0_DEMUX_GAIN_1_CH2 (0x80 << 16) +#define VFE_0_DEMUX_EVEN_CFG 0x574 +#define VFE_0_DEMUX_EVEN_CFG_PATTERN_YUYV 0x9cac +#define VFE_0_DEMUX_EVEN_CFG_PATTERN_YVYU 0xac9c +#define VFE_0_DEMUX_EVEN_CFG_PATTERN_UYVY 0xc9ca +#define VFE_0_DEMUX_EVEN_CFG_PATTERN_VYUY 0xcac9 +#define VFE_0_DEMUX_ODD_CFG 0x578 +#define VFE_0_DEMUX_ODD_CFG_PATTERN_YUYV 0x9cac +#define VFE_0_DEMUX_ODD_CFG_PATTERN_YVYU 0xac9c +#define VFE_0_DEMUX_ODD_CFG_PATTERN_UYVY 0xc9ca +#define VFE_0_DEMUX_ODD_CFG_PATTERN_VYUY 0xcac9 + +#define VFE_0_SCALE_ENC_Y_CFG 0x91c +#define VFE_0_SCALE_ENC_Y_H_IMAGE_SIZE 0x920 +#define VFE_0_SCALE_ENC_Y_H_PHASE 0x924 +#define VFE_0_SCALE_ENC_Y_V_IMAGE_SIZE 0x934 +#define VFE_0_SCALE_ENC_Y_V_PHASE 0x938 +#define VFE_0_SCALE_ENC_CBCR_CFG 0x948 +#define VFE_0_SCALE_ENC_CBCR_H_IMAGE_SIZE 0x94c +#define VFE_0_SCALE_ENC_CBCR_H_PHASE 0x950 +#define VFE_0_SCALE_ENC_CBCR_V_IMAGE_SIZE 0x960 +#define VFE_0_SCALE_ENC_CBCR_V_PHASE 0x964 + +#define VFE_0_CROP_ENC_Y_WIDTH 0x974 +#define VFE_0_CROP_ENC_Y_HEIGHT 0x978 +#define VFE_0_CROP_ENC_CBCR_WIDTH 0x97c +#define VFE_0_CROP_ENC_CBCR_HEIGHT 0x980 + +#define VFE_0_CLAMP_ENC_MAX_CFG 0x984 +#define VFE_0_CLAMP_ENC_MAX_CFG_CH0 (0xff << 0) +#define VFE_0_CLAMP_ENC_MAX_CFG_CH1 (0xff << 8) +#define VFE_0_CLAMP_ENC_MAX_CFG_CH2 (0xff << 16) +#define VFE_0_CLAMP_ENC_MIN_CFG 0x988 +#define VFE_0_CLAMP_ENC_MIN_CFG_CH0 (0x0 << 0) +#define VFE_0_CLAMP_ENC_MIN_CFG_CH1 (0x0 << 8) +#define VFE_0_CLAMP_ENC_MIN_CFG_CH2 (0x0 << 16) + +#define VFE_0_REALIGN_BUF_CFG 0xaac +#define VFE_0_REALIGN_BUF_CFG_CB_ODD_PIXEL BIT(2) +#define VFE_0_REALIGN_BUF_CFG_CR_ODD_PIXEL BIT(3) +#define VFE_0_REALIGN_BUF_CFG_HSUB_ENABLE BIT(4) + +#define VFE_0_BUS_IMAGE_MASTER_CMD 0xcec +#define VFE_0_BUS_IMAGE_MASTER_n_SHIFT(x) (2 * (x)) + +#define CAMIF_TIMEOUT_SLEEP_US 1000 +#define CAMIF_TIMEOUT_ALL_US 1000000 + +#define MSM_VFE_VFE0_UB_SIZE 2047 +#define MSM_VFE_VFE0_UB_SIZE_RDI (MSM_VFE_VFE0_UB_SIZE / 3) +#define 
MSM_VFE_VFE1_UB_SIZE 1535 +#define MSM_VFE_VFE1_UB_SIZE_RDI (MSM_VFE_VFE1_UB_SIZE / 3) + +static u32 vfe_hw_version(struct vfe_device *vfe) +{ + u32 hw_version = readl_relaxed(vfe->base + VFE_0_HW_VERSION); + + dev_dbg(vfe->camss->dev, "VFE HW Version = 0x%08x\n", hw_version); + + return hw_version; +} + +static inline void vfe_reg_clr(struct vfe_device *vfe, u32 reg, u32 clr_bits) +{ + u32 bits = readl_relaxed(vfe->base + reg); + + writel_relaxed(bits & ~clr_bits, vfe->base + reg); +} + +static inline void vfe_reg_set(struct vfe_device *vfe, u32 reg, u32 set_bits) +{ + u32 bits = readl_relaxed(vfe->base + reg); + + writel_relaxed(bits | set_bits, vfe->base + reg); +} + +static void vfe_global_reset(struct vfe_device *vfe) +{ + u32 reset_bits = VFE_0_GLOBAL_RESET_CMD_IDLE_CGC | + VFE_0_GLOBAL_RESET_CMD_DSP | + VFE_0_GLOBAL_RESET_CMD_TESTGEN | + VFE_0_GLOBAL_RESET_CMD_BUS_MISR | + VFE_0_GLOBAL_RESET_CMD_PM | + VFE_0_GLOBAL_RESET_CMD_REGISTER | + VFE_0_GLOBAL_RESET_CMD_BUS_BDG | + VFE_0_GLOBAL_RESET_CMD_BUS | + VFE_0_GLOBAL_RESET_CMD_CAMIF | + VFE_0_GLOBAL_RESET_CMD_CORE; + + writel_relaxed(BIT(31), vfe->base + VFE_0_IRQ_MASK_0); + + /* Enforce barrier between IRQ mask setup and global reset */ + wmb(); + writel_relaxed(reset_bits, vfe->base + VFE_0_GLOBAL_RESET_CMD); +} + +static void vfe_halt_request(struct vfe_device *vfe) +{ + writel_relaxed(VFE_0_BUS_BDG_CMD_HALT_REQ, + vfe->base + VFE_0_BUS_BDG_CMD); +} + +static void vfe_halt_clear(struct vfe_device *vfe) +{ + writel_relaxed(0x0, vfe->base + VFE_0_BUS_BDG_CMD); +} + +static void vfe_wm_frame_based(struct vfe_device *vfe, u8 wm, u8 enable) +{ + if (enable) + vfe_reg_set(vfe, VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG(wm), + 1 << VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_BASED_SHIFT); + else + vfe_reg_clr(vfe, VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG(wm), + 1 << VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_BASED_SHIFT); +} + +#define CALC_WORD(width, M, N) (((width) * (M) + (N) - 1) / (N)) + +static int vfe_word_per_line_by_pixel(u32 format, u32 pixel_per_line) +{ + int val = 0; + + switch (format) { + case V4L2_PIX_FMT_NV12: + case V4L2_PIX_FMT_NV21: + case V4L2_PIX_FMT_NV16: + case V4L2_PIX_FMT_NV61: + val = CALC_WORD(pixel_per_line, 1, 8); + break; + case V4L2_PIX_FMT_YUYV: + case V4L2_PIX_FMT_YVYU: + case V4L2_PIX_FMT_UYVY: + case V4L2_PIX_FMT_VYUY: + val = CALC_WORD(pixel_per_line, 2, 8); + break; + } + + return val; +} + +static int vfe_word_per_line_by_bytes(u32 bytes_per_line) +{ + return CALC_WORD(bytes_per_line, 1, 8); +} + +static void vfe_get_wm_sizes(struct v4l2_pix_format_mplane *pix, u8 plane, + u16 *width, u16 *height, u16 *bytesperline) +{ + *width = pix->width; + *height = pix->height; + + switch (pix->pixelformat) { + case V4L2_PIX_FMT_NV12: + case V4L2_PIX_FMT_NV21: + *bytesperline = pix->plane_fmt[0].bytesperline; + if (plane == 1) + *height /= 2; + break; + case V4L2_PIX_FMT_NV16: + case V4L2_PIX_FMT_NV61: + *bytesperline = pix->plane_fmt[0].bytesperline; + break; + case V4L2_PIX_FMT_YUYV: + case V4L2_PIX_FMT_YVYU: + case V4L2_PIX_FMT_VYUY: + case V4L2_PIX_FMT_UYVY: + *bytesperline = pix->plane_fmt[plane].bytesperline; + break; + } +} + +static void vfe_wm_line_based(struct vfe_device *vfe, u32 wm, + struct v4l2_pix_format_mplane *pix, + u8 plane, u32 enable) +{ + u32 reg; + + if (enable) { + u16 width = 0, height = 0, bytesperline = 0, wpl; + + vfe_get_wm_sizes(pix, plane, &width, &height, &bytesperline); + + wpl = vfe_word_per_line_by_pixel(pix->pixelformat, width); + + reg = height - 1; + reg |= ((wpl + 3) / 4 - 1) << 16; + + 
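+		/*
+		 * WR_IMAGE_SIZE packs the line count minus one into the low
+		 * half and the per-line group count minus one into bits 31:16,
+		 * where a group is four 8-byte bus words ((wpl + 3) / 4).
+		 * E.g. for a hypothetical 1920x1080 NV12 luma plane,
+		 * wpl = CALC_WORD(1920, 1, 8) = 240, so the register would
+		 * hold 1079 | (59 << 16).
+		 */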
writel_relaxed(reg, vfe->base + + VFE_0_BUS_IMAGE_MASTER_n_WR_IMAGE_SIZE(wm)); + + wpl = vfe_word_per_line_by_bytes(bytesperline); + + reg = 0x3; + reg |= (height - 1) << 2; + reg |= ((wpl + 1) / 2) << 16; + + writel_relaxed(reg, vfe->base + + VFE_0_BUS_IMAGE_MASTER_n_WR_BUFFER_CFG(wm)); + } else { + writel_relaxed(0, vfe->base + + VFE_0_BUS_IMAGE_MASTER_n_WR_IMAGE_SIZE(wm)); + writel_relaxed(0, vfe->base + + VFE_0_BUS_IMAGE_MASTER_n_WR_BUFFER_CFG(wm)); + } +} + +static void vfe_wm_set_framedrop_period(struct vfe_device *vfe, u8 wm, u8 per) +{ + u32 reg; + + reg = readl_relaxed(vfe->base + + VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG(wm)); + + reg &= ~(VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_MASK); + + reg |= (per << VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_SHIFT) + & VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_MASK; + + writel_relaxed(reg, + vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG(wm)); +} + +static void vfe_wm_set_framedrop_pattern(struct vfe_device *vfe, u8 wm, + u32 pattern) +{ + writel_relaxed(pattern, vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_FRAMEDROP_PATTERN(wm)); +} + +static void vfe_wm_set_ub_cfg(struct vfe_device *vfe, u8 wm, + u16 offset, u16 depth) +{ + u32 reg; + + reg = (offset << VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG_OFFSET_SHIFT) | + depth; + writel_relaxed(reg, vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG(wm)); +} + +static void vfe_bus_reload_wm(struct vfe_device *vfe, u8 wm) +{ + /* Enforce barrier between any outstanding register write */ + wmb(); + + writel_relaxed(VFE_0_BUS_CMD_Mx_RLD_CMD(wm), vfe->base + VFE_0_BUS_CMD); + + /* Use barrier to make sure bus reload is issued before anything else */ + wmb(); +} + +static void vfe_wm_set_ping_addr(struct vfe_device *vfe, u8 wm, u32 addr) +{ + writel_relaxed(addr, + vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_PING_ADDR(wm)); +} + +static void vfe_wm_set_pong_addr(struct vfe_device *vfe, u8 wm, u32 addr) +{ + writel_relaxed(addr, + vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_PONG_ADDR(wm)); +} + +static int vfe_wm_get_ping_pong_status(struct vfe_device *vfe, u8 wm) +{ + u32 reg; + + reg = readl_relaxed(vfe->base + VFE_0_BUS_PING_PONG_STATUS); + + return (reg >> wm) & 0x1; +} + +static void vfe_bus_enable_wr_if(struct vfe_device *vfe, u8 enable) +{ + if (enable) + writel_relaxed(0x101, vfe->base + VFE_0_BUS_CFG); + else + writel_relaxed(0, vfe->base + VFE_0_BUS_CFG); +} + +static void vfe_bus_connect_wm_to_rdi(struct vfe_device *vfe, u8 wm, + enum vfe_line_id id) +{ + u32 reg; + + reg = VFE_0_RDI_CFG_x_MIPI_EN_BITS; + vfe_reg_set(vfe, VFE_0_RDI_CFG_x(0), reg); + + reg = VFE_0_RDI_CFG_x_RDI_EN_BIT; + reg |= ((3 * id) << VFE_0_RDI_CFG_x_RDI_STREAM_SEL_SHIFT) & + VFE_0_RDI_CFG_x_RDI_STREAM_SEL_MASK; + vfe_reg_set(vfe, VFE_0_RDI_CFG_x(id), reg); + + switch (id) { + case VFE_LINE_RDI0: + default: + reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI0 << + VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT; + break; + case VFE_LINE_RDI1: + reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI1 << + VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT; + break; + case VFE_LINE_RDI2: + reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI2 << + VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT; + break; + } + + if (wm % 2 == 1) + reg <<= 16; + + vfe_reg_set(vfe, VFE_0_BUS_XBAR_CFG_x(wm), reg); +} + +static void vfe_wm_set_subsample(struct vfe_device *vfe, u8 wm) +{ + writel_relaxed(VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN_DEF, + vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN(wm)); +} + +static void 
vfe_bus_disconnect_wm_from_rdi(struct vfe_device *vfe, u8 wm, + enum vfe_line_id id) +{ + u32 reg; + + reg = VFE_0_RDI_CFG_x_RDI_EN_BIT; + vfe_reg_clr(vfe, VFE_0_RDI_CFG_x(id), reg); + + switch (id) { + case VFE_LINE_RDI0: + default: + reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI0 << + VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT; + break; + case VFE_LINE_RDI1: + reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI1 << + VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT; + break; + case VFE_LINE_RDI2: + reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI2 << + VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT; + break; + } + + if (wm % 2 == 1) + reg <<= 16; + + vfe_reg_clr(vfe, VFE_0_BUS_XBAR_CFG_x(wm), reg); +} + +static void vfe_set_xbar_cfg(struct vfe_device *vfe, struct vfe_output *output, + u8 enable) +{ + struct vfe_line *line = container_of(output, struct vfe_line, output); + u32 p = line->video_out.active_fmt.fmt.pix_mp.pixelformat; + u32 reg; + + switch (p) { + case V4L2_PIX_FMT_NV12: + case V4L2_PIX_FMT_NV21: + case V4L2_PIX_FMT_NV16: + case V4L2_PIX_FMT_NV61: + reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_LUMA << + VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT; + + if (output->wm_idx[0] % 2 == 1) + reg <<= 16; + + if (enable) + vfe_reg_set(vfe, + VFE_0_BUS_XBAR_CFG_x(output->wm_idx[0]), + reg); + else + vfe_reg_clr(vfe, + VFE_0_BUS_XBAR_CFG_x(output->wm_idx[0]), + reg); + + reg = VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_EN; + if (p == V4L2_PIX_FMT_NV12 || p == V4L2_PIX_FMT_NV16) + reg |= VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_SWAP_INTER_INTRA; + + if (output->wm_idx[1] % 2 == 1) + reg <<= 16; + + if (enable) + vfe_reg_set(vfe, + VFE_0_BUS_XBAR_CFG_x(output->wm_idx[1]), + reg); + else + vfe_reg_clr(vfe, + VFE_0_BUS_XBAR_CFG_x(output->wm_idx[1]), + reg); + break; + case V4L2_PIX_FMT_YUYV: + case V4L2_PIX_FMT_YVYU: + case V4L2_PIX_FMT_VYUY: + case V4L2_PIX_FMT_UYVY: + reg = VFE_0_BUS_XBAR_CFG_x_M_REALIGN_BUF_EN; + reg |= VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_EN; + + if (p == V4L2_PIX_FMT_YUYV || p == V4L2_PIX_FMT_YVYU) + reg |= VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_SWAP_INTER_INTRA; + + if (output->wm_idx[0] % 2 == 1) + reg <<= 16; + + if (enable) + vfe_reg_set(vfe, + VFE_0_BUS_XBAR_CFG_x(output->wm_idx[0]), + reg); + else + vfe_reg_clr(vfe, + VFE_0_BUS_XBAR_CFG_x(output->wm_idx[0]), + reg); + break; + default: + break; + } +} + +static void vfe_set_realign_cfg(struct vfe_device *vfe, struct vfe_line *line, + u8 enable) +{ + u32 p = line->video_out.active_fmt.fmt.pix_mp.pixelformat; + u32 val = VFE_0_MODULE_ZOOM_EN_REALIGN_BUF; + + if (p != V4L2_PIX_FMT_YUYV && p != V4L2_PIX_FMT_YVYU && + p != V4L2_PIX_FMT_VYUY && p != V4L2_PIX_FMT_UYVY) + return; + + if (enable) { + vfe_reg_set(vfe, VFE_0_MODULE_ZOOM_EN, val); + } else { + vfe_reg_clr(vfe, VFE_0_MODULE_ZOOM_EN, val); + return; + } + + val = VFE_0_REALIGN_BUF_CFG_HSUB_ENABLE; + + if (p == V4L2_PIX_FMT_UYVY || p == V4L2_PIX_FMT_YUYV) + val |= VFE_0_REALIGN_BUF_CFG_CR_ODD_PIXEL; + else + val |= VFE_0_REALIGN_BUF_CFG_CB_ODD_PIXEL; + + writel_relaxed(val, vfe->base + VFE_0_REALIGN_BUF_CFG); +} + +static void vfe_set_rdi_cid(struct vfe_device *vfe, enum vfe_line_id id, u8 cid) +{ + vfe_reg_clr(vfe, VFE_0_RDI_CFG_x(id), + VFE_0_RDI_CFG_x_RDI_M0_SEL_MASK); + + vfe_reg_set(vfe, VFE_0_RDI_CFG_x(id), + cid << VFE_0_RDI_CFG_x_RDI_M0_SEL_SHIFT); +} + +static void vfe_reg_update(struct vfe_device *vfe, enum vfe_line_id line_id) +{ + vfe->reg_update |= VFE_0_REG_UPDATE_line_n(line_id); + + /* Enforce barrier between line update and commit */ + 
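+	/*
+	 * vfe->reg_update is a software shadow of VFE_0_REG_UPDATE: bits for
+	 * several lines can be accumulated here and committed with a single
+	 * write below, and vfe_reg_update_clear() removes a line's bit from
+	 * the shadow again.
+	 */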
wmb(); + + writel_relaxed(vfe->reg_update, vfe->base + VFE_0_REG_UPDATE); + + /* Make sure register update is issued before further reg writes */ + wmb(); +} + +static inline void vfe_reg_update_clear(struct vfe_device *vfe, + enum vfe_line_id line_id) +{ + vfe->reg_update &= ~VFE_0_REG_UPDATE_line_n(line_id); +} + +static void vfe_enable_irq_wm_line(struct vfe_device *vfe, u8 wm, + enum vfe_line_id line_id, u8 enable) +{ + u32 irq_en0 = VFE_0_IRQ_MASK_0_IMAGE_MASTER_n_PING_PONG(wm) | + VFE_0_IRQ_MASK_0_line_n_REG_UPDATE(line_id); + u32 irq_en1 = VFE_0_IRQ_MASK_1_IMAGE_MASTER_n_BUS_OVERFLOW(wm) | + VFE_0_IRQ_MASK_1_RDIn_SOF(line_id); + + if (enable) { + vfe_reg_set(vfe, VFE_0_IRQ_MASK_0, irq_en0); + vfe_reg_set(vfe, VFE_0_IRQ_MASK_1, irq_en1); + } else { + vfe_reg_clr(vfe, VFE_0_IRQ_MASK_0, irq_en0); + vfe_reg_clr(vfe, VFE_0_IRQ_MASK_1, irq_en1); + } +} + +static void vfe_enable_irq_pix_line(struct vfe_device *vfe, u8 comp, + enum vfe_line_id line_id, u8 enable) +{ + struct vfe_output *output = &vfe->line[line_id].output; + unsigned int i; + u32 irq_en0; + u32 irq_en1; + u32 comp_mask = 0; + + irq_en0 = VFE_0_IRQ_MASK_0_CAMIF_SOF; + irq_en0 |= VFE_0_IRQ_MASK_0_CAMIF_EOF; + irq_en0 |= VFE_0_IRQ_MASK_0_IMAGE_COMPOSITE_DONE_n(comp); + irq_en0 |= VFE_0_IRQ_MASK_0_line_n_REG_UPDATE(line_id); + irq_en1 = VFE_0_IRQ_MASK_1_CAMIF_ERROR; + for (i = 0; i < output->wm_num; i++) { + irq_en1 |= VFE_0_IRQ_MASK_1_IMAGE_MASTER_n_BUS_OVERFLOW(output->wm_idx[i]); + comp_mask |= (1 << output->wm_idx[i]) << comp * 8; + } + + if (enable) { + vfe_reg_set(vfe, VFE_0_IRQ_MASK_0, irq_en0); + vfe_reg_set(vfe, VFE_0_IRQ_MASK_1, irq_en1); + vfe_reg_set(vfe, VFE_0_IRQ_COMPOSITE_MASK_0, comp_mask); + } else { + vfe_reg_clr(vfe, VFE_0_IRQ_MASK_0, irq_en0); + vfe_reg_clr(vfe, VFE_0_IRQ_MASK_1, irq_en1); + vfe_reg_clr(vfe, VFE_0_IRQ_COMPOSITE_MASK_0, comp_mask); + } +} + +static void vfe_enable_irq_common(struct vfe_device *vfe) +{ + u32 irq_en0 = VFE_0_IRQ_MASK_0_RESET_ACK; + u32 irq_en1 = VFE_0_IRQ_MASK_1_VIOLATION | + VFE_0_IRQ_MASK_1_BUS_BDG_HALT_ACK; + + vfe_reg_set(vfe, VFE_0_IRQ_MASK_0, irq_en0); + vfe_reg_set(vfe, VFE_0_IRQ_MASK_1, irq_en1); +} + +static void vfe_set_demux_cfg(struct vfe_device *vfe, struct vfe_line *line) +{ + u32 val, even_cfg, odd_cfg; + + writel_relaxed(VFE_0_DEMUX_CFG_PERIOD, vfe->base + VFE_0_DEMUX_CFG); + + val = VFE_0_DEMUX_GAIN_0_CH0_EVEN | VFE_0_DEMUX_GAIN_0_CH0_ODD; + writel_relaxed(val, vfe->base + VFE_0_DEMUX_GAIN_0); + + val = VFE_0_DEMUX_GAIN_1_CH1 | VFE_0_DEMUX_GAIN_1_CH2; + writel_relaxed(val, vfe->base + VFE_0_DEMUX_GAIN_1); + + switch (line->fmt[MSM_VFE_PAD_SINK].code) { + case MEDIA_BUS_FMT_YUYV8_2X8: + even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_YUYV; + odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_YUYV; + break; + case MEDIA_BUS_FMT_YVYU8_2X8: + even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_YVYU; + odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_YVYU; + break; + case MEDIA_BUS_FMT_UYVY8_2X8: + default: + even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_UYVY; + odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_UYVY; + break; + case MEDIA_BUS_FMT_VYUY8_2X8: + even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_VYUY; + odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_VYUY; + break; + } + + writel_relaxed(even_cfg, vfe->base + VFE_0_DEMUX_EVEN_CFG); + writel_relaxed(odd_cfg, vfe->base + VFE_0_DEMUX_ODD_CFG); +} + +static void vfe_set_scale_cfg(struct vfe_device *vfe, struct vfe_line *line) +{ + u32 p = line->video_out.active_fmt.fmt.pix_mp.pixelformat; + u32 reg; + u16 input, output; + u8 interp_reso; + u32 phase_mult; + + writel_relaxed(0x3, 
vfe->base + VFE_0_SCALE_ENC_Y_CFG); + + input = line->fmt[MSM_VFE_PAD_SINK].width - 1; + output = line->compose.width - 1; + reg = (output << 16) | input; + writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_Y_H_IMAGE_SIZE); + + interp_reso = vfe_calc_interp_reso(input, output); + phase_mult = input * (1 << (14 + interp_reso)) / output; + reg = (interp_reso << 28) | phase_mult; + writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_Y_H_PHASE); + + input = line->fmt[MSM_VFE_PAD_SINK].height - 1; + output = line->compose.height - 1; + reg = (output << 16) | input; + writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_Y_V_IMAGE_SIZE); + + interp_reso = vfe_calc_interp_reso(input, output); + phase_mult = input * (1 << (14 + interp_reso)) / output; + reg = (interp_reso << 28) | phase_mult; + writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_Y_V_PHASE); + + writel_relaxed(0x3, vfe->base + VFE_0_SCALE_ENC_CBCR_CFG); + + input = line->fmt[MSM_VFE_PAD_SINK].width - 1; + output = line->compose.width / 2 - 1; + reg = (output << 16) | input; + writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_CBCR_H_IMAGE_SIZE); + + interp_reso = vfe_calc_interp_reso(input, output); + phase_mult = input * (1 << (14 + interp_reso)) / output; + reg = (interp_reso << 28) | phase_mult; + writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_CBCR_H_PHASE); + + input = line->fmt[MSM_VFE_PAD_SINK].height - 1; + output = line->compose.height - 1; + if (p == V4L2_PIX_FMT_NV12 || p == V4L2_PIX_FMT_NV21) + output = line->compose.height / 2 - 1; + reg = (output << 16) | input; + writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_CBCR_V_IMAGE_SIZE); + + interp_reso = vfe_calc_interp_reso(input, output); + phase_mult = input * (1 << (14 + interp_reso)) / output; + reg = (interp_reso << 28) | phase_mult; + writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_CBCR_V_PHASE); +} + +static void vfe_set_crop_cfg(struct vfe_device *vfe, struct vfe_line *line) +{ + u32 p = line->video_out.active_fmt.fmt.pix_mp.pixelformat; + u32 reg; + u16 first, last; + + first = line->crop.left; + last = line->crop.left + line->crop.width - 1; + reg = (first << 16) | last; + writel_relaxed(reg, vfe->base + VFE_0_CROP_ENC_Y_WIDTH); + + first = line->crop.top; + last = line->crop.top + line->crop.height - 1; + reg = (first << 16) | last; + writel_relaxed(reg, vfe->base + VFE_0_CROP_ENC_Y_HEIGHT); + + first = line->crop.left / 2; + last = line->crop.left / 2 + line->crop.width / 2 - 1; + reg = (first << 16) | last; + writel_relaxed(reg, vfe->base + VFE_0_CROP_ENC_CBCR_WIDTH); + + first = line->crop.top; + last = line->crop.top + line->crop.height - 1; + if (p == V4L2_PIX_FMT_NV12 || p == V4L2_PIX_FMT_NV21) { + first = line->crop.top / 2; + last = line->crop.top / 2 + line->crop.height / 2 - 1; + } + reg = (first << 16) | last; + writel_relaxed(reg, vfe->base + VFE_0_CROP_ENC_CBCR_HEIGHT); +} + +static void vfe_set_clamp_cfg(struct vfe_device *vfe) +{ + u32 val = VFE_0_CLAMP_ENC_MAX_CFG_CH0 | + VFE_0_CLAMP_ENC_MAX_CFG_CH1 | + VFE_0_CLAMP_ENC_MAX_CFG_CH2; + + writel_relaxed(val, vfe->base + VFE_0_CLAMP_ENC_MAX_CFG); + + val = VFE_0_CLAMP_ENC_MIN_CFG_CH0 | + VFE_0_CLAMP_ENC_MIN_CFG_CH1 | + VFE_0_CLAMP_ENC_MIN_CFG_CH2; + + writel_relaxed(val, vfe->base + VFE_0_CLAMP_ENC_MIN_CFG); +} + +static void vfe_set_cgc_override(struct vfe_device *vfe, u8 wm, u8 enable) +{ + /* empty */ +} + +static void vfe_set_camif_cfg(struct vfe_device *vfe, struct vfe_line *line) +{ + u32 val; + + switch (line->fmt[MSM_VFE_PAD_SINK].code) { + case MEDIA_BUS_FMT_YUYV8_2X8: + val = 
VFE_0_CORE_CFG_PIXEL_PATTERN_YCBYCR; + break; + case MEDIA_BUS_FMT_YVYU8_2X8: + val = VFE_0_CORE_CFG_PIXEL_PATTERN_YCRYCB; + break; + case MEDIA_BUS_FMT_UYVY8_2X8: + default: + val = VFE_0_CORE_CFG_PIXEL_PATTERN_CBYCRY; + break; + case MEDIA_BUS_FMT_VYUY8_2X8: + val = VFE_0_CORE_CFG_PIXEL_PATTERN_CRYCBY; + break; + } + + val |= VFE_0_CORE_CFG_COMPOSITE_REG_UPDATE_EN; + writel_relaxed(val, vfe->base + VFE_0_CORE_CFG); + + val = line->fmt[MSM_VFE_PAD_SINK].width * 2 - 1; + val |= (line->fmt[MSM_VFE_PAD_SINK].height - 1) << 16; + writel_relaxed(val, vfe->base + VFE_0_CAMIF_FRAME_CFG); + + val = line->fmt[MSM_VFE_PAD_SINK].width * 2 - 1; + writel_relaxed(val, vfe->base + VFE_0_CAMIF_WINDOW_WIDTH_CFG); + + val = line->fmt[MSM_VFE_PAD_SINK].height - 1; + writel_relaxed(val, vfe->base + VFE_0_CAMIF_WINDOW_HEIGHT_CFG); + + val = 0xffffffff; + writel_relaxed(val, vfe->base + VFE_0_CAMIF_SUBSAMPLE_CFG); + + val = 0xffffffff; + writel_relaxed(val, vfe->base + VFE_0_CAMIF_IRQ_FRAMEDROP_PATTERN); + + val = 0xffffffff; + writel_relaxed(val, vfe->base + VFE_0_CAMIF_IRQ_SUBSAMPLE_PATTERN); + + val = VFE_0_RDI_CFG_x_MIPI_EN_BITS; + vfe_reg_set(vfe, VFE_0_RDI_CFG_x(0), val); + + val = VFE_0_CAMIF_CFG_VFE_OUTPUT_EN; + writel_relaxed(val, vfe->base + VFE_0_CAMIF_CFG); +} + +static void vfe_set_camif_cmd(struct vfe_device *vfe, u8 enable) +{ + u32 cmd; + + cmd = VFE_0_CAMIF_CMD_CLEAR_CAMIF_STATUS | VFE_0_CAMIF_CMD_NO_CHANGE; + writel_relaxed(cmd, vfe->base + VFE_0_CAMIF_CMD); + + /* Make sure camif command is issued written before it is changed again */ + wmb(); + + if (enable) + cmd = VFE_0_CAMIF_CMD_ENABLE_FRAME_BOUNDARY; + else + cmd = VFE_0_CAMIF_CMD_DISABLE_FRAME_BOUNDARY; + + writel_relaxed(cmd, vfe->base + VFE_0_CAMIF_CMD); +} + +static void vfe_set_module_cfg(struct vfe_device *vfe, u8 enable) +{ + u32 val_lens = VFE_0_MODULE_LENS_EN_DEMUX | + VFE_0_MODULE_LENS_EN_CHROMA_UPSAMPLE; + u32 val_zoom = VFE_0_MODULE_ZOOM_EN_SCALE_ENC | + VFE_0_MODULE_ZOOM_EN_CROP_ENC; + + if (enable) { + vfe_reg_set(vfe, VFE_0_MODULE_LENS_EN, val_lens); + vfe_reg_set(vfe, VFE_0_MODULE_ZOOM_EN, val_zoom); + } else { + vfe_reg_clr(vfe, VFE_0_MODULE_LENS_EN, val_lens); + vfe_reg_clr(vfe, VFE_0_MODULE_ZOOM_EN, val_zoom); + } +} + +static int vfe_camif_wait_for_stop(struct vfe_device *vfe, struct device *dev) +{ + u32 val; + int ret; + + ret = readl_poll_timeout(vfe->base + VFE_0_CAMIF_STATUS, + val, + (val & VFE_0_CAMIF_STATUS_HALT), + CAMIF_TIMEOUT_SLEEP_US, + CAMIF_TIMEOUT_ALL_US); + if (ret < 0) + dev_err(dev, "%s: camif stop timeout\n", __func__); + + return ret; +} + +/* + * vfe_isr - VFE module interrupt handler + * @irq: Interrupt line + * @dev: VFE device + * + * Return IRQ_HANDLED on success + */ +static irqreturn_t vfe_isr(int irq, void *dev) +{ + struct vfe_device *vfe = dev; + u32 value0, value1; + int i, j; + + vfe->ops->isr_read(vfe, &value0, &value1); + + dev_dbg(vfe->camss->dev, "VFE: status0 = 0x%08x, status1 = 0x%08x\n", + value0, value1); + + if (value0 & VFE_0_IRQ_STATUS_0_RESET_ACK) + vfe->isr_ops.reset_ack(vfe); + + if (value1 & VFE_0_IRQ_STATUS_1_VIOLATION) + vfe->ops->violation_read(vfe); + + if (value1 & VFE_0_IRQ_STATUS_1_BUS_BDG_HALT_ACK) + vfe->isr_ops.halt_ack(vfe); + + for (i = VFE_LINE_RDI0; i < vfe->line_num; i++) + if (value0 & VFE_0_IRQ_STATUS_0_line_n_REG_UPDATE(i)) + vfe->isr_ops.reg_update(vfe, i); + + if (value0 & VFE_0_IRQ_STATUS_0_CAMIF_SOF) + vfe->isr_ops.sof(vfe, VFE_LINE_PIX); + + for (i = VFE_LINE_RDI0; i <= VFE_LINE_RDI2; i++) + if (value1 & VFE_0_IRQ_STATUS_1_RDIn_SOF(i)) + 
vfe->isr_ops.sof(vfe, i); + + for (i = 0; i < MSM_VFE_COMPOSITE_IRQ_NUM; i++) + if (value0 & VFE_0_IRQ_STATUS_0_IMAGE_COMPOSITE_DONE_n(i)) { + vfe->isr_ops.comp_done(vfe, i); + for (j = 0; j < ARRAY_SIZE(vfe->wm_output_map); j++) + if (vfe->wm_output_map[j] == VFE_LINE_PIX) + value0 &= ~VFE_0_IRQ_MASK_0_IMAGE_MASTER_n_PING_PONG(j); + } + + for (i = 0; i < MSM_VFE_IMAGE_MASTERS_NUM; i++) + if (value0 & VFE_0_IRQ_STATUS_0_IMAGE_MASTER_n_PING_PONG(i)) + vfe->isr_ops.wm_done(vfe, i); + + return IRQ_HANDLED; +} + +static u16 vfe_get_ub_size(u8 vfe_id) +{ + /* On VFE4.8 the ub-size is the same on both instances */ + return MSM_VFE_VFE0_UB_SIZE_RDI; +} + +static void vfe_wm_enable(struct vfe_device *vfe, u8 wm, u8 enable) +{ + if (enable) + writel_relaxed(2 << VFE_0_BUS_IMAGE_MASTER_n_SHIFT(wm), + vfe->base + VFE_0_BUS_IMAGE_MASTER_CMD); + else + writel_relaxed(1 << VFE_0_BUS_IMAGE_MASTER_n_SHIFT(wm), + vfe->base + VFE_0_BUS_IMAGE_MASTER_CMD); + + /* The WM must be enabled before sending other commands */ + wmb(); +} + +static void vfe_set_qos(struct vfe_device *vfe) +{ + u32 val = VFE_0_BUS_BDG_QOS_CFG_0_CFG; + u32 val3 = VFE_0_BUS_BDG_QOS_CFG_3_CFG; + u32 val4 = VFE_0_BUS_BDG_QOS_CFG_4_CFG; + u32 val7 = VFE_0_BUS_BDG_QOS_CFG_7_CFG; + + writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_0); + writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_1); + writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_2); + writel_relaxed(val3, vfe->base + VFE_0_BUS_BDG_QOS_CFG_3); + writel_relaxed(val4, vfe->base + VFE_0_BUS_BDG_QOS_CFG_4); + writel_relaxed(val4, vfe->base + VFE_0_BUS_BDG_QOS_CFG_5); + writel_relaxed(val4, vfe->base + VFE_0_BUS_BDG_QOS_CFG_6); + writel_relaxed(val7, vfe->base + VFE_0_BUS_BDG_QOS_CFG_7); +} + +static void vfe_set_ds(struct vfe_device *vfe) +{ + u32 val = VFE_0_BUS_BDG_DS_CFG_0_CFG; + u32 val16 = VFE_0_BUS_BDG_DS_CFG_16_CFG; + + writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_0); + writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_1); + writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_2); + writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_3); + writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_4); + writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_5); + writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_6); + writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_7); + writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_8); + writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_9); + writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_10); + writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_11); + writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_12); + writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_13); + writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_14); + writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_15); + writel_relaxed(val16, vfe->base + VFE_0_BUS_BDG_DS_CFG_16); +} + +static void vfe_isr_read(struct vfe_device *vfe, u32 *value0, u32 *value1) +{ + *value0 = readl_relaxed(vfe->base + VFE_0_IRQ_STATUS_0); + *value1 = readl_relaxed(vfe->base + VFE_0_IRQ_STATUS_1); + + writel_relaxed(*value0, vfe->base + VFE_0_IRQ_CLEAR_0); + writel_relaxed(*value1, vfe->base + VFE_0_IRQ_CLEAR_1); + + /* Enforce barrier between local & global IRQ clear */ + wmb(); + writel_relaxed(VFE_0_IRQ_CMD_GLOBAL_CLEAR, vfe->base + VFE_0_IRQ_CMD); +} + +/* + * vfe_pm_domain_off - Disable power domains specific to this VFE. 
+ * @vfe: VFE Device + */ +static void vfe_pm_domain_off(struct vfe_device *vfe) +{ + struct camss *camss = vfe->camss; + + device_link_del(camss->genpd_link[vfe->id]); +} + +/* + * vfe_pm_domain_on - Enable power domains specific to this VFE. + * @vfe: VFE Device + */ +static int vfe_pm_domain_on(struct vfe_device *vfe) +{ + struct camss *camss = vfe->camss; + enum vfe_line_id id = vfe->id; + + camss->genpd_link[id] = device_link_add(camss->dev, camss->genpd[id], DL_FLAG_STATELESS | + DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE); + + if (!camss->genpd_link[id]) { + dev_err(vfe->camss->dev, "Failed to add VFE#%d to power domain\n", id); + return -EINVAL; + } + + return 0; +} + +static void vfe_violation_read(struct vfe_device *vfe) +{ + u32 violation = readl_relaxed(vfe->base + VFE_0_VIOLATION_STATUS); + + pr_err_ratelimited("VFE: violation = 0x%08x\n", violation); +} + +static const struct vfe_hw_ops_gen1 vfe_ops_gen1_4_8 = { + .bus_connect_wm_to_rdi = vfe_bus_connect_wm_to_rdi, + .bus_disconnect_wm_from_rdi = vfe_bus_disconnect_wm_from_rdi, + .bus_enable_wr_if = vfe_bus_enable_wr_if, + .bus_reload_wm = vfe_bus_reload_wm, + .camif_wait_for_stop = vfe_camif_wait_for_stop, + .enable_irq_common = vfe_enable_irq_common, + .enable_irq_pix_line = vfe_enable_irq_pix_line, + .enable_irq_wm_line = vfe_enable_irq_wm_line, + .get_ub_size = vfe_get_ub_size, + .halt_clear = vfe_halt_clear, + .halt_request = vfe_halt_request, + .set_camif_cfg = vfe_set_camif_cfg, + .set_camif_cmd = vfe_set_camif_cmd, + .set_cgc_override = vfe_set_cgc_override, + .set_clamp_cfg = vfe_set_clamp_cfg, + .set_crop_cfg = vfe_set_crop_cfg, + .set_demux_cfg = vfe_set_demux_cfg, + .set_ds = vfe_set_ds, + .set_module_cfg = vfe_set_module_cfg, + .set_qos = vfe_set_qos, + .set_rdi_cid = vfe_set_rdi_cid, + .set_realign_cfg = vfe_set_realign_cfg, + .set_scale_cfg = vfe_set_scale_cfg, + .set_xbar_cfg = vfe_set_xbar_cfg, + .wm_enable = vfe_wm_enable, + .wm_frame_based = vfe_wm_frame_based, + .wm_get_ping_pong_status = vfe_wm_get_ping_pong_status, + .wm_line_based = vfe_wm_line_based, + .wm_set_framedrop_pattern = vfe_wm_set_framedrop_pattern, + .wm_set_framedrop_period = vfe_wm_set_framedrop_period, + .wm_set_ping_addr = vfe_wm_set_ping_addr, + .wm_set_pong_addr = vfe_wm_set_pong_addr, + .wm_set_subsample = vfe_wm_set_subsample, + .wm_set_ub_cfg = vfe_wm_set_ub_cfg, +}; + +static void vfe_subdev_init(struct device *dev, struct vfe_device *vfe) +{ + vfe->isr_ops = vfe_isr_ops_gen1; + vfe->ops_gen1 = &vfe_ops_gen1_4_8; + vfe->video_ops = vfe_video_ops_gen1; + + vfe->line_num = VFE_LINE_NUM_GEN1; +} + +const struct vfe_hw_ops vfe_ops_4_8 = { + .global_reset = vfe_global_reset, + .hw_version = vfe_hw_version, + .isr_read = vfe_isr_read, + .isr = vfe_isr, + .pm_domain_off = vfe_pm_domain_off, + .pm_domain_on = vfe_pm_domain_on, + .reg_update_clear = vfe_reg_update_clear, + .reg_update = vfe_reg_update, + .subdev_init = vfe_subdev_init, + .vfe_disable = vfe_gen1_disable, + .vfe_enable = vfe_gen1_enable, + .vfe_halt = vfe_gen1_halt, + .violation_read = vfe_violation_read, +}; diff --git a/drivers/media/platform/qcom/camss/camss-vfe-480.c b/drivers/media/platform/qcom/camss/camss-vfe-480.c new file mode 100644 index 0000000000..8ddb801643 --- /dev/null +++ b/drivers/media/platform/qcom/camss/camss-vfe-480.c @@ -0,0 +1,585 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * camss-vfe-480.c + * + * Qualcomm MSM Camera Subsystem - VFE (Video Front End) Module v480 (SM8250) + * + * Copyright (C) 2020-2021 Linaro Ltd. 
+ * Copyright (C) 2021 Jonathan Marek + */ + +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/iopoll.h> + +#include "camss.h" +#include "camss-vfe.h" + +/* VFE 2/3 are lite and have a different register layout */ +#define IS_LITE (vfe->id >= 2 ? 1 : 0) + +#define VFE_HW_VERSION (0x00) + +#define VFE_GLOBAL_RESET_CMD (IS_LITE ? 0x0c : 0x1c) +#define GLOBAL_RESET_HW_AND_REG (IS_LITE ? BIT(1) : BIT(0)) + +#define VFE_REG_UPDATE_CMD (IS_LITE ? 0x20 : 0x34) +static inline int reg_update_rdi(struct vfe_device *vfe, int n) +{ + return IS_LITE ? BIT(n) : BIT(1 + (n)); +} + +#define REG_UPDATE_RDI reg_update_rdi +#define VFE_IRQ_CMD (IS_LITE ? 0x24 : 0x38) +#define IRQ_CMD_GLOBAL_CLEAR BIT(0) + +#define VFE_IRQ_MASK(n) ((IS_LITE ? 0x28 : 0x3c) + (n) * 4) +#define IRQ_MASK_0_RESET_ACK (IS_LITE ? BIT(17) : BIT(0)) +#define IRQ_MASK_0_BUS_TOP_IRQ (IS_LITE ? BIT(4) : BIT(7)) +#define VFE_IRQ_CLEAR(n) ((IS_LITE ? 0x34 : 0x48) + (n) * 4) +#define VFE_IRQ_STATUS(n) ((IS_LITE ? 0x40 : 0x54) + (n) * 4) + +#define BUS_REG_BASE (IS_LITE ? 0x1a00 : 0xaa00) + +#define VFE_BUS_WM_CGC_OVERRIDE (BUS_REG_BASE + 0x08) +#define WM_CGC_OVERRIDE_ALL (0x3FFFFFF) + +#define VFE_BUS_WM_TEST_BUS_CTRL (BUS_REG_BASE + 0xdc) + +#define VFE_BUS_IRQ_MASK(n) (BUS_REG_BASE + 0x18 + (n) * 4) +static inline int bus_irq_mask_0_rdi_rup(struct vfe_device *vfe, int n) +{ + return IS_LITE ? BIT(n) : BIT(3 + (n)); +} + +#define BUS_IRQ_MASK_0_RDI_RUP bus_irq_mask_0_rdi_rup +static inline int bus_irq_mask_0_comp_done(struct vfe_device *vfe, int n) +{ + return IS_LITE ? BIT(4 + (n)) : BIT(6 + (n)); +} + +#define BUS_IRQ_MASK_0_COMP_DONE bus_irq_mask_0_comp_done +#define VFE_BUS_IRQ_CLEAR(n) (BUS_REG_BASE + 0x20 + (n) * 4) +#define VFE_BUS_IRQ_STATUS(n) (BUS_REG_BASE + 0x28 + (n) * 4) +#define VFE_BUS_IRQ_CLEAR_GLOBAL (BUS_REG_BASE + 0x30) + +#define VFE_BUS_WM_CFG(n) (BUS_REG_BASE + 0x200 + (n) * 0x100) +#define WM_CFG_EN (0) +#define WM_CFG_MODE (16) +#define MODE_QCOM_PLAIN (0) +#define MODE_MIPI_RAW (1) +#define VFE_BUS_WM_IMAGE_ADDR(n) (BUS_REG_BASE + 0x204 + (n) * 0x100) +#define VFE_BUS_WM_FRAME_INCR(n) (BUS_REG_BASE + 0x208 + (n) * 0x100) +#define VFE_BUS_WM_IMAGE_CFG_0(n) (BUS_REG_BASE + 0x20c + (n) * 0x100) +#define WM_IMAGE_CFG_0_DEFAULT_WIDTH (0xFFFF) +#define VFE_BUS_WM_IMAGE_CFG_1(n) (BUS_REG_BASE + 0x210 + (n) * 0x100) +#define VFE_BUS_WM_IMAGE_CFG_2(n) (BUS_REG_BASE + 0x214 + (n) * 0x100) +#define VFE_BUS_WM_PACKER_CFG(n) (BUS_REG_BASE + 0x218 + (n) * 0x100) +#define VFE_BUS_WM_HEADER_ADDR(n) (BUS_REG_BASE + 0x220 + (n) * 0x100) +#define VFE_BUS_WM_HEADER_INCR(n) (BUS_REG_BASE + 0x224 + (n) * 0x100) +#define VFE_BUS_WM_HEADER_CFG(n) (BUS_REG_BASE + 0x228 + (n) * 0x100) + +#define VFE_BUS_WM_IRQ_SUBSAMPLE_PERIOD(n) (BUS_REG_BASE + 0x230 + (n) * 0x100) +#define VFE_BUS_WM_IRQ_SUBSAMPLE_PATTERN(n) (BUS_REG_BASE + 0x234 + (n) * 0x100) +#define VFE_BUS_WM_FRAMEDROP_PERIOD(n) (BUS_REG_BASE + 0x238 + (n) * 0x100) +#define VFE_BUS_WM_FRAMEDROP_PATTERN(n) (BUS_REG_BASE + 0x23c + (n) * 0x100) + +#define VFE_BUS_WM_SYSTEM_CACHE_CFG(n) (BUS_REG_BASE + 0x260 + (n) * 0x100) +#define VFE_BUS_WM_BURST_LIMIT(n) (BUS_REG_BASE + 0x264 + (n) * 0x100) + +/* for titan 480, each bus client is hardcoded to a specific path + * and each bus client is part of a hardcoded "comp group" + */ +#define RDI_WM(n) ((IS_LITE ? 0 : 23) + (n)) +#define RDI_COMP_GROUP(n) ((IS_LITE ? 
0 : 11) + (n)) + +#define MAX_VFE_OUTPUT_LINES 4 + +static u32 vfe_hw_version(struct vfe_device *vfe) +{ + u32 hw_version = readl_relaxed(vfe->base + VFE_HW_VERSION); + + u32 gen = (hw_version >> 28) & 0xF; + u32 rev = (hw_version >> 16) & 0xFFF; + u32 step = hw_version & 0xFFFF; + + dev_dbg(vfe->camss->dev, "VFE HW Version = %u.%u.%u\n", gen, rev, step); + + return hw_version; +} + +static void vfe_global_reset(struct vfe_device *vfe) +{ + writel_relaxed(IRQ_MASK_0_RESET_ACK, vfe->base + VFE_IRQ_MASK(0)); + writel_relaxed(GLOBAL_RESET_HW_AND_REG, vfe->base + VFE_GLOBAL_RESET_CMD); +} + +static void vfe_wm_start(struct vfe_device *vfe, u8 wm, struct vfe_line *line) +{ + struct v4l2_pix_format_mplane *pix = + &line->video_out.active_fmt.fmt.pix_mp; + + wm = RDI_WM(wm); /* map to actual WM used (from wm=RDI index) */ + + /* no clock gating at bus input */ + writel_relaxed(WM_CGC_OVERRIDE_ALL, vfe->base + VFE_BUS_WM_CGC_OVERRIDE); + + writel_relaxed(0x0, vfe->base + VFE_BUS_WM_TEST_BUS_CTRL); + + writel_relaxed(pix->plane_fmt[0].bytesperline * pix->height, + vfe->base + VFE_BUS_WM_FRAME_INCR(wm)); + writel_relaxed(0xf, vfe->base + VFE_BUS_WM_BURST_LIMIT(wm)); + writel_relaxed(WM_IMAGE_CFG_0_DEFAULT_WIDTH, + vfe->base + VFE_BUS_WM_IMAGE_CFG_0(wm)); + writel_relaxed(pix->plane_fmt[0].bytesperline, + vfe->base + VFE_BUS_WM_IMAGE_CFG_2(wm)); + writel_relaxed(0, vfe->base + VFE_BUS_WM_PACKER_CFG(wm)); + + /* no dropped frames, one irq per frame */ + writel_relaxed(0, vfe->base + VFE_BUS_WM_FRAMEDROP_PERIOD(wm)); + writel_relaxed(1, vfe->base + VFE_BUS_WM_FRAMEDROP_PATTERN(wm)); + writel_relaxed(0, vfe->base + VFE_BUS_WM_IRQ_SUBSAMPLE_PERIOD(wm)); + writel_relaxed(1, vfe->base + VFE_BUS_WM_IRQ_SUBSAMPLE_PATTERN(wm)); + + writel_relaxed(1 << WM_CFG_EN | MODE_MIPI_RAW << WM_CFG_MODE, + vfe->base + VFE_BUS_WM_CFG(wm)); +} + +static void vfe_wm_stop(struct vfe_device *vfe, u8 wm) +{ + wm = RDI_WM(wm); /* map to actual WM used (from wm=RDI index) */ + writel_relaxed(0, vfe->base + VFE_BUS_WM_CFG(wm)); +} + +static void vfe_wm_update(struct vfe_device *vfe, u8 wm, u32 addr, + struct vfe_line *line) +{ + wm = RDI_WM(wm); /* map to actual WM used (from wm=RDI index) */ + writel_relaxed(addr, vfe->base + VFE_BUS_WM_IMAGE_ADDR(wm)); +} + +static void vfe_reg_update(struct vfe_device *vfe, enum vfe_line_id line_id) +{ + vfe->reg_update |= REG_UPDATE_RDI(vfe, line_id); + writel_relaxed(vfe->reg_update, vfe->base + VFE_REG_UPDATE_CMD); +} + +static inline void vfe_reg_update_clear(struct vfe_device *vfe, + enum vfe_line_id line_id) +{ + vfe->reg_update &= ~REG_UPDATE_RDI(vfe, line_id); +} + +static void vfe_enable_irq_common(struct vfe_device *vfe) +{ + /* enable reset ack IRQ and top BUS status IRQ */ + writel_relaxed(IRQ_MASK_0_RESET_ACK | IRQ_MASK_0_BUS_TOP_IRQ, + vfe->base + VFE_IRQ_MASK(0)); +} + +static void vfe_enable_lines_irq(struct vfe_device *vfe) +{ + int i; + u32 bus_irq_mask = 0; + + for (i = 0; i < MAX_VFE_OUTPUT_LINES; i++) { + /* Enable IRQ for newly added lines, but also keep already running lines's IRQ */ + if (vfe->line[i].output.state == VFE_OUTPUT_RESERVED || + vfe->line[i].output.state == VFE_OUTPUT_ON) { + bus_irq_mask |= BUS_IRQ_MASK_0_RDI_RUP(vfe, i) + | BUS_IRQ_MASK_0_COMP_DONE(vfe, RDI_COMP_GROUP(i)); + } + } + + writel_relaxed(bus_irq_mask, vfe->base + VFE_BUS_IRQ_MASK(0)); +} + +static void vfe_isr_reg_update(struct vfe_device *vfe, enum vfe_line_id line_id); +static void vfe_isr_wm_done(struct vfe_device *vfe, u8 wm); + +/* + * vfe_isr - VFE module interrupt handler + * @irq: 
Interrupt line + * @dev: VFE device + * + * Return IRQ_HANDLED on success + */ +static irqreturn_t vfe_isr(int irq, void *dev) +{ + struct vfe_device *vfe = dev; + u32 status; + int i; + + status = readl_relaxed(vfe->base + VFE_IRQ_STATUS(0)); + writel_relaxed(status, vfe->base + VFE_IRQ_CLEAR(0)); + writel_relaxed(IRQ_CMD_GLOBAL_CLEAR, vfe->base + VFE_IRQ_CMD); + + if (status & IRQ_MASK_0_RESET_ACK) + vfe_isr_reset_ack(vfe); + + if (status & IRQ_MASK_0_BUS_TOP_IRQ) { + u32 status = readl_relaxed(vfe->base + VFE_BUS_IRQ_STATUS(0)); + + writel_relaxed(status, vfe->base + VFE_BUS_IRQ_CLEAR(0)); + writel_relaxed(1, vfe->base + VFE_BUS_IRQ_CLEAR_GLOBAL); + + /* Loop through all WMs IRQs */ + for (i = 0; i < MSM_VFE_IMAGE_MASTERS_NUM; i++) { + if (status & BUS_IRQ_MASK_0_RDI_RUP(vfe, i)) + vfe_isr_reg_update(vfe, i); + + if (status & BUS_IRQ_MASK_0_COMP_DONE(vfe, RDI_COMP_GROUP(i))) + vfe_isr_wm_done(vfe, i); + } + } + + return IRQ_HANDLED; +} + +/* + * vfe_halt - Trigger halt on VFE module and wait to complete + * @vfe: VFE device + * + * Return 0 on success or a negative error code otherwise + */ +static int vfe_halt(struct vfe_device *vfe) +{ + /* rely on vfe_disable_output() to stop the VFE */ + return 0; +} + +static int vfe_get_output(struct vfe_line *line) +{ + struct vfe_device *vfe = to_vfe(line); + struct vfe_output *output; + unsigned long flags; + + spin_lock_irqsave(&vfe->output_lock, flags); + + output = &line->output; + if (output->state > VFE_OUTPUT_RESERVED) { + dev_err(vfe->camss->dev, "Output is running\n"); + goto error; + } + + output->wm_num = 1; + + /* Correspondence between VFE line number and WM number. + * line 0 -> RDI 0, line 1 -> RDI1, line 2 -> RDI2, line 3 -> PIX/RDI3 + * Note this 1:1 mapping will not work for PIX streams. 
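+ * The RDI index stored here is translated to the actual hardware write master
+ * only when registers are programmed, via RDI_WM() in vfe_wm_start/stop/update.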
+ */ + output->wm_idx[0] = line->id; + vfe->wm_output_map[line->id] = line->id; + + output->drop_update_idx = 0; + + spin_unlock_irqrestore(&vfe->output_lock, flags); + + return 0; + +error: + spin_unlock_irqrestore(&vfe->output_lock, flags); + output->state = VFE_OUTPUT_OFF; + + return -EINVAL; +} + +static int vfe_enable_output(struct vfe_line *line) +{ + struct vfe_device *vfe = to_vfe(line); + struct vfe_output *output = &line->output; + unsigned long flags; + unsigned int i; + + spin_lock_irqsave(&vfe->output_lock, flags); + + vfe_reg_update_clear(vfe, line->id); + + if (output->state > VFE_OUTPUT_RESERVED) { + dev_err(vfe->camss->dev, "Output is not in reserved state %d\n", + output->state); + spin_unlock_irqrestore(&vfe->output_lock, flags); + return -EINVAL; + } + + WARN_ON(output->gen2.active_num); + + output->state = VFE_OUTPUT_ON; + + output->sequence = 0; + output->wait_reg_update = 0; + reinit_completion(&output->reg_update); + + vfe_wm_start(vfe, output->wm_idx[0], line); + + for (i = 0; i < 2; i++) { + output->buf[i] = vfe_buf_get_pending(output); + if (!output->buf[i]) + break; + output->gen2.active_num++; + vfe_wm_update(vfe, output->wm_idx[0], output->buf[i]->addr[0], line); + } + + vfe_reg_update(vfe, line->id); + + spin_unlock_irqrestore(&vfe->output_lock, flags); + + return 0; +} + +static void vfe_disable_output(struct vfe_line *line) +{ + struct vfe_device *vfe = to_vfe(line); + struct vfe_output *output = &line->output; + unsigned long flags; + unsigned int i; + + spin_lock_irqsave(&vfe->output_lock, flags); + for (i = 0; i < output->wm_num; i++) + vfe_wm_stop(vfe, output->wm_idx[i]); + output->gen2.active_num = 0; + spin_unlock_irqrestore(&vfe->output_lock, flags); + + vfe_reset(vfe); +} + +/* + * vfe_enable - Enable streaming on VFE line + * @line: VFE line + * + * Return 0 on success or a negative error code otherwise + */ +static int vfe_enable(struct vfe_line *line) +{ + struct vfe_device *vfe = to_vfe(line); + int ret; + + mutex_lock(&vfe->stream_lock); + + if (!vfe->stream_count) + vfe_enable_irq_common(vfe); + + vfe->stream_count++; + + vfe_enable_lines_irq(vfe); + + mutex_unlock(&vfe->stream_lock); + + ret = vfe_get_output(line); + if (ret < 0) + goto error_get_output; + + ret = vfe_enable_output(line); + if (ret < 0) + goto error_enable_output; + + vfe->was_streaming = 1; + + return 0; + +error_enable_output: + vfe_put_output(line); + +error_get_output: + mutex_lock(&vfe->stream_lock); + + vfe->stream_count--; + + mutex_unlock(&vfe->stream_lock); + + return ret; +} + +/* + * vfe_disable - Disable streaming on VFE line + * @line: VFE line + * + * Return 0 on success or a negative error code otherwise + */ +static int vfe_disable(struct vfe_line *line) +{ + struct vfe_device *vfe = to_vfe(line); + + vfe_disable_output(line); + + vfe_put_output(line); + + mutex_lock(&vfe->stream_lock); + + vfe->stream_count--; + + mutex_unlock(&vfe->stream_lock); + + return 0; +} + +/* + * vfe_isr_reg_update - Process reg update interrupt + * @vfe: VFE Device + * @line_id: VFE line + */ +static void vfe_isr_reg_update(struct vfe_device *vfe, enum vfe_line_id line_id) +{ + struct vfe_output *output; + unsigned long flags; + + spin_lock_irqsave(&vfe->output_lock, flags); + vfe_reg_update_clear(vfe, line_id); + + output = &vfe->line[line_id].output; + + if (output->wait_reg_update) { + output->wait_reg_update = 0; + complete(&output->reg_update); + } + + spin_unlock_irqrestore(&vfe->output_lock, flags); +} + +/* + * vfe_isr_wm_done - Process write master done interrupt + * @vfe: 
VFE Device + * @wm: Write master id + */ +static void vfe_isr_wm_done(struct vfe_device *vfe, u8 wm) +{ + struct vfe_line *line = &vfe->line[vfe->wm_output_map[wm]]; + struct camss_buffer *ready_buf; + struct vfe_output *output; + unsigned long flags; + u32 index; + u64 ts = ktime_get_ns(); + + spin_lock_irqsave(&vfe->output_lock, flags); + + if (vfe->wm_output_map[wm] == VFE_LINE_NONE) { + dev_err_ratelimited(vfe->camss->dev, + "Received wm done for unmapped index\n"); + goto out_unlock; + } + output = &vfe->line[vfe->wm_output_map[wm]].output; + + ready_buf = output->buf[0]; + if (!ready_buf) { + dev_err_ratelimited(vfe->camss->dev, + "Missing ready buf %d!\n", output->state); + goto out_unlock; + } + + ready_buf->vb.vb2_buf.timestamp = ts; + ready_buf->vb.sequence = output->sequence++; + + index = 0; + output->buf[0] = output->buf[1]; + if (output->buf[0]) + index = 1; + + output->buf[index] = vfe_buf_get_pending(output); + + if (output->buf[index]) + vfe_wm_update(vfe, output->wm_idx[0], output->buf[index]->addr[0], line); + else + output->gen2.active_num--; + + spin_unlock_irqrestore(&vfe->output_lock, flags); + + vb2_buffer_done(&ready_buf->vb.vb2_buf, VB2_BUF_STATE_DONE); + + return; + +out_unlock: + spin_unlock_irqrestore(&vfe->output_lock, flags); +} + +/* + * vfe_pm_domain_off - Disable power domains specific to this VFE. + * @vfe: VFE Device + */ +static void vfe_pm_domain_off(struct vfe_device *vfe) +{ + struct camss *camss = vfe->camss; + + if (vfe->id >= camss->vfe_num) + return; + + device_link_del(camss->genpd_link[vfe->id]); +} + +/* + * vfe_pm_domain_on - Enable power domains specific to this VFE. + * @vfe: VFE Device + */ +static int vfe_pm_domain_on(struct vfe_device *vfe) +{ + struct camss *camss = vfe->camss; + enum vfe_line_id id = vfe->id; + + if (id >= camss->vfe_num) + return 0; + + camss->genpd_link[id] = device_link_add(camss->dev, camss->genpd[id], + DL_FLAG_STATELESS | + DL_FLAG_PM_RUNTIME | + DL_FLAG_RPM_ACTIVE); + if (!camss->genpd_link[id]) + return -EINVAL; + + return 0; +} + +/* + * vfe_queue_buffer - Add empty buffer + * @vid: Video device structure + * @buf: Buffer to be enqueued + * + * Add an empty buffer - depending on the current number of buffers it will be + * put in pending buffer queue or directly given to the hardware to be filled. 
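+ * On this hardware "given to the hardware" means writing the buffer address to
+ * the reserved write master while fewer than two buffers are active, see the
+ * VFE_OUTPUT_ON branch below.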
+ * + * Return 0 on success or a negative error code otherwise + */ +static int vfe_queue_buffer(struct camss_video *vid, + struct camss_buffer *buf) +{ + struct vfe_line *line = container_of(vid, struct vfe_line, video_out); + struct vfe_device *vfe = to_vfe(line); + struct vfe_output *output; + unsigned long flags; + + output = &line->output; + + spin_lock_irqsave(&vfe->output_lock, flags); + + if (output->state == VFE_OUTPUT_ON && output->gen2.active_num < 2) { + output->buf[output->gen2.active_num++] = buf; + vfe_wm_update(vfe, output->wm_idx[0], buf->addr[0], line); + } else { + vfe_buf_add_pending(output, buf); + } + + spin_unlock_irqrestore(&vfe->output_lock, flags); + + return 0; +} + +static const struct camss_video_ops vfe_video_ops_480 = { + .queue_buffer = vfe_queue_buffer, + .flush_buffers = vfe_flush_buffers, +}; + +static void vfe_subdev_init(struct device *dev, struct vfe_device *vfe) +{ + vfe->video_ops = vfe_video_ops_480; + vfe->line_num = MAX_VFE_OUTPUT_LINES; +} + +const struct vfe_hw_ops vfe_ops_480 = { + .global_reset = vfe_global_reset, + .hw_version = vfe_hw_version, + .isr = vfe_isr, + .pm_domain_off = vfe_pm_domain_off, + .pm_domain_on = vfe_pm_domain_on, + .subdev_init = vfe_subdev_init, + .vfe_disable = vfe_disable, + .vfe_enable = vfe_enable, + .vfe_halt = vfe_halt, +}; diff --git a/drivers/media/platform/qcom/camss/camss-vfe-gen1.c b/drivers/media/platform/qcom/camss/camss-vfe-gen1.c new file mode 100644 index 0000000000..239d3d4ac6 --- /dev/null +++ b/drivers/media/platform/qcom/camss/camss-vfe-gen1.c @@ -0,0 +1,742 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * camss-vfe-gen1.c + * + * Qualcomm MSM Camera Subsystem - VFE Common functionality for Gen 1 versions of hw (4.1, 4.7..) + * + * Copyright (C) 2020 Linaro Ltd. 
+ */ + +#include "camss.h" +#include "camss-vfe.h" +#include "camss-vfe-gen1.h" + +/* Max number of frame drop updates per frame */ +#define VFE_FRAME_DROP_UPDATES 2 +#define VFE_NEXT_SOF_MS 500 + +int vfe_gen1_halt(struct vfe_device *vfe) +{ + unsigned long time; + + reinit_completion(&vfe->halt_complete); + + vfe->ops_gen1->halt_request(vfe); + + time = wait_for_completion_timeout(&vfe->halt_complete, + msecs_to_jiffies(VFE_HALT_TIMEOUT_MS)); + if (!time) { + dev_err(vfe->camss->dev, "VFE halt timeout\n"); + return -EIO; + } + + return 0; +} + +static int vfe_disable_output(struct vfe_line *line) +{ + struct vfe_device *vfe = to_vfe(line); + struct vfe_output *output = &line->output; + const struct vfe_hw_ops *ops = vfe->ops; + unsigned long flags; + unsigned long time; + unsigned int i; + + spin_lock_irqsave(&vfe->output_lock, flags); + + output->gen1.wait_sof = 1; + spin_unlock_irqrestore(&vfe->output_lock, flags); + + time = wait_for_completion_timeout(&output->sof, msecs_to_jiffies(VFE_NEXT_SOF_MS)); + if (!time) + dev_err(vfe->camss->dev, "VFE sof timeout\n"); + + spin_lock_irqsave(&vfe->output_lock, flags); + for (i = 0; i < output->wm_num; i++) + vfe->ops_gen1->wm_enable(vfe, output->wm_idx[i], 0); + + ops->reg_update(vfe, line->id); + output->wait_reg_update = 1; + spin_unlock_irqrestore(&vfe->output_lock, flags); + + time = wait_for_completion_timeout(&output->reg_update, msecs_to_jiffies(VFE_NEXT_SOF_MS)); + if (!time) + dev_err(vfe->camss->dev, "VFE reg update timeout\n"); + + spin_lock_irqsave(&vfe->output_lock, flags); + + if (line->id != VFE_LINE_PIX) { + vfe->ops_gen1->wm_frame_based(vfe, output->wm_idx[0], 0); + vfe->ops_gen1->bus_disconnect_wm_from_rdi(vfe, output->wm_idx[0], line->id); + vfe->ops_gen1->enable_irq_wm_line(vfe, output->wm_idx[0], line->id, 0); + vfe->ops_gen1->set_cgc_override(vfe, output->wm_idx[0], 0); + spin_unlock_irqrestore(&vfe->output_lock, flags); + } else { + for (i = 0; i < output->wm_num; i++) { + vfe->ops_gen1->wm_line_based(vfe, output->wm_idx[i], NULL, i, 0); + vfe->ops_gen1->set_cgc_override(vfe, output->wm_idx[i], 0); + } + + vfe->ops_gen1->enable_irq_pix_line(vfe, 0, line->id, 0); + vfe->ops_gen1->set_module_cfg(vfe, 0); + vfe->ops_gen1->set_realign_cfg(vfe, line, 0); + vfe->ops_gen1->set_xbar_cfg(vfe, output, 0); + vfe->ops_gen1->set_camif_cmd(vfe, 0); + + spin_unlock_irqrestore(&vfe->output_lock, flags); + + vfe->ops_gen1->camif_wait_for_stop(vfe, vfe->camss->dev); + } + + return 0; +} + +/* + * vfe_gen1_disable - Disable streaming on VFE line + * @line: VFE line + * + * Return 0 on success or a negative error code otherwise + */ +int vfe_gen1_disable(struct vfe_line *line) +{ + struct vfe_device *vfe = to_vfe(line); + + vfe_disable_output(line); + + vfe_put_output(line); + + mutex_lock(&vfe->stream_lock); + + if (vfe->stream_count == 1) + vfe->ops_gen1->bus_enable_wr_if(vfe, 0); + + vfe->stream_count--; + + mutex_unlock(&vfe->stream_lock); + + return 0; +} + +static void vfe_output_init_addrs(struct vfe_device *vfe, + struct vfe_output *output, u8 sync, + struct vfe_line *line) +{ + u32 ping_addr; + u32 pong_addr; + unsigned int i; + + output->gen1.active_buf = 0; + + for (i = 0; i < output->wm_num; i++) { + if (output->buf[0]) + ping_addr = output->buf[0]->addr[i]; + else + ping_addr = 0; + + if (output->buf[1]) + pong_addr = output->buf[1]->addr[i]; + else + pong_addr = ping_addr; + + vfe->ops_gen1->wm_set_ping_addr(vfe, output->wm_idx[i], ping_addr); + vfe->ops_gen1->wm_set_pong_addr(vfe, output->wm_idx[i], pong_addr); + if (sync) + 
vfe->ops_gen1->bus_reload_wm(vfe, output->wm_idx[i]); + } +} + +static void vfe_output_frame_drop(struct vfe_device *vfe, + struct vfe_output *output, + u32 drop_pattern) +{ + u8 drop_period; + unsigned int i; + + /* We need to toggle update period to be valid on next frame */ + output->drop_update_idx++; + output->drop_update_idx %= VFE_FRAME_DROP_UPDATES; + drop_period = VFE_FRAME_DROP_VAL + output->drop_update_idx; + + for (i = 0; i < output->wm_num; i++) { + vfe->ops_gen1->wm_set_framedrop_period(vfe, output->wm_idx[i], drop_period); + vfe->ops_gen1->wm_set_framedrop_pattern(vfe, output->wm_idx[i], drop_pattern); + } + + vfe->ops->reg_update(vfe, container_of(output, struct vfe_line, output)->id); +} + +static int vfe_enable_output(struct vfe_line *line) +{ + struct vfe_device *vfe = to_vfe(line); + struct vfe_output *output = &line->output; + const struct vfe_hw_ops *ops = vfe->ops; + struct media_entity *sensor; + unsigned long flags; + unsigned int frame_skip = 0; + unsigned int i; + u16 ub_size; + + ub_size = vfe->ops_gen1->get_ub_size(vfe->id); + if (!ub_size) + return -EINVAL; + + sensor = camss_find_sensor(&line->subdev.entity); + if (sensor) { + struct v4l2_subdev *subdev = media_entity_to_v4l2_subdev(sensor); + + v4l2_subdev_call(subdev, sensor, g_skip_frames, &frame_skip); + /* Max frame skip is 29 frames */ + if (frame_skip > VFE_FRAME_DROP_VAL - 1) + frame_skip = VFE_FRAME_DROP_VAL - 1; + } + + spin_lock_irqsave(&vfe->output_lock, flags); + + ops->reg_update_clear(vfe, line->id); + + if (output->state > VFE_OUTPUT_RESERVED) { + dev_err(vfe->camss->dev, "Output is not in reserved state %d\n", output->state); + spin_unlock_irqrestore(&vfe->output_lock, flags); + return -EINVAL; + } + output->state = VFE_OUTPUT_IDLE; + + output->buf[0] = vfe_buf_get_pending(output); + output->buf[1] = vfe_buf_get_pending(output); + + if (!output->buf[0] && output->buf[1]) { + output->buf[0] = output->buf[1]; + output->buf[1] = NULL; + } + + if (output->buf[0]) + output->state = VFE_OUTPUT_SINGLE; + + if (output->buf[1]) + output->state = VFE_OUTPUT_CONTINUOUS; + + switch (output->state) { + case VFE_OUTPUT_SINGLE: + vfe_output_frame_drop(vfe, output, 1 << frame_skip); + break; + case VFE_OUTPUT_CONTINUOUS: + vfe_output_frame_drop(vfe, output, 3 << frame_skip); + break; + default: + vfe_output_frame_drop(vfe, output, 0); + break; + } + + output->sequence = 0; + output->gen1.wait_sof = 0; + output->wait_reg_update = 0; + reinit_completion(&output->sof); + reinit_completion(&output->reg_update); + + vfe_output_init_addrs(vfe, output, 0, line); + + if (line->id != VFE_LINE_PIX) { + vfe->ops_gen1->set_cgc_override(vfe, output->wm_idx[0], 1); + vfe->ops_gen1->enable_irq_wm_line(vfe, output->wm_idx[0], line->id, 1); + vfe->ops_gen1->bus_connect_wm_to_rdi(vfe, output->wm_idx[0], line->id); + vfe->ops_gen1->wm_set_subsample(vfe, output->wm_idx[0]); + vfe->ops_gen1->set_rdi_cid(vfe, line->id, 0); + vfe->ops_gen1->wm_set_ub_cfg(vfe, output->wm_idx[0], + (ub_size + 1) * output->wm_idx[0], ub_size); + vfe->ops_gen1->wm_frame_based(vfe, output->wm_idx[0], 1); + vfe->ops_gen1->wm_enable(vfe, output->wm_idx[0], 1); + vfe->ops_gen1->bus_reload_wm(vfe, output->wm_idx[0]); + } else { + ub_size /= output->wm_num; + for (i = 0; i < output->wm_num; i++) { + vfe->ops_gen1->set_cgc_override(vfe, output->wm_idx[i], 1); + vfe->ops_gen1->wm_set_subsample(vfe, output->wm_idx[i]); + vfe->ops_gen1->wm_set_ub_cfg(vfe, output->wm_idx[i], + (ub_size + 1) * output->wm_idx[i], ub_size); + vfe->ops_gen1->wm_line_based(vfe, 
output->wm_idx[i], + &line->video_out.active_fmt.fmt.pix_mp, i, 1); + vfe->ops_gen1->wm_enable(vfe, output->wm_idx[i], 1); + vfe->ops_gen1->bus_reload_wm(vfe, output->wm_idx[i]); + } + vfe->ops_gen1->enable_irq_pix_line(vfe, 0, line->id, 1); + vfe->ops_gen1->set_module_cfg(vfe, 1); + vfe->ops_gen1->set_camif_cfg(vfe, line); + vfe->ops_gen1->set_realign_cfg(vfe, line, 1); + vfe->ops_gen1->set_xbar_cfg(vfe, output, 1); + vfe->ops_gen1->set_demux_cfg(vfe, line); + vfe->ops_gen1->set_scale_cfg(vfe, line); + vfe->ops_gen1->set_crop_cfg(vfe, line); + vfe->ops_gen1->set_clamp_cfg(vfe); + vfe->ops_gen1->set_camif_cmd(vfe, 1); + } + + ops->reg_update(vfe, line->id); + + spin_unlock_irqrestore(&vfe->output_lock, flags); + + return 0; +} + +static int vfe_get_output(struct vfe_line *line) +{ + struct vfe_device *vfe = to_vfe(line); + struct vfe_output *output; + struct v4l2_format *f = &line->video_out.active_fmt; + unsigned long flags; + int i; + int wm_idx; + + spin_lock_irqsave(&vfe->output_lock, flags); + + output = &line->output; + if (output->state > VFE_OUTPUT_RESERVED) { + dev_err(vfe->camss->dev, "Output is running\n"); + goto error; + } + output->state = VFE_OUTPUT_RESERVED; + + output->gen1.active_buf = 0; + + switch (f->fmt.pix_mp.pixelformat) { + case V4L2_PIX_FMT_NV12: + case V4L2_PIX_FMT_NV21: + case V4L2_PIX_FMT_NV16: + case V4L2_PIX_FMT_NV61: + output->wm_num = 2; + break; + default: + output->wm_num = 1; + break; + } + + for (i = 0; i < output->wm_num; i++) { + wm_idx = vfe_reserve_wm(vfe, line->id); + if (wm_idx < 0) { + dev_err(vfe->camss->dev, "Can not reserve wm\n"); + goto error_get_wm; + } + output->wm_idx[i] = wm_idx; + } + + output->drop_update_idx = 0; + + spin_unlock_irqrestore(&vfe->output_lock, flags); + + return 0; + +error_get_wm: + for (i--; i >= 0; i--) + vfe_release_wm(vfe, output->wm_idx[i]); + output->state = VFE_OUTPUT_OFF; +error: + spin_unlock_irqrestore(&vfe->output_lock, flags); + + return -EINVAL; +} + +int vfe_gen1_enable(struct vfe_line *line) +{ + struct vfe_device *vfe = to_vfe(line); + int ret; + + mutex_lock(&vfe->stream_lock); + + if (!vfe->stream_count) { + vfe->ops_gen1->enable_irq_common(vfe); + vfe->ops_gen1->bus_enable_wr_if(vfe, 1); + vfe->ops_gen1->set_qos(vfe); + vfe->ops_gen1->set_ds(vfe); + } + + vfe->stream_count++; + + mutex_unlock(&vfe->stream_lock); + + ret = vfe_get_output(line); + if (ret < 0) + goto error_get_output; + + ret = vfe_enable_output(line); + if (ret < 0) + goto error_enable_output; + + vfe->was_streaming = 1; + + return 0; + +error_enable_output: + vfe_put_output(line); + +error_get_output: + mutex_lock(&vfe->stream_lock); + + if (vfe->stream_count == 1) + vfe->ops_gen1->bus_enable_wr_if(vfe, 0); + + vfe->stream_count--; + + mutex_unlock(&vfe->stream_lock); + + return ret; +} + +static void vfe_output_update_ping_addr(struct vfe_device *vfe, + struct vfe_output *output, u8 sync, + struct vfe_line *line) +{ + u32 addr; + unsigned int i; + + for (i = 0; i < output->wm_num; i++) { + if (output->buf[0]) + addr = output->buf[0]->addr[i]; + else + addr = 0; + + vfe->ops_gen1->wm_set_ping_addr(vfe, output->wm_idx[i], addr); + if (sync) + vfe->ops_gen1->bus_reload_wm(vfe, output->wm_idx[i]); + } +} + +static void vfe_output_update_pong_addr(struct vfe_device *vfe, + struct vfe_output *output, u8 sync, + struct vfe_line *line) +{ + u32 addr; + unsigned int i; + + for (i = 0; i < output->wm_num; i++) { + if (output->buf[1]) + addr = output->buf[1]->addr[i]; + else + addr = 0; + + vfe->ops_gen1->wm_set_pong_addr(vfe, 
output->wm_idx[i], addr); + if (sync) + vfe->ops_gen1->bus_reload_wm(vfe, output->wm_idx[i]); + } +} + +static void vfe_buf_update_wm_on_next(struct vfe_device *vfe, + struct vfe_output *output) +{ + switch (output->state) { + case VFE_OUTPUT_CONTINUOUS: + vfe_output_frame_drop(vfe, output, 3); + break; + case VFE_OUTPUT_SINGLE: + default: + dev_err_ratelimited(vfe->camss->dev, + "Next buf in wrong state! %d\n", + output->state); + break; + } +} + +static void vfe_buf_update_wm_on_last(struct vfe_device *vfe, + struct vfe_output *output) +{ + switch (output->state) { + case VFE_OUTPUT_CONTINUOUS: + output->state = VFE_OUTPUT_SINGLE; + vfe_output_frame_drop(vfe, output, 1); + break; + case VFE_OUTPUT_SINGLE: + output->state = VFE_OUTPUT_STOPPING; + vfe_output_frame_drop(vfe, output, 0); + break; + default: + dev_err_ratelimited(vfe->camss->dev, + "Last buff in wrong state! %d\n", + output->state); + break; + } +} + +static void vfe_buf_update_wm_on_new(struct vfe_device *vfe, + struct vfe_output *output, + struct camss_buffer *new_buf, + struct vfe_line *line) +{ + int inactive_idx; + + switch (output->state) { + case VFE_OUTPUT_SINGLE: + inactive_idx = !output->gen1.active_buf; + + if (!output->buf[inactive_idx]) { + output->buf[inactive_idx] = new_buf; + + if (inactive_idx) + vfe_output_update_pong_addr(vfe, output, 0, line); + else + vfe_output_update_ping_addr(vfe, output, 0, line); + + vfe_output_frame_drop(vfe, output, 3); + output->state = VFE_OUTPUT_CONTINUOUS; + } else { + vfe_buf_add_pending(output, new_buf); + dev_err_ratelimited(vfe->camss->dev, + "Inactive buffer is busy\n"); + } + break; + + case VFE_OUTPUT_IDLE: + if (!output->buf[0]) { + output->buf[0] = new_buf; + + vfe_output_init_addrs(vfe, output, 1, line); + vfe_output_frame_drop(vfe, output, 1); + + output->state = VFE_OUTPUT_SINGLE; + } else { + vfe_buf_add_pending(output, new_buf); + dev_err_ratelimited(vfe->camss->dev, + "Output idle with buffer set!\n"); + } + break; + + case VFE_OUTPUT_CONTINUOUS: + default: + vfe_buf_add_pending(output, new_buf); + break; + } +} + +/* + * vfe_isr_halt_ack - Process halt ack + * @vfe: VFE Device + */ +static void vfe_isr_halt_ack(struct vfe_device *vfe) +{ + complete(&vfe->halt_complete); + vfe->ops_gen1->halt_clear(vfe); +} + +/* + * vfe_isr_sof - Process start of frame interrupt + * @vfe: VFE Device + * @line_id: VFE line + */ +static void vfe_isr_sof(struct vfe_device *vfe, enum vfe_line_id line_id) +{ + struct vfe_output *output; + unsigned long flags; + + spin_lock_irqsave(&vfe->output_lock, flags); + output = &vfe->line[line_id].output; + if (output->gen1.wait_sof) { + output->gen1.wait_sof = 0; + complete(&output->sof); + } + spin_unlock_irqrestore(&vfe->output_lock, flags); +} + +/* + * vfe_isr_reg_update - Process reg update interrupt + * @vfe: VFE Device + * @line_id: VFE line + */ +static void vfe_isr_reg_update(struct vfe_device *vfe, enum vfe_line_id line_id) +{ + struct vfe_output *output; + struct vfe_line *line = &vfe->line[line_id]; + unsigned long flags; + + spin_lock_irqsave(&vfe->output_lock, flags); + vfe->ops->reg_update_clear(vfe, line_id); + + output = &line->output; + + if (output->wait_reg_update) { + output->wait_reg_update = 0; + complete(&output->reg_update); + spin_unlock_irqrestore(&vfe->output_lock, flags); + return; + } + + if (output->state == VFE_OUTPUT_STOPPING) { + /* Release last buffer when hw is idle */ + if (output->last_buffer) { + vb2_buffer_done(&output->last_buffer->vb.vb2_buf, + VB2_BUF_STATE_DONE); + output->last_buffer = NULL; + } + 
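/*
+ * The output is restarted below: the ping/pong pair is refilled from the
+ * pending queue, a matching frame-drop pattern is chosen and the write
+ * master addresses are reprogrammed via vfe_output_init_addrs().
+ */
+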
output->state = VFE_OUTPUT_IDLE; + + /* Buffers received in stopping state are queued in */ + /* dma pending queue, start next capture here */ + + output->buf[0] = vfe_buf_get_pending(output); + output->buf[1] = vfe_buf_get_pending(output); + + if (!output->buf[0] && output->buf[1]) { + output->buf[0] = output->buf[1]; + output->buf[1] = NULL; + } + + if (output->buf[0]) + output->state = VFE_OUTPUT_SINGLE; + + if (output->buf[1]) + output->state = VFE_OUTPUT_CONTINUOUS; + + switch (output->state) { + case VFE_OUTPUT_SINGLE: + vfe_output_frame_drop(vfe, output, 2); + break; + case VFE_OUTPUT_CONTINUOUS: + vfe_output_frame_drop(vfe, output, 3); + break; + default: + vfe_output_frame_drop(vfe, output, 0); + break; + } + + vfe_output_init_addrs(vfe, output, 1, &vfe->line[line_id]); + } + + spin_unlock_irqrestore(&vfe->output_lock, flags); +} + +/* + * vfe_isr_wm_done - Process write master done interrupt + * @vfe: VFE Device + * @wm: Write master id + */ +static void vfe_isr_wm_done(struct vfe_device *vfe, u8 wm) +{ + struct camss_buffer *ready_buf; + struct vfe_output *output; + dma_addr_t *new_addr; + unsigned long flags; + u32 active_index; + u64 ts = ktime_get_ns(); + unsigned int i; + + active_index = vfe->ops_gen1->wm_get_ping_pong_status(vfe, wm); + + spin_lock_irqsave(&vfe->output_lock, flags); + + if (vfe->wm_output_map[wm] == VFE_LINE_NONE) { + dev_err_ratelimited(vfe->camss->dev, + "Received wm done for unmapped index\n"); + goto out_unlock; + } + output = &vfe->line[vfe->wm_output_map[wm]].output; + + if (output->gen1.active_buf == active_index && 0) { + dev_err_ratelimited(vfe->camss->dev, + "Active buffer mismatch!\n"); + goto out_unlock; + } + output->gen1.active_buf = active_index; + + ready_buf = output->buf[!active_index]; + if (!ready_buf) { + dev_err_ratelimited(vfe->camss->dev, + "Missing ready buf %d %d!\n", + !active_index, output->state); + goto out_unlock; + } + + ready_buf->vb.vb2_buf.timestamp = ts; + ready_buf->vb.sequence = output->sequence++; + + /* Get next buffer */ + output->buf[!active_index] = vfe_buf_get_pending(output); + if (!output->buf[!active_index]) { + /* No next buffer - set same address */ + new_addr = ready_buf->addr; + vfe_buf_update_wm_on_last(vfe, output); + } else { + new_addr = output->buf[!active_index]->addr; + vfe_buf_update_wm_on_next(vfe, output); + } + + if (active_index) + for (i = 0; i < output->wm_num; i++) + vfe->ops_gen1->wm_set_ping_addr(vfe, output->wm_idx[i], new_addr[i]); + else + for (i = 0; i < output->wm_num; i++) + vfe->ops_gen1->wm_set_pong_addr(vfe, output->wm_idx[i], new_addr[i]); + + spin_unlock_irqrestore(&vfe->output_lock, flags); + + if (output->state == VFE_OUTPUT_STOPPING) + output->last_buffer = ready_buf; + else + vb2_buffer_done(&ready_buf->vb.vb2_buf, VB2_BUF_STATE_DONE); + + return; + +out_unlock: + spin_unlock_irqrestore(&vfe->output_lock, flags); +} + +/* + * vfe_queue_buffer - Add empty buffer + * @vid: Video device structure + * @buf: Buffer to be enqueued + * + * Add an empty buffer - depending on the current number of buffers it will be + * put in pending buffer queue or directly given to the hardware to be filled. 
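+ * For gen1 hardware vfe_buf_update_wm_on_new() makes the decision: the buffer
+ * becomes the free half of the ping/pong pair when one is available, otherwise
+ * it is appended to the pending queue.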
+ * + * Return 0 on success or a negative error code otherwise + */ +static int vfe_queue_buffer(struct camss_video *vid, struct camss_buffer *buf) +{ + struct vfe_line *line = container_of(vid, struct vfe_line, video_out); + struct vfe_device *vfe = to_vfe(line); + struct vfe_output *output; + unsigned long flags; + + output = &line->output; + + spin_lock_irqsave(&vfe->output_lock, flags); + + vfe_buf_update_wm_on_new(vfe, output, buf, line); + + spin_unlock_irqrestore(&vfe->output_lock, flags); + + return 0; +} + +#define CALC_WORD(width, M, N) (((width) * (M) + (N) - 1) / (N)) + +int vfe_word_per_line(u32 format, u32 width) +{ + int val = 0; + + switch (format) { + case V4L2_PIX_FMT_NV12: + case V4L2_PIX_FMT_NV21: + case V4L2_PIX_FMT_NV16: + case V4L2_PIX_FMT_NV61: + val = CALC_WORD(width, 1, 8); + break; + case V4L2_PIX_FMT_YUYV: + case V4L2_PIX_FMT_YVYU: + case V4L2_PIX_FMT_UYVY: + case V4L2_PIX_FMT_VYUY: + val = CALC_WORD(width, 2, 8); + break; + } + + return val; +} + +const struct vfe_isr_ops vfe_isr_ops_gen1 = { + .reset_ack = vfe_isr_reset_ack, + .halt_ack = vfe_isr_halt_ack, + .reg_update = vfe_isr_reg_update, + .sof = vfe_isr_sof, + .comp_done = vfe_isr_comp_done, + .wm_done = vfe_isr_wm_done, +}; + +const struct camss_video_ops vfe_video_ops_gen1 = { + .queue_buffer = vfe_queue_buffer, + .flush_buffers = vfe_flush_buffers, +}; diff --git a/drivers/media/platform/qcom/camss/camss-vfe-gen1.h b/drivers/media/platform/qcom/camss/camss-vfe-gen1.h new file mode 100644 index 0000000000..6d5f965656 --- /dev/null +++ b/drivers/media/platform/qcom/camss/camss-vfe-gen1.h @@ -0,0 +1,117 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * camss-vfe.h + * + * Qualcomm MSM Camera Subsystem - VFE (Video Front End) Module + * + * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved. + * Copyright (C) 2015-2018 Linaro Ltd. 
+ */ +#ifndef QC_MSM_CAMSS_VFE_GEN1_H +#define QC_MSM_CAMSS_VFE_GEN1_H + +#include "camss-vfe.h" + +enum vfe_line_id; +struct vfe_device; +struct vfe_line; +struct vfe_output; + +struct vfe_hw_ops_gen1 { + void (*bus_connect_wm_to_rdi)(struct vfe_device *vfe, u8 wm, enum vfe_line_id id); + void (*bus_disconnect_wm_from_rdi)(struct vfe_device *vfe, u8 wm, enum vfe_line_id id); + void (*bus_enable_wr_if)(struct vfe_device *vfe, u8 enable); + void (*bus_reload_wm)(struct vfe_device *vfe, u8 wm); + int (*camif_wait_for_stop)(struct vfe_device *vfe, struct device *dev); + void (*enable_irq_common)(struct vfe_device *vfe); + void (*enable_irq_wm_line)(struct vfe_device *vfe, u8 wm, enum vfe_line_id line_id, + u8 enable); + void (*enable_irq_pix_line)(struct vfe_device *vfe, u8 comp, enum vfe_line_id line_id, + u8 enable); + u16 (*get_ub_size)(u8 vfe_id); + void (*halt_clear)(struct vfe_device *vfe); + void (*halt_request)(struct vfe_device *vfe); + void (*set_camif_cfg)(struct vfe_device *vfe, struct vfe_line *line); + void (*set_camif_cmd)(struct vfe_device *vfe, u8 enable); + void (*set_cgc_override)(struct vfe_device *vfe, u8 wm, u8 enable); + void (*set_clamp_cfg)(struct vfe_device *vfe); + void (*set_crop_cfg)(struct vfe_device *vfe, struct vfe_line *line); + void (*set_demux_cfg)(struct vfe_device *vfe, struct vfe_line *line); + void (*set_ds)(struct vfe_device *vfe); + void (*set_module_cfg)(struct vfe_device *vfe, u8 enable); + void (*set_scale_cfg)(struct vfe_device *vfe, struct vfe_line *line); + void (*set_rdi_cid)(struct vfe_device *vfe, enum vfe_line_id id, u8 cid); + void (*set_realign_cfg)(struct vfe_device *vfe, struct vfe_line *line, u8 enable); + void (*set_qos)(struct vfe_device *vfe); + void (*set_xbar_cfg)(struct vfe_device *vfe, struct vfe_output *output, u8 enable); + void (*wm_frame_based)(struct vfe_device *vfe, u8 wm, u8 enable); + void (*wm_line_based)(struct vfe_device *vfe, u32 wm, struct v4l2_pix_format_mplane *pix, + u8 plane, u32 enable); + void (*wm_set_ub_cfg)(struct vfe_device *vfe, u8 wm, u16 offset, u16 depth); + void (*wm_set_subsample)(struct vfe_device *vfe, u8 wm); + void (*wm_set_framedrop_period)(struct vfe_device *vfe, u8 wm, u8 per); + void (*wm_set_framedrop_pattern)(struct vfe_device *vfe, u8 wm, u32 pattern); + void (*wm_set_ping_addr)(struct vfe_device *vfe, u8 wm, u32 addr); + void (*wm_set_pong_addr)(struct vfe_device *vfe, u8 wm, u32 addr); + int (*wm_get_ping_pong_status)(struct vfe_device *vfe, u8 wm); + void (*wm_enable)(struct vfe_device *vfe, u8 wm, u8 enable); +}; + +/* + * vfe_calc_interp_reso - Calculate interpolation mode + * @input: Input resolution + * @output: Output resolution + * + * Return interpolation mode + */ +static inline u8 vfe_calc_interp_reso(u16 input, u16 output) +{ + if (input / output >= 16) + return 0; + + if (input / output >= 8) + return 1; + + if (input / output >= 4) + return 2; + + return 3; +} + +/* + * vfe_gen1_disable - Disable streaming on VFE line + * @line: VFE line + * + * Return 0 on success or a negative error code otherwise + */ +int vfe_gen1_disable(struct vfe_line *line); + +/* + * vfe_gen1_enable - Enable VFE module + * @line: VFE line + * + * Return 0 on success + */ +int vfe_gen1_enable(struct vfe_line *line); + +/* + * vfe_gen1_enable - Halt VFE module + * @vfe: VFE device + * + * Return 0 on success + */ +int vfe_gen1_halt(struct vfe_device *vfe); + +/* + * vfe_word_per_line - Calculate number of words per frame width + * @format: V4L2 format + * @width: Frame width + * + * Return number of 
words per frame width + */ +int vfe_word_per_line(u32 format, u32 width); + +extern const struct vfe_isr_ops vfe_isr_ops_gen1; +extern const struct camss_video_ops vfe_video_ops_gen1; + +#endif /* QC_MSM_CAMSS_VFE_GEN1_H */ diff --git a/drivers/media/platform/qcom/camss/camss-vfe.c b/drivers/media/platform/qcom/camss/camss-vfe.c new file mode 100644 index 0000000000..965500b83d --- /dev/null +++ b/drivers/media/platform/qcom/camss/camss-vfe.c @@ -0,0 +1,1620 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * camss-vfe.c + * + * Qualcomm MSM Camera Subsystem - VFE (Video Front End) Module + * + * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved. + * Copyright (C) 2015-2018 Linaro Ltd. + */ +#include <linux/clk.h> +#include <linux/completion.h> +#include <linux/interrupt.h> +#include <linux/iommu.h> +#include <linux/mutex.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/spinlock_types.h> +#include <linux/spinlock.h> +#include <media/media-entity.h> +#include <media/v4l2-device.h> +#include <media/v4l2-subdev.h> + +#include "camss-vfe.h" +#include "camss.h" + +#define MSM_VFE_NAME "msm_vfe" + +/* VFE reset timeout */ +#define VFE_RESET_TIMEOUT_MS 50 + +#define SCALER_RATIO_MAX 16 + +struct vfe_format { + u32 code; + u8 bpp; +}; + +static const struct vfe_format formats_rdi_8x16[] = { + { MEDIA_BUS_FMT_UYVY8_2X8, 8 }, + { MEDIA_BUS_FMT_VYUY8_2X8, 8 }, + { MEDIA_BUS_FMT_YUYV8_2X8, 8 }, + { MEDIA_BUS_FMT_YVYU8_2X8, 8 }, + { MEDIA_BUS_FMT_SBGGR8_1X8, 8 }, + { MEDIA_BUS_FMT_SGBRG8_1X8, 8 }, + { MEDIA_BUS_FMT_SGRBG8_1X8, 8 }, + { MEDIA_BUS_FMT_SRGGB8_1X8, 8 }, + { MEDIA_BUS_FMT_SBGGR10_1X10, 10 }, + { MEDIA_BUS_FMT_SGBRG10_1X10, 10 }, + { MEDIA_BUS_FMT_SGRBG10_1X10, 10 }, + { MEDIA_BUS_FMT_SRGGB10_1X10, 10 }, + { MEDIA_BUS_FMT_SBGGR12_1X12, 12 }, + { MEDIA_BUS_FMT_SGBRG12_1X12, 12 }, + { MEDIA_BUS_FMT_SGRBG12_1X12, 12 }, + { MEDIA_BUS_FMT_SRGGB12_1X12, 12 }, + { MEDIA_BUS_FMT_Y10_1X10, 10 }, +}; + +static const struct vfe_format formats_pix_8x16[] = { + { MEDIA_BUS_FMT_UYVY8_2X8, 8 }, + { MEDIA_BUS_FMT_VYUY8_2X8, 8 }, + { MEDIA_BUS_FMT_YUYV8_2X8, 8 }, + { MEDIA_BUS_FMT_YVYU8_2X8, 8 }, +}; + +static const struct vfe_format formats_rdi_8x96[] = { + { MEDIA_BUS_FMT_UYVY8_2X8, 8 }, + { MEDIA_BUS_FMT_VYUY8_2X8, 8 }, + { MEDIA_BUS_FMT_YUYV8_2X8, 8 }, + { MEDIA_BUS_FMT_YVYU8_2X8, 8 }, + { MEDIA_BUS_FMT_SBGGR8_1X8, 8 }, + { MEDIA_BUS_FMT_SGBRG8_1X8, 8 }, + { MEDIA_BUS_FMT_SGRBG8_1X8, 8 }, + { MEDIA_BUS_FMT_SRGGB8_1X8, 8 }, + { MEDIA_BUS_FMT_SBGGR10_1X10, 10 }, + { MEDIA_BUS_FMT_SGBRG10_1X10, 10 }, + { MEDIA_BUS_FMT_SGRBG10_1X10, 10 }, + { MEDIA_BUS_FMT_SRGGB10_1X10, 10 }, + { MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE, 16 }, + { MEDIA_BUS_FMT_SBGGR12_1X12, 12 }, + { MEDIA_BUS_FMT_SGBRG12_1X12, 12 }, + { MEDIA_BUS_FMT_SGRBG12_1X12, 12 }, + { MEDIA_BUS_FMT_SRGGB12_1X12, 12 }, + { MEDIA_BUS_FMT_SBGGR14_1X14, 14 }, + { MEDIA_BUS_FMT_SGBRG14_1X14, 14 }, + { MEDIA_BUS_FMT_SGRBG14_1X14, 14 }, + { MEDIA_BUS_FMT_SRGGB14_1X14, 14 }, + { MEDIA_BUS_FMT_Y10_1X10, 10 }, + { MEDIA_BUS_FMT_Y10_2X8_PADHI_LE, 16 }, +}; + +static const struct vfe_format formats_pix_8x96[] = { + { MEDIA_BUS_FMT_UYVY8_2X8, 8 }, + { MEDIA_BUS_FMT_VYUY8_2X8, 8 }, + { MEDIA_BUS_FMT_YUYV8_2X8, 8 }, + { MEDIA_BUS_FMT_YVYU8_2X8, 8 }, +}; + +static const struct vfe_format formats_rdi_845[] = { + { MEDIA_BUS_FMT_UYVY8_2X8, 8 }, + { MEDIA_BUS_FMT_VYUY8_2X8, 8 }, + { MEDIA_BUS_FMT_YUYV8_2X8, 8 }, + { MEDIA_BUS_FMT_YVYU8_2X8, 8 }, + { MEDIA_BUS_FMT_SBGGR8_1X8, 8 }, + { 
MEDIA_BUS_FMT_SGBRG8_1X8, 8 }, + { MEDIA_BUS_FMT_SGRBG8_1X8, 8 }, + { MEDIA_BUS_FMT_SRGGB8_1X8, 8 }, + { MEDIA_BUS_FMT_SBGGR10_1X10, 10 }, + { MEDIA_BUS_FMT_SGBRG10_1X10, 10 }, + { MEDIA_BUS_FMT_SGRBG10_1X10, 10 }, + { MEDIA_BUS_FMT_SRGGB10_1X10, 10 }, + { MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE, 16 }, + { MEDIA_BUS_FMT_SBGGR12_1X12, 12 }, + { MEDIA_BUS_FMT_SGBRG12_1X12, 12 }, + { MEDIA_BUS_FMT_SGRBG12_1X12, 12 }, + { MEDIA_BUS_FMT_SRGGB12_1X12, 12 }, + { MEDIA_BUS_FMT_SBGGR14_1X14, 14 }, + { MEDIA_BUS_FMT_SGBRG14_1X14, 14 }, + { MEDIA_BUS_FMT_SGRBG14_1X14, 14 }, + { MEDIA_BUS_FMT_SRGGB14_1X14, 14 }, + { MEDIA_BUS_FMT_Y8_1X8, 8 }, + { MEDIA_BUS_FMT_Y10_1X10, 10 }, + { MEDIA_BUS_FMT_Y10_2X8_PADHI_LE, 16 }, +}; + +/* + * vfe_get_bpp - map media bus format to bits per pixel + * @formats: supported media bus formats array + * @nformats: size of @formats array + * @code: media bus format code + * + * Return number of bits per pixel + */ +static u8 vfe_get_bpp(const struct vfe_format *formats, + unsigned int nformats, u32 code) +{ + unsigned int i; + + for (i = 0; i < nformats; i++) + if (code == formats[i].code) + return formats[i].bpp; + + WARN(1, "Unknown format\n"); + + return formats[0].bpp; +} + +static u32 vfe_find_code(u32 *code, unsigned int n_code, + unsigned int index, u32 req_code) +{ + int i; + + if (!req_code && (index >= n_code)) + return 0; + + for (i = 0; i < n_code; i++) + if (req_code) { + if (req_code == code[i]) + return req_code; + } else { + if (i == index) + return code[i]; + } + + return code[0]; +} + +static u32 vfe_src_pad_code(struct vfe_line *line, u32 sink_code, + unsigned int index, u32 src_req_code) +{ + struct vfe_device *vfe = to_vfe(line); + + if (vfe->camss->version == CAMSS_8x16) + switch (sink_code) { + case MEDIA_BUS_FMT_YUYV8_2X8: + { + u32 src_code[] = { + MEDIA_BUS_FMT_YUYV8_2X8, + MEDIA_BUS_FMT_YUYV8_1_5X8, + }; + + return vfe_find_code(src_code, ARRAY_SIZE(src_code), + index, src_req_code); + } + case MEDIA_BUS_FMT_YVYU8_2X8: + { + u32 src_code[] = { + MEDIA_BUS_FMT_YVYU8_2X8, + MEDIA_BUS_FMT_YVYU8_1_5X8, + }; + + return vfe_find_code(src_code, ARRAY_SIZE(src_code), + index, src_req_code); + } + case MEDIA_BUS_FMT_UYVY8_2X8: + { + u32 src_code[] = { + MEDIA_BUS_FMT_UYVY8_2X8, + MEDIA_BUS_FMT_UYVY8_1_5X8, + }; + + return vfe_find_code(src_code, ARRAY_SIZE(src_code), + index, src_req_code); + } + case MEDIA_BUS_FMT_VYUY8_2X8: + { + u32 src_code[] = { + MEDIA_BUS_FMT_VYUY8_2X8, + MEDIA_BUS_FMT_VYUY8_1_5X8, + }; + + return vfe_find_code(src_code, ARRAY_SIZE(src_code), + index, src_req_code); + } + default: + if (index > 0) + return 0; + + return sink_code; + } + else if (vfe->camss->version == CAMSS_8x96 || + vfe->camss->version == CAMSS_660 || + vfe->camss->version == CAMSS_845 || + vfe->camss->version == CAMSS_8250) + switch (sink_code) { + case MEDIA_BUS_FMT_YUYV8_2X8: + { + u32 src_code[] = { + MEDIA_BUS_FMT_YUYV8_2X8, + MEDIA_BUS_FMT_YVYU8_2X8, + MEDIA_BUS_FMT_UYVY8_2X8, + MEDIA_BUS_FMT_VYUY8_2X8, + MEDIA_BUS_FMT_YUYV8_1_5X8, + }; + + return vfe_find_code(src_code, ARRAY_SIZE(src_code), + index, src_req_code); + } + case MEDIA_BUS_FMT_YVYU8_2X8: + { + u32 src_code[] = { + MEDIA_BUS_FMT_YVYU8_2X8, + MEDIA_BUS_FMT_YUYV8_2X8, + MEDIA_BUS_FMT_UYVY8_2X8, + MEDIA_BUS_FMT_VYUY8_2X8, + MEDIA_BUS_FMT_YVYU8_1_5X8, + }; + + return vfe_find_code(src_code, ARRAY_SIZE(src_code), + index, src_req_code); + } + case MEDIA_BUS_FMT_UYVY8_2X8: + { + u32 src_code[] = { + MEDIA_BUS_FMT_UYVY8_2X8, + MEDIA_BUS_FMT_YUYV8_2X8, + MEDIA_BUS_FMT_YVYU8_2X8, + MEDIA_BUS_FMT_VYUY8_2X8, + 
MEDIA_BUS_FMT_UYVY8_1_5X8, + }; + + return vfe_find_code(src_code, ARRAY_SIZE(src_code), + index, src_req_code); + } + case MEDIA_BUS_FMT_VYUY8_2X8: + { + u32 src_code[] = { + MEDIA_BUS_FMT_VYUY8_2X8, + MEDIA_BUS_FMT_YUYV8_2X8, + MEDIA_BUS_FMT_YVYU8_2X8, + MEDIA_BUS_FMT_UYVY8_2X8, + MEDIA_BUS_FMT_VYUY8_1_5X8, + }; + + return vfe_find_code(src_code, ARRAY_SIZE(src_code), + index, src_req_code); + } + default: + if (index > 0) + return 0; + + return sink_code; + } + else + return 0; +} + +int vfe_reset(struct vfe_device *vfe) +{ + unsigned long time; + + reinit_completion(&vfe->reset_complete); + + vfe->ops->global_reset(vfe); + + time = wait_for_completion_timeout(&vfe->reset_complete, + msecs_to_jiffies(VFE_RESET_TIMEOUT_MS)); + if (!time) { + dev_err(vfe->camss->dev, "VFE reset timeout\n"); + return -EIO; + } + + return 0; +} + +static void vfe_init_outputs(struct vfe_device *vfe) +{ + int i; + + for (i = 0; i < vfe->line_num; i++) { + struct vfe_output *output = &vfe->line[i].output; + + output->state = VFE_OUTPUT_OFF; + output->buf[0] = NULL; + output->buf[1] = NULL; + INIT_LIST_HEAD(&output->pending_bufs); + } +} + +static void vfe_reset_output_maps(struct vfe_device *vfe) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(vfe->wm_output_map); i++) + vfe->wm_output_map[i] = VFE_LINE_NONE; +} + +int vfe_reserve_wm(struct vfe_device *vfe, enum vfe_line_id line_id) +{ + int ret = -EBUSY; + int i; + + for (i = 0; i < ARRAY_SIZE(vfe->wm_output_map); i++) { + if (vfe->wm_output_map[i] == VFE_LINE_NONE) { + vfe->wm_output_map[i] = line_id; + ret = i; + break; + } + } + + return ret; +} + +int vfe_release_wm(struct vfe_device *vfe, u8 wm) +{ + if (wm >= ARRAY_SIZE(vfe->wm_output_map)) + return -EINVAL; + + vfe->wm_output_map[wm] = VFE_LINE_NONE; + + return 0; +} + +struct camss_buffer *vfe_buf_get_pending(struct vfe_output *output) +{ + struct camss_buffer *buffer = NULL; + + if (!list_empty(&output->pending_bufs)) { + buffer = list_first_entry(&output->pending_bufs, + struct camss_buffer, + queue); + list_del(&buffer->queue); + } + + return buffer; +} + +void vfe_buf_add_pending(struct vfe_output *output, + struct camss_buffer *buffer) +{ + INIT_LIST_HEAD(&buffer->queue); + list_add_tail(&buffer->queue, &output->pending_bufs); +} + +/* + * vfe_buf_flush_pending - Flush all pending buffers. 
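+ * Runs with vfe->output_lock held; each pending buffer is handed back to vb2
+ * in @state and removed from the list.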
+ * @output: VFE output + * @state: vb2 buffer state + */ +static void vfe_buf_flush_pending(struct vfe_output *output, + enum vb2_buffer_state state) +{ + struct camss_buffer *buf; + struct camss_buffer *t; + + list_for_each_entry_safe(buf, t, &output->pending_bufs, queue) { + vb2_buffer_done(&buf->vb.vb2_buf, state); + list_del(&buf->queue); + } +} + +int vfe_put_output(struct vfe_line *line) +{ + struct vfe_device *vfe = to_vfe(line); + struct vfe_output *output = &line->output; + unsigned long flags; + unsigned int i; + + spin_lock_irqsave(&vfe->output_lock, flags); + + for (i = 0; i < output->wm_num; i++) + vfe_release_wm(vfe, output->wm_idx[i]); + + output->state = VFE_OUTPUT_OFF; + + spin_unlock_irqrestore(&vfe->output_lock, flags); + return 0; +} + +/** + * vfe_isr_comp_done() - Process composite image done interrupt + * @vfe: VFE Device + * @comp: Composite image id + */ +void vfe_isr_comp_done(struct vfe_device *vfe, u8 comp) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(vfe->wm_output_map); i++) + if (vfe->wm_output_map[i] == VFE_LINE_PIX) { + vfe->isr_ops.wm_done(vfe, i); + break; + } +} + +void vfe_isr_reset_ack(struct vfe_device *vfe) +{ + complete(&vfe->reset_complete); +} + +/* + * vfe_set_clock_rates - Calculate and set clock rates on VFE module + * @vfe: VFE device + * + * Return 0 on success or a negative error code otherwise + */ +static int vfe_set_clock_rates(struct vfe_device *vfe) +{ + struct device *dev = vfe->camss->dev; + u64 pixel_clock[VFE_LINE_NUM_MAX]; + int i, j; + int ret; + + for (i = VFE_LINE_RDI0; i < vfe->line_num; i++) { + ret = camss_get_pixel_clock(&vfe->line[i].subdev.entity, + &pixel_clock[i]); + if (ret) + pixel_clock[i] = 0; + } + + for (i = 0; i < vfe->nclocks; i++) { + struct camss_clock *clock = &vfe->clock[i]; + + if (!strcmp(clock->name, "vfe0") || + !strcmp(clock->name, "vfe1") || + !strcmp(clock->name, "vfe_lite")) { + u64 min_rate = 0; + long rate; + + for (j = VFE_LINE_RDI0; j < vfe->line_num; j++) { + u32 tmp; + u8 bpp; + + if (j == VFE_LINE_PIX) { + tmp = pixel_clock[j]; + } else { + struct vfe_line *l = &vfe->line[j]; + + bpp = vfe_get_bpp(l->formats, + l->nformats, + l->fmt[MSM_VFE_PAD_SINK].code); + tmp = pixel_clock[j] * bpp / 64; + } + + if (min_rate < tmp) + min_rate = tmp; + } + + camss_add_clock_margin(&min_rate); + + for (j = 0; j < clock->nfreqs; j++) + if (min_rate < clock->freq[j]) + break; + + if (j == clock->nfreqs) { + dev_err(dev, + "Pixel clock is too high for VFE"); + return -EINVAL; + } + + /* if sensor pixel clock is not available */ + /* set highest possible VFE clock rate */ + if (min_rate == 0) + j = clock->nfreqs - 1; + + rate = clk_round_rate(clock->clk, clock->freq[j]); + if (rate < 0) { + dev_err(dev, "clk round rate failed: %ld\n", + rate); + return -EINVAL; + } + + ret = clk_set_rate(clock->clk, rate); + if (ret < 0) { + dev_err(dev, "clk set rate failed: %d\n", ret); + return ret; + } + } + } + + return 0; +} + +/* + * vfe_check_clock_rates - Check current clock rates on VFE module + * @vfe: VFE device + * + * Return 0 if current clock rates are suitable for a new pipeline + * or a negative error code otherwise + */ +static int vfe_check_clock_rates(struct vfe_device *vfe) +{ + u64 pixel_clock[VFE_LINE_NUM_MAX]; + int i, j; + int ret; + + for (i = VFE_LINE_RDI0; i < vfe->line_num; i++) { + ret = camss_get_pixel_clock(&vfe->line[i].subdev.entity, + &pixel_clock[i]); + if (ret) + pixel_clock[i] = 0; + } + + for (i = 0; i < vfe->nclocks; i++) { + struct camss_clock *clock = &vfe->clock[i]; + + if 
(!strcmp(clock->name, "vfe0") || + !strcmp(clock->name, "vfe1") || + !strcmp(clock->name, "vfe_lite")) { + u64 min_rate = 0; + unsigned long rate; + + for (j = VFE_LINE_RDI0; j < vfe->line_num; j++) { + u32 tmp; + u8 bpp; + + if (j == VFE_LINE_PIX) { + tmp = pixel_clock[j]; + } else { + struct vfe_line *l = &vfe->line[j]; + + bpp = vfe_get_bpp(l->formats, + l->nformats, + l->fmt[MSM_VFE_PAD_SINK].code); + tmp = pixel_clock[j] * bpp / 64; + } + + if (min_rate < tmp) + min_rate = tmp; + } + + camss_add_clock_margin(&min_rate); + + rate = clk_get_rate(clock->clk); + if (rate < min_rate) + return -EBUSY; + } + } + + return 0; +} + +/* + * vfe_get - Power up and reset VFE module + * @vfe: VFE Device + * + * Return 0 on success or a negative error code otherwise + */ +int vfe_get(struct vfe_device *vfe) +{ + int ret; + + mutex_lock(&vfe->power_lock); + + if (vfe->power_count == 0) { + ret = vfe->ops->pm_domain_on(vfe); + if (ret < 0) + goto error_pm_domain; + + ret = pm_runtime_resume_and_get(vfe->camss->dev); + if (ret < 0) + goto error_domain_off; + + ret = vfe_set_clock_rates(vfe); + if (ret < 0) + goto error_pm_runtime_get; + + ret = camss_enable_clocks(vfe->nclocks, vfe->clock, + vfe->camss->dev); + if (ret < 0) + goto error_pm_runtime_get; + + ret = vfe_reset(vfe); + if (ret < 0) + goto error_reset; + + vfe_reset_output_maps(vfe); + + vfe_init_outputs(vfe); + + vfe->ops->hw_version(vfe); + } else { + ret = vfe_check_clock_rates(vfe); + if (ret < 0) + goto error_pm_domain; + } + vfe->power_count++; + + mutex_unlock(&vfe->power_lock); + + return 0; + +error_reset: + camss_disable_clocks(vfe->nclocks, vfe->clock); + +error_pm_runtime_get: + pm_runtime_put_sync(vfe->camss->dev); +error_domain_off: + vfe->ops->pm_domain_off(vfe); + +error_pm_domain: + mutex_unlock(&vfe->power_lock); + + return ret; +} + +/* + * vfe_put - Power down VFE module + * @vfe: VFE Device + */ +void vfe_put(struct vfe_device *vfe) +{ + mutex_lock(&vfe->power_lock); + + if (vfe->power_count == 0) { + dev_err(vfe->camss->dev, "vfe power off on power_count == 0\n"); + goto exit; + } else if (vfe->power_count == 1) { + if (vfe->was_streaming) { + vfe->was_streaming = 0; + vfe->ops->vfe_halt(vfe); + } + camss_disable_clocks(vfe->nclocks, vfe->clock); + pm_runtime_put_sync(vfe->camss->dev); + vfe->ops->pm_domain_off(vfe); + } + + vfe->power_count--; + +exit: + mutex_unlock(&vfe->power_lock); +} + +/* + * vfe_flush_buffers - Return all vb2 buffers + * @vid: Video device structure + * @state: vb2 buffer state of the returned buffers + * + * Return all buffers to vb2. This includes queued pending buffers (still + * unused) and any buffers given to the hardware but again still not used. 
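+ * That covers the pending queue, the ping/pong pair in buf[0] / buf[1] and a
+ * last_buffer held back while the output was stopping.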
+ * + * Return 0 on success or a negative error code otherwise + */ +int vfe_flush_buffers(struct camss_video *vid, + enum vb2_buffer_state state) +{ + struct vfe_line *line = container_of(vid, struct vfe_line, video_out); + struct vfe_device *vfe = to_vfe(line); + struct vfe_output *output; + unsigned long flags; + + output = &line->output; + + spin_lock_irqsave(&vfe->output_lock, flags); + + vfe_buf_flush_pending(output, state); + + if (output->buf[0]) + vb2_buffer_done(&output->buf[0]->vb.vb2_buf, state); + + if (output->buf[1]) + vb2_buffer_done(&output->buf[1]->vb.vb2_buf, state); + + if (output->last_buffer) { + vb2_buffer_done(&output->last_buffer->vb.vb2_buf, state); + output->last_buffer = NULL; + } + + spin_unlock_irqrestore(&vfe->output_lock, flags); + + return 0; +} + +/* + * vfe_set_power - Power on/off VFE module + * @sd: VFE V4L2 subdevice + * @on: Requested power state + * + * Return 0 on success or a negative error code otherwise + */ +static int vfe_set_power(struct v4l2_subdev *sd, int on) +{ + struct vfe_line *line = v4l2_get_subdevdata(sd); + struct vfe_device *vfe = to_vfe(line); + int ret; + + if (on) { + ret = vfe_get(vfe); + if (ret < 0) + return ret; + } else { + vfe_put(vfe); + } + + return 0; +} + +/* + * vfe_set_stream - Enable/disable streaming on VFE module + * @sd: VFE V4L2 subdevice + * @enable: Requested streaming state + * + * Main configuration of VFE module is triggered here. + * + * Return 0 on success or a negative error code otherwise + */ +static int vfe_set_stream(struct v4l2_subdev *sd, int enable) +{ + struct vfe_line *line = v4l2_get_subdevdata(sd); + struct vfe_device *vfe = to_vfe(line); + int ret; + + if (enable) { + line->output.state = VFE_OUTPUT_RESERVED; + ret = vfe->ops->vfe_enable(line); + if (ret < 0) + dev_err(vfe->camss->dev, + "Failed to enable vfe outputs\n"); + } else { + ret = vfe->ops->vfe_disable(line); + if (ret < 0) + dev_err(vfe->camss->dev, + "Failed to disable vfe outputs\n"); + } + + return ret; +} + +/* + * __vfe_get_format - Get pointer to format structure + * @line: VFE line + * @cfg: V4L2 subdev pad configuration + * @pad: pad from which format is requested + * @which: TRY or ACTIVE format + * + * Return pointer to TRY or ACTIVE format structure + */ +static struct v4l2_mbus_framefmt * +__vfe_get_format(struct vfe_line *line, + struct v4l2_subdev_state *sd_state, + unsigned int pad, + enum v4l2_subdev_format_whence which) +{ + if (which == V4L2_SUBDEV_FORMAT_TRY) + return v4l2_subdev_get_try_format(&line->subdev, sd_state, + pad); + + return &line->fmt[pad]; +} + +/* + * __vfe_get_compose - Get pointer to compose selection structure + * @line: VFE line + * @cfg: V4L2 subdev pad configuration + * @which: TRY or ACTIVE format + * + * Return pointer to TRY or ACTIVE compose rectangle structure + */ +static struct v4l2_rect * +__vfe_get_compose(struct vfe_line *line, + struct v4l2_subdev_state *sd_state, + enum v4l2_subdev_format_whence which) +{ + if (which == V4L2_SUBDEV_FORMAT_TRY) + return v4l2_subdev_get_try_compose(&line->subdev, sd_state, + MSM_VFE_PAD_SINK); + + return &line->compose; +} + +/* + * __vfe_get_crop - Get pointer to crop selection structure + * @line: VFE line + * @cfg: V4L2 subdev pad configuration + * @which: TRY or ACTIVE format + * + * Return pointer to TRY or ACTIVE crop rectangle structure + */ +static struct v4l2_rect * +__vfe_get_crop(struct vfe_line *line, + struct v4l2_subdev_state *sd_state, + enum v4l2_subdev_format_whence which) +{ + if (which == V4L2_SUBDEV_FORMAT_TRY) + return 
v4l2_subdev_get_try_crop(&line->subdev, sd_state, + MSM_VFE_PAD_SRC); + + return &line->crop; +} + +/* + * vfe_try_format - Handle try format by pad subdev method + * @line: VFE line + * @cfg: V4L2 subdev pad configuration + * @pad: pad on which format is requested + * @fmt: pointer to v4l2 format structure + * @which: wanted subdev format + */ +static void vfe_try_format(struct vfe_line *line, + struct v4l2_subdev_state *sd_state, + unsigned int pad, + struct v4l2_mbus_framefmt *fmt, + enum v4l2_subdev_format_whence which) +{ + unsigned int i; + u32 code; + + switch (pad) { + case MSM_VFE_PAD_SINK: + /* Set format on sink pad */ + + for (i = 0; i < line->nformats; i++) + if (fmt->code == line->formats[i].code) + break; + + /* If not found, use UYVY as default */ + if (i >= line->nformats) + fmt->code = MEDIA_BUS_FMT_UYVY8_2X8; + + fmt->width = clamp_t(u32, fmt->width, 1, 8191); + fmt->height = clamp_t(u32, fmt->height, 1, 8191); + + fmt->field = V4L2_FIELD_NONE; + fmt->colorspace = V4L2_COLORSPACE_SRGB; + + break; + + case MSM_VFE_PAD_SRC: + /* Set and return a format same as sink pad */ + code = fmt->code; + + *fmt = *__vfe_get_format(line, sd_state, MSM_VFE_PAD_SINK, + which); + + fmt->code = vfe_src_pad_code(line, fmt->code, 0, code); + + if (line->id == VFE_LINE_PIX) { + struct v4l2_rect *rect; + + rect = __vfe_get_crop(line, sd_state, which); + + fmt->width = rect->width; + fmt->height = rect->height; + } + + break; + } + + fmt->colorspace = V4L2_COLORSPACE_SRGB; +} + +/* + * vfe_try_compose - Handle try compose selection by pad subdev method + * @line: VFE line + * @cfg: V4L2 subdev pad configuration + * @rect: pointer to v4l2 rect structure + * @which: wanted subdev format + */ +static void vfe_try_compose(struct vfe_line *line, + struct v4l2_subdev_state *sd_state, + struct v4l2_rect *rect, + enum v4l2_subdev_format_whence which) +{ + struct v4l2_mbus_framefmt *fmt; + + fmt = __vfe_get_format(line, sd_state, MSM_VFE_PAD_SINK, which); + + if (rect->width > fmt->width) + rect->width = fmt->width; + + if (rect->height > fmt->height) + rect->height = fmt->height; + + if (fmt->width > rect->width * SCALER_RATIO_MAX) + rect->width = (fmt->width + SCALER_RATIO_MAX - 1) / + SCALER_RATIO_MAX; + + rect->width &= ~0x1; + + if (fmt->height > rect->height * SCALER_RATIO_MAX) + rect->height = (fmt->height + SCALER_RATIO_MAX - 1) / + SCALER_RATIO_MAX; + + if (rect->width < 16) + rect->width = 16; + + if (rect->height < 4) + rect->height = 4; +} + +/* + * vfe_try_crop - Handle try crop selection by pad subdev method + * @line: VFE line + * @cfg: V4L2 subdev pad configuration + * @rect: pointer to v4l2 rect structure + * @which: wanted subdev format + */ +static void vfe_try_crop(struct vfe_line *line, + struct v4l2_subdev_state *sd_state, + struct v4l2_rect *rect, + enum v4l2_subdev_format_whence which) +{ + struct v4l2_rect *compose; + + compose = __vfe_get_compose(line, sd_state, which); + + if (rect->width > compose->width) + rect->width = compose->width; + + if (rect->width + rect->left > compose->width) + rect->left = compose->width - rect->width; + + if (rect->height > compose->height) + rect->height = compose->height; + + if (rect->height + rect->top > compose->height) + rect->top = compose->height - rect->height; + + /* wm in line based mode writes multiple of 16 horizontally */ + rect->left += (rect->width & 0xf) >> 1; + rect->width &= ~0xf; + + if (rect->width < 16) { + rect->left = 0; + rect->width = 16; + } + + if (rect->height < 4) { + rect->top = 0; + rect->height = 4; + } +} + +/* 
+ * vfe_enum_mbus_code - Handle pixel format enumeration + * @sd: VFE V4L2 subdevice + * @cfg: V4L2 subdev pad configuration + * @code: pointer to v4l2_subdev_mbus_code_enum structure + * + * return -EINVAL or zero on success + */ +static int vfe_enum_mbus_code(struct v4l2_subdev *sd, + struct v4l2_subdev_state *sd_state, + struct v4l2_subdev_mbus_code_enum *code) +{ + struct vfe_line *line = v4l2_get_subdevdata(sd); + + if (code->pad == MSM_VFE_PAD_SINK) { + if (code->index >= line->nformats) + return -EINVAL; + + code->code = line->formats[code->index].code; + } else { + struct v4l2_mbus_framefmt *sink_fmt; + + sink_fmt = __vfe_get_format(line, sd_state, MSM_VFE_PAD_SINK, + code->which); + + code->code = vfe_src_pad_code(line, sink_fmt->code, + code->index, 0); + if (!code->code) + return -EINVAL; + } + + return 0; +} + +/* + * vfe_enum_frame_size - Handle frame size enumeration + * @sd: VFE V4L2 subdevice + * @cfg: V4L2 subdev pad configuration + * @fse: pointer to v4l2_subdev_frame_size_enum structure + * + * Return -EINVAL or zero on success + */ +static int vfe_enum_frame_size(struct v4l2_subdev *sd, + struct v4l2_subdev_state *sd_state, + struct v4l2_subdev_frame_size_enum *fse) +{ + struct vfe_line *line = v4l2_get_subdevdata(sd); + struct v4l2_mbus_framefmt format; + + if (fse->index != 0) + return -EINVAL; + + format.code = fse->code; + format.width = 1; + format.height = 1; + vfe_try_format(line, sd_state, fse->pad, &format, fse->which); + fse->min_width = format.width; + fse->min_height = format.height; + + if (format.code != fse->code) + return -EINVAL; + + format.code = fse->code; + format.width = -1; + format.height = -1; + vfe_try_format(line, sd_state, fse->pad, &format, fse->which); + fse->max_width = format.width; + fse->max_height = format.height; + + return 0; +} + +/* + * vfe_get_format - Handle get format by pads subdev method + * @sd: VFE V4L2 subdevice + * @cfg: V4L2 subdev pad configuration + * @fmt: pointer to v4l2 subdev format structure + * + * Return -EINVAL or zero on success + */ +static int vfe_get_format(struct v4l2_subdev *sd, + struct v4l2_subdev_state *sd_state, + struct v4l2_subdev_format *fmt) +{ + struct vfe_line *line = v4l2_get_subdevdata(sd); + struct v4l2_mbus_framefmt *format; + + format = __vfe_get_format(line, sd_state, fmt->pad, fmt->which); + if (format == NULL) + return -EINVAL; + + fmt->format = *format; + + return 0; +} + +static int vfe_set_selection(struct v4l2_subdev *sd, + struct v4l2_subdev_state *sd_state, + struct v4l2_subdev_selection *sel); + +/* + * vfe_set_format - Handle set format by pads subdev method + * @sd: VFE V4L2 subdevice + * @cfg: V4L2 subdev pad configuration + * @fmt: pointer to v4l2 subdev format structure + * + * Return -EINVAL or zero on success + */ +static int vfe_set_format(struct v4l2_subdev *sd, + struct v4l2_subdev_state *sd_state, + struct v4l2_subdev_format *fmt) +{ + struct vfe_line *line = v4l2_get_subdevdata(sd); + struct v4l2_mbus_framefmt *format; + + format = __vfe_get_format(line, sd_state, fmt->pad, fmt->which); + if (format == NULL) + return -EINVAL; + + vfe_try_format(line, sd_state, fmt->pad, &fmt->format, fmt->which); + *format = fmt->format; + + if (fmt->pad == MSM_VFE_PAD_SINK) { + struct v4l2_subdev_selection sel = { 0 }; + int ret; + + /* Propagate the format from sink to source */ + format = __vfe_get_format(line, sd_state, MSM_VFE_PAD_SRC, + fmt->which); + + *format = fmt->format; + vfe_try_format(line, sd_state, MSM_VFE_PAD_SRC, format, + fmt->which); + + if (line->id != VFE_LINE_PIX) 
+ return 0; + + /* Reset sink pad compose selection */ + sel.which = fmt->which; + sel.pad = MSM_VFE_PAD_SINK; + sel.target = V4L2_SEL_TGT_COMPOSE; + sel.r.width = fmt->format.width; + sel.r.height = fmt->format.height; + ret = vfe_set_selection(sd, sd_state, &sel); + if (ret < 0) + return ret; + } + + return 0; +} + +/* + * vfe_get_selection - Handle get selection by pads subdev method + * @sd: VFE V4L2 subdevice + * @cfg: V4L2 subdev pad configuration + * @sel: pointer to v4l2 subdev selection structure + * + * Return -EINVAL or zero on success + */ +static int vfe_get_selection(struct v4l2_subdev *sd, + struct v4l2_subdev_state *sd_state, + struct v4l2_subdev_selection *sel) +{ + struct vfe_line *line = v4l2_get_subdevdata(sd); + struct v4l2_subdev_format fmt = { 0 }; + struct v4l2_rect *rect; + int ret; + + if (line->id != VFE_LINE_PIX) + return -EINVAL; + + if (sel->pad == MSM_VFE_PAD_SINK) + switch (sel->target) { + case V4L2_SEL_TGT_COMPOSE_BOUNDS: + fmt.pad = sel->pad; + fmt.which = sel->which; + ret = vfe_get_format(sd, sd_state, &fmt); + if (ret < 0) + return ret; + + sel->r.left = 0; + sel->r.top = 0; + sel->r.width = fmt.format.width; + sel->r.height = fmt.format.height; + break; + case V4L2_SEL_TGT_COMPOSE: + rect = __vfe_get_compose(line, sd_state, sel->which); + if (rect == NULL) + return -EINVAL; + + sel->r = *rect; + break; + default: + return -EINVAL; + } + else if (sel->pad == MSM_VFE_PAD_SRC) + switch (sel->target) { + case V4L2_SEL_TGT_CROP_BOUNDS: + rect = __vfe_get_compose(line, sd_state, sel->which); + if (rect == NULL) + return -EINVAL; + + sel->r.left = rect->left; + sel->r.top = rect->top; + sel->r.width = rect->width; + sel->r.height = rect->height; + break; + case V4L2_SEL_TGT_CROP: + rect = __vfe_get_crop(line, sd_state, sel->which); + if (rect == NULL) + return -EINVAL; + + sel->r = *rect; + break; + default: + return -EINVAL; + } + + return 0; +} + +/* + * vfe_set_selection - Handle set selection by pads subdev method + * @sd: VFE V4L2 subdevice + * @cfg: V4L2 subdev pad configuration + * @sel: pointer to v4l2 subdev selection structure + * + * Return -EINVAL or zero on success + */ +static int vfe_set_selection(struct v4l2_subdev *sd, + struct v4l2_subdev_state *sd_state, + struct v4l2_subdev_selection *sel) +{ + struct vfe_line *line = v4l2_get_subdevdata(sd); + struct v4l2_rect *rect; + int ret; + + if (line->id != VFE_LINE_PIX) + return -EINVAL; + + if (sel->target == V4L2_SEL_TGT_COMPOSE && + sel->pad == MSM_VFE_PAD_SINK) { + struct v4l2_subdev_selection crop = { 0 }; + + rect = __vfe_get_compose(line, sd_state, sel->which); + if (rect == NULL) + return -EINVAL; + + vfe_try_compose(line, sd_state, &sel->r, sel->which); + *rect = sel->r; + + /* Reset source crop selection */ + crop.which = sel->which; + crop.pad = MSM_VFE_PAD_SRC; + crop.target = V4L2_SEL_TGT_CROP; + crop.r = *rect; + ret = vfe_set_selection(sd, sd_state, &crop); + } else if (sel->target == V4L2_SEL_TGT_CROP && + sel->pad == MSM_VFE_PAD_SRC) { + struct v4l2_subdev_format fmt = { 0 }; + + rect = __vfe_get_crop(line, sd_state, sel->which); + if (rect == NULL) + return -EINVAL; + + vfe_try_crop(line, sd_state, &sel->r, sel->which); + *rect = sel->r; + + /* Reset source pad format width and height */ + fmt.which = sel->which; + fmt.pad = MSM_VFE_PAD_SRC; + ret = vfe_get_format(sd, sd_state, &fmt); + if (ret < 0) + return ret; + + fmt.format.width = rect->width; + fmt.format.height = rect->height; + ret = vfe_set_format(sd, sd_state, &fmt); + } else { + ret = -EINVAL; + } + + return ret; +} + 
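
For reference, the format/selection propagation implemented by vfe_set_format() and vfe_set_selection() above can be exercised from userspace through the standard V4L2 subdev ioctls: setting the sink format resets the sink compose rectangle and the source crop, shrinking the sink compose rectangle drives the scaler, and shrinking the source crop trims the scaled image and resets the source pad format. The sketch below is an illustrative aside, not part of this driver or patch; the /dev/v4l-subdev10 node path and the 1920x1080 / 1280x720 / 1280x704 sizes are assumptions (the node name depends on how the board enumerates), while the pad indices follow MSM_VFE_PAD_SINK (0) and MSM_VFE_PAD_SRC (1) from camss-vfe.h, and the selection targets are only accepted on the PIX line (vfe_set_selection() returns -EINVAL for RDI lines).

/*
 * Illustrative userspace sketch (assumed subdev node): exercise the
 * sink format -> compose -> crop -> source format propagation chain
 * on a VFE PIX line subdev.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/media-bus-format.h>
#include <linux/v4l2-subdev.h>

int main(void)
{
	int fd = open("/dev/v4l-subdev10", O_RDWR);	/* hypothetical msm_vfe0_pix node */
	struct v4l2_subdev_format fmt;
	struct v4l2_subdev_selection sel;

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Set the sink pad format; this also resets compose, crop and source format. */
	memset(&fmt, 0, sizeof(fmt));
	fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
	fmt.pad = 0;					/* MSM_VFE_PAD_SINK */
	fmt.format.code = MEDIA_BUS_FMT_UYVY8_2X8;
	fmt.format.width = 1920;
	fmt.format.height = 1080;
	if (ioctl(fd, VIDIOC_SUBDEV_S_FMT, &fmt) < 0)
		perror("S_FMT sink");

	/* Downscale by shrinking the sink compose rectangle (PIX line only). */
	memset(&sel, 0, sizeof(sel));
	sel.which = V4L2_SUBDEV_FORMAT_ACTIVE;
	sel.pad = 0;					/* MSM_VFE_PAD_SINK */
	sel.target = V4L2_SEL_TGT_COMPOSE;
	sel.r.width = 1280;
	sel.r.height = 720;
	if (ioctl(fd, VIDIOC_SUBDEV_S_SELECTION, &sel) < 0)
		perror("S_SELECTION compose");

	/* Crop the scaled image on the source pad. */
	memset(&sel, 0, sizeof(sel));
	sel.which = V4L2_SUBDEV_FORMAT_ACTIVE;
	sel.pad = 1;					/* MSM_VFE_PAD_SRC */
	sel.target = V4L2_SEL_TGT_CROP;
	sel.r.left = 0;
	sel.r.top = 0;
	sel.r.width = 1280;
	sel.r.height = 704;
	if (ioctl(fd, VIDIOC_SUBDEV_S_SELECTION, &sel) < 0)
		perror("S_SELECTION crop");

	/* The source pad format now reflects the crop rectangle. */
	memset(&fmt, 0, sizeof(fmt));
	fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
	fmt.pad = 1;					/* MSM_VFE_PAD_SRC */
	if (ioctl(fd, VIDIOC_SUBDEV_G_FMT, &fmt) == 0)
		printf("source: %ux%u\n", fmt.format.width, fmt.format.height);

	close(fd);
	return 0;
}

The same rectangles can be set with "media-ctl -V" during pipeline configuration; the point of the sketch is only to show the order in which the driver expects format, compose and crop to be programmed.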
+/* + * vfe_init_formats - Initialize formats on all pads + * @sd: VFE V4L2 subdevice + * @fh: V4L2 subdev file handle + * + * Initialize all pad formats with default values. + * + * Return 0 on success or a negative error code otherwise + */ +static int vfe_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) +{ + struct v4l2_subdev_format format = { + .pad = MSM_VFE_PAD_SINK, + .which = fh ? V4L2_SUBDEV_FORMAT_TRY : + V4L2_SUBDEV_FORMAT_ACTIVE, + .format = { + .code = MEDIA_BUS_FMT_UYVY8_2X8, + .width = 1920, + .height = 1080 + } + }; + + return vfe_set_format(sd, fh ? fh->state : NULL, &format); +} + +/* + * msm_vfe_subdev_init - Initialize VFE device structure and resources + * @vfe: VFE device + * @res: VFE module resources table + * + * Return 0 on success or a negative error code otherwise + */ +int msm_vfe_subdev_init(struct camss *camss, struct vfe_device *vfe, + const struct resources *res, u8 id) +{ + struct device *dev = camss->dev; + struct platform_device *pdev = to_platform_device(dev); + int i, j; + int ret; + + switch (camss->version) { + case CAMSS_8x16: + vfe->ops = &vfe_ops_4_1; + break; + case CAMSS_8x96: + vfe->ops = &vfe_ops_4_7; + break; + case CAMSS_660: + vfe->ops = &vfe_ops_4_8; + break; + case CAMSS_845: + vfe->ops = &vfe_ops_170; + break; + case CAMSS_8250: + vfe->ops = &vfe_ops_480; + break; + default: + return -EINVAL; + } + vfe->ops->subdev_init(dev, vfe); + + /* Memory */ + + vfe->base = devm_platform_ioremap_resource_byname(pdev, res->reg[0]); + if (IS_ERR(vfe->base)) { + dev_err(dev, "could not map memory\n"); + return PTR_ERR(vfe->base); + } + + /* Interrupt */ + + ret = platform_get_irq_byname(pdev, res->interrupt[0]); + if (ret < 0) + return ret; + + vfe->irq = ret; + snprintf(vfe->irq_name, sizeof(vfe->irq_name), "%s_%s%d", + dev_name(dev), MSM_VFE_NAME, id); + ret = devm_request_irq(dev, vfe->irq, vfe->ops->isr, + IRQF_TRIGGER_RISING, vfe->irq_name, vfe); + if (ret < 0) { + dev_err(dev, "request_irq failed: %d\n", ret); + return ret; + } + + /* Clocks */ + + vfe->nclocks = 0; + while (res->clock[vfe->nclocks]) + vfe->nclocks++; + + vfe->clock = devm_kcalloc(dev, vfe->nclocks, sizeof(*vfe->clock), + GFP_KERNEL); + if (!vfe->clock) + return -ENOMEM; + + for (i = 0; i < vfe->nclocks; i++) { + struct camss_clock *clock = &vfe->clock[i]; + + clock->clk = devm_clk_get(dev, res->clock[i]); + if (IS_ERR(clock->clk)) + return PTR_ERR(clock->clk); + + clock->name = res->clock[i]; + + clock->nfreqs = 0; + while (res->clock_rate[i][clock->nfreqs]) + clock->nfreqs++; + + if (!clock->nfreqs) { + clock->freq = NULL; + continue; + } + + clock->freq = devm_kcalloc(dev, + clock->nfreqs, + sizeof(*clock->freq), + GFP_KERNEL); + if (!clock->freq) + return -ENOMEM; + + for (j = 0; j < clock->nfreqs; j++) + clock->freq[j] = res->clock_rate[i][j]; + } + + mutex_init(&vfe->power_lock); + vfe->power_count = 0; + + mutex_init(&vfe->stream_lock); + vfe->stream_count = 0; + + spin_lock_init(&vfe->output_lock); + + vfe->camss = camss; + vfe->id = id; + vfe->reg_update = 0; + + for (i = VFE_LINE_RDI0; i < vfe->line_num; i++) { + struct vfe_line *l = &vfe->line[i]; + + l->video_out.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; + l->video_out.camss = camss; + l->id = i; + init_completion(&l->output.sof); + init_completion(&l->output.reg_update); + + if (camss->version == CAMSS_8x16) { + if (i == VFE_LINE_PIX) { + l->formats = formats_pix_8x16; + l->nformats = ARRAY_SIZE(formats_pix_8x16); + } else { + l->formats = formats_rdi_8x16; + l->nformats = 
ARRAY_SIZE(formats_rdi_8x16); + } + } else if (camss->version == CAMSS_8x96 || + camss->version == CAMSS_660) { + if (i == VFE_LINE_PIX) { + l->formats = formats_pix_8x96; + l->nformats = ARRAY_SIZE(formats_pix_8x96); + } else { + l->formats = formats_rdi_8x96; + l->nformats = ARRAY_SIZE(formats_rdi_8x96); + } + } else if (camss->version == CAMSS_845 || + camss->version == CAMSS_8250) { + l->formats = formats_rdi_845; + l->nformats = ARRAY_SIZE(formats_rdi_845); + } else { + return -EINVAL; + } + } + + init_completion(&vfe->reset_complete); + init_completion(&vfe->halt_complete); + + return 0; +} + +/* + * vfe_link_setup - Setup VFE connections + * @entity: Pointer to media entity structure + * @local: Pointer to local pad + * @remote: Pointer to remote pad + * @flags: Link flags + * + * Return 0 on success + */ +static int vfe_link_setup(struct media_entity *entity, + const struct media_pad *local, + const struct media_pad *remote, u32 flags) +{ + if (flags & MEDIA_LNK_FL_ENABLED) + if (media_pad_remote_pad_first(local)) + return -EBUSY; + + return 0; +} + +static const struct v4l2_subdev_core_ops vfe_core_ops = { + .s_power = vfe_set_power, +}; + +static const struct v4l2_subdev_video_ops vfe_video_ops = { + .s_stream = vfe_set_stream, +}; + +static const struct v4l2_subdev_pad_ops vfe_pad_ops = { + .enum_mbus_code = vfe_enum_mbus_code, + .enum_frame_size = vfe_enum_frame_size, + .get_fmt = vfe_get_format, + .set_fmt = vfe_set_format, + .get_selection = vfe_get_selection, + .set_selection = vfe_set_selection, +}; + +static const struct v4l2_subdev_ops vfe_v4l2_ops = { + .core = &vfe_core_ops, + .video = &vfe_video_ops, + .pad = &vfe_pad_ops, +}; + +static const struct v4l2_subdev_internal_ops vfe_v4l2_internal_ops = { + .open = vfe_init_formats, +}; + +static const struct media_entity_operations vfe_media_ops = { + .link_setup = vfe_link_setup, + .link_validate = v4l2_subdev_link_validate, +}; + +/* + * msm_vfe_register_entities - Register subdev node for VFE module + * @vfe: VFE device + * @v4l2_dev: V4L2 device + * + * Initialize and register a subdev node for the VFE module. Then + * call msm_video_register() to register the video device node which + * will be connected to this subdev node. Then actually create the + * media link between them. 
+ * + * Return 0 on success or a negative error code otherwise + */ +int msm_vfe_register_entities(struct vfe_device *vfe, + struct v4l2_device *v4l2_dev) +{ + struct device *dev = vfe->camss->dev; + struct v4l2_subdev *sd; + struct media_pad *pads; + struct camss_video *video_out; + int ret; + int i; + + for (i = 0; i < vfe->line_num; i++) { + char name[32]; + + sd = &vfe->line[i].subdev; + pads = vfe->line[i].pads; + video_out = &vfe->line[i].video_out; + + v4l2_subdev_init(sd, &vfe_v4l2_ops); + sd->internal_ops = &vfe_v4l2_internal_ops; + sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; + if (i == VFE_LINE_PIX) + snprintf(sd->name, ARRAY_SIZE(sd->name), "%s%d_%s", + MSM_VFE_NAME, vfe->id, "pix"); + else + snprintf(sd->name, ARRAY_SIZE(sd->name), "%s%d_%s%d", + MSM_VFE_NAME, vfe->id, "rdi", i); + + v4l2_set_subdevdata(sd, &vfe->line[i]); + + ret = vfe_init_formats(sd, NULL); + if (ret < 0) { + dev_err(dev, "Failed to init format: %d\n", ret); + goto error_init; + } + + pads[MSM_VFE_PAD_SINK].flags = MEDIA_PAD_FL_SINK; + pads[MSM_VFE_PAD_SRC].flags = MEDIA_PAD_FL_SOURCE; + + sd->entity.function = MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER; + sd->entity.ops = &vfe_media_ops; + ret = media_entity_pads_init(&sd->entity, MSM_VFE_PADS_NUM, + pads); + if (ret < 0) { + dev_err(dev, "Failed to init media entity: %d\n", ret); + goto error_init; + } + + ret = v4l2_device_register_subdev(v4l2_dev, sd); + if (ret < 0) { + dev_err(dev, "Failed to register subdev: %d\n", ret); + goto error_reg_subdev; + } + + video_out->ops = &vfe->video_ops; + if (vfe->camss->version == CAMSS_845 || + vfe->camss->version == CAMSS_8250) + video_out->bpl_alignment = 16; + else + video_out->bpl_alignment = 8; + video_out->line_based = 0; + if (i == VFE_LINE_PIX) { + video_out->bpl_alignment = 16; + video_out->line_based = 1; + } + snprintf(name, ARRAY_SIZE(name), "%s%d_%s%d", + MSM_VFE_NAME, vfe->id, "video", i); + ret = msm_video_register(video_out, v4l2_dev, name, + i == VFE_LINE_PIX ? 
1 : 0); + if (ret < 0) { + dev_err(dev, "Failed to register video node: %d\n", + ret); + goto error_reg_video; + } + + ret = media_create_pad_link( + &sd->entity, MSM_VFE_PAD_SRC, + &video_out->vdev.entity, 0, + MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED); + if (ret < 0) { + dev_err(dev, "Failed to link %s->%s entities: %d\n", + sd->entity.name, video_out->vdev.entity.name, + ret); + goto error_link; + } + } + + return 0; + +error_link: + msm_video_unregister(video_out); + +error_reg_video: + v4l2_device_unregister_subdev(sd); + +error_reg_subdev: + media_entity_cleanup(&sd->entity); + +error_init: + for (i--; i >= 0; i--) { + sd = &vfe->line[i].subdev; + video_out = &vfe->line[i].video_out; + + msm_video_unregister(video_out); + v4l2_device_unregister_subdev(sd); + media_entity_cleanup(&sd->entity); + } + + return ret; +} + +/* + * msm_vfe_unregister_entities - Unregister VFE module subdev node + * @vfe: VFE device + */ +void msm_vfe_unregister_entities(struct vfe_device *vfe) +{ + int i; + + mutex_destroy(&vfe->power_lock); + mutex_destroy(&vfe->stream_lock); + + for (i = 0; i < vfe->line_num; i++) { + struct v4l2_subdev *sd = &vfe->line[i].subdev; + struct camss_video *video_out = &vfe->line[i].video_out; + + msm_video_unregister(video_out); + v4l2_device_unregister_subdev(sd); + media_entity_cleanup(&sd->entity); + } +} diff --git a/drivers/media/platform/qcom/camss/camss-vfe.h b/drivers/media/platform/qcom/camss/camss-vfe.h new file mode 100644 index 0000000000..cbc314c4e2 --- /dev/null +++ b/drivers/media/platform/qcom/camss/camss-vfe.h @@ -0,0 +1,206 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * camss-vfe.h + * + * Qualcomm MSM Camera Subsystem - VFE (Video Front End) Module + * + * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved. + * Copyright (C) 2015-2018 Linaro Ltd. + */ +#ifndef QC_MSM_CAMSS_VFE_H +#define QC_MSM_CAMSS_VFE_H + +#include <linux/clk.h> +#include <linux/spinlock_types.h> +#include <media/media-entity.h> +#include <media/v4l2-device.h> +#include <media/v4l2-subdev.h> + +#include "camss-video.h" +#include "camss-vfe-gen1.h" + +#define MSM_VFE_PAD_SINK 0 +#define MSM_VFE_PAD_SRC 1 +#define MSM_VFE_PADS_NUM 2 + +#define MSM_VFE_IMAGE_MASTERS_NUM 7 +#define MSM_VFE_COMPOSITE_IRQ_NUM 4 + +/* VFE halt timeout */ +#define VFE_HALT_TIMEOUT_MS 100 +/* Frame drop value. 
VAL + UPDATES - 1 should not exceed 31 */ +#define VFE_FRAME_DROP_VAL 30 + +#define vfe_line_array(ptr_line) \ + ((const struct vfe_line (*)[]) &(ptr_line)[-(ptr_line)->id]) + +#define to_vfe(ptr_line) \ + container_of(vfe_line_array(ptr_line), struct vfe_device, line) + +enum vfe_output_state { + VFE_OUTPUT_OFF, + VFE_OUTPUT_RESERVED, + VFE_OUTPUT_SINGLE, + VFE_OUTPUT_CONTINUOUS, + VFE_OUTPUT_IDLE, + VFE_OUTPUT_STOPPING, + VFE_OUTPUT_ON, +}; + +enum vfe_line_id { + VFE_LINE_NONE = -1, + VFE_LINE_RDI0 = 0, + VFE_LINE_RDI1 = 1, + VFE_LINE_RDI2 = 2, + VFE_LINE_NUM_GEN2 = 3, + VFE_LINE_PIX = 3, + VFE_LINE_NUM_GEN1 = 4, + VFE_LINE_NUM_MAX = 4 +}; + +struct vfe_output { + u8 wm_num; + u8 wm_idx[3]; + + struct camss_buffer *buf[2]; + struct camss_buffer *last_buffer; + struct list_head pending_bufs; + + unsigned int drop_update_idx; + + union { + struct { + int active_buf; + int wait_sof; + } gen1; + struct { + int active_num; + } gen2; + }; + enum vfe_output_state state; + unsigned int sequence; + + int wait_reg_update; + struct completion sof; + struct completion reg_update; +}; + +struct vfe_line { + enum vfe_line_id id; + struct v4l2_subdev subdev; + struct media_pad pads[MSM_VFE_PADS_NUM]; + struct v4l2_mbus_framefmt fmt[MSM_VFE_PADS_NUM]; + struct v4l2_rect compose; + struct v4l2_rect crop; + struct camss_video video_out; + struct vfe_output output; + const struct vfe_format *formats; + unsigned int nformats; +}; + +struct vfe_device; + +struct vfe_hw_ops { + void (*enable_irq_common)(struct vfe_device *vfe); + void (*global_reset)(struct vfe_device *vfe); + u32 (*hw_version)(struct vfe_device *vfe); + irqreturn_t (*isr)(int irq, void *dev); + void (*isr_read)(struct vfe_device *vfe, u32 *value0, u32 *value1); + void (*pm_domain_off)(struct vfe_device *vfe); + int (*pm_domain_on)(struct vfe_device *vfe); + void (*reg_update)(struct vfe_device *vfe, enum vfe_line_id line_id); + void (*reg_update_clear)(struct vfe_device *vfe, + enum vfe_line_id line_id); + void (*subdev_init)(struct device *dev, struct vfe_device *vfe); + int (*vfe_disable)(struct vfe_line *line); + int (*vfe_enable)(struct vfe_line *line); + int (*vfe_halt)(struct vfe_device *vfe); + void (*violation_read)(struct vfe_device *vfe); +}; + +struct vfe_isr_ops { + void (*reset_ack)(struct vfe_device *vfe); + void (*halt_ack)(struct vfe_device *vfe); + void (*reg_update)(struct vfe_device *vfe, enum vfe_line_id line_id); + void (*sof)(struct vfe_device *vfe, enum vfe_line_id line_id); + void (*comp_done)(struct vfe_device *vfe, u8 comp); + void (*wm_done)(struct vfe_device *vfe, u8 wm); +}; + +struct vfe_device { + struct camss *camss; + u8 id; + void __iomem *base; + u32 irq; + char irq_name[30]; + struct camss_clock *clock; + int nclocks; + struct completion reset_complete; + struct completion halt_complete; + struct mutex power_lock; + int power_count; + struct mutex stream_lock; + int stream_count; + spinlock_t output_lock; + enum vfe_line_id wm_output_map[MSM_VFE_IMAGE_MASTERS_NUM]; + struct vfe_line line[VFE_LINE_NUM_MAX]; + u8 line_num; + u32 reg_update; + u8 was_streaming; + const struct vfe_hw_ops *ops; + const struct vfe_hw_ops_gen1 *ops_gen1; + struct vfe_isr_ops isr_ops; + struct camss_video_ops video_ops; +}; + +struct resources; + +int msm_vfe_subdev_init(struct camss *camss, struct vfe_device *vfe, + const struct resources *res, u8 id); + +int msm_vfe_register_entities(struct vfe_device *vfe, + struct v4l2_device *v4l2_dev); + +void msm_vfe_unregister_entities(struct vfe_device *vfe); + +/* + * 
vfe_buf_add_pending - Add output buffer to list of pending + * @output: VFE output + * @buffer: Video buffer + */ +void vfe_buf_add_pending(struct vfe_output *output, struct camss_buffer *buffer); + +struct camss_buffer *vfe_buf_get_pending(struct vfe_output *output); + +int vfe_flush_buffers(struct camss_video *vid, enum vb2_buffer_state state); + +/* + * vfe_isr_comp_done - Process composite image done interrupt + * @vfe: VFE Device + * @comp: Composite image id + */ +void vfe_isr_comp_done(struct vfe_device *vfe, u8 comp); + +void vfe_isr_reset_ack(struct vfe_device *vfe); +int vfe_put_output(struct vfe_line *line); +int vfe_release_wm(struct vfe_device *vfe, u8 wm); +int vfe_reserve_wm(struct vfe_device *vfe, enum vfe_line_id line_id); + +/* + * vfe_reset - Trigger reset on VFE module and wait to complete + * @vfe: VFE device + * + * Return 0 on success or a negative error code otherwise + */ +int vfe_reset(struct vfe_device *vfe); + +extern const struct vfe_hw_ops vfe_ops_4_1; +extern const struct vfe_hw_ops vfe_ops_4_7; +extern const struct vfe_hw_ops vfe_ops_4_8; +extern const struct vfe_hw_ops vfe_ops_170; +extern const struct vfe_hw_ops vfe_ops_480; + +int vfe_get(struct vfe_device *vfe); +void vfe_put(struct vfe_device *vfe); + +#endif /* QC_MSM_CAMSS_VFE_H */ diff --git a/drivers/media/platform/qcom/camss/camss-video.c b/drivers/media/platform/qcom/camss/camss-video.c new file mode 100644 index 0000000000..8640db3060 --- /dev/null +++ b/drivers/media/platform/qcom/camss/camss-video.c @@ -0,0 +1,1078 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * camss-video.c + * + * Qualcomm MSM Camera Subsystem - V4L2 device node + * + * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved. + * Copyright (C) 2015-2018 Linaro Ltd. + */ +#include <linux/slab.h> +#include <media/media-entity.h> +#include <media/v4l2-dev.h> +#include <media/v4l2-device.h> +#include <media/v4l2-ioctl.h> +#include <media/v4l2-mc.h> +#include <media/videobuf2-dma-sg.h> + +#include "camss-video.h" +#include "camss.h" + +#define CAMSS_FRAME_MIN_WIDTH 1 +#define CAMSS_FRAME_MAX_WIDTH 8191 +#define CAMSS_FRAME_MIN_HEIGHT 1 +#define CAMSS_FRAME_MAX_HEIGHT_RDI 8191 +#define CAMSS_FRAME_MAX_HEIGHT_PIX 4096 + +struct fract { + u8 numerator; + u8 denominator; +}; + +/* + * struct camss_format_info - ISP media bus format information + * @code: V4L2 media bus format code + * @pixelformat: V4L2 pixel format FCC identifier + * @planes: Number of planes + * @hsub: Horizontal subsampling (for each plane) + * @vsub: Vertical subsampling (for each plane) + * @bpp: Bits per pixel when stored in memory (for each plane) + */ +struct camss_format_info { + u32 code; + u32 pixelformat; + u8 planes; + struct fract hsub[3]; + struct fract vsub[3]; + unsigned int bpp[3]; +}; + +static const struct camss_format_info formats_rdi_8x16[] = { + { MEDIA_BUS_FMT_UYVY8_2X8, V4L2_PIX_FMT_UYVY, 1, + { { 1, 1 } }, { { 1, 1 } }, { 16 } }, + { MEDIA_BUS_FMT_VYUY8_2X8, V4L2_PIX_FMT_VYUY, 1, + { { 1, 1 } }, { { 1, 1 } }, { 16 } }, + { MEDIA_BUS_FMT_YUYV8_2X8, V4L2_PIX_FMT_YUYV, 1, + { { 1, 1 } }, { { 1, 1 } }, { 16 } }, + { MEDIA_BUS_FMT_YVYU8_2X8, V4L2_PIX_FMT_YVYU, 1, + { { 1, 1 } }, { { 1, 1 } }, { 16 } }, + { MEDIA_BUS_FMT_SBGGR8_1X8, V4L2_PIX_FMT_SBGGR8, 1, + { { 1, 1 } }, { { 1, 1 } }, { 8 } }, + { MEDIA_BUS_FMT_SGBRG8_1X8, V4L2_PIX_FMT_SGBRG8, 1, + { { 1, 1 } }, { { 1, 1 } }, { 8 } }, + { MEDIA_BUS_FMT_SGRBG8_1X8, V4L2_PIX_FMT_SGRBG8, 1, + { { 1, 1 } }, { { 1, 1 } }, { 8 } }, + { MEDIA_BUS_FMT_SRGGB8_1X8, V4L2_PIX_FMT_SRGGB8, 1, + { { 
1, 1 } }, { { 1, 1 } }, { 8 } }, + { MEDIA_BUS_FMT_SBGGR10_1X10, V4L2_PIX_FMT_SBGGR10P, 1, + { { 1, 1 } }, { { 1, 1 } }, { 10 } }, + { MEDIA_BUS_FMT_SGBRG10_1X10, V4L2_PIX_FMT_SGBRG10P, 1, + { { 1, 1 } }, { { 1, 1 } }, { 10 } }, + { MEDIA_BUS_FMT_SGRBG10_1X10, V4L2_PIX_FMT_SGRBG10P, 1, + { { 1, 1 } }, { { 1, 1 } }, { 10 } }, + { MEDIA_BUS_FMT_SRGGB10_1X10, V4L2_PIX_FMT_SRGGB10P, 1, + { { 1, 1 } }, { { 1, 1 } }, { 10 } }, + { MEDIA_BUS_FMT_SBGGR12_1X12, V4L2_PIX_FMT_SBGGR12P, 1, + { { 1, 1 } }, { { 1, 1 } }, { 12 } }, + { MEDIA_BUS_FMT_SGBRG12_1X12, V4L2_PIX_FMT_SGBRG12P, 1, + { { 1, 1 } }, { { 1, 1 } }, { 12 } }, + { MEDIA_BUS_FMT_SGRBG12_1X12, V4L2_PIX_FMT_SGRBG12P, 1, + { { 1, 1 } }, { { 1, 1 } }, { 12 } }, + { MEDIA_BUS_FMT_SRGGB12_1X12, V4L2_PIX_FMT_SRGGB12P, 1, + { { 1, 1 } }, { { 1, 1 } }, { 12 } }, + { MEDIA_BUS_FMT_Y10_1X10, V4L2_PIX_FMT_Y10P, 1, + { { 1, 1 } }, { { 1, 1 } }, { 10 } }, +}; + +static const struct camss_format_info formats_rdi_8x96[] = { + { MEDIA_BUS_FMT_UYVY8_2X8, V4L2_PIX_FMT_UYVY, 1, + { { 1, 1 } }, { { 1, 1 } }, { 16 } }, + { MEDIA_BUS_FMT_VYUY8_2X8, V4L2_PIX_FMT_VYUY, 1, + { { 1, 1 } }, { { 1, 1 } }, { 16 } }, + { MEDIA_BUS_FMT_YUYV8_2X8, V4L2_PIX_FMT_YUYV, 1, + { { 1, 1 } }, { { 1, 1 } }, { 16 } }, + { MEDIA_BUS_FMT_YVYU8_2X8, V4L2_PIX_FMT_YVYU, 1, + { { 1, 1 } }, { { 1, 1 } }, { 16 } }, + { MEDIA_BUS_FMT_SBGGR8_1X8, V4L2_PIX_FMT_SBGGR8, 1, + { { 1, 1 } }, { { 1, 1 } }, { 8 } }, + { MEDIA_BUS_FMT_SGBRG8_1X8, V4L2_PIX_FMT_SGBRG8, 1, + { { 1, 1 } }, { { 1, 1 } }, { 8 } }, + { MEDIA_BUS_FMT_SGRBG8_1X8, V4L2_PIX_FMT_SGRBG8, 1, + { { 1, 1 } }, { { 1, 1 } }, { 8 } }, + { MEDIA_BUS_FMT_SRGGB8_1X8, V4L2_PIX_FMT_SRGGB8, 1, + { { 1, 1 } }, { { 1, 1 } }, { 8 } }, + { MEDIA_BUS_FMT_SBGGR10_1X10, V4L2_PIX_FMT_SBGGR10P, 1, + { { 1, 1 } }, { { 1, 1 } }, { 10 } }, + { MEDIA_BUS_FMT_SGBRG10_1X10, V4L2_PIX_FMT_SGBRG10P, 1, + { { 1, 1 } }, { { 1, 1 } }, { 10 } }, + { MEDIA_BUS_FMT_SGRBG10_1X10, V4L2_PIX_FMT_SGRBG10P, 1, + { { 1, 1 } }, { { 1, 1 } }, { 10 } }, + { MEDIA_BUS_FMT_SRGGB10_1X10, V4L2_PIX_FMT_SRGGB10P, 1, + { { 1, 1 } }, { { 1, 1 } }, { 10 } }, + { MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE, V4L2_PIX_FMT_SBGGR10, 1, + { { 1, 1 } }, { { 1, 1 } }, { 16 } }, + { MEDIA_BUS_FMT_SBGGR12_1X12, V4L2_PIX_FMT_SBGGR12P, 1, + { { 1, 1 } }, { { 1, 1 } }, { 12 } }, + { MEDIA_BUS_FMT_SGBRG12_1X12, V4L2_PIX_FMT_SGBRG12P, 1, + { { 1, 1 } }, { { 1, 1 } }, { 12 } }, + { MEDIA_BUS_FMT_SGRBG12_1X12, V4L2_PIX_FMT_SGRBG12P, 1, + { { 1, 1 } }, { { 1, 1 } }, { 12 } }, + { MEDIA_BUS_FMT_SRGGB12_1X12, V4L2_PIX_FMT_SRGGB12P, 1, + { { 1, 1 } }, { { 1, 1 } }, { 12 } }, + { MEDIA_BUS_FMT_SBGGR14_1X14, V4L2_PIX_FMT_SBGGR14P, 1, + { { 1, 1 } }, { { 1, 1 } }, { 14 } }, + { MEDIA_BUS_FMT_SGBRG14_1X14, V4L2_PIX_FMT_SGBRG14P, 1, + { { 1, 1 } }, { { 1, 1 } }, { 14 } }, + { MEDIA_BUS_FMT_SGRBG14_1X14, V4L2_PIX_FMT_SGRBG14P, 1, + { { 1, 1 } }, { { 1, 1 } }, { 14 } }, + { MEDIA_BUS_FMT_SRGGB14_1X14, V4L2_PIX_FMT_SRGGB14P, 1, + { { 1, 1 } }, { { 1, 1 } }, { 14 } }, + { MEDIA_BUS_FMT_Y10_1X10, V4L2_PIX_FMT_Y10P, 1, + { { 1, 1 } }, { { 1, 1 } }, { 10 } }, + { MEDIA_BUS_FMT_Y10_2X8_PADHI_LE, V4L2_PIX_FMT_Y10, 1, + { { 1, 1 } }, { { 1, 1 } }, { 16 } }, +}; + +static const struct camss_format_info formats_rdi_845[] = { + { MEDIA_BUS_FMT_UYVY8_2X8, V4L2_PIX_FMT_UYVY, 1, + { { 1, 1 } }, { { 1, 1 } }, { 16 } }, + { MEDIA_BUS_FMT_VYUY8_2X8, V4L2_PIX_FMT_VYUY, 1, + { { 1, 1 } }, { { 1, 1 } }, { 16 } }, + { MEDIA_BUS_FMT_YUYV8_2X8, V4L2_PIX_FMT_YUYV, 1, + { { 1, 1 } }, { { 1, 1 } }, { 16 } }, + { MEDIA_BUS_FMT_YVYU8_2X8, 
V4L2_PIX_FMT_YVYU, 1, + { { 1, 1 } }, { { 1, 1 } }, { 16 } }, + { MEDIA_BUS_FMT_SBGGR8_1X8, V4L2_PIX_FMT_SBGGR8, 1, + { { 1, 1 } }, { { 1, 1 } }, { 8 } }, + { MEDIA_BUS_FMT_SGBRG8_1X8, V4L2_PIX_FMT_SGBRG8, 1, + { { 1, 1 } }, { { 1, 1 } }, { 8 } }, + { MEDIA_BUS_FMT_SGRBG8_1X8, V4L2_PIX_FMT_SGRBG8, 1, + { { 1, 1 } }, { { 1, 1 } }, { 8 } }, + { MEDIA_BUS_FMT_SRGGB8_1X8, V4L2_PIX_FMT_SRGGB8, 1, + { { 1, 1 } }, { { 1, 1 } }, { 8 } }, + { MEDIA_BUS_FMT_SBGGR10_1X10, V4L2_PIX_FMT_SBGGR10P, 1, + { { 1, 1 } }, { { 1, 1 } }, { 10 } }, + { MEDIA_BUS_FMT_SGBRG10_1X10, V4L2_PIX_FMT_SGBRG10P, 1, + { { 1, 1 } }, { { 1, 1 } }, { 10 } }, + { MEDIA_BUS_FMT_SGRBG10_1X10, V4L2_PIX_FMT_SGRBG10P, 1, + { { 1, 1 } }, { { 1, 1 } }, { 10 } }, + { MEDIA_BUS_FMT_SRGGB10_1X10, V4L2_PIX_FMT_SRGGB10P, 1, + { { 1, 1 } }, { { 1, 1 } }, { 10 } }, + { MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE, V4L2_PIX_FMT_SBGGR10, 1, + { { 1, 1 } }, { { 1, 1 } }, { 16 } }, + { MEDIA_BUS_FMT_SBGGR12_1X12, V4L2_PIX_FMT_SBGGR12P, 1, + { { 1, 1 } }, { { 1, 1 } }, { 12 } }, + { MEDIA_BUS_FMT_SGBRG12_1X12, V4L2_PIX_FMT_SGBRG12P, 1, + { { 1, 1 } }, { { 1, 1 } }, { 12 } }, + { MEDIA_BUS_FMT_SGRBG12_1X12, V4L2_PIX_FMT_SGRBG12P, 1, + { { 1, 1 } }, { { 1, 1 } }, { 12 } }, + { MEDIA_BUS_FMT_SRGGB12_1X12, V4L2_PIX_FMT_SRGGB12P, 1, + { { 1, 1 } }, { { 1, 1 } }, { 12 } }, + { MEDIA_BUS_FMT_SBGGR14_1X14, V4L2_PIX_FMT_SBGGR14P, 1, + { { 1, 1 } }, { { 1, 1 } }, { 14 } }, + { MEDIA_BUS_FMT_SGBRG14_1X14, V4L2_PIX_FMT_SGBRG14P, 1, + { { 1, 1 } }, { { 1, 1 } }, { 14 } }, + { MEDIA_BUS_FMT_SGRBG14_1X14, V4L2_PIX_FMT_SGRBG14P, 1, + { { 1, 1 } }, { { 1, 1 } }, { 14 } }, + { MEDIA_BUS_FMT_SRGGB14_1X14, V4L2_PIX_FMT_SRGGB14P, 1, + { { 1, 1 } }, { { 1, 1 } }, { 14 } }, + { MEDIA_BUS_FMT_Y8_1X8, V4L2_PIX_FMT_GREY, 1, + { { 1, 1 } }, { { 1, 1 } }, { 8 } }, + { MEDIA_BUS_FMT_Y10_1X10, V4L2_PIX_FMT_Y10P, 1, + { { 1, 1 } }, { { 1, 1 } }, { 10 } }, + { MEDIA_BUS_FMT_Y10_2X8_PADHI_LE, V4L2_PIX_FMT_Y10, 1, + { { 1, 1 } }, { { 1, 1 } }, { 16 } }, +}; + +static const struct camss_format_info formats_pix_8x16[] = { + { MEDIA_BUS_FMT_YUYV8_1_5X8, V4L2_PIX_FMT_NV12, 1, + { { 1, 1 } }, { { 2, 3 } }, { 8 } }, + { MEDIA_BUS_FMT_YVYU8_1_5X8, V4L2_PIX_FMT_NV12, 1, + { { 1, 1 } }, { { 2, 3 } }, { 8 } }, + { MEDIA_BUS_FMT_UYVY8_1_5X8, V4L2_PIX_FMT_NV12, 1, + { { 1, 1 } }, { { 2, 3 } }, { 8 } }, + { MEDIA_BUS_FMT_VYUY8_1_5X8, V4L2_PIX_FMT_NV12, 1, + { { 1, 1 } }, { { 2, 3 } }, { 8 } }, + { MEDIA_BUS_FMT_YUYV8_1_5X8, V4L2_PIX_FMT_NV21, 1, + { { 1, 1 } }, { { 2, 3 } }, { 8 } }, + { MEDIA_BUS_FMT_YVYU8_1_5X8, V4L2_PIX_FMT_NV21, 1, + { { 1, 1 } }, { { 2, 3 } }, { 8 } }, + { MEDIA_BUS_FMT_UYVY8_1_5X8, V4L2_PIX_FMT_NV21, 1, + { { 1, 1 } }, { { 2, 3 } }, { 8 } }, + { MEDIA_BUS_FMT_VYUY8_1_5X8, V4L2_PIX_FMT_NV21, 1, + { { 1, 1 } }, { { 2, 3 } }, { 8 } }, + { MEDIA_BUS_FMT_YUYV8_2X8, V4L2_PIX_FMT_NV16, 1, + { { 1, 1 } }, { { 1, 2 } }, { 8 } }, + { MEDIA_BUS_FMT_YVYU8_2X8, V4L2_PIX_FMT_NV16, 1, + { { 1, 1 } }, { { 1, 2 } }, { 8 } }, + { MEDIA_BUS_FMT_UYVY8_2X8, V4L2_PIX_FMT_NV16, 1, + { { 1, 1 } }, { { 1, 2 } }, { 8 } }, + { MEDIA_BUS_FMT_VYUY8_2X8, V4L2_PIX_FMT_NV16, 1, + { { 1, 1 } }, { { 1, 2 } }, { 8 } }, + { MEDIA_BUS_FMT_YUYV8_2X8, V4L2_PIX_FMT_NV61, 1, + { { 1, 1 } }, { { 1, 2 } }, { 8 } }, + { MEDIA_BUS_FMT_YVYU8_2X8, V4L2_PIX_FMT_NV61, 1, + { { 1, 1 } }, { { 1, 2 } }, { 8 } }, + { MEDIA_BUS_FMT_UYVY8_2X8, V4L2_PIX_FMT_NV61, 1, + { { 1, 1 } }, { { 1, 2 } }, { 8 } }, + { MEDIA_BUS_FMT_VYUY8_2X8, V4L2_PIX_FMT_NV61, 1, + { { 1, 1 } }, { { 1, 2 } }, { 8 } }, +}; + +static const struct 
camss_format_info formats_pix_8x96[] = { + { MEDIA_BUS_FMT_YUYV8_1_5X8, V4L2_PIX_FMT_NV12, 1, + { { 1, 1 } }, { { 2, 3 } }, { 8 } }, + { MEDIA_BUS_FMT_YVYU8_1_5X8, V4L2_PIX_FMT_NV12, 1, + { { 1, 1 } }, { { 2, 3 } }, { 8 } }, + { MEDIA_BUS_FMT_UYVY8_1_5X8, V4L2_PIX_FMT_NV12, 1, + { { 1, 1 } }, { { 2, 3 } }, { 8 } }, + { MEDIA_BUS_FMT_VYUY8_1_5X8, V4L2_PIX_FMT_NV12, 1, + { { 1, 1 } }, { { 2, 3 } }, { 8 } }, + { MEDIA_BUS_FMT_YUYV8_1_5X8, V4L2_PIX_FMT_NV21, 1, + { { 1, 1 } }, { { 2, 3 } }, { 8 } }, + { MEDIA_BUS_FMT_YVYU8_1_5X8, V4L2_PIX_FMT_NV21, 1, + { { 1, 1 } }, { { 2, 3 } }, { 8 } }, + { MEDIA_BUS_FMT_UYVY8_1_5X8, V4L2_PIX_FMT_NV21, 1, + { { 1, 1 } }, { { 2, 3 } }, { 8 } }, + { MEDIA_BUS_FMT_VYUY8_1_5X8, V4L2_PIX_FMT_NV21, 1, + { { 1, 1 } }, { { 2, 3 } }, { 8 } }, + { MEDIA_BUS_FMT_YUYV8_2X8, V4L2_PIX_FMT_NV16, 1, + { { 1, 1 } }, { { 1, 2 } }, { 8 } }, + { MEDIA_BUS_FMT_YVYU8_2X8, V4L2_PIX_FMT_NV16, 1, + { { 1, 1 } }, { { 1, 2 } }, { 8 } }, + { MEDIA_BUS_FMT_UYVY8_2X8, V4L2_PIX_FMT_NV16, 1, + { { 1, 1 } }, { { 1, 2 } }, { 8 } }, + { MEDIA_BUS_FMT_VYUY8_2X8, V4L2_PIX_FMT_NV16, 1, + { { 1, 1 } }, { { 1, 2 } }, { 8 } }, + { MEDIA_BUS_FMT_YUYV8_2X8, V4L2_PIX_FMT_NV61, 1, + { { 1, 1 } }, { { 1, 2 } }, { 8 } }, + { MEDIA_BUS_FMT_YVYU8_2X8, V4L2_PIX_FMT_NV61, 1, + { { 1, 1 } }, { { 1, 2 } }, { 8 } }, + { MEDIA_BUS_FMT_UYVY8_2X8, V4L2_PIX_FMT_NV61, 1, + { { 1, 1 } }, { { 1, 2 } }, { 8 } }, + { MEDIA_BUS_FMT_VYUY8_2X8, V4L2_PIX_FMT_NV61, 1, + { { 1, 1 } }, { { 1, 2 } }, { 8 } }, + { MEDIA_BUS_FMT_UYVY8_2X8, V4L2_PIX_FMT_UYVY, 1, + { { 1, 1 } }, { { 1, 1 } }, { 16 } }, + { MEDIA_BUS_FMT_VYUY8_2X8, V4L2_PIX_FMT_VYUY, 1, + { { 1, 1 } }, { { 1, 1 } }, { 16 } }, + { MEDIA_BUS_FMT_YUYV8_2X8, V4L2_PIX_FMT_YUYV, 1, + { { 1, 1 } }, { { 1, 1 } }, { 16 } }, + { MEDIA_BUS_FMT_YVYU8_2X8, V4L2_PIX_FMT_YVYU, 1, + { { 1, 1 } }, { { 1, 1 } }, { 16 } }, +}; + +/* ----------------------------------------------------------------------------- + * Helper functions + */ + +static int video_find_format(u32 code, u32 pixelformat, + const struct camss_format_info *formats, + unsigned int nformats) +{ + int i; + + for (i = 0; i < nformats; i++) { + if (formats[i].code == code && + formats[i].pixelformat == pixelformat) + return i; + } + + for (i = 0; i < nformats; i++) + if (formats[i].code == code) + return i; + + WARN_ON(1); + + return -EINVAL; +} + +/* + * video_mbus_to_pix_mp - Convert v4l2_mbus_framefmt to v4l2_pix_format_mplane + * @mbus: v4l2_mbus_framefmt format (input) + * @pix: v4l2_pix_format_mplane format (output) + * @f: a pointer to formats array element to be used for the conversion + * @alignment: bytesperline alignment value + * + * Fill the output pix structure with information from the input mbus format. 
+ * + * Return 0 on success or a negative error code otherwise + */ +static int video_mbus_to_pix_mp(const struct v4l2_mbus_framefmt *mbus, + struct v4l2_pix_format_mplane *pix, + const struct camss_format_info *f, + unsigned int alignment) +{ + unsigned int i; + u32 bytesperline; + + memset(pix, 0, sizeof(*pix)); + v4l2_fill_pix_format_mplane(pix, mbus); + pix->pixelformat = f->pixelformat; + pix->num_planes = f->planes; + for (i = 0; i < pix->num_planes; i++) { + bytesperline = pix->width / f->hsub[i].numerator * + f->hsub[i].denominator * f->bpp[i] / 8; + bytesperline = ALIGN(bytesperline, alignment); + pix->plane_fmt[i].bytesperline = bytesperline; + pix->plane_fmt[i].sizeimage = pix->height / + f->vsub[i].numerator * f->vsub[i].denominator * + bytesperline; + } + + return 0; +} + +static struct v4l2_subdev *video_remote_subdev(struct camss_video *video, + u32 *pad) +{ + struct media_pad *remote; + + remote = media_pad_remote_pad_first(&video->pad); + + if (!remote || !is_media_entity_v4l2_subdev(remote->entity)) + return NULL; + + if (pad) + *pad = remote->index; + + return media_entity_to_v4l2_subdev(remote->entity); +} + +static int video_get_subdev_format(struct camss_video *video, + struct v4l2_format *format) +{ + struct v4l2_subdev_format fmt = { + .which = V4L2_SUBDEV_FORMAT_ACTIVE, + }; + struct v4l2_subdev *subdev; + u32 pad; + int ret; + + subdev = video_remote_subdev(video, &pad); + if (subdev == NULL) + return -EPIPE; + + fmt.pad = pad; + + ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt); + if (ret) + return ret; + + ret = video_find_format(fmt.format.code, + format->fmt.pix_mp.pixelformat, + video->formats, video->nformats); + if (ret < 0) + return ret; + + format->type = video->type; + + return video_mbus_to_pix_mp(&fmt.format, &format->fmt.pix_mp, + &video->formats[ret], video->bpl_alignment); +} + +/* ----------------------------------------------------------------------------- + * Video queue operations + */ + +static int video_queue_setup(struct vb2_queue *q, + unsigned int *num_buffers, unsigned int *num_planes, + unsigned int sizes[], struct device *alloc_devs[]) +{ + struct camss_video *video = vb2_get_drv_priv(q); + const struct v4l2_pix_format_mplane *format = + &video->active_fmt.fmt.pix_mp; + unsigned int i; + + if (*num_planes) { + if (*num_planes != format->num_planes) + return -EINVAL; + + for (i = 0; i < *num_planes; i++) + if (sizes[i] < format->plane_fmt[i].sizeimage) + return -EINVAL; + + return 0; + } + + *num_planes = format->num_planes; + + for (i = 0; i < *num_planes; i++) + sizes[i] = format->plane_fmt[i].sizeimage; + + return 0; +} + +static int video_buf_init(struct vb2_buffer *vb) +{ + struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); + struct camss_video *video = vb2_get_drv_priv(vb->vb2_queue); + struct camss_buffer *buffer = container_of(vbuf, struct camss_buffer, + vb); + const struct v4l2_pix_format_mplane *format = + &video->active_fmt.fmt.pix_mp; + struct sg_table *sgt; + unsigned int i; + + for (i = 0; i < format->num_planes; i++) { + sgt = vb2_dma_sg_plane_desc(vb, i); + if (!sgt) + return -EFAULT; + + buffer->addr[i] = sg_dma_address(sgt->sgl); + } + + if (format->pixelformat == V4L2_PIX_FMT_NV12 || + format->pixelformat == V4L2_PIX_FMT_NV21 || + format->pixelformat == V4L2_PIX_FMT_NV16 || + format->pixelformat == V4L2_PIX_FMT_NV61) + buffer->addr[1] = buffer->addr[0] + + format->plane_fmt[0].bytesperline * + format->height; + + return 0; +} + +static int video_buf_prepare(struct vb2_buffer *vb) +{ + struct 
vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); + struct camss_video *video = vb2_get_drv_priv(vb->vb2_queue); + const struct v4l2_pix_format_mplane *format = + &video->active_fmt.fmt.pix_mp; + unsigned int i; + + for (i = 0; i < format->num_planes; i++) { + if (format->plane_fmt[i].sizeimage > vb2_plane_size(vb, i)) + return -EINVAL; + + vb2_set_plane_payload(vb, i, format->plane_fmt[i].sizeimage); + } + + vbuf->field = V4L2_FIELD_NONE; + + return 0; +} + +static void video_buf_queue(struct vb2_buffer *vb) +{ + struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); + struct camss_video *video = vb2_get_drv_priv(vb->vb2_queue); + struct camss_buffer *buffer = container_of(vbuf, struct camss_buffer, + vb); + + video->ops->queue_buffer(video, buffer); +} + +static int video_check_format(struct camss_video *video) +{ + struct v4l2_pix_format_mplane *pix = &video->active_fmt.fmt.pix_mp; + struct v4l2_format format; + struct v4l2_pix_format_mplane *sd_pix = &format.fmt.pix_mp; + int ret; + + sd_pix->pixelformat = pix->pixelformat; + ret = video_get_subdev_format(video, &format); + if (ret < 0) + return ret; + + if (pix->pixelformat != sd_pix->pixelformat || + pix->height != sd_pix->height || + pix->width != sd_pix->width || + pix->num_planes != sd_pix->num_planes || + pix->field != format.fmt.pix_mp.field) + return -EPIPE; + + return 0; +} + +static int video_start_streaming(struct vb2_queue *q, unsigned int count) +{ + struct camss_video *video = vb2_get_drv_priv(q); + struct video_device *vdev = &video->vdev; + struct media_entity *entity; + struct media_pad *pad; + struct v4l2_subdev *subdev; + int ret; + + ret = video_device_pipeline_alloc_start(vdev); + if (ret < 0) { + dev_err(video->camss->dev, "Failed to start media pipeline: %d\n", ret); + goto flush_buffers; + } + + ret = video_check_format(video); + if (ret < 0) + goto error; + + entity = &vdev->entity; + while (1) { + pad = &entity->pads[0]; + if (!(pad->flags & MEDIA_PAD_FL_SINK)) + break; + + pad = media_pad_remote_pad_first(pad); + if (!pad || !is_media_entity_v4l2_subdev(pad->entity)) + break; + + entity = pad->entity; + subdev = media_entity_to_v4l2_subdev(entity); + + ret = v4l2_subdev_call(subdev, video, s_stream, 1); + if (ret < 0 && ret != -ENOIOCTLCMD) + goto error; + } + + return 0; + +error: + video_device_pipeline_stop(vdev); + +flush_buffers: + video->ops->flush_buffers(video, VB2_BUF_STATE_QUEUED); + + return ret; +} + +static void video_stop_streaming(struct vb2_queue *q) +{ + struct camss_video *video = vb2_get_drv_priv(q); + struct video_device *vdev = &video->vdev; + struct media_entity *entity; + struct media_pad *pad; + struct v4l2_subdev *subdev; + int ret; + + entity = &vdev->entity; + while (1) { + pad = &entity->pads[0]; + if (!(pad->flags & MEDIA_PAD_FL_SINK)) + break; + + pad = media_pad_remote_pad_first(pad); + if (!pad || !is_media_entity_v4l2_subdev(pad->entity)) + break; + + entity = pad->entity; + subdev = media_entity_to_v4l2_subdev(entity); + + ret = v4l2_subdev_call(subdev, video, s_stream, 0); + + if (entity->use_count > 1) { + /* Don't stop if other instances of the pipeline are still running */ + dev_dbg(video->camss->dev, "Video pipeline still used, don't stop streaming.\n"); + return; + } + + if (ret) { + dev_err(video->camss->dev, "Video pipeline stop failed: %d\n", ret); + return; + } + } + + video_device_pipeline_stop(vdev); + + video->ops->flush_buffers(video, VB2_BUF_STATE_ERROR); +} + +static const struct vb2_ops msm_video_vb2_q_ops = { + .queue_setup = video_queue_setup, + .wait_prepare 
= vb2_ops_wait_prepare, + .wait_finish = vb2_ops_wait_finish, + .buf_init = video_buf_init, + .buf_prepare = video_buf_prepare, + .buf_queue = video_buf_queue, + .start_streaming = video_start_streaming, + .stop_streaming = video_stop_streaming, +}; + +/* ----------------------------------------------------------------------------- + * V4L2 ioctls + */ + +static int video_querycap(struct file *file, void *fh, + struct v4l2_capability *cap) +{ + strscpy(cap->driver, "qcom-camss", sizeof(cap->driver)); + strscpy(cap->card, "Qualcomm Camera Subsystem", sizeof(cap->card)); + + return 0; +} + +static int video_enum_fmt(struct file *file, void *fh, struct v4l2_fmtdesc *f) +{ + struct camss_video *video = video_drvdata(file); + int i, j, k; + u32 mcode = f->mbus_code; + + if (f->type != video->type) + return -EINVAL; + + if (f->index >= video->nformats) + return -EINVAL; + + /* + * Find index "i" of "k"th unique pixelformat in formats array. + * + * If f->mbus_code passed to video_enum_fmt() is not zero, a device + * with V4L2_CAP_IO_MC capability restricts enumeration to only the + * pixel formats that can be produced from that media bus code. + * This is implemented by skipping video->formats[] entries with + * code != f->mbus_code (if f->mbus_code is not zero). + * If the f->mbus_code passed to video_enum_fmt() is not supported, + * -EINVAL is returned. + * If f->mbus_code is zero, all the pixel formats are enumerated. + */ + k = -1; + for (i = 0; i < video->nformats; i++) { + if (mcode != 0 && video->formats[i].code != mcode) + continue; + + for (j = 0; j < i; j++) { + if (mcode != 0 && video->formats[j].code != mcode) + continue; + if (video->formats[i].pixelformat == + video->formats[j].pixelformat) + break; + } + + if (j == i) + k++; + + if (k == f->index) + break; + } + + if (k == -1 || k < f->index) + /* + * All the unique pixel formats matching the arguments + * have been enumerated (k >= 0 and f->index > 0), or + * no pixel formats match the non-zero f->mbus_code (k == -1). + */ + return -EINVAL; + + f->pixelformat = video->formats[i].pixelformat; + + return 0; +} + +static int video_enum_framesizes(struct file *file, void *fh, + struct v4l2_frmsizeenum *fsize) +{ + struct camss_video *video = video_drvdata(file); + int i; + + if (fsize->index) + return -EINVAL; + + /* Only accept pixel format present in the formats[] table */ + for (i = 0; i < video->nformats; i++) { + if (video->formats[i].pixelformat == fsize->pixel_format) + break; + } + + if (i == video->nformats) + return -EINVAL; + + fsize->type = V4L2_FRMSIZE_TYPE_CONTINUOUS; + fsize->stepwise.min_width = CAMSS_FRAME_MIN_WIDTH; + fsize->stepwise.max_width = CAMSS_FRAME_MAX_WIDTH; + fsize->stepwise.min_height = CAMSS_FRAME_MIN_HEIGHT; + fsize->stepwise.max_height = (video->line_based) ? 
+ CAMSS_FRAME_MAX_HEIGHT_PIX : CAMSS_FRAME_MAX_HEIGHT_RDI; + fsize->stepwise.step_width = 1; + fsize->stepwise.step_height = 1; + + return 0; +} + +static int video_g_fmt(struct file *file, void *fh, struct v4l2_format *f) +{ + struct camss_video *video = video_drvdata(file); + + *f = video->active_fmt; + + return 0; +} + +static int __video_try_fmt(struct camss_video *video, struct v4l2_format *f) +{ + struct v4l2_pix_format_mplane *pix_mp; + const struct camss_format_info *fi; + struct v4l2_plane_pix_format *p; + u32 bytesperline[3] = { 0 }; + u32 sizeimage[3] = { 0 }; + u32 width, height; + u32 bpl, lines; + int i, j; + + pix_mp = &f->fmt.pix_mp; + + if (video->line_based) + for (i = 0; i < pix_mp->num_planes && i < 3; i++) { + p = &pix_mp->plane_fmt[i]; + bytesperline[i] = clamp_t(u32, p->bytesperline, + 1, 65528); + sizeimage[i] = clamp_t(u32, p->sizeimage, + bytesperline[i], + bytesperline[i] * CAMSS_FRAME_MAX_HEIGHT_PIX); + } + + for (j = 0; j < video->nformats; j++) + if (pix_mp->pixelformat == video->formats[j].pixelformat) + break; + + if (j == video->nformats) + j = 0; /* default format */ + + fi = &video->formats[j]; + width = pix_mp->width; + height = pix_mp->height; + + memset(pix_mp, 0, sizeof(*pix_mp)); + + pix_mp->pixelformat = fi->pixelformat; + pix_mp->width = clamp_t(u32, width, 1, CAMSS_FRAME_MAX_WIDTH); + pix_mp->height = clamp_t(u32, height, 1, CAMSS_FRAME_MAX_HEIGHT_RDI); + pix_mp->num_planes = fi->planes; + for (i = 0; i < pix_mp->num_planes; i++) { + bpl = pix_mp->width / fi->hsub[i].numerator * + fi->hsub[i].denominator * fi->bpp[i] / 8; + bpl = ALIGN(bpl, video->bpl_alignment); + pix_mp->plane_fmt[i].bytesperline = bpl; + pix_mp->plane_fmt[i].sizeimage = pix_mp->height / + fi->vsub[i].numerator * fi->vsub[i].denominator * bpl; + } + + pix_mp->field = V4L2_FIELD_NONE; + pix_mp->colorspace = V4L2_COLORSPACE_SRGB; + pix_mp->flags = 0; + pix_mp->ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(pix_mp->colorspace); + pix_mp->quantization = V4L2_MAP_QUANTIZATION_DEFAULT(true, + pix_mp->colorspace, pix_mp->ycbcr_enc); + pix_mp->xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(pix_mp->colorspace); + + if (video->line_based) + for (i = 0; i < pix_mp->num_planes; i++) { + p = &pix_mp->plane_fmt[i]; + p->bytesperline = clamp_t(u32, p->bytesperline, + 1, 65528); + p->sizeimage = clamp_t(u32, p->sizeimage, + p->bytesperline, + p->bytesperline * CAMSS_FRAME_MAX_HEIGHT_PIX); + lines = p->sizeimage / p->bytesperline; + + if (p->bytesperline < bytesperline[i]) + p->bytesperline = ALIGN(bytesperline[i], 8); + + if (p->sizeimage < p->bytesperline * lines) + p->sizeimage = p->bytesperline * lines; + + if (p->sizeimage < sizeimage[i]) + p->sizeimage = sizeimage[i]; + } + + return 0; +} + +static int video_try_fmt(struct file *file, void *fh, struct v4l2_format *f) +{ + struct camss_video *video = video_drvdata(file); + + return __video_try_fmt(video, f); +} + +static int video_s_fmt(struct file *file, void *fh, struct v4l2_format *f) +{ + struct camss_video *video = video_drvdata(file); + int ret; + + if (vb2_is_busy(&video->vb2_q)) + return -EBUSY; + + ret = __video_try_fmt(video, f); + if (ret < 0) + return ret; + + video->active_fmt = *f; + + return 0; +} + +static int video_enum_input(struct file *file, void *fh, + struct v4l2_input *input) +{ + if (input->index > 0) + return -EINVAL; + + strscpy(input->name, "camera", sizeof(input->name)); + input->type = V4L2_INPUT_TYPE_CAMERA; + + return 0; +} + +static int video_g_input(struct file *file, void *fh, unsigned int *input) +{ + *input = 0; + + 
return 0; +} + +static int video_s_input(struct file *file, void *fh, unsigned int input) +{ + return input == 0 ? 0 : -EINVAL; +} + +static const struct v4l2_ioctl_ops msm_vid_ioctl_ops = { + .vidioc_querycap = video_querycap, + .vidioc_enum_fmt_vid_cap = video_enum_fmt, + .vidioc_enum_framesizes = video_enum_framesizes, + .vidioc_g_fmt_vid_cap_mplane = video_g_fmt, + .vidioc_s_fmt_vid_cap_mplane = video_s_fmt, + .vidioc_try_fmt_vid_cap_mplane = video_try_fmt, + .vidioc_reqbufs = vb2_ioctl_reqbufs, + .vidioc_querybuf = vb2_ioctl_querybuf, + .vidioc_qbuf = vb2_ioctl_qbuf, + .vidioc_expbuf = vb2_ioctl_expbuf, + .vidioc_dqbuf = vb2_ioctl_dqbuf, + .vidioc_create_bufs = vb2_ioctl_create_bufs, + .vidioc_prepare_buf = vb2_ioctl_prepare_buf, + .vidioc_streamon = vb2_ioctl_streamon, + .vidioc_streamoff = vb2_ioctl_streamoff, + .vidioc_enum_input = video_enum_input, + .vidioc_g_input = video_g_input, + .vidioc_s_input = video_s_input, +}; + +/* ----------------------------------------------------------------------------- + * V4L2 file operations + */ + +static int video_open(struct file *file) +{ + struct video_device *vdev = video_devdata(file); + struct camss_video *video = video_drvdata(file); + struct v4l2_fh *vfh; + int ret; + + mutex_lock(&video->lock); + + vfh = kzalloc(sizeof(*vfh), GFP_KERNEL); + if (vfh == NULL) { + ret = -ENOMEM; + goto error_alloc; + } + + v4l2_fh_init(vfh, vdev); + v4l2_fh_add(vfh); + + file->private_data = vfh; + + ret = v4l2_pipeline_pm_get(&vdev->entity); + if (ret < 0) { + dev_err(video->camss->dev, "Failed to power up pipeline: %d\n", + ret); + goto error_pm_use; + } + + mutex_unlock(&video->lock); + + return 0; + +error_pm_use: + v4l2_fh_release(file); + +error_alloc: + mutex_unlock(&video->lock); + + return ret; +} + +static int video_release(struct file *file) +{ + struct video_device *vdev = video_devdata(file); + + vb2_fop_release(file); + + v4l2_pipeline_pm_put(&vdev->entity); + + file->private_data = NULL; + + return 0; +} + +static const struct v4l2_file_operations msm_vid_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = video_ioctl2, + .open = video_open, + .release = video_release, + .poll = vb2_fop_poll, + .mmap = vb2_fop_mmap, + .read = vb2_fop_read, +}; + +/* ----------------------------------------------------------------------------- + * CAMSS video core + */ + +static void msm_video_release(struct video_device *vdev) +{ + struct camss_video *video = video_get_drvdata(vdev); + + media_entity_cleanup(&vdev->entity); + + mutex_destroy(&video->q_lock); + mutex_destroy(&video->lock); + + if (atomic_dec_and_test(&video->camss->ref_count)) + camss_delete(video->camss); +} + +/* + * msm_video_init_format - Helper function to initialize format + * @video: struct camss_video + * + * Initialize pad format with default value. + * + * Return 0 on success or a negative error code otherwise + */ +static int msm_video_init_format(struct camss_video *video) +{ + int ret; + struct v4l2_format format = { + .type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE, + .fmt.pix_mp = { + .width = 1920, + .height = 1080, + .pixelformat = video->formats[0].pixelformat, + }, + }; + + ret = __video_try_fmt(video, &format); + if (ret < 0) + return ret; + + video->active_fmt = format; + + return 0; +} + +/* + * msm_video_register - Register a video device node + * @video: struct camss_video + * @v4l2_dev: V4L2 device + * @name: name to be used for the video device node + * + * Initialize and register a video device node to a V4L2 device. Also + * initialize the vb2 queue. 
+ * + * Return 0 on success or a negative error code otherwise + */ + +int msm_video_register(struct camss_video *video, struct v4l2_device *v4l2_dev, + const char *name, int is_pix) +{ + struct media_pad *pad = &video->pad; + struct video_device *vdev; + struct vb2_queue *q; + int ret; + + vdev = &video->vdev; + + mutex_init(&video->q_lock); + + q = &video->vb2_q; + q->drv_priv = video; + q->mem_ops = &vb2_dma_sg_memops; + q->ops = &msm_video_vb2_q_ops; + q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; + q->io_modes = VB2_DMABUF | VB2_MMAP | VB2_READ; + q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; + q->buf_struct_size = sizeof(struct camss_buffer); + q->dev = video->camss->dev; + q->lock = &video->q_lock; + ret = vb2_queue_init(q); + if (ret < 0) { + dev_err(v4l2_dev->dev, "Failed to init vb2 queue: %d\n", ret); + goto error_vb2_init; + } + + pad->flags = MEDIA_PAD_FL_SINK; + ret = media_entity_pads_init(&vdev->entity, 1, pad); + if (ret < 0) { + dev_err(v4l2_dev->dev, "Failed to init video entity: %d\n", + ret); + goto error_vb2_init; + } + + mutex_init(&video->lock); + + if (video->camss->version == CAMSS_8x16) { + if (is_pix) { + video->formats = formats_pix_8x16; + video->nformats = ARRAY_SIZE(formats_pix_8x16); + } else { + video->formats = formats_rdi_8x16; + video->nformats = ARRAY_SIZE(formats_rdi_8x16); + } + } else if (video->camss->version == CAMSS_8x96 || + video->camss->version == CAMSS_660) { + if (is_pix) { + video->formats = formats_pix_8x96; + video->nformats = ARRAY_SIZE(formats_pix_8x96); + } else { + video->formats = formats_rdi_8x96; + video->nformats = ARRAY_SIZE(formats_rdi_8x96); + } + } else if (video->camss->version == CAMSS_845 || + video->camss->version == CAMSS_8250) { + video->formats = formats_rdi_845; + video->nformats = ARRAY_SIZE(formats_rdi_845); + } else { + ret = -EINVAL; + goto error_video_register; + } + + ret = msm_video_init_format(video); + if (ret < 0) { + dev_err(v4l2_dev->dev, "Failed to init format: %d\n", ret); + goto error_video_register; + } + + vdev->fops = &msm_vid_fops; + vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_STREAMING + | V4L2_CAP_READWRITE | V4L2_CAP_IO_MC; + vdev->ioctl_ops = &msm_vid_ioctl_ops; + vdev->release = msm_video_release; + vdev->v4l2_dev = v4l2_dev; + vdev->vfl_dir = VFL_DIR_RX; + vdev->queue = &video->vb2_q; + vdev->lock = &video->lock; + strscpy(vdev->name, name, sizeof(vdev->name)); + + ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1); + if (ret < 0) { + dev_err(v4l2_dev->dev, "Failed to register video device: %d\n", + ret); + goto error_video_register; + } + + video_set_drvdata(vdev, video); + atomic_inc(&video->camss->ref_count); + + return 0; + +error_video_register: + media_entity_cleanup(&vdev->entity); + mutex_destroy(&video->lock); +error_vb2_init: + mutex_destroy(&video->q_lock); + + return ret; +} + +void msm_video_unregister(struct camss_video *video) +{ + atomic_inc(&video->camss->ref_count); + vb2_video_unregister_device(&video->vdev); + atomic_dec(&video->camss->ref_count); +} diff --git a/drivers/media/platform/qcom/camss/camss-video.h b/drivers/media/platform/qcom/camss/camss-video.h new file mode 100644 index 0000000000..bdbae84241 --- /dev/null +++ b/drivers/media/platform/qcom/camss/camss-video.h @@ -0,0 +1,60 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * camss-video.h + * + * Qualcomm MSM Camera Subsystem - V4L2 device node + * + * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved. + * Copyright (C) 2015-2018 Linaro Ltd. 
+ */ +#ifndef QC_MSM_CAMSS_VIDEO_H +#define QC_MSM_CAMSS_VIDEO_H + +#include <linux/mutex.h> +#include <linux/videodev2.h> +#include <media/media-entity.h> +#include <media/v4l2-dev.h> +#include <media/v4l2-device.h> +#include <media/v4l2-fh.h> +#include <media/v4l2-mediabus.h> +#include <media/videobuf2-v4l2.h> + +struct camss_buffer { + struct vb2_v4l2_buffer vb; + dma_addr_t addr[3]; + struct list_head queue; +}; + +struct camss_video; + +struct camss_video_ops { + int (*queue_buffer)(struct camss_video *vid, struct camss_buffer *buf); + int (*flush_buffers)(struct camss_video *vid, + enum vb2_buffer_state state); +}; + +struct camss_format_info; + +struct camss_video { + struct camss *camss; + struct vb2_queue vb2_q; + struct video_device vdev; + struct media_pad pad; + struct v4l2_format active_fmt; + enum v4l2_buf_type type; + struct media_pipeline pipe; + const struct camss_video_ops *ops; + struct mutex lock; + struct mutex q_lock; + unsigned int bpl_alignment; + unsigned int line_based; + const struct camss_format_info *formats; + unsigned int nformats; +}; + +int msm_video_register(struct camss_video *video, struct v4l2_device *v4l2_dev, + const char *name, int is_pix); + +void msm_video_unregister(struct camss_video *video); + +#endif /* QC_MSM_CAMSS_VIDEO_H */ diff --git a/drivers/media/platform/qcom/camss/camss.c b/drivers/media/platform/qcom/camss/camss.c new file mode 100644 index 0000000000..c6df862c79 --- /dev/null +++ b/drivers/media/platform/qcom/camss/camss.c @@ -0,0 +1,1817 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * camss.c + * + * Qualcomm MSM Camera Subsystem - Core + * + * Copyright (c) 2015, The Linux Foundation. All rights reserved. + * Copyright (C) 2015-2018 Linaro Ltd. + */ +#include <linux/clk.h> +#include <linux/interconnect.h> +#include <linux/media-bus-format.h> +#include <linux/media.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/of.h> +#include <linux/of_graph.h> +#include <linux/pm_runtime.h> +#include <linux/pm_domain.h> +#include <linux/slab.h> +#include <linux/videodev2.h> + +#include <media/media-device.h> +#include <media/v4l2-async.h> +#include <media/v4l2-device.h> +#include <media/v4l2-mc.h> +#include <media/v4l2-fwnode.h> + +#include "camss.h" + +#define CAMSS_CLOCK_MARGIN_NUMERATOR 105 +#define CAMSS_CLOCK_MARGIN_DENOMINATOR 100 + +static const struct resources csiphy_res_8x16[] = { + /* CSIPHY0 */ + { + .regulators = {}, + .clock = { "top_ahb", "ispif_ahb", "ahb", "csiphy0_timer" }, + .clock_rate = { { 0 }, + { 0 }, + { 0 }, + { 100000000, 200000000 } }, + .reg = { "csiphy0", "csiphy0_clk_mux" }, + .interrupt = { "csiphy0" } + }, + + /* CSIPHY1 */ + { + .regulators = {}, + .clock = { "top_ahb", "ispif_ahb", "ahb", "csiphy1_timer" }, + .clock_rate = { { 0 }, + { 0 }, + { 0 }, + { 100000000, 200000000 } }, + .reg = { "csiphy1", "csiphy1_clk_mux" }, + .interrupt = { "csiphy1" } + } +}; + +static const struct resources csid_res_8x16[] = { + /* CSID0 */ + { + .regulators = { "vdda" }, + .clock = { "top_ahb", "ispif_ahb", "csi0_ahb", "ahb", + "csi0", "csi0_phy", "csi0_pix", "csi0_rdi" }, + .clock_rate = { { 0 }, + { 0 }, + { 0 }, + { 0 }, + { 100000000, 200000000 }, + { 0 }, + { 0 }, + { 0 } }, + .reg = { "csid0" }, + .interrupt = { "csid0" } + }, + + /* CSID1 */ + { + .regulators = { "vdda" }, + .clock = { "top_ahb", "ispif_ahb", "csi1_ahb", "ahb", + "csi1", "csi1_phy", "csi1_pix", "csi1_rdi" }, + .clock_rate = { { 0 }, + { 0 }, + { 0 }, + { 0 }, + { 100000000, 200000000 }, + { 0 }, + { 0 }, + { 0 } }, + 
.reg = { "csid1" }, + .interrupt = { "csid1" } + }, +}; + +static const struct resources_ispif ispif_res_8x16 = { + /* ISPIF */ + .clock = { "top_ahb", "ahb", "ispif_ahb", + "csi0", "csi0_pix", "csi0_rdi", + "csi1", "csi1_pix", "csi1_rdi" }, + .clock_for_reset = { "vfe0", "csi_vfe0" }, + .reg = { "ispif", "csi_clk_mux" }, + .interrupt = "ispif" + +}; + +static const struct resources vfe_res_8x16[] = { + /* VFE0 */ + { + .regulators = {}, + .clock = { "top_ahb", "vfe0", "csi_vfe0", + "vfe_ahb", "vfe_axi", "ahb" }, + .clock_rate = { { 0 }, + { 50000000, 80000000, 100000000, 160000000, + 177780000, 200000000, 266670000, 320000000, + 400000000, 465000000 }, + { 0 }, + { 0 }, + { 0 }, + { 0 }, + { 0 }, + { 0 }, + { 0 } }, + .reg = { "vfe0" }, + .interrupt = { "vfe0" } + } +}; + +static const struct resources csiphy_res_8x96[] = { + /* CSIPHY0 */ + { + .regulators = {}, + .clock = { "top_ahb", "ispif_ahb", "ahb", "csiphy0_timer" }, + .clock_rate = { { 0 }, + { 0 }, + { 0 }, + { 100000000, 200000000, 266666667 } }, + .reg = { "csiphy0", "csiphy0_clk_mux" }, + .interrupt = { "csiphy0" } + }, + + /* CSIPHY1 */ + { + .regulators = {}, + .clock = { "top_ahb", "ispif_ahb", "ahb", "csiphy1_timer" }, + .clock_rate = { { 0 }, + { 0 }, + { 0 }, + { 100000000, 200000000, 266666667 } }, + .reg = { "csiphy1", "csiphy1_clk_mux" }, + .interrupt = { "csiphy1" } + }, + + /* CSIPHY2 */ + { + .regulators = {}, + .clock = { "top_ahb", "ispif_ahb", "ahb", "csiphy2_timer" }, + .clock_rate = { { 0 }, + { 0 }, + { 0 }, + { 100000000, 200000000, 266666667 } }, + .reg = { "csiphy2", "csiphy2_clk_mux" }, + .interrupt = { "csiphy2" } + } +}; + +static const struct resources csid_res_8x96[] = { + /* CSID0 */ + { + .regulators = { "vdda" }, + .clock = { "top_ahb", "ispif_ahb", "csi0_ahb", "ahb", + "csi0", "csi0_phy", "csi0_pix", "csi0_rdi" }, + .clock_rate = { { 0 }, + { 0 }, + { 0 }, + { 0 }, + { 100000000, 200000000, 266666667 }, + { 0 }, + { 0 }, + { 0 } }, + .reg = { "csid0" }, + .interrupt = { "csid0" } + }, + + /* CSID1 */ + { + .regulators = { "vdda" }, + .clock = { "top_ahb", "ispif_ahb", "csi1_ahb", "ahb", + "csi1", "csi1_phy", "csi1_pix", "csi1_rdi" }, + .clock_rate = { { 0 }, + { 0 }, + { 0 }, + { 0 }, + { 100000000, 200000000, 266666667 }, + { 0 }, + { 0 }, + { 0 } }, + .reg = { "csid1" }, + .interrupt = { "csid1" } + }, + + /* CSID2 */ + { + .regulators = { "vdda" }, + .clock = { "top_ahb", "ispif_ahb", "csi2_ahb", "ahb", + "csi2", "csi2_phy", "csi2_pix", "csi2_rdi" }, + .clock_rate = { { 0 }, + { 0 }, + { 0 }, + { 0 }, + { 100000000, 200000000, 266666667 }, + { 0 }, + { 0 }, + { 0 } }, + .reg = { "csid2" }, + .interrupt = { "csid2" } + }, + + /* CSID3 */ + { + .regulators = { "vdda" }, + .clock = { "top_ahb", "ispif_ahb", "csi3_ahb", "ahb", + "csi3", "csi3_phy", "csi3_pix", "csi3_rdi" }, + .clock_rate = { { 0 }, + { 0 }, + { 0 }, + { 0 }, + { 100000000, 200000000, 266666667 }, + { 0 }, + { 0 }, + { 0 } }, + .reg = { "csid3" }, + .interrupt = { "csid3" } + } +}; + +static const struct resources_ispif ispif_res_8x96 = { + /* ISPIF */ + .clock = { "top_ahb", "ahb", "ispif_ahb", + "csi0", "csi0_pix", "csi0_rdi", + "csi1", "csi1_pix", "csi1_rdi", + "csi2", "csi2_pix", "csi2_rdi", + "csi3", "csi3_pix", "csi3_rdi" }, + .clock_for_reset = { "vfe0", "csi_vfe0", "vfe1", "csi_vfe1" }, + .reg = { "ispif", "csi_clk_mux" }, + .interrupt = "ispif" +}; + +static const struct resources vfe_res_8x96[] = { + /* VFE0 */ + { + .regulators = {}, + .clock = { "top_ahb", "ahb", "vfe0", "csi_vfe0", "vfe_ahb", + "vfe0_ahb", "vfe_axi", 
"vfe0_stream"}, + .clock_rate = { { 0 }, + { 0 }, + { 75000000, 100000000, 300000000, + 320000000, 480000000, 600000000 }, + { 0 }, + { 0 }, + { 0 }, + { 0 }, + { 0 } }, + .reg = { "vfe0" }, + .interrupt = { "vfe0" } + }, + + /* VFE1 */ + { + .regulators = {}, + .clock = { "top_ahb", "ahb", "vfe1", "csi_vfe1", "vfe_ahb", + "vfe1_ahb", "vfe_axi", "vfe1_stream"}, + .clock_rate = { { 0 }, + { 0 }, + { 75000000, 100000000, 300000000, + 320000000, 480000000, 600000000 }, + { 0 }, + { 0 }, + { 0 }, + { 0 }, + { 0 } }, + .reg = { "vfe1" }, + .interrupt = { "vfe1" } + } +}; + +static const struct resources csiphy_res_660[] = { + /* CSIPHY0 */ + { + .regulators = {}, + .clock = { "top_ahb", "ispif_ahb", "ahb", "csiphy0_timer", + "csi0_phy", "csiphy_ahb2crif" }, + .clock_rate = { { 0 }, + { 0 }, + { 0 }, + { 100000000, 200000000, 269333333 }, + { 0 } }, + .reg = { "csiphy0", "csiphy0_clk_mux" }, + .interrupt = { "csiphy0" } + }, + + /* CSIPHY1 */ + { + .regulators = {}, + .clock = { "top_ahb", "ispif_ahb", "ahb", "csiphy1_timer", + "csi1_phy", "csiphy_ahb2crif" }, + .clock_rate = { { 0 }, + { 0 }, + { 0 }, + { 100000000, 200000000, 269333333 }, + { 0 } }, + .reg = { "csiphy1", "csiphy1_clk_mux" }, + .interrupt = { "csiphy1" } + }, + + /* CSIPHY2 */ + { + .regulators = {}, + .clock = { "top_ahb", "ispif_ahb", "ahb", "csiphy2_timer", + "csi2_phy", "csiphy_ahb2crif" }, + .clock_rate = { { 0 }, + { 0 }, + { 0 }, + { 100000000, 200000000, 269333333 }, + { 0 } }, + .reg = { "csiphy2", "csiphy2_clk_mux" }, + .interrupt = { "csiphy2" } + } +}; + +static const struct resources csid_res_660[] = { + /* CSID0 */ + { + .regulators = { "vdda", "vdd_sec" }, + .clock = { "top_ahb", "ispif_ahb", "csi0_ahb", "ahb", + "csi0", "csi0_phy", "csi0_pix", "csi0_rdi", + "cphy_csid0" }, + .clock_rate = { { 0 }, + { 0 }, + { 0 }, + { 0 }, + { 100000000, 200000000, 310000000, + 404000000, 465000000 }, + { 0 }, + { 0 }, + { 0 }, + { 0 } }, + .reg = { "csid0" }, + .interrupt = { "csid0" } + }, + + /* CSID1 */ + { + .regulators = { "vdda", "vdd_sec" }, + .clock = { "top_ahb", "ispif_ahb", "csi1_ahb", "ahb", + "csi1", "csi1_phy", "csi1_pix", "csi1_rdi", + "cphy_csid1" }, + .clock_rate = { { 0 }, + { 0 }, + { 0 }, + { 0 }, + { 100000000, 200000000, 310000000, + 404000000, 465000000 }, + { 0 }, + { 0 }, + { 0 }, + { 0 } }, + .reg = { "csid1" }, + .interrupt = { "csid1" } + }, + + /* CSID2 */ + { + .regulators = { "vdda", "vdd_sec" }, + .clock = { "top_ahb", "ispif_ahb", "csi2_ahb", "ahb", + "csi2", "csi2_phy", "csi2_pix", "csi2_rdi", + "cphy_csid2" }, + .clock_rate = { { 0 }, + { 0 }, + { 0 }, + { 0 }, + { 100000000, 200000000, 310000000, + 404000000, 465000000 }, + { 0 }, + { 0 }, + { 0 }, + { 0 } }, + .reg = { "csid2" }, + .interrupt = { "csid2" } + }, + + /* CSID3 */ + { + .regulators = { "vdda", "vdd_sec" }, + .clock = { "top_ahb", "ispif_ahb", "csi3_ahb", "ahb", + "csi3", "csi3_phy", "csi3_pix", "csi3_rdi", + "cphy_csid3" }, + .clock_rate = { { 0 }, + { 0 }, + { 0 }, + { 0 }, + { 100000000, 200000000, 310000000, + 404000000, 465000000 }, + { 0 }, + { 0 }, + { 0 }, + { 0 } }, + .reg = { "csid3" }, + .interrupt = { "csid3" } + } +}; + +static const struct resources_ispif ispif_res_660 = { + /* ISPIF */ + .clock = { "top_ahb", "ahb", "ispif_ahb", + "csi0", "csi0_pix", "csi0_rdi", + "csi1", "csi1_pix", "csi1_rdi", + "csi2", "csi2_pix", "csi2_rdi", + "csi3", "csi3_pix", "csi3_rdi" }, + .clock_for_reset = { "vfe0", "csi_vfe0", "vfe1", "csi_vfe1" }, + .reg = { "ispif", "csi_clk_mux" }, + .interrupt = "ispif" +}; + +static const struct 
resources vfe_res_660[] = { + /* VFE0 */ + { + .regulators = {}, + .clock = { "throttle_axi", "top_ahb", "ahb", "vfe0", + "csi_vfe0", "vfe_ahb", "vfe0_ahb", "vfe_axi", + "vfe0_stream"}, + .clock_rate = { { 0 }, + { 0 }, + { 0 }, + { 120000000, 200000000, 256000000, + 300000000, 404000000, 480000000, + 540000000, 576000000 }, + { 0 }, + { 0 }, + { 0 }, + { 0 }, + { 0 } }, + .reg = { "vfe0" }, + .interrupt = { "vfe0" } + }, + + /* VFE1 */ + { + .regulators = {}, + .clock = { "throttle_axi", "top_ahb", "ahb", "vfe1", + "csi_vfe1", "vfe_ahb", "vfe1_ahb", "vfe_axi", + "vfe1_stream"}, + .clock_rate = { { 0 }, + { 0 }, + { 0 }, + { 120000000, 200000000, 256000000, + 300000000, 404000000, 480000000, + 540000000, 576000000 }, + { 0 }, + { 0 }, + { 0 }, + { 0 }, + { 0 } }, + .reg = { "vfe1" }, + .interrupt = { "vfe1" } + } +}; + +static const struct resources csiphy_res_845[] = { + /* CSIPHY0 */ + { + .regulators = {}, + .clock = { "camnoc_axi", "soc_ahb", "slow_ahb_src", + "cpas_ahb", "cphy_rx_src", "csiphy0", + "csiphy0_timer_src", "csiphy0_timer" }, + .clock_rate = { { 0 }, + { 0 }, + { 0 }, + { 0 }, + { 0 }, + { 0 }, + { 0 }, + { 19200000, 240000000, 269333333 } }, + .reg = { "csiphy0" }, + .interrupt = { "csiphy0" } + }, + + /* CSIPHY1 */ + { + .regulators = {}, + .clock = { "camnoc_axi", "soc_ahb", "slow_ahb_src", + "cpas_ahb", "cphy_rx_src", "csiphy1", + "csiphy1_timer_src", "csiphy1_timer" }, + .clock_rate = { { 0 }, + { 0 }, + { 0 }, + { 0 }, + { 0 }, + { 0 }, + { 0 }, + { 19200000, 240000000, 269333333 } }, + .reg = { "csiphy1" }, + .interrupt = { "csiphy1" } + }, + + /* CSIPHY2 */ + { + .regulators = {}, + .clock = { "camnoc_axi", "soc_ahb", "slow_ahb_src", + "cpas_ahb", "cphy_rx_src", "csiphy2", + "csiphy2_timer_src", "csiphy2_timer" }, + .clock_rate = { { 0 }, + { 0 }, + { 0 }, + { 0 }, + { 0 }, + { 0 }, + { 0 }, + { 19200000, 240000000, 269333333 } }, + .reg = { "csiphy2" }, + .interrupt = { "csiphy2" } + }, + + /* CSIPHY3 */ + { + .regulators = {}, + .clock = { "camnoc_axi", "soc_ahb", "slow_ahb_src", + "cpas_ahb", "cphy_rx_src", "csiphy3", + "csiphy3_timer_src", "csiphy3_timer" }, + .clock_rate = { { 0 }, + { 0 }, + { 0 }, + { 0 }, + { 0 }, + { 0 }, + { 0 }, + { 19200000, 240000000, 269333333 } }, + .reg = { "csiphy3" }, + .interrupt = { "csiphy3" } + } +}; + +static const struct resources csid_res_845[] = { + /* CSID0 */ + { + .regulators = { "vdda-phy", "vdda-pll" }, + .clock = { "cpas_ahb", "cphy_rx_src", "slow_ahb_src", + "soc_ahb", "vfe0", "vfe0_src", + "vfe0_cphy_rx", "csi0", + "csi0_src" }, + .clock_rate = { { 0 }, + { 384000000 }, + { 80000000 }, + { 0 }, + { 19200000, 100000000, 320000000, 404000000, 480000000, 600000000 }, + { 320000000 }, + { 0 }, + { 19200000, 75000000, 384000000, 538666667 }, + { 384000000 } }, + .reg = { "csid0" }, + .interrupt = { "csid0" } + }, + + /* CSID1 */ + { + .regulators = { "vdda-phy", "vdda-pll" }, + .clock = { "cpas_ahb", "cphy_rx_src", "slow_ahb_src", + "soc_ahb", "vfe1", "vfe1_src", + "vfe1_cphy_rx", "csi1", + "csi1_src" }, + .clock_rate = { { 0 }, + { 384000000 }, + { 80000000 }, + { 0 }, + { 19200000, 100000000, 320000000, 404000000, 480000000, 600000000 }, + { 320000000 }, + { 0 }, + { 19200000, 75000000, 384000000, 538666667 }, + { 384000000 } }, + .reg = { "csid1" }, + .interrupt = { "csid1" } + }, + + /* CSID2 */ + { + .regulators = { "vdda-phy", "vdda-pll" }, + .clock = { "cpas_ahb", "cphy_rx_src", "slow_ahb_src", + "soc_ahb", "vfe_lite", "vfe_lite_src", + "vfe_lite_cphy_rx", "csi2", + "csi2_src" }, + .clock_rate = { { 0 }, + { 
384000000 }, + { 80000000 }, + { 0 }, + { 19200000, 100000000, 320000000, 404000000, 480000000, 600000000 }, + { 320000000 }, + { 0 }, + { 19200000, 75000000, 384000000, 538666667 }, + { 384000000 } }, + .reg = { "csid2" }, + .interrupt = { "csid2" } + } +}; + +static const struct resources vfe_res_845[] = { + /* VFE0 */ + { + .regulators = {}, + .clock = { "camnoc_axi", "cpas_ahb", "slow_ahb_src", + "soc_ahb", "vfe0", "vfe0_axi", + "vfe0_src", "csi0", + "csi0_src"}, + .clock_rate = { { 0 }, + { 0 }, + { 80000000 }, + { 0 }, + { 19200000, 100000000, 320000000, 404000000, 480000000, 600000000 }, + { 0 }, + { 320000000 }, + { 19200000, 75000000, 384000000, 538666667 }, + { 384000000 } }, + .reg = { "vfe0" }, + .interrupt = { "vfe0" } + }, + + /* VFE1 */ + { + .regulators = {}, + .clock = { "camnoc_axi", "cpas_ahb", "slow_ahb_src", + "soc_ahb", "vfe1", "vfe1_axi", + "vfe1_src", "csi1", + "csi1_src"}, + .clock_rate = { { 0 }, + { 0 }, + { 80000000 }, + { 0 }, + { 19200000, 100000000, 320000000, 404000000, 480000000, 600000000 }, + { 0 }, + { 320000000 }, + { 19200000, 75000000, 384000000, 538666667 }, + { 384000000 } }, + .reg = { "vfe1" }, + .interrupt = { "vfe1" } + }, + + /* VFE-lite */ + { + .regulators = {}, + .clock = { "camnoc_axi", "cpas_ahb", "slow_ahb_src", + "soc_ahb", "vfe_lite", + "vfe_lite_src", "csi2", + "csi2_src"}, + .clock_rate = { { 0 }, + { 0 }, + { 80000000 }, + { 0 }, + { 19200000, 100000000, 320000000, 404000000, 480000000, 600000000 }, + { 320000000 }, + { 19200000, 75000000, 384000000, 538666667 }, + { 384000000 } }, + .reg = { "vfe_lite" }, + .interrupt = { "vfe_lite" } + } +}; + +static const struct resources csiphy_res_8250[] = { + /* CSIPHY0 */ + { + .regulators = {}, + .clock = { "csiphy0", "csiphy0_timer" }, + .clock_rate = { { 400000000 }, + { 300000000 } }, + .reg = { "csiphy0" }, + .interrupt = { "csiphy0" } + }, + /* CSIPHY1 */ + { + .regulators = {}, + .clock = { "csiphy1", "csiphy1_timer" }, + .clock_rate = { { 400000000 }, + { 300000000 } }, + .reg = { "csiphy1" }, + .interrupt = { "csiphy1" } + }, + /* CSIPHY2 */ + { + .regulators = {}, + .clock = { "csiphy2", "csiphy2_timer" }, + .clock_rate = { { 400000000 }, + { 300000000 } }, + .reg = { "csiphy2" }, + .interrupt = { "csiphy2" } + }, + /* CSIPHY3 */ + { + .regulators = {}, + .clock = { "csiphy3", "csiphy3_timer" }, + .clock_rate = { { 400000000 }, + { 300000000 } }, + .reg = { "csiphy3" }, + .interrupt = { "csiphy3" } + }, + /* CSIPHY4 */ + { + .regulators = {}, + .clock = { "csiphy4", "csiphy4_timer" }, + .clock_rate = { { 400000000 }, + { 300000000 } }, + .reg = { "csiphy4" }, + .interrupt = { "csiphy4" } + }, + /* CSIPHY5 */ + { + .regulators = {}, + .clock = { "csiphy5", "csiphy5_timer" }, + .clock_rate = { { 400000000 }, + { 300000000 } }, + .reg = { "csiphy5" }, + .interrupt = { "csiphy5" } + } +}; + +static const struct resources csid_res_8250[] = { + /* CSID0 */ + { + .regulators = { "vdda-phy", "vdda-pll" }, + .clock = { "vfe0_csid", "vfe0_cphy_rx", "vfe0", "vfe0_areg", "vfe0_ahb" }, + .clock_rate = { { 400000000 }, + { 400000000 }, + { 350000000, 475000000, 576000000, 720000000 }, + { 100000000, 200000000, 300000000, 400000000 }, + { 0 } }, + .reg = { "csid0" }, + .interrupt = { "csid0" } + }, + /* CSID1 */ + { + .regulators = { "vdda-phy", "vdda-pll" }, + .clock = { "vfe1_csid", "vfe1_cphy_rx", "vfe1", "vfe1_areg", "vfe1_ahb" }, + .clock_rate = { { 400000000 }, + { 400000000 }, + { 350000000, 475000000, 576000000, 720000000 }, + { 100000000, 200000000, 300000000, 400000000 }, + { 0 } }, + 
.reg = { "csid1" }, + .interrupt = { "csid1" } + }, + /* CSID2 */ + { + .regulators = { "vdda-phy", "vdda-pll" }, + .clock = { "vfe_lite_csid", "vfe_lite_cphy_rx", "vfe_lite", "vfe_lite_ahb" }, + .clock_rate = { { 400000000 }, + { 400000000 }, + { 400000000, 480000000 }, + { 0 } }, + .reg = { "csid2" }, + .interrupt = { "csid2" } + }, + /* CSID3 */ + { + .regulators = { "vdda-phy", "vdda-pll" }, + .clock = { "vfe_lite_csid", "vfe_lite_cphy_rx", "vfe_lite", "vfe_lite_ahb" }, + .clock_rate = { { 400000000 }, + { 400000000 }, + { 400000000, 480000000 }, + { 0 } }, + .reg = { "csid3" }, + .interrupt = { "csid3" } + } +}; + +static const struct resources vfe_res_8250[] = { + /* VFE0 */ + { + .regulators = {}, + .clock = { "camnoc_axi_src", "slow_ahb_src", "cpas_ahb", + "camnoc_axi", "vfe0_ahb", "vfe0_areg", "vfe0", + "vfe0_axi", "cam_hf_axi" }, + .clock_rate = { { 19200000, 300000000, 400000000, 480000000 }, + { 19200000, 80000000 }, + { 19200000 }, + { 0 }, + { 0 }, + { 100000000, 200000000, 300000000, 400000000 }, + { 350000000, 475000000, 576000000, 720000000 }, + { 0 }, + { 0 } }, + .reg = { "vfe0" }, + .interrupt = { "vfe0" } + }, + /* VFE1 */ + { + .regulators = {}, + .clock = { "camnoc_axi_src", "slow_ahb_src", "cpas_ahb", + "camnoc_axi", "vfe1_ahb", "vfe1_areg", "vfe1", + "vfe1_axi", "cam_hf_axi" }, + .clock_rate = { { 19200000, 300000000, 400000000, 480000000 }, + { 19200000, 80000000 }, + { 19200000 }, + { 0 }, + { 0 }, + { 100000000, 200000000, 300000000, 400000000 }, + { 350000000, 475000000, 576000000, 720000000 }, + { 0 }, + { 0 } }, + .reg = { "vfe1" }, + .interrupt = { "vfe1" } + }, + /* VFE2 (lite) */ + { + .regulators = {}, + .clock = { "camnoc_axi_src", "slow_ahb_src", "cpas_ahb", + "camnoc_axi", "vfe_lite_ahb", "vfe_lite_axi", + "vfe_lite", "cam_hf_axi" }, + .clock_rate = { { 19200000, 300000000, 400000000, 480000000 }, + { 19200000, 80000000 }, + { 19200000 }, + { 0 }, + { 0 }, + { 0 }, + { 400000000, 480000000 }, + { 0 } }, + .reg = { "vfe_lite0" }, + .interrupt = { "vfe_lite0" } + }, + /* VFE3 (lite) */ + { + .regulators = {}, + .clock = { "camnoc_axi_src", "slow_ahb_src", "cpas_ahb", + "camnoc_axi", "vfe_lite_ahb", "vfe_lite_axi", + "vfe_lite", "cam_hf_axi" }, + .clock_rate = { { 19200000, 300000000, 400000000, 480000000 }, + { 19200000, 80000000 }, + { 19200000 }, + { 0 }, + { 0 }, + { 0 }, + { 400000000, 480000000 }, + { 0 } }, + .reg = { "vfe_lite1" }, + .interrupt = { "vfe_lite1" } + }, +}; + +static const struct resources_icc icc_res_sm8250[] = { + { + .name = "cam_ahb", + .icc_bw_tbl.avg = 38400, + .icc_bw_tbl.peak = 76800, + }, + { + .name = "cam_hf_0_mnoc", + .icc_bw_tbl.avg = 2097152, + .icc_bw_tbl.peak = 2097152, + }, + { + .name = "cam_sf_0_mnoc", + .icc_bw_tbl.avg = 0, + .icc_bw_tbl.peak = 2097152, + }, + { + .name = "cam_sf_icp_mnoc", + .icc_bw_tbl.avg = 2097152, + .icc_bw_tbl.peak = 2097152, + }, +}; + +/* + * camss_add_clock_margin - Add margin to clock frequency rate + * @rate: Clock frequency rate + * + * When making calculations with physical clock frequency values + * some safety margin must be added. Add it. 
+ */ +inline void camss_add_clock_margin(u64 *rate) +{ + *rate *= CAMSS_CLOCK_MARGIN_NUMERATOR; + *rate = div_u64(*rate, CAMSS_CLOCK_MARGIN_DENOMINATOR); +} + +/* + * camss_enable_clocks - Enable multiple clocks + * @nclocks: Number of clocks in clock array + * @clock: Clock array + * @dev: Device + * + * Return 0 on success or a negative error code otherwise + */ +int camss_enable_clocks(int nclocks, struct camss_clock *clock, + struct device *dev) +{ + int ret; + int i; + + for (i = 0; i < nclocks; i++) { + ret = clk_prepare_enable(clock[i].clk); + if (ret) { + dev_err(dev, "clock enable failed: %d\n", ret); + goto error; + } + } + + return 0; + +error: + for (i--; i >= 0; i--) + clk_disable_unprepare(clock[i].clk); + + return ret; +} + +/* + * camss_disable_clocks - Disable multiple clocks + * @nclocks: Number of clocks in clock array + * @clock: Clock array + */ +void camss_disable_clocks(int nclocks, struct camss_clock *clock) +{ + int i; + + for (i = nclocks - 1; i >= 0; i--) + clk_disable_unprepare(clock[i].clk); +} + +/* + * camss_find_sensor - Find a linked media entity which represents a sensor + * @entity: Media entity to start searching from + * + * Return a pointer to sensor media entity or NULL if not found + */ +struct media_entity *camss_find_sensor(struct media_entity *entity) +{ + struct media_pad *pad; + + while (1) { + pad = &entity->pads[0]; + if (!(pad->flags & MEDIA_PAD_FL_SINK)) + return NULL; + + pad = media_pad_remote_pad_first(pad); + if (!pad || !is_media_entity_v4l2_subdev(pad->entity)) + return NULL; + + entity = pad->entity; + + if (entity->function == MEDIA_ENT_F_CAM_SENSOR) + return entity; + } +} + +/** + * camss_get_link_freq - Get link frequency from sensor + * @entity: Media entity in the current pipeline + * @bpp: Number of bits per pixel for the current format + * @lanes: Number of lanes in the link to the sensor + * + * Return link frequency on success or a negative error code otherwise + */ +s64 camss_get_link_freq(struct media_entity *entity, unsigned int bpp, + unsigned int lanes) +{ + struct media_entity *sensor; + struct v4l2_subdev *subdev; + + sensor = camss_find_sensor(entity); + if (!sensor) + return -ENODEV; + + subdev = media_entity_to_v4l2_subdev(sensor); + + return v4l2_get_link_freq(subdev->ctrl_handler, bpp, 2 * lanes); +} + +/* + * camss_get_pixel_clock - Get pixel clock rate from sensor + * @entity: Media entity in the current pipeline + * @pixel_clock: Received pixel clock value + * + * Return 0 on success or a negative error code otherwise + */ +int camss_get_pixel_clock(struct media_entity *entity, u64 *pixel_clock) +{ + struct media_entity *sensor; + struct v4l2_subdev *subdev; + struct v4l2_ctrl *ctrl; + + sensor = camss_find_sensor(entity); + if (!sensor) + return -ENODEV; + + subdev = media_entity_to_v4l2_subdev(sensor); + + ctrl = v4l2_ctrl_find(subdev->ctrl_handler, V4L2_CID_PIXEL_RATE); + + if (!ctrl) + return -EINVAL; + + *pixel_clock = v4l2_ctrl_g_ctrl_int64(ctrl); + + return 0; +} + +int camss_pm_domain_on(struct camss *camss, int id) +{ + int ret = 0; + + if (id < camss->vfe_num) { + struct vfe_device *vfe = &camss->vfe[id]; + + ret = vfe->ops->pm_domain_on(vfe); + } + + return ret; +} + +void camss_pm_domain_off(struct camss *camss, int id) +{ + if (id < camss->vfe_num) { + struct vfe_device *vfe = &camss->vfe[id]; + + vfe->ops->pm_domain_off(vfe); + } +} + +/* + * camss_of_parse_endpoint_node - Parse port endpoint node + * @dev: Device + * @node: Device node to be parsed + * @csd: Parsed data from port endpoint node + 
* + * Return 0 on success or a negative error code on failure + */ +static int camss_of_parse_endpoint_node(struct device *dev, + struct device_node *node, + struct camss_async_subdev *csd) +{ + struct csiphy_lanes_cfg *lncfg = &csd->interface.csi2.lane_cfg; + struct v4l2_mbus_config_mipi_csi2 *mipi_csi2; + struct v4l2_fwnode_endpoint vep = { { 0 } }; + unsigned int i; + + v4l2_fwnode_endpoint_parse(of_fwnode_handle(node), &vep); + + csd->interface.csiphy_id = vep.base.port; + + mipi_csi2 = &vep.bus.mipi_csi2; + lncfg->clk.pos = mipi_csi2->clock_lane; + lncfg->clk.pol = mipi_csi2->lane_polarities[0]; + lncfg->num_data = mipi_csi2->num_data_lanes; + + lncfg->data = devm_kcalloc(dev, + lncfg->num_data, sizeof(*lncfg->data), + GFP_KERNEL); + if (!lncfg->data) + return -ENOMEM; + + for (i = 0; i < lncfg->num_data; i++) { + lncfg->data[i].pos = mipi_csi2->data_lanes[i]; + lncfg->data[i].pol = mipi_csi2->lane_polarities[i + 1]; + } + + return 0; +} + +/* + * camss_of_parse_ports - Parse ports node + * @dev: Device + * @notifier: v4l2_device notifier data + * + * Return number of "port" nodes found in "ports" node + */ +static int camss_of_parse_ports(struct camss *camss) +{ + struct device *dev = camss->dev; + struct device_node *node = NULL; + struct device_node *remote = NULL; + int ret, num_subdevs = 0; + + for_each_endpoint_of_node(dev->of_node, node) { + struct camss_async_subdev *csd; + + if (!of_device_is_available(node)) + continue; + + remote = of_graph_get_remote_port_parent(node); + if (!remote) { + dev_err(dev, "Cannot get remote parent\n"); + ret = -EINVAL; + goto err_cleanup; + } + + csd = v4l2_async_nf_add_fwnode(&camss->notifier, + of_fwnode_handle(remote), + struct camss_async_subdev); + of_node_put(remote); + if (IS_ERR(csd)) { + ret = PTR_ERR(csd); + goto err_cleanup; + } + + ret = camss_of_parse_endpoint_node(dev, node, csd); + if (ret < 0) + goto err_cleanup; + + num_subdevs++; + } + + return num_subdevs; + +err_cleanup: + of_node_put(node); + return ret; +} + +/* + * camss_init_subdevices - Initialize subdev structures and resources + * @camss: CAMSS device + * + * Return 0 on success or a negative error code on failure + */ +static int camss_init_subdevices(struct camss *camss) +{ + const struct resources *csiphy_res; + const struct resources *csid_res; + const struct resources_ispif *ispif_res; + const struct resources *vfe_res; + unsigned int i; + int ret; + + if (camss->version == CAMSS_8x16) { + csiphy_res = csiphy_res_8x16; + csid_res = csid_res_8x16; + ispif_res = &ispif_res_8x16; + vfe_res = vfe_res_8x16; + } else if (camss->version == CAMSS_8x96) { + csiphy_res = csiphy_res_8x96; + csid_res = csid_res_8x96; + ispif_res = &ispif_res_8x96; + vfe_res = vfe_res_8x96; + } else if (camss->version == CAMSS_660) { + csiphy_res = csiphy_res_660; + csid_res = csid_res_660; + ispif_res = &ispif_res_660; + vfe_res = vfe_res_660; + } else if (camss->version == CAMSS_845) { + csiphy_res = csiphy_res_845; + csid_res = csid_res_845; + /* Titan VFEs don't have an ISPIF */ + ispif_res = NULL; + vfe_res = vfe_res_845; + } else if (camss->version == CAMSS_8250) { + csiphy_res = csiphy_res_8250; + csid_res = csid_res_8250; + /* Titan VFEs don't have an ISPIF */ + ispif_res = NULL; + vfe_res = vfe_res_8250; + } else { + return -EINVAL; + } + + for (i = 0; i < camss->csiphy_num; i++) { + ret = msm_csiphy_subdev_init(camss, &camss->csiphy[i], + &csiphy_res[i], i); + if (ret < 0) { + dev_err(camss->dev, + "Failed to init csiphy%d sub-device: %d\n", + i, ret); + return ret; + } + } + + /* 
note: SM8250 requires VFE to be initialized before CSID */ + for (i = 0; i < camss->vfe_num + camss->vfe_lite_num; i++) { + ret = msm_vfe_subdev_init(camss, &camss->vfe[i], + &vfe_res[i], i); + if (ret < 0) { + dev_err(camss->dev, + "Fail to init vfe%d sub-device: %d\n", i, ret); + return ret; + } + } + + for (i = 0; i < camss->csid_num; i++) { + ret = msm_csid_subdev_init(camss, &camss->csid[i], + &csid_res[i], i); + if (ret < 0) { + dev_err(camss->dev, + "Failed to init csid%d sub-device: %d\n", + i, ret); + return ret; + } + } + + ret = msm_ispif_subdev_init(camss, ispif_res); + if (ret < 0) { + dev_err(camss->dev, "Failed to init ispif sub-device: %d\n", + ret); + return ret; + } + + return 0; +} + +/* + * camss_register_entities - Register subdev nodes and create links + * @camss: CAMSS device + * + * Return 0 on success or a negative error code on failure + */ +static int camss_register_entities(struct camss *camss) +{ + int i, j, k; + int ret; + + for (i = 0; i < camss->csiphy_num; i++) { + ret = msm_csiphy_register_entity(&camss->csiphy[i], + &camss->v4l2_dev); + if (ret < 0) { + dev_err(camss->dev, + "Failed to register csiphy%d entity: %d\n", + i, ret); + goto err_reg_csiphy; + } + } + + for (i = 0; i < camss->csid_num; i++) { + ret = msm_csid_register_entity(&camss->csid[i], + &camss->v4l2_dev); + if (ret < 0) { + dev_err(camss->dev, + "Failed to register csid%d entity: %d\n", + i, ret); + goto err_reg_csid; + } + } + + ret = msm_ispif_register_entities(camss->ispif, + &camss->v4l2_dev); + if (ret < 0) { + dev_err(camss->dev, "Failed to register ispif entities: %d\n", + ret); + goto err_reg_ispif; + } + + for (i = 0; i < camss->vfe_num + camss->vfe_lite_num; i++) { + ret = msm_vfe_register_entities(&camss->vfe[i], + &camss->v4l2_dev); + if (ret < 0) { + dev_err(camss->dev, + "Failed to register vfe%d entities: %d\n", + i, ret); + goto err_reg_vfe; + } + } + + for (i = 0; i < camss->csiphy_num; i++) { + for (j = 0; j < camss->csid_num; j++) { + ret = media_create_pad_link( + &camss->csiphy[i].subdev.entity, + MSM_CSIPHY_PAD_SRC, + &camss->csid[j].subdev.entity, + MSM_CSID_PAD_SINK, + 0); + if (ret < 0) { + dev_err(camss->dev, + "Failed to link %s->%s entities: %d\n", + camss->csiphy[i].subdev.entity.name, + camss->csid[j].subdev.entity.name, + ret); + goto err_link; + } + } + } + + if (camss->ispif) { + for (i = 0; i < camss->csid_num; i++) { + for (j = 0; j < camss->ispif->line_num; j++) { + ret = media_create_pad_link( + &camss->csid[i].subdev.entity, + MSM_CSID_PAD_SRC, + &camss->ispif->line[j].subdev.entity, + MSM_ISPIF_PAD_SINK, + 0); + if (ret < 0) { + dev_err(camss->dev, + "Failed to link %s->%s entities: %d\n", + camss->csid[i].subdev.entity.name, + camss->ispif->line[j].subdev.entity.name, + ret); + goto err_link; + } + } + } + + for (i = 0; i < camss->ispif->line_num; i++) + for (k = 0; k < camss->vfe_num; k++) + for (j = 0; j < camss->vfe[k].line_num; j++) { + struct v4l2_subdev *ispif = &camss->ispif->line[i].subdev; + struct v4l2_subdev *vfe = &camss->vfe[k].line[j].subdev; + + ret = media_create_pad_link(&ispif->entity, + MSM_ISPIF_PAD_SRC, + &vfe->entity, + MSM_VFE_PAD_SINK, + 0); + if (ret < 0) { + dev_err(camss->dev, + "Failed to link %s->%s entities: %d\n", + ispif->entity.name, + vfe->entity.name, + ret); + goto err_link; + } + } + } else { + for (i = 0; i < camss->csid_num; i++) + for (k = 0; k < camss->vfe_num + camss->vfe_lite_num; k++) + for (j = 0; j < camss->vfe[k].line_num; j++) { + struct v4l2_subdev *csid = &camss->csid[i].subdev; + struct v4l2_subdev 
*vfe = &camss->vfe[k].line[j].subdev; + + ret = media_create_pad_link(&csid->entity, + MSM_CSID_PAD_FIRST_SRC + j, + &vfe->entity, + MSM_VFE_PAD_SINK, + 0); + if (ret < 0) { + dev_err(camss->dev, + "Failed to link %s->%s entities: %d\n", + csid->entity.name, + vfe->entity.name, + ret); + goto err_link; + } + } + } + + return 0; + +err_link: + i = camss->vfe_num + camss->vfe_lite_num; +err_reg_vfe: + for (i--; i >= 0; i--) + msm_vfe_unregister_entities(&camss->vfe[i]); + +err_reg_ispif: + msm_ispif_unregister_entities(camss->ispif); + + i = camss->csid_num; +err_reg_csid: + for (i--; i >= 0; i--) + msm_csid_unregister_entity(&camss->csid[i]); + + i = camss->csiphy_num; +err_reg_csiphy: + for (i--; i >= 0; i--) + msm_csiphy_unregister_entity(&camss->csiphy[i]); + + return ret; +} + +/* + * camss_unregister_entities - Unregister subdev nodes + * @camss: CAMSS device + * + * Return 0 on success or a negative error code on failure + */ +static void camss_unregister_entities(struct camss *camss) +{ + unsigned int i; + + for (i = 0; i < camss->csiphy_num; i++) + msm_csiphy_unregister_entity(&camss->csiphy[i]); + + for (i = 0; i < camss->csid_num; i++) + msm_csid_unregister_entity(&camss->csid[i]); + + msm_ispif_unregister_entities(camss->ispif); + + for (i = 0; i < camss->vfe_num + camss->vfe_lite_num; i++) + msm_vfe_unregister_entities(&camss->vfe[i]); +} + +static int camss_subdev_notifier_bound(struct v4l2_async_notifier *async, + struct v4l2_subdev *subdev, + struct v4l2_async_connection *asd) +{ + struct camss *camss = container_of(async, struct camss, notifier); + struct camss_async_subdev *csd = + container_of(asd, struct camss_async_subdev, asd); + u8 id = csd->interface.csiphy_id; + struct csiphy_device *csiphy = &camss->csiphy[id]; + + csiphy->cfg.csi2 = &csd->interface.csi2; + subdev->host_priv = csiphy; + + return 0; +} + +static int camss_subdev_notifier_complete(struct v4l2_async_notifier *async) +{ + struct camss *camss = container_of(async, struct camss, notifier); + struct v4l2_device *v4l2_dev = &camss->v4l2_dev; + struct v4l2_subdev *sd; + int ret; + + list_for_each_entry(sd, &v4l2_dev->subdevs, list) { + if (sd->host_priv) { + struct media_entity *sensor = &sd->entity; + struct csiphy_device *csiphy = + (struct csiphy_device *) sd->host_priv; + struct media_entity *input = &csiphy->subdev.entity; + unsigned int i; + + for (i = 0; i < sensor->num_pads; i++) { + if (sensor->pads[i].flags & MEDIA_PAD_FL_SOURCE) + break; + } + if (i == sensor->num_pads) { + dev_err(camss->dev, + "No source pad in external entity\n"); + return -EINVAL; + } + + ret = media_create_pad_link(sensor, i, + input, MSM_CSIPHY_PAD_SINK, + MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED); + if (ret < 0) { + dev_err(camss->dev, + "Failed to link %s->%s entities: %d\n", + sensor->name, input->name, ret); + return ret; + } + } + } + + ret = v4l2_device_register_subdev_nodes(&camss->v4l2_dev); + if (ret < 0) + return ret; + + return media_device_register(&camss->media_dev); +} + +static const struct v4l2_async_notifier_operations camss_subdev_notifier_ops = { + .bound = camss_subdev_notifier_bound, + .complete = camss_subdev_notifier_complete, +}; + +static const struct media_device_ops camss_media_ops = { + .link_notify = v4l2_pipeline_link_notify, +}; + +static int camss_configure_pd(struct camss *camss) +{ + struct device *dev = camss->dev; + int i; + int ret; + + camss->genpd_num = of_count_phandle_with_args(dev->of_node, + "power-domains", + "#power-domain-cells"); + if (camss->genpd_num < 0) { + dev_err(dev, 
"Power domains are not defined for camss\n"); + return camss->genpd_num; + } + + /* + * If a platform device has just one power domain, then it is attached + * at platform_probe() level, thus there shall be no need and even no + * option to attach it again, this is the case for CAMSS on MSM8916. + */ + if (camss->genpd_num == 1) + return 0; + + camss->genpd = devm_kmalloc_array(dev, camss->genpd_num, + sizeof(*camss->genpd), GFP_KERNEL); + if (!camss->genpd) + return -ENOMEM; + + camss->genpd_link = devm_kmalloc_array(dev, camss->genpd_num, + sizeof(*camss->genpd_link), + GFP_KERNEL); + if (!camss->genpd_link) + return -ENOMEM; + + /* + * VFE power domains are in the beginning of the list, and while all + * power domains should be attached, only if TITAN_TOP power domain is + * found in the list, it should be linked over here. + */ + for (i = 0; i < camss->genpd_num; i++) { + camss->genpd[i] = dev_pm_domain_attach_by_id(camss->dev, i); + if (IS_ERR(camss->genpd[i])) { + ret = PTR_ERR(camss->genpd[i]); + goto fail_pm; + } + } + + if (i > camss->vfe_num) { + camss->genpd_link[i - 1] = device_link_add(camss->dev, camss->genpd[i - 1], + DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME | + DL_FLAG_RPM_ACTIVE); + if (!camss->genpd_link[i - 1]) { + ret = -EINVAL; + goto fail_pm; + } + } + + return 0; + +fail_pm: + for (--i ; i >= 0; i--) + dev_pm_domain_detach(camss->genpd[i], true); + + return ret; +} + +static int camss_icc_get(struct camss *camss) +{ + const struct resources_icc *icc_res; + int nbr_icc_paths = 0; + int i; + + if (camss->version == CAMSS_8250) { + icc_res = &icc_res_sm8250[0]; + nbr_icc_paths = ICC_SM8250_COUNT; + } + + for (i = 0; i < nbr_icc_paths; i++) { + camss->icc_path[i] = devm_of_icc_get(camss->dev, + icc_res[i].name); + if (IS_ERR(camss->icc_path[i])) + return PTR_ERR(camss->icc_path[i]); + + camss->icc_bw_tbl[i] = icc_res[i].icc_bw_tbl; + } + + return 0; +} + +static void camss_genpd_cleanup(struct camss *camss) +{ + int i; + + if (camss->genpd_num == 1) + return; + + if (camss->genpd_num > camss->vfe_num) + device_link_del(camss->genpd_link[camss->genpd_num - 1]); + + for (i = 0; i < camss->genpd_num; i++) + dev_pm_domain_detach(camss->genpd[i], true); +} + +/* + * camss_probe - Probe CAMSS platform device + * @pdev: Pointer to CAMSS platform device + * + * Return 0 on success or a negative error code on failure + */ +static int camss_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct camss *camss; + int num_subdevs, ret; + + camss = devm_kzalloc(dev, sizeof(*camss), GFP_KERNEL); + if (!camss) + return -ENOMEM; + + atomic_set(&camss->ref_count, 0); + camss->dev = dev; + platform_set_drvdata(pdev, camss); + + if (of_device_is_compatible(dev->of_node, "qcom,msm8916-camss")) { + camss->version = CAMSS_8x16; + camss->csiphy_num = 2; + camss->csid_num = 2; + camss->vfe_num = 1; + } else if (of_device_is_compatible(dev->of_node, + "qcom,msm8996-camss")) { + camss->version = CAMSS_8x96; + camss->csiphy_num = 3; + camss->csid_num = 4; + camss->vfe_num = 2; + } else if (of_device_is_compatible(dev->of_node, + "qcom,sdm660-camss")) { + camss->version = CAMSS_660; + camss->csiphy_num = 3; + camss->csid_num = 4; + camss->vfe_num = 2; + } else if (of_device_is_compatible(dev->of_node, + "qcom,sdm845-camss")) { + camss->version = CAMSS_845; + camss->csiphy_num = 4; + camss->csid_num = 3; + camss->vfe_num = 2; + camss->vfe_lite_num = 1; + } else if (of_device_is_compatible(dev->of_node, + "qcom,sm8250-camss")) { + camss->version = CAMSS_8250; + 
camss->csiphy_num = 6; + camss->csid_num = 4; + camss->vfe_num = 2; + camss->vfe_lite_num = 2; + } else { + return -EINVAL; + } + + camss->csiphy = devm_kcalloc(dev, camss->csiphy_num, + sizeof(*camss->csiphy), GFP_KERNEL); + if (!camss->csiphy) + return -ENOMEM; + + camss->csid = devm_kcalloc(dev, camss->csid_num, sizeof(*camss->csid), + GFP_KERNEL); + if (!camss->csid) + return -ENOMEM; + + if (camss->version == CAMSS_8x16 || + camss->version == CAMSS_8x96) { + camss->ispif = devm_kcalloc(dev, 1, sizeof(*camss->ispif), GFP_KERNEL); + if (!camss->ispif) + return -ENOMEM; + } + + camss->vfe = devm_kcalloc(dev, camss->vfe_num + camss->vfe_lite_num, + sizeof(*camss->vfe), GFP_KERNEL); + if (!camss->vfe) + return -ENOMEM; + + ret = camss_icc_get(camss); + if (ret < 0) + return ret; + + ret = camss_configure_pd(camss); + if (ret < 0) { + dev_err(dev, "Failed to configure power domains: %d\n", ret); + return ret; + } + + ret = camss_init_subdevices(camss); + if (ret < 0) + goto err_genpd_cleanup; + + ret = dma_set_mask_and_coherent(dev, 0xffffffff); + if (ret) + goto err_genpd_cleanup; + + camss->media_dev.dev = camss->dev; + strscpy(camss->media_dev.model, "Qualcomm Camera Subsystem", + sizeof(camss->media_dev.model)); + camss->media_dev.ops = &camss_media_ops; + media_device_init(&camss->media_dev); + + camss->v4l2_dev.mdev = &camss->media_dev; + ret = v4l2_device_register(camss->dev, &camss->v4l2_dev); + if (ret < 0) { + dev_err(dev, "Failed to register V4L2 device: %d\n", ret); + goto err_genpd_cleanup; + } + + v4l2_async_nf_init(&camss->notifier, &camss->v4l2_dev); + + num_subdevs = camss_of_parse_ports(camss); + if (num_subdevs < 0) { + ret = num_subdevs; + goto err_v4l2_device_unregister; + } + + ret = camss_register_entities(camss); + if (ret < 0) + goto err_v4l2_device_unregister; + + if (num_subdevs) { + camss->notifier.ops = &camss_subdev_notifier_ops; + + ret = v4l2_async_nf_register(&camss->notifier); + if (ret) { + dev_err(dev, + "Failed to register async subdev nodes: %d\n", + ret); + goto err_register_subdevs; + } + } else { + ret = v4l2_device_register_subdev_nodes(&camss->v4l2_dev); + if (ret < 0) { + dev_err(dev, "Failed to register subdev nodes: %d\n", + ret); + goto err_register_subdevs; + } + + ret = media_device_register(&camss->media_dev); + if (ret < 0) { + dev_err(dev, "Failed to register media device: %d\n", + ret); + goto err_register_subdevs; + } + } + + pm_runtime_enable(dev); + + return 0; + +err_register_subdevs: + camss_unregister_entities(camss); +err_v4l2_device_unregister: + v4l2_device_unregister(&camss->v4l2_dev); + v4l2_async_nf_cleanup(&camss->notifier); +err_genpd_cleanup: + camss_genpd_cleanup(camss); + + return ret; +} + +void camss_delete(struct camss *camss) +{ + v4l2_device_unregister(&camss->v4l2_dev); + media_device_unregister(&camss->media_dev); + media_device_cleanup(&camss->media_dev); + + pm_runtime_disable(camss->dev); +} + +/* + * camss_remove - Remove CAMSS platform device + * @pdev: Pointer to CAMSS platform device + * + * Always returns 0. 
+ */ +static void camss_remove(struct platform_device *pdev) +{ + struct camss *camss = platform_get_drvdata(pdev); + + v4l2_async_nf_unregister(&camss->notifier); + v4l2_async_nf_cleanup(&camss->notifier); + camss_unregister_entities(camss); + + if (atomic_read(&camss->ref_count) == 0) + camss_delete(camss); + + camss_genpd_cleanup(camss); +} + +static const struct of_device_id camss_dt_match[] = { + { .compatible = "qcom,msm8916-camss" }, + { .compatible = "qcom,msm8996-camss" }, + { .compatible = "qcom,sdm660-camss" }, + { .compatible = "qcom,sdm845-camss" }, + { .compatible = "qcom,sm8250-camss" }, + { } +}; + +MODULE_DEVICE_TABLE(of, camss_dt_match); + +static int __maybe_unused camss_runtime_suspend(struct device *dev) +{ + struct camss *camss = dev_get_drvdata(dev); + int nbr_icc_paths = 0; + int i; + int ret; + + if (camss->version == CAMSS_8250) + nbr_icc_paths = ICC_SM8250_COUNT; + + for (i = 0; i < nbr_icc_paths; i++) { + ret = icc_set_bw(camss->icc_path[i], 0, 0); + if (ret) + return ret; + } + + return 0; +} + +static int __maybe_unused camss_runtime_resume(struct device *dev) +{ + struct camss *camss = dev_get_drvdata(dev); + int nbr_icc_paths = 0; + int i; + int ret; + + if (camss->version == CAMSS_8250) + nbr_icc_paths = ICC_SM8250_COUNT; + + for (i = 0; i < nbr_icc_paths; i++) { + ret = icc_set_bw(camss->icc_path[i], + camss->icc_bw_tbl[i].avg, + camss->icc_bw_tbl[i].peak); + if (ret) + return ret; + } + + return 0; +} + +static const struct dev_pm_ops camss_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) + SET_RUNTIME_PM_OPS(camss_runtime_suspend, camss_runtime_resume, NULL) +}; + +static struct platform_driver qcom_camss_driver = { + .probe = camss_probe, + .remove_new = camss_remove, + .driver = { + .name = "qcom-camss", + .of_match_table = camss_dt_match, + .pm = &camss_pm_ops, + }, +}; + +module_platform_driver(qcom_camss_driver); + +MODULE_ALIAS("platform:qcom-camss"); +MODULE_DESCRIPTION("Qualcomm Camera Subsystem driver"); +MODULE_AUTHOR("Todor Tomov <todor.tomov@linaro.org>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/media/platform/qcom/camss/camss.h b/drivers/media/platform/qcom/camss/camss.h new file mode 100644 index 0000000000..f6c326cb85 --- /dev/null +++ b/drivers/media/platform/qcom/camss/camss.h @@ -0,0 +1,139 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * camss.h + * + * Qualcomm MSM Camera Subsystem - Core + * + * Copyright (c) 2015, The Linux Foundation. All rights reserved. + * Copyright (C) 2015-2018 Linaro Ltd. 
+ */ +#ifndef QC_MSM_CAMSS_H +#define QC_MSM_CAMSS_H + +#include <linux/device.h> +#include <linux/types.h> +#include <media/v4l2-async.h> +#include <media/v4l2-device.h> +#include <media/v4l2-subdev.h> +#include <media/media-device.h> +#include <media/media-entity.h> + +#include "camss-csid.h" +#include "camss-csiphy.h" +#include "camss-ispif.h" +#include "camss-vfe.h" + +#define to_camss(ptr_module) \ + container_of(ptr_module, struct camss, ptr_module) + +#define to_device(ptr_module) \ + (to_camss(ptr_module)->dev) + +#define module_pointer(ptr_module, index) \ + ((const struct ptr_module##_device (*)[]) &(ptr_module[-(index)])) + +#define to_camss_index(ptr_module, index) \ + container_of(module_pointer(ptr_module, index), \ + struct camss, ptr_module) + +#define to_device_index(ptr_module, index) \ + (to_camss_index(ptr_module, index)->dev) + +#define CAMSS_RES_MAX 17 + +struct resources { + char *regulators[CAMSS_RES_MAX]; + char *clock[CAMSS_RES_MAX]; + u32 clock_rate[CAMSS_RES_MAX][CAMSS_RES_MAX]; + char *reg[CAMSS_RES_MAX]; + char *interrupt[CAMSS_RES_MAX]; +}; + +struct resources_ispif { + char *clock[CAMSS_RES_MAX]; + char *clock_for_reset[CAMSS_RES_MAX]; + char *reg[CAMSS_RES_MAX]; + char *interrupt; +}; + +struct icc_bw_tbl { + u32 avg; + u32 peak; +}; + +struct resources_icc { + char *name; + struct icc_bw_tbl icc_bw_tbl; +}; + +enum pm_domain { + PM_DOMAIN_VFE0 = 0, + PM_DOMAIN_VFE1 = 1, + PM_DOMAIN_VFELITE = 2, /* VFELITE / TOP GDSC */ +}; + +enum camss_version { + CAMSS_8x16, + CAMSS_8x96, + CAMSS_660, + CAMSS_845, + CAMSS_8250, +}; + +enum icc_count { + ICC_DEFAULT_COUNT = 0, + ICC_SM8250_COUNT = 4, +}; + +struct camss { + enum camss_version version; + struct v4l2_device v4l2_dev; + struct v4l2_async_notifier notifier; + struct media_device media_dev; + struct device *dev; + int csiphy_num; + struct csiphy_device *csiphy; + int csid_num; + struct csid_device *csid; + struct ispif_device *ispif; + int vfe_num; + int vfe_lite_num; + struct vfe_device *vfe; + atomic_t ref_count; + int genpd_num; + struct device **genpd; + struct device_link **genpd_link; + struct icc_path *icc_path[ICC_SM8250_COUNT]; + struct icc_bw_tbl icc_bw_tbl[ICC_SM8250_COUNT]; +}; + +struct camss_camera_interface { + u8 csiphy_id; + struct csiphy_csi2_cfg csi2; +}; + +struct camss_async_subdev { + struct v4l2_async_connection asd; /* must be first */ + struct camss_camera_interface interface; +}; + +struct camss_clock { + struct clk *clk; + const char *name; + u32 *freq; + u32 nfreqs; +}; + +void camss_add_clock_margin(u64 *rate); +int camss_enable_clocks(int nclocks, struct camss_clock *clock, + struct device *dev); +void camss_disable_clocks(int nclocks, struct camss_clock *clock); +struct media_entity *camss_find_sensor(struct media_entity *entity); +s64 camss_get_link_freq(struct media_entity *entity, unsigned int bpp, + unsigned int lanes); +int camss_get_pixel_clock(struct media_entity *entity, u64 *pixel_clock); +int camss_pm_domain_on(struct camss *camss, int id); +void camss_pm_domain_off(struct camss *camss, int id); +void camss_delete(struct camss *camss); + +#endif /* QC_MSM_CAMSS_H */ diff --git a/drivers/media/platform/qcom/venus/Kconfig b/drivers/media/platform/qcom/venus/Kconfig new file mode 100644 index 0000000000..bfd50e8f34 --- /dev/null +++ b/drivers/media/platform/qcom/venus/Kconfig @@ -0,0 +1,14 @@ +config VIDEO_QCOM_VENUS + tristate "Qualcomm Venus V4L2 encoder/decoder driver" + depends on V4L_MEM2MEM_DRIVERS + depends on VIDEO_DEV && QCOM_SMEM + depends on (ARCH_QCOM && 
IOMMU_DMA) || COMPILE_TEST + select QCOM_MDT_LOADER if ARCH_QCOM + select QCOM_SCM + select VIDEOBUF2_DMA_CONTIG + select V4L2_MEM2MEM_DEV + help + This is a V4L2 driver for Qualcomm Venus video accelerator + hardware. It accelerates encoding and decoding operations + on various Qualcomm SoCs. + To compile this driver as a module choose m here. diff --git a/drivers/media/platform/qcom/venus/Makefile b/drivers/media/platform/qcom/venus/Makefile new file mode 100644 index 0000000000..91ee6be102 --- /dev/null +++ b/drivers/media/platform/qcom/venus/Makefile @@ -0,0 +1,15 @@ +# SPDX-License-Identifier: GPL-2.0 +# Makefile for Qualcomm Venus driver + +venus-core-objs += core.o helpers.o firmware.o \ + hfi_venus.o hfi_msgs.o hfi_cmds.o hfi.o \ + hfi_parser.o pm_helpers.o dbgfs.o \ + hfi_platform.o hfi_platform_v4.o \ + hfi_platform_v6.o hfi_plat_bufs_v6.o \ + +venus-dec-objs += vdec.o vdec_ctrls.o +venus-enc-objs += venc.o venc_ctrls.o + +obj-$(CONFIG_VIDEO_QCOM_VENUS) += venus-core.o +obj-$(CONFIG_VIDEO_QCOM_VENUS) += venus-dec.o +obj-$(CONFIG_VIDEO_QCOM_VENUS) += venus-enc.o diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c new file mode 100644 index 0000000000..054b8e74ba --- /dev/null +++ b/drivers/media/platform/qcom/venus/core.c @@ -0,0 +1,914 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. + * Copyright (C) 2017 Linaro Ltd. + */ +#include <linux/init.h> +#include <linux/interconnect.h> +#include <linux/io.h> +#include <linux/ioctl.h> +#include <linux/delay.h> +#include <linux/devcoredump.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/types.h> +#include <linux/pm_runtime.h> +#include <media/videobuf2-v4l2.h> +#include <media/v4l2-mem2mem.h> +#include <media/v4l2-ioctl.h> + +#include "core.h" +#include "firmware.h" +#include "pm_helpers.h" +#include "hfi_venus_io.h" + +static void venus_coredump(struct venus_core *core) +{ + struct device *dev; + phys_addr_t mem_phys; + size_t mem_size; + void *mem_va; + void *data; + + dev = core->dev; + mem_phys = core->fw.mem_phys; + mem_size = core->fw.mem_size; + + mem_va = memremap(mem_phys, mem_size, MEMREMAP_WC); + if (!mem_va) + return; + + data = vmalloc(mem_size); + if (!data) { + memunmap(mem_va); + return; + } + + memcpy(data, mem_va, mem_size); + memunmap(mem_va); + dev_coredumpv(dev, data, mem_size, GFP_KERNEL); +} + +static void venus_event_notify(struct venus_core *core, u32 event) +{ + struct venus_inst *inst; + + switch (event) { + case EVT_SYS_WATCHDOG_TIMEOUT: + case EVT_SYS_ERROR: + break; + default: + return; + } + + mutex_lock(&core->lock); + set_bit(0, &core->sys_error); + list_for_each_entry(inst, &core->instances, list) + inst->ops->event_notify(inst, EVT_SESSION_ERROR, NULL); + mutex_unlock(&core->lock); + + disable_irq_nosync(core->irq); + schedule_delayed_work(&core->work, msecs_to_jiffies(10)); +} + +static const struct hfi_core_ops venus_core_ops = { + .event_notify = venus_event_notify, +}; + +#define RPM_WAIT_FOR_IDLE_MAX_ATTEMPTS 10 + +static void venus_sys_error_handler(struct work_struct *work) +{ + struct venus_core *core = + container_of(work, struct venus_core, work.work); + int ret, i, max_attempts = RPM_WAIT_FOR_IDLE_MAX_ATTEMPTS; + const char *err_msg = ""; + bool failed = false; + + ret = pm_runtime_get_sync(core->dev); + if (ret < 0) { + err_msg = "resume 
runtime PM"; + max_attempts = 0; + failed = true; + } + + core->ops->core_deinit(core); + core->state = CORE_UNINIT; + + for (i = 0; i < max_attempts; i++) { + if (!pm_runtime_active(core->dev_dec) && !pm_runtime_active(core->dev_enc)) + break; + msleep(10); + } + + mutex_lock(&core->lock); + + venus_shutdown(core); + + venus_coredump(core); + + pm_runtime_put_sync(core->dev); + + for (i = 0; i < max_attempts; i++) { + if (!core->pmdomains[0] || !pm_runtime_active(core->pmdomains[0])) + break; + usleep_range(1000, 1500); + } + + hfi_reinit(core); + + ret = pm_runtime_get_sync(core->dev); + if (ret < 0) { + err_msg = "resume runtime PM"; + failed = true; + } + + ret = venus_boot(core); + if (ret && !failed) { + err_msg = "boot Venus"; + failed = true; + } + + ret = hfi_core_resume(core, true); + if (ret && !failed) { + err_msg = "resume HFI"; + failed = true; + } + + enable_irq(core->irq); + + mutex_unlock(&core->lock); + + ret = hfi_core_init(core); + if (ret && !failed) { + err_msg = "init HFI"; + failed = true; + } + + pm_runtime_put_sync(core->dev); + + if (failed) { + disable_irq_nosync(core->irq); + dev_warn_ratelimited(core->dev, + "System error has occurred, recovery failed to %s\n", + err_msg); + schedule_delayed_work(&core->work, msecs_to_jiffies(10)); + return; + } + + dev_warn(core->dev, "system error has occurred (recovered)\n"); + + mutex_lock(&core->lock); + clear_bit(0, &core->sys_error); + wake_up_all(&core->sys_err_done); + mutex_unlock(&core->lock); +} + +static u32 to_v4l2_codec_type(u32 codec) +{ + switch (codec) { + case HFI_VIDEO_CODEC_H264: + return V4L2_PIX_FMT_H264; + case HFI_VIDEO_CODEC_H263: + return V4L2_PIX_FMT_H263; + case HFI_VIDEO_CODEC_MPEG1: + return V4L2_PIX_FMT_MPEG1; + case HFI_VIDEO_CODEC_MPEG2: + return V4L2_PIX_FMT_MPEG2; + case HFI_VIDEO_CODEC_MPEG4: + return V4L2_PIX_FMT_MPEG4; + case HFI_VIDEO_CODEC_VC1: + return V4L2_PIX_FMT_VC1_ANNEX_G; + case HFI_VIDEO_CODEC_VP8: + return V4L2_PIX_FMT_VP8; + case HFI_VIDEO_CODEC_VP9: + return V4L2_PIX_FMT_VP9; + case HFI_VIDEO_CODEC_DIVX: + case HFI_VIDEO_CODEC_DIVX_311: + return V4L2_PIX_FMT_XVID; + default: + return 0; + } +} + +static int venus_enumerate_codecs(struct venus_core *core, u32 type) +{ + const struct hfi_inst_ops dummy_ops = {}; + struct venus_inst *inst; + u32 codec, codecs; + unsigned int i; + int ret; + + if (core->res->hfi_version != HFI_VERSION_1XX) + return 0; + + inst = kzalloc(sizeof(*inst), GFP_KERNEL); + if (!inst) + return -ENOMEM; + + mutex_init(&inst->lock); + inst->core = core; + inst->session_type = type; + if (type == VIDC_SESSION_TYPE_DEC) + codecs = core->dec_codecs; + else + codecs = core->enc_codecs; + + ret = hfi_session_create(inst, &dummy_ops); + if (ret) + goto err; + + for (i = 0; i < MAX_CODEC_NUM; i++) { + codec = (1UL << i) & codecs; + if (!codec) + continue; + + ret = hfi_session_init(inst, to_v4l2_codec_type(codec)); + if (ret) + goto done; + + ret = hfi_session_deinit(inst); + if (ret) + goto done; + } + +done: + hfi_session_destroy(inst); +err: + mutex_destroy(&inst->lock); + kfree(inst); + + return ret; +} + +static void venus_assign_register_offsets(struct venus_core *core) +{ + if (IS_IRIS2(core) || IS_IRIS2_1(core)) { + core->vbif_base = core->base + VBIF_BASE; + core->cpu_base = core->base + CPU_BASE_V6; + core->cpu_cs_base = core->base + CPU_CS_BASE_V6; + core->cpu_ic_base = core->base + CPU_IC_BASE_V6; + core->wrapper_base = core->base + WRAPPER_BASE_V6; + core->wrapper_tz_base = core->base + WRAPPER_TZ_BASE_V6; + core->aon_base = core->base + AON_BASE_V6; 
+ } else { + core->vbif_base = core->base + VBIF_BASE; + core->cpu_base = core->base + CPU_BASE; + core->cpu_cs_base = core->base + CPU_CS_BASE; + core->cpu_ic_base = core->base + CPU_IC_BASE; + core->wrapper_base = core->base + WRAPPER_BASE; + core->wrapper_tz_base = NULL; + core->aon_base = NULL; + } +} + +static irqreturn_t venus_isr_thread(int irq, void *dev_id) +{ + struct venus_core *core = dev_id; + irqreturn_t ret; + + ret = hfi_isr_thread(irq, dev_id); + + if (ret == IRQ_HANDLED && venus_fault_inject_ssr()) + hfi_core_trigger_ssr(core, HFI_TEST_SSR_SW_ERR_FATAL); + + return ret; +} + +static int venus_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct venus_core *core; + int ret; + + core = devm_kzalloc(dev, sizeof(*core), GFP_KERNEL); + if (!core) + return -ENOMEM; + + core->dev = dev; + + core->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(core->base)) + return PTR_ERR(core->base); + + core->video_path = devm_of_icc_get(dev, "video-mem"); + if (IS_ERR(core->video_path)) + return PTR_ERR(core->video_path); + + core->cpucfg_path = devm_of_icc_get(dev, "cpu-cfg"); + if (IS_ERR(core->cpucfg_path)) + return PTR_ERR(core->cpucfg_path); + + core->irq = platform_get_irq(pdev, 0); + if (core->irq < 0) + return core->irq; + + core->res = of_device_get_match_data(dev); + if (!core->res) + return -ENODEV; + + mutex_init(&core->pm_lock); + + core->pm_ops = venus_pm_get(core->res->hfi_version); + if (!core->pm_ops) + return -ENODEV; + + if (core->pm_ops->core_get) { + ret = core->pm_ops->core_get(core); + if (ret) + return ret; + } + + ret = dma_set_mask_and_coherent(dev, core->res->dma_mask); + if (ret) + goto err_core_put; + + dma_set_max_seg_size(dev, UINT_MAX); + + INIT_LIST_HEAD(&core->instances); + mutex_init(&core->lock); + INIT_DELAYED_WORK(&core->work, venus_sys_error_handler); + init_waitqueue_head(&core->sys_err_done); + + ret = devm_request_threaded_irq(dev, core->irq, hfi_isr, venus_isr_thread, + IRQF_TRIGGER_HIGH | IRQF_ONESHOT, + "venus", core); + if (ret) + goto err_core_put; + + ret = hfi_create(core, &venus_core_ops); + if (ret) + goto err_core_put; + + venus_assign_register_offsets(core); + + ret = v4l2_device_register(dev, &core->v4l2_dev); + if (ret) + goto err_core_deinit; + + platform_set_drvdata(pdev, core); + + pm_runtime_enable(dev); + + ret = pm_runtime_get_sync(dev); + if (ret < 0) + goto err_runtime_disable; + + ret = of_platform_populate(dev->of_node, NULL, NULL, dev); + if (ret) + goto err_runtime_disable; + + ret = venus_firmware_init(core); + if (ret) + goto err_of_depopulate; + + ret = venus_boot(core); + if (ret) + goto err_firmware_deinit; + + ret = hfi_core_resume(core, true); + if (ret) + goto err_venus_shutdown; + + ret = hfi_core_init(core); + if (ret) + goto err_venus_shutdown; + + ret = venus_enumerate_codecs(core, VIDC_SESSION_TYPE_DEC); + if (ret) + goto err_venus_shutdown; + + ret = venus_enumerate_codecs(core, VIDC_SESSION_TYPE_ENC); + if (ret) + goto err_venus_shutdown; + + ret = pm_runtime_put_sync(dev); + if (ret) { + pm_runtime_get_noresume(dev); + goto err_dev_unregister; + } + + venus_dbgfs_init(core); + + return 0; + +err_dev_unregister: + v4l2_device_unregister(&core->v4l2_dev); +err_venus_shutdown: + venus_shutdown(core); +err_firmware_deinit: + venus_firmware_deinit(core); +err_of_depopulate: + of_platform_depopulate(dev); +err_runtime_disable: + pm_runtime_put_noidle(dev); + pm_runtime_set_suspended(dev); + pm_runtime_disable(dev); + hfi_destroy(core); +err_core_deinit: + 
hfi_core_deinit(core, false); +err_core_put: + if (core->pm_ops->core_put) + core->pm_ops->core_put(core); + return ret; +} + +static void venus_remove(struct platform_device *pdev) +{ + struct venus_core *core = platform_get_drvdata(pdev); + const struct venus_pm_ops *pm_ops = core->pm_ops; + struct device *dev = core->dev; + int ret; + + ret = pm_runtime_get_sync(dev); + WARN_ON(ret < 0); + + ret = hfi_core_deinit(core, true); + WARN_ON(ret); + + venus_shutdown(core); + of_platform_depopulate(dev); + + venus_firmware_deinit(core); + + pm_runtime_put_sync(dev); + pm_runtime_disable(dev); + + if (pm_ops->core_put) + pm_ops->core_put(core); + + v4l2_device_unregister(&core->v4l2_dev); + + hfi_destroy(core); + + mutex_destroy(&core->pm_lock); + mutex_destroy(&core->lock); + venus_dbgfs_deinit(core); +} + +static void venus_core_shutdown(struct platform_device *pdev) +{ + struct venus_core *core = platform_get_drvdata(pdev); + + pm_runtime_get_sync(core->dev); + venus_shutdown(core); + venus_firmware_deinit(core); + pm_runtime_put_sync(core->dev); +} + +static __maybe_unused int venus_runtime_suspend(struct device *dev) +{ + struct venus_core *core = dev_get_drvdata(dev); + const struct venus_pm_ops *pm_ops = core->pm_ops; + int ret; + + ret = hfi_core_suspend(core); + if (ret) + return ret; + + if (pm_ops->core_power) { + ret = pm_ops->core_power(core, POWER_OFF); + if (ret) + return ret; + } + + ret = icc_set_bw(core->cpucfg_path, 0, 0); + if (ret) + goto err_cpucfg_path; + + ret = icc_set_bw(core->video_path, 0, 0); + if (ret) + goto err_video_path; + + return ret; + +err_video_path: + icc_set_bw(core->cpucfg_path, kbps_to_icc(1000), 0); +err_cpucfg_path: + if (pm_ops->core_power) + pm_ops->core_power(core, POWER_ON); + + return ret; +} + +static __maybe_unused int venus_runtime_resume(struct device *dev) +{ + struct venus_core *core = dev_get_drvdata(dev); + const struct venus_pm_ops *pm_ops = core->pm_ops; + int ret; + + ret = icc_set_bw(core->video_path, kbps_to_icc(20000), 0); + if (ret) + return ret; + + ret = icc_set_bw(core->cpucfg_path, kbps_to_icc(1000), 0); + if (ret) + return ret; + + if (pm_ops->core_power) { + ret = pm_ops->core_power(core, POWER_ON); + if (ret) + return ret; + } + + return hfi_core_resume(core, false); +} + +static const struct dev_pm_ops venus_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) + SET_RUNTIME_PM_OPS(venus_runtime_suspend, venus_runtime_resume, NULL) +}; + +static const struct freq_tbl msm8916_freq_table[] = { + { 352800, 228570000 }, /* 1920x1088 @ 30 + 1280x720 @ 30 */ + { 244800, 160000000 }, /* 1920x1088 @ 30 */ + { 108000, 100000000 }, /* 1280x720 @ 30 */ +}; + +static const struct reg_val msm8916_reg_preset[] = { + { 0xe0020, 0x05555556 }, + { 0xe0024, 0x05555556 }, + { 0x80124, 0x00000003 }, +}; + +static const struct venus_resources msm8916_res = { + .freq_tbl = msm8916_freq_table, + .freq_tbl_size = ARRAY_SIZE(msm8916_freq_table), + .reg_tbl = msm8916_reg_preset, + .reg_tbl_size = ARRAY_SIZE(msm8916_reg_preset), + .clks = { "core", "iface", "bus", }, + .clks_num = 3, + .max_load = 352800, /* 720p@30 + 1080p@30 */ + .hfi_version = HFI_VERSION_1XX, + .vmem_id = VIDC_RESOURCE_NONE, + .vmem_size = 0, + .vmem_addr = 0, + .dma_mask = 0xddc00000 - 1, + .fwname = "qcom/venus-1.8/venus.mdt", +}; + +static const struct freq_tbl msm8996_freq_table[] = { + { 1944000, 520000000 }, /* 4k UHD @ 60 (decode only) */ + { 972000, 520000000 }, /* 4k UHD @ 30 */ + { 489600, 346666667 }, /* 1080p @ 60 */ + { 244800, 
150000000 }, /* 1080p @ 30 */ + { 108000, 75000000 }, /* 720p @ 30 */ +}; + +static const struct reg_val msm8996_reg_preset[] = { + { 0x80010, 0xffffffff }, + { 0x80018, 0x00001556 }, + { 0x8001C, 0x00001556 }, +}; + +static const struct venus_resources msm8996_res = { + .freq_tbl = msm8996_freq_table, + .freq_tbl_size = ARRAY_SIZE(msm8996_freq_table), + .reg_tbl = msm8996_reg_preset, + .reg_tbl_size = ARRAY_SIZE(msm8996_reg_preset), + .clks = {"core", "iface", "bus", "mbus" }, + .clks_num = 4, + .vcodec0_clks = { "core" }, + .vcodec1_clks = { "core" }, + .vcodec_clks_num = 1, + .max_load = 2563200, + .hfi_version = HFI_VERSION_3XX, + .vmem_id = VIDC_RESOURCE_NONE, + .vmem_size = 0, + .vmem_addr = 0, + .dma_mask = 0xddc00000 - 1, + .fwname = "qcom/venus-4.2/venus.mdt", +}; + +static const struct freq_tbl sdm660_freq_table[] = { + { 979200, 518400000 }, + { 489600, 441600000 }, + { 432000, 404000000 }, + { 244800, 320000000 }, + { 216000, 269330000 }, + { 108000, 133330000 }, +}; + +static const struct reg_val sdm660_reg_preset[] = { + { 0x80010, 0x001f001f }, + { 0x80018, 0x00000156 }, + { 0x8001c, 0x00000156 }, +}; + +static const struct bw_tbl sdm660_bw_table_enc[] = { + { 979200, 1044000, 0, 2446336, 0 }, /* 4k UHD @ 30 */ + { 864000, 887000, 0, 2108416, 0 }, /* 720p @ 240 */ + { 489600, 666000, 0, 1207296, 0 }, /* 1080p @ 60 */ + { 432000, 578000, 0, 1058816, 0 }, /* 720p @ 120 */ + { 244800, 346000, 0, 616448, 0 }, /* 1080p @ 30 */ + { 216000, 293000, 0, 534528, 0 }, /* 720p @ 60 */ + { 108000, 151000, 0, 271360, 0 }, /* 720p @ 30 */ +}; + +static const struct bw_tbl sdm660_bw_table_dec[] = { + { 979200, 2365000, 0, 1892000, 0 }, /* 4k UHD @ 30 */ + { 864000, 1978000, 0, 1554000, 0 }, /* 720p @ 240 */ + { 489600, 1133000, 0, 895000, 0 }, /* 1080p @ 60 */ + { 432000, 994000, 0, 781000, 0 }, /* 720p @ 120 */ + { 244800, 580000, 0, 460000, 0 }, /* 1080p @ 30 */ + { 216000, 501000, 0, 301000, 0 }, /* 720p @ 60 */ + { 108000, 255000, 0, 202000, 0 }, /* 720p @ 30 */ +}; + +static const struct venus_resources sdm660_res = { + .freq_tbl = sdm660_freq_table, + .freq_tbl_size = ARRAY_SIZE(sdm660_freq_table), + .reg_tbl = sdm660_reg_preset, + .reg_tbl_size = ARRAY_SIZE(sdm660_reg_preset), + .bw_tbl_enc = sdm660_bw_table_enc, + .bw_tbl_enc_size = ARRAY_SIZE(sdm660_bw_table_enc), + .bw_tbl_dec = sdm660_bw_table_dec, + .bw_tbl_dec_size = ARRAY_SIZE(sdm660_bw_table_dec), + .clks = {"core", "iface", "bus", "bus_throttle" }, + .clks_num = 4, + .vcodec0_clks = { "vcodec0_core" }, + .vcodec1_clks = { "vcodec0_core" }, + .vcodec_clks_num = 1, + .vcodec_num = 1, + .max_load = 1036800, + .hfi_version = HFI_VERSION_3XX, + .vmem_id = VIDC_RESOURCE_NONE, + .vmem_size = 0, + .vmem_addr = 0, + .cp_start = 0, + .cp_size = 0x79000000, + .cp_nonpixel_start = 0x1000000, + .cp_nonpixel_size = 0x28000000, + .dma_mask = 0xd9000000 - 1, + .fwname = "qcom/venus-4.4/venus.mdt", +}; + +static const struct freq_tbl sdm845_freq_table[] = { + { 3110400, 533000000 }, /* 4096x2160@90 */ + { 2073600, 444000000 }, /* 4096x2160@60 */ + { 1944000, 404000000 }, /* 3840x2160@60 */ + { 972000, 330000000 }, /* 3840x2160@30 */ + { 489600, 200000000 }, /* 1920x1080@60 */ + { 244800, 100000000 }, /* 1920x1080@30 */ +}; + +static const struct bw_tbl sdm845_bw_table_enc[] = { + { 1944000, 1612000, 0, 2416000, 0 }, /* 3840x2160@60 */ + { 972000, 951000, 0, 1434000, 0 }, /* 3840x2160@30 */ + { 489600, 723000, 0, 973000, 0 }, /* 1920x1080@60 */ + { 244800, 370000, 0, 495000, 0 }, /* 1920x1080@30 */ +}; + +static const struct bw_tbl 
sdm845_bw_table_dec[] = { + { 2073600, 3929000, 0, 5551000, 0 }, /* 4096x2160@60 */ + { 1036800, 1987000, 0, 2797000, 0 }, /* 4096x2160@30 */ + { 489600, 1040000, 0, 1298000, 0 }, /* 1920x1080@60 */ + { 244800, 530000, 0, 659000, 0 }, /* 1920x1080@30 */ +}; + +static const struct venus_resources sdm845_res = { + .freq_tbl = sdm845_freq_table, + .freq_tbl_size = ARRAY_SIZE(sdm845_freq_table), + .bw_tbl_enc = sdm845_bw_table_enc, + .bw_tbl_enc_size = ARRAY_SIZE(sdm845_bw_table_enc), + .bw_tbl_dec = sdm845_bw_table_dec, + .bw_tbl_dec_size = ARRAY_SIZE(sdm845_bw_table_dec), + .clks = {"core", "iface", "bus" }, + .clks_num = 3, + .vcodec0_clks = { "core", "bus" }, + .vcodec1_clks = { "core", "bus" }, + .vcodec_clks_num = 2, + .max_load = 3110400, /* 4096x2160@90 */ + .hfi_version = HFI_VERSION_4XX, + .vpu_version = VPU_VERSION_AR50, + .vmem_id = VIDC_RESOURCE_NONE, + .vmem_size = 0, + .vmem_addr = 0, + .dma_mask = 0xe0000000 - 1, + .fwname = "qcom/venus-5.2/venus.mdt", +}; + +static const struct venus_resources sdm845_res_v2 = { + .freq_tbl = sdm845_freq_table, + .freq_tbl_size = ARRAY_SIZE(sdm845_freq_table), + .bw_tbl_enc = sdm845_bw_table_enc, + .bw_tbl_enc_size = ARRAY_SIZE(sdm845_bw_table_enc), + .bw_tbl_dec = sdm845_bw_table_dec, + .bw_tbl_dec_size = ARRAY_SIZE(sdm845_bw_table_dec), + .clks = {"core", "iface", "bus" }, + .clks_num = 3, + .vcodec0_clks = { "vcodec0_core", "vcodec0_bus" }, + .vcodec1_clks = { "vcodec1_core", "vcodec1_bus" }, + .vcodec_clks_num = 2, + .vcodec_pmdomains = { "venus", "vcodec0", "vcodec1" }, + .vcodec_pmdomains_num = 3, + .opp_pmdomain = (const char *[]) { "cx", NULL }, + .vcodec_num = 2, + .max_load = 3110400, /* 4096x2160@90 */ + .hfi_version = HFI_VERSION_4XX, + .vpu_version = VPU_VERSION_AR50, + .vmem_id = VIDC_RESOURCE_NONE, + .vmem_size = 0, + .vmem_addr = 0, + .dma_mask = 0xe0000000 - 1, + .cp_start = 0, + .cp_size = 0x70800000, + .cp_nonpixel_start = 0x1000000, + .cp_nonpixel_size = 0x24800000, + .fwname = "qcom/venus-5.2/venus.mdt", +}; + +static const struct freq_tbl sc7180_freq_table[] = { + { 0, 500000000 }, + { 0, 434000000 }, + { 0, 340000000 }, + { 0, 270000000 }, + { 0, 150000000 }, +}; + +static const struct bw_tbl sc7180_bw_table_enc[] = { + { 972000, 750000, 0, 0, 0 }, /* 3840x2160@30 */ + { 489600, 451000, 0, 0, 0 }, /* 1920x1080@60 */ + { 244800, 234000, 0, 0, 0 }, /* 1920x1080@30 */ +}; + +static const struct bw_tbl sc7180_bw_table_dec[] = { + { 1036800, 1386000, 0, 1875000, 0 }, /* 4096x2160@30 */ + { 489600, 865000, 0, 1146000, 0 }, /* 1920x1080@60 */ + { 244800, 530000, 0, 583000, 0 }, /* 1920x1080@30 */ +}; + +static const struct venus_resources sc7180_res = { + .freq_tbl = sc7180_freq_table, + .freq_tbl_size = ARRAY_SIZE(sc7180_freq_table), + .bw_tbl_enc = sc7180_bw_table_enc, + .bw_tbl_enc_size = ARRAY_SIZE(sc7180_bw_table_enc), + .bw_tbl_dec = sc7180_bw_table_dec, + .bw_tbl_dec_size = ARRAY_SIZE(sc7180_bw_table_dec), + .clks = {"core", "iface", "bus" }, + .clks_num = 3, + .vcodec0_clks = { "vcodec0_core", "vcodec0_bus" }, + .vcodec_clks_num = 2, + .vcodec_pmdomains = { "venus", "vcodec0" }, + .vcodec_pmdomains_num = 2, + .opp_pmdomain = (const char *[]) { "cx", NULL }, + .vcodec_num = 1, + .hfi_version = HFI_VERSION_4XX, + .vpu_version = VPU_VERSION_AR50, + .vmem_id = VIDC_RESOURCE_NONE, + .vmem_size = 0, + .vmem_addr = 0, + .dma_mask = 0xe0000000 - 1, + .cp_start = 0, + .cp_size = 0x70800000, + .cp_nonpixel_start = 0x1000000, + .cp_nonpixel_size = 0x24800000, + .fwname = "qcom/venus-5.4/venus.mdt", +}; + +static const struct 
freq_tbl sm8250_freq_table[] = { + { 0, 444000000 }, + { 0, 366000000 }, + { 0, 338000000 }, + { 0, 240000000 }, +}; + +static const struct bw_tbl sm8250_bw_table_enc[] = { + { 1944000, 1954000, 0, 3711000, 0 }, /* 3840x2160@60 */ + { 972000, 996000, 0, 1905000, 0 }, /* 3840x2160@30 */ + { 489600, 645000, 0, 977000, 0 }, /* 1920x1080@60 */ + { 244800, 332000, 0, 498000, 0 }, /* 1920x1080@30 */ +}; + +static const struct bw_tbl sm8250_bw_table_dec[] = { + { 2073600, 2403000, 0, 4113000, 0 }, /* 4096x2160@60 */ + { 1036800, 1224000, 0, 2079000, 0 }, /* 4096x2160@30 */ + { 489600, 812000, 0, 998000, 0 }, /* 1920x1080@60 */ + { 244800, 416000, 0, 509000, 0 }, /* 1920x1080@30 */ +}; + +static const struct reg_val sm8250_reg_preset[] = { + { 0xb0088, 0 }, +}; + +static const struct venus_resources sm8250_res = { + .freq_tbl = sm8250_freq_table, + .freq_tbl_size = ARRAY_SIZE(sm8250_freq_table), + .reg_tbl = sm8250_reg_preset, + .reg_tbl_size = ARRAY_SIZE(sm8250_reg_preset), + .bw_tbl_enc = sm8250_bw_table_enc, + .bw_tbl_enc_size = ARRAY_SIZE(sm8250_bw_table_enc), + .bw_tbl_dec = sm8250_bw_table_dec, + .bw_tbl_dec_size = ARRAY_SIZE(sm8250_bw_table_dec), + .clks = {"core", "iface"}, + .clks_num = 2, + .resets = { "bus", "core" }, + .resets_num = 2, + .vcodec0_clks = { "vcodec0_core" }, + .vcodec_clks_num = 1, + .vcodec_pmdomains = { "venus", "vcodec0" }, + .vcodec_pmdomains_num = 2, + .opp_pmdomain = (const char *[]) { "mx", NULL }, + .vcodec_num = 1, + .max_load = 7833600, + .hfi_version = HFI_VERSION_6XX, + .vpu_version = VPU_VERSION_IRIS2, + .num_vpp_pipes = 4, + .vmem_id = VIDC_RESOURCE_NONE, + .vmem_size = 0, + .vmem_addr = 0, + .dma_mask = 0xe0000000 - 1, + .fwname = "qcom/vpu-1.0/venus.mbn", +}; + +static const struct freq_tbl sc7280_freq_table[] = { + { 0, 460000000 }, + { 0, 424000000 }, + { 0, 335000000 }, + { 0, 240000000 }, + { 0, 133333333 }, +}; + +static const struct bw_tbl sc7280_bw_table_enc[] = { + { 1944000, 1896000, 0, 3657000, 0 }, /* 3840x2160@60 */ + { 972000, 968000, 0, 1848000, 0 }, /* 3840x2160@30 */ + { 489600, 618000, 0, 941000, 0 }, /* 1920x1080@60 */ + { 244800, 318000, 0, 480000, 0 }, /* 1920x1080@30 */ +}; + +static const struct bw_tbl sc7280_bw_table_dec[] = { + { 2073600, 2128000, 0, 3831000, 0 }, /* 4096x2160@60 */ + { 1036800, 1085000, 0, 1937000, 0 }, /* 4096x2160@30 */ + { 489600, 779000, 0, 998000, 0 }, /* 1920x1080@60 */ + { 244800, 400000, 0, 509000, 0 }, /* 1920x1080@30 */ +}; + +static const struct reg_val sm7280_reg_preset[] = { + { 0xb0088, 0 }, +}; + +static const struct hfi_ubwc_config sc7280_ubwc_config = { + 0, 0, {1, 1, 1, 0, 0, 0}, 8, 32, 14, 0, 0, {0, 0} +}; + +static const struct venus_resources sc7280_res = { + .freq_tbl = sc7280_freq_table, + .freq_tbl_size = ARRAY_SIZE(sc7280_freq_table), + .reg_tbl = sm7280_reg_preset, + .reg_tbl_size = ARRAY_SIZE(sm7280_reg_preset), + .bw_tbl_enc = sc7280_bw_table_enc, + .bw_tbl_enc_size = ARRAY_SIZE(sc7280_bw_table_enc), + .bw_tbl_dec = sc7280_bw_table_dec, + .bw_tbl_dec_size = ARRAY_SIZE(sc7280_bw_table_dec), + .ubwc_conf = &sc7280_ubwc_config, + .clks = {"core", "bus", "iface"}, + .clks_num = 3, + .vcodec0_clks = {"vcodec_core", "vcodec_bus"}, + .vcodec_clks_num = 2, + .vcodec_pmdomains = { "venus", "vcodec0" }, + .vcodec_pmdomains_num = 2, + .opp_pmdomain = (const char *[]) { "cx", NULL }, + .vcodec_num = 1, + .hfi_version = HFI_VERSION_6XX, + .vpu_version = VPU_VERSION_IRIS2_1, + .num_vpp_pipes = 1, + .vmem_id = VIDC_RESOURCE_NONE, + .vmem_size = 0, + .vmem_addr = 0, + .dma_mask = 0xe0000000 - 1, + 
.fwname = "qcom/vpu-2.0/venus.mbn", +}; + +static const struct of_device_id venus_dt_match[] = { + { .compatible = "qcom,msm8916-venus", .data = &msm8916_res, }, + { .compatible = "qcom,msm8996-venus", .data = &msm8996_res, }, + { .compatible = "qcom,sdm660-venus", .data = &sdm660_res, }, + { .compatible = "qcom,sdm845-venus", .data = &sdm845_res, }, + { .compatible = "qcom,sdm845-venus-v2", .data = &sdm845_res_v2, }, + { .compatible = "qcom,sc7180-venus", .data = &sc7180_res, }, + { .compatible = "qcom,sc7280-venus", .data = &sc7280_res, }, + { .compatible = "qcom,sm8250-venus", .data = &sm8250_res, }, + { } +}; +MODULE_DEVICE_TABLE(of, venus_dt_match); + +static struct platform_driver qcom_venus_driver = { + .probe = venus_probe, + .remove_new = venus_remove, + .driver = { + .name = "qcom-venus", + .of_match_table = venus_dt_match, + .pm = &venus_pm_ops, + }, + .shutdown = venus_core_shutdown, +}; +module_platform_driver(qcom_venus_driver); + +MODULE_ALIAS("platform:qcom-venus"); +MODULE_DESCRIPTION("Qualcomm Venus video encoder and decoder driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/media/platform/qcom/venus/core.h b/drivers/media/platform/qcom/venus/core.h new file mode 100644 index 0000000000..4a633261ec --- /dev/null +++ b/drivers/media/platform/qcom/venus/core.h @@ -0,0 +1,567 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. + * Copyright (C) 2017 Linaro Ltd. + */ + +#ifndef __VENUS_CORE_H_ +#define __VENUS_CORE_H_ + +#include <linux/bitops.h> +#include <linux/list.h> +#include <media/videobuf2-v4l2.h> +#include <media/v4l2-ctrls.h> +#include <media/v4l2-device.h> + +#include "dbgfs.h" +#include "hfi.h" +#include "hfi_platform.h" +#include "hfi_helper.h" + +#define VDBGL "VenusLow : " +#define VDBGM "VenusMed : " +#define VDBGH "VenusHigh: " +#define VDBGFW "VenusFW : " + +#define VIDC_CLKS_NUM_MAX 4 +#define VIDC_VCODEC_CLKS_NUM_MAX 2 +#define VIDC_PMDOMAINS_NUM_MAX 3 +#define VIDC_RESETS_NUM_MAX 2 + +extern int venus_fw_debug; + +struct freq_tbl { + unsigned int load; + unsigned long freq; +}; + +struct reg_val { + u32 reg; + u32 value; +}; + +struct bw_tbl { + u32 mbs_per_sec; + u32 avg; + u32 peak; + u32 avg_10bit; + u32 peak_10bit; +}; + +enum vpu_version { + VPU_VERSION_AR50, + VPU_VERSION_AR50_LITE, + VPU_VERSION_IRIS1, + VPU_VERSION_IRIS2, + VPU_VERSION_IRIS2_1, +}; + +struct venus_resources { + u64 dma_mask; + const struct freq_tbl *freq_tbl; + unsigned int freq_tbl_size; + const struct bw_tbl *bw_tbl_enc; + unsigned int bw_tbl_enc_size; + const struct bw_tbl *bw_tbl_dec; + unsigned int bw_tbl_dec_size; + const struct reg_val *reg_tbl; + unsigned int reg_tbl_size; + const struct hfi_ubwc_config *ubwc_conf; + const char * const clks[VIDC_CLKS_NUM_MAX]; + unsigned int clks_num; + const char * const vcodec0_clks[VIDC_VCODEC_CLKS_NUM_MAX]; + const char * const vcodec1_clks[VIDC_VCODEC_CLKS_NUM_MAX]; + unsigned int vcodec_clks_num; + const char * const vcodec_pmdomains[VIDC_PMDOMAINS_NUM_MAX]; + unsigned int vcodec_pmdomains_num; + const char **opp_pmdomain; + unsigned int vcodec_num; + const char * const resets[VIDC_RESETS_NUM_MAX]; + unsigned int resets_num; + enum hfi_version hfi_version; + enum vpu_version vpu_version; + u8 num_vpp_pipes; + u32 max_load; + unsigned int vmem_id; + u32 vmem_size; + u32 vmem_addr; + u32 cp_start; + u32 cp_size; + u32 cp_nonpixel_start; + u32 cp_nonpixel_size; + const char *fwname; +}; + +enum venus_fmt { + VENUS_FMT_NV12 = 0, + VENUS_FMT_QC08C = 1, + 
VENUS_FMT_QC10C = 2, + VENUS_FMT_P010 = 3, + VENUS_FMT_H264 = 4, + VENUS_FMT_VP8 = 5, + VENUS_FMT_VP9 = 6, + VENUS_FMT_HEVC = 7, + VENUS_FMT_VC1_ANNEX_G = 8, + VENUS_FMT_VC1_ANNEX_L = 9, + VENUS_FMT_MPEG4 = 10, + VENUS_FMT_MPEG2 = 11, + VENUS_FMT_H263 = 12, + VENUS_FMT_XVID = 13, +}; + +struct venus_format { + u32 pixfmt; + unsigned int num_planes; + u32 type; + u32 flags; +}; + +/** + * struct venus_core - holds core parameters valid for all instances + * + * @base: IO memory base address + * @vbif_base: IO memory vbif base address + * @cpu_base: IO memory cpu base address + * @cpu_cs_base: IO memory cpu_cs base address + * @cpu_ic_base: IO memory cpu_ic base address + * @wrapper_base: IO memory wrapper base address + * @wrapper_tz_base: IO memory wrapper TZ base address + * @aon_base: AON base address + * @irq: Venus irq + * @clks: an array of struct clk pointers + * @vcodec0_clks: an array of vcodec0 struct clk pointers + * @vcodec1_clks: an array of vcodec1 struct clk pointers + * @video_path: an interconnect handle to video to/from memory path + * @cpucfg_path: an interconnect handle to cpu configuration path + * @has_opp_table: does OPP table exist + * @pmdomains: an array of pmdomains struct device pointers + * @opp_dl_venus: a device-link for device OPP + * @opp_pmdomain: an OPP power-domain + * @resets: an array of reset signals + * @vdev_dec: a reference to video device structure for decoder instances + * @vdev_enc: a reference to video device structure for encoder instances + * @v4l2_dev: a holder for v4l2 device structure + * @res: a reference to venus resources structure + * @dev: convenience struct device pointer + * @dev_dec: convenience struct device pointer for decoder device + * @dev_enc: convenience struct device pointer for encoder device + * @use_tz: a flag that suggests presence of trustzone + * @fw: structure of firmware parameters + * @lock: a lock for this structure + * @instances: a list_head of all instances + * @insts_count: num of instances + * @state: the state of the venus core + * @done: a completion for sync HFI operations + * @error: an error returned during last HFI sync operations + * @sys_error: an error flag that signals a system error event + * @sys_err_done: a waitqueue to wait for system error recovery end + * @core_ops: the core operations + * @pm_ops: a pointer to pm operations + * @pm_lock: a lock for PM operations + * @enc_codecs: encoders supported by this core + * @dec_codecs: decoders supported by this core + * @max_sessions_supported: holds the maximum number of sessions + * @priv: a private field for HFI operations + * @ops: the core HFI operations + * @work: a delayed work for handling system fatal error + * @caps: an array of supported HFI capabilities + * @codecs_count: platform codecs count + * @core0_usage_count: usage counter for core0 + * @core1_usage_count: usage counter for core1 + * @root: debugfs root directory + * @venus_ver: the venus firmware version + */ +struct venus_core { + void __iomem *base; + void __iomem *vbif_base; + void __iomem *cpu_base; + void __iomem *cpu_cs_base; + void __iomem *cpu_ic_base; + void __iomem *wrapper_base; + void __iomem *wrapper_tz_base; + void __iomem *aon_base; + int irq; + struct clk *clks[VIDC_CLKS_NUM_MAX]; + struct clk *vcodec0_clks[VIDC_VCODEC_CLKS_NUM_MAX]; + struct clk *vcodec1_clks[VIDC_VCODEC_CLKS_NUM_MAX]; + struct icc_path *video_path; + struct icc_path *cpucfg_path; + bool has_opp_table; + struct device *pmdomains[VIDC_PMDOMAINS_NUM_MAX]; + struct device_link *opp_dl_venus; + struct
device *opp_pmdomain; + struct reset_control *resets[VIDC_RESETS_NUM_MAX]; + struct video_device *vdev_dec; + struct video_device *vdev_enc; + struct v4l2_device v4l2_dev; + const struct venus_resources *res; + struct device *dev; + struct device *dev_dec; + struct device *dev_enc; + unsigned int use_tz; + struct video_firmware { + struct device *dev; + struct iommu_domain *iommu_domain; + size_t mapped_mem_size; + phys_addr_t mem_phys; + size_t mem_size; + } fw; + struct mutex lock; + struct list_head instances; + atomic_t insts_count; + unsigned int state; + struct completion done; + unsigned int error; + unsigned long sys_error; + wait_queue_head_t sys_err_done; + const struct hfi_core_ops *core_ops; + const struct venus_pm_ops *pm_ops; + struct mutex pm_lock; + unsigned long enc_codecs; + unsigned long dec_codecs; + unsigned int max_sessions_supported; + void *priv; + const struct hfi_ops *ops; + struct delayed_work work; + struct hfi_plat_caps caps[MAX_CODEC_NUM]; + unsigned int codecs_count; + unsigned int core0_usage_count; + unsigned int core1_usage_count; + struct dentry *root; + struct venus_img_version { + u32 major; + u32 minor; + u32 rev; + } venus_ver; +}; + +struct vdec_controls { + u32 post_loop_deb_mode; + u32 profile; + u32 level; + u32 display_delay; + u32 display_delay_enable; + u64 conceal_color; +}; + +struct venc_controls { + u16 gop_size; + u32 num_p_frames; + u32 num_b_frames; + u32 bitrate_mode; + u32 bitrate; + u32 bitrate_peak; + u32 rc_enable; + u32 const_quality; + u32 frame_skip_mode; + + u32 h264_i_period; + u32 h264_entropy_mode; + u32 h264_i_qp; + u32 h264_p_qp; + u32 h264_b_qp; + u32 h264_min_qp; + u32 h264_max_qp; + u32 h264_i_min_qp; + u32 h264_i_max_qp; + u32 h264_p_min_qp; + u32 h264_p_max_qp; + u32 h264_b_min_qp; + u32 h264_b_max_qp; + u32 h264_loop_filter_mode; + s32 h264_loop_filter_alpha; + s32 h264_loop_filter_beta; + u32 h264_8x8_transform; + + u32 hevc_i_qp; + u32 hevc_p_qp; + u32 hevc_b_qp; + u32 hevc_min_qp; + u32 hevc_max_qp; + u32 hevc_i_min_qp; + u32 hevc_i_max_qp; + u32 hevc_p_min_qp; + u32 hevc_p_max_qp; + u32 hevc_b_min_qp; + u32 hevc_b_max_qp; + + u32 vp8_min_qp; + u32 vp8_max_qp; + + u32 multi_slice_mode; + u32 multi_slice_max_bytes; + u32 multi_slice_max_mb; + + u32 header_mode; + bool aud_enable; + u32 intra_refresh_type; + u32 intra_refresh_period; + + struct { + u32 h264; + u32 mpeg4; + u32 hevc; + u32 vp8; + u32 vp9; + } profile; + struct { + u32 h264; + u32 mpeg4; + u32 hevc; + u32 vp9; + } level; + + u32 base_priority_id; + u32 ltr_count; + struct v4l2_ctrl_hdr10_cll_info cll; + struct v4l2_ctrl_hdr10_mastering_display mastering; +}; + +struct venus_buffer { + struct vb2_v4l2_buffer vb; + struct list_head list; + dma_addr_t dma_addr; + u32 size; + struct list_head reg_list; + u32 flags; + struct list_head ref_list; +}; + +struct clock_data { + u32 core_id; + unsigned long freq; + unsigned long vpp_freq; + unsigned long vsp_freq; + unsigned long low_power_freq; +}; + +#define to_venus_buffer(ptr) container_of(ptr, struct venus_buffer, vb) + +enum venus_dec_state { + VENUS_DEC_STATE_DEINIT = 0, + VENUS_DEC_STATE_INIT = 1, + VENUS_DEC_STATE_CAPTURE_SETUP = 2, + VENUS_DEC_STATE_STOPPED = 3, + VENUS_DEC_STATE_SEEK = 4, + VENUS_DEC_STATE_DRAIN = 5, + VENUS_DEC_STATE_DECODING = 6, + VENUS_DEC_STATE_DRC = 7, +}; + +enum venus_enc_state { + VENUS_ENC_STATE_DEINIT = 0, + VENUS_ENC_STATE_INIT = 1, + VENUS_ENC_STATE_ENCODING = 2, + VENUS_ENC_STATE_STOPPED = 3, + VENUS_ENC_STATE_DRAIN = 4, +}; + +struct venus_ts_metadata { + bool used; + 
u64 ts_ns; + u64 ts_us; + u32 flags; + struct v4l2_timecode tc; +}; + +enum venus_inst_modes { + VENUS_LOW_POWER = BIT(0), +}; + +/** + * struct venus_inst - holds per instance parameters + * + * @list: used to attach an instance to the core + * @lock: instance lock + * @core: a reference to the core struct + * @clk_data: clock data per core ID + * @dpbbufs: a list of decoded picture buffers + * @internalbufs: a list of internal buffers + * @registeredbufs: a list of registered capture buffers + * @delayed_process: a list of delayed buffers + * @delayed_process_work: a work_struct for processing delayed buffers + * @nonblock: nonblocking flag + * @ctrl_handler: v4l control handler + * @controls: a union of decoder and encoder control parameters + * @fh: a holder of v4l file handle structure + * @streamon_cap: stream on flag for capture queue + * @streamon_out: stream on flag for output queue + * @width: current capture width + * @height: current capture height + * @crop: current crop rectangle + * @fw_min_cnt: firmware minimum buffer count + * @out_width: current output width + * @out_height: current output height + * @colorspace: current color space + * @ycbcr_enc: current YCbCr encoding + * @quantization: current quantization + * @xfer_func: current xfer function + * @codec_state: current decoder API state (see DEC_STATE_) + * @enc_state: current encoder API state (see ENC_STATE_) + * @reconf_wait: wait queue for resolution change event + * @subscriptions: used to hold current events subscriptions + * @buf_count: used to count number of buffers (reqbuf(0)) + * @tss: timestamp metadata + * @payloads: cache plane payload to use it for clock/BW scaling + * @fps: holds current FPS + * @timeperframe: holds current time per frame structure + * @fmt_out: a reference to output format structure + * @fmt_cap: a reference to capture format structure + * @num_input_bufs: holds number of input buffers + * @num_output_bufs: holds number of output buffers + * @input_buf_size: holds input buffer size + * @output_buf_size: holds output buffer size + * @output2_buf_size: holds secondary decoder output buffer size + * @dpb_buftype: decoded picture buffer type + * @dpb_fmt: decoded picture buffer raw format + * @opb_buftype: output picture buffer type + * @opb_fmt: output picture buffer raw format + * @reconfig: a flag raised by decoder when the stream resolution changed + * @hfi_codec: current codec for this instance in HFI space + * @sequence_cap: a sequence counter for capture queue + * @sequence_out: a sequence counter for output queue + * @m2m_dev: a reference to m2m device structure + * @m2m_ctx: a reference to m2m context structure + * @ctx_q_lock: a lock to serialize video device ioctl calls + * @state: current state of the instance + * @done: a completion for sync HFI operation + * @error: an error returned during last HFI sync operation + * @session_error: a flag raised by HFI interface in case of session error + * @ops: HFI operations + * @priv: a private field for HFI operations callbacks + * @session_type: the type of the session (decoder or encoder) + * @hprop: a union used as a holder by get property + * @core_acquired: the Core has been acquired + * @bit_depth: current bitstream bit-depth + * @pic_struct: bitstream progressive vs interlaced + * @next_buf_last: a flag to mark next queued capture buffer as last + * @drain_active: Drain sequence is in progress + * @flags: bitmask flags describing current instance mode + * @dpb_ids: DPB buffer IDs + */ +struct venus_inst { + struct list_head list; +
struct mutex lock; + struct venus_core *core; + struct clock_data clk_data; + struct list_head dpbbufs; + struct list_head internalbufs; + struct list_head registeredbufs; + struct list_head delayed_process; + struct work_struct delayed_process_work; + bool nonblock; + + struct v4l2_ctrl_handler ctrl_handler; + union { + struct vdec_controls dec; + struct venc_controls enc; + } controls; + struct v4l2_fh fh; + unsigned int streamon_cap, streamon_out; + u32 width; + u32 height; + struct v4l2_rect crop; + u32 fw_min_cnt; + u32 out_width; + u32 out_height; + u32 colorspace; + u8 ycbcr_enc; + u8 quantization; + u8 xfer_func; + enum venus_dec_state codec_state; + enum venus_enc_state enc_state; + wait_queue_head_t reconf_wait; + unsigned int subscriptions; + int buf_count; + struct venus_ts_metadata tss[VIDEO_MAX_FRAME]; + unsigned long payloads[VIDEO_MAX_FRAME]; + u64 fps; + struct v4l2_fract timeperframe; + const struct venus_format *fmt_out; + const struct venus_format *fmt_cap; + unsigned int num_input_bufs; + unsigned int num_output_bufs; + unsigned int input_buf_size; + unsigned int output_buf_size; + unsigned int output2_buf_size; + u32 dpb_buftype; + u32 dpb_fmt; + u32 opb_buftype; + u32 opb_fmt; + bool reconfig; + u32 hfi_codec; + u32 sequence_cap; + u32 sequence_out; + struct v4l2_m2m_dev *m2m_dev; + struct v4l2_m2m_ctx *m2m_ctx; + struct mutex ctx_q_lock; + unsigned int state; + struct completion done; + unsigned int error; + bool session_error; + const struct hfi_inst_ops *ops; + u32 session_type; + union hfi_get_property hprop; + unsigned int core_acquired: 1; + unsigned int bit_depth; + unsigned int pic_struct; + bool next_buf_last; + bool drain_active; + enum venus_inst_modes flags; + struct ida dpb_ids; +}; + +#define IS_V1(core) ((core)->res->hfi_version == HFI_VERSION_1XX) +#define IS_V3(core) ((core)->res->hfi_version == HFI_VERSION_3XX) +#define IS_V4(core) ((core)->res->hfi_version == HFI_VERSION_4XX) +#define IS_V6(core) ((core)->res->hfi_version == HFI_VERSION_6XX) + +#define IS_AR50(core) ((core)->res->vpu_version == VPU_VERSION_AR50) +#define IS_AR50_LITE(core) ((core)->res->vpu_version == VPU_VERSION_AR50_LITE) +#define IS_IRIS1(core) ((core)->res->vpu_version == VPU_VERSION_IRIS1) +#define IS_IRIS2(core) ((core)->res->vpu_version == VPU_VERSION_IRIS2) +#define IS_IRIS2_1(core) ((core)->res->vpu_version == VPU_VERSION_IRIS2_1) + +#define ctrl_to_inst(ctrl) \ + container_of((ctrl)->handler, struct venus_inst, ctrl_handler) + +static inline struct venus_inst *to_inst(struct file *filp) +{ + return container_of(filp->private_data, struct venus_inst, fh); +} + +static inline void *to_hfi_priv(struct venus_core *core) +{ + return core->priv; +} + +static inline struct hfi_plat_caps * +venus_caps_by_codec(struct venus_core *core, u32 codec, u32 domain) +{ + unsigned int c; + + for (c = 0; c < core->codecs_count; c++) { + if (core->caps[c].codec == codec && + core->caps[c].domain == domain) + return &core->caps[c]; + } + + return NULL; +} + +static inline bool +is_fw_rev_or_newer(struct venus_core *core, u32 vmajor, u32 vminor, u32 vrev) +{ + return ((core)->venus_ver.major == vmajor && + (core)->venus_ver.minor == vminor && + (core)->venus_ver.rev >= vrev); +} + +static inline bool +is_fw_rev_or_older(struct venus_core *core, u32 vmajor, u32 vminor, u32 vrev) +{ + return ((core)->venus_ver.major == vmajor && + (core)->venus_ver.minor == vminor && + (core)->venus_ver.rev <= vrev); +} +#endif diff --git a/drivers/media/platform/qcom/venus/dbgfs.c 
b/drivers/media/platform/qcom/venus/dbgfs.c new file mode 100644 index 0000000000..726f4b730e --- /dev/null +++ b/drivers/media/platform/qcom/venus/dbgfs.c @@ -0,0 +1,28 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2020 Linaro Ltd. + */ + +#include <linux/debugfs.h> +#include <linux/fault-inject.h> + +#include "core.h" + +#ifdef CONFIG_FAULT_INJECTION +DECLARE_FAULT_ATTR(venus_ssr_attr); +#endif + +void venus_dbgfs_init(struct venus_core *core) +{ + core->root = debugfs_create_dir("venus", NULL); + debugfs_create_x32("fw_level", 0644, core->root, &venus_fw_debug); + +#ifdef CONFIG_FAULT_INJECTION + fault_create_debugfs_attr("fail_ssr", core->root, &venus_ssr_attr); +#endif +} + +void venus_dbgfs_deinit(struct venus_core *core) +{ + debugfs_remove_recursive(core->root); +} diff --git a/drivers/media/platform/qcom/venus/dbgfs.h b/drivers/media/platform/qcom/venus/dbgfs.h new file mode 100644 index 0000000000..c87c1355d0 --- /dev/null +++ b/drivers/media/platform/qcom/venus/dbgfs.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2020 Linaro Ltd. */ + +#ifndef __VENUS_DBGFS_H__ +#define __VENUS_DBGFS_H__ + +#include <linux/fault-inject.h> + +struct venus_core; + +#ifdef CONFIG_FAULT_INJECTION +extern struct fault_attr venus_ssr_attr; +static inline bool venus_fault_inject_ssr(void) +{ + return should_fail(&venus_ssr_attr, 1); +} +#else +static inline bool venus_fault_inject_ssr(void) { return false; } +#endif + + +void venus_dbgfs_init(struct venus_core *core); +void venus_dbgfs_deinit(struct venus_core *core); + +#endif diff --git a/drivers/media/platform/qcom/venus/firmware.c b/drivers/media/platform/qcom/venus/firmware.c new file mode 100644 index 0000000000..fe7da2b304 --- /dev/null +++ b/drivers/media/platform/qcom/venus/firmware.c @@ -0,0 +1,363 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2017 Linaro Ltd. 
+ */ + +#include <linux/device.h> +#include <linux/firmware.h> +#include <linux/kernel.h> +#include <linux/iommu.h> +#include <linux/io.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_reserved_mem.h> +#include <linux/platform_device.h> +#include <linux/of_device.h> +#include <linux/firmware/qcom/qcom_scm.h> +#include <linux/sizes.h> +#include <linux/soc/qcom/mdt_loader.h> + +#include "core.h" +#include "firmware.h" +#include "hfi_venus_io.h" + +#define VENUS_PAS_ID 9 +#define VENUS_FW_MEM_SIZE (6 * SZ_1M) +#define VENUS_FW_START_ADDR 0x0 + +static void venus_reset_cpu(struct venus_core *core) +{ + u32 fw_size = core->fw.mapped_mem_size; + void __iomem *wrapper_base; + + if (IS_IRIS2_1(core)) + wrapper_base = core->wrapper_tz_base; + else + wrapper_base = core->wrapper_base; + + writel(0, wrapper_base + WRAPPER_FW_START_ADDR); + writel(fw_size, wrapper_base + WRAPPER_FW_END_ADDR); + writel(0, wrapper_base + WRAPPER_CPA_START_ADDR); + writel(fw_size, wrapper_base + WRAPPER_CPA_END_ADDR); + writel(fw_size, wrapper_base + WRAPPER_NONPIX_START_ADDR); + writel(fw_size, wrapper_base + WRAPPER_NONPIX_END_ADDR); + + if (IS_IRIS2_1(core)) { + /* Bring XTSS out of reset */ + writel(0, wrapper_base + WRAPPER_TZ_XTSS_SW_RESET); + } else { + writel(0x0, wrapper_base + WRAPPER_CPU_CGC_DIS); + writel(0x0, wrapper_base + WRAPPER_CPU_CLOCK_CONFIG); + + /* Bring ARM9 out of reset */ + writel(0, wrapper_base + WRAPPER_A9SS_SW_RESET); + } +} + +int venus_set_hw_state(struct venus_core *core, bool resume) +{ + int ret; + + if (core->use_tz) { + ret = qcom_scm_set_remote_state(resume, 0); + if (resume && ret == -EINVAL) + ret = 0; + return ret; + } + + if (resume) { + venus_reset_cpu(core); + } else { + if (IS_IRIS2_1(core)) + writel(WRAPPER_XTSS_SW_RESET_BIT, + core->wrapper_tz_base + WRAPPER_TZ_XTSS_SW_RESET); + else + writel(WRAPPER_A9SS_SW_RESET_BIT, + core->wrapper_base + WRAPPER_A9SS_SW_RESET); + } + + return 0; +} + +static int venus_load_fw(struct venus_core *core, const char *fwname, + phys_addr_t *mem_phys, size_t *mem_size) +{ + const struct firmware *mdt; + struct reserved_mem *rmem; + struct device_node *node; + struct device *dev; + ssize_t fw_size; + void *mem_va; + int ret; + + *mem_phys = 0; + *mem_size = 0; + + dev = core->dev; + node = of_parse_phandle(dev->of_node, "memory-region", 0); + if (!node) { + dev_err(dev, "no memory-region specified\n"); + return -EINVAL; + } + + rmem = of_reserved_mem_lookup(node); + of_node_put(node); + if (!rmem) { + dev_err(dev, "failed to lookup reserved memory-region\n"); + return -EINVAL; + } + + ret = request_firmware(&mdt, fwname, dev); + if (ret < 0) + return ret; + + fw_size = qcom_mdt_get_size(mdt); + if (fw_size < 0) { + ret = fw_size; + goto err_release_fw; + } + + *mem_phys = rmem->base; + *mem_size = rmem->size; + + if (*mem_size < fw_size || fw_size > VENUS_FW_MEM_SIZE) { + ret = -EINVAL; + goto err_release_fw; + } + + mem_va = memremap(*mem_phys, *mem_size, MEMREMAP_WC); + if (!mem_va) { + dev_err(dev, "unable to map memory region %pa size %#zx\n", mem_phys, *mem_size); + ret = -ENOMEM; + goto err_release_fw; + } + + if (core->use_tz) + ret = qcom_mdt_load(dev, mdt, fwname, VENUS_PAS_ID, + mem_va, *mem_phys, *mem_size, NULL); + else + ret = qcom_mdt_load_no_init(dev, mdt, fwname, VENUS_PAS_ID, + mem_va, *mem_phys, *mem_size, NULL); + + memunmap(mem_va); +err_release_fw: + release_firmware(mdt); + return ret; +} + +static int venus_boot_no_tz(struct venus_core *core, phys_addr_t mem_phys, + size_t mem_size) +{ + struct 
iommu_domain *iommu; + struct device *dev; + int ret; + + dev = core->fw.dev; + if (!dev) + return -EPROBE_DEFER; + + iommu = core->fw.iommu_domain; + core->fw.mapped_mem_size = mem_size; + + ret = iommu_map(iommu, VENUS_FW_START_ADDR, mem_phys, mem_size, + IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV, GFP_KERNEL); + if (ret) { + dev_err(dev, "could not map video firmware region\n"); + return ret; + } + + venus_reset_cpu(core); + + return 0; +} + +static int venus_shutdown_no_tz(struct venus_core *core) +{ + const size_t mapped = core->fw.mapped_mem_size; + struct iommu_domain *iommu; + size_t unmapped; + u32 reg; + struct device *dev = core->fw.dev; + void __iomem *wrapper_base = core->wrapper_base; + void __iomem *wrapper_tz_base = core->wrapper_tz_base; + + if (IS_IRIS2_1(core)) { + /* Assert the reset to XTSS */ + reg = readl(wrapper_tz_base + WRAPPER_TZ_XTSS_SW_RESET); + reg |= WRAPPER_XTSS_SW_RESET_BIT; + writel(reg, wrapper_tz_base + WRAPPER_TZ_XTSS_SW_RESET); + } else { + /* Assert the reset to ARM9 */ + reg = readl(wrapper_base + WRAPPER_A9SS_SW_RESET); + reg |= WRAPPER_A9SS_SW_RESET_BIT; + writel(reg, wrapper_base + WRAPPER_A9SS_SW_RESET); + } + + iommu = core->fw.iommu_domain; + + if (core->fw.mapped_mem_size && iommu) { + unmapped = iommu_unmap(iommu, VENUS_FW_START_ADDR, mapped); + + if (unmapped != mapped) + dev_err(dev, "failed to unmap firmware\n"); + else + core->fw.mapped_mem_size = 0; + } + + return 0; +} + +int venus_boot(struct venus_core *core) +{ + struct device *dev = core->dev; + const struct venus_resources *res = core->res; + const char *fwpath = NULL; + phys_addr_t mem_phys; + size_t mem_size; + int ret; + + if (!IS_ENABLED(CONFIG_QCOM_MDT_LOADER) || + (core->use_tz && !qcom_scm_is_available())) + return -EPROBE_DEFER; + + ret = of_property_read_string_index(dev->of_node, "firmware-name", 0, + &fwpath); + if (ret) + fwpath = core->res->fwname; + + ret = venus_load_fw(core, fwpath, &mem_phys, &mem_size); + if (ret) { + dev_err(dev, "fail to load video firmware\n"); + return -EINVAL; + } + + core->fw.mem_size = mem_size; + core->fw.mem_phys = mem_phys; + + if (core->use_tz) + ret = qcom_scm_pas_auth_and_reset(VENUS_PAS_ID); + else + ret = venus_boot_no_tz(core, mem_phys, mem_size); + + if (ret) + return ret; + + if (core->use_tz && res->cp_size) { + /* + * Clues for porting using downstream data: + * cp_start = 0 + * cp_size = venus_ns/virtual-addr-pool[0] - yes, address and not size! + * This works, as the non-secure context bank is placed + * contiguously right after the Content Protection region. 
+ * + * cp_nonpixel_start = venus_sec_non_pixel/virtual-addr-pool[0] + * cp_nonpixel_size = venus_sec_non_pixel/virtual-addr-pool[1] + */ + ret = qcom_scm_mem_protect_video_var(res->cp_start, + res->cp_size, + res->cp_nonpixel_start, + res->cp_nonpixel_size); + if (ret) { + qcom_scm_pas_shutdown(VENUS_PAS_ID); + dev_err(dev, "set virtual address ranges fail (%d)\n", + ret); + return ret; + } + } + + return 0; +} + +int venus_shutdown(struct venus_core *core) +{ + int ret; + + if (core->use_tz) + ret = qcom_scm_pas_shutdown(VENUS_PAS_ID); + else + ret = venus_shutdown_no_tz(core); + + return ret; +} + +int venus_firmware_init(struct venus_core *core) +{ + struct platform_device_info info; + struct iommu_domain *iommu_dom; + struct platform_device *pdev; + struct device_node *np; + int ret; + + np = of_get_child_by_name(core->dev->of_node, "video-firmware"); + if (!np) { + core->use_tz = true; + return 0; + } + + memset(&info, 0, sizeof(info)); + info.fwnode = &np->fwnode; + info.parent = core->dev; + info.name = np->name; + info.dma_mask = DMA_BIT_MASK(32); + + pdev = platform_device_register_full(&info); + if (IS_ERR(pdev)) { + of_node_put(np); + return PTR_ERR(pdev); + } + + pdev->dev.of_node = np; + + ret = of_dma_configure(&pdev->dev, np, true); + if (ret) { + dev_err(core->dev, "dma configure fail\n"); + goto err_unregister; + } + + core->fw.dev = &pdev->dev; + + iommu_dom = iommu_domain_alloc(&platform_bus_type); + if (!iommu_dom) { + dev_err(core->fw.dev, "Failed to allocate iommu domain\n"); + ret = -ENOMEM; + goto err_unregister; + } + + ret = iommu_attach_device(iommu_dom, core->fw.dev); + if (ret) { + dev_err(core->fw.dev, "could not attach device\n"); + goto err_iommu_free; + } + + core->fw.iommu_domain = iommu_dom; + + of_node_put(np); + + return 0; + +err_iommu_free: + iommu_domain_free(iommu_dom); +err_unregister: + platform_device_unregister(pdev); + of_node_put(np); + return ret; +} + +void venus_firmware_deinit(struct venus_core *core) +{ + struct iommu_domain *iommu; + + if (!core->fw.dev) + return; + + iommu = core->fw.iommu_domain; + + iommu_detach_device(iommu, core->fw.dev); + + if (core->fw.iommu_domain) { + iommu_domain_free(iommu); + core->fw.iommu_domain = NULL; + } + + platform_device_unregister(to_platform_device(core->fw.dev)); +} diff --git a/drivers/media/platform/qcom/venus/firmware.h b/drivers/media/platform/qcom/venus/firmware.h new file mode 100644 index 0000000000..aaccd847fa --- /dev/null +++ b/drivers/media/platform/qcom/venus/firmware.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2017 Linaro Ltd. + */ +#ifndef __VENUS_FIRMWARE_H__ +#define __VENUS_FIRMWARE_H__ + +struct device; + +int venus_firmware_init(struct venus_core *core); +void venus_firmware_deinit(struct venus_core *core); +int venus_boot(struct venus_core *core); +int venus_shutdown(struct venus_core *core); +int venus_set_hw_state(struct venus_core *core, bool suspend); + +static inline int venus_set_hw_state_suspend(struct venus_core *core) +{ + return venus_set_hw_state(core, false); +} + +static inline int venus_set_hw_state_resume(struct venus_core *core) +{ + return venus_set_hw_state(core, true); +} + +#endif diff --git a/drivers/media/platform/qcom/venus/helpers.c b/drivers/media/platform/qcom/venus/helpers.c new file mode 100644 index 0000000000..8295542e1a --- /dev/null +++ b/drivers/media/platform/qcom/venus/helpers.c @@ -0,0 +1,1856 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2012-2016, The Linux Foundation. 
All rights reserved. + * Copyright (C) 2017 Linaro Ltd. + */ +#include <linux/idr.h> +#include <linux/list.h> +#include <linux/mutex.h> +#include <linux/slab.h> +#include <linux/kernel.h> +#include <media/videobuf2-dma-contig.h> +#include <media/v4l2-mem2mem.h> +#include <asm/div64.h> + +#include "core.h" +#include "helpers.h" +#include "hfi_helper.h" +#include "pm_helpers.h" +#include "hfi_platform.h" +#include "hfi_parser.h" + +#define NUM_MBS_720P (((ALIGN(1280, 16)) >> 4) * ((ALIGN(736, 16)) >> 4)) +#define NUM_MBS_4K (((ALIGN(4096, 16)) >> 4) * ((ALIGN(2304, 16)) >> 4)) + +enum dpb_buf_owner { + DRIVER, + FIRMWARE, +}; + +struct intbuf { + struct list_head list; + u32 type; + size_t size; + void *va; + dma_addr_t da; + unsigned long attrs; + enum dpb_buf_owner owned_by; + u32 dpb_out_tag; +}; + +bool venus_helper_check_codec(struct venus_inst *inst, u32 v4l2_pixfmt) +{ + struct venus_core *core = inst->core; + u32 session_type = inst->session_type; + u32 codec; + + switch (v4l2_pixfmt) { + case V4L2_PIX_FMT_H264: + codec = HFI_VIDEO_CODEC_H264; + break; + case V4L2_PIX_FMT_H263: + codec = HFI_VIDEO_CODEC_H263; + break; + case V4L2_PIX_FMT_MPEG1: + codec = HFI_VIDEO_CODEC_MPEG1; + break; + case V4L2_PIX_FMT_MPEG2: + codec = HFI_VIDEO_CODEC_MPEG2; + break; + case V4L2_PIX_FMT_MPEG4: + codec = HFI_VIDEO_CODEC_MPEG4; + break; + case V4L2_PIX_FMT_VC1_ANNEX_G: + case V4L2_PIX_FMT_VC1_ANNEX_L: + codec = HFI_VIDEO_CODEC_VC1; + break; + case V4L2_PIX_FMT_VP8: + codec = HFI_VIDEO_CODEC_VP8; + break; + case V4L2_PIX_FMT_VP9: + codec = HFI_VIDEO_CODEC_VP9; + break; + case V4L2_PIX_FMT_XVID: + codec = HFI_VIDEO_CODEC_DIVX; + break; + case V4L2_PIX_FMT_HEVC: + codec = HFI_VIDEO_CODEC_HEVC; + break; + default: + return false; + } + + if (session_type == VIDC_SESSION_TYPE_ENC && core->enc_codecs & codec) + return true; + + if (session_type == VIDC_SESSION_TYPE_DEC && core->dec_codecs & codec) + return true; + + return false; +} +EXPORT_SYMBOL_GPL(venus_helper_check_codec); + +static void free_dpb_buf(struct venus_inst *inst, struct intbuf *buf) +{ + ida_free(&inst->dpb_ids, buf->dpb_out_tag); + + list_del_init(&buf->list); + dma_free_attrs(inst->core->dev, buf->size, buf->va, buf->da, + buf->attrs); + kfree(buf); +} + +int venus_helper_queue_dpb_bufs(struct venus_inst *inst) +{ + struct intbuf *buf, *next; + unsigned int dpb_size = 0; + int ret = 0; + + if (inst->dpb_buftype == HFI_BUFFER_OUTPUT) + dpb_size = inst->output_buf_size; + else if (inst->dpb_buftype == HFI_BUFFER_OUTPUT2) + dpb_size = inst->output2_buf_size; + + list_for_each_entry_safe(buf, next, &inst->dpbbufs, list) { + struct hfi_frame_data fdata; + + memset(&fdata, 0, sizeof(fdata)); + fdata.alloc_len = buf->size; + fdata.device_addr = buf->da; + fdata.buffer_type = buf->type; + + if (buf->owned_by == FIRMWARE) + continue; + + /* free buffer from previous sequence which was released later */ + if (dpb_size > buf->size) { + free_dpb_buf(inst, buf); + continue; + } + + fdata.clnt_data = buf->dpb_out_tag; + + ret = hfi_session_process_buf(inst, &fdata); + if (ret) + goto fail; + + buf->owned_by = FIRMWARE; + } + +fail: + return ret; +} +EXPORT_SYMBOL_GPL(venus_helper_queue_dpb_bufs); + +int venus_helper_free_dpb_bufs(struct venus_inst *inst) +{ + struct intbuf *buf, *n; + + list_for_each_entry_safe(buf, n, &inst->dpbbufs, list) { + if (buf->owned_by == FIRMWARE) + continue; + free_dpb_buf(inst, buf); + } + + if (list_empty(&inst->dpbbufs)) + INIT_LIST_HEAD(&inst->dpbbufs); + + return 0; +} +EXPORT_SYMBOL_GPL(venus_helper_free_dpb_bufs); 
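+ +/* + * Decoded picture buffer (DPB) lifecycle, as implemented by the helpers + * around this point: venus_helper_alloc_dpb_bufs() below allocates the + * firmware's minimum count of buffers of dpb_size, tags each one with an + * IDA id (later passed as clnt_data) and links it into inst->dpbbufs; + * venus_helper_queue_dpb_bufs() hands every driver-owned entry to the + * firmware through hfi_session_process_buf(); venus_helper_free_dpb_bufs() + * releases only the entries still owned by the driver. + */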
+ +int venus_helper_alloc_dpb_bufs(struct venus_inst *inst) +{ + struct venus_core *core = inst->core; + struct device *dev = core->dev; + enum hfi_version ver = core->res->hfi_version; + struct hfi_buffer_requirements bufreq; + u32 buftype = inst->dpb_buftype; + unsigned int dpb_size = 0; + struct intbuf *buf; + unsigned int i; + u32 count; + int ret; + int id; + + /* no need to allocate dpb buffers */ + if (!inst->dpb_fmt) + return 0; + + if (inst->dpb_buftype == HFI_BUFFER_OUTPUT) + dpb_size = inst->output_buf_size; + else if (inst->dpb_buftype == HFI_BUFFER_OUTPUT2) + dpb_size = inst->output2_buf_size; + + if (!dpb_size) + return 0; + + ret = venus_helper_get_bufreq(inst, buftype, &bufreq); + if (ret) + return ret; + + count = hfi_bufreq_get_count_min(&bufreq, ver); + + for (i = 0; i < count; i++) { + buf = kzalloc(sizeof(*buf), GFP_KERNEL); + if (!buf) { + ret = -ENOMEM; + goto fail; + } + + buf->type = buftype; + buf->size = dpb_size; + buf->attrs = DMA_ATTR_WRITE_COMBINE | + DMA_ATTR_NO_KERNEL_MAPPING; + buf->va = dma_alloc_attrs(dev, buf->size, &buf->da, GFP_KERNEL, + buf->attrs); + if (!buf->va) { + ret = -ENOMEM; + goto fail; + } + buf->owned_by = DRIVER; + + id = ida_alloc_min(&inst->dpb_ids, VB2_MAX_FRAME, GFP_KERNEL); + if (id < 0) { + ret = id; + goto fail; + } + + buf->dpb_out_tag = id; + + list_add_tail(&buf->list, &inst->dpbbufs); + } + + return 0; + +fail: + kfree(buf); + venus_helper_free_dpb_bufs(inst); + return ret; +} +EXPORT_SYMBOL_GPL(venus_helper_alloc_dpb_bufs); + +static int intbufs_set_buffer(struct venus_inst *inst, u32 type) +{ + struct venus_core *core = inst->core; + struct device *dev = core->dev; + struct hfi_buffer_requirements bufreq; + struct hfi_buffer_desc bd; + struct intbuf *buf; + unsigned int i; + int ret; + + ret = venus_helper_get_bufreq(inst, type, &bufreq); + if (ret) + return 0; + + if (!bufreq.size) + return 0; + + for (i = 0; i < bufreq.count_actual; i++) { + buf = kzalloc(sizeof(*buf), GFP_KERNEL); + if (!buf) { + ret = -ENOMEM; + goto fail; + } + + buf->type = bufreq.type; + buf->size = bufreq.size; + buf->attrs = DMA_ATTR_WRITE_COMBINE | + DMA_ATTR_NO_KERNEL_MAPPING; + buf->va = dma_alloc_attrs(dev, buf->size, &buf->da, GFP_KERNEL, + buf->attrs); + if (!buf->va) { + ret = -ENOMEM; + goto fail; + } + + memset(&bd, 0, sizeof(bd)); + bd.buffer_size = buf->size; + bd.buffer_type = buf->type; + bd.num_buffers = 1; + bd.device_addr = buf->da; + + ret = hfi_session_set_buffers(inst, &bd); + if (ret) { + dev_err(dev, "set session buffers failed\n"); + goto dma_free; + } + + list_add_tail(&buf->list, &inst->internalbufs); + } + + return 0; + +dma_free: + dma_free_attrs(dev, buf->size, buf->va, buf->da, buf->attrs); +fail: + kfree(buf); + return ret; +} + +static int intbufs_unset_buffers(struct venus_inst *inst) +{ + struct hfi_buffer_desc bd = {0}; + struct intbuf *buf, *n; + int ret = 0; + + list_for_each_entry_safe(buf, n, &inst->internalbufs, list) { + bd.buffer_size = buf->size; + bd.buffer_type = buf->type; + bd.num_buffers = 1; + bd.device_addr = buf->da; + bd.response_required = true; + + ret = hfi_session_unset_buffers(inst, &bd); + + list_del_init(&buf->list); + dma_free_attrs(inst->core->dev, buf->size, buf->va, buf->da, + buf->attrs); + kfree(buf); + } + + return ret; +} + +static const unsigned int intbuf_types_1xx[] = { + HFI_BUFFER_INTERNAL_SCRATCH(HFI_VERSION_1XX), + HFI_BUFFER_INTERNAL_SCRATCH_1(HFI_VERSION_1XX), + HFI_BUFFER_INTERNAL_SCRATCH_2(HFI_VERSION_1XX), + HFI_BUFFER_INTERNAL_PERSIST, + HFI_BUFFER_INTERNAL_PERSIST_1, +}; 
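+ +/* + * The intbuf_types_* arrays (1xx above, 4xx and 6xx below) list the internal + * scratch and persist buffer types for each HFI version; + * venus_helper_intbufs_alloc() picks the array matching the core's HFI + * version and calls intbufs_set_buffer() for every listed type, which + * allocates the DMA memory and hands it to the firmware via + * hfi_session_set_buffers(). + */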
+ +static const unsigned int intbuf_types_4xx[] = { + HFI_BUFFER_INTERNAL_SCRATCH(HFI_VERSION_4XX), + HFI_BUFFER_INTERNAL_SCRATCH_1(HFI_VERSION_4XX), + HFI_BUFFER_INTERNAL_SCRATCH_2(HFI_VERSION_4XX), + HFI_BUFFER_INTERNAL_PERSIST, + HFI_BUFFER_INTERNAL_PERSIST_1, +}; + +static const unsigned int intbuf_types_6xx[] = { + HFI_BUFFER_INTERNAL_SCRATCH(HFI_VERSION_6XX), + HFI_BUFFER_INTERNAL_SCRATCH_1(HFI_VERSION_6XX), + HFI_BUFFER_INTERNAL_SCRATCH_2(HFI_VERSION_6XX), + HFI_BUFFER_INTERNAL_PERSIST, + HFI_BUFFER_INTERNAL_PERSIST_1, +}; + +int venus_helper_intbufs_alloc(struct venus_inst *inst) +{ + const unsigned int *intbuf; + size_t arr_sz, i; + int ret; + + if (IS_V6(inst->core)) { + arr_sz = ARRAY_SIZE(intbuf_types_6xx); + intbuf = intbuf_types_6xx; + } else if (IS_V4(inst->core)) { + arr_sz = ARRAY_SIZE(intbuf_types_4xx); + intbuf = intbuf_types_4xx; + } else { + arr_sz = ARRAY_SIZE(intbuf_types_1xx); + intbuf = intbuf_types_1xx; + } + + for (i = 0; i < arr_sz; i++) { + ret = intbufs_set_buffer(inst, intbuf[i]); + if (ret) + goto error; + } + + return 0; + +error: + intbufs_unset_buffers(inst); + return ret; +} +EXPORT_SYMBOL_GPL(venus_helper_intbufs_alloc); + +int venus_helper_intbufs_free(struct venus_inst *inst) +{ + return intbufs_unset_buffers(inst); +} +EXPORT_SYMBOL_GPL(venus_helper_intbufs_free); + +int venus_helper_intbufs_realloc(struct venus_inst *inst) +{ + enum hfi_version ver = inst->core->res->hfi_version; + struct hfi_buffer_desc bd; + struct intbuf *buf, *n; + int ret; + + list_for_each_entry_safe(buf, n, &inst->internalbufs, list) { + if (buf->type == HFI_BUFFER_INTERNAL_PERSIST || + buf->type == HFI_BUFFER_INTERNAL_PERSIST_1) + continue; + + memset(&bd, 0, sizeof(bd)); + bd.buffer_size = buf->size; + bd.buffer_type = buf->type; + bd.num_buffers = 1; + bd.device_addr = buf->da; + bd.response_required = true; + + ret = hfi_session_unset_buffers(inst, &bd); + + dma_free_attrs(inst->core->dev, buf->size, buf->va, buf->da, + buf->attrs); + + list_del_init(&buf->list); + kfree(buf); + } + + ret = intbufs_set_buffer(inst, HFI_BUFFER_INTERNAL_SCRATCH(ver)); + if (ret) + goto err; + + ret = intbufs_set_buffer(inst, HFI_BUFFER_INTERNAL_SCRATCH_1(ver)); + if (ret) + goto err; + + ret = intbufs_set_buffer(inst, HFI_BUFFER_INTERNAL_SCRATCH_2(ver)); + if (ret) + goto err; + + return 0; +err: + return ret; +} +EXPORT_SYMBOL_GPL(venus_helper_intbufs_realloc); + +static void fill_buffer_desc(const struct venus_buffer *buf, + struct hfi_buffer_desc *bd, bool response) +{ + memset(bd, 0, sizeof(*bd)); + bd->buffer_type = HFI_BUFFER_OUTPUT; + bd->buffer_size = buf->size; + bd->num_buffers = 1; + bd->device_addr = buf->dma_addr; + bd->response_required = response; +} + +static void return_buf_error(struct venus_inst *inst, + struct vb2_v4l2_buffer *vbuf) +{ + struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx; + + if (vbuf->vb2_buf.type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) + v4l2_m2m_src_buf_remove_by_buf(m2m_ctx, vbuf); + else + v4l2_m2m_dst_buf_remove_by_buf(m2m_ctx, vbuf); + + v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR); +} + +static void +put_ts_metadata(struct venus_inst *inst, struct vb2_v4l2_buffer *vbuf) +{ + struct vb2_buffer *vb = &vbuf->vb2_buf; + unsigned int i; + int slot = -1; + u64 ts_us = vb->timestamp; + + for (i = 0; i < ARRAY_SIZE(inst->tss); i++) { + if (!inst->tss[i].used) { + slot = i; + break; + } + } + + if (slot == -1) { + dev_dbg(inst->core->dev, VDBGL "no free slot\n"); + return; + } + + do_div(ts_us, NSEC_PER_USEC); + + inst->tss[slot].used = true; + 
inst->tss[slot].flags = vbuf->flags; + inst->tss[slot].tc = vbuf->timecode; + inst->tss[slot].ts_us = ts_us; + inst->tss[slot].ts_ns = vb->timestamp; +} + +void venus_helper_get_ts_metadata(struct venus_inst *inst, u64 timestamp_us, + struct vb2_v4l2_buffer *vbuf) +{ + struct vb2_buffer *vb = &vbuf->vb2_buf; + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(inst->tss); ++i) { + if (!inst->tss[i].used) + continue; + + if (inst->tss[i].ts_us != timestamp_us) + continue; + + inst->tss[i].used = false; + vbuf->flags |= inst->tss[i].flags; + vbuf->timecode = inst->tss[i].tc; + vb->timestamp = inst->tss[i].ts_ns; + break; + } +} +EXPORT_SYMBOL_GPL(venus_helper_get_ts_metadata); + +static int +session_process_buf(struct venus_inst *inst, struct vb2_v4l2_buffer *vbuf) +{ + struct venus_buffer *buf = to_venus_buffer(vbuf); + struct vb2_buffer *vb = &vbuf->vb2_buf; + unsigned int type = vb->type; + struct hfi_frame_data fdata; + + memset(&fdata, 0, sizeof(fdata)); + fdata.alloc_len = buf->size; + fdata.device_addr = buf->dma_addr; + fdata.timestamp = vb->timestamp; + do_div(fdata.timestamp, NSEC_PER_USEC); + fdata.flags = 0; + fdata.clnt_data = vbuf->vb2_buf.index; + + if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { + fdata.buffer_type = HFI_BUFFER_INPUT; + fdata.filled_len = vb2_get_plane_payload(vb, 0); + fdata.offset = vb->planes[0].data_offset; + + if (vbuf->flags & V4L2_BUF_FLAG_LAST || !fdata.filled_len) + fdata.flags |= HFI_BUFFERFLAG_EOS; + + if (inst->session_type == VIDC_SESSION_TYPE_DEC) + put_ts_metadata(inst, vbuf); + + venus_pm_load_scale(inst); + } else if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { + if (inst->session_type == VIDC_SESSION_TYPE_ENC) + fdata.buffer_type = HFI_BUFFER_OUTPUT; + else + fdata.buffer_type = inst->opb_buftype; + fdata.filled_len = 0; + fdata.offset = 0; + } + + return hfi_session_process_buf(inst, &fdata); +} + +static bool is_dynamic_bufmode(struct venus_inst *inst) +{ + struct venus_core *core = inst->core; + struct hfi_plat_caps *caps; + + /* + * v4 doesn't send BUFFER_ALLOC_MODE_SUPPORTED property and supports + * dynamic buffer mode by default for HFI_BUFFER_OUTPUT/OUTPUT2. 
+ */ + if (IS_V4(core) || IS_V6(core)) + return true; + + caps = venus_caps_by_codec(core, inst->hfi_codec, inst->session_type); + if (!caps) + return false; + + return caps->cap_bufs_mode_dynamic; +} + +int venus_helper_unregister_bufs(struct venus_inst *inst) +{ + struct venus_buffer *buf, *n; + struct hfi_buffer_desc bd; + int ret = 0; + + if (is_dynamic_bufmode(inst)) + return 0; + + list_for_each_entry_safe(buf, n, &inst->registeredbufs, reg_list) { + fill_buffer_desc(buf, &bd, true); + ret = hfi_session_unset_buffers(inst, &bd); + list_del_init(&buf->reg_list); + } + + return ret; +} +EXPORT_SYMBOL_GPL(venus_helper_unregister_bufs); + +static int session_register_bufs(struct venus_inst *inst) +{ + struct venus_core *core = inst->core; + struct device *dev = core->dev; + struct hfi_buffer_desc bd; + struct venus_buffer *buf; + int ret = 0; + + if (is_dynamic_bufmode(inst)) + return 0; + + list_for_each_entry(buf, &inst->registeredbufs, reg_list) { + fill_buffer_desc(buf, &bd, false); + ret = hfi_session_set_buffers(inst, &bd); + if (ret) { + dev_err(dev, "%s: set buffer failed\n", __func__); + break; + } + } + + return ret; +} + +static u32 to_hfi_raw_fmt(u32 v4l2_fmt) +{ + switch (v4l2_fmt) { + case V4L2_PIX_FMT_NV12: + return HFI_COLOR_FORMAT_NV12; + case V4L2_PIX_FMT_NV21: + return HFI_COLOR_FORMAT_NV21; + case V4L2_PIX_FMT_QC08C: + return HFI_COLOR_FORMAT_NV12_UBWC; + case V4L2_PIX_FMT_QC10C: + return HFI_COLOR_FORMAT_YUV420_TP10_UBWC; + case V4L2_PIX_FMT_P010: + return HFI_COLOR_FORMAT_P010; + default: + break; + } + + return 0; +} + +static int platform_get_bufreq(struct venus_inst *inst, u32 buftype, + struct hfi_buffer_requirements *req) +{ + enum hfi_version version = inst->core->res->hfi_version; + const struct hfi_platform *hfi_plat; + struct hfi_plat_buffers_params params; + bool is_dec = inst->session_type == VIDC_SESSION_TYPE_DEC; + struct venc_controls *enc_ctr = &inst->controls.enc; + + hfi_plat = hfi_platform_get(version); + + if (!hfi_plat || !hfi_plat->bufreq) + return -EINVAL; + + params.version = version; + params.num_vpp_pipes = inst->core->res->num_vpp_pipes; + + if (is_dec) { + params.width = inst->width; + params.height = inst->height; + params.out_width = inst->out_width; + params.out_height = inst->out_height; + params.codec = inst->fmt_out->pixfmt; + params.hfi_color_fmt = to_hfi_raw_fmt(inst->fmt_cap->pixfmt); + params.dec.max_mbs_per_frame = mbs_per_frame_max(inst); + params.dec.buffer_size_limit = 0; + params.dec.is_secondary_output = + inst->opb_buftype == HFI_BUFFER_OUTPUT2; + if (params.dec.is_secondary_output) + params.hfi_dpb_color_fmt = inst->dpb_fmt; + params.dec.is_interlaced = + inst->pic_struct != HFI_INTERLACE_FRAME_PROGRESSIVE; + } else { + params.width = inst->out_width; + params.height = inst->out_height; + params.codec = inst->fmt_cap->pixfmt; + params.hfi_color_fmt = to_hfi_raw_fmt(inst->fmt_out->pixfmt); + params.enc.work_mode = VIDC_WORK_MODE_2; + params.enc.rc_type = HFI_RATE_CONTROL_OFF; + if (enc_ctr->bitrate_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_CQ) + params.enc.rc_type = HFI_RATE_CONTROL_CQ; + params.enc.num_b_frames = enc_ctr->num_b_frames; + params.enc.is_tenbit = inst->bit_depth == VIDC_BITDEPTH_10; + } + + return hfi_plat->bufreq(¶ms, inst->session_type, buftype, req); +} + +int venus_helper_get_bufreq(struct venus_inst *inst, u32 type, + struct hfi_buffer_requirements *req) +{ + u32 ptype = HFI_PROPERTY_CONFIG_BUFFER_REQUIREMENTS; + enum hfi_version ver = inst->core->res->hfi_version; + union hfi_get_property hprop; + unsigned 
int i; + int ret; + + memset(req, 0, sizeof(*req)); + + if (type == HFI_BUFFER_OUTPUT || type == HFI_BUFFER_OUTPUT2) + hfi_bufreq_set_count_min(req, ver, inst->fw_min_cnt); + + ret = platform_get_bufreq(inst, type, req); + if (!ret) { + if (type == HFI_BUFFER_OUTPUT || type == HFI_BUFFER_OUTPUT2) + inst->fw_min_cnt = hfi_bufreq_get_count_min(req, ver); + return 0; + } + + ret = hfi_session_get_property(inst, ptype, &hprop); + if (ret) + return ret; + + ret = -EINVAL; + + for (i = 0; i < HFI_BUFFER_TYPE_MAX; i++) { + if (hprop.bufreq[i].type != type) + continue; + + memcpy(req, &hprop.bufreq[i], sizeof(*req)); + ret = 0; + break; + } + + return ret; +} +EXPORT_SYMBOL_GPL(venus_helper_get_bufreq); + +struct id_mapping { + u32 hfi_id; + u32 v4l2_id; +}; + +static const struct id_mapping mpeg4_profiles[] = { + { HFI_MPEG4_PROFILE_SIMPLE, V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE }, + { HFI_MPEG4_PROFILE_ADVANCEDSIMPLE, V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_SIMPLE }, +}; + +static const struct id_mapping mpeg4_levels[] = { + { HFI_MPEG4_LEVEL_0, V4L2_MPEG_VIDEO_MPEG4_LEVEL_0 }, + { HFI_MPEG4_LEVEL_0b, V4L2_MPEG_VIDEO_MPEG4_LEVEL_0B }, + { HFI_MPEG4_LEVEL_1, V4L2_MPEG_VIDEO_MPEG4_LEVEL_1 }, + { HFI_MPEG4_LEVEL_2, V4L2_MPEG_VIDEO_MPEG4_LEVEL_2 }, + { HFI_MPEG4_LEVEL_3, V4L2_MPEG_VIDEO_MPEG4_LEVEL_3 }, + { HFI_MPEG4_LEVEL_4, V4L2_MPEG_VIDEO_MPEG4_LEVEL_4 }, + { HFI_MPEG4_LEVEL_5, V4L2_MPEG_VIDEO_MPEG4_LEVEL_5 }, +}; + +static const struct id_mapping mpeg2_profiles[] = { + { HFI_MPEG2_PROFILE_SIMPLE, V4L2_MPEG_VIDEO_MPEG2_PROFILE_SIMPLE }, + { HFI_MPEG2_PROFILE_MAIN, V4L2_MPEG_VIDEO_MPEG2_PROFILE_MAIN }, + { HFI_MPEG2_PROFILE_SNR, V4L2_MPEG_VIDEO_MPEG2_PROFILE_SNR_SCALABLE }, + { HFI_MPEG2_PROFILE_SPATIAL, V4L2_MPEG_VIDEO_MPEG2_PROFILE_SPATIALLY_SCALABLE }, + { HFI_MPEG2_PROFILE_HIGH, V4L2_MPEG_VIDEO_MPEG2_PROFILE_HIGH }, +}; + +static const struct id_mapping mpeg2_levels[] = { + { HFI_MPEG2_LEVEL_LL, V4L2_MPEG_VIDEO_MPEG2_LEVEL_LOW }, + { HFI_MPEG2_LEVEL_ML, V4L2_MPEG_VIDEO_MPEG2_LEVEL_MAIN }, + { HFI_MPEG2_LEVEL_H14, V4L2_MPEG_VIDEO_MPEG2_LEVEL_HIGH_1440 }, + { HFI_MPEG2_LEVEL_HL, V4L2_MPEG_VIDEO_MPEG2_LEVEL_HIGH }, +}; + +static const struct id_mapping h264_profiles[] = { + { HFI_H264_PROFILE_BASELINE, V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE }, + { HFI_H264_PROFILE_MAIN, V4L2_MPEG_VIDEO_H264_PROFILE_MAIN }, + { HFI_H264_PROFILE_HIGH, V4L2_MPEG_VIDEO_H264_PROFILE_HIGH }, + { HFI_H264_PROFILE_STEREO_HIGH, V4L2_MPEG_VIDEO_H264_PROFILE_STEREO_HIGH }, + { HFI_H264_PROFILE_MULTIVIEW_HIGH, V4L2_MPEG_VIDEO_H264_PROFILE_MULTIVIEW_HIGH }, + { HFI_H264_PROFILE_CONSTRAINED_BASE, V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE }, + { HFI_H264_PROFILE_CONSTRAINED_HIGH, V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_HIGH }, +}; + +static const struct id_mapping h264_levels[] = { + { HFI_H264_LEVEL_1, V4L2_MPEG_VIDEO_H264_LEVEL_1_0 }, + { HFI_H264_LEVEL_1b, V4L2_MPEG_VIDEO_H264_LEVEL_1B }, + { HFI_H264_LEVEL_11, V4L2_MPEG_VIDEO_H264_LEVEL_1_1 }, + { HFI_H264_LEVEL_12, V4L2_MPEG_VIDEO_H264_LEVEL_1_2 }, + { HFI_H264_LEVEL_13, V4L2_MPEG_VIDEO_H264_LEVEL_1_3 }, + { HFI_H264_LEVEL_2, V4L2_MPEG_VIDEO_H264_LEVEL_2_0 }, + { HFI_H264_LEVEL_21, V4L2_MPEG_VIDEO_H264_LEVEL_2_1 }, + { HFI_H264_LEVEL_22, V4L2_MPEG_VIDEO_H264_LEVEL_2_2 }, + { HFI_H264_LEVEL_3, V4L2_MPEG_VIDEO_H264_LEVEL_3_0 }, + { HFI_H264_LEVEL_31, V4L2_MPEG_VIDEO_H264_LEVEL_3_1 }, + { HFI_H264_LEVEL_32, V4L2_MPEG_VIDEO_H264_LEVEL_3_2 }, + { HFI_H264_LEVEL_4, V4L2_MPEG_VIDEO_H264_LEVEL_4_0 }, + { HFI_H264_LEVEL_41, V4L2_MPEG_VIDEO_H264_LEVEL_4_1 }, + { 
HFI_H264_LEVEL_42, V4L2_MPEG_VIDEO_H264_LEVEL_4_2 }, + { HFI_H264_LEVEL_5, V4L2_MPEG_VIDEO_H264_LEVEL_5_0 }, + { HFI_H264_LEVEL_51, V4L2_MPEG_VIDEO_H264_LEVEL_5_1 }, + { HFI_H264_LEVEL_52, V4L2_MPEG_VIDEO_H264_LEVEL_5_1 }, +}; + +static const struct id_mapping hevc_profiles[] = { + { HFI_HEVC_PROFILE_MAIN, V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN }, + { HFI_HEVC_PROFILE_MAIN_STILL_PIC, V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_STILL_PICTURE }, + { HFI_HEVC_PROFILE_MAIN10, V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10 }, +}; + +static const struct id_mapping hevc_levels[] = { + { HFI_HEVC_LEVEL_1, V4L2_MPEG_VIDEO_HEVC_LEVEL_1 }, + { HFI_HEVC_LEVEL_2, V4L2_MPEG_VIDEO_HEVC_LEVEL_2 }, + { HFI_HEVC_LEVEL_21, V4L2_MPEG_VIDEO_HEVC_LEVEL_2_1 }, + { HFI_HEVC_LEVEL_3, V4L2_MPEG_VIDEO_HEVC_LEVEL_3 }, + { HFI_HEVC_LEVEL_31, V4L2_MPEG_VIDEO_HEVC_LEVEL_3_1 }, + { HFI_HEVC_LEVEL_4, V4L2_MPEG_VIDEO_HEVC_LEVEL_4 }, + { HFI_HEVC_LEVEL_41, V4L2_MPEG_VIDEO_HEVC_LEVEL_4_1 }, + { HFI_HEVC_LEVEL_5, V4L2_MPEG_VIDEO_HEVC_LEVEL_5 }, + { HFI_HEVC_LEVEL_51, V4L2_MPEG_VIDEO_HEVC_LEVEL_5_1 }, + { HFI_HEVC_LEVEL_52, V4L2_MPEG_VIDEO_HEVC_LEVEL_5_2 }, + { HFI_HEVC_LEVEL_6, V4L2_MPEG_VIDEO_HEVC_LEVEL_6 }, + { HFI_HEVC_LEVEL_61, V4L2_MPEG_VIDEO_HEVC_LEVEL_6_1 }, + { HFI_HEVC_LEVEL_62, V4L2_MPEG_VIDEO_HEVC_LEVEL_6_2 }, +}; + +static const struct id_mapping vp8_profiles[] = { + { HFI_VPX_PROFILE_VERSION_0, V4L2_MPEG_VIDEO_VP8_PROFILE_0 }, + { HFI_VPX_PROFILE_VERSION_1, V4L2_MPEG_VIDEO_VP8_PROFILE_1 }, + { HFI_VPX_PROFILE_VERSION_2, V4L2_MPEG_VIDEO_VP8_PROFILE_2 }, + { HFI_VPX_PROFILE_VERSION_3, V4L2_MPEG_VIDEO_VP8_PROFILE_3 }, +}; + +static const struct id_mapping vp9_profiles[] = { + { HFI_VP9_PROFILE_P0, V4L2_MPEG_VIDEO_VP9_PROFILE_0 }, + { HFI_VP9_PROFILE_P2_10B, V4L2_MPEG_VIDEO_VP9_PROFILE_2 }, +}; + +static const struct id_mapping vp9_levels[] = { + { HFI_VP9_LEVEL_1, V4L2_MPEG_VIDEO_VP9_LEVEL_1_0 }, + { HFI_VP9_LEVEL_11, V4L2_MPEG_VIDEO_VP9_LEVEL_1_1 }, + { HFI_VP9_LEVEL_2, V4L2_MPEG_VIDEO_VP9_LEVEL_2_0}, + { HFI_VP9_LEVEL_21, V4L2_MPEG_VIDEO_VP9_LEVEL_2_1 }, + { HFI_VP9_LEVEL_3, V4L2_MPEG_VIDEO_VP9_LEVEL_3_0}, + { HFI_VP9_LEVEL_31, V4L2_MPEG_VIDEO_VP9_LEVEL_3_1 }, + { HFI_VP9_LEVEL_4, V4L2_MPEG_VIDEO_VP9_LEVEL_4_0 }, + { HFI_VP9_LEVEL_41, V4L2_MPEG_VIDEO_VP9_LEVEL_4_1 }, + { HFI_VP9_LEVEL_5, V4L2_MPEG_VIDEO_VP9_LEVEL_5_0 }, + { HFI_VP9_LEVEL_51, V4L2_MPEG_VIDEO_VP9_LEVEL_5_1 }, + { HFI_VP9_LEVEL_6, V4L2_MPEG_VIDEO_VP9_LEVEL_6_0 }, + { HFI_VP9_LEVEL_61, V4L2_MPEG_VIDEO_VP9_LEVEL_6_1 }, +}; + +static u32 find_v4l2_id(u32 hfi_id, const struct id_mapping *array, unsigned int array_sz) +{ + unsigned int i; + + if (!array || !array_sz) + return 0; + + for (i = 0; i < array_sz; i++) + if (hfi_id == array[i].hfi_id) + return array[i].v4l2_id; + + return 0; +} + +static u32 find_hfi_id(u32 v4l2_id, const struct id_mapping *array, unsigned int array_sz) +{ + unsigned int i; + + if (!array || !array_sz) + return 0; + + for (i = 0; i < array_sz; i++) + if (v4l2_id == array[i].v4l2_id) + return array[i].hfi_id; + + return 0; +} + +static void +v4l2_id_profile_level(u32 hfi_codec, struct hfi_profile_level *pl, u32 *profile, u32 *level) +{ + u32 hfi_pf = pl->profile; + u32 hfi_lvl = pl->level; + + switch (hfi_codec) { + case HFI_VIDEO_CODEC_H264: + *profile = find_v4l2_id(hfi_pf, h264_profiles, ARRAY_SIZE(h264_profiles)); + *level = find_v4l2_id(hfi_lvl, h264_levels, ARRAY_SIZE(h264_levels)); + break; + case HFI_VIDEO_CODEC_MPEG2: + *profile = find_v4l2_id(hfi_pf, mpeg2_profiles, ARRAY_SIZE(mpeg2_profiles)); + *level = find_v4l2_id(hfi_lvl, mpeg2_levels, 
ARRAY_SIZE(mpeg2_levels)); + break; + case HFI_VIDEO_CODEC_MPEG4: + *profile = find_v4l2_id(hfi_pf, mpeg4_profiles, ARRAY_SIZE(mpeg4_profiles)); + *level = find_v4l2_id(hfi_lvl, mpeg4_levels, ARRAY_SIZE(mpeg4_levels)); + break; + case HFI_VIDEO_CODEC_VP8: + *profile = find_v4l2_id(hfi_pf, vp8_profiles, ARRAY_SIZE(vp8_profiles)); + *level = 0; + break; + case HFI_VIDEO_CODEC_VP9: + *profile = find_v4l2_id(hfi_pf, vp9_profiles, ARRAY_SIZE(vp9_profiles)); + *level = find_v4l2_id(hfi_lvl, vp9_levels, ARRAY_SIZE(vp9_levels)); + break; + case HFI_VIDEO_CODEC_HEVC: + *profile = find_v4l2_id(hfi_pf, hevc_profiles, ARRAY_SIZE(hevc_profiles)); + *level = find_v4l2_id(hfi_lvl, hevc_levels, ARRAY_SIZE(hevc_levels)); + break; + default: + break; + } +} + +static void +hfi_id_profile_level(u32 hfi_codec, u32 v4l2_pf, u32 v4l2_lvl, struct hfi_profile_level *pl) +{ + switch (hfi_codec) { + case HFI_VIDEO_CODEC_H264: + pl->profile = find_hfi_id(v4l2_pf, h264_profiles, ARRAY_SIZE(h264_profiles)); + pl->level = find_hfi_id(v4l2_lvl, h264_levels, ARRAY_SIZE(h264_levels)); + break; + case HFI_VIDEO_CODEC_MPEG2: + pl->profile = find_hfi_id(v4l2_pf, mpeg2_profiles, ARRAY_SIZE(mpeg2_profiles)); + pl->level = find_hfi_id(v4l2_lvl, mpeg2_levels, ARRAY_SIZE(mpeg2_levels)); + break; + case HFI_VIDEO_CODEC_MPEG4: + pl->profile = find_hfi_id(v4l2_pf, mpeg4_profiles, ARRAY_SIZE(mpeg4_profiles)); + pl->level = find_hfi_id(v4l2_lvl, mpeg4_levels, ARRAY_SIZE(mpeg4_levels)); + break; + case HFI_VIDEO_CODEC_VP8: + pl->profile = find_hfi_id(v4l2_pf, vp8_profiles, ARRAY_SIZE(vp8_profiles)); + pl->level = 0; + break; + case HFI_VIDEO_CODEC_VP9: + pl->profile = find_hfi_id(v4l2_pf, vp9_profiles, ARRAY_SIZE(vp9_profiles)); + pl->level = find_hfi_id(v4l2_lvl, vp9_levels, ARRAY_SIZE(vp9_levels)); + break; + case HFI_VIDEO_CODEC_HEVC: + pl->profile = find_hfi_id(v4l2_pf, hevc_profiles, ARRAY_SIZE(hevc_profiles)); + pl->level = find_hfi_id(v4l2_lvl, hevc_levels, ARRAY_SIZE(hevc_levels)); + break; + default: + break; + } +} + +int venus_helper_get_profile_level(struct venus_inst *inst, u32 *profile, u32 *level) +{ + const u32 ptype = HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT; + union hfi_get_property hprop; + int ret; + + ret = hfi_session_get_property(inst, ptype, &hprop); + if (ret) + return ret; + + v4l2_id_profile_level(inst->hfi_codec, &hprop.profile_level, profile, level); + + return 0; +} +EXPORT_SYMBOL_GPL(venus_helper_get_profile_level); + +int venus_helper_set_profile_level(struct venus_inst *inst, u32 profile, u32 level) +{ + const u32 ptype = HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT; + struct hfi_profile_level pl; + + hfi_id_profile_level(inst->hfi_codec, profile, level, &pl); + + return hfi_session_set_property(inst, ptype, &pl); +} +EXPORT_SYMBOL_GPL(venus_helper_set_profile_level); + +static u32 get_framesize_raw_nv12(u32 width, u32 height) +{ + u32 y_stride, uv_stride, y_plane; + u32 y_sclines, uv_sclines, uv_plane; + u32 size; + + y_stride = ALIGN(width, 128); + uv_stride = ALIGN(width, 128); + y_sclines = ALIGN(height, 32); + uv_sclines = ALIGN(((height + 1) >> 1), 16); + + y_plane = y_stride * y_sclines; + uv_plane = uv_stride * uv_sclines + SZ_4K; + size = y_plane + uv_plane + SZ_8K; + + return ALIGN(size, SZ_4K); +} + +static u32 get_framesize_raw_nv12_ubwc(u32 width, u32 height) +{ + u32 y_meta_stride, y_meta_plane; + u32 y_stride, y_plane; + u32 uv_meta_stride, uv_meta_plane; + u32 uv_stride, uv_plane; + u32 extradata = SZ_16K; + + y_meta_stride = ALIGN(DIV_ROUND_UP(width, 32), 64); + y_meta_plane = y_meta_stride 
* ALIGN(DIV_ROUND_UP(height, 8), 16); + y_meta_plane = ALIGN(y_meta_plane, SZ_4K); + + y_stride = ALIGN(width, 128); + y_plane = ALIGN(y_stride * ALIGN(height, 32), SZ_4K); + + uv_meta_stride = ALIGN(DIV_ROUND_UP(width / 2, 16), 64); + uv_meta_plane = uv_meta_stride * ALIGN(DIV_ROUND_UP(height / 2, 8), 16); + uv_meta_plane = ALIGN(uv_meta_plane, SZ_4K); + + uv_stride = ALIGN(width, 128); + uv_plane = ALIGN(uv_stride * ALIGN(height / 2, 32), SZ_4K); + + return ALIGN(y_meta_plane + y_plane + uv_meta_plane + uv_plane + + max(extradata, y_stride * 48), SZ_4K); +} + +static u32 get_framesize_raw_p010(u32 width, u32 height) +{ + u32 y_plane, uv_plane, y_stride, uv_stride, y_sclines, uv_sclines; + + y_stride = ALIGN(width * 2, 128); + uv_stride = ALIGN(width * 2, 128); + y_sclines = ALIGN(height, 32); + uv_sclines = ALIGN((height + 1) >> 1, 16); + y_plane = y_stride * y_sclines; + uv_plane = uv_stride * uv_sclines; + + return ALIGN((y_plane + uv_plane), SZ_4K); +} + +static u32 get_framesize_raw_p010_ubwc(u32 width, u32 height) +{ + u32 y_stride, uv_stride, y_sclines, uv_sclines; + u32 y_ubwc_plane, uv_ubwc_plane; + u32 y_meta_stride, y_meta_scanlines; + u32 uv_meta_stride, uv_meta_scanlines; + u32 y_meta_plane, uv_meta_plane; + u32 size; + + y_stride = ALIGN(width * 2, 256); + uv_stride = ALIGN(width * 2, 256); + y_sclines = ALIGN(height, 16); + uv_sclines = ALIGN((height + 1) >> 1, 16); + + y_ubwc_plane = ALIGN(y_stride * y_sclines, SZ_4K); + uv_ubwc_plane = ALIGN(uv_stride * uv_sclines, SZ_4K); + y_meta_stride = ALIGN(DIV_ROUND_UP(width, 32), 64); + y_meta_scanlines = ALIGN(DIV_ROUND_UP(height, 4), 16); + y_meta_plane = ALIGN(y_meta_stride * y_meta_scanlines, SZ_4K); + uv_meta_stride = ALIGN(DIV_ROUND_UP((width + 1) >> 1, 16), 64); + uv_meta_scanlines = ALIGN(DIV_ROUND_UP((height + 1) >> 1, 4), 16); + uv_meta_plane = ALIGN(uv_meta_stride * uv_meta_scanlines, SZ_4K); + + size = y_ubwc_plane + uv_ubwc_plane + y_meta_plane + uv_meta_plane; + + return ALIGN(size, SZ_4K); +} + +static u32 get_framesize_raw_yuv420_tp10_ubwc(u32 width, u32 height) +{ + u32 y_stride, uv_stride, y_sclines, uv_sclines; + u32 y_ubwc_plane, uv_ubwc_plane; + u32 y_meta_stride, y_meta_scanlines; + u32 uv_meta_stride, uv_meta_scanlines; + u32 y_meta_plane, uv_meta_plane; + u32 extradata = SZ_16K; + u32 size; + + y_stride = ALIGN(width * 4 / 3, 256); + uv_stride = ALIGN(width * 4 / 3, 256); + y_sclines = ALIGN(height, 16); + uv_sclines = ALIGN((height + 1) >> 1, 16); + + y_ubwc_plane = ALIGN(y_stride * y_sclines, SZ_4K); + uv_ubwc_plane = ALIGN(uv_stride * uv_sclines, SZ_4K); + y_meta_stride = ALIGN(DIV_ROUND_UP(width, 48), 64); + y_meta_scanlines = ALIGN(DIV_ROUND_UP(height, 4), 16); + y_meta_plane = ALIGN(y_meta_stride * y_meta_scanlines, SZ_4K); + uv_meta_stride = ALIGN(DIV_ROUND_UP((width + 1) >> 1, 24), 64); + uv_meta_scanlines = ALIGN(DIV_ROUND_UP((height + 1) >> 1, 4), 16); + uv_meta_plane = ALIGN(uv_meta_stride * uv_meta_scanlines, SZ_4K); + + size = y_ubwc_plane + uv_ubwc_plane + y_meta_plane + uv_meta_plane; + size += max(extradata + SZ_8K, y_stride * 48); + + return ALIGN(size, SZ_4K); +} + +u32 venus_helper_get_framesz_raw(u32 hfi_fmt, u32 width, u32 height) +{ + switch (hfi_fmt) { + case HFI_COLOR_FORMAT_NV12: + case HFI_COLOR_FORMAT_NV21: + return get_framesize_raw_nv12(width, height); + case HFI_COLOR_FORMAT_NV12_UBWC: + return get_framesize_raw_nv12_ubwc(width, height); + case HFI_COLOR_FORMAT_P010: + return get_framesize_raw_p010(width, height); + case HFI_COLOR_FORMAT_P010_UBWC: + return 
get_framesize_raw_p010_ubwc(width, height); + case HFI_COLOR_FORMAT_YUV420_TP10_UBWC: + return get_framesize_raw_yuv420_tp10_ubwc(width, height); + default: + return 0; + } +} +EXPORT_SYMBOL_GPL(venus_helper_get_framesz_raw); + +u32 venus_helper_get_framesz(u32 v4l2_fmt, u32 width, u32 height) +{ + u32 hfi_fmt, sz; + bool compressed; + + switch (v4l2_fmt) { + case V4L2_PIX_FMT_MPEG: + case V4L2_PIX_FMT_H264: + case V4L2_PIX_FMT_H264_NO_SC: + case V4L2_PIX_FMT_H264_MVC: + case V4L2_PIX_FMT_H263: + case V4L2_PIX_FMT_MPEG1: + case V4L2_PIX_FMT_MPEG2: + case V4L2_PIX_FMT_MPEG4: + case V4L2_PIX_FMT_XVID: + case V4L2_PIX_FMT_VC1_ANNEX_G: + case V4L2_PIX_FMT_VC1_ANNEX_L: + case V4L2_PIX_FMT_VP8: + case V4L2_PIX_FMT_VP9: + case V4L2_PIX_FMT_HEVC: + compressed = true; + break; + default: + compressed = false; + break; + } + + if (compressed) { + sz = ALIGN(height, 32) * ALIGN(width, 32) * 3 / 2 / 2; + if (width < 1280 || height < 720) + sz *= 8; + return ALIGN(sz, SZ_4K); + } + + hfi_fmt = to_hfi_raw_fmt(v4l2_fmt); + if (!hfi_fmt) + return 0; + + return venus_helper_get_framesz_raw(hfi_fmt, width, height); +} +EXPORT_SYMBOL_GPL(venus_helper_get_framesz); + +int venus_helper_set_input_resolution(struct venus_inst *inst, + unsigned int width, unsigned int height) +{ + u32 ptype = HFI_PROPERTY_PARAM_FRAME_SIZE; + struct hfi_framesize fs; + + fs.buffer_type = HFI_BUFFER_INPUT; + fs.width = width; + fs.height = height; + + return hfi_session_set_property(inst, ptype, &fs); +} +EXPORT_SYMBOL_GPL(venus_helper_set_input_resolution); + +int venus_helper_set_output_resolution(struct venus_inst *inst, + unsigned int width, unsigned int height, + u32 buftype) +{ + u32 ptype = HFI_PROPERTY_PARAM_FRAME_SIZE; + struct hfi_framesize fs; + + fs.buffer_type = buftype; + fs.width = width; + fs.height = height; + + return hfi_session_set_property(inst, ptype, &fs); +} +EXPORT_SYMBOL_GPL(venus_helper_set_output_resolution); + +static u32 venus_helper_get_work_mode(struct venus_inst *inst) +{ + u32 mode; + u32 num_mbs; + + mode = VIDC_WORK_MODE_2; + if (inst->session_type == VIDC_SESSION_TYPE_DEC) { + num_mbs = (ALIGN(inst->height, 16) * ALIGN(inst->width, 16)) / 256; + if (inst->hfi_codec == HFI_VIDEO_CODEC_MPEG2 || + inst->pic_struct != HFI_INTERLACE_FRAME_PROGRESSIVE || + num_mbs <= NUM_MBS_720P) + mode = VIDC_WORK_MODE_1; + } else { + num_mbs = (ALIGN(inst->out_height, 16) * ALIGN(inst->out_width, 16)) / 256; + if (inst->hfi_codec == HFI_VIDEO_CODEC_VP8 && + num_mbs <= NUM_MBS_4K) + mode = VIDC_WORK_MODE_1; + } + + return mode; +} + +int venus_helper_set_work_mode(struct venus_inst *inst) +{ + const u32 ptype = HFI_PROPERTY_PARAM_WORK_MODE; + struct hfi_video_work_mode wm; + u32 mode; + + if (!IS_V4(inst->core) && !IS_V6(inst->core)) + return 0; + + mode = venus_helper_get_work_mode(inst); + wm.video_work_mode = mode; + return hfi_session_set_property(inst, ptype, &wm); +} +EXPORT_SYMBOL_GPL(venus_helper_set_work_mode); + +int venus_helper_set_format_constraints(struct venus_inst *inst) +{ + const u32 ptype = HFI_PROPERTY_PARAM_UNCOMPRESSED_PLANE_ACTUAL_CONSTRAINTS_INFO; + struct hfi_uncompressed_plane_actual_constraints_info pconstraint; + + if (!IS_V6(inst->core)) + return 0; + + if (inst->opb_fmt == HFI_COLOR_FORMAT_NV12_UBWC || + inst->opb_fmt == HFI_COLOR_FORMAT_YUV420_TP10_UBWC) + return 0; + + pconstraint.buffer_type = HFI_BUFFER_OUTPUT2; + pconstraint.num_planes = 2; + pconstraint.plane_format[0].stride_multiples = 128; + pconstraint.plane_format[0].max_stride = 8192; + 
pconstraint.plane_format[0].min_plane_buffer_height_multiple = 32; + pconstraint.plane_format[0].buffer_alignment = 256; + + pconstraint.plane_format[1].stride_multiples = 128; + pconstraint.plane_format[1].max_stride = 8192; + pconstraint.plane_format[1].min_plane_buffer_height_multiple = 16; + pconstraint.plane_format[1].buffer_alignment = 256; + + return hfi_session_set_property(inst, ptype, &pconstraint); +} +EXPORT_SYMBOL_GPL(venus_helper_set_format_constraints); + +int venus_helper_set_num_bufs(struct venus_inst *inst, unsigned int input_bufs, + unsigned int output_bufs, + unsigned int output2_bufs) +{ + u32 ptype = HFI_PROPERTY_PARAM_BUFFER_COUNT_ACTUAL; + struct hfi_buffer_count_actual buf_count; + int ret; + + buf_count.type = HFI_BUFFER_INPUT; + buf_count.count_actual = input_bufs; + + ret = hfi_session_set_property(inst, ptype, &buf_count); + if (ret) + return ret; + + buf_count.type = HFI_BUFFER_OUTPUT; + buf_count.count_actual = output_bufs; + + ret = hfi_session_set_property(inst, ptype, &buf_count); + if (ret) + return ret; + + if (output2_bufs) { + buf_count.type = HFI_BUFFER_OUTPUT2; + buf_count.count_actual = output2_bufs; + + ret = hfi_session_set_property(inst, ptype, &buf_count); + } + + return ret; +} +EXPORT_SYMBOL_GPL(venus_helper_set_num_bufs); + +int venus_helper_set_raw_format(struct venus_inst *inst, u32 hfi_format, + u32 buftype) +{ + const u32 ptype = HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SELECT; + struct hfi_uncompressed_format_select fmt; + + fmt.buffer_type = buftype; + fmt.format = hfi_format; + + return hfi_session_set_property(inst, ptype, &fmt); +} +EXPORT_SYMBOL_GPL(venus_helper_set_raw_format); + +int venus_helper_set_color_format(struct venus_inst *inst, u32 pixfmt) +{ + u32 hfi_format, buftype; + + if (inst->session_type == VIDC_SESSION_TYPE_DEC) + buftype = HFI_BUFFER_OUTPUT; + else if (inst->session_type == VIDC_SESSION_TYPE_ENC) + buftype = HFI_BUFFER_INPUT; + else + return -EINVAL; + + hfi_format = to_hfi_raw_fmt(pixfmt); + if (!hfi_format) + return -EINVAL; + + return venus_helper_set_raw_format(inst, hfi_format, buftype); +} +EXPORT_SYMBOL_GPL(venus_helper_set_color_format); + +int venus_helper_set_multistream(struct venus_inst *inst, bool out_en, + bool out2_en) +{ + struct hfi_multi_stream multi = {0}; + u32 ptype = HFI_PROPERTY_PARAM_VDEC_MULTI_STREAM; + int ret; + + multi.buffer_type = HFI_BUFFER_OUTPUT; + multi.enable = out_en; + + ret = hfi_session_set_property(inst, ptype, &multi); + if (ret) + return ret; + + multi.buffer_type = HFI_BUFFER_OUTPUT2; + multi.enable = out2_en; + + return hfi_session_set_property(inst, ptype, &multi); +} +EXPORT_SYMBOL_GPL(venus_helper_set_multistream); + +int venus_helper_set_dyn_bufmode(struct venus_inst *inst) +{ + const u32 ptype = HFI_PROPERTY_PARAM_BUFFER_ALLOC_MODE; + struct hfi_buffer_alloc_mode mode; + int ret; + + if (!is_dynamic_bufmode(inst)) + return 0; + + mode.type = HFI_BUFFER_OUTPUT; + mode.mode = HFI_BUFFER_MODE_DYNAMIC; + + ret = hfi_session_set_property(inst, ptype, &mode); + if (ret) + return ret; + + mode.type = HFI_BUFFER_OUTPUT2; + + return hfi_session_set_property(inst, ptype, &mode); +} +EXPORT_SYMBOL_GPL(venus_helper_set_dyn_bufmode); + +int venus_helper_set_bufsize(struct venus_inst *inst, u32 bufsize, u32 buftype) +{ + const u32 ptype = HFI_PROPERTY_PARAM_BUFFER_SIZE_ACTUAL; + struct hfi_buffer_size_actual bufsz; + + bufsz.type = buftype; + bufsz.size = bufsize; + + return hfi_session_set_property(inst, ptype, &bufsz); +} +EXPORT_SYMBOL_GPL(venus_helper_set_bufsize); + 
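For reference, the raw-buffer sizing done by get_framesize_raw_nv12() further up in helpers.c is plain stride/scanline alignment arithmetic. The standalone sketch below reproduces that arithmetic outside the kernel so the numbers can be checked by hand; the ALIGN, SZ_4K and SZ_8K macros and the framesize_nv12() name are local stand-ins for this illustration and are not part of the patch itself.

    #include <stdio.h>

    /* Local stand-ins for the kernel macros used by get_framesize_raw_nv12() */
    #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))
    #define SZ_4K 0x1000
    #define SZ_8K 0x2000

    /* Same arithmetic as get_framesize_raw_nv12() in helpers.c */
    static unsigned int framesize_nv12(unsigned int width, unsigned int height)
    {
            unsigned int y_stride = ALIGN(width, 128);
            unsigned int uv_stride = ALIGN(width, 128);
            unsigned int y_sclines = ALIGN(height, 32);
            unsigned int uv_sclines = ALIGN((height + 1) >> 1, 16);
            unsigned int y_plane = y_stride * y_sclines;
            unsigned int uv_plane = uv_stride * uv_sclines + SZ_4K;

            return ALIGN(y_plane + uv_plane + SZ_8K, SZ_4K);
    }

    int main(void)
    {
            /*
             * 1920x1080: y_plane = 1920 * 1088 = 2088960,
             * uv_plane = 1920 * 544 + 4096 = 1048576, plus SZ_8K of
             * padding; the sum, 3145728, is already 4 KiB aligned.
             */
            printf("%u\n", framesize_nv12(1920, 1080)); /* prints 3145728 */
            return 0;
    }

So a 1920x1080 NV12 frame works out to exactly 3 MiB, which is the kind of value venus_helper_get_framesz() hands back for V4L2_PIX_FMT_NV12 at that resolution once the pixel format has been mapped to HFI_COLOR_FORMAT_NV12.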
+unsigned int venus_helper_get_opb_size(struct venus_inst *inst) +{ + /* the encoder has only one output */ + if (inst->session_type == VIDC_SESSION_TYPE_ENC) + return inst->output_buf_size; + + if (inst->opb_buftype == HFI_BUFFER_OUTPUT) + return inst->output_buf_size; + else if (inst->opb_buftype == HFI_BUFFER_OUTPUT2) + return inst->output2_buf_size; + + return 0; +} +EXPORT_SYMBOL_GPL(venus_helper_get_opb_size); + +static void delayed_process_buf_func(struct work_struct *work) +{ + struct venus_buffer *buf, *n; + struct venus_inst *inst; + int ret; + + inst = container_of(work, struct venus_inst, delayed_process_work); + + mutex_lock(&inst->lock); + + if (!(inst->streamon_out & inst->streamon_cap)) + goto unlock; + + list_for_each_entry_safe(buf, n, &inst->delayed_process, ref_list) { + if (buf->flags & HFI_BUFFERFLAG_READONLY) + continue; + + ret = session_process_buf(inst, &buf->vb); + if (ret) + return_buf_error(inst, &buf->vb); + + list_del_init(&buf->ref_list); + } +unlock: + mutex_unlock(&inst->lock); +} + +void venus_helper_release_buf_ref(struct venus_inst *inst, unsigned int idx) +{ + struct venus_buffer *buf; + + list_for_each_entry(buf, &inst->registeredbufs, reg_list) { + if (buf->vb.vb2_buf.index == idx) { + buf->flags &= ~HFI_BUFFERFLAG_READONLY; + schedule_work(&inst->delayed_process_work); + break; + } + } +} +EXPORT_SYMBOL_GPL(venus_helper_release_buf_ref); + +void venus_helper_acquire_buf_ref(struct vb2_v4l2_buffer *vbuf) +{ + struct venus_buffer *buf = to_venus_buffer(vbuf); + + buf->flags |= HFI_BUFFERFLAG_READONLY; +} +EXPORT_SYMBOL_GPL(venus_helper_acquire_buf_ref); + +static int is_buf_refed(struct venus_inst *inst, struct vb2_v4l2_buffer *vbuf) +{ + struct venus_buffer *buf = to_venus_buffer(vbuf); + + if (buf->flags & HFI_BUFFERFLAG_READONLY) { + list_add_tail(&buf->ref_list, &inst->delayed_process); + schedule_work(&inst->delayed_process_work); + return 1; + } + + return 0; +} + +struct vb2_v4l2_buffer * +venus_helper_find_buf(struct venus_inst *inst, unsigned int type, u32 idx) +{ + struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx; + + if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) + return v4l2_m2m_src_buf_remove_by_idx(m2m_ctx, idx); + else + return v4l2_m2m_dst_buf_remove_by_idx(m2m_ctx, idx); +} +EXPORT_SYMBOL_GPL(venus_helper_find_buf); + +void venus_helper_change_dpb_owner(struct venus_inst *inst, + struct vb2_v4l2_buffer *vbuf, unsigned int type, + unsigned int buf_type, u32 tag) +{ + struct intbuf *dpb_buf; + + if (!V4L2_TYPE_IS_CAPTURE(type) || + buf_type != inst->dpb_buftype) + return; + + list_for_each_entry(dpb_buf, &inst->dpbbufs, list) + if (dpb_buf->dpb_out_tag == tag) { + dpb_buf->owned_by = DRIVER; + break; + } +} +EXPORT_SYMBOL_GPL(venus_helper_change_dpb_owner); + +int venus_helper_vb2_buf_init(struct vb2_buffer *vb) +{ + struct venus_inst *inst = vb2_get_drv_priv(vb->vb2_queue); + struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); + struct venus_buffer *buf = to_venus_buffer(vbuf); + + buf->size = vb2_plane_size(vb, 0); + buf->dma_addr = vb2_dma_contig_plane_dma_addr(vb, 0); + + if (vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) + list_add_tail(&buf->reg_list, &inst->registeredbufs); + + return 0; +} +EXPORT_SYMBOL_GPL(venus_helper_vb2_buf_init); + +int venus_helper_vb2_buf_prepare(struct vb2_buffer *vb) +{ + struct venus_inst *inst = vb2_get_drv_priv(vb->vb2_queue); + unsigned int out_buf_size = venus_helper_get_opb_size(inst); + struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); + + if (V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) { + 
if (vbuf->field == V4L2_FIELD_ANY) + vbuf->field = V4L2_FIELD_NONE; + if (vbuf->field != V4L2_FIELD_NONE) { + dev_err(inst->core->dev, "%s field isn't supported\n", + __func__); + return -EINVAL; + } + } + + if (vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE && + vb2_plane_size(vb, 0) < out_buf_size) + return -EINVAL; + if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE && + vb2_plane_size(vb, 0) < inst->input_buf_size) + return -EINVAL; + + return 0; +} +EXPORT_SYMBOL_GPL(venus_helper_vb2_buf_prepare); + +static void cache_payload(struct venus_inst *inst, struct vb2_buffer *vb) +{ + struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); + unsigned int idx = vbuf->vb2_buf.index; + + if (vbuf->vb2_buf.type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) + inst->payloads[idx] = vb2_get_plane_payload(vb, 0); +} + +void venus_helper_vb2_buf_queue(struct vb2_buffer *vb) +{ + struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); + struct venus_inst *inst = vb2_get_drv_priv(vb->vb2_queue); + struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx; + int ret; + + v4l2_m2m_buf_queue(m2m_ctx, vbuf); + + /* Skip processing queued capture buffers after LAST flag */ + if (inst->session_type == VIDC_SESSION_TYPE_DEC && + V4L2_TYPE_IS_CAPTURE(vb->vb2_queue->type) && + inst->codec_state == VENUS_DEC_STATE_DRC) + return; + + cache_payload(inst, vb); + + if (inst->session_type == VIDC_SESSION_TYPE_ENC && + !(inst->streamon_out && inst->streamon_cap)) + return; + + if (vb2_start_streaming_called(vb->vb2_queue)) { + ret = is_buf_refed(inst, vbuf); + if (ret) + return; + + ret = session_process_buf(inst, vbuf); + if (ret) + return_buf_error(inst, vbuf); + } +} +EXPORT_SYMBOL_GPL(venus_helper_vb2_buf_queue); + +void venus_helper_buffers_done(struct venus_inst *inst, unsigned int type, + enum vb2_buffer_state state) +{ + struct vb2_v4l2_buffer *buf; + + if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { + while ((buf = v4l2_m2m_src_buf_remove(inst->m2m_ctx))) + v4l2_m2m_buf_done(buf, state); + } else if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { + while ((buf = v4l2_m2m_dst_buf_remove(inst->m2m_ctx))) + v4l2_m2m_buf_done(buf, state); + } +} +EXPORT_SYMBOL_GPL(venus_helper_buffers_done); + +void venus_helper_vb2_stop_streaming(struct vb2_queue *q) +{ + struct venus_inst *inst = vb2_get_drv_priv(q); + struct venus_core *core = inst->core; + int ret; + + mutex_lock(&inst->lock); + + if (inst->streamon_out & inst->streamon_cap) { + ret = hfi_session_stop(inst); + ret |= hfi_session_unload_res(inst); + ret |= venus_helper_unregister_bufs(inst); + ret |= venus_helper_intbufs_free(inst); + ret |= hfi_session_deinit(inst); + + if (inst->session_error || test_bit(0, &core->sys_error)) + ret = -EIO; + + if (ret) + hfi_session_abort(inst); + + venus_helper_free_dpb_bufs(inst); + + venus_pm_load_scale(inst); + INIT_LIST_HEAD(&inst->registeredbufs); + } + + venus_helper_buffers_done(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE, + VB2_BUF_STATE_ERROR); + venus_helper_buffers_done(inst, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE, + VB2_BUF_STATE_ERROR); + + if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) + inst->streamon_out = 0; + else + inst->streamon_cap = 0; + + venus_pm_release_core(inst); + + inst->session_error = 0; + + mutex_unlock(&inst->lock); +} +EXPORT_SYMBOL_GPL(venus_helper_vb2_stop_streaming); + +void venus_helper_vb2_queue_error(struct venus_inst *inst) +{ + struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx; + struct vb2_queue *q; + + q = v4l2_m2m_get_src_vq(m2m_ctx); + vb2_queue_error(q); + q = v4l2_m2m_get_dst_vq(m2m_ctx); + 
vb2_queue_error(q); +} +EXPORT_SYMBOL_GPL(venus_helper_vb2_queue_error); + +int venus_helper_process_initial_cap_bufs(struct venus_inst *inst) +{ + struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx; + struct v4l2_m2m_buffer *buf, *n; + int ret; + + v4l2_m2m_for_each_dst_buf_safe(m2m_ctx, buf, n) { + ret = session_process_buf(inst, &buf->vb); + if (ret) { + return_buf_error(inst, &buf->vb); + return ret; + } + } + + return 0; +} +EXPORT_SYMBOL_GPL(venus_helper_process_initial_cap_bufs); + +int venus_helper_process_initial_out_bufs(struct venus_inst *inst) +{ + struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx; + struct v4l2_m2m_buffer *buf, *n; + int ret; + + v4l2_m2m_for_each_src_buf_safe(m2m_ctx, buf, n) { + ret = session_process_buf(inst, &buf->vb); + if (ret) { + return_buf_error(inst, &buf->vb); + return ret; + } + } + + return 0; +} +EXPORT_SYMBOL_GPL(venus_helper_process_initial_out_bufs); + +int venus_helper_vb2_start_streaming(struct venus_inst *inst) +{ + int ret; + + ret = venus_helper_intbufs_alloc(inst); + if (ret) + return ret; + + ret = session_register_bufs(inst); + if (ret) + goto err_bufs_free; + + venus_pm_load_scale(inst); + + ret = hfi_session_load_res(inst); + if (ret) + goto err_unreg_bufs; + + ret = hfi_session_start(inst); + if (ret) + goto err_unload_res; + + return 0; + +err_unload_res: + hfi_session_unload_res(inst); +err_unreg_bufs: + venus_helper_unregister_bufs(inst); +err_bufs_free: + venus_helper_intbufs_free(inst); + return ret; +} +EXPORT_SYMBOL_GPL(venus_helper_vb2_start_streaming); + +void venus_helper_m2m_device_run(void *priv) +{ + struct venus_inst *inst = priv; + struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx; + struct v4l2_m2m_buffer *buf, *n; + int ret; + + mutex_lock(&inst->lock); + + v4l2_m2m_for_each_dst_buf_safe(m2m_ctx, buf, n) { + ret = session_process_buf(inst, &buf->vb); + if (ret) + return_buf_error(inst, &buf->vb); + } + + v4l2_m2m_for_each_src_buf_safe(m2m_ctx, buf, n) { + ret = session_process_buf(inst, &buf->vb); + if (ret) + return_buf_error(inst, &buf->vb); + } + + mutex_unlock(&inst->lock); +} +EXPORT_SYMBOL_GPL(venus_helper_m2m_device_run); + +void venus_helper_m2m_job_abort(void *priv) +{ + struct venus_inst *inst = priv; + + v4l2_m2m_job_finish(inst->m2m_dev, inst->m2m_ctx); +} +EXPORT_SYMBOL_GPL(venus_helper_m2m_job_abort); + +int venus_helper_session_init(struct venus_inst *inst) +{ + enum hfi_version version = inst->core->res->hfi_version; + u32 session_type = inst->session_type; + u32 codec; + int ret; + + codec = inst->session_type == VIDC_SESSION_TYPE_DEC ? 
+ inst->fmt_out->pixfmt : inst->fmt_cap->pixfmt; + + ret = hfi_session_init(inst, codec); + if (ret) + return ret; + + inst->clk_data.vpp_freq = hfi_platform_get_codec_vpp_freq(version, codec, + session_type); + inst->clk_data.vsp_freq = hfi_platform_get_codec_vsp_freq(version, codec, + session_type); + inst->clk_data.low_power_freq = hfi_platform_get_codec_lp_freq(version, codec, + session_type); + + return 0; +} +EXPORT_SYMBOL_GPL(venus_helper_session_init); + +void venus_helper_init_instance(struct venus_inst *inst) +{ + if (inst->session_type == VIDC_SESSION_TYPE_DEC) { + INIT_LIST_HEAD(&inst->delayed_process); + INIT_WORK(&inst->delayed_process_work, + delayed_process_buf_func); + } +} +EXPORT_SYMBOL_GPL(venus_helper_init_instance); + +static bool find_fmt_from_caps(struct hfi_plat_caps *caps, u32 buftype, u32 fmt) +{ + unsigned int i; + + for (i = 0; i < caps->num_fmts; i++) { + if (caps->fmts[i].buftype == buftype && + caps->fmts[i].fmt == fmt) + return true; + } + + return false; +} + +int venus_helper_get_out_fmts(struct venus_inst *inst, u32 v4l2_fmt, + u32 *out_fmt, u32 *out2_fmt, bool ubwc) +{ + struct venus_core *core = inst->core; + struct hfi_plat_caps *caps; + u32 ubwc_fmt, fmt = to_hfi_raw_fmt(v4l2_fmt); + bool found, found_ubwc; + + *out_fmt = *out2_fmt = 0; + + if (!fmt) + return -EINVAL; + + caps = venus_caps_by_codec(core, inst->hfi_codec, inst->session_type); + if (!caps) + return -EINVAL; + + if (inst->bit_depth == VIDC_BITDEPTH_10 && inst->session_type == VIDC_SESSION_TYPE_DEC) { + found_ubwc = find_fmt_from_caps(caps, HFI_BUFFER_OUTPUT, + HFI_COLOR_FORMAT_YUV420_TP10_UBWC); + found = find_fmt_from_caps(caps, HFI_BUFFER_OUTPUT2, fmt); + if (found_ubwc && found) { + /* + * Hard-code DPB buffers to be 10bit UBWC + * until V4L2 is able to expose compressed/tiled + * formats to applications. 
+ */ + *out_fmt = HFI_COLOR_FORMAT_YUV420_TP10_UBWC; + *out2_fmt = fmt; + return 0; + } + } + + if (ubwc) { + ubwc_fmt = fmt | HFI_COLOR_FORMAT_UBWC_BASE; + found_ubwc = find_fmt_from_caps(caps, HFI_BUFFER_OUTPUT, + ubwc_fmt); + found = find_fmt_from_caps(caps, HFI_BUFFER_OUTPUT2, fmt); + + if (found_ubwc && found) { + *out_fmt = ubwc_fmt; + *out2_fmt = fmt; + return 0; + } + } + + found = find_fmt_from_caps(caps, HFI_BUFFER_OUTPUT, fmt); + if (found) { + *out_fmt = fmt; + *out2_fmt = 0; + return 0; + } + + found = find_fmt_from_caps(caps, HFI_BUFFER_OUTPUT2, fmt); + if (found) { + *out_fmt = 0; + *out2_fmt = fmt; + return 0; + } + + return -EINVAL; +} +EXPORT_SYMBOL_GPL(venus_helper_get_out_fmts); + +bool venus_helper_check_format(struct venus_inst *inst, u32 v4l2_pixfmt) +{ + struct venus_core *core = inst->core; + u32 fmt = to_hfi_raw_fmt(v4l2_pixfmt); + struct hfi_plat_caps *caps; + bool found; + + if (!fmt) + return false; + + caps = venus_caps_by_codec(core, inst->hfi_codec, inst->session_type); + if (!caps) + return false; + + found = find_fmt_from_caps(caps, HFI_BUFFER_OUTPUT, fmt); + if (found) + goto done; + + found = find_fmt_from_caps(caps, HFI_BUFFER_OUTPUT2, fmt); +done: + return found; +} +EXPORT_SYMBOL_GPL(venus_helper_check_format); + +int venus_helper_set_stride(struct venus_inst *inst, + unsigned int width, unsigned int height) +{ + const u32 ptype = HFI_PROPERTY_PARAM_UNCOMPRESSED_PLANE_ACTUAL_INFO; + + struct hfi_uncompressed_plane_actual_info plane_actual_info; + + plane_actual_info.buffer_type = HFI_BUFFER_INPUT; + plane_actual_info.num_planes = 2; + plane_actual_info.plane_format[0].actual_stride = width; + plane_actual_info.plane_format[0].actual_plane_buffer_height = height; + plane_actual_info.plane_format[1].actual_stride = width; + plane_actual_info.plane_format[1].actual_plane_buffer_height = height / 2; + + return hfi_session_set_property(inst, ptype, &plane_actual_info); +} +EXPORT_SYMBOL_GPL(venus_helper_set_stride); diff --git a/drivers/media/platform/qcom/venus/helpers.h b/drivers/media/platform/qcom/venus/helpers.h new file mode 100644 index 0000000000..358e4f39c9 --- /dev/null +++ b/drivers/media/platform/qcom/venus/helpers.h @@ -0,0 +1,74 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. + * Copyright (C) 2017 Linaro Ltd. 
+ */ +#ifndef __VENUS_HELPERS_H__ +#define __VENUS_HELPERS_H__ + +#include <media/videobuf2-v4l2.h> + +struct venus_inst; +struct venus_core; + +bool venus_helper_check_codec(struct venus_inst *inst, u32 v4l2_pixfmt); +struct vb2_v4l2_buffer *venus_helper_find_buf(struct venus_inst *inst, + unsigned int type, u32 idx); +void venus_helper_change_dpb_owner(struct venus_inst *inst, + struct vb2_v4l2_buffer *vbuf, unsigned int type, + unsigned int buf_type, u32 idx); +void venus_helper_buffers_done(struct venus_inst *inst, unsigned int type, + enum vb2_buffer_state state); +int venus_helper_vb2_buf_init(struct vb2_buffer *vb); +int venus_helper_vb2_buf_prepare(struct vb2_buffer *vb); +void venus_helper_vb2_buf_queue(struct vb2_buffer *vb); +void venus_helper_vb2_stop_streaming(struct vb2_queue *q); +int venus_helper_vb2_start_streaming(struct venus_inst *inst); +void venus_helper_vb2_queue_error(struct venus_inst *inst); +void venus_helper_m2m_device_run(void *priv); +void venus_helper_m2m_job_abort(void *priv); +int venus_helper_get_bufreq(struct venus_inst *inst, u32 type, + struct hfi_buffer_requirements *req); +u32 venus_helper_get_framesz_raw(u32 hfi_fmt, u32 width, u32 height); +u32 venus_helper_get_framesz(u32 v4l2_fmt, u32 width, u32 height); +int venus_helper_set_input_resolution(struct venus_inst *inst, + unsigned int width, unsigned int height); +int venus_helper_set_output_resolution(struct venus_inst *inst, + unsigned int width, unsigned int height, + u32 buftype); +int venus_helper_set_work_mode(struct venus_inst *inst); +int venus_helper_set_format_constraints(struct venus_inst *inst); +int venus_helper_set_num_bufs(struct venus_inst *inst, unsigned int input_bufs, + unsigned int output_bufs, + unsigned int output2_bufs); +int venus_helper_set_raw_format(struct venus_inst *inst, u32 hfi_format, + u32 buftype); +int venus_helper_set_color_format(struct venus_inst *inst, u32 fmt); +int venus_helper_set_dyn_bufmode(struct venus_inst *inst); +int venus_helper_set_bufsize(struct venus_inst *inst, u32 bufsize, u32 buftype); +int venus_helper_set_multistream(struct venus_inst *inst, bool out_en, + bool out2_en); +unsigned int venus_helper_get_opb_size(struct venus_inst *inst); +void venus_helper_acquire_buf_ref(struct vb2_v4l2_buffer *vbuf); +void venus_helper_release_buf_ref(struct venus_inst *inst, unsigned int idx); +void venus_helper_init_instance(struct venus_inst *inst); +int venus_helper_session_init(struct venus_inst *inst); +int venus_helper_get_out_fmts(struct venus_inst *inst, u32 fmt, u32 *out_fmt, + u32 *out2_fmt, bool ubwc); +bool venus_helper_check_format(struct venus_inst *inst, u32 v4l2_pixfmt); +int venus_helper_alloc_dpb_bufs(struct venus_inst *inst); +int venus_helper_free_dpb_bufs(struct venus_inst *inst); +int venus_helper_intbufs_alloc(struct venus_inst *inst); +int venus_helper_intbufs_free(struct venus_inst *inst); +int venus_helper_intbufs_realloc(struct venus_inst *inst); +int venus_helper_queue_dpb_bufs(struct venus_inst *inst); +int venus_helper_unregister_bufs(struct venus_inst *inst); +int venus_helper_process_initial_cap_bufs(struct venus_inst *inst); +int venus_helper_process_initial_out_bufs(struct venus_inst *inst); +void venus_helper_get_ts_metadata(struct venus_inst *inst, u64 timestamp_us, + struct vb2_v4l2_buffer *vbuf); +int venus_helper_get_profile_level(struct venus_inst *inst, u32 *profile, u32 *level); +int venus_helper_set_profile_level(struct venus_inst *inst, u32 profile, u32 level); +int venus_helper_set_stride(struct venus_inst *inst, 
unsigned int aligned_width, + unsigned int aligned_height); +#endif diff --git a/drivers/media/platform/qcom/venus/hfi.c b/drivers/media/platform/qcom/venus/hfi.c new file mode 100644 index 0000000000..e00aedb41d --- /dev/null +++ b/drivers/media/platform/qcom/venus/hfi.c @@ -0,0 +1,592 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. + * Copyright (C) 2017 Linaro Ltd. + */ +#include <linux/slab.h> +#include <linux/mutex.h> +#include <linux/list.h> +#include <linux/completion.h> +#include <linux/platform_device.h> +#include <linux/videodev2.h> + +#include "core.h" +#include "hfi.h" +#include "hfi_cmds.h" +#include "hfi_venus.h" + +#define TIMEOUT msecs_to_jiffies(1000) + +static u32 to_codec_type(u32 pixfmt) +{ + switch (pixfmt) { + case V4L2_PIX_FMT_H264: + case V4L2_PIX_FMT_H264_NO_SC: + return HFI_VIDEO_CODEC_H264; + case V4L2_PIX_FMT_H263: + return HFI_VIDEO_CODEC_H263; + case V4L2_PIX_FMT_MPEG1: + return HFI_VIDEO_CODEC_MPEG1; + case V4L2_PIX_FMT_MPEG2: + return HFI_VIDEO_CODEC_MPEG2; + case V4L2_PIX_FMT_MPEG4: + return HFI_VIDEO_CODEC_MPEG4; + case V4L2_PIX_FMT_VC1_ANNEX_G: + case V4L2_PIX_FMT_VC1_ANNEX_L: + return HFI_VIDEO_CODEC_VC1; + case V4L2_PIX_FMT_VP8: + return HFI_VIDEO_CODEC_VP8; + case V4L2_PIX_FMT_VP9: + return HFI_VIDEO_CODEC_VP9; + case V4L2_PIX_FMT_XVID: + return HFI_VIDEO_CODEC_DIVX; + case V4L2_PIX_FMT_HEVC: + return HFI_VIDEO_CODEC_HEVC; + default: + return 0; + } +} + +int hfi_core_init(struct venus_core *core) +{ + int ret = 0; + + mutex_lock(&core->lock); + + if (core->state >= CORE_INIT) + goto unlock; + + reinit_completion(&core->done); + + ret = core->ops->core_init(core); + if (ret) + goto unlock; + + ret = wait_for_completion_timeout(&core->done, TIMEOUT); + if (!ret) { + ret = -ETIMEDOUT; + goto unlock; + } + + ret = 0; + + if (core->error != HFI_ERR_NONE) { + ret = -EIO; + goto unlock; + } + + core->state = CORE_INIT; +unlock: + mutex_unlock(&core->lock); + return ret; +} + +int hfi_core_deinit(struct venus_core *core, bool blocking) +{ + int ret = 0, empty; + + mutex_lock(&core->lock); + + if (core->state == CORE_UNINIT) + goto unlock; + + empty = list_empty(&core->instances); + + if (!empty && !blocking) { + ret = -EBUSY; + goto unlock; + } + + if (!empty) { + mutex_unlock(&core->lock); + wait_var_event(&core->insts_count, + !atomic_read(&core->insts_count)); + mutex_lock(&core->lock); + } + + if (!core->ops) + goto unlock; + + ret = core->ops->core_deinit(core); + + if (!ret) + core->state = CORE_UNINIT; + +unlock: + mutex_unlock(&core->lock); + return ret; +} + +int hfi_core_suspend(struct venus_core *core) +{ + if (core->state != CORE_INIT) + return 0; + + return core->ops->suspend(core); +} + +int hfi_core_resume(struct venus_core *core, bool force) +{ + if (!force && core->state != CORE_INIT) + return 0; + + return core->ops->resume(core); +} + +int hfi_core_trigger_ssr(struct venus_core *core, u32 type) +{ + return core->ops->core_trigger_ssr(core, type); +} + +int hfi_core_ping(struct venus_core *core) +{ + int ret; + + mutex_lock(&core->lock); + + ret = core->ops->core_ping(core, 0xbeef); + if (ret) + goto unlock; + + ret = wait_for_completion_timeout(&core->done, TIMEOUT); + if (!ret) { + ret = -ETIMEDOUT; + goto unlock; + } + ret = 0; + if (core->error != HFI_ERR_NONE) + ret = -ENODEV; +unlock: + mutex_unlock(&core->lock); + return ret; +} + +static int wait_session_msg(struct venus_inst *inst) +{ + int ret; + + ret = wait_for_completion_timeout(&inst->done, TIMEOUT); + if 
(!ret) + return -ETIMEDOUT; + + if (inst->error != HFI_ERR_NONE) + return -EIO; + + return 0; +} + +int hfi_session_create(struct venus_inst *inst, const struct hfi_inst_ops *ops) +{ + struct venus_core *core = inst->core; + bool max; + int ret; + + if (!ops) + return -EINVAL; + + inst->state = INST_UNINIT; + init_completion(&inst->done); + inst->ops = ops; + + mutex_lock(&core->lock); + + if (test_bit(0, &inst->core->sys_error)) { + ret = -EIO; + goto unlock; + } + + max = atomic_add_unless(&core->insts_count, 1, + core->max_sessions_supported); + if (!max) { + ret = -EAGAIN; + } else { + list_add_tail(&inst->list, &core->instances); + ret = 0; + } + +unlock: + mutex_unlock(&core->lock); + + return ret; +} +EXPORT_SYMBOL_GPL(hfi_session_create); + +int hfi_session_init(struct venus_inst *inst, u32 pixfmt) +{ + struct venus_core *core = inst->core; + const struct hfi_ops *ops = core->ops; + int ret; + + /* + * If core shutdown is in progress or if we are in system + * recovery, return an error as during system error recovery + * session_init() can't pass successfully + */ + mutex_lock(&core->lock); + if (!core->ops || test_bit(0, &inst->core->sys_error)) { + mutex_unlock(&core->lock); + return -EIO; + } + mutex_unlock(&core->lock); + + if (inst->state != INST_UNINIT) + return -EALREADY; + + inst->hfi_codec = to_codec_type(pixfmt); + reinit_completion(&inst->done); + + ret = ops->session_init(inst, inst->session_type, inst->hfi_codec); + if (ret) + return ret; + + ret = wait_session_msg(inst); + if (ret) + return ret; + + inst->state = INST_INIT; + + return 0; +} +EXPORT_SYMBOL_GPL(hfi_session_init); + +void hfi_session_destroy(struct venus_inst *inst) +{ + struct venus_core *core = inst->core; + + mutex_lock(&core->lock); + list_del_init(&inst->list); + if (atomic_dec_and_test(&core->insts_count)) + wake_up_var(&core->insts_count); + mutex_unlock(&core->lock); +} +EXPORT_SYMBOL_GPL(hfi_session_destroy); + +int hfi_session_deinit(struct venus_inst *inst) +{ + const struct hfi_ops *ops = inst->core->ops; + int ret; + + if (inst->state == INST_UNINIT) + return 0; + + if (inst->state < INST_INIT) + return -EINVAL; + + if (test_bit(0, &inst->core->sys_error)) + goto done; + + reinit_completion(&inst->done); + + ret = ops->session_end(inst); + if (ret) + return ret; + + ret = wait_session_msg(inst); + if (ret) + return ret; + +done: + inst->state = INST_UNINIT; + + return 0; +} +EXPORT_SYMBOL_GPL(hfi_session_deinit); + +int hfi_session_start(struct venus_inst *inst) +{ + const struct hfi_ops *ops = inst->core->ops; + int ret; + + if (test_bit(0, &inst->core->sys_error)) + return -EIO; + + if (inst->state != INST_LOAD_RESOURCES) + return -EINVAL; + + reinit_completion(&inst->done); + + ret = ops->session_start(inst); + if (ret) + return ret; + + ret = wait_session_msg(inst); + if (ret) + return ret; + + inst->state = INST_START; + + return 0; +} +EXPORT_SYMBOL_GPL(hfi_session_start); + +int hfi_session_stop(struct venus_inst *inst) +{ + const struct hfi_ops *ops = inst->core->ops; + int ret; + + if (test_bit(0, &inst->core->sys_error)) + return -EIO; + + if (inst->state != INST_START) + return -EINVAL; + + reinit_completion(&inst->done); + + ret = ops->session_stop(inst); + if (ret) + return ret; + + ret = wait_session_msg(inst); + if (ret) + return ret; + + inst->state = INST_STOP; + + return 0; +} +EXPORT_SYMBOL_GPL(hfi_session_stop); + +int hfi_session_continue(struct venus_inst *inst) +{ + struct venus_core *core = inst->core; + + if (test_bit(0, &inst->core->sys_error)) + return -EIO; + + if 
(core->res->hfi_version == HFI_VERSION_1XX) + return 0; + + return core->ops->session_continue(inst); +} +EXPORT_SYMBOL_GPL(hfi_session_continue); + +int hfi_session_abort(struct venus_inst *inst) +{ + const struct hfi_ops *ops = inst->core->ops; + int ret; + + if (test_bit(0, &inst->core->sys_error)) + return -EIO; + + reinit_completion(&inst->done); + + ret = ops->session_abort(inst); + if (ret) + return ret; + + ret = wait_session_msg(inst); + if (ret) + return ret; + + return 0; +} +EXPORT_SYMBOL_GPL(hfi_session_abort); + +int hfi_session_load_res(struct venus_inst *inst) +{ + const struct hfi_ops *ops = inst->core->ops; + int ret; + + if (test_bit(0, &inst->core->sys_error)) + return -EIO; + + if (inst->state != INST_INIT) + return -EINVAL; + + reinit_completion(&inst->done); + + ret = ops->session_load_res(inst); + if (ret) + return ret; + + ret = wait_session_msg(inst); + if (ret) + return ret; + + inst->state = INST_LOAD_RESOURCES; + + return 0; +} + +int hfi_session_unload_res(struct venus_inst *inst) +{ + const struct hfi_ops *ops = inst->core->ops; + int ret; + + if (test_bit(0, &inst->core->sys_error)) + return -EIO; + + if (inst->state != INST_STOP) + return -EINVAL; + + reinit_completion(&inst->done); + + ret = ops->session_release_res(inst); + if (ret) + return ret; + + ret = wait_session_msg(inst); + if (ret) + return ret; + + inst->state = INST_RELEASE_RESOURCES; + + return 0; +} +EXPORT_SYMBOL_GPL(hfi_session_unload_res); + +int hfi_session_flush(struct venus_inst *inst, u32 type, bool block) +{ + const struct hfi_ops *ops = inst->core->ops; + int ret; + + if (test_bit(0, &inst->core->sys_error)) + return -EIO; + + reinit_completion(&inst->done); + + ret = ops->session_flush(inst, type); + if (ret) + return ret; + + if (block) { + ret = wait_session_msg(inst); + if (ret) + return ret; + } + + return 0; +} +EXPORT_SYMBOL_GPL(hfi_session_flush); + +int hfi_session_set_buffers(struct venus_inst *inst, struct hfi_buffer_desc *bd) +{ + const struct hfi_ops *ops = inst->core->ops; + + if (test_bit(0, &inst->core->sys_error)) + return -EIO; + + return ops->session_set_buffers(inst, bd); +} + +int hfi_session_unset_buffers(struct venus_inst *inst, + struct hfi_buffer_desc *bd) +{ + const struct hfi_ops *ops = inst->core->ops; + int ret; + + if (test_bit(0, &inst->core->sys_error)) + return -EIO; + + reinit_completion(&inst->done); + + ret = ops->session_unset_buffers(inst, bd); + if (ret) + return ret; + + if (!bd->response_required) + return 0; + + ret = wait_session_msg(inst); + if (ret) + return ret; + + return 0; +} + +int hfi_session_get_property(struct venus_inst *inst, u32 ptype, + union hfi_get_property *hprop) +{ + const struct hfi_ops *ops = inst->core->ops; + int ret; + + if (test_bit(0, &inst->core->sys_error)) + return -EIO; + + if (inst->state < INST_INIT || inst->state >= INST_STOP) + return -EINVAL; + + reinit_completion(&inst->done); + + ret = ops->session_get_property(inst, ptype); + if (ret) + return ret; + + ret = wait_session_msg(inst); + if (ret) + return ret; + + *hprop = inst->hprop; + + return 0; +} +EXPORT_SYMBOL_GPL(hfi_session_get_property); + +int hfi_session_set_property(struct venus_inst *inst, u32 ptype, void *pdata) +{ + const struct hfi_ops *ops = inst->core->ops; + + if (test_bit(0, &inst->core->sys_error)) + return -EIO; + + if (inst->state < INST_INIT || inst->state >= INST_STOP) + return -EINVAL; + + return ops->session_set_property(inst, ptype, pdata); +} +EXPORT_SYMBOL_GPL(hfi_session_set_property); + +int hfi_session_process_buf(struct 
venus_inst *inst, struct hfi_frame_data *fd) +{ + const struct hfi_ops *ops = inst->core->ops; + + if (test_bit(0, &inst->core->sys_error)) + return -EIO; + + if (fd->buffer_type == HFI_BUFFER_INPUT) + return ops->session_etb(inst, fd); + else if (fd->buffer_type == HFI_BUFFER_OUTPUT || + fd->buffer_type == HFI_BUFFER_OUTPUT2) + return ops->session_ftb(inst, fd); + + return -EINVAL; +} +EXPORT_SYMBOL_GPL(hfi_session_process_buf); + +irqreturn_t hfi_isr_thread(int irq, void *dev_id) +{ + struct venus_core *core = dev_id; + + return core->ops->isr_thread(core); +} + +irqreturn_t hfi_isr(int irq, void *dev) +{ + struct venus_core *core = dev; + + return core->ops->isr(core); +} + +int hfi_create(struct venus_core *core, const struct hfi_core_ops *ops) +{ + if (!ops) + return -EINVAL; + + atomic_set(&core->insts_count, 0); + core->core_ops = ops; + core->state = CORE_UNINIT; + init_completion(&core->done); + pkt_set_version(core->res->hfi_version); + + return venus_hfi_create(core); +} + +void hfi_destroy(struct venus_core *core) +{ + venus_hfi_destroy(core); +} + +void hfi_reinit(struct venus_core *core) +{ + venus_hfi_queues_reinit(core); +} diff --git a/drivers/media/platform/qcom/venus/hfi.h b/drivers/media/platform/qcom/venus/hfi.h new file mode 100644 index 0000000000..f25d412d65 --- /dev/null +++ b/drivers/media/platform/qcom/venus/hfi.h @@ -0,0 +1,178 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. + * Copyright (C) 2017 Linaro Ltd. + */ +#ifndef __HFI_H__ +#define __HFI_H__ + +#include <linux/interrupt.h> + +#include "hfi_helper.h" + +#define VIDC_SESSION_TYPE_VPE 0 +#define VIDC_SESSION_TYPE_ENC 1 +#define VIDC_SESSION_TYPE_DEC 2 + +#define VIDC_RESOURCE_NONE 0 +#define VIDC_RESOURCE_OCMEM 1 +#define VIDC_RESOURCE_VMEM 2 + +struct hfi_buffer_desc { + u32 buffer_type; + u32 buffer_size; + u32 num_buffers; + u32 device_addr; + u32 extradata_addr; + u32 extradata_size; + u32 response_required; +}; + +struct hfi_frame_data { + u32 buffer_type; + u32 device_addr; + u32 extradata_addr; + u64 timestamp; + u32 flags; + u32 offset; + u32 alloc_len; + u32 filled_len; + u32 mark_target; + u32 mark_data; + u32 clnt_data; + u32 extradata_size; +}; + +union hfi_get_property { + struct hfi_profile_level profile_level; + struct hfi_buffer_requirements bufreq[HFI_BUFFER_TYPE_MAX]; +}; + +/* HFI events */ +#define EVT_SYS_EVENT_CHANGE 1 +#define EVT_SYS_WATCHDOG_TIMEOUT 2 +#define EVT_SYS_ERROR 3 +#define EVT_SESSION_ERROR 4 + +/* HFI event callback structure */ +struct hfi_event_data { + u32 error; + u32 height; + u32 width; + u32 event_type; + u32 packet_buffer; + u32 extradata_buffer; + u32 tag; + u32 profile; + u32 level; + /* the following properties start appear from v4 onwards */ + u32 bit_depth; + u32 pic_struct; + u32 colour_space; + u32 entropy_mode; + u32 buf_count; + struct { + u32 left, top; + u32 width, height; + } input_crop; +}; + +/* define core states */ +#define CORE_UNINIT 0 +#define CORE_INIT 1 + +/* define instance states */ +#define INST_UNINIT 2 +#define INST_INIT 3 +#define INST_LOAD_RESOURCES 4 +#define INST_START 5 +#define INST_STOP 6 +#define INST_RELEASE_RESOURCES 7 + +struct venus_core; +struct venus_inst; + +struct hfi_core_ops { + void (*event_notify)(struct venus_core *core, u32 event); +}; + +struct hfi_inst_ops { + void (*buf_done)(struct venus_inst *inst, unsigned int buf_type, + u32 tag, u32 bytesused, u32 data_offset, u32 flags, + u32 hfi_flags, u64 timestamp_us); + void 
(*event_notify)(struct venus_inst *inst, u32 event, + struct hfi_event_data *data); + void (*flush_done)(struct venus_inst *inst); +}; + +struct hfi_ops { + int (*core_init)(struct venus_core *core); + int (*core_deinit)(struct venus_core *core); + int (*core_ping)(struct venus_core *core, u32 cookie); + int (*core_trigger_ssr)(struct venus_core *core, u32 trigger_type); + + int (*session_init)(struct venus_inst *inst, u32 session_type, + u32 codec); + int (*session_end)(struct venus_inst *inst); + int (*session_abort)(struct venus_inst *inst); + int (*session_flush)(struct venus_inst *inst, u32 flush_mode); + int (*session_start)(struct venus_inst *inst); + int (*session_stop)(struct venus_inst *inst); + int (*session_continue)(struct venus_inst *inst); + int (*session_etb)(struct venus_inst *inst, struct hfi_frame_data *fd); + int (*session_ftb)(struct venus_inst *inst, struct hfi_frame_data *fd); + int (*session_set_buffers)(struct venus_inst *inst, + struct hfi_buffer_desc *bd); + int (*session_unset_buffers)(struct venus_inst *inst, + struct hfi_buffer_desc *bd); + int (*session_load_res)(struct venus_inst *inst); + int (*session_release_res)(struct venus_inst *inst); + int (*session_parse_seq_hdr)(struct venus_inst *inst, u32 seq_hdr, + u32 seq_hdr_len); + int (*session_get_seq_hdr)(struct venus_inst *inst, u32 seq_hdr, + u32 seq_hdr_len); + int (*session_set_property)(struct venus_inst *inst, u32 ptype, + void *pdata); + int (*session_get_property)(struct venus_inst *inst, u32 ptype); + + int (*resume)(struct venus_core *core); + int (*suspend)(struct venus_core *core); + + /* interrupt operations */ + irqreturn_t (*isr)(struct venus_core *core); + irqreturn_t (*isr_thread)(struct venus_core *core); +}; + +int hfi_create(struct venus_core *core, const struct hfi_core_ops *ops); +void hfi_destroy(struct venus_core *core); +void hfi_reinit(struct venus_core *core); + +int hfi_core_init(struct venus_core *core); +int hfi_core_deinit(struct venus_core *core, bool blocking); +int hfi_core_suspend(struct venus_core *core); +int hfi_core_resume(struct venus_core *core, bool force); +int hfi_core_trigger_ssr(struct venus_core *core, u32 type); +int hfi_core_ping(struct venus_core *core); +int hfi_session_create(struct venus_inst *inst, const struct hfi_inst_ops *ops); +void hfi_session_destroy(struct venus_inst *inst); +int hfi_session_init(struct venus_inst *inst, u32 pixfmt); +int hfi_session_deinit(struct venus_inst *inst); +int hfi_session_start(struct venus_inst *inst); +int hfi_session_stop(struct venus_inst *inst); +int hfi_session_continue(struct venus_inst *inst); +int hfi_session_abort(struct venus_inst *inst); +int hfi_session_load_res(struct venus_inst *inst); +int hfi_session_unload_res(struct venus_inst *inst); +int hfi_session_flush(struct venus_inst *inst, u32 type, bool block); +int hfi_session_set_buffers(struct venus_inst *inst, + struct hfi_buffer_desc *bd); +int hfi_session_unset_buffers(struct venus_inst *inst, + struct hfi_buffer_desc *bd); +int hfi_session_get_property(struct venus_inst *inst, u32 ptype, + union hfi_get_property *hprop); +int hfi_session_set_property(struct venus_inst *inst, u32 ptype, void *pdata); +int hfi_session_process_buf(struct venus_inst *inst, struct hfi_frame_data *f); +irqreturn_t hfi_isr_thread(int irq, void *dev_id); +irqreturn_t hfi_isr(int irq, void *dev); + +#endif diff --git a/drivers/media/platform/qcom/venus/hfi_cmds.c b/drivers/media/platform/qcom/venus/hfi_cmds.c new file mode 100644 index 0000000000..3418d2dd93 --- /dev/null +++ 
b/drivers/media/platform/qcom/venus/hfi_cmds.c @@ -0,0 +1,1378 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. + * Copyright (C) 2017 Linaro Ltd. + */ +#include <linux/overflow.h> +#include <linux/errno.h> +#include <linux/hash.h> + +#include "hfi_cmds.h" + +static enum hfi_version hfi_ver; + +void pkt_sys_init(struct hfi_sys_init_pkt *pkt, u32 arch_type) +{ + pkt->hdr.size = sizeof(*pkt); + pkt->hdr.pkt_type = HFI_CMD_SYS_INIT; + pkt->arch_type = arch_type; +} + +void pkt_sys_pc_prep(struct hfi_sys_pc_prep_pkt *pkt) +{ + pkt->hdr.size = sizeof(*pkt); + pkt->hdr.pkt_type = HFI_CMD_SYS_PC_PREP; +} + +void pkt_sys_idle_indicator(struct hfi_sys_set_property_pkt *pkt, u32 enable) +{ + struct hfi_enable *hfi = (struct hfi_enable *)&pkt->data[1]; + + pkt->hdr.size = struct_size(pkt, data, 1) + sizeof(*hfi); + pkt->hdr.pkt_type = HFI_CMD_SYS_SET_PROPERTY; + pkt->num_properties = 1; + pkt->data[0] = HFI_PROPERTY_SYS_IDLE_INDICATOR; + hfi->enable = enable; +} + +void pkt_sys_debug_config(struct hfi_sys_set_property_pkt *pkt, u32 mode, + u32 config) +{ + struct hfi_debug_config *hfi; + + pkt->hdr.size = struct_size(pkt, data, 1) + sizeof(*hfi); + pkt->hdr.pkt_type = HFI_CMD_SYS_SET_PROPERTY; + pkt->num_properties = 1; + pkt->data[0] = HFI_PROPERTY_SYS_DEBUG_CONFIG; + hfi = (struct hfi_debug_config *)&pkt->data[1]; + hfi->config = config; + hfi->mode = mode; +} + +void pkt_sys_coverage_config(struct hfi_sys_set_property_pkt *pkt, u32 mode) +{ + pkt->hdr.size = struct_size(pkt, data, 2); + pkt->hdr.pkt_type = HFI_CMD_SYS_SET_PROPERTY; + pkt->num_properties = 1; + pkt->data[0] = HFI_PROPERTY_SYS_CONFIG_COVERAGE; + pkt->data[1] = mode; +} + +void pkt_sys_ubwc_config(struct hfi_sys_set_property_pkt *pkt, const struct hfi_ubwc_config *hfi) +{ + pkt->hdr.size = struct_size(pkt, data, 1) + sizeof(*hfi); + pkt->hdr.pkt_type = HFI_CMD_SYS_SET_PROPERTY; + pkt->num_properties = 1; + pkt->data[0] = HFI_PROPERTY_SYS_UBWC_CONFIG; + memcpy(&pkt->data[1], hfi, sizeof(*hfi)); +} + +int pkt_sys_set_resource(struct hfi_sys_set_resource_pkt *pkt, u32 id, u32 size, + u32 addr, void *cookie) +{ + pkt->hdr.size = sizeof(*pkt); + pkt->hdr.pkt_type = HFI_CMD_SYS_SET_RESOURCE; + pkt->resource_handle = hash32_ptr(cookie); + + switch (id) { + case VIDC_RESOURCE_OCMEM: + case VIDC_RESOURCE_VMEM: { + struct hfi_resource_ocmem *res = + (struct hfi_resource_ocmem *)&pkt->resource_data[0]; + + res->size = size; + res->mem = addr; + pkt->resource_type = HFI_RESOURCE_OCMEM; + pkt->hdr.size += sizeof(*res); + break; + } + case VIDC_RESOURCE_NONE: + default: + return -ENOTSUPP; + } + + return 0; +} + +int pkt_sys_unset_resource(struct hfi_sys_release_resource_pkt *pkt, u32 id, + u32 size, void *cookie) +{ + pkt->hdr.size = sizeof(*pkt); + pkt->hdr.pkt_type = HFI_CMD_SYS_RELEASE_RESOURCE; + pkt->resource_handle = hash32_ptr(cookie); + + switch (id) { + case VIDC_RESOURCE_OCMEM: + case VIDC_RESOURCE_VMEM: + pkt->resource_type = HFI_RESOURCE_OCMEM; + break; + case VIDC_RESOURCE_NONE: + break; + default: + return -ENOTSUPP; + } + + return 0; +} + +void pkt_sys_ping(struct hfi_sys_ping_pkt *pkt, u32 cookie) +{ + pkt->hdr.size = sizeof(*pkt); + pkt->hdr.pkt_type = HFI_CMD_SYS_PING; + pkt->client_data = cookie; +} + +void pkt_sys_power_control(struct hfi_sys_set_property_pkt *pkt, u32 enable) +{ + struct hfi_enable *hfi = (struct hfi_enable *)&pkt->data[1]; + + pkt->hdr.size = struct_size(pkt, data, 1) + sizeof(*hfi); + pkt->hdr.pkt_type = HFI_CMD_SYS_SET_PROPERTY; + 
pkt->num_properties = 1; + pkt->data[0] = HFI_PROPERTY_SYS_CODEC_POWER_PLANE_CTRL; + hfi->enable = enable; +} + +int pkt_sys_ssr_cmd(struct hfi_sys_test_ssr_pkt *pkt, u32 trigger_type) +{ + switch (trigger_type) { + case HFI_TEST_SSR_SW_ERR_FATAL: + case HFI_TEST_SSR_SW_DIV_BY_ZERO: + case HFI_TEST_SSR_HW_WDOG_IRQ: + break; + default: + return -EINVAL; + } + + pkt->hdr.size = sizeof(*pkt); + pkt->hdr.pkt_type = HFI_CMD_SYS_TEST_SSR; + pkt->trigger_type = trigger_type; + + return 0; +} + +void pkt_sys_image_version(struct hfi_sys_get_property_pkt *pkt) +{ + pkt->hdr.size = sizeof(*pkt); + pkt->hdr.pkt_type = HFI_CMD_SYS_GET_PROPERTY; + pkt->num_properties = 1; + pkt->data[0] = HFI_PROPERTY_SYS_IMAGE_VERSION; +} + +int pkt_session_init(struct hfi_session_init_pkt *pkt, void *cookie, + u32 session_type, u32 codec) +{ + if (!pkt || !cookie || !codec) + return -EINVAL; + + pkt->shdr.hdr.size = sizeof(*pkt); + pkt->shdr.hdr.pkt_type = HFI_CMD_SYS_SESSION_INIT; + pkt->shdr.session_id = hash32_ptr(cookie); + pkt->session_domain = session_type; + pkt->session_codec = codec; + + return 0; +} + +void pkt_session_cmd(struct hfi_session_pkt *pkt, u32 pkt_type, void *cookie) +{ + pkt->shdr.hdr.size = sizeof(*pkt); + pkt->shdr.hdr.pkt_type = pkt_type; + pkt->shdr.session_id = hash32_ptr(cookie); +} + +int pkt_session_set_buffers(struct hfi_session_set_buffers_pkt *pkt, + void *cookie, struct hfi_buffer_desc *bd) +{ + unsigned int i; + + if (!cookie || !pkt || !bd) + return -EINVAL; + + pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_SET_BUFFERS; + pkt->shdr.session_id = hash32_ptr(cookie); + pkt->buffer_size = bd->buffer_size; + pkt->min_buffer_size = bd->buffer_size; + pkt->num_buffers = bd->num_buffers; + + if (bd->buffer_type == HFI_BUFFER_OUTPUT || + bd->buffer_type == HFI_BUFFER_OUTPUT2) { + struct hfi_buffer_info *bi; + + pkt->extradata_size = bd->extradata_size; + pkt->shdr.hdr.size = sizeof(*pkt) + + bd->num_buffers * sizeof(*bi); + bi = (struct hfi_buffer_info *)pkt->buffer_info; + for (i = 0; i < pkt->num_buffers; i++) { + bi->buffer_addr = bd->device_addr; + bi->extradata_addr = bd->extradata_addr; + } + } else { + pkt->extradata_size = 0; + pkt->shdr.hdr.size = struct_size(pkt, buffer_info, + bd->num_buffers); + for (i = 0; i < pkt->num_buffers; i++) + pkt->buffer_info[i] = bd->device_addr; + } + + pkt->buffer_type = bd->buffer_type; + + return 0; +} + +int pkt_session_unset_buffers(struct hfi_session_release_buffer_pkt *pkt, + void *cookie, struct hfi_buffer_desc *bd) +{ + unsigned int i; + + if (!cookie || !pkt || !bd) + return -EINVAL; + + pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_RELEASE_BUFFERS; + pkt->shdr.session_id = hash32_ptr(cookie); + pkt->buffer_size = bd->buffer_size; + pkt->num_buffers = bd->num_buffers; + + if (bd->buffer_type == HFI_BUFFER_OUTPUT || + bd->buffer_type == HFI_BUFFER_OUTPUT2) { + struct hfi_buffer_info *bi; + + bi = (struct hfi_buffer_info *)pkt->buffer_info; + for (i = 0; i < pkt->num_buffers; i++) { + bi->buffer_addr = bd->device_addr; + bi->extradata_addr = bd->extradata_addr; + } + pkt->shdr.hdr.size = + sizeof(struct hfi_session_set_buffers_pkt) + + bd->num_buffers * sizeof(*bi); + } else { + for (i = 0; i < pkt->num_buffers; i++) + pkt->buffer_info[i] = bd->device_addr; + + pkt->extradata_size = 0; + pkt->shdr.hdr.size = + struct_size_t(struct hfi_session_set_buffers_pkt, + buffer_info, bd->num_buffers); + } + + pkt->response_req = bd->response_required; + pkt->buffer_type = bd->buffer_type; + + return 0; +} + +int pkt_session_etb_decoder(struct 
hfi_session_empty_buffer_compressed_pkt *pkt, + void *cookie, struct hfi_frame_data *in_frame) +{ + if (!cookie) + return -EINVAL; + + pkt->shdr.hdr.size = sizeof(*pkt); + pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_EMPTY_BUFFER; + pkt->shdr.session_id = hash32_ptr(cookie); + pkt->time_stamp_hi = upper_32_bits(in_frame->timestamp); + pkt->time_stamp_lo = lower_32_bits(in_frame->timestamp); + pkt->flags = in_frame->flags; + pkt->mark_target = in_frame->mark_target; + pkt->mark_data = in_frame->mark_data; + pkt->offset = in_frame->offset; + pkt->alloc_len = in_frame->alloc_len; + pkt->filled_len = in_frame->filled_len; + pkt->input_tag = in_frame->clnt_data; + pkt->packet_buffer = in_frame->device_addr; + + return 0; +} + +int pkt_session_etb_encoder( + struct hfi_session_empty_buffer_uncompressed_plane0_pkt *pkt, + void *cookie, struct hfi_frame_data *in_frame) +{ + if (!cookie || !in_frame->device_addr) + return -EINVAL; + + pkt->shdr.hdr.size = sizeof(*pkt); + pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_EMPTY_BUFFER; + pkt->shdr.session_id = hash32_ptr(cookie); + pkt->view_id = 0; + pkt->time_stamp_hi = upper_32_bits(in_frame->timestamp); + pkt->time_stamp_lo = lower_32_bits(in_frame->timestamp); + pkt->flags = in_frame->flags; + pkt->mark_target = in_frame->mark_target; + pkt->mark_data = in_frame->mark_data; + pkt->offset = in_frame->offset; + pkt->alloc_len = in_frame->alloc_len; + pkt->filled_len = in_frame->filled_len; + pkt->input_tag = in_frame->clnt_data; + pkt->packet_buffer = in_frame->device_addr; + pkt->extradata_buffer = in_frame->extradata_addr; + + return 0; +} + +int pkt_session_ftb(struct hfi_session_fill_buffer_pkt *pkt, void *cookie, + struct hfi_frame_data *out_frame) +{ + if (!cookie || !out_frame || !out_frame->device_addr) + return -EINVAL; + + pkt->shdr.hdr.size = sizeof(*pkt); + pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_FILL_BUFFER; + pkt->shdr.session_id = hash32_ptr(cookie); + + if (out_frame->buffer_type == HFI_BUFFER_OUTPUT) + pkt->stream_id = 0; + else if (out_frame->buffer_type == HFI_BUFFER_OUTPUT2) + pkt->stream_id = 1; + + pkt->output_tag = out_frame->clnt_data; + pkt->packet_buffer = out_frame->device_addr; + pkt->extradata_buffer = out_frame->extradata_addr; + pkt->alloc_len = out_frame->alloc_len; + pkt->filled_len = out_frame->filled_len; + pkt->offset = out_frame->offset; + pkt->data[0] = out_frame->extradata_size; + + return 0; +} + +int pkt_session_parse_seq_header( + struct hfi_session_parse_sequence_header_pkt *pkt, + void *cookie, u32 seq_hdr, u32 seq_hdr_len) +{ + if (!cookie || !seq_hdr || !seq_hdr_len) + return -EINVAL; + + pkt->shdr.hdr.size = sizeof(*pkt); + pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_PARSE_SEQUENCE_HEADER; + pkt->shdr.session_id = hash32_ptr(cookie); + pkt->header_len = seq_hdr_len; + pkt->packet_buffer = seq_hdr; + + return 0; +} + +int pkt_session_get_seq_hdr(struct hfi_session_get_sequence_header_pkt *pkt, + void *cookie, u32 seq_hdr, u32 seq_hdr_len) +{ + if (!cookie || !seq_hdr || !seq_hdr_len) + return -EINVAL; + + pkt->shdr.hdr.size = sizeof(*pkt); + pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_GET_SEQUENCE_HEADER; + pkt->shdr.session_id = hash32_ptr(cookie); + pkt->buffer_len = seq_hdr_len; + pkt->packet_buffer = seq_hdr; + + return 0; +} + +int pkt_session_flush(struct hfi_session_flush_pkt *pkt, void *cookie, u32 type) +{ + switch (type) { + case HFI_FLUSH_INPUT: + case HFI_FLUSH_OUTPUT: + case HFI_FLUSH_OUTPUT2: + case HFI_FLUSH_ALL: + break; + default: + return -EINVAL; + } + + pkt->shdr.hdr.size = sizeof(*pkt); + 
pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_FLUSH; + pkt->shdr.session_id = hash32_ptr(cookie); + pkt->flush_type = type; + + return 0; +} + +static int pkt_session_get_property_1x(struct hfi_session_get_property_pkt *pkt, + void *cookie, u32 ptype) +{ + switch (ptype) { + case HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT: + case HFI_PROPERTY_CONFIG_BUFFER_REQUIREMENTS: + break; + default: + return -EINVAL; + } + + pkt->shdr.hdr.size = sizeof(*pkt); + pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_GET_PROPERTY; + pkt->shdr.session_id = hash32_ptr(cookie); + pkt->num_properties = 1; + pkt->data[0] = ptype; + + return 0; +} + +static int pkt_session_set_property_1x(struct hfi_session_set_property_pkt *pkt, + void *cookie, u32 ptype, void *pdata) +{ + void *prop_data; + int ret = 0; + + if (!pkt || !cookie || !pdata) + return -EINVAL; + + prop_data = &pkt->data[1]; + + pkt->shdr.hdr.size = sizeof(*pkt); + pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_SET_PROPERTY; + pkt->shdr.session_id = hash32_ptr(cookie); + pkt->num_properties = 1; + pkt->data[0] = ptype; + + switch (ptype) { + case HFI_PROPERTY_CONFIG_FRAME_RATE: { + struct hfi_framerate *in = pdata, *frate = prop_data; + + frate->buffer_type = in->buffer_type; + frate->framerate = in->framerate; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*frate); + break; + } + case HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SELECT: { + struct hfi_uncompressed_format_select *in = pdata; + struct hfi_uncompressed_format_select *hfi = prop_data; + + hfi->buffer_type = in->buffer_type; + hfi->format = in->format; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*hfi); + break; + } + case HFI_PROPERTY_PARAM_FRAME_SIZE: { + struct hfi_framesize *in = pdata, *fsize = prop_data; + + fsize->buffer_type = in->buffer_type; + fsize->height = in->height; + fsize->width = in->width; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*fsize); + break; + } + case HFI_PROPERTY_CONFIG_REALTIME: { + struct hfi_enable *in = pdata, *en = prop_data; + + en->enable = in->enable; + pkt->shdr.hdr.size += sizeof(u32) * 2; + break; + } + case HFI_PROPERTY_PARAM_BUFFER_COUNT_ACTUAL: { + struct hfi_buffer_count_actual *in = pdata, *count = prop_data; + + count->count_actual = in->count_actual; + count->type = in->type; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*count); + break; + } + case HFI_PROPERTY_PARAM_BUFFER_SIZE_ACTUAL: { + struct hfi_buffer_size_actual *in = pdata, *sz = prop_data; + + sz->size = in->size; + sz->type = in->type; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*sz); + break; + } + case HFI_PROPERTY_PARAM_BUFFER_DISPLAY_HOLD_COUNT_ACTUAL: { + struct hfi_buffer_display_hold_count_actual *in = pdata; + struct hfi_buffer_display_hold_count_actual *count = prop_data; + + count->hold_count = in->hold_count; + count->type = in->type; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*count); + break; + } + case HFI_PROPERTY_PARAM_NAL_STREAM_FORMAT_SELECT: { + struct hfi_nal_stream_format_select *in = pdata; + struct hfi_nal_stream_format_select *fmt = prop_data; + + fmt->format = in->format; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*fmt); + break; + } + case HFI_PROPERTY_PARAM_VDEC_OUTPUT_ORDER: { + u32 *in = pdata; + + switch (*in) { + case HFI_OUTPUT_ORDER_DECODE: + case HFI_OUTPUT_ORDER_DISPLAY: + break; + default: + ret = -EINVAL; + break; + } + + pkt->data[1] = *in; + pkt->shdr.hdr.size += sizeof(u32) * 2; + break; + } + case HFI_PROPERTY_PARAM_VDEC_PICTURE_TYPE_DECODE: { + struct hfi_enable_picture *in = pdata, *en = prop_data; + + en->picture_type = in->picture_type; + pkt->shdr.hdr.size 
+= sizeof(u32) + sizeof(*en); + break; + } + case HFI_PROPERTY_PARAM_VDEC_OUTPUT2_KEEP_ASPECT_RATIO: { + struct hfi_enable *in = pdata, *en = prop_data; + + en->enable = in->enable; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en); + break; + } + case HFI_PROPERTY_PARAM_VDEC_ENABLE_SUFFICIENT_SEQCHANGE_EVENT: + case HFI_PROPERTY_CONFIG_VDEC_POST_LOOP_DEBLOCKER: { + struct hfi_enable *in = pdata; + struct hfi_enable *en = prop_data; + + en->enable = in->enable; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en); + break; + } + case HFI_PROPERTY_PARAM_VDEC_MULTI_STREAM: { + struct hfi_multi_stream *in = pdata, *multi = prop_data; + + multi->buffer_type = in->buffer_type; + multi->enable = in->enable; + multi->width = in->width; + multi->height = in->height; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*multi); + break; + } + case HFI_PROPERTY_PARAM_VDEC_DISPLAY_PICTURE_BUFFER_COUNT: { + struct hfi_display_picture_buffer_count *in = pdata; + struct hfi_display_picture_buffer_count *count = prop_data; + + count->count = in->count; + count->enable = in->enable; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*count); + break; + } + case HFI_PROPERTY_PARAM_DIVX_FORMAT: { + u32 *in = pdata; + + switch (*in) { + case HFI_DIVX_FORMAT_4: + case HFI_DIVX_FORMAT_5: + case HFI_DIVX_FORMAT_6: + break; + default: + ret = -EINVAL; + break; + } + + pkt->data[1] = *in; + pkt->shdr.hdr.size += sizeof(u32) * 2; + break; + } + case HFI_PROPERTY_CONFIG_VDEC_MB_ERROR_MAP_REPORTING: { + struct hfi_enable *in = pdata, *en = prop_data; + + en->enable = in->enable; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en); + break; + } + case HFI_PROPERTY_PARAM_VDEC_CONTINUE_DATA_TRANSFER: { + struct hfi_enable *in = pdata, *en = prop_data; + + en->enable = in->enable; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en); + break; + } + case HFI_PROPERTY_PARAM_VDEC_THUMBNAIL_MODE: { + struct hfi_enable *in = pdata, *en = prop_data; + + en->enable = in->enable; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en); + break; + } + case HFI_PROPERTY_CONFIG_VENC_SYNC_FRAME_SEQUENCE_HEADER: { + struct hfi_enable *in = pdata, *en = prop_data; + + en->enable = in->enable; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en); + break; + } + case HFI_PROPERTY_CONFIG_VENC_REQUEST_SYNC_FRAME: + pkt->shdr.hdr.size += sizeof(u32); + break; + case HFI_PROPERTY_PARAM_VENC_MPEG4_SHORT_HEADER: + break; + case HFI_PROPERTY_PARAM_VENC_MPEG4_AC_PREDICTION: + break; + case HFI_PROPERTY_CONFIG_VENC_TARGET_BITRATE: { + struct hfi_bitrate *in = pdata, *brate = prop_data; + + brate->bitrate = in->bitrate; + brate->layer_id = in->layer_id; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*brate); + break; + } + case HFI_PROPERTY_CONFIG_VENC_MAX_BITRATE: { + struct hfi_bitrate *in = pdata, *hfi = prop_data; + + hfi->bitrate = in->bitrate; + hfi->layer_id = in->layer_id; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*hfi); + break; + } + case HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT: { + struct hfi_profile_level *in = pdata, *pl = prop_data; + + pl->level = in->level; + pl->profile = in->profile; + if (pl->profile <= 0) + /* Profile not supported, falling back to high */ + pl->profile = HFI_H264_PROFILE_HIGH; + + if (!pl->level) + /* Level not supported, falling back to 1 */ + pl->level = 1; + + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*pl); + break; + } + case HFI_PROPERTY_PARAM_VENC_H264_ENTROPY_CONTROL: { + struct hfi_h264_entropy_control *in = pdata, *hfi = prop_data; + + hfi->entropy_mode = in->entropy_mode; + if (hfi->entropy_mode == HFI_H264_ENTROPY_CABAC) + 
hfi->cabac_model = in->cabac_model; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*hfi); + break; + } + case HFI_PROPERTY_PARAM_VENC_RATE_CONTROL: { + u32 *in = pdata; + + switch (*in) { + case HFI_RATE_CONTROL_OFF: + case HFI_RATE_CONTROL_CBR_CFR: + case HFI_RATE_CONTROL_CBR_VFR: + case HFI_RATE_CONTROL_VBR_CFR: + case HFI_RATE_CONTROL_VBR_VFR: + case HFI_RATE_CONTROL_CQ: + break; + default: + ret = -EINVAL; + break; + } + + pkt->data[1] = *in; + pkt->shdr.hdr.size += sizeof(u32) * 2; + break; + } + case HFI_PROPERTY_PARAM_VENC_MPEG4_TIME_RESOLUTION: { + struct hfi_mpeg4_time_resolution *in = pdata, *res = prop_data; + + res->time_increment_resolution = in->time_increment_resolution; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*res); + break; + } + case HFI_PROPERTY_PARAM_VENC_MPEG4_HEADER_EXTENSION: { + struct hfi_mpeg4_header_extension *in = pdata, *ext = prop_data; + + ext->header_extension = in->header_extension; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*ext); + break; + } + case HFI_PROPERTY_PARAM_VENC_H264_DEBLOCK_CONTROL: { + struct hfi_h264_db_control *in = pdata, *db = prop_data; + + switch (in->mode) { + case HFI_H264_DB_MODE_DISABLE: + case HFI_H264_DB_MODE_SKIP_SLICE_BOUNDARY: + case HFI_H264_DB_MODE_ALL_BOUNDARY: + break; + default: + ret = -EINVAL; + break; + } + + db->mode = in->mode; + db->slice_alpha_offset = in->slice_alpha_offset; + db->slice_beta_offset = in->slice_beta_offset; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*db); + break; + } + case HFI_PROPERTY_PARAM_VENC_SESSION_QP: { + struct hfi_quantization *in = pdata, *quant = prop_data; + + quant->qp_i = in->qp_i; + quant->qp_p = in->qp_p; + quant->qp_b = in->qp_b; + quant->layer_id = in->layer_id; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*quant); + break; + } + case HFI_PROPERTY_PARAM_VENC_SESSION_QP_RANGE: { + struct hfi_quantization_range *in = pdata, *range = prop_data; + u32 min_qp, max_qp; + + min_qp = in->min_qp; + max_qp = in->max_qp; + + /* We'll be packing in the qp, so make sure we + * won't be losing data when masking + */ + if (min_qp > 0xff || max_qp > 0xff) { + ret = -ERANGE; + break; + } + + /* When creating the packet, pack the qp value as + * 0xiippbb, where ii = qp range for I-frames, + * pp = qp range for P-frames, etc. 
+ */ + range->min_qp = min_qp | min_qp << 8 | min_qp << 16; + range->max_qp = max_qp | max_qp << 8 | max_qp << 16; + range->layer_id = in->layer_id; + + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*range); + break; + } + case HFI_PROPERTY_PARAM_VENC_VC1_PERF_CFG: { + struct hfi_vc1e_perf_cfg_type *in = pdata, *perf = prop_data; + + memcpy(perf->search_range_x_subsampled, + in->search_range_x_subsampled, + sizeof(perf->search_range_x_subsampled)); + memcpy(perf->search_range_y_subsampled, + in->search_range_y_subsampled, + sizeof(perf->search_range_y_subsampled)); + + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*perf); + break; + } + case HFI_PROPERTY_PARAM_VENC_MAX_NUM_B_FRAMES: { + struct hfi_max_num_b_frames *bframes = prop_data; + u32 *in = pdata; + + bframes->max_num_b_frames = *in; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*bframes); + break; + } + case HFI_PROPERTY_CONFIG_VENC_INTRA_PERIOD: { + struct hfi_intra_period *in = pdata, *intra = prop_data; + + intra->pframes = in->pframes; + intra->bframes = in->bframes; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*intra); + break; + } + case HFI_PROPERTY_CONFIG_VENC_IDR_PERIOD: { + struct hfi_idr_period *in = pdata, *idr = prop_data; + + idr->idr_period = in->idr_period; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*idr); + break; + } + case HFI_PROPERTY_PARAM_VDEC_CONCEAL_COLOR: { + struct hfi_conceal_color *color = prop_data; + u32 *in = pdata; + + color->conceal_color = *in & 0xff; + color->conceal_color |= ((*in >> 10) & 0xff) << 8; + color->conceal_color |= ((*in >> 20) & 0xff) << 16; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*color); + break; + } + case HFI_PROPERTY_CONFIG_VPE_OPERATIONS: { + struct hfi_operations_type *in = pdata, *ops = prop_data; + + switch (in->rotation) { + case HFI_ROTATE_NONE: + case HFI_ROTATE_90: + case HFI_ROTATE_180: + case HFI_ROTATE_270: + break; + default: + ret = -EINVAL; + break; + } + + switch (in->flip) { + case HFI_FLIP_NONE: + case HFI_FLIP_HORIZONTAL: + case HFI_FLIP_VERTICAL: + break; + default: + ret = -EINVAL; + break; + } + + ops->rotation = in->rotation; + ops->flip = in->flip; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*ops); + break; + } + case HFI_PROPERTY_PARAM_VENC_INTRA_REFRESH: { + struct hfi_intra_refresh *in = pdata, *intra = prop_data; + + switch (in->mode) { + case HFI_INTRA_REFRESH_NONE: + case HFI_INTRA_REFRESH_ADAPTIVE: + case HFI_INTRA_REFRESH_CYCLIC: + case HFI_INTRA_REFRESH_CYCLIC_ADAPTIVE: + case HFI_INTRA_REFRESH_RANDOM: + break; + default: + ret = -EINVAL; + break; + } + + intra->mode = in->mode; + intra->air_mbs = in->air_mbs; + intra->air_ref = in->air_ref; + intra->cir_mbs = in->cir_mbs; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*intra); + break; + } + case HFI_PROPERTY_PARAM_VENC_MULTI_SLICE_CONTROL: { + struct hfi_multi_slice_control *in = pdata, *multi = prop_data; + + switch (in->multi_slice) { + case HFI_MULTI_SLICE_OFF: + case HFI_MULTI_SLICE_GOB: + case HFI_MULTI_SLICE_BY_MB_COUNT: + case HFI_MULTI_SLICE_BY_BYTE_COUNT: + break; + default: + ret = -EINVAL; + break; + } + + multi->multi_slice = in->multi_slice; + multi->slice_size = in->slice_size; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*multi); + break; + } + case HFI_PROPERTY_PARAM_VENC_SLICE_DELIVERY_MODE: { + struct hfi_enable *in = pdata, *en = prop_data; + + en->enable = in->enable; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en); + break; + } + case HFI_PROPERTY_PARAM_VENC_H264_VUI_TIMING_INFO: { + struct hfi_h264_vui_timing_info *in = pdata, *vui = prop_data; + + vui->enable = 
in->enable; + vui->fixed_framerate = in->fixed_framerate; + vui->time_scale = in->time_scale; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*vui); + break; + } + case HFI_PROPERTY_CONFIG_VPE_DEINTERLACE: { + struct hfi_enable *in = pdata, *en = prop_data; + + en->enable = in->enable; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en); + break; + } + case HFI_PROPERTY_PARAM_VENC_H264_GENERATE_AUDNAL: { + struct hfi_enable *in = pdata, *en = prop_data; + + en->enable = in->enable; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en); + break; + } + case HFI_PROPERTY_PARAM_BUFFER_ALLOC_MODE: { + struct hfi_buffer_alloc_mode *in = pdata, *mode = prop_data; + + mode->type = in->type; + mode->mode = in->mode; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*mode); + break; + } + case HFI_PROPERTY_PARAM_VDEC_FRAME_ASSEMBLY: { + struct hfi_enable *in = pdata, *en = prop_data; + + en->enable = in->enable; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en); + break; + } + case HFI_PROPERTY_PARAM_VENC_H264_VUI_BITSTREAM_RESTRC: { + struct hfi_enable *in = pdata, *en = prop_data; + + en->enable = in->enable; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en); + break; + } + case HFI_PROPERTY_PARAM_VENC_PRESERVE_TEXT_QUALITY: { + struct hfi_enable *in = pdata, *en = prop_data; + + en->enable = in->enable; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en); + break; + } + case HFI_PROPERTY_PARAM_VDEC_SCS_THRESHOLD: { + struct hfi_scs_threshold *thres = prop_data; + u32 *in = pdata; + + thres->threshold_value = *in; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*thres); + break; + } + case HFI_PROPERTY_PARAM_MVC_BUFFER_LAYOUT: { + struct hfi_mvc_buffer_layout_descp_type *in = pdata; + struct hfi_mvc_buffer_layout_descp_type *mvc = prop_data; + + switch (in->layout_type) { + case HFI_MVC_BUFFER_LAYOUT_TOP_BOTTOM: + case HFI_MVC_BUFFER_LAYOUT_SEQ: + break; + default: + ret = -EINVAL; + break; + } + + mvc->layout_type = in->layout_type; + mvc->bright_view_first = in->bright_view_first; + mvc->ngap = in->ngap; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*mvc); + break; + } + case HFI_PROPERTY_PARAM_VENC_LTRMODE: { + struct hfi_ltr_mode *in = pdata, *ltr = prop_data; + + switch (in->ltr_mode) { + case HFI_LTR_MODE_DISABLE: + case HFI_LTR_MODE_MANUAL: + case HFI_LTR_MODE_PERIODIC: + break; + default: + ret = -EINVAL; + break; + } + + ltr->ltr_mode = in->ltr_mode; + ltr->ltr_count = in->ltr_count; + ltr->trust_mode = in->trust_mode; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*ltr); + break; + } + case HFI_PROPERTY_CONFIG_VENC_USELTRFRAME: { + struct hfi_ltr_use *in = pdata, *ltr_use = prop_data; + + ltr_use->frames = in->frames; + ltr_use->ref_ltr = in->ref_ltr; + ltr_use->use_constrnt = in->use_constrnt; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*ltr_use); + break; + } + case HFI_PROPERTY_CONFIG_VENC_MARKLTRFRAME: { + struct hfi_ltr_mark *in = pdata, *ltr_mark = prop_data; + + ltr_mark->mark_frame = in->mark_frame; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*ltr_mark); + break; + } + case HFI_PROPERTY_PARAM_VENC_HIER_P_MAX_NUM_ENH_LAYER: { + u32 *in = pdata; + + pkt->data[1] = *in; + pkt->shdr.hdr.size += sizeof(u32) * 2; + break; + } + case HFI_PROPERTY_CONFIG_VENC_HIER_P_ENH_LAYER: { + u32 *in = pdata; + + pkt->data[1] = *in; + pkt->shdr.hdr.size += sizeof(u32) * 2; + break; + } + case HFI_PROPERTY_PARAM_VENC_DISABLE_RC_TIMESTAMP: { + struct hfi_enable *in = pdata, *en = prop_data; + + en->enable = in->enable; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en); + break; + } + case 
HFI_PROPERTY_PARAM_VENC_INITIAL_QP: { + struct hfi_initial_quantization *in = pdata, *quant = prop_data; + + quant->init_qp_enable = in->init_qp_enable; + quant->qp_i = in->qp_i; + quant->qp_p = in->qp_p; + quant->qp_b = in->qp_b; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*quant); + break; + } + case HFI_PROPERTY_PARAM_VPE_COLOR_SPACE_CONVERSION: { + struct hfi_vpe_color_space_conversion *in = pdata; + struct hfi_vpe_color_space_conversion *csc = prop_data; + + memcpy(csc->csc_matrix, in->csc_matrix, + sizeof(csc->csc_matrix)); + memcpy(csc->csc_bias, in->csc_bias, sizeof(csc->csc_bias)); + memcpy(csc->csc_limit, in->csc_limit, sizeof(csc->csc_limit)); + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*csc); + break; + } + case HFI_PROPERTY_PARAM_VENC_VPX_ERROR_RESILIENCE_MODE: { + struct hfi_enable *in = pdata, *en = prop_data; + + en->enable = in->enable; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en); + break; + } + case HFI_PROPERTY_PARAM_VENC_H264_NAL_SVC_EXT: { + struct hfi_enable *in = pdata, *en = prop_data; + + en->enable = in->enable; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en); + break; + } + case HFI_PROPERTY_CONFIG_VENC_PERF_MODE: { + u32 *in = pdata; + + pkt->data[1] = *in; + pkt->shdr.hdr.size += sizeof(u32) * 2; + break; + } + case HFI_PROPERTY_PARAM_VENC_HIER_B_MAX_NUM_ENH_LAYER: { + u32 *in = pdata; + + pkt->data[1] = *in; + pkt->shdr.hdr.size += sizeof(u32) * 2; + break; + } + case HFI_PROPERTY_PARAM_VDEC_NONCP_OUTPUT2: { + struct hfi_enable *in = pdata, *en = prop_data; + + en->enable = in->enable; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en); + break; + } + case HFI_PROPERTY_PARAM_VENC_HIER_P_HYBRID_MODE: { + struct hfi_hybrid_hierp *in = pdata, *hierp = prop_data; + + hierp->layers = in->layers; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*hierp); + break; + } + case HFI_PROPERTY_PARAM_UNCOMPRESSED_PLANE_ACTUAL_INFO: { + struct hfi_uncompressed_plane_actual_info *in = pdata; + struct hfi_uncompressed_plane_actual_info *info = prop_data; + + info->buffer_type = in->buffer_type; + info->num_planes = in->num_planes; + info->plane_format[0] = in->plane_format[0]; + if (in->num_planes > 1) + info->plane_format[1] = in->plane_format[1]; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*info); + break; + } + case HFI_PROPERTY_PARAM_VENC_HDR10_PQ_SEI: + return -ENOTSUPP; + + /* FOLLOWING PROPERTIES ARE NOT IMPLEMENTED IN CORE YET */ + case HFI_PROPERTY_CONFIG_BUFFER_REQUIREMENTS: + case HFI_PROPERTY_CONFIG_PRIORITY: + case HFI_PROPERTY_CONFIG_BATCH_INFO: + case HFI_PROPERTY_SYS_IDLE_INDICATOR: + case HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SUPPORTED: + case HFI_PROPERTY_PARAM_INTERLACE_FORMAT_SUPPORTED: + case HFI_PROPERTY_PARAM_CHROMA_SITE: + case HFI_PROPERTY_PARAM_PROPERTIES_SUPPORTED: + case HFI_PROPERTY_PARAM_PROFILE_LEVEL_SUPPORTED: + case HFI_PROPERTY_PARAM_CAPABILITY_SUPPORTED: + case HFI_PROPERTY_PARAM_NAL_STREAM_FORMAT_SUPPORTED: + case HFI_PROPERTY_PARAM_MULTI_VIEW_FORMAT: + case HFI_PROPERTY_PARAM_MAX_SEQUENCE_HEADER_SIZE: + case HFI_PROPERTY_PARAM_CODEC_SUPPORTED: + case HFI_PROPERTY_PARAM_VDEC_MULTI_VIEW_SELECT: + case HFI_PROPERTY_PARAM_VDEC_MB_QUANTIZATION: + case HFI_PROPERTY_PARAM_VDEC_NUM_CONCEALED_MB: + case HFI_PROPERTY_PARAM_VDEC_H264_ENTROPY_SWITCHING: + case HFI_PROPERTY_PARAM_VENC_MULTI_SLICE_INFO: + default: + return -EINVAL; + } + + return ret; +} + +static int +pkt_session_get_property_3xx(struct hfi_session_get_property_pkt *pkt, + void *cookie, u32 ptype) +{ + int ret = 0; + + if (!pkt || !cookie) + return -EINVAL; + + pkt->shdr.hdr.size 
= sizeof(struct hfi_session_get_property_pkt); + pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_GET_PROPERTY; + pkt->shdr.session_id = hash32_ptr(cookie); + pkt->num_properties = 1; + + switch (ptype) { + case HFI_PROPERTY_CONFIG_VDEC_ENTROPY: + pkt->data[0] = HFI_PROPERTY_CONFIG_VDEC_ENTROPY; + break; + default: + ret = pkt_session_get_property_1x(pkt, cookie, ptype); + break; + } + + return ret; +} + +static int +pkt_session_set_property_3xx(struct hfi_session_set_property_pkt *pkt, + void *cookie, u32 ptype, void *pdata) +{ + void *prop_data; + int ret = 0; + + if (!pkt || !cookie || !pdata) + return -EINVAL; + + prop_data = &pkt->data[1]; + + pkt->shdr.hdr.size = sizeof(*pkt); + pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_SET_PROPERTY; + pkt->shdr.session_id = hash32_ptr(cookie); + pkt->num_properties = 1; + pkt->data[0] = ptype; + + /* + * Any session set property which is different in 3XX packetization + * should be added as a new case below. All unchanged session set + * properties will be handled in the default case. + */ + switch (ptype) { + case HFI_PROPERTY_PARAM_VDEC_MULTI_STREAM: { + struct hfi_multi_stream *in = pdata; + struct hfi_multi_stream_3x *multi = prop_data; + + multi->buffer_type = in->buffer_type; + multi->enable = in->enable; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*multi); + break; + } + case HFI_PROPERTY_PARAM_VENC_INTRA_REFRESH: { + struct hfi_intra_refresh *in = pdata; + struct hfi_intra_refresh_3x *intra = prop_data; + + switch (in->mode) { + case HFI_INTRA_REFRESH_NONE: + case HFI_INTRA_REFRESH_ADAPTIVE: + case HFI_INTRA_REFRESH_CYCLIC: + case HFI_INTRA_REFRESH_CYCLIC_ADAPTIVE: + case HFI_INTRA_REFRESH_RANDOM: + break; + default: + ret = -EINVAL; + break; + } + + intra->mode = in->mode; + intra->mbs = in->cir_mbs; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*intra); + break; + } + case HFI_PROPERTY_PARAM_VDEC_CONTINUE_DATA_TRANSFER: + /* for 3xx fw version session_continue is used */ + break; + default: + ret = pkt_session_set_property_1x(pkt, cookie, ptype, pdata); + break; + } + + return ret; +} + +static int +pkt_session_set_property_4xx(struct hfi_session_set_property_pkt *pkt, + void *cookie, u32 ptype, void *pdata) +{ + void *prop_data; + + if (!pkt || !cookie || !pdata) + return -EINVAL; + + prop_data = &pkt->data[1]; + + pkt->shdr.hdr.size = sizeof(*pkt); + pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_SET_PROPERTY; + pkt->shdr.session_id = hash32_ptr(cookie); + pkt->num_properties = 1; + pkt->data[0] = ptype; + + /* + * Any session set property which is different in 3XX packetization + * should be added as a new case below. All unchanged session set + * properties will be handled in the default case. 
+ */ + switch (ptype) { + case HFI_PROPERTY_PARAM_BUFFER_COUNT_ACTUAL: { + struct hfi_buffer_count_actual *in = pdata; + struct hfi_buffer_count_actual_4xx *count = prop_data; + + count->count_actual = in->count_actual; + count->type = in->type; + count->count_min_host = in->count_actual; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*count); + break; + } + case HFI_PROPERTY_PARAM_WORK_MODE: { + struct hfi_video_work_mode *in = pdata, *wm = prop_data; + + wm->video_work_mode = in->video_work_mode; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*wm); + break; + } + case HFI_PROPERTY_CONFIG_VIDEOCORES_USAGE: { + struct hfi_videocores_usage_type *in = pdata, *cu = prop_data; + + cu->video_core_enable_mask = in->video_core_enable_mask; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*cu); + break; + } + case HFI_PROPERTY_PARAM_VENC_HDR10_PQ_SEI: { + struct hfi_hdr10_pq_sei *in = pdata, *hdr10 = prop_data; + + memcpy(hdr10, in, sizeof(*hdr10)); + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*hdr10); + break; + } + case HFI_PROPERTY_PARAM_VDEC_CONCEAL_COLOR: { + struct hfi_conceal_color_v4 *color = prop_data; + u32 *in = pdata; + + color->conceal_color_8bit = *in & 0xff; + color->conceal_color_8bit |= ((*in >> 10) & 0xff) << 8; + color->conceal_color_8bit |= ((*in >> 20) & 0xff) << 16; + color->conceal_color_10bit = *in; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*color); + break; + } + + case HFI_PROPERTY_PARAM_VENC_H264_TRANSFORM_8X8: { + struct hfi_h264_8x8_transform *in = pdata, *tm = prop_data; + + tm->enable_type = in->enable_type; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*tm); + break; + } + case HFI_PROPERTY_PARAM_VENC_SESSION_QP_RANGE_V2: { + struct hfi_quantization_range_v2 *in = pdata, *range = prop_data; + u32 min_qp, max_qp; + + min_qp = in->min_qp.qp_packed; + max_qp = in->max_qp.qp_packed; + + /* We'll be packing in the qp, so make sure we + * won't be losing data when masking + */ + if (min_qp > 0xff || max_qp > 0xff) + return -ERANGE; + + range->min_qp.layer_id = 0xFF; + range->max_qp.layer_id = 0xFF; + range->min_qp.qp_packed = (min_qp & 0xFF) | ((min_qp & 0xFF) << 8) | + ((min_qp & 0xFF) << 16); + range->max_qp.qp_packed = (max_qp & 0xFF) | ((max_qp & 0xFF) << 8) | + ((max_qp & 0xFF) << 16); + range->min_qp.enable = 7; + range->max_qp.enable = 7; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*range); + break; + } + case HFI_PROPERTY_CONFIG_VENC_MAX_BITRATE: + case HFI_PROPERTY_CONFIG_VDEC_POST_LOOP_DEBLOCKER: + case HFI_PROPERTY_PARAM_BUFFER_ALLOC_MODE: + case HFI_PROPERTY_PARAM_VENC_SESSION_QP: + case HFI_PROPERTY_PARAM_VENC_SESSION_QP_RANGE: + /* not implemented on Venus 4xx */ + return -ENOTSUPP; + default: + return pkt_session_set_property_3xx(pkt, cookie, ptype, pdata); + } + + return 0; +} + +static int +pkt_session_set_property_6xx(struct hfi_session_set_property_pkt *pkt, + void *cookie, u32 ptype, void *pdata) +{ + void *prop_data; + + if (!pkt || !cookie || !pdata) + return -EINVAL; + + prop_data = &pkt->data[1]; + + pkt->shdr.hdr.size = sizeof(*pkt); + pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_SET_PROPERTY; + pkt->shdr.session_id = hash32_ptr(cookie); + pkt->num_properties = 1; + pkt->data[0] = ptype; + + switch (ptype) { + case HFI_PROPERTY_PARAM_UNCOMPRESSED_PLANE_ACTUAL_CONSTRAINTS_INFO: { + struct hfi_uncompressed_plane_actual_constraints_info *in = pdata; + struct hfi_uncompressed_plane_actual_constraints_info *info = prop_data; + + info->buffer_type = in->buffer_type; + info->num_planes = in->num_planes; + info->plane_format[0] = in->plane_format[0]; + if 
(in->num_planes > 1) + info->plane_format[1] = in->plane_format[1]; + + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*info); + break; + } + case HFI_PROPERTY_CONFIG_HEIC_FRAME_QUALITY: { + struct hfi_heic_frame_quality *in = pdata, *cq = prop_data; + + cq->frame_quality = in->frame_quality; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*cq); + break; + } + case HFI_PROPERTY_PARAM_WORK_ROUTE: { + struct hfi_video_work_route *in = pdata, *wr = prop_data; + + wr->video_work_route = in->video_work_route; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*wr); + break; + } + default: + return pkt_session_set_property_4xx(pkt, cookie, ptype, pdata); + } + + return 0; +} + +int pkt_session_get_property(struct hfi_session_get_property_pkt *pkt, + void *cookie, u32 ptype) +{ + if (hfi_ver == HFI_VERSION_1XX) + return pkt_session_get_property_1x(pkt, cookie, ptype); + + return pkt_session_get_property_3xx(pkt, cookie, ptype); +} + +int pkt_session_set_property(struct hfi_session_set_property_pkt *pkt, + void *cookie, u32 ptype, void *pdata) +{ + if (hfi_ver == HFI_VERSION_1XX) + return pkt_session_set_property_1x(pkt, cookie, ptype, pdata); + + if (hfi_ver == HFI_VERSION_3XX) + return pkt_session_set_property_3xx(pkt, cookie, ptype, pdata); + + if (hfi_ver == HFI_VERSION_4XX) + return pkt_session_set_property_4xx(pkt, cookie, ptype, pdata); + + return pkt_session_set_property_6xx(pkt, cookie, ptype, pdata); +} + +void pkt_set_version(enum hfi_version version) +{ + hfi_ver = version; +} diff --git a/drivers/media/platform/qcom/venus/hfi_cmds.h b/drivers/media/platform/qcom/venus/hfi_cmds.h new file mode 100644 index 0000000000..dd9c506644 --- /dev/null +++ b/drivers/media/platform/qcom/venus/hfi_cmds.h @@ -0,0 +1,296 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. + * Copyright (C) 2017 Linaro Ltd. 
+ */ +#ifndef __VENUS_HFI_CMDS_H__ +#define __VENUS_HFI_CMDS_H__ + +#include "hfi.h" + +/* commands */ +#define HFI_CMD_SYS_INIT 0x10001 +#define HFI_CMD_SYS_PC_PREP 0x10002 +#define HFI_CMD_SYS_SET_RESOURCE 0x10003 +#define HFI_CMD_SYS_RELEASE_RESOURCE 0x10004 +#define HFI_CMD_SYS_SET_PROPERTY 0x10005 +#define HFI_CMD_SYS_GET_PROPERTY 0x10006 +#define HFI_CMD_SYS_SESSION_INIT 0x10007 +#define HFI_CMD_SYS_SESSION_END 0x10008 +#define HFI_CMD_SYS_SET_BUFFERS 0x10009 +#define HFI_CMD_SYS_TEST_SSR 0x10101 + +#define HFI_CMD_SESSION_SET_PROPERTY 0x11001 +#define HFI_CMD_SESSION_SET_BUFFERS 0x11002 +#define HFI_CMD_SESSION_GET_SEQUENCE_HEADER 0x11003 + +#define HFI_CMD_SYS_SESSION_ABORT 0x210001 +#define HFI_CMD_SYS_PING 0x210002 + +#define HFI_CMD_SESSION_LOAD_RESOURCES 0x211001 +#define HFI_CMD_SESSION_START 0x211002 +#define HFI_CMD_SESSION_STOP 0x211003 +#define HFI_CMD_SESSION_EMPTY_BUFFER 0x211004 +#define HFI_CMD_SESSION_FILL_BUFFER 0x211005 +#define HFI_CMD_SESSION_SUSPEND 0x211006 +#define HFI_CMD_SESSION_RESUME 0x211007 +#define HFI_CMD_SESSION_FLUSH 0x211008 +#define HFI_CMD_SESSION_GET_PROPERTY 0x211009 +#define HFI_CMD_SESSION_PARSE_SEQUENCE_HEADER 0x21100a +#define HFI_CMD_SESSION_RELEASE_BUFFERS 0x21100b +#define HFI_CMD_SESSION_RELEASE_RESOURCES 0x21100c +#define HFI_CMD_SESSION_CONTINUE 0x21100d +#define HFI_CMD_SESSION_SYNC 0x21100e + +/* command packets */ +struct hfi_sys_init_pkt { + struct hfi_pkt_hdr hdr; + u32 arch_type; +}; + +struct hfi_sys_pc_prep_pkt { + struct hfi_pkt_hdr hdr; +}; + +struct hfi_sys_set_resource_pkt { + struct hfi_pkt_hdr hdr; + u32 resource_handle; + u32 resource_type; + u32 resource_data[]; +}; + +struct hfi_sys_release_resource_pkt { + struct hfi_pkt_hdr hdr; + u32 resource_type; + u32 resource_handle; +}; + +struct hfi_sys_set_property_pkt { + struct hfi_pkt_hdr hdr; + u32 num_properties; + u32 data[]; +}; + +struct hfi_sys_get_property_pkt { + struct hfi_pkt_hdr hdr; + u32 num_properties; + u32 data[1]; +}; + +struct hfi_sys_set_buffers_pkt { + struct hfi_pkt_hdr hdr; + u32 buffer_type; + u32 buffer_size; + u32 num_buffers; + u32 buffer_addr[1]; +}; + +struct hfi_sys_ping_pkt { + struct hfi_pkt_hdr hdr; + u32 client_data; +}; + +struct hfi_session_init_pkt { + struct hfi_session_hdr_pkt shdr; + u32 session_domain; + u32 session_codec; +}; + +struct hfi_session_end_pkt { + struct hfi_session_hdr_pkt shdr; +}; + +struct hfi_session_abort_pkt { + struct hfi_session_hdr_pkt shdr; +}; + +struct hfi_session_set_property_pkt { + struct hfi_session_hdr_pkt shdr; + u32 num_properties; + u32 data[]; +}; + +struct hfi_session_set_buffers_pkt { + struct hfi_session_hdr_pkt shdr; + u32 buffer_type; + u32 buffer_size; + u32 extradata_size; + u32 min_buffer_size; + u32 num_buffers; + u32 buffer_info[]; +}; + +struct hfi_session_get_sequence_header_pkt { + struct hfi_session_hdr_pkt shdr; + u32 buffer_len; + u32 packet_buffer; +}; + +struct hfi_session_load_resources_pkt { + struct hfi_session_hdr_pkt shdr; +}; + +struct hfi_session_start_pkt { + struct hfi_session_hdr_pkt shdr; +}; + +struct hfi_session_stop_pkt { + struct hfi_session_hdr_pkt shdr; +}; + +struct hfi_session_empty_buffer_compressed_pkt { + struct hfi_session_hdr_pkt shdr; + u32 time_stamp_hi; + u32 time_stamp_lo; + u32 flags; + u32 mark_target; + u32 mark_data; + u32 offset; + u32 alloc_len; + u32 filled_len; + u32 input_tag; + u32 packet_buffer; + u32 extradata_buffer; + u32 data[1]; +}; + +struct hfi_session_empty_buffer_uncompressed_plane0_pkt { + struct hfi_session_hdr_pkt shdr; + u32 
view_id; + u32 time_stamp_hi; + u32 time_stamp_lo; + u32 flags; + u32 mark_target; + u32 mark_data; + u32 alloc_len; + u32 filled_len; + u32 offset; + u32 input_tag; + u32 packet_buffer; + u32 extradata_buffer; + u32 data[1]; +}; + +struct hfi_session_empty_buffer_uncompressed_plane1_pkt { + u32 flags; + u32 alloc_len; + u32 filled_len; + u32 offset; + u32 packet_buffer2; + u32 data[1]; +}; + +struct hfi_session_empty_buffer_uncompressed_plane2_pkt { + u32 flags; + u32 alloc_len; + u32 filled_len; + u32 offset; + u32 packet_buffer3; + u32 data[1]; +}; + +struct hfi_session_fill_buffer_pkt { + struct hfi_session_hdr_pkt shdr; + u32 stream_id; + u32 offset; + u32 alloc_len; + u32 filled_len; + u32 output_tag; + u32 packet_buffer; + u32 extradata_buffer; + u32 data[1]; +}; + +struct hfi_session_flush_pkt { + struct hfi_session_hdr_pkt shdr; + u32 flush_type; +}; + +struct hfi_session_suspend_pkt { + struct hfi_session_hdr_pkt shdr; +}; + +struct hfi_session_resume_pkt { + struct hfi_session_hdr_pkt shdr; +}; + +struct hfi_session_get_property_pkt { + struct hfi_session_hdr_pkt shdr; + u32 num_properties; + u32 data[1]; +}; + +struct hfi_session_release_buffer_pkt { + struct hfi_session_hdr_pkt shdr; + u32 buffer_type; + u32 buffer_size; + u32 extradata_size; + u32 response_req; + u32 num_buffers; + u32 buffer_info[1]; +}; + +struct hfi_session_release_resources_pkt { + struct hfi_session_hdr_pkt shdr; +}; + +struct hfi_session_parse_sequence_header_pkt { + struct hfi_session_hdr_pkt shdr; + u32 header_len; + u32 packet_buffer; +}; + +struct hfi_sfr { + u32 buf_size; + u8 data[1]; +}; + +struct hfi_sys_test_ssr_pkt { + struct hfi_pkt_hdr hdr; + u32 trigger_type; +}; + +void pkt_set_version(enum hfi_version version); + +void pkt_sys_init(struct hfi_sys_init_pkt *pkt, u32 arch_type); +void pkt_sys_pc_prep(struct hfi_sys_pc_prep_pkt *pkt); +void pkt_sys_idle_indicator(struct hfi_sys_set_property_pkt *pkt, u32 enable); +void pkt_sys_power_control(struct hfi_sys_set_property_pkt *pkt, u32 enable); +void pkt_sys_ubwc_config(struct hfi_sys_set_property_pkt *pkt, const struct hfi_ubwc_config *hfi); +int pkt_sys_set_resource(struct hfi_sys_set_resource_pkt *pkt, u32 id, u32 size, + u32 addr, void *cookie); +int pkt_sys_unset_resource(struct hfi_sys_release_resource_pkt *pkt, u32 id, + u32 size, void *cookie); +void pkt_sys_debug_config(struct hfi_sys_set_property_pkt *pkt, u32 mode, + u32 config); +void pkt_sys_coverage_config(struct hfi_sys_set_property_pkt *pkt, u32 mode); +void pkt_sys_ping(struct hfi_sys_ping_pkt *pkt, u32 cookie); +void pkt_sys_image_version(struct hfi_sys_get_property_pkt *pkt); +int pkt_sys_ssr_cmd(struct hfi_sys_test_ssr_pkt *pkt, u32 trigger_type); +int pkt_session_init(struct hfi_session_init_pkt *pkt, void *cookie, + u32 session_type, u32 codec); +void pkt_session_cmd(struct hfi_session_pkt *pkt, u32 pkt_type, void *cookie); +int pkt_session_set_buffers(struct hfi_session_set_buffers_pkt *pkt, + void *cookie, struct hfi_buffer_desc *bd); +int pkt_session_unset_buffers(struct hfi_session_release_buffer_pkt *pkt, + void *cookie, struct hfi_buffer_desc *bd); +int pkt_session_etb_decoder(struct hfi_session_empty_buffer_compressed_pkt *pkt, + void *cookie, struct hfi_frame_data *input_frame); +int pkt_session_etb_encoder( + struct hfi_session_empty_buffer_uncompressed_plane0_pkt *pkt, + void *cookie, struct hfi_frame_data *input_frame); +int pkt_session_ftb(struct hfi_session_fill_buffer_pkt *pkt, + void *cookie, struct hfi_frame_data *output_frame); +int 
pkt_session_parse_seq_header( + struct hfi_session_parse_sequence_header_pkt *pkt, + void *cookie, u32 seq_hdr, u32 seq_hdr_len); +int pkt_session_get_seq_hdr(struct hfi_session_get_sequence_header_pkt *pkt, + void *cookie, u32 seq_hdr, u32 seq_hdr_len); +int pkt_session_flush(struct hfi_session_flush_pkt *pkt, void *cookie, + u32 flush_mode); +int pkt_session_get_property(struct hfi_session_get_property_pkt *pkt, + void *cookie, u32 ptype); +int pkt_session_set_property(struct hfi_session_set_property_pkt *pkt, + void *cookie, u32 ptype, void *pdata); + +#endif diff --git a/drivers/media/platform/qcom/venus/hfi_helper.h b/drivers/media/platform/qcom/venus/hfi_helper.h new file mode 100644 index 0000000000..e4c05d62cf --- /dev/null +++ b/drivers/media/platform/qcom/venus/hfi_helper.h @@ -0,0 +1,1302 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. + * Copyright (C) 2017 Linaro Ltd. + */ +#ifndef __VENUS_HFI_HELPER_H__ +#define __VENUS_HFI_HELPER_H__ + +#define HFI_DOMAIN_BASE_COMMON 0 + +#define HFI_DOMAIN_BASE_VDEC 0x1000000 +#define HFI_DOMAIN_BASE_VENC 0x2000000 +#define HFI_DOMAIN_BASE_VPE 0x3000000 + +#define HFI_VIDEO_ARCH_OX 0x1 + +#define HFI_ARCH_COMMON_OFFSET 0 +#define HFI_ARCH_OX_OFFSET 0x200000 + +#define HFI_OX_BASE 0x1000000 + +#define HFI_CMD_START_OFFSET 0x10000 +#define HFI_MSG_START_OFFSET 0x20000 + +#define HFI_ERR_NONE 0x0 +#define HFI_ERR_SYS_FATAL 0x1 +#define HFI_ERR_SYS_INVALID_PARAMETER 0x2 +#define HFI_ERR_SYS_VERSION_MISMATCH 0x3 +#define HFI_ERR_SYS_INSUFFICIENT_RESOURCES 0x4 +#define HFI_ERR_SYS_MAX_SESSIONS_REACHED 0x5 +#define HFI_ERR_SYS_UNSUPPORTED_CODEC 0x6 +#define HFI_ERR_SYS_SESSION_IN_USE 0x7 +#define HFI_ERR_SYS_SESSION_ID_OUT_OF_RANGE 0x8 +#define HFI_ERR_SYS_UNSUPPORTED_DOMAIN 0x9 + +#define HFI_ERR_SESSION_FATAL 0x1001 +#define HFI_ERR_SESSION_INVALID_PARAMETER 0x1002 +#define HFI_ERR_SESSION_BAD_POINTER 0x1003 +#define HFI_ERR_SESSION_INVALID_SESSION_ID 0x1004 +#define HFI_ERR_SESSION_INVALID_STREAM_ID 0x1005 +#define HFI_ERR_SESSION_INCORRECT_STATE_OPERATION 0x1006 +#define HFI_ERR_SESSION_UNSUPPORTED_PROPERTY 0x1007 +#define HFI_ERR_SESSION_UNSUPPORTED_SETTING 0x1008 +#define HFI_ERR_SESSION_INSUFFICIENT_RESOURCES 0x1009 +#define HFI_ERR_SESSION_STREAM_CORRUPT_OUTPUT_STALLED 0x100a +#define HFI_ERR_SESSION_STREAM_CORRUPT 0x100b +#define HFI_ERR_SESSION_ENC_OVERFLOW 0x100c +#define HFI_ERR_SESSION_UNSUPPORTED_STREAM 0x100d +#define HFI_ERR_SESSION_CMDSIZE 0x100e +#define HFI_ERR_SESSION_UNSUPPORT_CMD 0x100f +#define HFI_ERR_SESSION_UNSUPPORT_BUFFERTYPE 0x1010 +#define HFI_ERR_SESSION_BUFFERCOUNT_TOOSMALL 0x1011 +#define HFI_ERR_SESSION_INVALID_SCALE_FACTOR 0x1012 +#define HFI_ERR_SESSION_UPSCALE_NOT_SUPPORTED 0x1013 + +#define HFI_EVENT_SYS_ERROR 0x1 +#define HFI_EVENT_SESSION_ERROR 0x2 + +#define HFI_EVENT_DATA_SEQUENCE_CHANGED_SUFFICIENT_BUF_RESOURCES 0x1000001 +#define HFI_EVENT_DATA_SEQUENCE_CHANGED_INSUFFICIENT_BUF_RESOURCES 0x1000002 +#define HFI_EVENT_SESSION_SEQUENCE_CHANGED 0x1000003 +#define HFI_EVENT_SESSION_PROPERTY_CHANGED 0x1000004 +#define HFI_EVENT_SESSION_LTRUSE_FAILED 0x1000005 +#define HFI_EVENT_RELEASE_BUFFER_REFERENCE 0x1000006 + +#define HFI_BUFFERFLAG_EOS 0x00000001 +#define HFI_BUFFERFLAG_STARTTIME 0x00000002 +#define HFI_BUFFERFLAG_DECODEONLY 0x00000004 +#define HFI_BUFFERFLAG_DATACORRUPT 0x00000008 +#define HFI_BUFFERFLAG_ENDOFFRAME 0x00000010 +#define HFI_BUFFERFLAG_SYNCFRAME 0x00000020 +#define HFI_BUFFERFLAG_EXTRADATA 0x00000040 +#define 
HFI_BUFFERFLAG_CODECCONFIG 0x00000080 +#define HFI_BUFFERFLAG_TIMESTAMPINVALID 0x00000100 +#define HFI_BUFFERFLAG_READONLY 0x00000200 +#define HFI_BUFFERFLAG_ENDOFSUBFRAME 0x00000400 +#define HFI_BUFFERFLAG_EOSEQ 0x00200000 +#define HFI_BUFFERFLAG_MBAFF 0x08000000 +#define HFI_BUFFERFLAG_VPE_YUV_601_709_CSC_CLAMP 0x10000000 +#define HFI_BUFFERFLAG_DROP_FRAME 0x20000000 +#define HFI_BUFFERFLAG_TEI 0x40000000 +#define HFI_BUFFERFLAG_DISCONTINUITY 0x80000000 + +#define HFI_ERR_SESSION_EMPTY_BUFFER_DONE_OUTPUT_PENDING 0x1001001 +#define HFI_ERR_SESSION_SAME_STATE_OPERATION 0x1001002 +#define HFI_ERR_SESSION_SYNC_FRAME_NOT_DETECTED 0x1001003 +#define HFI_ERR_SESSION_START_CODE_NOT_FOUND 0x1001004 + +#define HFI_FLUSH_INPUT 0x1000001 +#define HFI_FLUSH_OUTPUT 0x1000002 +#define HFI_FLUSH_OUTPUT2 0x1000003 +#define HFI_FLUSH_ALL 0x1000004 + +#define HFI_EXTRADATA_NONE 0x00000000 +#define HFI_EXTRADATA_MB_QUANTIZATION 0x00000001 +#define HFI_EXTRADATA_INTERLACE_VIDEO 0x00000002 +#define HFI_EXTRADATA_VC1_FRAMEDISP 0x00000003 +#define HFI_EXTRADATA_VC1_SEQDISP 0x00000004 +#define HFI_EXTRADATA_TIMESTAMP 0x00000005 +#define HFI_EXTRADATA_S3D_FRAME_PACKING 0x00000006 +#define HFI_EXTRADATA_FRAME_RATE 0x00000007 +#define HFI_EXTRADATA_PANSCAN_WINDOW 0x00000008 +#define HFI_EXTRADATA_RECOVERY_POINT_SEI 0x00000009 +#define HFI_EXTRADATA_MPEG2_SEQDISP 0x0000000d +#define HFI_EXTRADATA_STREAM_USERDATA 0x0000000e +#define HFI_EXTRADATA_FRAME_QP 0x0000000f +#define HFI_EXTRADATA_FRAME_BITS_INFO 0x00000010 +#define HFI_EXTRADATA_MULTISLICE_INFO 0x7f100000 +#define HFI_EXTRADATA_NUM_CONCEALED_MB 0x7f100001 +#define HFI_EXTRADATA_INDEX 0x7f100002 +#define HFI_EXTRADATA_METADATA_LTR 0x7f100004 +#define HFI_EXTRADATA_METADATA_FILLER 0x7fe00002 + +#define HFI_INDEX_EXTRADATA_INPUT_CROP 0x0700000e +#define HFI_INDEX_EXTRADATA_OUTPUT_CROP 0x0700000f +#define HFI_INDEX_EXTRADATA_DIGITAL_ZOOM 0x07000010 +#define HFI_INDEX_EXTRADATA_ASPECT_RATIO 0x7f100003 + +#define HFI_INTERLACE_FRAME_PROGRESSIVE 0x01 +#define HFI_INTERLACE_INTERLEAVE_FRAME_TOPFIELDFIRST 0x02 +#define HFI_INTERLACE_INTERLEAVE_FRAME_BOTTOMFIELDFIRST 0x04 +#define HFI_INTERLACE_FRAME_TOPFIELDFIRST 0x08 +#define HFI_INTERLACE_FRAME_BOTTOMFIELDFIRST 0x10 + +/* + * HFI_PROPERTY_PARAM_OX_START + * HFI_DOMAIN_BASE_COMMON + HFI_ARCH_OX_OFFSET + 0x1000 + */ +#define HFI_PROPERTY_PARAM_BUFFER_COUNT_ACTUAL 0x201001 +#define HFI_PROPERTY_PARAM_UNCOMPRESSED_PLANE_ACTUAL_CONSTRAINTS_INFO 0x201002 +#define HFI_PROPERTY_PARAM_INTERLACE_FORMAT_SUPPORTED 0x201003 +#define HFI_PROPERTY_PARAM_CHROMA_SITE 0x201004 +#define HFI_PROPERTY_PARAM_EXTRA_DATA_HEADER_CONFIG 0x201005 +#define HFI_PROPERTY_PARAM_INDEX_EXTRADATA 0x201006 +#define HFI_PROPERTY_PARAM_DIVX_FORMAT 0x201007 +#define HFI_PROPERTY_PARAM_BUFFER_ALLOC_MODE 0x201008 +#define HFI_PROPERTY_PARAM_S3D_FRAME_PACKING_EXTRADATA 0x201009 +#define HFI_PROPERTY_PARAM_ERR_DETECTION_CODE_EXTRADATA 0x20100a +#define HFI_PROPERTY_PARAM_BUFFER_ALLOC_MODE_SUPPORTED 0x20100b +#define HFI_PROPERTY_PARAM_BUFFER_SIZE_ACTUAL 0x20100c +#define HFI_PROPERTY_PARAM_BUFFER_DISPLAY_HOLD_COUNT_ACTUAL 0x20100d + +/* + * HFI_PROPERTY_CONFIG_OX_START + * HFI_DOMAIN_BASE_COMMON + HFI_ARCH_OX_OFFSET + 0x2000 + */ +#define HFI_PROPERTY_CONFIG_BUFFER_REQUIREMENTS 0x202001 +#define HFI_PROPERTY_CONFIG_REALTIME 0x202002 +#define HFI_PROPERTY_CONFIG_PRIORITY 0x202003 +#define HFI_PROPERTY_CONFIG_BATCH_INFO 0x202004 + +/* + * HFI_PROPERTY_PARAM_VDEC_OX_START \ + * HFI_DOMAIN_BASE_VDEC + HFI_ARCH_OX_OFFSET + 0x3000 + */ +#define 
HFI_PROPERTY_PARAM_VDEC_CONTINUE_DATA_TRANSFER 0x1203001 +#define HFI_PROPERTY_PARAM_VDEC_DISPLAY_PICTURE_BUFFER_COUNT 0x1203002 +#define HFI_PROPERTY_PARAM_VDEC_MULTI_VIEW_SELECT 0x1203003 +#define HFI_PROPERTY_PARAM_VDEC_PICTURE_TYPE_DECODE 0x1203004 +#define HFI_PROPERTY_PARAM_VDEC_OUTPUT_ORDER 0x1203005 +#define HFI_PROPERTY_PARAM_VDEC_MB_QUANTIZATION 0x1203006 +#define HFI_PROPERTY_PARAM_VDEC_NUM_CONCEALED_MB 0x1203007 +#define HFI_PROPERTY_PARAM_VDEC_H264_ENTROPY_SWITCHING 0x1203008 +#define HFI_PROPERTY_PARAM_VDEC_OUTPUT2_KEEP_ASPECT_RATIO 0x1203009 +#define HFI_PROPERTY_PARAM_VDEC_FRAME_RATE_EXTRADATA 0x120300a +#define HFI_PROPERTY_PARAM_VDEC_PANSCAN_WNDW_EXTRADATA 0x120300b +#define HFI_PROPERTY_PARAM_VDEC_RECOVERY_POINT_SEI_EXTRADATA 0x120300c +#define HFI_PROPERTY_PARAM_VDEC_THUMBNAIL_MODE 0x120300d +#define HFI_PROPERTY_PARAM_VDEC_FRAME_ASSEMBLY 0x120300e +#define HFI_PROPERTY_PARAM_VDEC_DPB_COUNTS 0x120300e +#define HFI_PROPERTY_PARAM_VDEC_VC1_FRAMEDISP_EXTRADATA 0x1203011 +#define HFI_PROPERTY_PARAM_VDEC_VC1_SEQDISP_EXTRADATA 0x1203012 +#define HFI_PROPERTY_PARAM_VDEC_TIMESTAMP_EXTRADATA 0x1203013 +#define HFI_PROPERTY_PARAM_VDEC_INTERLACE_VIDEO_EXTRADATA 0x1203014 +#define HFI_PROPERTY_PARAM_VDEC_AVC_SESSION_SELECT 0x1203015 +#define HFI_PROPERTY_PARAM_VDEC_MPEG2_SEQDISP_EXTRADATA 0x1203016 +#define HFI_PROPERTY_PARAM_VDEC_STREAM_USERDATA_EXTRADATA 0x1203017 +#define HFI_PROPERTY_PARAM_VDEC_FRAME_QP_EXTRADATA 0x1203018 +#define HFI_PROPERTY_PARAM_VDEC_FRAME_BITS_INFO_EXTRADATA 0x1203019 +#define HFI_PROPERTY_PARAM_VDEC_SCS_THRESHOLD 0x120301a + +/* + * HFI_PROPERTY_CONFIG_VDEC_OX_START + * HFI_DOMAIN_BASE_VDEC + HFI_ARCH_OX_OFFSET + 0x0000 + */ +#define HFI_PROPERTY_CONFIG_VDEC_POST_LOOP_DEBLOCKER 0x1200001 +#define HFI_PROPERTY_CONFIG_VDEC_MB_ERROR_MAP_REPORTING 0x1200002 +#define HFI_PROPERTY_CONFIG_VDEC_MB_ERROR_MAP 0x1200003 + +#define HFI_PROPERTY_CONFIG_VDEC_ENTROPY 0x1204004 + +/* + * HFI_PROPERTY_PARAM_VENC_OX_START + * HFI_DOMAIN_BASE_VENC + HFI_ARCH_OX_OFFSET + 0x5000 + */ +#define HFI_PROPERTY_PARAM_VENC_MULTI_SLICE_INFO 0x2205001 +#define HFI_PROPERTY_PARAM_VENC_H264_IDR_S3D_FRAME_PACKING_NAL 0x2205002 +#define HFI_PROPERTY_PARAM_VENC_LTR_INFO 0x2205003 +#define HFI_PROPERTY_PARAM_VENC_MBI_DUMPING 0x2205005 + +/* + * HFI_PROPERTY_CONFIG_VENC_OX_START + * HFI_DOMAIN_BASE_VENC + HFI_ARCH_OX_OFFSET + 0x6000 + */ +#define HFI_PROPERTY_CONFIG_VENC_FRAME_QP 0x2206001 + +/* + * HFI_PROPERTY_PARAM_VPE_OX_START + * HFI_DOMAIN_BASE_VPE + HFI_ARCH_OX_OFFSET + 0x7000 + */ +#define HFI_PROPERTY_PARAM_VPE_COLOR_SPACE_CONVERSION 0x3207001 + +#define HFI_PROPERTY_CONFIG_VPE_OX_START \ + (HFI_DOMAIN_BASE_VPE + HFI_ARCH_OX_OFFSET + 0x8000) + +#define HFI_CHROMA_SITE_0 0x1000001 +#define HFI_CHROMA_SITE_1 0x1000002 +#define HFI_CHROMA_SITE_2 0x1000003 +#define HFI_CHROMA_SITE_3 0x1000004 +#define HFI_CHROMA_SITE_4 0x1000005 +#define HFI_CHROMA_SITE_5 0x1000006 + +#define HFI_PRIORITY_LOW 10 +#define HFI_PRIOIRTY_MEDIUM 20 +#define HFI_PRIORITY_HIGH 30 + +#define HFI_OUTPUT_ORDER_DISPLAY 0x1000001 +#define HFI_OUTPUT_ORDER_DECODE 0x1000002 + +#define HFI_RATE_CONTROL_OFF 0x1000001 +#define HFI_RATE_CONTROL_VBR_VFR 0x1000002 +#define HFI_RATE_CONTROL_VBR_CFR 0x1000003 +#define HFI_RATE_CONTROL_CBR_VFR 0x1000004 +#define HFI_RATE_CONTROL_CBR_CFR 0x1000005 +#define HFI_RATE_CONTROL_CQ 0x1000008 + +#define HFI_VIDEO_CODEC_H264 0x00000002 +#define HFI_VIDEO_CODEC_H263 0x00000004 +#define HFI_VIDEO_CODEC_MPEG1 0x00000008 +#define HFI_VIDEO_CODEC_MPEG2 0x00000010 +#define 
HFI_VIDEO_CODEC_MPEG4 0x00000020 +#define HFI_VIDEO_CODEC_DIVX_311 0x00000040 +#define HFI_VIDEO_CODEC_DIVX 0x00000080 +#define HFI_VIDEO_CODEC_VC1 0x00000100 +#define HFI_VIDEO_CODEC_SPARK 0x00000200 +#define HFI_VIDEO_CODEC_VP8 0x00001000 +#define HFI_VIDEO_CODEC_HEVC 0x00002000 +#define HFI_VIDEO_CODEC_VP9 0x00004000 +#define HFI_VIDEO_CODEC_HEVC_HYBRID 0x80000000 + +#define HFI_H264_PROFILE_BASELINE 0x00000001 +#define HFI_H264_PROFILE_MAIN 0x00000002 +#define HFI_H264_PROFILE_HIGH 0x00000004 +#define HFI_H264_PROFILE_STEREO_HIGH 0x00000008 +#define HFI_H264_PROFILE_MULTIVIEW_HIGH 0x00000010 +#define HFI_H264_PROFILE_CONSTRAINED_BASE 0x00000020 +#define HFI_H264_PROFILE_CONSTRAINED_HIGH 0x00000040 + +#define HFI_H264_LEVEL_1 0x00000001 +#define HFI_H264_LEVEL_1b 0x00000002 +#define HFI_H264_LEVEL_11 0x00000004 +#define HFI_H264_LEVEL_12 0x00000008 +#define HFI_H264_LEVEL_13 0x00000010 +#define HFI_H264_LEVEL_2 0x00000020 +#define HFI_H264_LEVEL_21 0x00000040 +#define HFI_H264_LEVEL_22 0x00000080 +#define HFI_H264_LEVEL_3 0x00000100 +#define HFI_H264_LEVEL_31 0x00000200 +#define HFI_H264_LEVEL_32 0x00000400 +#define HFI_H264_LEVEL_4 0x00000800 +#define HFI_H264_LEVEL_41 0x00001000 +#define HFI_H264_LEVEL_42 0x00002000 +#define HFI_H264_LEVEL_5 0x00004000 +#define HFI_H264_LEVEL_51 0x00008000 +#define HFI_H264_LEVEL_52 0x00010000 + +#define HFI_H263_PROFILE_BASELINE 0x00000001 + +#define HFI_H263_LEVEL_10 0x00000001 +#define HFI_H263_LEVEL_20 0x00000002 +#define HFI_H263_LEVEL_30 0x00000004 +#define HFI_H263_LEVEL_40 0x00000008 +#define HFI_H263_LEVEL_45 0x00000010 +#define HFI_H263_LEVEL_50 0x00000020 +#define HFI_H263_LEVEL_60 0x00000040 +#define HFI_H263_LEVEL_70 0x00000080 + +#define HFI_MPEG2_PROFILE_SIMPLE 0x00000001 +#define HFI_MPEG2_PROFILE_MAIN 0x00000002 +#define HFI_MPEG2_PROFILE_422 0x00000004 +#define HFI_MPEG2_PROFILE_SNR 0x00000008 +#define HFI_MPEG2_PROFILE_SPATIAL 0x00000010 +#define HFI_MPEG2_PROFILE_HIGH 0x00000020 + +#define HFI_MPEG2_LEVEL_LL 0x00000001 +#define HFI_MPEG2_LEVEL_ML 0x00000002 +#define HFI_MPEG2_LEVEL_H14 0x00000004 +#define HFI_MPEG2_LEVEL_HL 0x00000008 + +#define HFI_MPEG4_PROFILE_SIMPLE 0x00000001 +#define HFI_MPEG4_PROFILE_ADVANCEDSIMPLE 0x00000002 + +#define HFI_MPEG4_LEVEL_0 0x00000001 +#define HFI_MPEG4_LEVEL_0b 0x00000002 +#define HFI_MPEG4_LEVEL_1 0x00000004 +#define HFI_MPEG4_LEVEL_2 0x00000008 +#define HFI_MPEG4_LEVEL_3 0x00000010 +#define HFI_MPEG4_LEVEL_4 0x00000020 +#define HFI_MPEG4_LEVEL_4a 0x00000040 +#define HFI_MPEG4_LEVEL_5 0x00000080 +#define HFI_MPEG4_LEVEL_6 0x00000100 +#define HFI_MPEG4_LEVEL_7 0x00000200 +#define HFI_MPEG4_LEVEL_8 0x00000400 +#define HFI_MPEG4_LEVEL_9 0x00000800 +#define HFI_MPEG4_LEVEL_3b 0x00001000 + +#define HFI_VC1_PROFILE_SIMPLE 0x00000001 +#define HFI_VC1_PROFILE_MAIN 0x00000002 +#define HFI_VC1_PROFILE_ADVANCED 0x00000004 + +#define HFI_VC1_LEVEL_LOW 0x00000001 +#define HFI_VC1_LEVEL_MEDIUM 0x00000002 +#define HFI_VC1_LEVEL_HIGH 0x00000004 +#define HFI_VC1_LEVEL_0 0x00000008 +#define HFI_VC1_LEVEL_1 0x00000010 +#define HFI_VC1_LEVEL_2 0x00000020 +#define HFI_VC1_LEVEL_3 0x00000040 +#define HFI_VC1_LEVEL_4 0x00000080 + +#define HFI_VPX_PROFILE_SIMPLE 0x00000001 +#define HFI_VPX_PROFILE_ADVANCED 0x00000002 +#define HFI_VPX_PROFILE_VERSION_0 0x00000004 +#define HFI_VPX_PROFILE_VERSION_1 0x00000008 +#define HFI_VPX_PROFILE_VERSION_2 0x00000010 +#define HFI_VPX_PROFILE_VERSION_3 0x00000020 + +#define HFI_DIVX_FORMAT_4 0x1 +#define HFI_DIVX_FORMAT_5 0x2 +#define HFI_DIVX_FORMAT_6 0x3 + +#define 
HFI_DIVX_PROFILE_QMOBILE 0x00000001 +#define HFI_DIVX_PROFILE_MOBILE 0x00000002 +#define HFI_DIVX_PROFILE_MT 0x00000004 +#define HFI_DIVX_PROFILE_HT 0x00000008 +#define HFI_DIVX_PROFILE_HD 0x00000010 + +#define HFI_HEVC_PROFILE_MAIN 0x00000001 +#define HFI_HEVC_PROFILE_MAIN10 0x00000002 +#define HFI_HEVC_PROFILE_MAIN_STILL_PIC 0x00000004 + +#define HFI_HEVC_LEVEL_1 0x00000001 +#define HFI_HEVC_LEVEL_2 0x00000002 +#define HFI_HEVC_LEVEL_21 0x00000004 +#define HFI_HEVC_LEVEL_3 0x00000008 +#define HFI_HEVC_LEVEL_31 0x00000010 +#define HFI_HEVC_LEVEL_4 0x00000020 +#define HFI_HEVC_LEVEL_41 0x00000040 +#define HFI_HEVC_LEVEL_5 0x00000080 +#define HFI_HEVC_LEVEL_51 0x00000100 +#define HFI_HEVC_LEVEL_52 0x00000200 +#define HFI_HEVC_LEVEL_6 0x00000400 +#define HFI_HEVC_LEVEL_61 0x00000800 +#define HFI_HEVC_LEVEL_62 0x00001000 + +#define HFI_HEVC_TIER_MAIN 0x1 +#define HFI_HEVC_TIER_HIGH0 0x2 + +#define HFI_VPX_PROFILE_MAIN 0x00000001 + +#define HFI_VPX_LEVEL_VERSION_0 0x00000001 +#define HFI_VPX_LEVEL_VERSION_1 0x00000002 +#define HFI_VPX_LEVEL_VERSION_2 0x00000004 +#define HFI_VPX_LEVEL_VERSION_3 0x00000008 + +/* VP9 Profile 0, 8-bit */ +#define HFI_VP9_PROFILE_P0 0x00000001 +/* VP9 Profile 2, 10-bit */ +#define HFI_VP9_PROFILE_P2_10B 0x00000004 + +#define HFI_VP9_LEVEL_1 0x00000001 +#define HFI_VP9_LEVEL_11 0x00000002 +#define HFI_VP9_LEVEL_2 0x00000004 +#define HFI_VP9_LEVEL_21 0x00000008 +#define HFI_VP9_LEVEL_3 0x00000010 +#define HFI_VP9_LEVEL_31 0x00000020 +#define HFI_VP9_LEVEL_4 0x00000040 +#define HFI_VP9_LEVEL_41 0x00000080 +#define HFI_VP9_LEVEL_5 0x00000100 +#define HFI_VP9_LEVEL_51 0x00000200 +#define HFI_VP9_LEVEL_6 0x00000400 +#define HFI_VP9_LEVEL_61 0x00000800 + +#define HFI_BUFFER_INPUT 0x1 +#define HFI_BUFFER_OUTPUT 0x2 +#define HFI_BUFFER_OUTPUT2 0x3 +#define HFI_BUFFER_INTERNAL_PERSIST 0x4 +#define HFI_BUFFER_INTERNAL_PERSIST_1 0x5 +#define HFI_BUFFER_INTERNAL_SCRATCH(ver) \ + (((ver) == HFI_VERSION_4XX || \ + (ver) == HFI_VERSION_6XX) ? 0x6 : 0x1000001) +#define HFI_BUFFER_INTERNAL_SCRATCH_1(ver) \ + (((ver) == HFI_VERSION_4XX || \ + (ver) == HFI_VERSION_6XX) ? 0x7 : 0x1000005) +#define HFI_BUFFER_INTERNAL_SCRATCH_2(ver) \ + (((ver) == HFI_VERSION_4XX || \ + (ver) == HFI_VERSION_6XX) ? 0x8 : 0x1000006) +#define HFI_BUFFER_EXTRADATA_INPUT(ver) \ + (((ver) == HFI_VERSION_4XX) ? 0xc : 0x1000002) +#define HFI_BUFFER_EXTRADATA_OUTPUT(ver) \ + (((ver) == HFI_VERSION_4XX) ? 0xa : 0x1000003) +#define HFI_BUFFER_EXTRADATA_OUTPUT2(ver) \ + (((ver) == HFI_VERSION_4XX) ? 
0xb : 0x1000004) +#define HFI_BUFFER_TYPE_MAX 11 + +#define HFI_BUFFER_MODE_STATIC 0x1000001 +#define HFI_BUFFER_MODE_RING 0x1000002 +#define HFI_BUFFER_MODE_DYNAMIC 0x1000003 + +/* + * HFI_PROPERTY_SYS_COMMON_START + * HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET + 0x0000 + */ +#define HFI_PROPERTY_SYS_DEBUG_CONFIG 0x1 +#define HFI_PROPERTY_SYS_RESOURCE_OCMEM_REQUIREMENT_INFO 0x2 +#define HFI_PROPERTY_SYS_CONFIG_VCODEC_CLKFREQ 0x3 +#define HFI_PROPERTY_SYS_IDLE_INDICATOR 0x4 +#define HFI_PROPERTY_SYS_CODEC_POWER_PLANE_CTRL 0x5 +#define HFI_PROPERTY_SYS_IMAGE_VERSION 0x6 +#define HFI_PROPERTY_SYS_CONFIG_COVERAGE 0x7 +#define HFI_PROPERTY_SYS_UBWC_CONFIG 0x8 + +/* + * HFI_PROPERTY_PARAM_COMMON_START + * HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET + 0x1000 + */ +#define HFI_PROPERTY_PARAM_FRAME_SIZE 0x1001 +#define HFI_PROPERTY_PARAM_UNCOMPRESSED_PLANE_ACTUAL_INFO 0x1002 +#define HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SELECT 0x1003 +#define HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SUPPORTED 0x1004 +#define HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT 0x1005 +#define HFI_PROPERTY_PARAM_PROFILE_LEVEL_SUPPORTED 0x1006 +#define HFI_PROPERTY_PARAM_CAPABILITY_SUPPORTED 0x1007 +#define HFI_PROPERTY_PARAM_PROPERTIES_SUPPORTED 0x1008 +#define HFI_PROPERTY_PARAM_CODEC_SUPPORTED 0x1009 +#define HFI_PROPERTY_PARAM_NAL_STREAM_FORMAT_SUPPORTED 0x100a +#define HFI_PROPERTY_PARAM_NAL_STREAM_FORMAT_SELECT 0x100b +#define HFI_PROPERTY_PARAM_MULTI_VIEW_FORMAT 0x100c +#define HFI_PROPERTY_PARAM_MAX_SEQUENCE_HEADER_SIZE 0x100d +#define HFI_PROPERTY_PARAM_CODEC_MASK_SUPPORTED 0x100e +#define HFI_PROPERTY_PARAM_MVC_BUFFER_LAYOUT 0x100f +#define HFI_PROPERTY_PARAM_MAX_SESSIONS_SUPPORTED 0x1010 +#define HFI_PROPERTY_PARAM_WORK_MODE 0x1015 +#define HFI_PROPERTY_PARAM_WORK_ROUTE 0x1017 + +/* + * HFI_PROPERTY_CONFIG_COMMON_START + * HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET + 0x2000 + */ +#define HFI_PROPERTY_CONFIG_FRAME_RATE 0x2001 +#define HFI_PROPERTY_CONFIG_VIDEOCORES_USAGE 0x2002 + +/* + * HFI_PROPERTY_PARAM_VDEC_COMMON_START + * HFI_DOMAIN_BASE_VDEC + HFI_ARCH_COMMON_OFFSET + 0x3000 + */ +#define HFI_PROPERTY_PARAM_VDEC_MULTI_STREAM 0x1003001 +#define HFI_PROPERTY_PARAM_VDEC_CONCEAL_COLOR 0x1003002 +#define HFI_PROPERTY_PARAM_VDEC_NONCP_OUTPUT2 0x1003003 +#define HFI_PROPERTY_PARAM_VDEC_PIXEL_BITDEPTH 0x1003007 +#define HFI_PROPERTY_PARAM_VDEC_PIC_STRUCT 0x1003009 +#define HFI_PROPERTY_PARAM_VDEC_COLOUR_SPACE 0x100300a +#define HFI_PROPERTY_PARAM_VDEC_ENABLE_SUFFICIENT_SEQCHANGE_EVENT \ + 0x100300b + +/* + * HFI_PROPERTY_CONFIG_VDEC_COMMON_START + * HFI_DOMAIN_BASE_VDEC + HFI_ARCH_COMMON_OFFSET + 0x4000 + */ + +/* + * HFI_PROPERTY_PARAM_VENC_COMMON_START + * HFI_DOMAIN_BASE_VENC + HFI_ARCH_COMMON_OFFSET + 0x5000 + */ +#define HFI_PROPERTY_PARAM_VENC_SLICE_DELIVERY_MODE 0x2005001 +#define HFI_PROPERTY_PARAM_VENC_H264_ENTROPY_CONTROL 0x2005002 +#define HFI_PROPERTY_PARAM_VENC_H264_DEBLOCK_CONTROL 0x2005003 +#define HFI_PROPERTY_PARAM_VENC_RATE_CONTROL 0x2005004 +#define HFI_PROPERTY_PARAM_VENC_H264_PICORDER_CNT_TYPE 0x2005005 +#define HFI_PROPERTY_PARAM_VENC_SESSION_QP 0x2005006 +#define HFI_PROPERTY_PARAM_VENC_MPEG4_AC_PREDICTION 0x2005007 +#define HFI_PROPERTY_PARAM_VENC_SESSION_QP_RANGE 0x2005008 +/* + * Note: HFI_PROPERTY_PARAM_VENC_SESSION_QP_RANGE_V2 is + * specific to HFI_VERSION_6XX and HFI_VERSION_4XX only + */ +#define HFI_PROPERTY_PARAM_VENC_SESSION_QP_RANGE_V2 0x2005009 +#define HFI_PROPERTY_PARAM_VENC_MPEG4_TIME_RESOLUTION 0x2005009 +#define HFI_PROPERTY_PARAM_VENC_MPEG4_SHORT_HEADER 0x200500a 
+#define HFI_PROPERTY_PARAM_VENC_MPEG4_HEADER_EXTENSION 0x200500b +#define HFI_PROPERTY_PARAM_VENC_OPEN_GOP 0x200500c +#define HFI_PROPERTY_PARAM_VENC_INTRA_REFRESH 0x200500d +#define HFI_PROPERTY_PARAM_VENC_MULTI_SLICE_CONTROL 0x200500e +#define HFI_PROPERTY_PARAM_VENC_VBV_HRD_BUF_SIZE 0x200500f +#define HFI_PROPERTY_PARAM_VENC_QUALITY_VS_SPEED 0x2005010 +#define HFI_PROPERTY_PARAM_VENC_ADVANCED 0x2005012 +#define HFI_PROPERTY_PARAM_VENC_H264_SPS_ID 0x2005014 +#define HFI_PROPERTY_PARAM_VENC_H264_PPS_ID 0x2005015 +#define HFI_PROPERTY_PARAM_VENC_H264_GENERATE_AUDNAL 0x2005016 +#define HFI_PROPERTY_PARAM_VENC_ASPECT_RATIO 0x2005017 +#define HFI_PROPERTY_PARAM_VENC_NUMREF 0x2005018 +#define HFI_PROPERTY_PARAM_VENC_MULTIREF_P 0x2005019 +#define HFI_PROPERTY_PARAM_VENC_H264_NAL_SVC_EXT 0x200501b +#define HFI_PROPERTY_PARAM_VENC_LTRMODE 0x200501c +#define HFI_PROPERTY_PARAM_VENC_VIDEO_FULL_RANGE 0x200501d +#define HFI_PROPERTY_PARAM_VENC_H264_VUI_TIMING_INFO 0x200501e +#define HFI_PROPERTY_PARAM_VENC_VC1_PERF_CFG 0x200501f +#define HFI_PROPERTY_PARAM_VENC_MAX_NUM_B_FRAMES 0x2005020 +#define HFI_PROPERTY_PARAM_VENC_H264_VUI_BITSTREAM_RESTRC 0x2005021 +#define HFI_PROPERTY_PARAM_VENC_PRESERVE_TEXT_QUALITY 0x2005023 +#define HFI_PROPERTY_PARAM_VENC_H264_TRANSFORM_8X8 0x2005025 +#define HFI_PROPERTY_PARAM_VENC_HIER_P_MAX_NUM_ENH_LAYER 0x2005026 +#define HFI_PROPERTY_PARAM_VENC_DISABLE_RC_TIMESTAMP 0x2005027 +#define HFI_PROPERTY_PARAM_VENC_INITIAL_QP 0x2005028 +#define HFI_PROPERTY_PARAM_VENC_VPX_ERROR_RESILIENCE_MODE 0x2005029 +#define HFI_PROPERTY_PARAM_VENC_HIER_B_MAX_NUM_ENH_LAYER 0x200502c +#define HFI_PROPERTY_PARAM_VENC_HIER_P_HYBRID_MODE 0x200502f +#define HFI_PROPERTY_PARAM_VENC_HDR10_PQ_SEI 0x2005036 + +/* + * HFI_PROPERTY_CONFIG_VENC_COMMON_START + * HFI_DOMAIN_BASE_VENC + HFI_ARCH_COMMON_OFFSET + 0x6000 + */ +#define HFI_PROPERTY_CONFIG_VENC_TARGET_BITRATE 0x2006001 +#define HFI_PROPERTY_CONFIG_VENC_IDR_PERIOD 0x2006002 +#define HFI_PROPERTY_CONFIG_VENC_INTRA_PERIOD 0x2006003 +#define HFI_PROPERTY_CONFIG_VENC_REQUEST_SYNC_FRAME 0x2006004 +#define HFI_PROPERTY_CONFIG_VENC_SLICE_SIZE 0x2006005 +#define HFI_PROPERTY_CONFIG_VENC_MAX_BITRATE 0x2006007 +#define HFI_PROPERTY_CONFIG_VENC_SYNC_FRAME_SEQUENCE_HEADER 0x2006008 +#define HFI_PROPERTY_CONFIG_VENC_MARKLTRFRAME 0x2006009 +#define HFI_PROPERTY_CONFIG_VENC_USELTRFRAME 0x200600a +#define HFI_PROPERTY_CONFIG_VENC_HIER_P_ENH_LAYER 0x200600b +#define HFI_PROPERTY_CONFIG_VENC_LTRPERIOD 0x200600c +#define HFI_PROPERTY_CONFIG_VENC_PERF_MODE 0x200600e +#define HFI_PROPERTY_CONFIG_HEIC_FRAME_QUALITY 0x2006014 + +/* + * HFI_PROPERTY_PARAM_VPE_COMMON_START + * HFI_DOMAIN_BASE_VPE + HFI_ARCH_COMMON_OFFSET + 0x7000 + */ + +/* + * HFI_PROPERTY_CONFIG_VPE_COMMON_START + * HFI_DOMAIN_BASE_VPE + HFI_ARCH_COMMON_OFFSET + 0x8000 + */ +#define HFI_PROPERTY_CONFIG_VPE_DEINTERLACE 0x3008001 +#define HFI_PROPERTY_CONFIG_VPE_OPERATIONS 0x3008002 + +enum hfi_version { + HFI_VERSION_1XX, + HFI_VERSION_3XX, + HFI_VERSION_4XX, + HFI_VERSION_6XX, +}; + +struct hfi_buffer_info { + u32 buffer_addr; + u32 extradata_addr; +}; + +struct hfi_bitrate { + u32 bitrate; + u32 layer_id; +}; + +struct hfi_h264_8x8_transform { + u32 enable_type; +}; + +#define HFI_CAPABILITY_FRAME_WIDTH 0x01 +#define HFI_CAPABILITY_FRAME_HEIGHT 0x02 +#define HFI_CAPABILITY_MBS_PER_FRAME 0x03 +#define HFI_CAPABILITY_MBS_PER_SECOND 0x04 +#define HFI_CAPABILITY_FRAMERATE 0x05 +#define HFI_CAPABILITY_SCALE_X 0x06 +#define HFI_CAPABILITY_SCALE_Y 0x07 +#define HFI_CAPABILITY_BITRATE 0x08 +#define 
HFI_CAPABILITY_BFRAME 0x09 +#define HFI_CAPABILITY_PEAKBITRATE 0x0a +#define HFI_CAPABILITY_HIER_P_NUM_ENH_LAYERS 0x10 +#define HFI_CAPABILITY_ENC_LTR_COUNT 0x11 +#define HFI_CAPABILITY_CP_OUTPUT2_THRESH 0x12 +#define HFI_CAPABILITY_HIER_B_NUM_ENH_LAYERS 0x13 +#define HFI_CAPABILITY_LCU_SIZE 0x14 +#define HFI_CAPABILITY_HIER_P_HYBRID_NUM_ENH_LAYERS 0x15 +#define HFI_CAPABILITY_MBS_PER_SECOND_POWERSAVE 0x16 +#define HFI_CAPABILITY_I_FRAME_QP 0x20 +#define HFI_CAPABILITY_P_FRAME_QP 0x21 +#define HFI_CAPABILITY_B_FRAME_QP 0x22 +#define HFI_CAPABILITY_RATE_CONTROL_MODES 0x23 +#define HFI_CAPABILITY_BLUR_WIDTH 0x24 +#define HFI_CAPABILITY_BLUR_HEIGHT 0x25 +#define HFI_CAPABILITY_SLICE_BYTE 0x27 +#define HFI_CAPABILITY_SLICE_MB 0x28 +#define HFI_CAPABILITY_MAX_VIDEOCORES 0x2b +#define HFI_CAPABILITY_MAX_WORKMODES 0x2c +#define HFI_CAPABILITY_ROTATION 0x2f +#define HFI_CAPABILITY_COLOR_SPACE_CONVERSION 0x30 + +struct hfi_capability { + u32 capability_type; + u32 min; + u32 max; + u32 step_size; +}; + +struct hfi_capabilities { + u32 num_capabilities; + struct hfi_capability data[]; +}; + +#define HFI_DEBUG_MSG_LOW 0x01 +#define HFI_DEBUG_MSG_MEDIUM 0x02 +#define HFI_DEBUG_MSG_HIGH 0x04 +#define HFI_DEBUG_MSG_ERROR 0x08 +#define HFI_DEBUG_MSG_FATAL 0x10 +#define HFI_DEBUG_MSG_PERF 0x20 + +#define HFI_DEBUG_MODE_QUEUE 0x01 +#define HFI_DEBUG_MODE_QDSS 0x02 + +struct hfi_debug_config { + u32 config; + u32 mode; +}; + +struct hfi_ubwc_config { + u32 size; + u32 packet_type; + struct { + u32 max_channel_override : 1; + u32 mal_length_override : 1; + u32 hb_override : 1; + u32 bank_swzl_level_override : 1; + u32 bank_spreading_override : 1; + u32 reserved : 27; + } override_bit_info; + u32 max_channels; + u32 mal_length; + u32 highest_bank_bit; + u32 bank_swzl_level; + u32 bank_spreading; + u32 reserved[2]; +}; + +struct hfi_enable { + u32 enable; +}; + +#define HFI_H264_DB_MODE_DISABLE 0x1 +#define HFI_H264_DB_MODE_SKIP_SLICE_BOUNDARY 0x2 +#define HFI_H264_DB_MODE_ALL_BOUNDARY 0x3 + +struct hfi_h264_db_control { + u32 mode; + s32 slice_alpha_offset; + s32 slice_beta_offset; +}; + +#define HFI_H264_ENTROPY_CAVLC 0x1 +#define HFI_H264_ENTROPY_CABAC 0x2 + +#define HFI_H264_CABAC_MODEL_0 0x1 +#define HFI_H264_CABAC_MODEL_1 0x2 +#define HFI_H264_CABAC_MODEL_2 0x3 + +struct hfi_h264_entropy_control { + u32 entropy_mode; + u32 cabac_model; +}; + +struct hfi_framerate { + u32 buffer_type; + u32 framerate; +}; + +#define HFI_INTRA_REFRESH_NONE 0x1 +#define HFI_INTRA_REFRESH_CYCLIC 0x2 +#define HFI_INTRA_REFRESH_ADAPTIVE 0x3 +#define HFI_INTRA_REFRESH_CYCLIC_ADAPTIVE 0x4 +#define HFI_INTRA_REFRESH_RANDOM 0x5 + +struct hfi_intra_refresh { + u32 mode; + u32 air_mbs; + u32 air_ref; + u32 cir_mbs; +}; + +struct hfi_intra_refresh_3x { + u32 mode; + u32 mbs; +}; + +struct hfi_idr_period { + u32 idr_period; +}; + +struct hfi_operations_type { + u32 rotation; + u32 flip; +}; + +struct hfi_max_num_b_frames { + u32 max_num_b_frames; +}; + +struct hfi_vc1e_perf_cfg_type { + u32 search_range_x_subsampled[3]; + u32 search_range_y_subsampled[3]; +}; + +/* + * 0 - 7bit -> Luma (def: 16) + * 8 - 15bit -> Chroma (def: 128) + * format is valid up to v4 + */ +struct hfi_conceal_color { + u32 conceal_color; +}; + +struct hfi_conceal_color_v4 { + u32 conceal_color_8bit; + u32 conceal_color_10bit; +}; + +struct hfi_intra_period { + u32 pframes; + u32 bframes; +}; + +struct hfi_mpeg4_header_extension { + u32 header_extension; +}; + +struct hfi_mpeg4_time_resolution { + u32 time_increment_resolution; +}; + +struct hfi_multi_stream { + 
u32 buffer_type; + u32 enable; + u32 width; + u32 height; +}; + +struct hfi_multi_stream_3x { + u32 buffer_type; + u32 enable; +}; + +struct hfi_multi_view_format { + u32 views; + u32 view_order[1]; +}; + +#define HFI_MULTI_SLICE_OFF 0x1 +#define HFI_MULTI_SLICE_BY_MB_COUNT 0x2 +#define HFI_MULTI_SLICE_BY_BYTE_COUNT 0x3 +#define HFI_MULTI_SLICE_GOB 0x4 + +struct hfi_multi_slice_control { + u32 multi_slice; + u32 slice_size; +}; + +#define HFI_NAL_FORMAT_STARTCODES 0x01 +#define HFI_NAL_FORMAT_ONE_NAL_PER_BUFFER 0x02 +#define HFI_NAL_FORMAT_ONE_BYTE_LENGTH 0x04 +#define HFI_NAL_FORMAT_TWO_BYTE_LENGTH 0x08 +#define HFI_NAL_FORMAT_FOUR_BYTE_LENGTH 0x10 + +struct hfi_nal_stream_format { + u32 format; +}; + +struct hfi_nal_stream_format_select { + u32 format; +}; + +#define HFI_PICTURE_TYPE_I 0x01 +#define HFI_PICTURE_TYPE_P 0x02 +#define HFI_PICTURE_TYPE_B 0x04 +#define HFI_PICTURE_TYPE_IDR 0x08 + +struct hfi_profile_level { + u32 profile; + u32 level; +}; + +#define HFI_MAX_PROFILE_COUNT 16 + +struct hfi_profile_level_supported { + u32 profile_count; + struct hfi_profile_level profile_level[]; +}; + +struct hfi_quality_vs_speed { + u32 quality_vs_speed; +}; + +struct hfi_heic_frame_quality { + u32 frame_quality; + u32 reserved[3]; +}; + +struct hfi_quantization { + u32 qp_i; + u32 qp_p; + u32 qp_b; + u32 layer_id; +}; + +struct hfi_initial_quantization { + u32 qp_i; + u32 qp_p; + u32 qp_b; + u32 init_qp_enable; +}; + +struct hfi_quantization_range { + u32 min_qp; + u32 max_qp; + u32 layer_id; +}; + +struct hfi_quantization_v2 { + u32 qp_packed; + u32 layer_id; + u32 enable; + u32 reserved[3]; +}; + +struct hfi_quantization_range_v2 { + struct hfi_quantization_v2 min_qp; + struct hfi_quantization_v2 max_qp; + u32 reserved[4]; +}; + +#define HFI_LTR_MODE_DISABLE 0x0 +#define HFI_LTR_MODE_MANUAL 0x1 +#define HFI_LTR_MODE_PERIODIC 0x2 + +struct hfi_ltr_mode { + u32 ltr_mode; + u32 ltr_count; + u32 trust_mode; +}; + +struct hfi_ltr_use { + u32 ref_ltr; + u32 use_constrnt; + u32 frames; +}; + +struct hfi_ltr_mark { + u32 mark_frame; +}; + +struct hfi_mastering_display_colour_sei_payload { + u32 display_primaries_x[3]; + u32 display_primaries_y[3]; + u32 white_point_x; + u32 white_point_y; + u32 max_display_mastering_luminance; + u32 min_display_mastering_luminance; +}; + +struct hfi_content_light_level_sei_payload { + u32 max_content_light; + u32 max_pic_average_light; +}; + +struct hfi_hdr10_pq_sei { + struct hfi_mastering_display_colour_sei_payload mastering; + struct hfi_content_light_level_sei_payload cll; +}; + +struct hfi_framesize { + u32 buffer_type; + u32 width; + u32 height; +}; + +#define HFI_VENC_PERFMODE_MAX_QUALITY 0x1 +#define HFI_VENC_PERFMODE_POWER_SAVE 0x2 + +struct hfi_perf_mode { + u32 video_perf_mode; +}; + +#define VIDC_CORE_ID_DEFAULT 0 +#define VIDC_CORE_ID_1 1 +#define VIDC_CORE_ID_2 2 +#define VIDC_CORE_ID_3 3 + +struct hfi_videocores_usage_type { + u32 video_core_enable_mask; +}; + +#define VIDC_WORK_MODE_1 1 +#define VIDC_WORK_MODE_2 2 + +struct hfi_video_work_mode { + u32 video_work_mode; +}; + +struct hfi_video_work_route { + u32 video_work_route; +}; + +struct hfi_h264_vui_timing_info { + u32 enable; + u32 fixed_framerate; + u32 time_scale; +}; + +#define VIDC_BITDEPTH_8 0x00000 +#define VIDC_BITDEPTH_10 0x20002 + +struct hfi_bit_depth { + u32 buffer_type; + u32 bit_depth; +}; + +struct hfi_picture_type { + u32 is_sync_frame; + u32 picture_type; +}; + +struct hfi_pic_struct { + u32 progressive_only; +}; + +struct hfi_colour_space { + u32 colour_space; +}; + +struct 
hfi_extradata_input_crop { + u32 size; + u32 version; + u32 port_index; + u32 left; + u32 top; + u32 width; + u32 height; +}; + +struct hfi_dpb_counts { + u32 max_dpb_count; + u32 max_ref_frames; + u32 max_dec_buffering; + u32 max_reorder_frames; + u32 fw_min_cnt; +}; + +#define HFI_COLOR_FORMAT_MONOCHROME 0x01 +#define HFI_COLOR_FORMAT_NV12 0x02 +#define HFI_COLOR_FORMAT_NV21 0x03 +#define HFI_COLOR_FORMAT_NV12_4x4TILE 0x04 +#define HFI_COLOR_FORMAT_NV21_4x4TILE 0x05 +#define HFI_COLOR_FORMAT_YUYV 0x06 +#define HFI_COLOR_FORMAT_YVYU 0x07 +#define HFI_COLOR_FORMAT_UYVY 0x08 +#define HFI_COLOR_FORMAT_VYUY 0x09 +#define HFI_COLOR_FORMAT_RGB565 0x0a +#define HFI_COLOR_FORMAT_BGR565 0x0b +#define HFI_COLOR_FORMAT_RGB888 0x0c +#define HFI_COLOR_FORMAT_BGR888 0x0d +#define HFI_COLOR_FORMAT_YUV444 0x0e +#define HFI_COLOR_FORMAT_RGBA8888 0x10 + +#define HFI_COLOR_FORMAT_UBWC_BASE 0x8000 +#define HFI_COLOR_FORMAT_10_BIT_BASE 0x4000 + +#define HFI_COLOR_FORMAT_YUV420_TP10 0x4002 +#define HFI_COLOR_FORMAT_P010 0x4003 +#define HFI_COLOR_FORMAT_NV12_UBWC 0x8002 +#define HFI_COLOR_FORMAT_YUV420_TP10_UBWC 0xc002 +#define HFI_COLOR_FORMAT_P010_UBWC 0xc003 +#define HFI_COLOR_FORMAT_RGBA8888_UBWC 0x8010 + +struct hfi_uncompressed_format_select { + u32 buffer_type; + u32 format; +}; + +struct hfi_uncompressed_plane_constraints { + u32 stride_multiples; + u32 max_stride; + u32 min_plane_buffer_height_multiple; + u32 buffer_alignment; +}; + +struct hfi_uncompressed_plane_info { + u32 format; + u32 num_planes; + struct hfi_uncompressed_plane_constraints plane_constraints[1]; +}; + +struct hfi_uncompressed_format_supported { + u32 buffer_type; + u32 format_entries; + struct hfi_uncompressed_plane_info plane_info[1]; +}; + +struct hfi_uncompressed_plane_actual { + int actual_stride; + u32 actual_plane_buffer_height; +}; + +struct hfi_uncompressed_plane_actual_info { + u32 buffer_type; + u32 num_planes; + struct hfi_uncompressed_plane_actual plane_format[2]; +}; + +struct hfi_uncompressed_plane_actual_constraints_info { + u32 buffer_type; + u32 num_planes; + struct hfi_uncompressed_plane_constraints plane_format[2]; +}; + +struct hfi_codec_supported { + u32 dec_codecs; + u32 enc_codecs; +}; + +struct hfi_properties_supported { + u32 num_properties; + u32 properties[1]; +}; + +struct hfi_max_sessions_supported { + u32 max_sessions; +}; + +#define HFI_MAX_MATRIX_COEFFS 9 +#define HFI_MAX_BIAS_COEFFS 3 +#define HFI_MAX_LIMIT_COEFFS 6 + +struct hfi_vpe_color_space_conversion { + u32 csc_matrix[HFI_MAX_MATRIX_COEFFS]; + u32 csc_bias[HFI_MAX_BIAS_COEFFS]; + u32 csc_limit[HFI_MAX_LIMIT_COEFFS]; +}; + +#define HFI_ROTATE_NONE 0x1 +#define HFI_ROTATE_90 0x2 +#define HFI_ROTATE_180 0x3 +#define HFI_ROTATE_270 0x4 + +#define HFI_FLIP_NONE 0x1 +#define HFI_FLIP_HORIZONTAL 0x2 +#define HFI_FLIP_VERTICAL 0x3 + +struct hfi_operations { + u32 rotate; + u32 flip; +}; + +#define HFI_RESOURCE_OCMEM 0x1 + +struct hfi_resource_ocmem { + u32 size; + u32 mem; +}; + +struct hfi_resource_ocmem_requirement { + u32 session_domain; + u32 width; + u32 height; + u32 size; +}; + +struct hfi_resource_ocmem_requirement_info { + u32 num_entries; + struct hfi_resource_ocmem_requirement requirements[1]; +}; + +struct hfi_property_sys_image_version_info_type { + u32 string_size; + u8 str_image_version[1]; +}; + +struct hfi_codec_mask_supported { + u32 codecs; + u32 video_domains; +}; + +struct hfi_seq_header_info { + u32 max_hader_len; +}; + +struct hfi_aspect_ratio { + u32 aspect_width; + u32 aspect_height; +}; + +#define 
HFI_MVC_BUFFER_LAYOUT_TOP_BOTTOM 0 +#define HFI_MVC_BUFFER_LAYOUT_SIDEBYSIDE 1 +#define HFI_MVC_BUFFER_LAYOUT_SEQ 2 + +struct hfi_mvc_buffer_layout_descp_type { + u32 layout_type; + u32 bright_view_first; + u32 ngap; +}; + +struct hfi_scs_threshold { + u32 threshold_value; +}; + +#define HFI_TEST_SSR_SW_ERR_FATAL 0x1 +#define HFI_TEST_SSR_SW_DIV_BY_ZERO 0x2 +#define HFI_TEST_SSR_HW_WDOG_IRQ 0x3 + +struct hfi_buffer_alloc_mode { + u32 type; + u32 mode; +}; + +struct hfi_index_extradata_config { + u32 enable; + u32 index_extra_data_id; +}; + +struct hfi_extradata_header { + u32 size; + u32 version; + u32 port_index; + u32 type; + u32 data_size; + u8 data[1]; +}; + +struct hfi_batch_info { + u32 input_batch_count; + u32 output_batch_count; +}; + +struct hfi_buffer_count_actual { + u32 type; + u32 count_actual; +}; + +struct hfi_buffer_count_actual_4xx { + u32 type; + u32 count_actual; + u32 count_min_host; +}; + +struct hfi_buffer_size_actual { + u32 type; + u32 size; +}; + +struct hfi_buffer_display_hold_count_actual { + u32 type; + u32 hold_count; +}; + +struct hfi_buffer_requirements { + u32 type; + u32 size; + u32 region_size; + u32 hold_count; + u32 count_min; + u32 count_actual; + u32 contiguous; + u32 alignment; +}; + +/* On HFI 4XX, some of the struct members have been swapped. */ +static inline u32 hfi_bufreq_get_hold_count(struct hfi_buffer_requirements *req, + u32 ver) +{ + if (ver == HFI_VERSION_4XX) + return 0; + + return req->hold_count; +}; + +static inline u32 hfi_bufreq_get_count_min(struct hfi_buffer_requirements *req, + u32 ver) +{ + if (ver == HFI_VERSION_4XX) + return req->hold_count; + + return req->count_min; +}; + +static inline u32 hfi_bufreq_get_count_min_host(struct hfi_buffer_requirements *req, + u32 ver) +{ + if (ver == HFI_VERSION_4XX) + return req->count_min; + + return 0; +}; + +static inline void hfi_bufreq_set_hold_count(struct hfi_buffer_requirements *req, + u32 ver, u32 val) +{ + if (ver == HFI_VERSION_4XX) + return; + + req->hold_count = val; +}; + +static inline void hfi_bufreq_set_count_min(struct hfi_buffer_requirements *req, + u32 ver, u32 val) +{ + if (ver == HFI_VERSION_4XX) + req->hold_count = val; + + req->count_min = val; +}; + +static inline void hfi_bufreq_set_count_min_host(struct hfi_buffer_requirements *req, + u32 ver, u32 val) +{ + if (ver == HFI_VERSION_4XX) + req->count_min = val; +}; + +struct hfi_data_payload { + u32 size; + u8 data[1]; +}; + +struct hfi_enable_picture { + u32 picture_type; +}; + +struct hfi_display_picture_buffer_count { + int enable; + u32 count; +}; + +struct hfi_extra_data_header_config { + u32 type; + u32 buffer_type; + u32 version; + u32 port_index; + u32 client_extra_data_id; +}; + +struct hfi_interlace_format_supported { + u32 buffer_type; + u32 format; +}; + +struct hfi_buffer_alloc_mode_supported { + u32 buffer_type; + u32 num_entries; + u32 data[1]; +}; + +struct hfi_mb_error_map { + u32 error_map_size; + u8 error_map[1]; +}; + +struct hfi_metadata_pass_through { + int enable; + u32 size; +}; + +struct hfi_multi_view_select { + u32 view_index; +}; + +struct hfi_hybrid_hierp { + u32 layers; +}; + +struct hfi_pkt_hdr { + u32 size; + u32 pkt_type; +}; + +struct hfi_session_hdr_pkt { + struct hfi_pkt_hdr hdr; + u32 session_id; +}; + +struct hfi_session_pkt { + struct hfi_session_hdr_pkt shdr; +}; + +#endif diff --git a/drivers/media/platform/qcom/venus/hfi_msgs.c b/drivers/media/platform/qcom/venus/hfi_msgs.c new file mode 100644 index 0000000000..0a041b4db9 --- /dev/null +++ 
b/drivers/media/platform/qcom/venus/hfi_msgs.c @@ -0,0 +1,820 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. + * Copyright (C) 2017 Linaro Ltd. + */ +#include <linux/hash.h> +#include <linux/list.h> +#include <linux/slab.h> +#include <linux/soc/qcom/smem.h> +#include <media/videobuf2-v4l2.h> + +#include "core.h" +#include "hfi.h" +#include "hfi_helper.h" +#include "hfi_msgs.h" +#include "hfi_parser.h" + +#define SMEM_IMG_VER_TBL 469 +#define VER_STR_SZ 128 +#define SMEM_IMG_OFFSET_VENUS (14 * 128) + +static void event_seq_changed(struct venus_core *core, struct venus_inst *inst, + struct hfi_msg_event_notify_pkt *pkt) +{ + enum hfi_version ver = core->res->hfi_version; + struct hfi_event_data event = {0}; + int num_properties_changed; + struct hfi_framesize *frame_sz; + struct hfi_profile_level *profile_level; + struct hfi_bit_depth *pixel_depth; + struct hfi_pic_struct *pic_struct; + struct hfi_colour_space *colour_info; + struct hfi_buffer_requirements *bufreq; + struct hfi_extradata_input_crop *crop; + struct hfi_dpb_counts *dpb_count; + u8 *data_ptr; + u32 ptype; + + inst->error = HFI_ERR_NONE; + + switch (pkt->event_data1) { + case HFI_EVENT_DATA_SEQUENCE_CHANGED_SUFFICIENT_BUF_RESOURCES: + case HFI_EVENT_DATA_SEQUENCE_CHANGED_INSUFFICIENT_BUF_RESOURCES: + break; + default: + inst->error = HFI_ERR_SESSION_INVALID_PARAMETER; + goto done; + } + + event.event_type = pkt->event_data1; + + num_properties_changed = pkt->event_data2; + if (!num_properties_changed) { + inst->error = HFI_ERR_SESSION_INSUFFICIENT_RESOURCES; + goto done; + } + + data_ptr = (u8 *)&pkt->ext_event_data[0]; + do { + ptype = *((u32 *)data_ptr); + switch (ptype) { + case HFI_PROPERTY_PARAM_FRAME_SIZE: + data_ptr += sizeof(u32); + frame_sz = (struct hfi_framesize *)data_ptr; + event.width = frame_sz->width; + event.height = frame_sz->height; + data_ptr += sizeof(*frame_sz); + break; + case HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT: + data_ptr += sizeof(u32); + profile_level = (struct hfi_profile_level *)data_ptr; + event.profile = profile_level->profile; + event.level = profile_level->level; + data_ptr += sizeof(*profile_level); + break; + case HFI_PROPERTY_PARAM_VDEC_PIXEL_BITDEPTH: + data_ptr += sizeof(u32); + pixel_depth = (struct hfi_bit_depth *)data_ptr; + event.bit_depth = pixel_depth->bit_depth; + data_ptr += sizeof(*pixel_depth); + break; + case HFI_PROPERTY_PARAM_VDEC_PIC_STRUCT: + data_ptr += sizeof(u32); + pic_struct = (struct hfi_pic_struct *)data_ptr; + event.pic_struct = pic_struct->progressive_only; + data_ptr += sizeof(*pic_struct); + break; + case HFI_PROPERTY_PARAM_VDEC_COLOUR_SPACE: + data_ptr += sizeof(u32); + colour_info = (struct hfi_colour_space *)data_ptr; + event.colour_space = colour_info->colour_space; + data_ptr += sizeof(*colour_info); + break; + case HFI_PROPERTY_CONFIG_VDEC_ENTROPY: + data_ptr += sizeof(u32); + event.entropy_mode = *(u32 *)data_ptr; + data_ptr += sizeof(u32); + break; + case HFI_PROPERTY_CONFIG_BUFFER_REQUIREMENTS: + data_ptr += sizeof(u32); + bufreq = (struct hfi_buffer_requirements *)data_ptr; + event.buf_count = hfi_bufreq_get_count_min(bufreq, ver); + data_ptr += sizeof(*bufreq); + break; + case HFI_INDEX_EXTRADATA_INPUT_CROP: + data_ptr += sizeof(u32); + crop = (struct hfi_extradata_input_crop *)data_ptr; + event.input_crop.left = crop->left; + event.input_crop.top = crop->top; + event.input_crop.width = crop->width; + event.input_crop.height = crop->height; + data_ptr += sizeof(*crop); + break; 
+ case HFI_PROPERTY_PARAM_VDEC_DPB_COUNTS: + data_ptr += sizeof(u32); + dpb_count = (struct hfi_dpb_counts *)data_ptr; + event.buf_count = dpb_count->fw_min_cnt; + data_ptr += sizeof(*dpb_count); + break; + default: + break; + } + num_properties_changed--; + } while (num_properties_changed > 0); + +done: + inst->ops->event_notify(inst, EVT_SYS_EVENT_CHANGE, &event); +} + +static void event_release_buffer_ref(struct venus_core *core, + struct venus_inst *inst, + struct hfi_msg_event_notify_pkt *pkt) +{ + struct hfi_event_data event = {0}; + struct hfi_msg_event_release_buffer_ref_pkt *data; + + data = (struct hfi_msg_event_release_buffer_ref_pkt *) + pkt->ext_event_data; + + event.event_type = HFI_EVENT_RELEASE_BUFFER_REFERENCE; + event.packet_buffer = data->packet_buffer; + event.extradata_buffer = data->extradata_buffer; + event.tag = data->output_tag; + + inst->error = HFI_ERR_NONE; + inst->ops->event_notify(inst, EVT_SYS_EVENT_CHANGE, &event); +} + +static void event_sys_error(struct venus_core *core, u32 event, + struct hfi_msg_event_notify_pkt *pkt) +{ + if (pkt) + dev_dbg(core->dev, VDBGH + "sys error (session id:%x, data1:%x, data2:%x)\n", + pkt->shdr.session_id, pkt->event_data1, + pkt->event_data2); + + core->core_ops->event_notify(core, event); +} + +static void +event_session_error(struct venus_core *core, struct venus_inst *inst, + struct hfi_msg_event_notify_pkt *pkt) +{ + struct device *dev = core->dev; + + dev_dbg(dev, VDBGH "session error: event id:%x, session id:%x\n", + pkt->event_data1, pkt->shdr.session_id); + + if (!inst) + return; + + switch (pkt->event_data1) { + /* non fatal session errors */ + case HFI_ERR_SESSION_INVALID_SCALE_FACTOR: + case HFI_ERR_SESSION_UNSUPPORT_BUFFERTYPE: + case HFI_ERR_SESSION_UNSUPPORTED_SETTING: + case HFI_ERR_SESSION_UPSCALE_NOT_SUPPORTED: + inst->error = HFI_ERR_NONE; + break; + default: + dev_err(dev, "session error: event id:%x (%x), session id:%x\n", + pkt->event_data1, pkt->event_data2, + pkt->shdr.session_id); + + inst->error = pkt->event_data1; + inst->ops->event_notify(inst, EVT_SESSION_ERROR, NULL); + break; + } +} + +static void hfi_event_notify(struct venus_core *core, struct venus_inst *inst, + void *packet) +{ + struct hfi_msg_event_notify_pkt *pkt = packet; + + if (!packet) + return; + + switch (pkt->event_id) { + case HFI_EVENT_SYS_ERROR: + event_sys_error(core, EVT_SYS_ERROR, pkt); + break; + case HFI_EVENT_SESSION_ERROR: + event_session_error(core, inst, pkt); + break; + case HFI_EVENT_SESSION_SEQUENCE_CHANGED: + event_seq_changed(core, inst, pkt); + break; + case HFI_EVENT_RELEASE_BUFFER_REFERENCE: + event_release_buffer_ref(core, inst, pkt); + break; + case HFI_EVENT_SESSION_PROPERTY_CHANGED: + break; + default: + break; + } +} + +static void hfi_sys_init_done(struct venus_core *core, struct venus_inst *inst, + void *packet) +{ + struct hfi_msg_sys_init_done_pkt *pkt = packet; + int rem_bytes; + u32 error; + + error = pkt->error_type; + if (error != HFI_ERR_NONE) + goto done; + + if (!pkt->num_properties) { + error = HFI_ERR_SYS_INVALID_PARAMETER; + goto done; + } + + rem_bytes = pkt->hdr.size - sizeof(*pkt); + if (rem_bytes <= 0) { + /* missing property data */ + error = HFI_ERR_SYS_INSUFFICIENT_RESOURCES; + goto done; + } + + error = hfi_parser(core, inst, pkt->data, rem_bytes); + +done: + core->error = error; + complete(&core->done); +} + +static void +sys_get_prop_image_version(struct venus_core *core, + struct hfi_msg_sys_property_info_pkt *pkt) +{ + struct device *dev = core->dev; + u8 *smem_tbl_ptr; + u8 
*img_ver; + int req_bytes; + size_t smem_blk_sz; + int ret; + + req_bytes = pkt->hdr.size - sizeof(*pkt); + + if (req_bytes < VER_STR_SZ || !pkt->data[0] || pkt->num_properties > 1) + /* bad packet */ + return; + + img_ver = pkt->data; + if (!img_ver) + return; + + ret = sscanf(img_ver, "14:video-firmware.%u.%u-%u", + &core->venus_ver.major, &core->venus_ver.minor, &core->venus_ver.rev); + if (ret) + goto done; + + ret = sscanf(img_ver, "14:VIDEO.VPU.%u.%u-%u", + &core->venus_ver.major, &core->venus_ver.minor, &core->venus_ver.rev); + if (ret) + goto done; + + ret = sscanf(img_ver, "14:VIDEO.VE.%u.%u-%u", + &core->venus_ver.major, &core->venus_ver.minor, &core->venus_ver.rev); + if (ret) + goto done; + + dev_err(dev, VDBGL "error reading F/W version\n"); + return; + +done: + dev_dbg(dev, VDBGL "F/W version: %s, major %u, minor %u, revision %u\n", + img_ver, core->venus_ver.major, core->venus_ver.minor, core->venus_ver.rev); + + smem_tbl_ptr = qcom_smem_get(QCOM_SMEM_HOST_ANY, + SMEM_IMG_VER_TBL, &smem_blk_sz); + if (!IS_ERR(smem_tbl_ptr) && smem_blk_sz >= SMEM_IMG_OFFSET_VENUS + VER_STR_SZ) + memcpy(smem_tbl_ptr + SMEM_IMG_OFFSET_VENUS, + img_ver, VER_STR_SZ); +} + +static void hfi_sys_property_info(struct venus_core *core, + struct venus_inst *inst, void *packet) +{ + struct hfi_msg_sys_property_info_pkt *pkt = packet; + struct device *dev = core->dev; + + if (!pkt->num_properties) { + dev_dbg(dev, VDBGL "no properties\n"); + return; + } + + switch (pkt->property) { + case HFI_PROPERTY_SYS_IMAGE_VERSION: + sys_get_prop_image_version(core, pkt); + break; + default: + dev_dbg(dev, VDBGL "unknown property data\n"); + break; + } +} + +static void hfi_sys_rel_resource_done(struct venus_core *core, + struct venus_inst *inst, + void *packet) +{ + struct hfi_msg_sys_release_resource_done_pkt *pkt = packet; + + core->error = pkt->error_type; + complete(&core->done); +} + +static void hfi_sys_ping_done(struct venus_core *core, struct venus_inst *inst, + void *packet) +{ + struct hfi_msg_sys_ping_ack_pkt *pkt = packet; + + core->error = HFI_ERR_NONE; + + if (pkt->client_data != 0xbeef) + core->error = HFI_ERR_SYS_FATAL; + + complete(&core->done); +} + +static void hfi_sys_idle_done(struct venus_core *core, struct venus_inst *inst, + void *packet) +{ + dev_dbg(core->dev, VDBGL "sys idle\n"); +} + +static void hfi_sys_pc_prepare_done(struct venus_core *core, + struct venus_inst *inst, void *packet) +{ + struct hfi_msg_sys_pc_prep_done_pkt *pkt = packet; + + dev_dbg(core->dev, VDBGL "pc prepare done (error %x)\n", + pkt->error_type); +} + +static unsigned int +session_get_prop_profile_level(struct hfi_msg_session_property_info_pkt *pkt, + struct hfi_profile_level *profile_level) +{ + struct hfi_profile_level *hfi; + u32 req_bytes; + + req_bytes = pkt->shdr.hdr.size - sizeof(*pkt); + + if (!req_bytes || req_bytes % sizeof(struct hfi_profile_level)) + /* bad packet */ + return HFI_ERR_SESSION_INVALID_PARAMETER; + + hfi = (struct hfi_profile_level *)&pkt->data[0]; + profile_level->profile = hfi->profile; + profile_level->level = hfi->level; + + return HFI_ERR_NONE; +} + +static unsigned int +session_get_prop_buf_req(struct hfi_msg_session_property_info_pkt *pkt, + struct hfi_buffer_requirements *bufreq) +{ + struct hfi_buffer_requirements *buf_req; + u32 req_bytes; + unsigned int idx = 0; + + req_bytes = pkt->shdr.hdr.size - sizeof(*pkt); + + if (!req_bytes || req_bytes % sizeof(*buf_req) || !pkt->data[0]) + /* bad packet */ + return HFI_ERR_SESSION_INVALID_PARAMETER; + + buf_req = (struct 
hfi_buffer_requirements *)&pkt->data[0]; + if (!buf_req) + return HFI_ERR_SESSION_INVALID_PARAMETER; + + while (req_bytes) { + memcpy(&bufreq[idx], buf_req, sizeof(*bufreq)); + idx++; + + if (idx >= HFI_BUFFER_TYPE_MAX) + return HFI_ERR_SESSION_INVALID_PARAMETER; + + req_bytes -= sizeof(struct hfi_buffer_requirements); + buf_req++; + } + + return HFI_ERR_NONE; +} + +static void hfi_session_prop_info(struct venus_core *core, + struct venus_inst *inst, void *packet) +{ + struct hfi_msg_session_property_info_pkt *pkt = packet; + struct device *dev = core->dev; + union hfi_get_property *hprop = &inst->hprop; + unsigned int error = HFI_ERR_NONE; + + if (!pkt->num_properties) { + error = HFI_ERR_SESSION_INVALID_PARAMETER; + dev_err(dev, "%s: no properties\n", __func__); + goto done; + } + + switch (pkt->property) { + case HFI_PROPERTY_CONFIG_BUFFER_REQUIREMENTS: + memset(hprop->bufreq, 0, sizeof(hprop->bufreq)); + error = session_get_prop_buf_req(pkt, hprop->bufreq); + break; + case HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT: + memset(&hprop->profile_level, 0, sizeof(hprop->profile_level)); + error = session_get_prop_profile_level(pkt, + &hprop->profile_level); + break; + case HFI_PROPERTY_CONFIG_VDEC_ENTROPY: + break; + default: + dev_dbg(dev, VDBGM "unknown property id:%x\n", pkt->property); + return; + } + +done: + inst->error = error; + complete(&inst->done); +} + +static void hfi_session_init_done(struct venus_core *core, + struct venus_inst *inst, void *packet) +{ + struct hfi_msg_session_init_done_pkt *pkt = packet; + int rem_bytes; + u32 error; + + error = pkt->error_type; + if (error != HFI_ERR_NONE) + goto done; + + if (!IS_V1(core)) + goto done; + + rem_bytes = pkt->shdr.hdr.size - sizeof(*pkt); + if (rem_bytes <= 0) { + error = HFI_ERR_SESSION_INSUFFICIENT_RESOURCES; + goto done; + } + + error = hfi_parser(core, inst, pkt->data, rem_bytes); +done: + inst->error = error; + complete(&inst->done); +} + +static void hfi_session_load_res_done(struct venus_core *core, + struct venus_inst *inst, void *packet) +{ + struct hfi_msg_session_load_resources_done_pkt *pkt = packet; + + inst->error = pkt->error_type; + complete(&inst->done); +} + +static void hfi_session_flush_done(struct venus_core *core, + struct venus_inst *inst, void *packet) +{ + struct hfi_msg_session_flush_done_pkt *pkt = packet; + + inst->error = pkt->error_type; + complete(&inst->done); + if (inst->ops->flush_done) + inst->ops->flush_done(inst); +} + +static void hfi_session_etb_done(struct venus_core *core, + struct venus_inst *inst, void *packet) +{ + struct hfi_msg_session_empty_buffer_done_pkt *pkt = packet; + + inst->error = pkt->error_type; + inst->ops->buf_done(inst, HFI_BUFFER_INPUT, pkt->input_tag, + pkt->filled_len, pkt->offset, 0, 0, 0); +} + +static void hfi_session_ftb_done(struct venus_core *core, + struct venus_inst *inst, void *packet) +{ + u32 session_type = inst->session_type; + u64 timestamp_us = 0; + u32 timestamp_hi = 0, timestamp_lo = 0; + unsigned int error; + u32 flags = 0, hfi_flags = 0, offset = 0, filled_len = 0; + u32 pic_type = 0, buffer_type = 0, output_tag = -1; + + if (session_type == VIDC_SESSION_TYPE_ENC) { + struct hfi_msg_session_fbd_compressed_pkt *pkt = packet; + + timestamp_hi = pkt->time_stamp_hi; + timestamp_lo = pkt->time_stamp_lo; + hfi_flags = pkt->flags; + offset = pkt->offset; + filled_len = pkt->filled_len; + pic_type = pkt->picture_type; + output_tag = pkt->output_tag; + buffer_type = HFI_BUFFER_OUTPUT; + + error = pkt->error_type; + } else if (session_type == 
VIDC_SESSION_TYPE_DEC) { + struct hfi_msg_session_fbd_uncompressed_plane0_pkt *pkt = + packet; + + timestamp_hi = pkt->time_stamp_hi; + timestamp_lo = pkt->time_stamp_lo; + hfi_flags = pkt->flags; + offset = pkt->offset; + filled_len = pkt->filled_len; + pic_type = pkt->picture_type; + output_tag = pkt->output_tag; + + if (pkt->stream_id == 0) + buffer_type = HFI_BUFFER_OUTPUT; + else if (pkt->stream_id == 1) + buffer_type = HFI_BUFFER_OUTPUT2; + + error = pkt->error_type; + } else { + error = HFI_ERR_SESSION_INVALID_PARAMETER; + } + + if (buffer_type != HFI_BUFFER_OUTPUT && + buffer_type != HFI_BUFFER_OUTPUT2) + goto done; + + if (hfi_flags & HFI_BUFFERFLAG_EOS) + flags |= V4L2_BUF_FLAG_LAST; + + switch (pic_type) { + case HFI_PICTURE_IDR: + case HFI_PICTURE_I: + flags |= V4L2_BUF_FLAG_KEYFRAME; + break; + case HFI_PICTURE_P: + flags |= V4L2_BUF_FLAG_PFRAME; + break; + case HFI_PICTURE_B: + flags |= V4L2_BUF_FLAG_BFRAME; + break; + case HFI_FRAME_NOTCODED: + case HFI_UNUSED_PICT: + case HFI_FRAME_YUV: + default: + break; + } + + if (!(hfi_flags & HFI_BUFFERFLAG_TIMESTAMPINVALID) && filled_len) { + timestamp_us = timestamp_hi; + timestamp_us = (timestamp_us << 32) | timestamp_lo; + } + +done: + inst->error = error; + inst->ops->buf_done(inst, buffer_type, output_tag, filled_len, + offset, flags, hfi_flags, timestamp_us); +} + +static void hfi_session_start_done(struct venus_core *core, + struct venus_inst *inst, void *packet) +{ + struct hfi_msg_session_start_done_pkt *pkt = packet; + + inst->error = pkt->error_type; + complete(&inst->done); +} + +static void hfi_session_stop_done(struct venus_core *core, + struct venus_inst *inst, void *packet) +{ + struct hfi_msg_session_stop_done_pkt *pkt = packet; + + inst->error = pkt->error_type; + complete(&inst->done); +} + +static void hfi_session_rel_res_done(struct venus_core *core, + struct venus_inst *inst, void *packet) +{ + struct hfi_msg_session_release_resources_done_pkt *pkt = packet; + + inst->error = pkt->error_type; + complete(&inst->done); +} + +static void hfi_session_rel_buf_done(struct venus_core *core, + struct venus_inst *inst, void *packet) +{ + struct hfi_msg_session_release_buffers_done_pkt *pkt = packet; + + inst->error = pkt->error_type; + complete(&inst->done); +} + +static void hfi_session_end_done(struct venus_core *core, + struct venus_inst *inst, void *packet) +{ + struct hfi_msg_session_end_done_pkt *pkt = packet; + + inst->error = pkt->error_type; + complete(&inst->done); +} + +static void hfi_session_abort_done(struct venus_core *core, + struct venus_inst *inst, void *packet) +{ + struct hfi_msg_sys_session_abort_done_pkt *pkt = packet; + + inst->error = pkt->error_type; + complete(&inst->done); +} + +static void hfi_session_get_seq_hdr_done(struct venus_core *core, + struct venus_inst *inst, void *packet) +{ + struct hfi_msg_session_get_sequence_hdr_done_pkt *pkt = packet; + + inst->error = pkt->error_type; + complete(&inst->done); +} + +struct hfi_done_handler { + u32 pkt; + u32 pkt_sz; + u32 pkt_sz2; + void (*done)(struct venus_core *, struct venus_inst *, void *); + bool is_sys_pkt; +}; + +static const struct hfi_done_handler handlers[] = { + {.pkt = HFI_MSG_EVENT_NOTIFY, + .pkt_sz = sizeof(struct hfi_msg_event_notify_pkt), + .done = hfi_event_notify, + }, + {.pkt = HFI_MSG_SYS_INIT, + .pkt_sz = sizeof(struct hfi_msg_sys_init_done_pkt), + .done = hfi_sys_init_done, + .is_sys_pkt = true, + }, + {.pkt = HFI_MSG_SYS_PROPERTY_INFO, + .pkt_sz = sizeof(struct hfi_msg_sys_property_info_pkt), + .done = 
hfi_sys_property_info, + .is_sys_pkt = true, + }, + {.pkt = HFI_MSG_SYS_RELEASE_RESOURCE, + .pkt_sz = sizeof(struct hfi_msg_sys_release_resource_done_pkt), + .done = hfi_sys_rel_resource_done, + .is_sys_pkt = true, + }, + {.pkt = HFI_MSG_SYS_PING_ACK, + .pkt_sz = sizeof(struct hfi_msg_sys_ping_ack_pkt), + .done = hfi_sys_ping_done, + .is_sys_pkt = true, + }, + {.pkt = HFI_MSG_SYS_IDLE, + .pkt_sz = sizeof(struct hfi_msg_sys_idle_pkt), + .done = hfi_sys_idle_done, + .is_sys_pkt = true, + }, + {.pkt = HFI_MSG_SYS_PC_PREP, + .pkt_sz = sizeof(struct hfi_msg_sys_pc_prep_done_pkt), + .done = hfi_sys_pc_prepare_done, + .is_sys_pkt = true, + }, + {.pkt = HFI_MSG_SYS_SESSION_INIT, + .pkt_sz = sizeof(struct hfi_msg_session_init_done_pkt), + .done = hfi_session_init_done, + }, + {.pkt = HFI_MSG_SYS_SESSION_END, + .pkt_sz = sizeof(struct hfi_msg_session_end_done_pkt), + .done = hfi_session_end_done, + }, + {.pkt = HFI_MSG_SESSION_LOAD_RESOURCES, + .pkt_sz = sizeof(struct hfi_msg_session_load_resources_done_pkt), + .done = hfi_session_load_res_done, + }, + {.pkt = HFI_MSG_SESSION_START, + .pkt_sz = sizeof(struct hfi_msg_session_start_done_pkt), + .done = hfi_session_start_done, + }, + {.pkt = HFI_MSG_SESSION_STOP, + .pkt_sz = sizeof(struct hfi_msg_session_stop_done_pkt), + .done = hfi_session_stop_done, + }, + {.pkt = HFI_MSG_SYS_SESSION_ABORT, + .pkt_sz = sizeof(struct hfi_msg_sys_session_abort_done_pkt), + .done = hfi_session_abort_done, + }, + {.pkt = HFI_MSG_SESSION_EMPTY_BUFFER, + .pkt_sz = sizeof(struct hfi_msg_session_empty_buffer_done_pkt), + .done = hfi_session_etb_done, + }, + {.pkt = HFI_MSG_SESSION_FILL_BUFFER, + .pkt_sz = sizeof(struct hfi_msg_session_fbd_uncompressed_plane0_pkt), + .pkt_sz2 = sizeof(struct hfi_msg_session_fbd_compressed_pkt), + .done = hfi_session_ftb_done, + }, + {.pkt = HFI_MSG_SESSION_FLUSH, + .pkt_sz = sizeof(struct hfi_msg_session_flush_done_pkt), + .done = hfi_session_flush_done, + }, + {.pkt = HFI_MSG_SESSION_PROPERTY_INFO, + .pkt_sz = sizeof(struct hfi_msg_session_property_info_pkt), + .done = hfi_session_prop_info, + }, + {.pkt = HFI_MSG_SESSION_RELEASE_RESOURCES, + .pkt_sz = sizeof(struct hfi_msg_session_release_resources_done_pkt), + .done = hfi_session_rel_res_done, + }, + {.pkt = HFI_MSG_SESSION_GET_SEQUENCE_HEADER, + .pkt_sz = sizeof(struct hfi_msg_session_get_sequence_hdr_done_pkt), + .done = hfi_session_get_seq_hdr_done, + }, + {.pkt = HFI_MSG_SESSION_RELEASE_BUFFERS, + .pkt_sz = sizeof(struct hfi_msg_session_release_buffers_done_pkt), + .done = hfi_session_rel_buf_done, + }, +}; + +void hfi_process_watchdog_timeout(struct venus_core *core) +{ + event_sys_error(core, EVT_SYS_WATCHDOG_TIMEOUT, NULL); +} + +static struct venus_inst *to_instance(struct venus_core *core, u32 session_id) +{ + struct venus_inst *inst; + + mutex_lock(&core->lock); + list_for_each_entry(inst, &core->instances, list) + if (hash32_ptr(inst) == session_id) { + mutex_unlock(&core->lock); + return inst; + } + mutex_unlock(&core->lock); + + return NULL; +} + +u32 hfi_process_msg_packet(struct venus_core *core, struct hfi_pkt_hdr *hdr) +{ + const struct hfi_done_handler *handler; + struct device *dev = core->dev; + struct venus_inst *inst; + bool found = false; + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(handlers); i++) { + handler = &handlers[i]; + if (handler->pkt != hdr->pkt_type) + continue; + found = true; + break; + } + + if (!found) + return hdr->pkt_type; + + if (hdr->size && hdr->size < handler->pkt_sz && + hdr->size < handler->pkt_sz2) { + dev_err(dev, "bad packet size (%d 
should be %d, pkt type:%x)\n", + hdr->size, handler->pkt_sz, hdr->pkt_type); + + return hdr->pkt_type; + } + + if (handler->is_sys_pkt) { + inst = NULL; + } else { + struct hfi_session_pkt *pkt; + + pkt = (struct hfi_session_pkt *)hdr; + inst = to_instance(core, pkt->shdr.session_id); + + if (!inst) + dev_warn(dev, "no valid instance(pkt session_id:%x, pkt:%x)\n", + pkt->shdr.session_id, + handler ? handler->pkt : 0); + + /* + * Event of type HFI_EVENT_SYS_ERROR will not have any session + * associated with it + */ + if (!inst && hdr->pkt_type != HFI_MSG_EVENT_NOTIFY) { + dev_err(dev, "got invalid session id:%x\n", + pkt->shdr.session_id); + goto invalid_session; + } + } + + handler->done(core, inst, hdr); + +invalid_session: + return hdr->pkt_type; +} diff --git a/drivers/media/platform/qcom/venus/hfi_msgs.h b/drivers/media/platform/qcom/venus/hfi_msgs.h new file mode 100644 index 0000000000..8c2e17b0d3 --- /dev/null +++ b/drivers/media/platform/qcom/venus/hfi_msgs.h @@ -0,0 +1,276 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. + * Copyright (C) 2017 Linaro Ltd. + */ +#ifndef __VENUS_HFI_MSGS_H__ +#define __VENUS_HFI_MSGS_H__ + +/* message calls */ +#define HFI_MSG_SYS_INIT 0x20001 +#define HFI_MSG_SYS_PC_PREP 0x20002 +#define HFI_MSG_SYS_RELEASE_RESOURCE 0x20003 +#define HFI_MSG_SYS_DEBUG 0x20004 +#define HFI_MSG_SYS_SESSION_INIT 0x20006 +#define HFI_MSG_SYS_SESSION_END 0x20007 +#define HFI_MSG_SYS_IDLE 0x20008 +#define HFI_MSG_SYS_COV 0x20009 +#define HFI_MSG_SYS_PROPERTY_INFO 0x2000a + +#define HFI_MSG_EVENT_NOTIFY 0x21001 +#define HFI_MSG_SESSION_GET_SEQUENCE_HEADER 0x21002 + +#define HFI_MSG_SYS_PING_ACK 0x220002 +#define HFI_MSG_SYS_SESSION_ABORT 0x220004 + +#define HFI_MSG_SESSION_LOAD_RESOURCES 0x221001 +#define HFI_MSG_SESSION_START 0x221002 +#define HFI_MSG_SESSION_STOP 0x221003 +#define HFI_MSG_SESSION_SUSPEND 0x221004 +#define HFI_MSG_SESSION_RESUME 0x221005 +#define HFI_MSG_SESSION_FLUSH 0x221006 +#define HFI_MSG_SESSION_EMPTY_BUFFER 0x221007 +#define HFI_MSG_SESSION_FILL_BUFFER 0x221008 +#define HFI_MSG_SESSION_PROPERTY_INFO 0x221009 +#define HFI_MSG_SESSION_RELEASE_RESOURCES 0x22100a +#define HFI_MSG_SESSION_PARSE_SEQUENCE_HEADER 0x22100b +#define HFI_MSG_SESSION_RELEASE_BUFFERS 0x22100c + +#define HFI_PICTURE_I 0x00000001 +#define HFI_PICTURE_P 0x00000002 +#define HFI_PICTURE_B 0x00000004 +#define HFI_PICTURE_IDR 0x00000008 +#define HFI_FRAME_NOTCODED 0x7f002000 +#define HFI_FRAME_YUV 0x7f004000 +#define HFI_UNUSED_PICT 0x10000000 + +/* message packets */ +struct hfi_msg_event_notify_pkt { + struct hfi_session_hdr_pkt shdr; + u32 event_id; + u32 event_data1; + u32 event_data2; + u32 ext_event_data[]; +}; + +struct hfi_msg_event_release_buffer_ref_pkt { + u32 packet_buffer; + u32 extradata_buffer; + u32 output_tag; +}; + +struct hfi_msg_sys_init_done_pkt { + struct hfi_pkt_hdr hdr; + u32 error_type; + u32 num_properties; + u32 data[]; +}; + +struct hfi_msg_sys_pc_prep_done_pkt { + struct hfi_pkt_hdr hdr; + u32 error_type; +}; + +struct hfi_msg_sys_release_resource_done_pkt { + struct hfi_pkt_hdr hdr; + u32 resource_handle; + u32 error_type; +}; + +struct hfi_msg_session_init_done_pkt { + struct hfi_session_hdr_pkt shdr; + u32 error_type; + u32 num_properties; + u32 data[]; +}; + +struct hfi_msg_session_end_done_pkt { + struct hfi_session_hdr_pkt shdr; + u32 error_type; +}; + +struct hfi_msg_session_get_sequence_hdr_done_pkt { + struct hfi_session_hdr_pkt shdr; + u32 error_type; + u32 header_len; + 
u32 sequence_header; +}; + +struct hfi_msg_sys_session_abort_done_pkt { + struct hfi_session_hdr_pkt shdr; + u32 error_type; +}; + +struct hfi_msg_sys_idle_pkt { + struct hfi_pkt_hdr hdr; +}; + +struct hfi_msg_sys_ping_ack_pkt { + struct hfi_pkt_hdr hdr; + u32 client_data; +}; + +struct hfi_msg_sys_property_info_pkt { + struct hfi_pkt_hdr hdr; + u32 num_properties; + u32 property; + u8 data[]; +}; + +struct hfi_msg_session_load_resources_done_pkt { + struct hfi_session_hdr_pkt shdr; + u32 error_type; +}; + +struct hfi_msg_session_start_done_pkt { + struct hfi_session_hdr_pkt shdr; + u32 error_type; +}; + +struct hfi_msg_session_stop_done_pkt { + struct hfi_session_hdr_pkt shdr; + u32 error_type; +}; + +struct hfi_msg_session_suspend_done_pkt { + struct hfi_session_hdr_pkt shdr; + u32 error_type; +}; + +struct hfi_msg_session_resume_done_pkt { + struct hfi_session_hdr_pkt shdr; + u32 error_type; +}; + +struct hfi_msg_session_flush_done_pkt { + struct hfi_session_hdr_pkt shdr; + u32 error_type; + u32 flush_type; +}; + +struct hfi_msg_session_empty_buffer_done_pkt { + struct hfi_session_hdr_pkt shdr; + u32 error_type; + u32 offset; + u32 filled_len; + u32 input_tag; + u32 packet_buffer; + u32 extradata_buffer; + u32 data[]; +}; + +struct hfi_msg_session_fbd_compressed_pkt { + struct hfi_session_hdr_pkt shdr; + u32 time_stamp_hi; + u32 time_stamp_lo; + u32 error_type; + u32 flags; + u32 mark_target; + u32 mark_data; + u32 stats; + u32 offset; + u32 alloc_len; + u32 filled_len; + u32 input_tag; + u32 output_tag; + u32 picture_type; + u32 packet_buffer; + u32 extradata_buffer; + u32 data[]; +}; + +struct hfi_msg_session_fbd_uncompressed_plane0_pkt { + struct hfi_session_hdr_pkt shdr; + u32 stream_id; + u32 view_id; + u32 error_type; + u32 time_stamp_hi; + u32 time_stamp_lo; + u32 flags; + u32 mark_target; + u32 mark_data; + u32 stats; + u32 alloc_len; + u32 filled_len; + u32 offset; + u32 frame_width; + u32 frame_height; + u32 start_x_coord; + u32 start_y_coord; + u32 input_tag; + u32 input_tag2; + u32 output_tag; + u32 picture_type; + u32 packet_buffer; + u32 extradata_buffer; + u32 data[]; +}; + +struct hfi_msg_session_fbd_uncompressed_plane1_pkt { + u32 flags; + u32 alloc_len; + u32 filled_len; + u32 offset; + u32 packet_buffer2; + u32 data[]; +}; + +struct hfi_msg_session_fbd_uncompressed_plane2_pkt { + u32 flags; + u32 alloc_len; + u32 filled_len; + u32 offset; + u32 packet_buffer3; + u32 data[]; +}; + +struct hfi_msg_session_parse_sequence_header_done_pkt { + struct hfi_session_hdr_pkt shdr; + u32 error_type; + u32 num_properties; + u32 data[]; +}; + +struct hfi_msg_session_property_info_pkt { + struct hfi_session_hdr_pkt shdr; + u32 num_properties; + u32 property; + u8 data[]; +}; + +struct hfi_msg_session_release_resources_done_pkt { + struct hfi_session_hdr_pkt shdr; + u32 error_type; +}; + +struct hfi_msg_session_release_buffers_done_pkt { + struct hfi_session_hdr_pkt shdr; + u32 error_type; + u32 num_buffers; + u32 buffer_info[]; +}; + +struct hfi_msg_sys_debug_pkt { + struct hfi_pkt_hdr hdr; + u32 msg_type; + u32 msg_size; + u32 time_stamp_hi; + u32 time_stamp_lo; + u8 msg_data[]; +}; + +struct hfi_msg_sys_coverage_pkt { + struct hfi_pkt_hdr hdr; + u32 msg_size; + u32 time_stamp_hi; + u32 time_stamp_lo; + u8 msg_data[]; +}; + +struct venus_core; +struct hfi_pkt_hdr; + +void hfi_process_watchdog_timeout(struct venus_core *core); +u32 hfi_process_msg_packet(struct venus_core *core, struct hfi_pkt_hdr *hdr); + +#endif diff --git a/drivers/media/platform/qcom/venus/hfi_parser.c 
b/drivers/media/platform/qcom/venus/hfi_parser.c new file mode 100644 index 0000000000..c43839539d --- /dev/null +++ b/drivers/media/platform/qcom/venus/hfi_parser.c @@ -0,0 +1,342 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2018 Linaro Ltd. + * + * Author: Stanimir Varbanov <stanimir.varbanov@linaro.org> + */ +#include <linux/bitops.h> +#include <linux/kernel.h> + +#include "core.h" +#include "hfi_helper.h" +#include "hfi_parser.h" + +typedef void (*func)(struct hfi_plat_caps *cap, const void *data, + unsigned int size); + +static void init_codecs(struct venus_core *core) +{ + struct hfi_plat_caps *caps = core->caps, *cap; + unsigned long bit; + + if (hweight_long(core->dec_codecs) + hweight_long(core->enc_codecs) > MAX_CODEC_NUM) + return; + + for_each_set_bit(bit, &core->dec_codecs, MAX_CODEC_NUM) { + cap = &caps[core->codecs_count++]; + cap->codec = BIT(bit); + cap->domain = VIDC_SESSION_TYPE_DEC; + cap->valid = false; + } + + for_each_set_bit(bit, &core->enc_codecs, MAX_CODEC_NUM) { + cap = &caps[core->codecs_count++]; + cap->codec = BIT(bit); + cap->domain = VIDC_SESSION_TYPE_ENC; + cap->valid = false; + } +} + +static void for_each_codec(struct hfi_plat_caps *caps, unsigned int caps_num, + u32 codecs, u32 domain, func cb, void *data, + unsigned int size) +{ + struct hfi_plat_caps *cap; + unsigned int i; + + for (i = 0; i < caps_num; i++) { + cap = &caps[i]; + if (cap->valid && cap->domain == domain) + continue; + if (cap->codec & codecs && cap->domain == domain) + cb(cap, data, size); + } +} + +static void +fill_buf_mode(struct hfi_plat_caps *cap, const void *data, unsigned int num) +{ + const u32 *type = data; + + if (*type == HFI_BUFFER_MODE_DYNAMIC) + cap->cap_bufs_mode_dynamic = true; +} + +static void +parse_alloc_mode(struct venus_core *core, u32 codecs, u32 domain, void *data) +{ + struct hfi_buffer_alloc_mode_supported *mode = data; + u32 num_entries = mode->num_entries; + u32 *type; + + if (num_entries > MAX_ALLOC_MODE_ENTRIES) + return; + + type = mode->data; + + while (num_entries--) { + if (mode->buffer_type == HFI_BUFFER_OUTPUT || + mode->buffer_type == HFI_BUFFER_OUTPUT2) + for_each_codec(core->caps, ARRAY_SIZE(core->caps), + codecs, domain, fill_buf_mode, type, 1); + + type++; + } +} + +static void fill_profile_level(struct hfi_plat_caps *cap, const void *data, + unsigned int num) +{ + const struct hfi_profile_level *pl = data; + + if (cap->num_pl + num >= HFI_MAX_PROFILE_COUNT) + return; + + memcpy(&cap->pl[cap->num_pl], pl, num * sizeof(*pl)); + cap->num_pl += num; +} + +static void +parse_profile_level(struct venus_core *core, u32 codecs, u32 domain, void *data) +{ + struct hfi_profile_level_supported *pl = data; + struct hfi_profile_level *proflevel = pl->profile_level; + struct hfi_profile_level pl_arr[HFI_MAX_PROFILE_COUNT] = {}; + + if (pl->profile_count > HFI_MAX_PROFILE_COUNT) + return; + + memcpy(pl_arr, proflevel, pl->profile_count * sizeof(*proflevel)); + + for_each_codec(core->caps, ARRAY_SIZE(core->caps), codecs, domain, + fill_profile_level, pl_arr, pl->profile_count); +} + +static void +fill_caps(struct hfi_plat_caps *cap, const void *data, unsigned int num) +{ + const struct hfi_capability *caps = data; + + if (cap->num_caps + num >= MAX_CAP_ENTRIES) + return; + + memcpy(&cap->caps[cap->num_caps], caps, num * sizeof(*caps)); + cap->num_caps += num; +} + +static void +parse_caps(struct venus_core *core, u32 codecs, u32 domain, void *data) +{ + struct hfi_capabilities *caps = data; + struct hfi_capability *cap = caps->data; + u32 
num_caps = caps->num_capabilities; + struct hfi_capability caps_arr[MAX_CAP_ENTRIES] = {}; + + if (num_caps > MAX_CAP_ENTRIES) + return; + + memcpy(caps_arr, cap, num_caps * sizeof(*cap)); + + for_each_codec(core->caps, ARRAY_SIZE(core->caps), codecs, domain, + fill_caps, caps_arr, num_caps); +} + +static void fill_raw_fmts(struct hfi_plat_caps *cap, const void *fmts, + unsigned int num_fmts) +{ + const struct raw_formats *formats = fmts; + + if (cap->num_fmts + num_fmts >= MAX_FMT_ENTRIES) + return; + + memcpy(&cap->fmts[cap->num_fmts], formats, num_fmts * sizeof(*formats)); + cap->num_fmts += num_fmts; +} + +static void +parse_raw_formats(struct venus_core *core, u32 codecs, u32 domain, void *data) +{ + struct hfi_uncompressed_format_supported *fmt = data; + struct hfi_uncompressed_plane_info *pinfo = fmt->plane_info; + struct hfi_uncompressed_plane_constraints *constr; + struct raw_formats rawfmts[MAX_FMT_ENTRIES] = {}; + u32 entries = fmt->format_entries; + unsigned int i = 0; + u32 num_planes; + + while (entries) { + num_planes = pinfo->num_planes; + + rawfmts[i].fmt = pinfo->format; + rawfmts[i].buftype = fmt->buffer_type; + i++; + + if (i >= MAX_FMT_ENTRIES) + return; + + if (pinfo->num_planes > MAX_PLANES) + break; + + pinfo = (void *)pinfo + sizeof(*constr) * num_planes + + 2 * sizeof(u32); + entries--; + } + + for_each_codec(core->caps, ARRAY_SIZE(core->caps), codecs, domain, + fill_raw_fmts, rawfmts, i); +} + +static void parse_codecs(struct venus_core *core, void *data) +{ + struct hfi_codec_supported *codecs = data; + + core->dec_codecs = codecs->dec_codecs; + core->enc_codecs = codecs->enc_codecs; + + if (IS_V1(core)) { + core->dec_codecs &= ~HFI_VIDEO_CODEC_HEVC; + core->dec_codecs &= ~HFI_VIDEO_CODEC_SPARK; + core->enc_codecs &= ~HFI_VIDEO_CODEC_HEVC; + } +} + +static void parse_max_sessions(struct venus_core *core, const void *data) +{ + const struct hfi_max_sessions_supported *sessions = data; + + core->max_sessions_supported = sessions->max_sessions; +} + +static void parse_codecs_mask(u32 *codecs, u32 *domain, void *data) +{ + struct hfi_codec_mask_supported *mask = data; + + *codecs = mask->codecs; + *domain = mask->video_domains; +} + +static void parser_init(struct venus_inst *inst, u32 *codecs, u32 *domain) +{ + if (!inst || !IS_V1(inst->core)) + return; + + *codecs = inst->hfi_codec; + *domain = inst->session_type; +} + +static void parser_fini(struct venus_inst *inst, u32 codecs, u32 domain) +{ + struct hfi_plat_caps *caps, *cap; + unsigned int i; + u32 dom; + + if (!inst || !IS_V1(inst->core)) + return; + + caps = inst->core->caps; + dom = inst->session_type; + + for (i = 0; i < MAX_CODEC_NUM; i++) { + cap = &caps[i]; + if (cap->codec & codecs && cap->domain == dom) + cap->valid = true; + } +} + +static int hfi_platform_parser(struct venus_core *core, struct venus_inst *inst) +{ + const struct hfi_platform *plat; + const struct hfi_plat_caps *caps = NULL; + u32 enc_codecs, dec_codecs, count = 0; + unsigned int entries; + int ret; + + plat = hfi_platform_get(core->res->hfi_version); + if (!plat) + return -EINVAL; + + if (inst) + return 0; + + ret = hfi_platform_get_codecs(core, &enc_codecs, &dec_codecs, &count); + if (ret) + return ret; + + if (plat->capabilities) + caps = plat->capabilities(&entries); + + if (!caps || !entries || !count) + return -EINVAL; + + core->enc_codecs = enc_codecs; + core->dec_codecs = dec_codecs; + core->codecs_count = count; + core->max_sessions_supported = MAX_SESSIONS; + memset(core->caps, 0, sizeof(*caps) * MAX_CODEC_NUM); + 
memcpy(core->caps, caps, sizeof(*caps) * entries); + + return 0; +} + +u32 hfi_parser(struct venus_core *core, struct venus_inst *inst, void *buf, + u32 size) +{ + unsigned int words_count = size >> 2; + u32 *word = buf, *data, codecs = 0, domain = 0; + int ret; + + ret = hfi_platform_parser(core, inst); + if (!ret) + return HFI_ERR_NONE; + + if (size % 4) + return HFI_ERR_SYS_INSUFFICIENT_RESOURCES; + + parser_init(inst, &codecs, &domain); + + if (core->res->hfi_version > HFI_VERSION_1XX) { + core->codecs_count = 0; + memset(core->caps, 0, sizeof(core->caps)); + } + + while (words_count) { + data = word + 1; + + switch (*word) { + case HFI_PROPERTY_PARAM_CODEC_SUPPORTED: + parse_codecs(core, data); + init_codecs(core); + break; + case HFI_PROPERTY_PARAM_MAX_SESSIONS_SUPPORTED: + parse_max_sessions(core, data); + break; + case HFI_PROPERTY_PARAM_CODEC_MASK_SUPPORTED: + parse_codecs_mask(&codecs, &domain, data); + break; + case HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SUPPORTED: + parse_raw_formats(core, codecs, domain, data); + break; + case HFI_PROPERTY_PARAM_CAPABILITY_SUPPORTED: + parse_caps(core, codecs, domain, data); + break; + case HFI_PROPERTY_PARAM_PROFILE_LEVEL_SUPPORTED: + parse_profile_level(core, codecs, domain, data); + break; + case HFI_PROPERTY_PARAM_BUFFER_ALLOC_MODE_SUPPORTED: + parse_alloc_mode(core, codecs, domain, data); + break; + default: + break; + } + + word++; + words_count--; + } + + if (!core->max_sessions_supported) + core->max_sessions_supported = MAX_SESSIONS; + + parser_fini(inst, codecs, domain); + + return HFI_ERR_NONE; +} diff --git a/drivers/media/platform/qcom/venus/hfi_parser.h b/drivers/media/platform/qcom/venus/hfi_parser.h new file mode 100644 index 0000000000..5751d01407 --- /dev/null +++ b/drivers/media/platform/qcom/venus/hfi_parser.h @@ -0,0 +1,120 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2018 Linaro Ltd. 
*/ +#ifndef __VENUS_HFI_PARSER_H__ +#define __VENUS_HFI_PARSER_H__ + +#include "core.h" + +u32 hfi_parser(struct venus_core *core, struct venus_inst *inst, + void *buf, u32 size); + +#define WHICH_CAP_MIN 0 +#define WHICH_CAP_MAX 1 +#define WHICH_CAP_STEP 2 + +static inline u32 get_cap(struct venus_inst *inst, u32 type, u32 which) +{ + struct venus_core *core = inst->core; + struct hfi_capability *cap = NULL; + struct hfi_plat_caps *caps; + unsigned int i; + + caps = venus_caps_by_codec(core, inst->hfi_codec, inst->session_type); + if (!caps) + return 0; + + for (i = 0; i < caps->num_caps; i++) { + if (caps->caps[i].capability_type == type) { + cap = &caps->caps[i]; + break; + } + } + + if (!cap) + return 0; + + switch (which) { + case WHICH_CAP_MIN: + return cap->min; + case WHICH_CAP_MAX: + return cap->max; + case WHICH_CAP_STEP: + return cap->step_size; + default: + break; + } + + return 0; +} + +static inline u32 cap_min(struct venus_inst *inst, u32 type) +{ + return get_cap(inst, type, WHICH_CAP_MIN); +} + +static inline u32 cap_max(struct venus_inst *inst, u32 type) +{ + return get_cap(inst, type, WHICH_CAP_MAX); +} + +static inline u32 cap_step(struct venus_inst *inst, u32 type) +{ + return get_cap(inst, type, WHICH_CAP_STEP); +} + +static inline u32 frame_width_min(struct venus_inst *inst) +{ + return cap_min(inst, HFI_CAPABILITY_FRAME_WIDTH); +} + +static inline u32 frame_width_max(struct venus_inst *inst) +{ + return cap_max(inst, HFI_CAPABILITY_FRAME_WIDTH); +} + +static inline u32 frame_width_step(struct venus_inst *inst) +{ + return cap_step(inst, HFI_CAPABILITY_FRAME_WIDTH); +} + +static inline u32 frame_height_min(struct venus_inst *inst) +{ + return cap_min(inst, HFI_CAPABILITY_FRAME_HEIGHT); +} + +static inline u32 frame_height_max(struct venus_inst *inst) +{ + return cap_max(inst, HFI_CAPABILITY_FRAME_HEIGHT); +} + +static inline u32 frame_height_step(struct venus_inst *inst) +{ + return cap_step(inst, HFI_CAPABILITY_FRAME_HEIGHT); +} + +static inline u32 frate_min(struct venus_inst *inst) +{ + return cap_min(inst, HFI_CAPABILITY_FRAMERATE); +} + +static inline u32 frate_max(struct venus_inst *inst) +{ + return cap_max(inst, HFI_CAPABILITY_FRAMERATE); +} + +static inline u32 frate_step(struct venus_inst *inst) +{ + return cap_step(inst, HFI_CAPABILITY_FRAMERATE); +} + +static inline u32 core_num_max(struct venus_inst *inst) +{ + return cap_max(inst, HFI_CAPABILITY_MAX_VIDEOCORES); +} + +static inline u32 mbs_per_frame_max(struct venus_inst *inst) +{ + return cap_max(inst, HFI_CAPABILITY_MBS_PER_FRAME); +} + +#endif diff --git a/drivers/media/platform/qcom/venus/hfi_plat_bufs.h b/drivers/media/platform/qcom/venus/hfi_plat_bufs.h new file mode 100644 index 0000000000..25e6074520 --- /dev/null +++ b/drivers/media/platform/qcom/venus/hfi_plat_bufs.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef __HFI_PLATFORM_BUFFERS_H__ +#define __HFI_PLATFORM_BUFFERS_H__ + +#include <linux/types.h> +#include "hfi_helper.h" + +struct hfi_plat_buffers_params { + u32 width; + u32 height; + u32 out_width; + u32 out_height; + u32 codec; + u32 hfi_color_fmt; + u32 hfi_dpb_color_fmt; + enum hfi_version version; + u32 num_vpp_pipes; + union { + struct { + u32 max_mbs_per_frame; + u32 buffer_size_limit; + bool is_secondary_output; + bool is_interlaced; + } dec; + struct { + u32 work_mode; + u32 rc_type; + u32 num_b_frames; + bool is_tenbit; + } enc; + }; +}; + +int hfi_plat_bufreq_v6(struct hfi_plat_buffers_params *params, u32 session_type, + u32 buftype, struct hfi_buffer_requirements *bufreq); + +#endif diff --git a/drivers/media/platform/qcom/venus/hfi_plat_bufs_v6.c b/drivers/media/platform/qcom/venus/hfi_plat_bufs_v6.c new file mode 100644 index 0000000000..f5a655973c --- /dev/null +++ b/drivers/media/platform/qcom/venus/hfi_plat_bufs_v6.c @@ -0,0 +1,1334 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + */ +#include <linux/kernel.h> +#include <linux/sizes.h> +#include <linux/videodev2.h> + +#include "hfi.h" +#include "hfi_plat_bufs.h" +#include "helpers.h" + +#define MIN_INPUT_BUFFERS 4 +#define MIN_ENC_OUTPUT_BUFFERS 4 + +#define NV12_UBWC_Y_TILE_WIDTH 32 +#define NV12_UBWC_Y_TILE_HEIGHT 8 +#define NV12_UBWC_UV_TILE_WIDTH 16 +#define NV12_UBWC_UV_TILE_HEIGHT 8 +#define TP10_UBWC_Y_TILE_WIDTH 48 +#define TP10_UBWC_Y_TILE_HEIGHT 4 +#define METADATA_STRIDE_MULTIPLE 64 +#define METADATA_HEIGHT_MULTIPLE 16 +#define HFI_DMA_ALIGNMENT 256 + +#define MAX_FE_NBR_CTRL_LCU64_LINE_BUFFER_SIZE 64 +#define MAX_FE_NBR_CTRL_LCU32_LINE_BUFFER_SIZE 64 +#define MAX_FE_NBR_CTRL_LCU16_LINE_BUFFER_SIZE 64 +#define MAX_FE_NBR_DATA_LUMA_LINE_BUFFER_SIZE 640 +#define MAX_FE_NBR_DATA_CB_LINE_BUFFER_SIZE 320 +#define MAX_FE_NBR_DATA_CR_LINE_BUFFER_SIZE 320 + +#define MAX_SE_NBR_CTRL_LCU64_LINE_BUFFER_SIZE (128 / 8) +#define MAX_SE_NBR_CTRL_LCU32_LINE_BUFFER_SIZE (128 / 8) +#define MAX_SE_NBR_CTRL_LCU16_LINE_BUFFER_SIZE (128 / 8) + +#define MAX_PE_NBR_DATA_LCU64_LINE_BUFFER_SIZE (64 * 2 * 3) +#define MAX_PE_NBR_DATA_LCU32_LINE_BUFFER_SIZE (32 * 2 * 3) +#define MAX_PE_NBR_DATA_LCU16_LINE_BUFFER_SIZE (16 * 2 * 3) + +#define MAX_TILE_COLUMNS 32 /* 8K/256 */ + +#define VPP_CMD_MAX_SIZE BIT(20) +#define NUM_HW_PIC_BUF 32 +#define BIN_BUFFER_THRESHOLD (1280 * 736) +#define H264D_MAX_SLICE 1800 +/* sizeof(h264d_buftab_t) aligned to 256 */ +#define SIZE_H264D_BUFTAB_T 256 +/* sizeof(h264d_hw_pic_t) aligned to 32 */ +#define SIZE_H264D_HW_PIC_T BIT(11) +#define SIZE_H264D_BSE_CMD_PER_BUF (32 * 4) +#define SIZE_H264D_VPP_CMD_PER_BUF 512 + +/* Line Buffer definitions, One for Luma and 1/2 for each Chroma */ +#define SIZE_H264D_LB_FE_TOP_DATA(width, height) \ + (MAX_FE_NBR_DATA_LUMA_LINE_BUFFER_SIZE * ALIGN((width), 16) * 3) + +#define SIZE_H264D_LB_FE_TOP_CTRL(width, height) \ + (MAX_FE_NBR_CTRL_LCU64_LINE_BUFFER_SIZE * (((width) + 15) >> 4)) + +#define SIZE_H264D_LB_FE_LEFT_CTRL(width, height) \ + (MAX_FE_NBR_CTRL_LCU64_LINE_BUFFER_SIZE * (((height) + 15) >> 4)) + +#define SIZE_H264D_LB_SE_TOP_CTRL(width, height) \ + (MAX_SE_NBR_CTRL_LCU64_LINE_BUFFER_SIZE * (((width) + 15) >> 4)) + +#define SIZE_H264D_LB_SE_LEFT_CTRL(width, height) \ + (MAX_SE_NBR_CTRL_LCU64_LINE_BUFFER_SIZE * (((height) + 15) >> 4)) + +#define SIZE_H264D_LB_PE_TOP_DATA(width, height) \ + (MAX_PE_NBR_DATA_LCU64_LINE_BUFFER_SIZE * (((width) + 15) >> 4)) + +#define 
SIZE_H264D_LB_VSP_TOP(width, height) (((((width) + 15) >> 4) << 7)) + +#define SIZE_H264D_LB_RECON_DMA_METADATA_WR(width, height) \ + (ALIGN((height), 16) * 32) + +#define SIZE_H264D_QP(width, height) \ + ((((width) + 63) >> 6) * (((height) + 63) >> 6) * 128) + +#define SIZE_HW_PIC(size_per_buf) (NUM_HW_PIC_BUF * (size_per_buf)) + +#define H264_CABAC_HDR_RATIO_HD_TOT 1 +#define H264_CABAC_RES_RATIO_HD_TOT 3 + +/* + * Some content need more bin buffer, but limit buffer + * size for high resolution + */ +#define NUM_SLIST_BUF_H264 (256 + 32) +#define SIZE_SLIST_BUF_H264 512 +#define LCU_MAX_SIZE_PELS 64 +#define LCU_MIN_SIZE_PELS 16 +#define SIZE_SEI_USERDATA 4096 + +#define H265D_MAX_SLICE 3600 +#define SIZE_H265D_HW_PIC_T SIZE_H264D_HW_PIC_T +#define SIZE_H265D_BSE_CMD_PER_BUF (16 * sizeof(u32)) +#define SIZE_H265D_VPP_CMD_PER_BUF 256 + +#define SIZE_H265D_LB_FE_TOP_DATA(width, height) \ + (MAX_FE_NBR_DATA_LUMA_LINE_BUFFER_SIZE * (ALIGN(width, 64) + 8) * 2) + +#define SIZE_H265D_LB_FE_TOP_CTRL(width, height) \ + (MAX_FE_NBR_CTRL_LCU64_LINE_BUFFER_SIZE * \ + (ALIGN(width, LCU_MAX_SIZE_PELS) / LCU_MIN_SIZE_PELS)) + +#define SIZE_H265D_LB_FE_LEFT_CTRL(width, height) \ + (MAX_FE_NBR_CTRL_LCU64_LINE_BUFFER_SIZE * \ + (ALIGN(height, LCU_MAX_SIZE_PELS) / LCU_MIN_SIZE_PELS)) + +#define SIZE_H265D_LB_SE_TOP_CTRL(width, height) \ + ((LCU_MAX_SIZE_PELS / 8 * (128 / 8)) * (((width) + 15) >> 4)) + +static inline u32 size_h265d_lb_se_left_ctrl(u32 width, u32 height) +{ + u32 x, y, z; + + x = ((height + 16 - 1) / 8) * MAX_SE_NBR_CTRL_LCU16_LINE_BUFFER_SIZE; + y = ((height + 32 - 1) / 8) * MAX_SE_NBR_CTRL_LCU32_LINE_BUFFER_SIZE; + z = ((height + 64 - 1) / 8) * MAX_SE_NBR_CTRL_LCU64_LINE_BUFFER_SIZE; + + return max3(x, y, z); +} + +#define SIZE_H265D_LB_PE_TOP_DATA(width, height) \ + (MAX_PE_NBR_DATA_LCU64_LINE_BUFFER_SIZE * \ + (ALIGN(width, LCU_MIN_SIZE_PELS) / LCU_MIN_SIZE_PELS)) + +#define SIZE_H265D_LB_VSP_TOP(width, height) ((((width) + 63) >> 6) * 128) + +#define SIZE_H265D_LB_VSP_LEFT(width, height) ((((height) + 63) >> 6) * 128) + +#define SIZE_H265D_LB_RECON_DMA_METADATA_WR(width, height) \ + SIZE_H264D_LB_RECON_DMA_METADATA_WR(width, height) + +#define SIZE_H265D_QP(width, height) SIZE_H264D_QP(width, height) + +#define H265_CABAC_HDR_RATIO_HD_TOT 2 +#define H265_CABAC_RES_RATIO_HD_TOT 2 + +/* + * Some content need more bin buffer, but limit buffer size + * for high resolution + */ +#define SIZE_SLIST_BUF_H265 BIT(10) +#define NUM_SLIST_BUF_H265 (80 + 20) +#define H265_NUM_TILE_COL 32 +#define H265_NUM_TILE_ROW 128 +#define H265_NUM_TILE (H265_NUM_TILE_ROW * H265_NUM_TILE_COL + 1) + +static inline u32 size_vpxd_lb_fe_left_ctrl(u32 width, u32 height) +{ + u32 x, y, z; + + x = ((height + 15) >> 4) * MAX_FE_NBR_CTRL_LCU16_LINE_BUFFER_SIZE; + y = ((height + 31) >> 5) * MAX_FE_NBR_CTRL_LCU32_LINE_BUFFER_SIZE; + z = ((height + 63) >> 6) * MAX_FE_NBR_CTRL_LCU64_LINE_BUFFER_SIZE; + + return max3(x, y, z); +} + +#define SIZE_VPXD_LB_FE_TOP_CTRL(width, height) \ + (((ALIGN(width, 64) + 8) * 10 * 2)) /* small line */ +#define SIZE_VPXD_LB_SE_TOP_CTRL(width, height) \ + ((((width) + 15) >> 4) * MAX_FE_NBR_CTRL_LCU16_LINE_BUFFER_SIZE) + +static inline u32 size_vpxd_lb_se_left_ctrl(u32 width, u32 height) +{ + u32 x, y, z; + + x = ((height + 15) >> 4) * MAX_SE_NBR_CTRL_LCU16_LINE_BUFFER_SIZE; + y = ((height + 31) >> 5) * MAX_SE_NBR_CTRL_LCU32_LINE_BUFFER_SIZE; + z = ((height + 63) >> 6) * MAX_SE_NBR_CTRL_LCU64_LINE_BUFFER_SIZE; + + return max3(x, y, z); +} + +#define SIZE_VPXD_LB_RECON_DMA_METADATA_WR(width, 
height) \ + ALIGN((ALIGN(height, 16) / (4 / 2)) * 64, 32) +#define SIZE_VP8D_LB_FE_TOP_DATA(width, height) \ + ((ALIGN(width, 16) + 8) * 10 * 2) +#define SIZE_VP9D_LB_FE_TOP_DATA(width, height) \ + ((ALIGN(ALIGN(width, 16), 64) + 8) * 10 * 2) +#define SIZE_VP8D_LB_PE_TOP_DATA(width, height) \ + ((ALIGN(width, 16) >> 4) * 64) +#define SIZE_VP9D_LB_PE_TOP_DATA(width, height) \ + ((ALIGN(ALIGN(width, 16), 64) >> 6) * 176) +#define SIZE_VP8D_LB_VSP_TOP(width, height) \ + (((ALIGN(width, 16) >> 4) * 64 / 2) + 256) +#define SIZE_VP9D_LB_VSP_TOP(width, height) \ + (((ALIGN(ALIGN(width, 16), 64) >> 6) * 64 * 8) + 256) + +#define HFI_IRIS2_VP9D_COMV_SIZE \ + ((((8192 + 63) >> 6) * ((4320 + 63) >> 6) * 8 * 8 * 2 * 8)) + +#define VPX_DECODER_FRAME_CONCURENCY_LVL 2 +#define VPX_DECODER_FRAME_BIN_HDR_BUDGET_RATIO_NUM 1 +#define VPX_DECODER_FRAME_BIN_HDR_BUDGET_RATIO_DEN 2 +#define VPX_DECODER_FRAME_BIN_RES_BUDGET_RATIO_NUM 3 +#define VPX_DECODER_FRAME_BIN_RES_BUDGET_RATIO_DEN 2 + +#define VP8_NUM_FRAME_INFO_BUF (5 + 1) +#define VP9_NUM_FRAME_INFO_BUF 32 +#define VP8_NUM_PROBABILITY_TABLE_BUF VP8_NUM_FRAME_INFO_BUF +#define VP9_NUM_PROBABILITY_TABLE_BUF (VP9_NUM_FRAME_INFO_BUF + 4) +#define VP8_PROB_TABLE_SIZE 3840 +#define VP9_PROB_TABLE_SIZE 3840 + +#define VP9_UDC_HEADER_BUF_SIZE (3 * 128) +#define MAX_SUPERFRAME_HEADER_LEN 34 +#define CCE_TILE_OFFSET_SIZE ALIGN(32 * 4 * 4, 32) + +#define QMATRIX_SIZE (sizeof(u32) * 128 + 256) +#define MP2D_QPDUMP_SIZE 115200 +#define HFI_IRIS2_ENC_PERSIST_SIZE 204800 +#define HFI_MAX_COL_FRAME 6 +#define HFI_VENUS_VENC_TRE_WB_BUFF_SIZE (65 << 4) /* in Bytes */ +#define HFI_VENUS_VENC_DB_LINE_BUFF_PER_MB 512 +#define HFI_VENUS_VPPSG_MAX_REGISTERS 2048 +#define HFI_VENUS_WIDTH_ALIGNMENT 128 +#define HFI_VENUS_WIDTH_TEN_BIT_ALIGNMENT 192 +#define HFI_VENUS_HEIGHT_ALIGNMENT 32 + +#define SYSTEM_LAL_TILE10 192 +#define NUM_MBS_720P (((1280 + 15) >> 4) * ((720 + 15) >> 4)) +#define NUM_MBS_4K (((4096 + 15) >> 4) * ((2304 + 15) >> 4)) +#define MB_SIZE_IN_PIXEL (16 * 16) +#define HDR10PLUS_PAYLOAD_SIZE 1024 +#define HDR10_HIST_EXTRADATA_SIZE 4096 + +static u32 size_vpss_lb(u32 width, u32 height, u32 num_vpp_pipes) +{ + u32 vpss_4tap_top_buffer_size, vpss_div2_top_buffer_size; + u32 vpss_4tap_left_buffer_size, vpss_div2_left_buffer_size; + u32 opb_wr_top_line_luma_buf_size, opb_wr_top_line_chroma_buf_size; + u32 opb_lb_wr_llb_y_buffer_size, opb_lb_wr_llb_uv_buffer_size; + u32 macrotiling_size; + u32 size = 0; + + vpss_4tap_top_buffer_size = 0; + vpss_div2_top_buffer_size = 0; + vpss_4tap_left_buffer_size = 0; + vpss_div2_left_buffer_size = 0; + + macrotiling_size = 32; + opb_wr_top_line_luma_buf_size = + ALIGN(width, macrotiling_size) / macrotiling_size * 256; + opb_wr_top_line_luma_buf_size = + ALIGN(opb_wr_top_line_luma_buf_size, HFI_DMA_ALIGNMENT) + + (MAX_TILE_COLUMNS - 1) * 256; + opb_wr_top_line_luma_buf_size = + max(opb_wr_top_line_luma_buf_size, (32 * ALIGN(height, 16))); + opb_wr_top_line_chroma_buf_size = opb_wr_top_line_luma_buf_size; + opb_lb_wr_llb_y_buffer_size = ALIGN((ALIGN(height, 16) / 2) * 64, 32); + opb_lb_wr_llb_uv_buffer_size = opb_lb_wr_llb_y_buffer_size; + size = num_vpp_pipes * + 2 * (vpss_4tap_top_buffer_size + vpss_div2_top_buffer_size) + + 2 * (vpss_4tap_left_buffer_size + vpss_div2_left_buffer_size) + + opb_wr_top_line_luma_buf_size + + opb_wr_top_line_chroma_buf_size + + opb_lb_wr_llb_uv_buffer_size + + opb_lb_wr_llb_y_buffer_size; + + return size; +} + +static u32 size_h264d_hw_bin_buffer(u32 width, u32 height) +{ + u32 size_yuv, size_bin_hdr, 
size_bin_res; + u32 size = 0; + u32 product; + + product = width * height; + size_yuv = (product <= BIN_BUFFER_THRESHOLD) ? + ((BIN_BUFFER_THRESHOLD * 3) >> 1) : ((product * 3) >> 1); + + size_bin_hdr = size_yuv * H264_CABAC_HDR_RATIO_HD_TOT; + size_bin_res = size_yuv * H264_CABAC_RES_RATIO_HD_TOT; + size_bin_hdr = ALIGN(size_bin_hdr, HFI_DMA_ALIGNMENT); + size_bin_res = ALIGN(size_bin_res, HFI_DMA_ALIGNMENT); + size = size_bin_hdr + size_bin_res; + + return size; +} + +static u32 h264d_scratch_size(u32 width, u32 height, bool is_interlaced) +{ + u32 aligned_width = ALIGN(width, 16); + u32 aligned_height = ALIGN(height, 16); + u32 size = 0; + + if (!is_interlaced) + size = size_h264d_hw_bin_buffer(aligned_width, aligned_height); + + return size; +} + +static u32 size_h265d_hw_bin_buffer(u32 width, u32 height) +{ + u32 size_yuv, size_bin_hdr, size_bin_res; + u32 size = 0; + u32 product; + + product = width * height; + size_yuv = (product <= BIN_BUFFER_THRESHOLD) ? + ((BIN_BUFFER_THRESHOLD * 3) >> 1) : ((product * 3) >> 1); + size_bin_hdr = size_yuv * H265_CABAC_HDR_RATIO_HD_TOT; + size_bin_res = size_yuv * H265_CABAC_RES_RATIO_HD_TOT; + size_bin_hdr = ALIGN(size_bin_hdr, HFI_DMA_ALIGNMENT); + size_bin_res = ALIGN(size_bin_res, HFI_DMA_ALIGNMENT); + size = size_bin_hdr + size_bin_res; + + return size; +} + +static u32 h265d_scratch_size(u32 width, u32 height, bool is_interlaced) +{ + u32 aligned_width = ALIGN(width, 16); + u32 aligned_height = ALIGN(height, 16); + u32 size = 0; + + if (!is_interlaced) + size = size_h265d_hw_bin_buffer(aligned_width, aligned_height); + + return size; +} + +static u32 vpxd_scratch_size(u32 width, u32 height, bool is_interlaced) +{ + u32 aligned_width = ALIGN(width, 16); + u32 aligned_height = ALIGN(height, 16); + u32 size_yuv = aligned_width * aligned_height * 3 / 2; + u32 size = 0; + + if (!is_interlaced) { + u32 binbuffer1_size, binbufer2_size; + + binbuffer1_size = max_t(u32, size_yuv, + ((BIN_BUFFER_THRESHOLD * 3) >> 1)); + binbuffer1_size *= VPX_DECODER_FRAME_CONCURENCY_LVL * + VPX_DECODER_FRAME_BIN_HDR_BUDGET_RATIO_NUM / + VPX_DECODER_FRAME_BIN_HDR_BUDGET_RATIO_DEN; + binbufer2_size = max_t(u32, size_yuv, + ((BIN_BUFFER_THRESHOLD * 3) >> 1)); + binbufer2_size *= VPX_DECODER_FRAME_CONCURENCY_LVL * + VPX_DECODER_FRAME_BIN_RES_BUDGET_RATIO_NUM / + VPX_DECODER_FRAME_BIN_RES_BUDGET_RATIO_DEN; + size = ALIGN(binbuffer1_size + binbufer2_size, + HFI_DMA_ALIGNMENT); + } + + return size; +} + +static u32 mpeg2d_scratch_size(u32 width, u32 height, bool is_interlaced) +{ + return 0; +} + +static u32 calculate_enc_output_frame_size(u32 width, u32 height, u32 rc_type) +{ + u32 aligned_width, aligned_height; + u32 mbs_per_frame; + u32 frame_size; + + /* + * Encoder output size calculation: 32 Align width/height + * For resolution < 720p : YUVsize * 4 + * For resolution > 720p & <= 4K : YUVsize / 2 + * For resolution > 4k : YUVsize / 4 + * Initially frame_size = YUVsize * 2; + */ + aligned_width = ALIGN(width, 32); + aligned_height = ALIGN(height, 32); + mbs_per_frame = (ALIGN(aligned_height, 16) * + ALIGN(aligned_width, 16)) / 256; + frame_size = width * height * 3; + + if (mbs_per_frame < NUM_MBS_720P) + frame_size = frame_size << 1; + else if (mbs_per_frame <= NUM_MBS_4K) + frame_size = frame_size >> 2; + else + frame_size = frame_size >> 3; + + if (rc_type == HFI_RATE_CONTROL_OFF || rc_type == HFI_RATE_CONTROL_CQ) + frame_size = frame_size << 1; + + /* + * In case of opaque color format bitdepth will be known + * with first ETB, buffers allocated already with 8 bit 
+ * won't be sufficient for 10 bit + * calculate size considering 10-bit by default + * For 10-bit cases size = size * 1.25 + */ + frame_size *= 5; + frame_size /= 4; + + return ALIGN(frame_size, SZ_4K); +} + +static u32 calculate_enc_scratch_size(u32 width, u32 height, u32 work_mode, + u32 lcu_size, u32 num_vpp_pipes, + u32 rc_type) +{ + u32 aligned_width, aligned_height, bitstream_size; + u32 total_bitbin_buffers, size_single_pipe, bitbin_size; + u32 sao_bin_buffer_size, padded_bin_size, size; + + aligned_width = ALIGN(width, lcu_size); + aligned_height = ALIGN(height, lcu_size); + bitstream_size = + calculate_enc_output_frame_size(width, height, rc_type); + + bitstream_size = ALIGN(bitstream_size, HFI_DMA_ALIGNMENT); + + if (work_mode == VIDC_WORK_MODE_2) { + total_bitbin_buffers = 3; + bitbin_size = bitstream_size * 17 / 10; + bitbin_size = ALIGN(bitbin_size, HFI_DMA_ALIGNMENT); + } else { + total_bitbin_buffers = 1; + bitstream_size = aligned_width * aligned_height * 3; + bitbin_size = ALIGN(bitstream_size, HFI_DMA_ALIGNMENT); + } + + if (num_vpp_pipes > 2) + size_single_pipe = bitbin_size / 2; + else + size_single_pipe = bitbin_size; + + size_single_pipe = ALIGN(size_single_pipe, HFI_DMA_ALIGNMENT); + sao_bin_buffer_size = + (64 * (((width + 32) * (height + 32)) >> 10)) + 384; + padded_bin_size = ALIGN(size_single_pipe, HFI_DMA_ALIGNMENT); + size_single_pipe = sao_bin_buffer_size + padded_bin_size; + size_single_pipe = ALIGN(size_single_pipe, HFI_DMA_ALIGNMENT); + bitbin_size = size_single_pipe * num_vpp_pipes; + size = ALIGN(bitbin_size, HFI_DMA_ALIGNMENT) * + total_bitbin_buffers + 512; + + return size; +} + +static u32 h264e_scratch_size(u32 width, u32 height, u32 work_mode, + u32 num_vpp_pipes, u32 rc_type) +{ + return calculate_enc_scratch_size(width, height, work_mode, 16, + num_vpp_pipes, rc_type); +} + +static u32 h265e_scratch_size(u32 width, u32 height, u32 work_mode, + u32 num_vpp_pipes, u32 rc_type) +{ + return calculate_enc_scratch_size(width, height, work_mode, 32, + num_vpp_pipes, rc_type); +} + +static u32 vp8e_scratch_size(u32 width, u32 height, u32 work_mode, + u32 num_vpp_pipes, u32 rc_type) +{ + return calculate_enc_scratch_size(width, height, work_mode, 16, + num_vpp_pipes, rc_type); +} + +static u32 hfi_iris2_h264d_comv_size(u32 width, u32 height, + u32 yuv_buf_min_count) +{ + u32 frame_width_in_mbs = ((width + 15) >> 4); + u32 frame_height_in_mbs = ((height + 15) >> 4); + u32 col_mv_aligned_width = (frame_width_in_mbs << 7); + u32 col_zero_aligned_width = (frame_width_in_mbs << 2); + u32 col_zero_size = 0, size_colloc = 0, comv_size = 0; + + col_mv_aligned_width = ALIGN(col_mv_aligned_width, 16); + col_zero_aligned_width = ALIGN(col_zero_aligned_width, 16); + col_zero_size = + col_zero_aligned_width * ((frame_height_in_mbs + 1) >> 1); + col_zero_size = ALIGN(col_zero_size, 64); + col_zero_size <<= 1; + col_zero_size = ALIGN(col_zero_size, 512); + size_colloc = col_mv_aligned_width * ((frame_height_in_mbs + 1) >> 1); + size_colloc = ALIGN(size_colloc, 64); + size_colloc <<= 1; + size_colloc = ALIGN(size_colloc, 512); + size_colloc += (col_zero_size + SIZE_H264D_BUFTAB_T * 2); + comv_size = size_colloc * yuv_buf_min_count; + comv_size += 512; + + return comv_size; +} + +static u32 size_h264d_bse_cmd_buf(u32 height) +{ + u32 aligned_height = ALIGN(height, 32); + + return min_t(u32, (((aligned_height + 15) >> 4) * 3 * 4), + H264D_MAX_SLICE) * SIZE_H264D_BSE_CMD_PER_BUF; +} + +static u32 size_h264d_vpp_cmd_buf(u32 height) +{ + u32 aligned_height = ALIGN(height, 32); 
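/*
 * Illustration only, not part of this patch: a worked example of
 * calculate_enc_output_frame_size() above for a hypothetical 1920x1080
 * encode with an ordinary rate-control mode (neither HFI_RATE_CONTROL_OFF
 * nor HFI_RATE_CONTROL_CQ):
 *
 *   aligned_width  = ALIGN(1920, 32) = 1920
 *   aligned_height = ALIGN(1080, 32) = 1088
 *   mbs_per_frame  = (1920 / 16) * (1088 / 16) = 8160  (> NUM_MBS_720P, <= NUM_MBS_4K)
 *   frame_size     = 1920 * 1080 * 3           = 6220800
 *   frame_size   >>= 2                          -> 1555200
 *   frame_size    *= 5 / 4 (10-bit headroom)    -> 1944000
 *   ALIGN(frame_size, SZ_4K)                    -> 1945600 bytes (~1.9 MiB)
 */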
+ u32 size; + + size = min_t(u32, (((aligned_height + 15) >> 4) * 3 * 4), + H264D_MAX_SLICE) * SIZE_H264D_VPP_CMD_PER_BUF; + if (size > VPP_CMD_MAX_SIZE) + size = VPP_CMD_MAX_SIZE; + + return size; +} + +static u32 hfi_iris2_h264d_non_comv_size(u32 width, u32 height, + u32 num_vpp_pipes) +{ + u32 size_bse, size_vpp, size; + + size_bse = size_h264d_bse_cmd_buf(height); + size_vpp = size_h264d_vpp_cmd_buf(height); + size = + ALIGN(size_bse, HFI_DMA_ALIGNMENT) + + ALIGN(size_vpp, HFI_DMA_ALIGNMENT) + + ALIGN(SIZE_HW_PIC(SIZE_H264D_HW_PIC_T), HFI_DMA_ALIGNMENT) + + ALIGN(SIZE_H264D_LB_FE_TOP_DATA(width, height), + HFI_DMA_ALIGNMENT) + + ALIGN(SIZE_H264D_LB_FE_TOP_CTRL(width, height), + HFI_DMA_ALIGNMENT) + + ALIGN(SIZE_H264D_LB_FE_LEFT_CTRL(width, height), + HFI_DMA_ALIGNMENT) * num_vpp_pipes + + ALIGN(SIZE_H264D_LB_SE_TOP_CTRL(width, height), + HFI_DMA_ALIGNMENT) + + ALIGN(SIZE_H264D_LB_SE_LEFT_CTRL(width, height), + HFI_DMA_ALIGNMENT) * num_vpp_pipes + + ALIGN(SIZE_H264D_LB_PE_TOP_DATA(width, height), + HFI_DMA_ALIGNMENT) + + ALIGN(SIZE_H264D_LB_VSP_TOP(width, height), HFI_DMA_ALIGNMENT) + + ALIGN(SIZE_H264D_LB_RECON_DMA_METADATA_WR(width, height), + HFI_DMA_ALIGNMENT) * 2 + + ALIGN(SIZE_H264D_QP(width, height), HFI_DMA_ALIGNMENT); + + return ALIGN(size, HFI_DMA_ALIGNMENT); +} + +static u32 size_h265d_bse_cmd_buf(u32 width, u32 height) +{ + u32 size; + + size = (ALIGN(width, LCU_MAX_SIZE_PELS) / LCU_MIN_SIZE_PELS) * + (ALIGN(height, LCU_MAX_SIZE_PELS) / LCU_MIN_SIZE_PELS) * + NUM_HW_PIC_BUF; + size = min_t(u32, size, H265D_MAX_SLICE + 1); + size = 2 * size * SIZE_H265D_BSE_CMD_PER_BUF; + + return ALIGN(size, HFI_DMA_ALIGNMENT); +} + +static u32 size_h265d_vpp_cmd_buf(u32 width, u32 height) +{ + u32 size; + + size = (ALIGN(width, LCU_MAX_SIZE_PELS) / LCU_MIN_SIZE_PELS) * + (ALIGN(height, LCU_MAX_SIZE_PELS) / LCU_MIN_SIZE_PELS) * + NUM_HW_PIC_BUF; + size = min_t(u32, size, H265D_MAX_SLICE + 1); + size = ALIGN(size, 4); + size = 2 * size * SIZE_H265D_VPP_CMD_PER_BUF; + size = ALIGN(size, HFI_DMA_ALIGNMENT); + if (size > VPP_CMD_MAX_SIZE) + size = VPP_CMD_MAX_SIZE; + + return size; +} + +static u32 hfi_iris2_h265d_comv_size(u32 width, u32 height, + u32 yuv_buf_count_min) +{ + u32 size; + + size = ALIGN(((((width + 15) >> 4) * ((height + 15) >> 4)) << 8), 512); + size *= yuv_buf_count_min; + size += 512; + + return size; +} + +static u32 hfi_iris2_h265d_non_comv_size(u32 width, u32 height, + u32 num_vpp_pipes) +{ + u32 size_bse, size_vpp, size; + + size_bse = size_h265d_bse_cmd_buf(width, height); + size_vpp = size_h265d_vpp_cmd_buf(width, height); + size = + ALIGN(size_bse, HFI_DMA_ALIGNMENT) + + ALIGN(size_vpp, HFI_DMA_ALIGNMENT) + + ALIGN(NUM_HW_PIC_BUF * 20 * 22 * 4, HFI_DMA_ALIGNMENT) + + ALIGN(2 * sizeof(u16) * + (ALIGN(width, LCU_MAX_SIZE_PELS) / LCU_MIN_SIZE_PELS) * + (ALIGN(height, LCU_MAX_SIZE_PELS) / LCU_MIN_SIZE_PELS), + HFI_DMA_ALIGNMENT) + + ALIGN(SIZE_HW_PIC(SIZE_H265D_HW_PIC_T), HFI_DMA_ALIGNMENT) + + ALIGN(SIZE_H265D_LB_FE_TOP_DATA(width, height), + HFI_DMA_ALIGNMENT) + + ALIGN(SIZE_H265D_LB_FE_TOP_CTRL(width, height), + HFI_DMA_ALIGNMENT) + + ALIGN(SIZE_H265D_LB_FE_LEFT_CTRL(width, height), + HFI_DMA_ALIGNMENT) * num_vpp_pipes + + ALIGN(size_h265d_lb_se_left_ctrl(width, height), + HFI_DMA_ALIGNMENT) * num_vpp_pipes + + ALIGN(SIZE_H265D_LB_SE_TOP_CTRL(width, height), + HFI_DMA_ALIGNMENT) + + ALIGN(SIZE_H265D_LB_PE_TOP_DATA(width, height), + HFI_DMA_ALIGNMENT) + + ALIGN(SIZE_H265D_LB_VSP_TOP(width, height), HFI_DMA_ALIGNMENT) + + ALIGN(SIZE_H265D_LB_VSP_LEFT(width, height), + 
HFI_DMA_ALIGNMENT) * num_vpp_pipes + + ALIGN(SIZE_H265D_LB_RECON_DMA_METADATA_WR(width, height), + HFI_DMA_ALIGNMENT) + * 4 + + ALIGN(SIZE_H265D_QP(width, height), HFI_DMA_ALIGNMENT); + + return ALIGN(size, HFI_DMA_ALIGNMENT); +} + +static u32 hfi_iris2_vp8d_comv_size(u32 width, u32 height, + u32 yuv_min_buf_count) +{ + return (((width + 15) >> 4) * ((height + 15) >> 4) * 8 * 2); +} + +static u32 h264d_scratch1_size(u32 width, u32 height, u32 min_buf_count, + bool split_mode_enabled, u32 num_vpp_pipes) +{ + u32 co_mv_size, nonco_mv_size, vpss_lb_size = 0; + + co_mv_size = hfi_iris2_h264d_comv_size(width, height, min_buf_count); + nonco_mv_size = hfi_iris2_h264d_non_comv_size(width, height, + num_vpp_pipes); + if (split_mode_enabled) + vpss_lb_size = size_vpss_lb(width, height, num_vpp_pipes); + + return co_mv_size + nonco_mv_size + vpss_lb_size; +} + +static u32 h265d_scratch1_size(u32 width, u32 height, u32 min_buf_count, + bool split_mode_enabled, u32 num_vpp_pipes) +{ + u32 co_mv_size, nonco_mv_size, vpss_lb_size = 0; + + co_mv_size = hfi_iris2_h265d_comv_size(width, height, min_buf_count); + nonco_mv_size = hfi_iris2_h265d_non_comv_size(width, height, + num_vpp_pipes); + if (split_mode_enabled) + vpss_lb_size = size_vpss_lb(width, height, num_vpp_pipes); + + return co_mv_size + nonco_mv_size + vpss_lb_size + + HDR10_HIST_EXTRADATA_SIZE; +} + +static u32 vp8d_scratch1_size(u32 width, u32 height, u32 min_buf_count, + bool split_mode_enabled, u32 num_vpp_pipes) +{ + u32 vpss_lb_size = 0, size; + + size = hfi_iris2_vp8d_comv_size(width, height, 0); + size += ALIGN(size_vpxd_lb_fe_left_ctrl(width, height), + HFI_DMA_ALIGNMENT) * num_vpp_pipes + + ALIGN(size_vpxd_lb_se_left_ctrl(width, height), + HFI_DMA_ALIGNMENT) * num_vpp_pipes + + ALIGN(SIZE_VP8D_LB_VSP_TOP(width, height), HFI_DMA_ALIGNMENT) + + ALIGN(SIZE_VPXD_LB_FE_TOP_CTRL(width, height), + HFI_DMA_ALIGNMENT) + + 2 * ALIGN(SIZE_VPXD_LB_RECON_DMA_METADATA_WR(width, height), + HFI_DMA_ALIGNMENT) + + ALIGN(SIZE_VPXD_LB_SE_TOP_CTRL(width, height), + HFI_DMA_ALIGNMENT) + + ALIGN(SIZE_VP8D_LB_PE_TOP_DATA(width, height), + HFI_DMA_ALIGNMENT) + + ALIGN(SIZE_VP8D_LB_FE_TOP_DATA(width, height), + HFI_DMA_ALIGNMENT); + if (split_mode_enabled) + vpss_lb_size = size_vpss_lb(width, height, num_vpp_pipes); + + size += vpss_lb_size; + + return size; +} + +static u32 vp9d_scratch1_size(u32 width, u32 height, u32 min_buf_count, + bool split_mode_enabled, u32 num_vpp_pipes) +{ + u32 vpss_lb_size = 0; + u32 size; + + size = + ALIGN(size_vpxd_lb_fe_left_ctrl(width, height), + HFI_DMA_ALIGNMENT) * num_vpp_pipes + + ALIGN(size_vpxd_lb_se_left_ctrl(width, height), + HFI_DMA_ALIGNMENT) * num_vpp_pipes + + ALIGN(SIZE_VP9D_LB_VSP_TOP(width, height), HFI_DMA_ALIGNMENT) + + ALIGN(SIZE_VPXD_LB_FE_TOP_CTRL(width, height), + HFI_DMA_ALIGNMENT) + + 2 * ALIGN(SIZE_VPXD_LB_RECON_DMA_METADATA_WR(width, height), + HFI_DMA_ALIGNMENT) + + ALIGN(SIZE_VPXD_LB_SE_TOP_CTRL(width, height), + HFI_DMA_ALIGNMENT) + + ALIGN(SIZE_VP9D_LB_PE_TOP_DATA(width, height), + HFI_DMA_ALIGNMENT) + + ALIGN(SIZE_VP9D_LB_FE_TOP_DATA(width, height), + HFI_DMA_ALIGNMENT); + + if (split_mode_enabled) + vpss_lb_size = size_vpss_lb(width, height, num_vpp_pipes); + + size += vpss_lb_size + HDR10_HIST_EXTRADATA_SIZE; + + return size; +} + +static u32 mpeg2d_scratch1_size(u32 width, u32 height, u32 min_buf_count, + bool split_mode_enabled, u32 num_vpp_pipes) +{ + u32 vpss_lb_size = 0; + u32 size; + + size = + ALIGN(size_vpxd_lb_fe_left_ctrl(width, height), + HFI_DMA_ALIGNMENT) * num_vpp_pipes + + 
ALIGN(size_vpxd_lb_se_left_ctrl(width, height), + HFI_DMA_ALIGNMENT) * num_vpp_pipes + + ALIGN(SIZE_VP8D_LB_VSP_TOP(width, height), HFI_DMA_ALIGNMENT) + + ALIGN(SIZE_VPXD_LB_FE_TOP_CTRL(width, height), + HFI_DMA_ALIGNMENT) + + 2 * ALIGN(SIZE_VPXD_LB_RECON_DMA_METADATA_WR(width, height), + HFI_DMA_ALIGNMENT) + + ALIGN(SIZE_VPXD_LB_SE_TOP_CTRL(width, height), + HFI_DMA_ALIGNMENT) + + ALIGN(SIZE_VP8D_LB_PE_TOP_DATA(width, height), + HFI_DMA_ALIGNMENT) + + ALIGN(SIZE_VP8D_LB_FE_TOP_DATA(width, height), + HFI_DMA_ALIGNMENT); + + if (split_mode_enabled) + vpss_lb_size = size_vpss_lb(width, height, num_vpp_pipes); + + size += vpss_lb_size; + + return size; +} + +static u32 +calculate_enc_scratch1_size(u32 width, u32 height, u32 lcu_size, u32 num_ref, + bool ten_bit, u32 num_vpp_pipes, bool is_h265) +{ + u32 line_buf_ctrl_size, line_buf_data_size, leftline_buf_ctrl_size; + u32 line_buf_sde_size, sps_pps_slice_hdr, topline_buf_ctrl_size_FE; + u32 leftline_buf_ctrl_size_FE, line_buf_recon_pix_size; + u32 leftline_buf_recon_pix_size, lambda_lut_size, override_buffer_size; + u32 col_mv_buf_size, vpp_reg_buffer_size, ir_buffer_size; + u32 vpss_line_buf, leftline_buf_meta_recony, h265e_colrcbuf_size; + u32 h265e_framerc_bufsize, h265e_lcubitcnt_bufsize; + u32 h265e_lcubitmap_bufsize, se_stats_bufsize; + u32 bse_reg_buffer_size, bse_slice_cmd_buffer_size, slice_info_bufsize; + u32 line_buf_ctrl_size_buffid2, slice_cmd_buffer_size; + u32 width_lcu_num, height_lcu_num, width_coded, height_coded; + u32 frame_num_lcu, linebuf_meta_recon_uv, topline_bufsize_fe_1stg_sao; + u32 size, bit_depth, num_lcu_mb; + u32 vpss_line_buffer_size_1; + + width_lcu_num = (width + lcu_size - 1) / lcu_size; + height_lcu_num = (height + lcu_size - 1) / lcu_size; + frame_num_lcu = width_lcu_num * height_lcu_num; + width_coded = width_lcu_num * lcu_size; + height_coded = height_lcu_num * lcu_size; + num_lcu_mb = (height_coded / lcu_size) * + ((width_coded + lcu_size * 8) / lcu_size); + slice_info_bufsize = 256 + (frame_num_lcu << 4); + slice_info_bufsize = ALIGN(slice_info_bufsize, HFI_DMA_ALIGNMENT); + line_buf_ctrl_size = ALIGN(width_coded, HFI_DMA_ALIGNMENT); + line_buf_ctrl_size_buffid2 = ALIGN(width_coded, HFI_DMA_ALIGNMENT); + + bit_depth = ten_bit ? 10 : 8; + line_buf_data_size = + (((((bit_depth * width_coded + 1024) + + (HFI_DMA_ALIGNMENT - 1)) & (~(HFI_DMA_ALIGNMENT - 1))) * 1) + + (((((bit_depth * width_coded + 1024) >> 1) + + (HFI_DMA_ALIGNMENT - 1)) & (~(HFI_DMA_ALIGNMENT - 1))) * 2)); + + leftline_buf_ctrl_size = is_h265 ? + ((height_coded + 32) / 32 * 4 * 16) : + ((height_coded + 15) / 16 * 5 * 16); + + if (num_vpp_pipes > 1) { + leftline_buf_ctrl_size += 512; + leftline_buf_ctrl_size = + ALIGN(leftline_buf_ctrl_size, 512) * num_vpp_pipes; + } + + leftline_buf_ctrl_size = + ALIGN(leftline_buf_ctrl_size, HFI_DMA_ALIGNMENT); + leftline_buf_recon_pix_size = (((ten_bit + 1) * 2 * + (height_coded) + HFI_DMA_ALIGNMENT) + + (HFI_DMA_ALIGNMENT << (num_vpp_pipes - 1)) - 1) & + (~((HFI_DMA_ALIGNMENT << (num_vpp_pipes - 1)) - 1)) * 1; + + topline_buf_ctrl_size_FE = is_h265 ? 
(64 * (width_coded >> 5)) : + (HFI_DMA_ALIGNMENT + 16 * (width_coded >> 4)); + topline_buf_ctrl_size_FE = + ALIGN(topline_buf_ctrl_size_FE, HFI_DMA_ALIGNMENT); + leftline_buf_ctrl_size_FE = + (((HFI_DMA_ALIGNMENT + 64 * (height_coded >> 4)) + + (HFI_DMA_ALIGNMENT << (num_vpp_pipes - 1)) - 1) & + (~((HFI_DMA_ALIGNMENT << (num_vpp_pipes - 1)) - 1)) * 1) * + num_vpp_pipes; + leftline_buf_meta_recony = (HFI_DMA_ALIGNMENT + 64 * + ((height_coded) / (8 * (ten_bit ? 4 : 8)))); + leftline_buf_meta_recony = + ALIGN(leftline_buf_meta_recony, HFI_DMA_ALIGNMENT); + leftline_buf_meta_recony = leftline_buf_meta_recony * num_vpp_pipes; + linebuf_meta_recon_uv = (HFI_DMA_ALIGNMENT + 64 * + ((height_coded) / (4 * (ten_bit ? 4 : 8)))); + linebuf_meta_recon_uv = ALIGN(linebuf_meta_recon_uv, HFI_DMA_ALIGNMENT); + linebuf_meta_recon_uv = linebuf_meta_recon_uv * num_vpp_pipes; + line_buf_recon_pix_size = ((ten_bit ? 3 : 2) * width_coded); + line_buf_recon_pix_size = + ALIGN(line_buf_recon_pix_size, HFI_DMA_ALIGNMENT); + slice_cmd_buffer_size = ALIGN(20480, HFI_DMA_ALIGNMENT); + sps_pps_slice_hdr = 2048 + 4096; + col_mv_buf_size = is_h265 ? (16 * ((frame_num_lcu << 2) + 32)) : + (3 * 16 * (width_lcu_num * height_lcu_num + 32)); + col_mv_buf_size = + ALIGN(col_mv_buf_size, HFI_DMA_ALIGNMENT) * (num_ref + 1); + h265e_colrcbuf_size = + (((width_lcu_num + 7) >> 3) * 16 * 2 * height_lcu_num); + if (num_vpp_pipes > 1) + h265e_colrcbuf_size = + ALIGN(h265e_colrcbuf_size, HFI_DMA_ALIGNMENT) * + num_vpp_pipes; + + h265e_colrcbuf_size = ALIGN(h265e_colrcbuf_size, HFI_DMA_ALIGNMENT) * + HFI_MAX_COL_FRAME; + h265e_framerc_bufsize = (is_h265) ? (256 + 16 * + (14 + (((height_coded >> 5) + 7) >> 3))) : + (256 + 16 * (14 + (((height_coded >> 4) + 7) >> 3))); + h265e_framerc_bufsize *= 6; /* multiply by max numtilescol */ + if (num_vpp_pipes > 1) + h265e_framerc_bufsize = + ALIGN(h265e_framerc_bufsize, HFI_DMA_ALIGNMENT) * + num_vpp_pipes; + + h265e_framerc_bufsize = ALIGN(h265e_framerc_bufsize, 512) * + HFI_MAX_COL_FRAME; + h265e_lcubitcnt_bufsize = 256 + 4 * frame_num_lcu; + h265e_lcubitcnt_bufsize = + ALIGN(h265e_lcubitcnt_bufsize, HFI_DMA_ALIGNMENT); + h265e_lcubitmap_bufsize = 256 + (frame_num_lcu >> 3); + h265e_lcubitmap_bufsize = + ALIGN(h265e_lcubitmap_bufsize, HFI_DMA_ALIGNMENT); + line_buf_sde_size = 256 + 16 * (width_coded >> 4); + line_buf_sde_size = ALIGN(line_buf_sde_size, HFI_DMA_ALIGNMENT); + if ((width_coded * height_coded) > (4096 * 2160)) + se_stats_bufsize = 0; + else if ((width_coded * height_coded) > (1920 * 1088)) + se_stats_bufsize = (40 * 4 * frame_num_lcu + 256 + 256); + else + se_stats_bufsize = (1024 * frame_num_lcu + 256 + 256); + + se_stats_bufsize = ALIGN(se_stats_bufsize, HFI_DMA_ALIGNMENT) * 2; + bse_slice_cmd_buffer_size = (((8192 << 2) + 7) & (~7)) * 6; + bse_reg_buffer_size = (((512 << 3) + 7) & (~7)) * 4; + vpp_reg_buffer_size = + (((HFI_VENUS_VPPSG_MAX_REGISTERS << 3) + 31) & (~31)) * 10; + lambda_lut_size = 256 * 11; + override_buffer_size = 16 * ((num_lcu_mb + 7) >> 3); + override_buffer_size = + ALIGN(override_buffer_size, HFI_DMA_ALIGNMENT) * 2; + ir_buffer_size = (((frame_num_lcu << 1) + 7) & (~7)) * 3; + vpss_line_buffer_size_1 = (((8192 >> 2) << 5) * num_vpp_pipes) + 64; + vpss_line_buf = + (((((max(width_coded, height_coded) + 3) >> 2) << 5) + 256) * + 16) + vpss_line_buffer_size_1; + topline_bufsize_fe_1stg_sao = 16 * (width_coded >> 5); + topline_bufsize_fe_1stg_sao = + ALIGN(topline_bufsize_fe_1stg_sao, HFI_DMA_ALIGNMENT); + + size = + line_buf_ctrl_size + line_buf_data_size + + 
line_buf_ctrl_size_buffid2 + leftline_buf_ctrl_size + + vpss_line_buf + col_mv_buf_size + topline_buf_ctrl_size_FE + + leftline_buf_ctrl_size_FE + line_buf_recon_pix_size + + leftline_buf_recon_pix_size + + leftline_buf_meta_recony + linebuf_meta_recon_uv + + h265e_colrcbuf_size + h265e_framerc_bufsize + + h265e_lcubitcnt_bufsize + h265e_lcubitmap_bufsize + + line_buf_sde_size + + topline_bufsize_fe_1stg_sao + override_buffer_size + + bse_reg_buffer_size + vpp_reg_buffer_size + sps_pps_slice_hdr + + slice_cmd_buffer_size + bse_slice_cmd_buffer_size + + ir_buffer_size + slice_info_bufsize + lambda_lut_size + + se_stats_bufsize + 1024; + + return size; +} + +static u32 h264e_scratch1_size(u32 width, u32 height, u32 num_ref, bool ten_bit, + u32 num_vpp_pipes) +{ + return calculate_enc_scratch1_size(width, height, 16, num_ref, ten_bit, + num_vpp_pipes, false); +} + +static u32 h265e_scratch1_size(u32 width, u32 height, u32 num_ref, bool ten_bit, + u32 num_vpp_pipes) +{ + return calculate_enc_scratch1_size(width, height, 32, num_ref, ten_bit, + num_vpp_pipes, true); +} + +static u32 vp8e_scratch1_size(u32 width, u32 height, u32 num_ref, bool ten_bit, + u32 num_vpp_pipes) +{ + return calculate_enc_scratch1_size(width, height, 16, num_ref, ten_bit, + 1, false); +} + +static u32 ubwc_metadata_plane_stride(u32 width, u32 metadata_stride_multi, + u32 tile_width_pels) +{ + return ALIGN(((width + (tile_width_pels - 1)) / tile_width_pels), + metadata_stride_multi); +} + +static u32 ubwc_metadata_plane_bufheight(u32 height, u32 metadata_height_multi, + u32 tile_height_pels) +{ + return ALIGN(((height + (tile_height_pels - 1)) / tile_height_pels), + metadata_height_multi); +} + +static u32 ubwc_metadata_plane_buffer_size(u32 metadata_stride, + u32 metadata_buf_height) +{ + return ALIGN(metadata_stride * metadata_buf_height, SZ_4K); +} + +static u32 enc_scratch2_size(u32 width, u32 height, u32 num_ref, bool ten_bit) +{ + u32 aligned_width, aligned_height, chroma_height, ref_buf_height; + u32 luma_size, chroma_size; + u32 metadata_stride, meta_buf_height, meta_size_y, meta_size_c; + u32 ref_luma_stride_bytes, ref_chroma_height_bytes; + u32 ref_buf_size, ref_stride; + u32 size; + + if (!ten_bit) { + aligned_height = ALIGN(height, HFI_VENUS_HEIGHT_ALIGNMENT); + chroma_height = height >> 1; + chroma_height = ALIGN(chroma_height, + HFI_VENUS_HEIGHT_ALIGNMENT); + aligned_width = ALIGN(width, HFI_VENUS_WIDTH_ALIGNMENT); + metadata_stride = + ubwc_metadata_plane_stride(width, 64, + NV12_UBWC_Y_TILE_WIDTH); + meta_buf_height = + ubwc_metadata_plane_bufheight(height, 16, + NV12_UBWC_Y_TILE_HEIGHT); + meta_size_y = ubwc_metadata_plane_buffer_size(metadata_stride, + meta_buf_height); + meta_size_c = ubwc_metadata_plane_buffer_size(metadata_stride, + meta_buf_height); + size = (aligned_height + chroma_height) * aligned_width + + meta_size_y + meta_size_c; + size = (size * (num_ref + 3)) + 4096; + } else { + ref_buf_height = (height + (HFI_VENUS_HEIGHT_ALIGNMENT - 1)) + & (~(HFI_VENUS_HEIGHT_ALIGNMENT - 1)); + ref_luma_stride_bytes = + ((width + SYSTEM_LAL_TILE10 - 1) / SYSTEM_LAL_TILE10) * + SYSTEM_LAL_TILE10; + ref_stride = 4 * (ref_luma_stride_bytes / 3); + ref_stride = (ref_stride + (128 - 1)) & (~(128 - 1)); + luma_size = ref_buf_height * ref_stride; + ref_chroma_height_bytes = (((height + 1) >> 1) + + (32 - 1)) & (~(32 - 1)); + chroma_size = ref_stride * ref_chroma_height_bytes; + luma_size = (luma_size + (SZ_4K - 1)) & (~(SZ_4K - 1)); + chroma_size = (chroma_size + (SZ_4K - 1)) & (~(SZ_4K - 1)); + ref_buf_size = 
luma_size + chroma_size; + metadata_stride = + ubwc_metadata_plane_stride(width, + METADATA_STRIDE_MULTIPLE, + TP10_UBWC_Y_TILE_WIDTH); + meta_buf_height = + ubwc_metadata_plane_bufheight(height, + METADATA_HEIGHT_MULTIPLE, + TP10_UBWC_Y_TILE_HEIGHT); + meta_size_y = ubwc_metadata_plane_buffer_size(metadata_stride, + meta_buf_height); + meta_size_c = ubwc_metadata_plane_buffer_size(metadata_stride, + meta_buf_height); + size = ref_buf_size + meta_size_y + meta_size_c; + size = (size * (num_ref + 3)) + 4096; + } + + return size; +} + +static u32 enc_persist_size(void) +{ + return HFI_IRIS2_ENC_PERSIST_SIZE; +} + +static u32 h264d_persist1_size(void) +{ + return ALIGN((SIZE_SLIST_BUF_H264 * NUM_SLIST_BUF_H264 + + NUM_HW_PIC_BUF * SIZE_SEI_USERDATA), HFI_DMA_ALIGNMENT); +} + +static u32 h265d_persist1_size(void) +{ + return ALIGN((SIZE_SLIST_BUF_H265 * NUM_SLIST_BUF_H265 + H265_NUM_TILE + * sizeof(u32) + NUM_HW_PIC_BUF * SIZE_SEI_USERDATA), HFI_DMA_ALIGNMENT); +} + +static u32 vp8d_persist1_size(void) +{ + return ALIGN(VP8_NUM_PROBABILITY_TABLE_BUF * VP8_PROB_TABLE_SIZE, + HFI_DMA_ALIGNMENT); +} + +static u32 vp9d_persist1_size(void) +{ + return + ALIGN(VP9_NUM_PROBABILITY_TABLE_BUF * VP9_PROB_TABLE_SIZE, + HFI_DMA_ALIGNMENT) + + ALIGN(HFI_IRIS2_VP9D_COMV_SIZE, HFI_DMA_ALIGNMENT) + + ALIGN(MAX_SUPERFRAME_HEADER_LEN, HFI_DMA_ALIGNMENT) + + ALIGN(VP9_UDC_HEADER_BUF_SIZE, HFI_DMA_ALIGNMENT) + + ALIGN(VP9_NUM_FRAME_INFO_BUF * CCE_TILE_OFFSET_SIZE, + HFI_DMA_ALIGNMENT); +} + +static u32 mpeg2d_persist1_size(void) +{ + return QMATRIX_SIZE + MP2D_QPDUMP_SIZE; +} + +struct dec_bufsize_ops { + u32 (*scratch)(u32 width, u32 height, bool is_interlaced); + u32 (*scratch1)(u32 width, u32 height, u32 min_buf_count, + bool split_mode_enabled, u32 num_vpp_pipes); + u32 (*persist1)(void); +}; + +struct enc_bufsize_ops { + u32 (*scratch)(u32 width, u32 height, u32 work_mode, u32 num_vpp_pipes, + u32 rc_type); + u32 (*scratch1)(u32 width, u32 height, u32 num_ref, bool ten_bit, + u32 num_vpp_pipes); + u32 (*scratch2)(u32 width, u32 height, u32 num_ref, bool ten_bit); + u32 (*persist)(void); +}; + +static struct dec_bufsize_ops dec_h264_ops = { + .scratch = h264d_scratch_size, + .scratch1 = h264d_scratch1_size, + .persist1 = h264d_persist1_size, +}; + +static struct dec_bufsize_ops dec_h265_ops = { + .scratch = h265d_scratch_size, + .scratch1 = h265d_scratch1_size, + .persist1 = h265d_persist1_size, +}; + +static struct dec_bufsize_ops dec_vp8_ops = { + .scratch = vpxd_scratch_size, + .scratch1 = vp8d_scratch1_size, + .persist1 = vp8d_persist1_size, +}; + +static struct dec_bufsize_ops dec_vp9_ops = { + .scratch = vpxd_scratch_size, + .scratch1 = vp9d_scratch1_size, + .persist1 = vp9d_persist1_size, +}; + +static struct dec_bufsize_ops dec_mpeg2_ops = { + .scratch = mpeg2d_scratch_size, + .scratch1 = mpeg2d_scratch1_size, + .persist1 = mpeg2d_persist1_size, +}; + +static struct enc_bufsize_ops enc_h264_ops = { + .scratch = h264e_scratch_size, + .scratch1 = h264e_scratch1_size, + .scratch2 = enc_scratch2_size, + .persist = enc_persist_size, +}; + +static struct enc_bufsize_ops enc_h265_ops = { + .scratch = h265e_scratch_size, + .scratch1 = h265e_scratch1_size, + .scratch2 = enc_scratch2_size, + .persist = enc_persist_size, +}; + +static struct enc_bufsize_ops enc_vp8_ops = { + .scratch = vp8e_scratch_size, + .scratch1 = vp8e_scratch1_size, + .scratch2 = enc_scratch2_size, + .persist = enc_persist_size, +}; + +static u32 +calculate_dec_input_frame_size(u32 width, u32 height, u32 codec, + u32 max_mbs_per_frame, u32 
buffer_size_limit) +{ + u32 frame_size, num_mbs; + u32 div_factor = 1; + u32 base_res_mbs = NUM_MBS_4K; + + /* + * Decoder input size calculation: + * If clip is 8k buffer size is calculated for 8k : 8k mbs/4 + * For 8k cases we expect width/height to be set always. + * In all other cases size is calculated for 4k: + * 4k mbs for VP8/VP9 and 4k/2 for remaining codecs + */ + num_mbs = (ALIGN(height, 16) * ALIGN(width, 16)) / 256; + if (num_mbs > NUM_MBS_4K) { + div_factor = 4; + base_res_mbs = max_mbs_per_frame; + } else { + base_res_mbs = NUM_MBS_4K; + if (codec == V4L2_PIX_FMT_VP9) + div_factor = 1; + else + div_factor = 2; + } + + frame_size = base_res_mbs * MB_SIZE_IN_PIXEL * 3 / 2 / div_factor; + + /* multiply by 10/8 (1.25) to get size for 10 bit case */ + if (codec == V4L2_PIX_FMT_VP9 || codec == V4L2_PIX_FMT_HEVC) + frame_size = frame_size + (frame_size >> 2); + + if (buffer_size_limit && buffer_size_limit < frame_size) + frame_size = buffer_size_limit; + + return ALIGN(frame_size, SZ_4K); +} + +static int output_buffer_count(u32 session_type, u32 codec) +{ + u32 output_min_count; + + if (session_type == VIDC_SESSION_TYPE_DEC) { + switch (codec) { + case V4L2_PIX_FMT_MPEG2: + case V4L2_PIX_FMT_VP8: + output_min_count = 6; + break; + case V4L2_PIX_FMT_VP9: + output_min_count = 11; + break; + case V4L2_PIX_FMT_H264: + case V4L2_PIX_FMT_HEVC: + default: + output_min_count = 18; + break; + } + } else { + output_min_count = MIN_ENC_OUTPUT_BUFFERS; + } + + return output_min_count; +} + +static int bufreq_dec(struct hfi_plat_buffers_params *params, u32 buftype, + struct hfi_buffer_requirements *bufreq) +{ + enum hfi_version version = params->version; + u32 codec = params->codec; + u32 width = params->width, height = params->height, out_min_count; + u32 out_width = params->out_width, out_height = params->out_height; + struct dec_bufsize_ops *dec_ops; + bool is_secondary_output = params->dec.is_secondary_output; + bool is_interlaced = params->dec.is_interlaced; + u32 max_mbs_per_frame = params->dec.max_mbs_per_frame; + u32 buffer_size_limit = params->dec.buffer_size_limit; + u32 num_vpp_pipes = params->num_vpp_pipes; + + switch (codec) { + case V4L2_PIX_FMT_H264: + dec_ops = &dec_h264_ops; + break; + case V4L2_PIX_FMT_HEVC: + dec_ops = &dec_h265_ops; + break; + case V4L2_PIX_FMT_VP8: + dec_ops = &dec_vp8_ops; + break; + case V4L2_PIX_FMT_VP9: + dec_ops = &dec_vp9_ops; + break; + case V4L2_PIX_FMT_MPEG2: + dec_ops = &dec_mpeg2_ops; + break; + default: + return -EINVAL; + } + + out_min_count = output_buffer_count(VIDC_SESSION_TYPE_DEC, codec); + /* Max of driver and FW count */ + out_min_count = max(out_min_count, hfi_bufreq_get_count_min(bufreq, version)); + + bufreq->type = buftype; + bufreq->region_size = 0; + bufreq->count_actual = 1; + hfi_bufreq_set_count_min(bufreq, version, 1); + hfi_bufreq_set_hold_count(bufreq, version, 1); + bufreq->contiguous = 1; + bufreq->alignment = 256; + + if (buftype == HFI_BUFFER_INPUT) { + hfi_bufreq_set_count_min(bufreq, version, MIN_INPUT_BUFFERS); + bufreq->size = + calculate_dec_input_frame_size(width, height, codec, + max_mbs_per_frame, + buffer_size_limit); + } else if (buftype == HFI_BUFFER_OUTPUT || buftype == HFI_BUFFER_OUTPUT2) { + hfi_bufreq_set_count_min(bufreq, version, out_min_count); + bufreq->size = + venus_helper_get_framesz_raw(params->hfi_color_fmt, + out_width, out_height); + if (buftype == HFI_BUFFER_OUTPUT && + params->dec.is_secondary_output) + bufreq->size = + venus_helper_get_framesz_raw(params->hfi_dpb_color_fmt, + out_width, 
out_height); + } else if (buftype == HFI_BUFFER_INTERNAL_SCRATCH(version)) { + bufreq->size = dec_ops->scratch(width, height, is_interlaced); + } else if (buftype == HFI_BUFFER_INTERNAL_SCRATCH_1(version)) { + bufreq->size = dec_ops->scratch1(width, height, VB2_MAX_FRAME, + is_secondary_output, + num_vpp_pipes); + } else if (buftype == HFI_BUFFER_INTERNAL_PERSIST_1) { + bufreq->size = dec_ops->persist1(); + } else { + bufreq->size = 0; + } + + return 0; +} + +static int bufreq_enc(struct hfi_plat_buffers_params *params, u32 buftype, + struct hfi_buffer_requirements *bufreq) +{ + enum hfi_version version = params->version; + struct enc_bufsize_ops *enc_ops; + u32 width = params->width; + u32 height = params->height; + bool is_tenbit = params->enc.is_tenbit; + u32 num_bframes = params->enc.num_b_frames; + u32 codec = params->codec; + u32 work_mode = params->enc.work_mode; + u32 rc_type = params->enc.rc_type; + u32 num_vpp_pipes = params->num_vpp_pipes; + u32 num_ref, count_min; + + switch (codec) { + case V4L2_PIX_FMT_H264: + enc_ops = &enc_h264_ops; + break; + case V4L2_PIX_FMT_HEVC: + enc_ops = &enc_h265_ops; + break; + case V4L2_PIX_FMT_VP8: + enc_ops = &enc_vp8_ops; + break; + default: + return -EINVAL; + } + + num_ref = num_bframes > 0 ? num_bframes + 1 : 1; + + bufreq->type = buftype; + bufreq->region_size = 0; + bufreq->count_actual = 1; + hfi_bufreq_set_count_min(bufreq, version, 1); + hfi_bufreq_set_hold_count(bufreq, version, 1); + bufreq->contiguous = 1; + bufreq->alignment = 256; + + if (buftype == HFI_BUFFER_INPUT) { + hfi_bufreq_set_count_min(bufreq, version, MIN_INPUT_BUFFERS); + bufreq->size = + venus_helper_get_framesz_raw(params->hfi_color_fmt, + width, height); + } else if (buftype == HFI_BUFFER_OUTPUT || + buftype == HFI_BUFFER_OUTPUT2) { + count_min = output_buffer_count(VIDC_SESSION_TYPE_ENC, codec); + hfi_bufreq_set_count_min(bufreq, version, count_min); + bufreq->size = calculate_enc_output_frame_size(width, height, + rc_type); + } else if (buftype == HFI_BUFFER_INTERNAL_SCRATCH(version)) { + bufreq->size = enc_ops->scratch(width, height, work_mode, + num_vpp_pipes, rc_type); + } else if (buftype == HFI_BUFFER_INTERNAL_SCRATCH_1(version)) { + bufreq->size = enc_ops->scratch1(width, height, num_ref, + is_tenbit, num_vpp_pipes); + } else if (buftype == HFI_BUFFER_INTERNAL_SCRATCH_2(version)) { + bufreq->size = enc_ops->scratch2(width, height, num_ref, + is_tenbit); + } else if (buftype == HFI_BUFFER_INTERNAL_PERSIST) { + bufreq->size = enc_ops->persist(); + } else { + bufreq->size = 0; + } + + return 0; +} + +int hfi_plat_bufreq_v6(struct hfi_plat_buffers_params *params, u32 session_type, + u32 buftype, struct hfi_buffer_requirements *bufreq) +{ + if (session_type == VIDC_SESSION_TYPE_DEC) + return bufreq_dec(params, buftype, bufreq); + else + return bufreq_enc(params, buftype, bufreq); +} diff --git a/drivers/media/platform/qcom/venus/hfi_platform.c b/drivers/media/platform/qcom/venus/hfi_platform.c new file mode 100644 index 0000000000..643e5aa138 --- /dev/null +++ b/drivers/media/platform/qcom/venus/hfi_platform.c @@ -0,0 +1,90 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. 
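As an illustration of how the per-codec size helpers in hfi_plat_bufs_v6.c above are consumed, here is a sketch of querying the compressed-input buffer size for a hypothetical 4K HEVC decode; the parameter values are examples only and not part of this patch:

        struct hfi_buffer_requirements req = {};
        struct hfi_plat_buffers_params params = {
                .width = 3840, .height = 2160,
                .out_width = 3840, .out_height = 2160,
                .codec = V4L2_PIX_FMT_HEVC,
                .hfi_color_fmt = HFI_COLOR_FORMAT_NV12_UBWC,
                .version = HFI_VERSION_6XX,
                .num_vpp_pipes = 4,
        };
        int ret;

        ret = hfi_plat_bufreq_v6(&params, VIDC_SESSION_TYPE_DEC,
                                 HFI_BUFFER_INPUT, &req);
        if (!ret)
                pr_debug("input buffer size: %u\n", req.size);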
+ */ +#include <linux/of.h> +#include "hfi_platform.h" +#include "core.h" + +const struct hfi_platform *hfi_platform_get(enum hfi_version version) +{ + switch (version) { + case HFI_VERSION_4XX: + return &hfi_plat_v4; + case HFI_VERSION_6XX: + return &hfi_plat_v6; + default: + break; + } + + return NULL; +} + +unsigned long +hfi_platform_get_codec_vpp_freq(enum hfi_version version, u32 codec, u32 session_type) +{ + const struct hfi_platform *plat; + unsigned long freq = 0; + + plat = hfi_platform_get(version); + if (!plat) + return 0; + + if (plat->codec_vpp_freq) + freq = plat->codec_vpp_freq(session_type, codec); + + return freq; +} + +unsigned long +hfi_platform_get_codec_vsp_freq(enum hfi_version version, u32 codec, u32 session_type) +{ + const struct hfi_platform *plat; + unsigned long freq = 0; + + plat = hfi_platform_get(version); + if (!plat) + return 0; + + if (plat->codec_vpp_freq) + freq = plat->codec_vsp_freq(session_type, codec); + + return freq; +} + +unsigned long +hfi_platform_get_codec_lp_freq(enum hfi_version version, u32 codec, u32 session_type) +{ + const struct hfi_platform *plat; + unsigned long freq = 0; + + plat = hfi_platform_get(version); + if (!plat) + return 0; + + if (plat->codec_lp_freq) + freq = plat->codec_lp_freq(session_type, codec); + + return freq; +} + +int +hfi_platform_get_codecs(struct venus_core *core, u32 *enc_codecs, u32 *dec_codecs, u32 *count) +{ + const struct hfi_platform *plat; + + plat = hfi_platform_get(core->res->hfi_version); + if (!plat) + return -EINVAL; + + if (plat->codecs) + plat->codecs(enc_codecs, dec_codecs, count); + + if (IS_IRIS2_1(core)) { + *enc_codecs &= ~HFI_VIDEO_CODEC_VP8; + *dec_codecs &= ~HFI_VIDEO_CODEC_VP8; + } + + return 0; +} + diff --git a/drivers/media/platform/qcom/venus/hfi_platform.h b/drivers/media/platform/qcom/venus/hfi_platform.h new file mode 100644 index 0000000000..ec89a90a81 --- /dev/null +++ b/drivers/media/platform/qcom/venus/hfi_platform.h @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. 
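The accessors in hfi_platform.c above only dispatch to the version-specific tables; the per-codec figure they return is treated by the driver's power-management helpers as cycles per macroblock. A rough, hypothetical load estimate for a 4K30 HEVC decode could therefore look like this (sketch only, not part of the patch):

        unsigned long vpp_cyc, mbs_per_sec, load;

        vpp_cyc = hfi_platform_get_codec_vpp_freq(HFI_VERSION_6XX,
                                                  V4L2_PIX_FMT_HEVC,
                                                  VIDC_SESSION_TYPE_DEC);
        mbs_per_sec = (3840 / 16) * (2160 / 16) * 30;   /* 4K at 30 fps */
        load = vpp_cyc * mbs_per_sec;                   /* cycles per second */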
+ */ + +#ifndef __HFI_PLATFORM_H__ +#define __HFI_PLATFORM_H__ + +#include <linux/types.h> +#include <linux/videodev2.h> + +#include "hfi.h" +#include "hfi_plat_bufs.h" +#include "hfi_helper.h" + +#define MAX_PLANES 4 +#define MAX_FMT_ENTRIES 32 +#define MAX_CAP_ENTRIES 32 +#define MAX_ALLOC_MODE_ENTRIES 16 +#define MAX_CODEC_NUM 32 +#define MAX_SESSIONS 16 + +struct raw_formats { + u32 buftype; + u32 fmt; +}; + +struct hfi_plat_caps { + u32 codec; + u32 domain; + bool cap_bufs_mode_dynamic; + unsigned int num_caps; + struct hfi_capability caps[MAX_CAP_ENTRIES]; + unsigned int num_pl; + struct hfi_profile_level pl[HFI_MAX_PROFILE_COUNT]; + unsigned int num_fmts; + struct raw_formats fmts[MAX_FMT_ENTRIES]; + bool valid; /* used only for Venus v1xx */ +}; + +struct hfi_platform_codec_freq_data { + u32 pixfmt; + u32 session_type; + unsigned long vpp_freq; + unsigned long vsp_freq; + unsigned long low_power_freq; +}; + +struct hfi_platform { + unsigned long (*codec_vpp_freq)(u32 session_type, u32 codec); + unsigned long (*codec_vsp_freq)(u32 session_type, u32 codec); + unsigned long (*codec_lp_freq)(u32 session_type, u32 codec); + void (*codecs)(u32 *enc_codecs, u32 *dec_codecs, u32 *count); + const struct hfi_plat_caps *(*capabilities)(unsigned int *entries); + int (*bufreq)(struct hfi_plat_buffers_params *params, u32 session_type, + u32 buftype, struct hfi_buffer_requirements *bufreq); +}; + +extern const struct hfi_platform hfi_plat_v4; +extern const struct hfi_platform hfi_plat_v6; + +const struct hfi_platform *hfi_platform_get(enum hfi_version version); +unsigned long hfi_platform_get_codec_vpp_freq(enum hfi_version version, u32 codec, + u32 session_type); +unsigned long hfi_platform_get_codec_vsp_freq(enum hfi_version version, u32 codec, + u32 session_type); +unsigned long hfi_platform_get_codec_lp_freq(enum hfi_version version, u32 codec, + u32 session_type); +int hfi_platform_get_codecs(struct venus_core *core, u32 *enc_codecs, u32 *dec_codecs, + u32 *count); +#endif diff --git a/drivers/media/platform/qcom/venus/hfi_platform_v4.c b/drivers/media/platform/qcom/venus/hfi_platform_v4.c new file mode 100644 index 0000000000..e3f0a90a56 --- /dev/null +++ b/drivers/media/platform/qcom/venus/hfi_platform_v4.c @@ -0,0 +1,331 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. 
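A minimal sketch of how the ops declared in hfi_platform.h above tie together: a caller resolves the version-specific descriptor with hfi_platform_get() and walks the capability tables that hfi_platform_v4.c below provides. venus_dump_dec_widths() is hypothetical and shown only for illustration, not part of this patch:

static void venus_dump_dec_widths(enum hfi_version version)
{
        const struct hfi_platform *plat = hfi_platform_get(version);
        const struct hfi_plat_caps *caps;
        unsigned int i, j, n;

        if (!plat || !plat->capabilities)
                return;

        caps = plat->capabilities(&n);
        for (i = 0; i < n; i++) {
                if (caps[i].domain != VIDC_SESSION_TYPE_DEC)
                        continue;
                for (j = 0; j < caps[i].num_caps; j++)
                        if (caps[i].caps[j].capability_type == HFI_CAPABILITY_FRAME_WIDTH)
                                pr_info("codec %#x: width %u..%u\n",
                                        caps[i].codec, caps[i].caps[j].min,
                                        caps[i].caps[j].max);
        }
}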
+ */ +#include "hfi_platform.h" + +static const struct hfi_plat_caps caps[] = { +{ + .codec = HFI_VIDEO_CODEC_H264, + .domain = VIDC_SESSION_TYPE_DEC, + .cap_bufs_mode_dynamic = true, + .caps[0] = {HFI_CAPABILITY_FRAME_WIDTH, 96, 4096, 1}, + .caps[1] = {HFI_CAPABILITY_FRAME_HEIGHT, 96, 4096, 1}, + .caps[2] = {HFI_CAPABILITY_MBS_PER_FRAME, 1, 36864, 1}, + .caps[3] = {HFI_CAPABILITY_BITRATE, 1, 120000000, 1}, + .caps[4] = {HFI_CAPABILITY_SCALE_X, 4096, 65536, 1}, + .caps[5] = {HFI_CAPABILITY_SCALE_Y, 4096, 65536, 1}, + .caps[6] = {HFI_CAPABILITY_MBS_PER_SECOND, 1, 2073600, 1}, + .caps[7] = {HFI_CAPABILITY_FRAMERATE, 1, 480, 1}, + .caps[8] = {HFI_CAPABILITY_MAX_VIDEOCORES, 1, 2, 1}, + .caps[9] = {HFI_CAPABILITY_MAX_WORKMODES, 1, 3, 1}, + .num_caps = 10, + .pl[0] = {HFI_H264_PROFILE_BASELINE, HFI_H264_LEVEL_52}, + .pl[1] = {HFI_H264_PROFILE_MAIN, HFI_H264_LEVEL_52}, + .pl[2] = {HFI_H264_PROFILE_HIGH, HFI_H264_LEVEL_52}, + .pl[3] = {HFI_H264_PROFILE_CONSTRAINED_BASE, HFI_H264_LEVEL_52}, + .pl[4] = {HFI_H264_PROFILE_CONSTRAINED_HIGH, HFI_H264_LEVEL_52}, + .num_pl = 5, + .fmts[0] = {HFI_BUFFER_OUTPUT, HFI_COLOR_FORMAT_NV12_UBWC}, + .fmts[1] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV12_UBWC}, + .fmts[2] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV12}, + .fmts[3] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV21}, + .num_fmts = 4, +}, { + .codec = HFI_VIDEO_CODEC_HEVC, + .domain = VIDC_SESSION_TYPE_DEC, + .cap_bufs_mode_dynamic = true, + .caps[0] = {HFI_CAPABILITY_FRAME_WIDTH, 96, 4096, 1}, + .caps[1] = {HFI_CAPABILITY_FRAME_HEIGHT, 96, 4096, 1}, + .caps[2] = {HFI_CAPABILITY_MBS_PER_FRAME, 1, 36864, 1}, + .caps[3] = {HFI_CAPABILITY_BITRATE, 1, 120000000, 1}, + .caps[4] = {HFI_CAPABILITY_SCALE_X, 4096, 65536, 1}, + .caps[5] = {HFI_CAPABILITY_SCALE_Y, 4096, 65536, 1}, + .caps[6] = {HFI_CAPABILITY_MBS_PER_SECOND, 1, 2073600, 1}, + .caps[7] = {HFI_CAPABILITY_FRAMERATE, 1, 480, 1}, + .caps[8] = {HFI_CAPABILITY_MAX_VIDEOCORES, 1, 2, 1}, + .caps[9] = {HFI_CAPABILITY_MAX_WORKMODES, 1, 3, 1}, + .num_caps = 10, + .pl[0] = {HFI_HEVC_PROFILE_MAIN, HFI_HEVC_LEVEL_6 | HFI_HEVC_TIER_HIGH0 << 28}, + .pl[1] = {HFI_HEVC_PROFILE_MAIN10, HFI_HEVC_LEVEL_6 | HFI_HEVC_TIER_HIGH0 << 28}, + .num_pl = 2, + .fmts[0] = {HFI_BUFFER_OUTPUT, HFI_COLOR_FORMAT_NV12_UBWC}, + .fmts[1] = {HFI_BUFFER_OUTPUT, HFI_COLOR_FORMAT_YUV420_TP10_UBWC}, + .fmts[2] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV12_UBWC}, + .fmts[3] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV12}, + .fmts[4] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV21}, + .fmts[5] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_P010}, + .fmts[6] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_YUV420_TP10_UBWC}, + .num_fmts = 7, +}, { + .codec = HFI_VIDEO_CODEC_VP8, + .domain = VIDC_SESSION_TYPE_DEC, + .cap_bufs_mode_dynamic = true, + .caps[0] = {HFI_CAPABILITY_FRAME_WIDTH, 96, 4096, 1}, + .caps[1] = {HFI_CAPABILITY_FRAME_HEIGHT, 96, 4096, 1}, + .caps[2] = {HFI_CAPABILITY_MBS_PER_FRAME, 1, 36864, 1}, + .caps[3] = {HFI_CAPABILITY_BITRATE, 1, 120000000, 1}, + .caps[4] = {HFI_CAPABILITY_SCALE_X, 4096, 65536, 1}, + .caps[5] = {HFI_CAPABILITY_SCALE_Y, 4096, 65536, 1}, + .caps[6] = {HFI_CAPABILITY_MBS_PER_SECOND, 1, 2073600, 1}, + .caps[7] = {HFI_CAPABILITY_FRAMERATE, 1, 480, 1}, + .caps[8] = {HFI_CAPABILITY_MAX_VIDEOCORES, 1, 2, 1}, + .caps[9] = {HFI_CAPABILITY_MAX_WORKMODES, 1, 3, 1}, + .num_caps = 10, + .pl[0] = {HFI_VPX_PROFILE_MAIN, HFI_VPX_LEVEL_VERSION_0}, + .pl[1] = {HFI_VPX_PROFILE_MAIN, HFI_VPX_LEVEL_VERSION_1}, + .pl[2] = {HFI_VPX_PROFILE_MAIN, HFI_VPX_LEVEL_VERSION_2}, + .pl[3] = {HFI_VPX_PROFILE_MAIN, 
HFI_VPX_LEVEL_VERSION_3}, + .num_pl = 4, + .fmts[0] = {HFI_BUFFER_OUTPUT, HFI_COLOR_FORMAT_NV12_UBWC}, + .fmts[1] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV12_UBWC}, + .fmts[2] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV12}, + .fmts[3] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV21}, + .num_fmts = 4, +}, { + .codec = HFI_VIDEO_CODEC_VP9, + .domain = VIDC_SESSION_TYPE_DEC, + .cap_bufs_mode_dynamic = true, + .caps[0] = {HFI_CAPABILITY_FRAME_WIDTH, 96, 4096, 1}, + .caps[1] = {HFI_CAPABILITY_FRAME_HEIGHT, 96, 4096, 1}, + .caps[2] = {HFI_CAPABILITY_MBS_PER_FRAME, 1, 36864, 1}, + .caps[3] = {HFI_CAPABILITY_BITRATE, 1, 120000000, 1}, + .caps[4] = {HFI_CAPABILITY_SCALE_X, 4096, 65536, 1}, + .caps[5] = {HFI_CAPABILITY_SCALE_Y, 4096, 65536, 1}, + .caps[6] = {HFI_CAPABILITY_MBS_PER_SECOND, 1, 2073600, 1}, + .caps[7] = {HFI_CAPABILITY_FRAMERATE, 1, 480, 1}, + .caps[8] = {HFI_CAPABILITY_MAX_VIDEOCORES, 1, 2, 1}, + .caps[9] = {HFI_CAPABILITY_MAX_WORKMODES, 1, 3, 1}, + .num_caps = 10, + .pl[0] = {HFI_VP9_PROFILE_P0, 200}, + .pl[1] = {HFI_VP9_PROFILE_P2_10B, 200}, + .num_pl = 2, + .fmts[0] = {HFI_BUFFER_OUTPUT, HFI_COLOR_FORMAT_NV12_UBWC}, + .fmts[1] = {HFI_BUFFER_OUTPUT, HFI_COLOR_FORMAT_YUV420_TP10_UBWC}, + .fmts[2] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV12_UBWC}, + .fmts[3] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV12}, + .fmts[4] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV21}, + .fmts[5] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_P010}, + .fmts[6] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_YUV420_TP10_UBWC}, + .num_fmts = 7, +}, { + .codec = HFI_VIDEO_CODEC_MPEG2, + .domain = VIDC_SESSION_TYPE_DEC, + .cap_bufs_mode_dynamic = true, + .caps[0] = {HFI_CAPABILITY_FRAME_WIDTH, 96, 1920, 1}, + .caps[1] = {HFI_CAPABILITY_FRAME_HEIGHT, 96, 1920, 1}, + .caps[2] = {HFI_CAPABILITY_MBS_PER_FRAME, 1, 8160, 1}, + .caps[3] = {HFI_CAPABILITY_BITRATE, 1, 40000000, 1}, + .caps[4] = {HFI_CAPABILITY_SCALE_X, 4096, 65536, 1}, + .caps[5] = {HFI_CAPABILITY_SCALE_Y, 4096, 65536, 1}, + .caps[6] = {HFI_CAPABILITY_MBS_PER_SECOND, 1, 244800, 1}, + .caps[7] = {HFI_CAPABILITY_FRAMERATE, 1, 30, 1}, + .caps[8] = {HFI_CAPABILITY_MAX_VIDEOCORES, 1, 2, 1}, + .caps[9] = {HFI_CAPABILITY_MAX_WORKMODES, 1, 1, 1}, + .num_caps = 10, + .pl[0] = {HFI_MPEG2_PROFILE_SIMPLE, HFI_MPEG2_LEVEL_H14}, + .pl[1] = {HFI_MPEG2_PROFILE_MAIN, HFI_MPEG2_LEVEL_H14}, + .num_pl = 2, + .fmts[0] = {HFI_BUFFER_OUTPUT, HFI_COLOR_FORMAT_NV12_UBWC}, + .fmts[1] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV12_UBWC}, + .fmts[2] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV12}, + .fmts[3] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV21}, + .num_fmts = 4, +}, { + .codec = HFI_VIDEO_CODEC_H264, + .domain = VIDC_SESSION_TYPE_ENC, + .cap_bufs_mode_dynamic = true, + .caps[0] = {HFI_CAPABILITY_FRAME_WIDTH, 96, 4096, 16}, + .caps[1] = {HFI_CAPABILITY_FRAME_HEIGHT, 96, 4096, 16}, + .caps[2] = {HFI_CAPABILITY_MBS_PER_FRAME, 1, 36864, 1}, + .caps[3] = {HFI_CAPABILITY_BITRATE, 1, 120000000, 1}, + .caps[4] = {HFI_CAPABILITY_SCALE_X, 8192, 65536, 1}, + .caps[5] = {HFI_CAPABILITY_SCALE_Y, 8192, 65536, 1}, + .caps[6] = {HFI_CAPABILITY_MBS_PER_SECOND, 1, 1036800, 1}, + .caps[7] = {HFI_CAPABILITY_FRAMERATE, 1, 480, 1}, + .caps[8] = {HFI_CAPABILITY_MAX_VIDEOCORES, 1, 3, 1}, + .caps[9] = {HFI_CAPABILITY_PEAKBITRATE, 32000, 160000000, 1}, + .caps[10] = {HFI_CAPABILITY_HIER_P_NUM_ENH_LAYERS, 0, 5, 1}, + .caps[11] = {HFI_CAPABILITY_ENC_LTR_COUNT, 0, 4, 1}, + .caps[12] = {HFI_CAPABILITY_LCU_SIZE, 16, 16, 1}, + .caps[13] = {HFI_CAPABILITY_BFRAME, 0, 1, 1}, + .caps[14] = 
{HFI_CAPABILITY_HIER_P_HYBRID_NUM_ENH_LAYERS, 0, 5, 1}, + .caps[15] = {HFI_CAPABILITY_I_FRAME_QP, 0, 51, 1}, + .caps[16] = {HFI_CAPABILITY_P_FRAME_QP, 0, 51, 1}, + .caps[17] = {HFI_CAPABILITY_B_FRAME_QP, 0, 51, 1}, + .caps[18] = {HFI_CAPABILITY_MAX_WORKMODES, 1, 2, 1}, + .caps[19] = {HFI_CAPABILITY_RATE_CONTROL_MODES, 0x1000001, 0x1000005, 1}, + .caps[20] = {HFI_CAPABILITY_COLOR_SPACE_CONVERSION, 0, 2, 1}, + .num_caps = 21, + .pl[0] = {HFI_H264_PROFILE_BASELINE, HFI_H264_LEVEL_52}, + .pl[1] = {HFI_H264_PROFILE_MAIN, HFI_H264_LEVEL_52}, + .pl[2] = {HFI_H264_PROFILE_HIGH, HFI_H264_LEVEL_52}, + .pl[3] = {HFI_H264_PROFILE_CONSTRAINED_BASE, HFI_H264_LEVEL_52}, + .pl[4] = {HFI_H264_PROFILE_CONSTRAINED_HIGH, HFI_H264_LEVEL_52}, + .num_pl = 5, + .fmts[0] = {HFI_BUFFER_INPUT, HFI_COLOR_FORMAT_NV12}, + .fmts[1] = {HFI_BUFFER_INPUT, HFI_COLOR_FORMAT_NV12_UBWC}, + .fmts[2] = {HFI_BUFFER_INPUT, HFI_COLOR_FORMAT_YUV420_TP10_UBWC}, + .fmts[3] = {HFI_BUFFER_INPUT, HFI_COLOR_FORMAT_P010}, + .num_fmts = 4, +}, { + .codec = HFI_VIDEO_CODEC_HEVC, + .domain = VIDC_SESSION_TYPE_ENC, + .cap_bufs_mode_dynamic = true, + .caps[0] = {HFI_CAPABILITY_FRAME_WIDTH, 96, 4096, 16}, + .caps[1] = {HFI_CAPABILITY_FRAME_HEIGHT, 96, 4096, 16}, + .caps[2] = {HFI_CAPABILITY_MBS_PER_FRAME, 1, 36864, 1}, + .caps[3] = {HFI_CAPABILITY_BITRATE, 1, 120000000, 1}, + .caps[4] = {HFI_CAPABILITY_SCALE_X, 8192, 65536, 1}, + .caps[5] = {HFI_CAPABILITY_SCALE_Y, 8192, 65536, 1}, + .caps[6] = {HFI_CAPABILITY_MBS_PER_SECOND, 1, 1036800, 1}, + .caps[7] = {HFI_CAPABILITY_FRAMERATE, 1, 480, 1}, + .caps[8] = {HFI_CAPABILITY_MAX_VIDEOCORES, 1, 3, 1}, + .caps[9] = {HFI_CAPABILITY_PEAKBITRATE, 32000, 160000000, 1}, + .caps[10] = {HFI_CAPABILITY_HIER_P_NUM_ENH_LAYERS, 0, 5, 1}, + .caps[11] = {HFI_CAPABILITY_ENC_LTR_COUNT, 0, 4, 1}, + .caps[12] = {HFI_CAPABILITY_LCU_SIZE, 32, 32, 1}, + .caps[13] = {HFI_CAPABILITY_BFRAME, 0, 1, 1}, + .caps[14] = {HFI_CAPABILITY_HIER_P_HYBRID_NUM_ENH_LAYERS, 0, 5, 1}, + .caps[15] = {HFI_CAPABILITY_I_FRAME_QP, 0, 63, 1}, + .caps[16] = {HFI_CAPABILITY_P_FRAME_QP, 0, 63, 1}, + .caps[17] = {HFI_CAPABILITY_B_FRAME_QP, 0, 63, 1}, + .caps[18] = {HFI_CAPABILITY_MAX_WORKMODES, 1, 2, 1}, + .caps[19] = {HFI_CAPABILITY_RATE_CONTROL_MODES, 0x1000001, 0x1000005, 1}, + .caps[20] = {HFI_CAPABILITY_COLOR_SPACE_CONVERSION, 0, 2, 1}, + .caps[21] = {HFI_CAPABILITY_ROTATION, 1, 4, 90}, + .caps[22] = {HFI_CAPABILITY_BLUR_WIDTH, 96, 4096, 16}, + .caps[23] = {HFI_CAPABILITY_BLUR_HEIGHT, 96, 4096, 16}, + .num_caps = 24, + .pl[0] = {HFI_HEVC_PROFILE_MAIN, HFI_HEVC_LEVEL_6 | HFI_HEVC_TIER_HIGH0}, + .pl[1] = {HFI_HEVC_PROFILE_MAIN10, HFI_HEVC_LEVEL_6 | HFI_HEVC_TIER_HIGH0}, + .num_pl = 2, + .fmts[0] = {HFI_BUFFER_INPUT, HFI_COLOR_FORMAT_NV12}, + .fmts[1] = {HFI_BUFFER_INPUT, HFI_COLOR_FORMAT_NV12_UBWC}, + .fmts[2] = {HFI_BUFFER_INPUT, HFI_COLOR_FORMAT_YUV420_TP10_UBWC}, + .fmts[3] = {HFI_BUFFER_INPUT, HFI_COLOR_FORMAT_P010}, + .num_fmts = 4, +}, { + .codec = HFI_VIDEO_CODEC_VP8, + .domain = VIDC_SESSION_TYPE_ENC, + .cap_bufs_mode_dynamic = true, + .caps[0] = {HFI_CAPABILITY_FRAME_WIDTH, 96, 4096, 16}, + .caps[1] = {HFI_CAPABILITY_FRAME_HEIGHT, 96, 4096, 16}, + .caps[2] = {HFI_CAPABILITY_MBS_PER_FRAME, 1, 36864, 1}, + .caps[3] = {HFI_CAPABILITY_BITRATE, 1, 120000000, 1}, + .caps[4] = {HFI_CAPABILITY_SCALE_X, 8192, 65536, 1}, + .caps[5] = {HFI_CAPABILITY_SCALE_Y, 8192, 65536, 1}, + .caps[6] = {HFI_CAPABILITY_MBS_PER_SECOND, 1, 1036800, 1}, + .caps[7] = {HFI_CAPABILITY_FRAMERATE, 1, 240, 1}, + .caps[8] = {HFI_CAPABILITY_MAX_VIDEOCORES, 1, 3, 1}, + 
.caps[9] = {HFI_CAPABILITY_PEAKBITRATE, 32000, 160000000, 1}, + .caps[10] = {HFI_CAPABILITY_HIER_P_NUM_ENH_LAYERS, 0, 3, 1}, + .caps[11] = {HFI_CAPABILITY_ENC_LTR_COUNT, 0, 2, 1}, + .caps[12] = {HFI_CAPABILITY_LCU_SIZE, 16, 16, 1}, + .caps[13] = {HFI_CAPABILITY_BFRAME, 0, 1, 1}, + .caps[14] = {HFI_CAPABILITY_HIER_P_HYBRID_NUM_ENH_LAYERS, 0, 5, 1}, + .caps[15] = {HFI_CAPABILITY_I_FRAME_QP, 0, 127, 1}, + .caps[16] = {HFI_CAPABILITY_P_FRAME_QP, 0, 127, 1}, + .caps[17] = {HFI_CAPABILITY_MAX_WORKMODES, 1, 2, 1}, + .caps[18] = {HFI_CAPABILITY_RATE_CONTROL_MODES, 0x1000001, 0x1000005, 1}, + .caps[19] = {HFI_CAPABILITY_BLUR_WIDTH, 96, 4096, 16}, + .caps[20] = {HFI_CAPABILITY_BLUR_HEIGHT, 96, 4096, 16}, + .caps[21] = {HFI_CAPABILITY_COLOR_SPACE_CONVERSION, 0, 2, 1}, + .caps[22] = {HFI_CAPABILITY_ROTATION, 1, 4, 90}, + .num_caps = 23, + .pl[0] = {HFI_VPX_PROFILE_MAIN, HFI_VPX_LEVEL_VERSION_0}, + .pl[1] = {HFI_VPX_PROFILE_MAIN, HFI_VPX_LEVEL_VERSION_1}, + .pl[2] = {HFI_VPX_PROFILE_MAIN, HFI_VPX_LEVEL_VERSION_2}, + .pl[3] = {HFI_VPX_PROFILE_MAIN, HFI_VPX_LEVEL_VERSION_3}, + .num_pl = 4, + .fmts[0] = {HFI_BUFFER_INPUT, HFI_COLOR_FORMAT_NV12}, + .fmts[1] = {HFI_BUFFER_INPUT, HFI_COLOR_FORMAT_NV12_UBWC}, + .fmts[2] = {HFI_BUFFER_INPUT, HFI_COLOR_FORMAT_YUV420_TP10_UBWC}, + .fmts[3] = {HFI_BUFFER_INPUT, HFI_COLOR_FORMAT_P010}, + .num_fmts = 4, +} }; + +static const struct hfi_plat_caps *get_capabilities(unsigned int *entries) +{ + *entries = ARRAY_SIZE(caps); + return caps; +} + +static void get_codecs(u32 *enc_codecs, u32 *dec_codecs, u32 *count) +{ + *enc_codecs = HFI_VIDEO_CODEC_H264 | HFI_VIDEO_CODEC_HEVC | + HFI_VIDEO_CODEC_VP8; + *dec_codecs = HFI_VIDEO_CODEC_H264 | HFI_VIDEO_CODEC_HEVC | + HFI_VIDEO_CODEC_VP8 | HFI_VIDEO_CODEC_VP9 | + HFI_VIDEO_CODEC_MPEG2; + *count = 8; +} + +static const struct hfi_platform_codec_freq_data codec_freq_data[] = { + { V4L2_PIX_FMT_H264, VIDC_SESSION_TYPE_ENC, 675, 10, 320 }, + { V4L2_PIX_FMT_HEVC, VIDC_SESSION_TYPE_ENC, 675, 10, 320 }, + { V4L2_PIX_FMT_VP8, VIDC_SESSION_TYPE_ENC, 675, 10, 320 }, + { V4L2_PIX_FMT_MPEG2, VIDC_SESSION_TYPE_DEC, 200, 10, 200 }, + { V4L2_PIX_FMT_H264, VIDC_SESSION_TYPE_DEC, 200, 10, 200 }, + { V4L2_PIX_FMT_HEVC, VIDC_SESSION_TYPE_DEC, 200, 10, 200 }, + { V4L2_PIX_FMT_VP8, VIDC_SESSION_TYPE_DEC, 200, 10, 200 }, + { V4L2_PIX_FMT_VP9, VIDC_SESSION_TYPE_DEC, 200, 10, 200 }, +}; + +static const struct hfi_platform_codec_freq_data * +get_codec_freq_data(u32 session_type, u32 pixfmt) +{ + const struct hfi_platform_codec_freq_data *data = codec_freq_data; + unsigned int i, data_size = ARRAY_SIZE(codec_freq_data); + const struct hfi_platform_codec_freq_data *found = NULL; + + for (i = 0; i < data_size; i++) { + if (data[i].pixfmt == pixfmt && data[i].session_type == session_type) { + found = &data[i]; + break; + } + } + + return found; +} + +static unsigned long codec_vpp_freq(u32 session_type, u32 codec) +{ + const struct hfi_platform_codec_freq_data *data; + + data = get_codec_freq_data(session_type, codec); + if (data) + return data->vpp_freq; + + return 0; +} + +static unsigned long codec_vsp_freq(u32 session_type, u32 codec) +{ + const struct hfi_platform_codec_freq_data *data; + + data = get_codec_freq_data(session_type, codec); + if (data) + return data->vsp_freq; + + return 0; +} + +static unsigned long codec_lp_freq(u32 session_type, u32 codec) +{ + const struct hfi_platform_codec_freq_data *data; + + data = get_codec_freq_data(session_type, codec); + if (data) + return data->low_power_freq; + + return 0; +} + +const struct hfi_platform 
hfi_plat_v4 = { + .codec_vpp_freq = codec_vpp_freq, + .codec_vsp_freq = codec_vsp_freq, + .codec_lp_freq = codec_lp_freq, + .codecs = get_codecs, + .capabilities = get_capabilities, +}; diff --git a/drivers/media/platform/qcom/venus/hfi_platform_v6.c b/drivers/media/platform/qcom/venus/hfi_platform_v6.c new file mode 100644 index 0000000000..4e8af645f8 --- /dev/null +++ b/drivers/media/platform/qcom/venus/hfi_platform_v6.c @@ -0,0 +1,332 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + */ +#include "hfi_platform.h" + +static const struct hfi_plat_caps caps[] = { +{ + .codec = HFI_VIDEO_CODEC_H264, + .domain = VIDC_SESSION_TYPE_DEC, + .cap_bufs_mode_dynamic = true, + .caps[0] = {HFI_CAPABILITY_FRAME_WIDTH, 128, 8192, 1}, + .caps[1] = {HFI_CAPABILITY_FRAME_HEIGHT, 128, 8192, 1}, + /* ((5760 * 2880) / 256) */ + .caps[2] = {HFI_CAPABILITY_MBS_PER_FRAME, 64, 138240, 1}, + .caps[3] = {HFI_CAPABILITY_BITRATE, 1, 220000000, 1}, + .caps[4] = {HFI_CAPABILITY_SCALE_X, 65536, 65536, 1}, + .caps[5] = {HFI_CAPABILITY_SCALE_Y, 65536, 65536, 1}, + .caps[6] = {HFI_CAPABILITY_MBS_PER_SECOND, 64, 7833600, 1}, + .caps[7] = {HFI_CAPABILITY_FRAMERATE, 1, 960, 1}, + .caps[8] = {HFI_CAPABILITY_MAX_VIDEOCORES, 0, 1, 1}, + .num_caps = 9, + .pl[0] = {HFI_H264_PROFILE_BASELINE, HFI_H264_LEVEL_52}, + .pl[1] = {HFI_H264_PROFILE_MAIN, HFI_H264_LEVEL_52}, + .pl[2] = {HFI_H264_PROFILE_HIGH, HFI_H264_LEVEL_52}, + .pl[3] = {HFI_H264_PROFILE_CONSTRAINED_BASE, HFI_H264_LEVEL_52}, + .pl[4] = {HFI_H264_PROFILE_CONSTRAINED_HIGH, HFI_H264_LEVEL_52}, + .num_pl = 5, + .fmts[0] = {HFI_BUFFER_OUTPUT, HFI_COLOR_FORMAT_NV12_UBWC}, + .fmts[1] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV12_UBWC}, + .fmts[2] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV12}, + .fmts[3] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV21}, + .num_fmts = 4, +}, { + .codec = HFI_VIDEO_CODEC_HEVC, + .domain = VIDC_SESSION_TYPE_DEC, + .cap_bufs_mode_dynamic = true, + .caps[0] = {HFI_CAPABILITY_FRAME_WIDTH, 128, 8192, 1}, + .caps[1] = {HFI_CAPABILITY_FRAME_HEIGHT, 128, 8192, 1}, + .caps[2] = {HFI_CAPABILITY_MBS_PER_FRAME, 64, 138240, 1}, + .caps[3] = {HFI_CAPABILITY_BITRATE, 1, 220000000, 1}, + .caps[4] = {HFI_CAPABILITY_SCALE_X, 65536, 65536, 1}, + .caps[5] = {HFI_CAPABILITY_SCALE_Y, 65536, 65536, 1}, + .caps[6] = {HFI_CAPABILITY_MBS_PER_SECOND, 64, 7833600, 1}, + .caps[7] = {HFI_CAPABILITY_FRAMERATE, 1, 960, 1}, + .caps[8] = {HFI_CAPABILITY_MAX_VIDEOCORES, 0, 1, 1}, + .caps[9] = {HFI_CAPABILITY_MAX_WORKMODES, 1, 3, 1}, + .num_caps = 10, + .pl[0] = {HFI_HEVC_PROFILE_MAIN, HFI_HEVC_LEVEL_6 | HFI_HEVC_TIER_HIGH0}, + .pl[1] = {HFI_HEVC_PROFILE_MAIN10, HFI_HEVC_LEVEL_6 | HFI_HEVC_TIER_HIGH0}, + .num_pl = 2, + .fmts[0] = {HFI_BUFFER_OUTPUT, HFI_COLOR_FORMAT_NV12_UBWC}, + .fmts[1] = {HFI_BUFFER_OUTPUT, HFI_COLOR_FORMAT_YUV420_TP10_UBWC}, + .fmts[2] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV12_UBWC}, + .fmts[3] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV12}, + .fmts[4] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV21}, + .fmts[5] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_P010}, + .fmts[6] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_YUV420_TP10_UBWC}, + .num_fmts = 7, +}, { + .codec = HFI_VIDEO_CODEC_VP8, + .domain = VIDC_SESSION_TYPE_DEC, + .cap_bufs_mode_dynamic = true, + .caps[0] = {HFI_CAPABILITY_FRAME_WIDTH, 128, 4096, 1}, + .caps[1] = {HFI_CAPABILITY_FRAME_HEIGHT, 128, 4096, 1}, + .caps[2] = {HFI_CAPABILITY_MBS_PER_FRAME, 64, 36864, 1}, + .caps[3] = {HFI_CAPABILITY_BITRATE, 1, 100000000, 1}, + .caps[4] = 
{HFI_CAPABILITY_SCALE_X, 65536, 65536, 1}, + .caps[5] = {HFI_CAPABILITY_SCALE_Y, 65536, 65536, 1}, + .caps[6] = {HFI_CAPABILITY_MBS_PER_SECOND, 64, 4423680, 1}, + .caps[7] = {HFI_CAPABILITY_FRAMERATE, 1, 120, 1}, + .caps[8] = {HFI_CAPABILITY_MAX_VIDEOCORES, 0, 1, 1}, + .caps[9] = {HFI_CAPABILITY_MAX_WORKMODES, 1, 3, 1}, + .num_caps = 10, + .pl[0] = {HFI_VPX_PROFILE_MAIN, HFI_VPX_LEVEL_VERSION_0}, + .pl[1] = {HFI_VPX_PROFILE_MAIN, HFI_VPX_LEVEL_VERSION_1}, + .pl[2] = {HFI_VPX_PROFILE_MAIN, HFI_VPX_LEVEL_VERSION_2}, + .pl[3] = {HFI_VPX_PROFILE_MAIN, HFI_VPX_LEVEL_VERSION_3}, + .num_pl = 4, + .fmts[0] = {HFI_BUFFER_OUTPUT, HFI_COLOR_FORMAT_NV12_UBWC}, + .fmts[1] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV12_UBWC}, + .fmts[2] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV12}, + .fmts[3] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV21}, + .num_fmts = 4, +}, { + .codec = HFI_VIDEO_CODEC_VP9, + .domain = VIDC_SESSION_TYPE_DEC, + .cap_bufs_mode_dynamic = true, + .caps[0] = {HFI_CAPABILITY_FRAME_WIDTH, 128, 8192, 1}, + .caps[1] = {HFI_CAPABILITY_FRAME_HEIGHT, 128, 8192, 1}, + .caps[2] = {HFI_CAPABILITY_MBS_PER_FRAME, 64, 138240, 1}, + .caps[3] = {HFI_CAPABILITY_BITRATE, 1, 220000000, 1}, + .caps[4] = {HFI_CAPABILITY_SCALE_X, 65536, 65536, 1}, + .caps[5] = {HFI_CAPABILITY_SCALE_Y, 65536, 65536, 1}, + .caps[6] = {HFI_CAPABILITY_MBS_PER_SECOND, 64, 7833600, 1}, + .caps[7] = {HFI_CAPABILITY_FRAMERATE, 1, 960, 1}, + .caps[8] = {HFI_CAPABILITY_MAX_VIDEOCORES, 0, 1, 1}, + .caps[9] = {HFI_CAPABILITY_MAX_WORKMODES, 1, 3, 1}, + .num_caps = 10, + .pl[0] = {HFI_VP9_PROFILE_P0, 200}, + .pl[1] = {HFI_VP9_PROFILE_P2_10B, 200}, + .num_pl = 2, + .fmts[0] = {HFI_BUFFER_OUTPUT, HFI_COLOR_FORMAT_NV12_UBWC}, + .fmts[1] = {HFI_BUFFER_OUTPUT, HFI_COLOR_FORMAT_YUV420_TP10_UBWC}, + .fmts[2] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV12_UBWC}, + .fmts[3] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV12}, + .fmts[4] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV21}, + .fmts[5] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_P010}, + .fmts[6] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_YUV420_TP10_UBWC}, + .num_fmts = 7, +}, { + .codec = HFI_VIDEO_CODEC_MPEG2, + .domain = VIDC_SESSION_TYPE_DEC, + .cap_bufs_mode_dynamic = true, + .caps[0] = {HFI_CAPABILITY_FRAME_WIDTH, 128, 1920, 1}, + .caps[1] = {HFI_CAPABILITY_FRAME_HEIGHT, 128, 1920, 1}, + .caps[2] = {HFI_CAPABILITY_MBS_PER_FRAME, 64, 8160, 1}, + .caps[3] = {HFI_CAPABILITY_BITRATE, 1, 40000000, 1}, + .caps[4] = {HFI_CAPABILITY_SCALE_X, 65536, 65536, 1}, + .caps[5] = {HFI_CAPABILITY_SCALE_Y, 65536, 65536, 1}, + .caps[6] = {HFI_CAPABILITY_MBS_PER_SECOND, 64, 7833600, 1}, + .caps[7] = {HFI_CAPABILITY_FRAMERATE, 1, 30, 1}, + .caps[8] = {HFI_CAPABILITY_MAX_VIDEOCORES, 0, 1, 1}, + .caps[9] = {HFI_CAPABILITY_MAX_WORKMODES, 1, 1, 1}, + .num_caps = 10, + .pl[0] = {HFI_MPEG2_PROFILE_SIMPLE, HFI_MPEG2_LEVEL_H14}, + .pl[1] = {HFI_MPEG2_PROFILE_MAIN, HFI_MPEG2_LEVEL_H14}, + .num_pl = 2, + .fmts[0] = {HFI_BUFFER_OUTPUT, HFI_COLOR_FORMAT_NV12_UBWC}, + .fmts[1] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV12_UBWC}, + .fmts[2] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV12}, + .fmts[3] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV21}, + .num_fmts = 4, +}, { + .codec = HFI_VIDEO_CODEC_H264, + .domain = VIDC_SESSION_TYPE_ENC, + .cap_bufs_mode_dynamic = true, + .caps[0] = {HFI_CAPABILITY_FRAME_WIDTH, 128, 8192, 1}, + .caps[1] = {HFI_CAPABILITY_FRAME_HEIGHT, 128, 8192, 1}, + .caps[2] = {HFI_CAPABILITY_MBS_PER_FRAME, 64, 138240, 1}, + .caps[3] = {HFI_CAPABILITY_BITRATE, 1, 220000000, 1}, + .caps[4] = {HFI_CAPABILITY_SCALE_X, 8192, 
65536, 1}, + .caps[5] = {HFI_CAPABILITY_SCALE_Y, 8192, 65536, 1}, + .caps[6] = {HFI_CAPABILITY_MBS_PER_SECOND, 64, 7833600, 1}, + .caps[7] = {HFI_CAPABILITY_FRAMERATE, 1, 960, 1}, + .caps[8] = {HFI_CAPABILITY_MAX_VIDEOCORES, 0, 1, 1}, + .caps[9] = {HFI_CAPABILITY_PEAKBITRATE, 32000, 160000000, 1}, + .caps[10] = {HFI_CAPABILITY_HIER_P_NUM_ENH_LAYERS, 0, 6, 1}, + .caps[11] = {HFI_CAPABILITY_ENC_LTR_COUNT, 0, 2, 1}, + .caps[12] = {HFI_CAPABILITY_LCU_SIZE, 16, 16, 1}, + .caps[13] = {HFI_CAPABILITY_BFRAME, 0, 1, 1}, + .caps[14] = {HFI_CAPABILITY_HIER_P_HYBRID_NUM_ENH_LAYERS, 0, 6, 1}, + .caps[15] = {HFI_CAPABILITY_I_FRAME_QP, 0, 51, 1}, + .caps[16] = {HFI_CAPABILITY_P_FRAME_QP, 0, 51, 1}, + .caps[17] = {HFI_CAPABILITY_B_FRAME_QP, 0, 51, 1}, + .caps[18] = {HFI_CAPABILITY_MAX_WORKMODES, 1, 2, 1}, + .caps[19] = {HFI_CAPABILITY_RATE_CONTROL_MODES, 0x1000001, 0x1000005, 1}, + .caps[20] = {HFI_CAPABILITY_COLOR_SPACE_CONVERSION, 0, 2, 1}, + .num_caps = 21, + .pl[0] = {HFI_H264_PROFILE_BASELINE, HFI_H264_LEVEL_52}, + .pl[1] = {HFI_H264_PROFILE_MAIN, HFI_H264_LEVEL_52}, + .pl[2] = {HFI_H264_PROFILE_HIGH, HFI_H264_LEVEL_52}, + .pl[3] = {HFI_H264_PROFILE_CONSTRAINED_BASE, HFI_H264_LEVEL_52}, + .pl[4] = {HFI_H264_PROFILE_CONSTRAINED_HIGH, HFI_H264_LEVEL_52}, + .num_pl = 5, + .fmts[0] = {HFI_BUFFER_INPUT, HFI_COLOR_FORMAT_NV12}, + .fmts[1] = {HFI_BUFFER_INPUT, HFI_COLOR_FORMAT_NV12_UBWC}, + .fmts[2] = {HFI_BUFFER_INPUT, HFI_COLOR_FORMAT_YUV420_TP10_UBWC}, + .fmts[3] = {HFI_BUFFER_INPUT, HFI_COLOR_FORMAT_P010}, + .num_fmts = 4, +}, { + .codec = HFI_VIDEO_CODEC_HEVC, + .domain = VIDC_SESSION_TYPE_ENC, + .cap_bufs_mode_dynamic = true, + .caps[0] = {HFI_CAPABILITY_FRAME_WIDTH, 128, 8192, 16}, + .caps[1] = {HFI_CAPABILITY_FRAME_HEIGHT, 128, 8192, 16}, + .caps[2] = {HFI_CAPABILITY_MBS_PER_FRAME, 64, 138240, 1}, + .caps[3] = {HFI_CAPABILITY_BITRATE, 1, 160000000, 1}, + .caps[4] = {HFI_CAPABILITY_SCALE_X, 8192, 65536, 1}, + .caps[5] = {HFI_CAPABILITY_SCALE_Y, 8192, 65536, 1}, + .caps[6] = {HFI_CAPABILITY_MBS_PER_SECOND, 64, 7833600, 1}, + .caps[7] = {HFI_CAPABILITY_FRAMERATE, 1, 960, 1}, + .caps[8] = {HFI_CAPABILITY_MAX_VIDEOCORES, 0, 1, 1}, + .caps[9] = {HFI_CAPABILITY_PEAKBITRATE, 32000, 160000000, 1}, + .caps[10] = {HFI_CAPABILITY_HIER_P_NUM_ENH_LAYERS, 0, 5, 1}, + .caps[11] = {HFI_CAPABILITY_ENC_LTR_COUNT, 0, 2, 1}, + .caps[12] = {HFI_CAPABILITY_LCU_SIZE, 32, 32, 1}, + .caps[13] = {HFI_CAPABILITY_BFRAME, 0, 1, 1}, + .caps[14] = {HFI_CAPABILITY_HIER_P_HYBRID_NUM_ENH_LAYERS, 0, 5, 1}, + .caps[15] = {HFI_CAPABILITY_I_FRAME_QP, 0, 51, 1}, + .caps[16] = {HFI_CAPABILITY_P_FRAME_QP, 0, 51, 1}, + .caps[17] = {HFI_CAPABILITY_B_FRAME_QP, 0, 51, 1}, + .caps[18] = {HFI_CAPABILITY_MAX_WORKMODES, 1, 2, 1}, + .caps[19] = {HFI_CAPABILITY_RATE_CONTROL_MODES, 0x1000001, 0x1000005, 1}, + .caps[20] = {HFI_CAPABILITY_COLOR_SPACE_CONVERSION, 0, 2, 1}, + .caps[21] = {HFI_CAPABILITY_ROTATION, 1, 4, 90}, + .caps[22] = {HFI_CAPABILITY_BLUR_WIDTH, 96, 4096, 16}, + .caps[23] = {HFI_CAPABILITY_BLUR_HEIGHT, 96, 4096, 16}, + .num_caps = 24, + .pl[0] = {HFI_HEVC_PROFILE_MAIN, HFI_HEVC_LEVEL_6 | HFI_HEVC_TIER_HIGH0}, + .pl[1] = {HFI_HEVC_PROFILE_MAIN10, HFI_HEVC_LEVEL_6 | HFI_HEVC_TIER_HIGH0}, + .num_pl = 2, + .fmts[0] = {HFI_BUFFER_INPUT, HFI_COLOR_FORMAT_NV12}, + .fmts[1] = {HFI_BUFFER_INPUT, HFI_COLOR_FORMAT_NV12_UBWC}, + .fmts[2] = {HFI_BUFFER_INPUT, HFI_COLOR_FORMAT_YUV420_TP10_UBWC}, + .fmts[3] = {HFI_BUFFER_INPUT, HFI_COLOR_FORMAT_P010}, + .num_fmts = 4, +}, { + .codec = HFI_VIDEO_CODEC_VP8, + .domain = VIDC_SESSION_TYPE_ENC, + 
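The SCALE_X/SCALE_Y capabilities above look like Q16 fixed-point ratios, where 65536 represents 1.0; on that reading the encoder range 8192..65536 allows up to 8x downscaling and the decoder range 65536..65536 allows no scaling at all. That interpretation is an assumption made here only for illustration. The sketch converts an input/output size pair to a Q16 ratio and checks it against a range.

/* Sketch of one plausible reading of the SCALE_X/SCALE_Y capabilities:
 * Q16 fixed-point ratios with 65536 == 1.0. This interpretation is an
 * assumption used for illustration only.
 */
#include <stdint.h>
#include <stdio.h>

#define Q16_ONE 65536u

/* Q16 ratio of output to input size, e.g. 1920 -> 960 gives 32768 (0.5). */
static uint32_t q16_ratio(uint32_t out, uint32_t in)
{
	return (uint32_t)(((uint64_t)out << 16) / in);
}

static int scale_allowed(uint32_t ratio_q16, uint32_t min_q16, uint32_t max_q16)
{
	return ratio_q16 >= min_q16 && ratio_q16 <= max_q16;
}

int main(void)
{
	uint32_t half = q16_ratio(960, 1920);	/* 0.5 in Q16 */

	printf("0.5 within 8192..65536:  %d\n", scale_allowed(half, 8192, Q16_ONE));
	printf("0.5 within 65536..65536: %d\n", scale_allowed(half, Q16_ONE, Q16_ONE));
	return 0;
}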
.cap_bufs_mode_dynamic = true, + .caps[0] = {HFI_CAPABILITY_FRAME_WIDTH, 128, 4096, 16}, + .caps[1] = {HFI_CAPABILITY_FRAME_HEIGHT, 128, 4096, 16}, + .caps[2] = {HFI_CAPABILITY_MBS_PER_FRAME, 64, 36864, 1}, + .caps[3] = {HFI_CAPABILITY_BITRATE, 1, 74000000, 1}, + .caps[4] = {HFI_CAPABILITY_SCALE_X, 8192, 65536, 1}, + .caps[5] = {HFI_CAPABILITY_SCALE_Y, 8192, 65536, 1}, + .caps[6] = {HFI_CAPABILITY_MBS_PER_SECOND, 64, 4423680, 1}, + .caps[7] = {HFI_CAPABILITY_FRAMERATE, 1, 120, 1}, + .caps[8] = {HFI_CAPABILITY_MAX_VIDEOCORES, 0, 1, 1}, + .caps[9] = {HFI_CAPABILITY_PEAKBITRATE, 32000, 160000000, 1}, + .caps[10] = {HFI_CAPABILITY_HIER_P_NUM_ENH_LAYERS, 0, 3, 1}, + .caps[11] = {HFI_CAPABILITY_ENC_LTR_COUNT, 0, 2, 1}, + .caps[12] = {HFI_CAPABILITY_LCU_SIZE, 16, 16, 1}, + .caps[13] = {HFI_CAPABILITY_BFRAME, 0, 0, 1}, + .caps[14] = {HFI_CAPABILITY_HIER_P_HYBRID_NUM_ENH_LAYERS, 0, 5, 1}, + .caps[15] = {HFI_CAPABILITY_I_FRAME_QP, 0, 127, 1}, + .caps[16] = {HFI_CAPABILITY_P_FRAME_QP, 0, 127, 1}, + .caps[17] = {HFI_CAPABILITY_MAX_WORKMODES, 1, 2, 1}, + .caps[18] = {HFI_CAPABILITY_RATE_CONTROL_MODES, 0x1000001, 0x1000005, 1}, + .caps[19] = {HFI_CAPABILITY_BLUR_WIDTH, 96, 4096, 16}, + .caps[20] = {HFI_CAPABILITY_BLUR_HEIGHT, 96, 4096, 16}, + .caps[21] = {HFI_CAPABILITY_COLOR_SPACE_CONVERSION, 0, 2, 1}, + .caps[22] = {HFI_CAPABILITY_ROTATION, 1, 4, 90}, + .num_caps = 23, + .pl[0] = {HFI_VPX_PROFILE_MAIN, HFI_VPX_LEVEL_VERSION_0}, + .pl[1] = {HFI_VPX_PROFILE_MAIN, HFI_VPX_LEVEL_VERSION_1}, + .pl[2] = {HFI_VPX_PROFILE_MAIN, HFI_VPX_LEVEL_VERSION_2}, + .pl[3] = {HFI_VPX_PROFILE_MAIN, HFI_VPX_LEVEL_VERSION_3}, + .num_pl = 4, + .fmts[0] = {HFI_BUFFER_INPUT, HFI_COLOR_FORMAT_NV12}, + .fmts[1] = {HFI_BUFFER_INPUT, HFI_COLOR_FORMAT_NV12_UBWC}, + .fmts[2] = {HFI_BUFFER_INPUT, HFI_COLOR_FORMAT_YUV420_TP10_UBWC}, + .fmts[3] = {HFI_BUFFER_INPUT, HFI_COLOR_FORMAT_P010}, + .num_fmts = 4, +} }; + +static const struct hfi_plat_caps *get_capabilities(unsigned int *entries) +{ + *entries = ARRAY_SIZE(caps); + return caps; +} + +static void get_codecs(u32 *enc_codecs, u32 *dec_codecs, u32 *count) +{ + *enc_codecs = HFI_VIDEO_CODEC_H264 | HFI_VIDEO_CODEC_HEVC | + HFI_VIDEO_CODEC_VP8; + *dec_codecs = HFI_VIDEO_CODEC_H264 | HFI_VIDEO_CODEC_HEVC | + HFI_VIDEO_CODEC_VP8 | HFI_VIDEO_CODEC_VP9 | + HFI_VIDEO_CODEC_MPEG2; + *count = 8; +} + +static const struct hfi_platform_codec_freq_data codec_freq_data[] = { + { V4L2_PIX_FMT_H264, VIDC_SESSION_TYPE_ENC, 675, 25, 320 }, + { V4L2_PIX_FMT_HEVC, VIDC_SESSION_TYPE_ENC, 675, 25, 320 }, + { V4L2_PIX_FMT_VP8, VIDC_SESSION_TYPE_ENC, 675, 60, 320 }, + { V4L2_PIX_FMT_MPEG2, VIDC_SESSION_TYPE_DEC, 200, 25, 200 }, + { V4L2_PIX_FMT_H264, VIDC_SESSION_TYPE_DEC, 200, 25, 200 }, + { V4L2_PIX_FMT_HEVC, VIDC_SESSION_TYPE_DEC, 200, 25, 200 }, + { V4L2_PIX_FMT_VP8, VIDC_SESSION_TYPE_DEC, 200, 60, 200 }, + { V4L2_PIX_FMT_VP9, VIDC_SESSION_TYPE_DEC, 200, 60, 200 }, +}; + +static const struct hfi_platform_codec_freq_data * +get_codec_freq_data(u32 session_type, u32 pixfmt) +{ + const struct hfi_platform_codec_freq_data *data = codec_freq_data; + unsigned int i, data_size = ARRAY_SIZE(codec_freq_data); + const struct hfi_platform_codec_freq_data *found = NULL; + + for (i = 0; i < data_size; i++) { + if (data[i].pixfmt == pixfmt && data[i].session_type == session_type) { + found = &data[i]; + break; + } + } + + return found; +} + +static unsigned long codec_vpp_freq(u32 session_type, u32 codec) +{ + const struct hfi_platform_codec_freq_data *data; + + data = get_codec_freq_data(session_type, codec); + 
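codec_freq_data above is keyed by (pixel format, session type) and read through a linear search that falls back to 0 when nothing matches. The sketch below models that lookup pattern with its own types; the table contents are illustrative, and treating the per-codec value as cycles per macroblock for the final load estimate is an assumption, not something the diff states.

/* Standalone model of a (pixfmt, session type) -> frequency-data lookup:
 * a linear search that returns NULL on a miss, with callers treating a
 * miss as 0. Names and numbers are illustrative.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

enum session_type { SESSION_ENC, SESSION_DEC };

struct codec_freq_data {
	uint32_t fourcc;		/* pixel format identifier */
	enum session_type type;
	unsigned long vpp_cycles;	/* assumed: cycles per macroblock */
};

#define FOURCC(a, b, c, d) \
	((uint32_t)(a) | ((uint32_t)(b) << 8) | ((uint32_t)(c) << 16) | ((uint32_t)(d) << 24))

static const struct codec_freq_data freq_data[] = {
	{ FOURCC('H', '2', '6', '4'), SESSION_ENC, 675 },
	{ FOURCC('H', '2', '6', '4'), SESSION_DEC, 200 },
};

static const struct codec_freq_data *find_freq_data(uint32_t fourcc,
						    enum session_type type)
{
	for (size_t i = 0; i < sizeof(freq_data) / sizeof(freq_data[0]); i++)
		if (freq_data[i].fourcc == fourcc && freq_data[i].type == type)
			return &freq_data[i];
	return NULL;	/* callers treat a miss as 0 */
}

int main(void)
{
	const struct codec_freq_data *d;
	unsigned long mbs_per_sec = (3840 / 16) * (2160 / 16) * 30;	/* 4K @ 30fps */

	d = find_freq_data(FOURCC('H', '2', '6', '4'), SESSION_ENC);
	if (d)
		printf("estimated load: %lu cycles/s\n", mbs_per_sec * d->vpp_cycles);
	return 0;
}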
if (data) + return data->vpp_freq; + + return 0; +} + +static unsigned long codec_vsp_freq(u32 session_type, u32 codec) +{ + const struct hfi_platform_codec_freq_data *data; + + data = get_codec_freq_data(session_type, codec); + if (data) + return data->vsp_freq; + + return 0; +} + +static unsigned long codec_lp_freq(u32 session_type, u32 codec) +{ + const struct hfi_platform_codec_freq_data *data; + + data = get_codec_freq_data(session_type, codec); + if (data) + return data->low_power_freq; + + return 0; +} + +const struct hfi_platform hfi_plat_v6 = { + .codec_vpp_freq = codec_vpp_freq, + .codec_vsp_freq = codec_vsp_freq, + .codec_lp_freq = codec_lp_freq, + .codecs = get_codecs, + .capabilities = get_capabilities, + .bufreq = hfi_plat_bufreq_v6, +}; diff --git a/drivers/media/platform/qcom/venus/hfi_venus.c b/drivers/media/platform/qcom/venus/hfi_venus.c new file mode 100644 index 0000000000..f9437b6412 --- /dev/null +++ b/drivers/media/platform/qcom/venus/hfi_venus.c @@ -0,0 +1,1759 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. + * Copyright (C) 2017 Linaro Ltd. + */ + +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/dma-mapping.h> +#include <linux/interrupt.h> +#include <linux/iopoll.h> +#include <linux/kernel.h> +#include <linux/slab.h> + +#include "core.h" +#include "hfi_cmds.h" +#include "hfi_msgs.h" +#include "hfi_venus.h" +#include "hfi_venus_io.h" +#include "firmware.h" + +#define HFI_MASK_QHDR_TX_TYPE 0xff000000 +#define HFI_MASK_QHDR_RX_TYPE 0x00ff0000 +#define HFI_MASK_QHDR_PRI_TYPE 0x0000ff00 +#define HFI_MASK_QHDR_ID_TYPE 0x000000ff + +#define HFI_HOST_TO_CTRL_CMD_Q 0 +#define HFI_CTRL_TO_HOST_MSG_Q 1 +#define HFI_CTRL_TO_HOST_DBG_Q 2 +#define HFI_MASK_QHDR_STATUS 0x000000ff + +#define IFACEQ_NUM 3 +#define IFACEQ_CMD_IDX 0 +#define IFACEQ_MSG_IDX 1 +#define IFACEQ_DBG_IDX 2 +#define IFACEQ_MAX_BUF_COUNT 50 +#define IFACEQ_MAX_PARALLEL_CLNTS 16 +#define IFACEQ_DFLT_QHDR 0x01010000 + +#define POLL_INTERVAL_US 50 + +#define IFACEQ_MAX_PKT_SIZE 1024 +#define IFACEQ_MED_PKT_SIZE 768 +#define IFACEQ_MIN_PKT_SIZE 8 +#define IFACEQ_VAR_SMALL_PKT_SIZE 100 +#define IFACEQ_VAR_LARGE_PKT_SIZE 512 +#define IFACEQ_VAR_HUGE_PKT_SIZE (1024 * 12) + +struct hfi_queue_table_header { + u32 version; + u32 size; + u32 qhdr0_offset; + u32 qhdr_size; + u32 num_q; + u32 num_active_q; +}; + +struct hfi_queue_header { + u32 status; + u32 start_addr; + u32 type; + u32 q_size; + u32 pkt_size; + u32 pkt_drop_cnt; + u32 rx_wm; + u32 tx_wm; + u32 rx_req; + u32 tx_req; + u32 rx_irq_status; + u32 tx_irq_status; + u32 read_idx; + u32 write_idx; +}; + +#define IFACEQ_TABLE_SIZE \ + (sizeof(struct hfi_queue_table_header) + \ + sizeof(struct hfi_queue_header) * IFACEQ_NUM) + +#define IFACEQ_QUEUE_SIZE (IFACEQ_MAX_PKT_SIZE * \ + IFACEQ_MAX_BUF_COUNT * IFACEQ_MAX_PARALLEL_CLNTS) + +#define IFACEQ_GET_QHDR_START_ADDR(ptr, i) \ + (void *)(((ptr) + sizeof(struct hfi_queue_table_header)) + \ + ((i) * sizeof(struct hfi_queue_header))) + +#define QDSS_SIZE SZ_4K +#define SFR_SIZE SZ_4K +#define QUEUE_SIZE \ + (IFACEQ_TABLE_SIZE + (IFACEQ_QUEUE_SIZE * IFACEQ_NUM)) + +#define ALIGNED_QDSS_SIZE ALIGN(QDSS_SIZE, SZ_4K) +#define ALIGNED_SFR_SIZE ALIGN(SFR_SIZE, SZ_4K) +#define ALIGNED_QUEUE_SIZE ALIGN(QUEUE_SIZE, SZ_4K) +#define SHARED_QSIZE ALIGN(ALIGNED_SFR_SIZE + ALIGNED_QUEUE_SIZE + \ + ALIGNED_QDSS_SIZE, SZ_1M) + +struct mem_desc { + dma_addr_t da; /* device address */ + void *kva; /* kernel virtual address */ + u32 size; + 
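The shared-memory footprint follows directly from the constants above: one queue table header plus three queue headers, three fixed-size queues, and 4K pages for the SFR and QDSS regions, all rounded up to 4K and then to 1M. The host-side sketch below recomputes those sizes with the same formulas so the layout can be inspected at a glance; only the print-out is new.

/* Recompute the shared interface-queue layout sizes using the same
 * constants and struct layouts as above.
 */
#include <stdint.h>
#include <stdio.h>

#define SZ_4K 4096u
#define SZ_1M (1024u * 1024u)
#define ALIGN_UP(x, a) (((x) + (a) - 1) / (a) * (a))

#define IFACEQ_NUM 3
#define IFACEQ_MAX_PKT_SIZE 1024
#define IFACEQ_MAX_BUF_COUNT 50
#define IFACEQ_MAX_PARALLEL_CLNTS 16

struct hfi_queue_table_header {
	uint32_t version, size, qhdr0_offset, qhdr_size, num_q, num_active_q;
};

struct hfi_queue_header {
	uint32_t status, start_addr, type, q_size, pkt_size, pkt_drop_cnt;
	uint32_t rx_wm, tx_wm, rx_req, tx_req, rx_irq_status, tx_irq_status;
	uint32_t read_idx, write_idx;
};

int main(void)
{
	size_t table = sizeof(struct hfi_queue_table_header) +
		       sizeof(struct hfi_queue_header) * IFACEQ_NUM;
	size_t queue = IFACEQ_MAX_PKT_SIZE * IFACEQ_MAX_BUF_COUNT *
		       IFACEQ_MAX_PARALLEL_CLNTS;
	size_t queues_total = table + queue * IFACEQ_NUM;
	size_t sfr = SZ_4K, qdss = SZ_4K;
	size_t shared = ALIGN_UP(ALIGN_UP(sfr, SZ_4K) +
				 ALIGN_UP(queues_total, SZ_4K) +
				 ALIGN_UP(qdss, SZ_4K), SZ_1M);

	printf("queue table: %zu bytes\n", table);
	printf("one queue:   %zu bytes\n", queue);
	printf("shared area: %zu bytes\n", shared);
	return 0;
}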
unsigned long attrs; +}; + +struct iface_queue { + struct hfi_queue_header *qhdr; + struct mem_desc qmem; +}; + +enum venus_state { + VENUS_STATE_DEINIT = 1, + VENUS_STATE_INIT, +}; + +struct venus_hfi_device { + struct venus_core *core; + u32 irq_status; + u32 last_packet_type; + bool power_enabled; + bool suspended; + enum venus_state state; + /* serialize read / write to the shared memory */ + struct mutex lock; + struct completion pwr_collapse_prep; + struct completion release_resource; + struct mem_desc ifaceq_table; + struct mem_desc sfr; + struct iface_queue queues[IFACEQ_NUM]; + u8 pkt_buf[IFACEQ_VAR_HUGE_PKT_SIZE]; + u8 dbg_buf[IFACEQ_VAR_HUGE_PKT_SIZE]; +}; + +static bool venus_pkt_debug; +int venus_fw_debug = HFI_DEBUG_MSG_ERROR | HFI_DEBUG_MSG_FATAL; +static bool venus_fw_low_power_mode = true; +static int venus_hw_rsp_timeout = 1000; +static bool venus_fw_coverage; + +static void venus_set_state(struct venus_hfi_device *hdev, + enum venus_state state) +{ + mutex_lock(&hdev->lock); + hdev->state = state; + mutex_unlock(&hdev->lock); +} + +static bool venus_is_valid_state(struct venus_hfi_device *hdev) +{ + return hdev->state != VENUS_STATE_DEINIT; +} + +static void venus_dump_packet(struct venus_hfi_device *hdev, const void *packet) +{ + size_t pkt_size = *(u32 *)packet; + + if (!venus_pkt_debug) + return; + + print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 16, 1, packet, + pkt_size, true); +} + +static int venus_write_queue(struct venus_hfi_device *hdev, + struct iface_queue *queue, + void *packet, u32 *rx_req) +{ + struct hfi_queue_header *qhdr; + u32 dwords, new_wr_idx; + u32 empty_space, rd_idx, wr_idx, qsize; + u32 *wr_ptr; + + if (!queue->qmem.kva) + return -EINVAL; + + qhdr = queue->qhdr; + if (!qhdr) + return -EINVAL; + + venus_dump_packet(hdev, packet); + + dwords = (*(u32 *)packet) >> 2; + if (!dwords) + return -EINVAL; + + rd_idx = qhdr->read_idx; + wr_idx = qhdr->write_idx; + qsize = qhdr->q_size; + /* ensure rd/wr indices's are read from memory */ + rmb(); + + if (wr_idx >= rd_idx) + empty_space = qsize - (wr_idx - rd_idx); + else + empty_space = rd_idx - wr_idx; + + if (empty_space <= dwords) { + qhdr->tx_req = 1; + /* ensure tx_req is updated in memory */ + wmb(); + return -ENOSPC; + } + + qhdr->tx_req = 0; + /* ensure tx_req is updated in memory */ + wmb(); + + new_wr_idx = wr_idx + dwords; + wr_ptr = (u32 *)(queue->qmem.kva + (wr_idx << 2)); + + if (wr_ptr < (u32 *)queue->qmem.kva || + wr_ptr > (u32 *)(queue->qmem.kva + queue->qmem.size - sizeof(*wr_ptr))) + return -EINVAL; + + if (new_wr_idx < qsize) { + memcpy(wr_ptr, packet, dwords << 2); + } else { + size_t len; + + new_wr_idx -= qsize; + len = (dwords - new_wr_idx) << 2; + memcpy(wr_ptr, packet, len); + memcpy(queue->qmem.kva, packet + len, new_wr_idx << 2); + } + + /* make sure packet is written before updating the write index */ + wmb(); + + qhdr->write_idx = new_wr_idx; + *rx_req = qhdr->rx_req ? 
1 : 0; + + /* make sure write index is updated before an interrupt is raised */ + mb(); + + return 0; +} + +static int venus_read_queue(struct venus_hfi_device *hdev, + struct iface_queue *queue, void *pkt, u32 *tx_req) +{ + struct hfi_queue_header *qhdr; + u32 dwords, new_rd_idx; + u32 rd_idx, wr_idx, type, qsize; + u32 *rd_ptr; + u32 recv_request = 0; + int ret = 0; + + if (!queue->qmem.kva) + return -EINVAL; + + qhdr = queue->qhdr; + if (!qhdr) + return -EINVAL; + + type = qhdr->type; + rd_idx = qhdr->read_idx; + wr_idx = qhdr->write_idx; + qsize = qhdr->q_size; + + /* make sure data is valid before using it */ + rmb(); + + /* + * Do not set receive request for debug queue, if set, Venus generates + * interrupt for debug messages even when there is no response message + * available. In general debug queue will not become full as it is being + * emptied out for every interrupt from Venus. Venus will anyway + * generates interrupt if it is full. + */ + if (type & HFI_CTRL_TO_HOST_MSG_Q) + recv_request = 1; + + if (rd_idx == wr_idx) { + qhdr->rx_req = recv_request; + *tx_req = 0; + /* update rx_req field in memory */ + wmb(); + return -ENODATA; + } + + rd_ptr = (u32 *)(queue->qmem.kva + (rd_idx << 2)); + + if (rd_ptr < (u32 *)queue->qmem.kva || + rd_ptr > (u32 *)(queue->qmem.kva + queue->qmem.size - sizeof(*rd_ptr))) + return -EINVAL; + + dwords = *rd_ptr >> 2; + if (!dwords) + return -EINVAL; + + new_rd_idx = rd_idx + dwords; + if (((dwords << 2) <= IFACEQ_VAR_HUGE_PKT_SIZE) && rd_idx <= qsize) { + if (new_rd_idx < qsize) { + memcpy(pkt, rd_ptr, dwords << 2); + } else { + size_t len; + + new_rd_idx -= qsize; + len = (dwords - new_rd_idx) << 2; + memcpy(pkt, rd_ptr, len); + memcpy(pkt + len, queue->qmem.kva, new_rd_idx << 2); + } + } else { + /* bad packet received, dropping */ + new_rd_idx = qhdr->write_idx; + ret = -EBADMSG; + } + + /* ensure the packet is read before updating read index */ + rmb(); + + qhdr->read_idx = new_rd_idx; + /* ensure updating read index */ + wmb(); + + rd_idx = qhdr->read_idx; + wr_idx = qhdr->write_idx; + /* ensure rd/wr indices are read from memory */ + rmb(); + + if (rd_idx != wr_idx) + qhdr->rx_req = 0; + else + qhdr->rx_req = recv_request; + + *tx_req = qhdr->tx_req ? 
1 : 0; + + /* ensure rx_req is stored to memory and tx_req is loaded from memory */ + mb(); + + venus_dump_packet(hdev, pkt); + + return ret; +} + +static int venus_alloc(struct venus_hfi_device *hdev, struct mem_desc *desc, + u32 size) +{ + struct device *dev = hdev->core->dev; + + desc->attrs = DMA_ATTR_WRITE_COMBINE; + desc->size = ALIGN(size, SZ_4K); + + desc->kva = dma_alloc_attrs(dev, desc->size, &desc->da, GFP_KERNEL, + desc->attrs); + if (!desc->kva) + return -ENOMEM; + + return 0; +} + +static void venus_free(struct venus_hfi_device *hdev, struct mem_desc *mem) +{ + struct device *dev = hdev->core->dev; + + dma_free_attrs(dev, mem->size, mem->kva, mem->da, mem->attrs); +} + +static void venus_set_registers(struct venus_hfi_device *hdev) +{ + const struct venus_resources *res = hdev->core->res; + const struct reg_val *tbl = res->reg_tbl; + unsigned int count = res->reg_tbl_size; + unsigned int i; + + for (i = 0; i < count; i++) + writel(tbl[i].value, hdev->core->base + tbl[i].reg); +} + +static void venus_soft_int(struct venus_hfi_device *hdev) +{ + void __iomem *cpu_ic_base = hdev->core->cpu_ic_base; + u32 clear_bit; + + if (IS_V6(hdev->core)) + clear_bit = BIT(CPU_IC_SOFTINT_H2A_SHIFT_V6); + else + clear_bit = BIT(CPU_IC_SOFTINT_H2A_SHIFT); + + writel(clear_bit, cpu_ic_base + CPU_IC_SOFTINT); +} + +static int venus_iface_cmdq_write_nolock(struct venus_hfi_device *hdev, + void *pkt, bool sync) +{ + struct device *dev = hdev->core->dev; + struct hfi_pkt_hdr *cmd_packet; + struct iface_queue *queue; + u32 rx_req; + int ret; + + if (!venus_is_valid_state(hdev)) + return -EINVAL; + + cmd_packet = (struct hfi_pkt_hdr *)pkt; + hdev->last_packet_type = cmd_packet->pkt_type; + + queue = &hdev->queues[IFACEQ_CMD_IDX]; + + ret = venus_write_queue(hdev, queue, pkt, &rx_req); + if (ret) { + dev_err(dev, "write to iface cmd queue failed (%d)\n", ret); + return ret; + } + + if (sync) { + /* + * Inform video hardware to raise interrupt for synchronous + * commands + */ + queue = &hdev->queues[IFACEQ_MSG_IDX]; + queue->qhdr->rx_req = 1; + /* ensure rx_req is updated in memory */ + wmb(); + } + + if (rx_req) + venus_soft_int(hdev); + + return 0; +} + +static int venus_iface_cmdq_write(struct venus_hfi_device *hdev, void *pkt, bool sync) +{ + int ret; + + mutex_lock(&hdev->lock); + ret = venus_iface_cmdq_write_nolock(hdev, pkt, sync); + mutex_unlock(&hdev->lock); + + return ret; +} + +static int venus_hfi_core_set_resource(struct venus_core *core, u32 id, + u32 size, u32 addr, void *cookie) +{ + struct venus_hfi_device *hdev = to_hfi_priv(core); + struct hfi_sys_set_resource_pkt *pkt; + u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE]; + int ret; + + if (id == VIDC_RESOURCE_NONE) + return 0; + + pkt = (struct hfi_sys_set_resource_pkt *)packet; + + ret = pkt_sys_set_resource(pkt, id, size, addr, cookie); + if (ret) + return ret; + + ret = venus_iface_cmdq_write(hdev, pkt, false); + if (ret) + return ret; + + return 0; +} + +static int venus_boot_core(struct venus_hfi_device *hdev) +{ + struct device *dev = hdev->core->dev; + static const unsigned int max_tries = 100; + u32 ctrl_status = 0, mask_val = 0; + unsigned int count = 0; + void __iomem *cpu_cs_base = hdev->core->cpu_cs_base; + void __iomem *wrapper_base = hdev->core->wrapper_base; + int ret = 0; + + if (IS_IRIS2(hdev->core) || IS_IRIS2_1(hdev->core)) { + mask_val = readl(wrapper_base + WRAPPER_INTR_MASK); + mask_val &= ~(WRAPPER_INTR_MASK_A2HWD_BASK_V6 | + WRAPPER_INTR_MASK_A2HCPU_MASK); + } else { + mask_val = WRAPPER_INTR_MASK_A2HVCODEC_MASK; + } + + 
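venus_write_queue() above treats the shared queue as a ring of 32-bit words: it takes the packet size from the packet's first dword, keeps at least one word free so that read_idx == write_idx always means empty, and splits a packet that crosses the end of the ring into two copies. The userspace model below reproduces that index arithmetic with arbitrary test values; it deliberately leaves out the queue header, memory barriers and doorbell that the real shared queue needs.

/* Host-side model of the wrap-around write into a dword ring buffer.
 * Queue size and packet contents are arbitrary test values.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define QSIZE_DWORDS 8	/* deliberately tiny so the wrap case is easy to hit */

struct ring {
	uint32_t buf[QSIZE_DWORDS];
	uint32_t rd, wr;	/* indices in dwords */
};

static int ring_write(struct ring *r, const uint32_t *pkt, uint32_t dwords)
{
	uint32_t empty, new_wr;

	if (r->wr >= r->rd)
		empty = QSIZE_DWORDS - (r->wr - r->rd);
	else
		empty = r->rd - r->wr;

	if (empty <= dwords)	/* <=, not <: one slot always stays free */
		return -1;

	new_wr = r->wr + dwords;
	if (new_wr < QSIZE_DWORDS) {
		memcpy(&r->buf[r->wr], pkt, dwords * 4);
	} else {
		uint32_t first = dwords - (new_wr - QSIZE_DWORDS);

		new_wr -= QSIZE_DWORDS;
		memcpy(&r->buf[r->wr], pkt, first * 4);	/* tail of the ring */
		memcpy(r->buf, pkt + first, new_wr * 4);	/* wrap to the start */
	}
	r->wr = new_wr;
	return 0;
}

int main(void)
{
	struct ring r = { .rd = 6, .wr = 6 };	/* force a wrap on the next write */
	uint32_t pkt[4] = { 16, 1, 2, 3 };	/* first dword = packet size in bytes */

	printf("write: %d, new wr=%u\n",
	       ring_write(&r, pkt, pkt[0] / 4), (unsigned)r.wr);
	return 0;
}

The same split-copy arithmetic, with the roles of the two indices swapped, is what venus_read_queue() above applies on the receive side.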
writel(mask_val, wrapper_base + WRAPPER_INTR_MASK); + if (IS_V1(hdev->core)) + writel(1, cpu_cs_base + CPU_CS_SCIACMDARG3); + + writel(BIT(VIDC_CTRL_INIT_CTRL_SHIFT), cpu_cs_base + VIDC_CTRL_INIT); + while (!ctrl_status && count < max_tries) { + ctrl_status = readl(cpu_cs_base + CPU_CS_SCIACMDARG0); + if ((ctrl_status & CPU_CS_SCIACMDARG0_ERROR_STATUS_MASK) == 4) { + dev_err(dev, "invalid setting for UC_REGION\n"); + ret = -EINVAL; + break; + } + + usleep_range(500, 1000); + count++; + } + + if (count >= max_tries) + ret = -ETIMEDOUT; + + if (IS_IRIS2(hdev->core) || IS_IRIS2_1(hdev->core)) { + writel(0x1, cpu_cs_base + CPU_CS_H2XSOFTINTEN_V6); + writel(0x0, cpu_cs_base + CPU_CS_X2RPMH_V6); + } + + return ret; +} + +static u32 venus_hwversion(struct venus_hfi_device *hdev) +{ + struct device *dev = hdev->core->dev; + void __iomem *wrapper_base = hdev->core->wrapper_base; + u32 ver; + u32 major, minor, step; + + ver = readl(wrapper_base + WRAPPER_HW_VERSION); + major = ver & WRAPPER_HW_VERSION_MAJOR_VERSION_MASK; + major = major >> WRAPPER_HW_VERSION_MAJOR_VERSION_SHIFT; + minor = ver & WRAPPER_HW_VERSION_MINOR_VERSION_MASK; + minor = minor >> WRAPPER_HW_VERSION_MINOR_VERSION_SHIFT; + step = ver & WRAPPER_HW_VERSION_STEP_VERSION_MASK; + + dev_dbg(dev, VDBGL "venus hw version %x.%x.%x\n", major, minor, step); + + return major; +} + +static int venus_run(struct venus_hfi_device *hdev) +{ + struct device *dev = hdev->core->dev; + void __iomem *cpu_cs_base = hdev->core->cpu_cs_base; + int ret; + + /* + * Re-program all of the registers that get reset as a result of + * regulator_disable() and _enable() + */ + venus_set_registers(hdev); + + writel(hdev->ifaceq_table.da, cpu_cs_base + UC_REGION_ADDR); + writel(SHARED_QSIZE, cpu_cs_base + UC_REGION_SIZE); + writel(hdev->ifaceq_table.da, cpu_cs_base + CPU_CS_SCIACMDARG2); + writel(0x01, cpu_cs_base + CPU_CS_SCIACMDARG1); + if (hdev->sfr.da) + writel(hdev->sfr.da, cpu_cs_base + SFR_ADDR); + + ret = venus_boot_core(hdev); + if (ret) { + dev_err(dev, "failed to reset venus core\n"); + return ret; + } + + venus_hwversion(hdev); + + return 0; +} + +static int venus_halt_axi(struct venus_hfi_device *hdev) +{ + void __iomem *wrapper_base = hdev->core->wrapper_base; + void __iomem *vbif_base = hdev->core->vbif_base; + void __iomem *cpu_cs_base = hdev->core->cpu_cs_base; + void __iomem *aon_base = hdev->core->aon_base; + struct device *dev = hdev->core->dev; + u32 val; + u32 mask_val; + int ret; + + if (IS_IRIS2(hdev->core) || IS_IRIS2_1(hdev->core)) { + writel(0x3, cpu_cs_base + CPU_CS_X2RPMH_V6); + + if (IS_IRIS2_1(hdev->core)) + goto skip_aon_mvp_noc; + + writel(0x1, aon_base + AON_WRAPPER_MVP_NOC_LPI_CONTROL); + ret = readl_poll_timeout(aon_base + AON_WRAPPER_MVP_NOC_LPI_STATUS, + val, + val & BIT(0), + POLL_INTERVAL_US, + VBIF_AXI_HALT_ACK_TIMEOUT_US); + if (ret) + return -ETIMEDOUT; + +skip_aon_mvp_noc: + mask_val = (BIT(2) | BIT(1) | BIT(0)); + writel(mask_val, wrapper_base + WRAPPER_DEBUG_BRIDGE_LPI_CONTROL_V6); + + writel(0x00, wrapper_base + WRAPPER_DEBUG_BRIDGE_LPI_CONTROL_V6); + ret = readl_poll_timeout(wrapper_base + WRAPPER_DEBUG_BRIDGE_LPI_STATUS_V6, + val, + val == 0, + POLL_INTERVAL_US, + VBIF_AXI_HALT_ACK_TIMEOUT_US); + + if (ret) { + dev_err(dev, "DBLP Release: lpi_status %x\n", val); + return -ETIMEDOUT; + } + return 0; + } + + if (IS_V4(hdev->core)) { + val = readl(wrapper_base + WRAPPER_CPU_AXI_HALT); + val |= WRAPPER_CPU_AXI_HALT_HALT; + writel(val, wrapper_base + WRAPPER_CPU_AXI_HALT); + + ret = readl_poll_timeout(wrapper_base + 
WRAPPER_CPU_AXI_HALT_STATUS, + val, + val & WRAPPER_CPU_AXI_HALT_STATUS_IDLE, + POLL_INTERVAL_US, + VBIF_AXI_HALT_ACK_TIMEOUT_US); + if (ret) { + dev_err(dev, "AXI bus port halt timeout\n"); + return ret; + } + + return 0; + } + + /* Halt AXI and AXI IMEM VBIF Access */ + val = readl(vbif_base + VBIF_AXI_HALT_CTRL0); + val |= VBIF_AXI_HALT_CTRL0_HALT_REQ; + writel(val, vbif_base + VBIF_AXI_HALT_CTRL0); + + /* Request for AXI bus port halt */ + ret = readl_poll_timeout(vbif_base + VBIF_AXI_HALT_CTRL1, val, + val & VBIF_AXI_HALT_CTRL1_HALT_ACK, + POLL_INTERVAL_US, + VBIF_AXI_HALT_ACK_TIMEOUT_US); + if (ret) { + dev_err(dev, "AXI bus port halt timeout\n"); + return ret; + } + + return 0; +} + +static int venus_power_off(struct venus_hfi_device *hdev) +{ + int ret; + + if (!hdev->power_enabled) + return 0; + + ret = venus_set_hw_state_suspend(hdev->core); + if (ret) + return ret; + + ret = venus_halt_axi(hdev); + if (ret) + return ret; + + hdev->power_enabled = false; + + return 0; +} + +static int venus_power_on(struct venus_hfi_device *hdev) +{ + int ret; + + if (hdev->power_enabled) + return 0; + + ret = venus_set_hw_state_resume(hdev->core); + if (ret) + goto err; + + ret = venus_run(hdev); + if (ret) + goto err_suspend; + + hdev->power_enabled = true; + + return 0; + +err_suspend: + venus_set_hw_state_suspend(hdev->core); +err: + hdev->power_enabled = false; + return ret; +} + +static int venus_iface_msgq_read_nolock(struct venus_hfi_device *hdev, + void *pkt) +{ + struct iface_queue *queue; + u32 tx_req; + int ret; + + if (!venus_is_valid_state(hdev)) + return -EINVAL; + + queue = &hdev->queues[IFACEQ_MSG_IDX]; + + ret = venus_read_queue(hdev, queue, pkt, &tx_req); + if (ret) + return ret; + + if (tx_req) + venus_soft_int(hdev); + + return 0; +} + +static int venus_iface_msgq_read(struct venus_hfi_device *hdev, void *pkt) +{ + int ret; + + mutex_lock(&hdev->lock); + ret = venus_iface_msgq_read_nolock(hdev, pkt); + mutex_unlock(&hdev->lock); + + return ret; +} + +static int venus_iface_dbgq_read_nolock(struct venus_hfi_device *hdev, + void *pkt) +{ + struct iface_queue *queue; + u32 tx_req; + int ret; + + ret = venus_is_valid_state(hdev); + if (!ret) + return -EINVAL; + + queue = &hdev->queues[IFACEQ_DBG_IDX]; + + ret = venus_read_queue(hdev, queue, pkt, &tx_req); + if (ret) + return ret; + + if (tx_req) + venus_soft_int(hdev); + + return 0; +} + +static int venus_iface_dbgq_read(struct venus_hfi_device *hdev, void *pkt) +{ + int ret; + + if (!pkt) + return -EINVAL; + + mutex_lock(&hdev->lock); + ret = venus_iface_dbgq_read_nolock(hdev, pkt); + mutex_unlock(&hdev->lock); + + return ret; +} + +static void venus_set_qhdr_defaults(struct hfi_queue_header *qhdr) +{ + qhdr->status = 1; + qhdr->type = IFACEQ_DFLT_QHDR; + qhdr->q_size = IFACEQ_QUEUE_SIZE / 4; + qhdr->pkt_size = 0; + qhdr->rx_wm = 1; + qhdr->tx_wm = 1; + qhdr->rx_req = 1; + qhdr->tx_req = 0; + qhdr->rx_irq_status = 0; + qhdr->tx_irq_status = 0; + qhdr->read_idx = 0; + qhdr->write_idx = 0; +} + +static void venus_interface_queues_release(struct venus_hfi_device *hdev) +{ + mutex_lock(&hdev->lock); + + venus_free(hdev, &hdev->ifaceq_table); + venus_free(hdev, &hdev->sfr); + + memset(hdev->queues, 0, sizeof(hdev->queues)); + memset(&hdev->ifaceq_table, 0, sizeof(hdev->ifaceq_table)); + memset(&hdev->sfr, 0, sizeof(hdev->sfr)); + + mutex_unlock(&hdev->lock); +} + +static int venus_interface_queues_init(struct venus_hfi_device *hdev) +{ + struct hfi_queue_table_header *tbl_hdr; + struct iface_queue *queue; + struct hfi_sfr *sfr; + 
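venus_boot_core() above polls the control/status register until the firmware reports a non-zero status, treats an error code of 4 as a fatal UC_REGION misconfiguration, and gives up after 100 tries of roughly half a millisecond to a millisecond each. The sketch below models that poll-with-timeout pattern against a simulated register; the function names and the simulated behaviour are illustrative only.

/* Userspace model of "poll a status register until ready, bail out on a
 * fatal error code, time out after a bounded number of tries".
 */
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define MAX_TRIES 100
#define ERROR_STATUS_MASK 0xfe	/* bits [7:1] of the control/status word */
#define FATAL_ERROR_CODE 4	/* e.g. a bad UC region setting */

/* Simulated hardware: reports "ready" (non-zero) after a few polls. */
static uint32_t read_ctrl_status(void)
{
	static int polls;

	return (++polls >= 3) ? 0x1 : 0x0;
}

static int wait_for_boot(void)
{
	uint32_t status = 0;
	unsigned int count = 0;

	while (!status && count < MAX_TRIES) {
		status = read_ctrl_status();
		if ((status & ERROR_STATUS_MASK) == FATAL_ERROR_CODE)
			return -1;	/* firmware reported a fatal error */
		usleep(1000);		/* the driver sleeps 500-1000us per try */
		count++;
	}

	return (count >= MAX_TRIES) ? -2 : 0;	/* -2: timed out */
}

int main(void)
{
	printf("boot wait result: %d\n", wait_for_boot());
	return 0;
}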
struct mem_desc desc = {0}; + unsigned int offset; + unsigned int i; + int ret; + + ret = venus_alloc(hdev, &desc, ALIGNED_QUEUE_SIZE); + if (ret) + return ret; + + hdev->ifaceq_table = desc; + offset = IFACEQ_TABLE_SIZE; + + for (i = 0; i < IFACEQ_NUM; i++) { + queue = &hdev->queues[i]; + queue->qmem.da = desc.da + offset; + queue->qmem.kva = desc.kva + offset; + queue->qmem.size = IFACEQ_QUEUE_SIZE; + offset += queue->qmem.size; + queue->qhdr = + IFACEQ_GET_QHDR_START_ADDR(hdev->ifaceq_table.kva, i); + + venus_set_qhdr_defaults(queue->qhdr); + + queue->qhdr->start_addr = queue->qmem.da; + + if (i == IFACEQ_CMD_IDX) + queue->qhdr->type |= HFI_HOST_TO_CTRL_CMD_Q; + else if (i == IFACEQ_MSG_IDX) + queue->qhdr->type |= HFI_CTRL_TO_HOST_MSG_Q; + else if (i == IFACEQ_DBG_IDX) + queue->qhdr->type |= HFI_CTRL_TO_HOST_DBG_Q; + } + + tbl_hdr = hdev->ifaceq_table.kva; + tbl_hdr->version = 0; + tbl_hdr->size = IFACEQ_TABLE_SIZE; + tbl_hdr->qhdr0_offset = sizeof(struct hfi_queue_table_header); + tbl_hdr->qhdr_size = sizeof(struct hfi_queue_header); + tbl_hdr->num_q = IFACEQ_NUM; + tbl_hdr->num_active_q = IFACEQ_NUM; + + /* + * Set receive request to zero on debug queue as there is no + * need of interrupt from video hardware for debug messages + */ + queue = &hdev->queues[IFACEQ_DBG_IDX]; + queue->qhdr->rx_req = 0; + + ret = venus_alloc(hdev, &desc, ALIGNED_SFR_SIZE); + if (ret) { + hdev->sfr.da = 0; + } else { + hdev->sfr = desc; + sfr = hdev->sfr.kva; + sfr->buf_size = ALIGNED_SFR_SIZE; + } + + /* ensure table and queue header structs are settled in memory */ + wmb(); + + return 0; +} + +static int venus_sys_set_debug(struct venus_hfi_device *hdev, u32 debug) +{ + struct hfi_sys_set_property_pkt *pkt; + u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE]; + + pkt = (struct hfi_sys_set_property_pkt *)packet; + + pkt_sys_debug_config(pkt, HFI_DEBUG_MODE_QUEUE, debug); + + return venus_iface_cmdq_write(hdev, pkt, false); +} + +static int venus_sys_set_coverage(struct venus_hfi_device *hdev, u32 mode) +{ + struct hfi_sys_set_property_pkt *pkt; + u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE]; + + pkt = (struct hfi_sys_set_property_pkt *)packet; + + pkt_sys_coverage_config(pkt, mode); + + return venus_iface_cmdq_write(hdev, pkt, false); +} + +static int venus_sys_set_idle_message(struct venus_hfi_device *hdev, + bool enable) +{ + struct hfi_sys_set_property_pkt *pkt; + u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE]; + + if (!enable) + return 0; + + pkt = (struct hfi_sys_set_property_pkt *)packet; + + pkt_sys_idle_indicator(pkt, enable); + + return venus_iface_cmdq_write(hdev, pkt, false); +} + +static int venus_sys_set_power_control(struct venus_hfi_device *hdev, + bool enable) +{ + struct hfi_sys_set_property_pkt *pkt; + u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE]; + + pkt = (struct hfi_sys_set_property_pkt *)packet; + + pkt_sys_power_control(pkt, enable); + + return venus_iface_cmdq_write(hdev, pkt, false); +} + +static int venus_sys_set_ubwc_config(struct venus_hfi_device *hdev) +{ + struct hfi_sys_set_property_pkt *pkt; + u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE]; + const struct venus_resources *res = hdev->core->res; + int ret; + + pkt = (struct hfi_sys_set_property_pkt *)packet; + + pkt_sys_ubwc_config(pkt, res->ubwc_conf); + + ret = venus_iface_cmdq_write(hdev, pkt, false); + if (ret) + return ret; + + return 0; +} + +static int venus_get_queue_size(struct venus_hfi_device *hdev, + unsigned int index) +{ + struct hfi_queue_header *qhdr; + + if (index >= IFACEQ_NUM) + return -EINVAL; + + qhdr = hdev->queues[index].qhdr; + if (!qhdr) + return 
-EINVAL; + + return abs(qhdr->read_idx - qhdr->write_idx); +} + +static int venus_sys_set_default_properties(struct venus_hfi_device *hdev) +{ + struct device *dev = hdev->core->dev; + const struct venus_resources *res = hdev->core->res; + int ret; + + ret = venus_sys_set_debug(hdev, venus_fw_debug); + if (ret) + dev_warn(dev, "setting fw debug msg ON failed (%d)\n", ret); + + /* HFI_PROPERTY_SYS_IDLE_INDICATOR is not supported beyond 8916 (HFI V1) */ + if (IS_V1(hdev->core)) { + ret = venus_sys_set_idle_message(hdev, false); + if (ret) + dev_warn(dev, "setting idle response ON failed (%d)\n", ret); + } + + ret = venus_sys_set_power_control(hdev, venus_fw_low_power_mode); + if (ret) + dev_warn(dev, "setting hw power collapse ON failed (%d)\n", + ret); + + /* For specific venus core, it is mandatory to set the UBWC configuration */ + if (res->ubwc_conf) { + ret = venus_sys_set_ubwc_config(hdev); + if (ret) + dev_warn(dev, "setting ubwc config failed (%d)\n", ret); + } + + return ret; +} + +static int venus_session_cmd(struct venus_inst *inst, u32 pkt_type, bool sync) +{ + struct venus_hfi_device *hdev = to_hfi_priv(inst->core); + struct hfi_session_pkt pkt; + + pkt_session_cmd(&pkt, pkt_type, inst); + + return venus_iface_cmdq_write(hdev, &pkt, sync); +} + +static void venus_flush_debug_queue(struct venus_hfi_device *hdev) +{ + struct device *dev = hdev->core->dev; + void *packet = hdev->dbg_buf; + + while (!venus_iface_dbgq_read(hdev, packet)) { + struct hfi_msg_sys_coverage_pkt *pkt = packet; + + if (pkt->hdr.pkt_type != HFI_MSG_SYS_COV) { + struct hfi_msg_sys_debug_pkt *pkt = packet; + + dev_dbg(dev, VDBGFW "%s", pkt->msg_data); + } + } +} + +static int venus_prepare_power_collapse(struct venus_hfi_device *hdev, + bool wait) +{ + unsigned long timeout = msecs_to_jiffies(venus_hw_rsp_timeout); + struct hfi_sys_pc_prep_pkt pkt; + int ret; + + init_completion(&hdev->pwr_collapse_prep); + + pkt_sys_pc_prep(&pkt); + + ret = venus_iface_cmdq_write(hdev, &pkt, false); + if (ret) + return ret; + + if (!wait) + return 0; + + ret = wait_for_completion_timeout(&hdev->pwr_collapse_prep, timeout); + if (!ret) { + venus_flush_debug_queue(hdev); + return -ETIMEDOUT; + } + + return 0; +} + +static int venus_are_queues_empty(struct venus_hfi_device *hdev) +{ + int ret1, ret2; + + ret1 = venus_get_queue_size(hdev, IFACEQ_MSG_IDX); + if (ret1 < 0) + return ret1; + + ret2 = venus_get_queue_size(hdev, IFACEQ_CMD_IDX); + if (ret2 < 0) + return ret2; + + if (!ret1 && !ret2) + return 1; + + return 0; +} + +static void venus_sfr_print(struct venus_hfi_device *hdev) +{ + struct device *dev = hdev->core->dev; + struct hfi_sfr *sfr = hdev->sfr.kva; + void *p; + + if (!sfr) + return; + + p = memchr(sfr->data, '\0', sfr->buf_size); + /* + * SFR isn't guaranteed to be NULL terminated since SYS_ERROR indicates + * that Venus is in the process of crashing. 
+ */ + if (!p) + sfr->data[sfr->buf_size - 1] = '\0'; + + dev_err_ratelimited(dev, "SFR message from FW: %s\n", sfr->data); +} + +static void venus_process_msg_sys_error(struct venus_hfi_device *hdev, + void *packet) +{ + struct hfi_msg_event_notify_pkt *event_pkt = packet; + + if (event_pkt->event_id != HFI_EVENT_SYS_ERROR) + return; + + venus_set_state(hdev, VENUS_STATE_DEINIT); + + venus_sfr_print(hdev); +} + +static irqreturn_t venus_isr_thread(struct venus_core *core) +{ + struct venus_hfi_device *hdev = to_hfi_priv(core); + const struct venus_resources *res; + void *pkt; + u32 msg_ret; + + if (!hdev) + return IRQ_NONE; + + res = hdev->core->res; + pkt = hdev->pkt_buf; + + + while (!venus_iface_msgq_read(hdev, pkt)) { + msg_ret = hfi_process_msg_packet(core, pkt); + switch (msg_ret) { + case HFI_MSG_EVENT_NOTIFY: + venus_process_msg_sys_error(hdev, pkt); + break; + case HFI_MSG_SYS_INIT: + venus_hfi_core_set_resource(core, res->vmem_id, + res->vmem_size, + res->vmem_addr, + hdev); + break; + case HFI_MSG_SYS_RELEASE_RESOURCE: + complete(&hdev->release_resource); + break; + case HFI_MSG_SYS_PC_PREP: + complete(&hdev->pwr_collapse_prep); + break; + default: + break; + } + } + + venus_flush_debug_queue(hdev); + + return IRQ_HANDLED; +} + +static irqreturn_t venus_isr(struct venus_core *core) +{ + struct venus_hfi_device *hdev = to_hfi_priv(core); + u32 status; + void __iomem *cpu_cs_base; + void __iomem *wrapper_base; + + if (!hdev) + return IRQ_NONE; + + cpu_cs_base = hdev->core->cpu_cs_base; + wrapper_base = hdev->core->wrapper_base; + + status = readl(wrapper_base + WRAPPER_INTR_STATUS); + if (IS_IRIS2(core) || IS_IRIS2_1(core)) { + if (status & WRAPPER_INTR_STATUS_A2H_MASK || + status & WRAPPER_INTR_STATUS_A2HWD_MASK_V6 || + status & CPU_CS_SCIACMDARG0_INIT_IDLE_MSG_MASK) + hdev->irq_status = status; + } else { + if (status & WRAPPER_INTR_STATUS_A2H_MASK || + status & WRAPPER_INTR_STATUS_A2HWD_MASK || + status & CPU_CS_SCIACMDARG0_INIT_IDLE_MSG_MASK) + hdev->irq_status = status; + } + writel(1, cpu_cs_base + CPU_CS_A2HSOFTINTCLR); + if (!(IS_IRIS2(core) || IS_IRIS2_1(core))) + writel(status, wrapper_base + WRAPPER_INTR_CLEAR); + + return IRQ_WAKE_THREAD; +} + +static int venus_core_init(struct venus_core *core) +{ + struct venus_hfi_device *hdev = to_hfi_priv(core); + struct device *dev = core->dev; + struct hfi_sys_get_property_pkt version_pkt; + struct hfi_sys_init_pkt pkt; + int ret; + + pkt_sys_init(&pkt, HFI_VIDEO_ARCH_OX); + + venus_set_state(hdev, VENUS_STATE_INIT); + + ret = venus_iface_cmdq_write(hdev, &pkt, false); + if (ret) + return ret; + + pkt_sys_image_version(&version_pkt); + + ret = venus_iface_cmdq_write(hdev, &version_pkt, false); + if (ret) + dev_warn(dev, "failed to send image version pkt to fw\n"); + + ret = venus_sys_set_default_properties(hdev); + if (ret) + return ret; + + return 0; +} + +static int venus_core_deinit(struct venus_core *core) +{ + struct venus_hfi_device *hdev = to_hfi_priv(core); + + venus_set_state(hdev, VENUS_STATE_DEINIT); + hdev->suspended = true; + hdev->power_enabled = false; + + return 0; +} + +static int venus_core_ping(struct venus_core *core, u32 cookie) +{ + struct venus_hfi_device *hdev = to_hfi_priv(core); + struct hfi_sys_ping_pkt pkt; + + pkt_sys_ping(&pkt, cookie); + + return venus_iface_cmdq_write(hdev, &pkt, false); +} + +static int venus_core_trigger_ssr(struct venus_core *core, u32 trigger_type) +{ + struct venus_hfi_device *hdev = to_hfi_priv(core); + struct hfi_sys_test_ssr_pkt pkt; + int ret; + + ret = 
pkt_sys_ssr_cmd(&pkt, trigger_type); + if (ret) + return ret; + + return venus_iface_cmdq_write(hdev, &pkt, false); +} + +static int venus_session_init(struct venus_inst *inst, u32 session_type, + u32 codec) +{ + struct venus_hfi_device *hdev = to_hfi_priv(inst->core); + struct hfi_session_init_pkt pkt; + int ret; + + ret = venus_sys_set_debug(hdev, venus_fw_debug); + if (ret) + goto err; + + ret = pkt_session_init(&pkt, inst, session_type, codec); + if (ret) + goto err; + + ret = venus_iface_cmdq_write(hdev, &pkt, true); + if (ret) + goto err; + + return 0; + +err: + venus_flush_debug_queue(hdev); + return ret; +} + +static int venus_session_end(struct venus_inst *inst) +{ + struct venus_hfi_device *hdev = to_hfi_priv(inst->core); + struct device *dev = hdev->core->dev; + + if (venus_fw_coverage) { + if (venus_sys_set_coverage(hdev, venus_fw_coverage)) + dev_warn(dev, "fw coverage msg ON failed\n"); + } + + return venus_session_cmd(inst, HFI_CMD_SYS_SESSION_END, true); +} + +static int venus_session_abort(struct venus_inst *inst) +{ + struct venus_hfi_device *hdev = to_hfi_priv(inst->core); + + venus_flush_debug_queue(hdev); + + return venus_session_cmd(inst, HFI_CMD_SYS_SESSION_ABORT, true); +} + +static int venus_session_flush(struct venus_inst *inst, u32 flush_mode) +{ + struct venus_hfi_device *hdev = to_hfi_priv(inst->core); + struct hfi_session_flush_pkt pkt; + int ret; + + ret = pkt_session_flush(&pkt, inst, flush_mode); + if (ret) + return ret; + + return venus_iface_cmdq_write(hdev, &pkt, true); +} + +static int venus_session_start(struct venus_inst *inst) +{ + return venus_session_cmd(inst, HFI_CMD_SESSION_START, true); +} + +static int venus_session_stop(struct venus_inst *inst) +{ + return venus_session_cmd(inst, HFI_CMD_SESSION_STOP, true); +} + +static int venus_session_continue(struct venus_inst *inst) +{ + return venus_session_cmd(inst, HFI_CMD_SESSION_CONTINUE, false); +} + +static int venus_session_etb(struct venus_inst *inst, + struct hfi_frame_data *in_frame) +{ + struct venus_hfi_device *hdev = to_hfi_priv(inst->core); + u32 session_type = inst->session_type; + int ret; + + if (session_type == VIDC_SESSION_TYPE_DEC) { + struct hfi_session_empty_buffer_compressed_pkt pkt; + + ret = pkt_session_etb_decoder(&pkt, inst, in_frame); + if (ret) + return ret; + + ret = venus_iface_cmdq_write(hdev, &pkt, false); + } else if (session_type == VIDC_SESSION_TYPE_ENC) { + struct hfi_session_empty_buffer_uncompressed_plane0_pkt pkt; + + ret = pkt_session_etb_encoder(&pkt, inst, in_frame); + if (ret) + return ret; + + ret = venus_iface_cmdq_write(hdev, &pkt, false); + } else { + ret = -EINVAL; + } + + return ret; +} + +static int venus_session_ftb(struct venus_inst *inst, + struct hfi_frame_data *out_frame) +{ + struct venus_hfi_device *hdev = to_hfi_priv(inst->core); + struct hfi_session_fill_buffer_pkt pkt; + int ret; + + ret = pkt_session_ftb(&pkt, inst, out_frame); + if (ret) + return ret; + + return venus_iface_cmdq_write(hdev, &pkt, false); +} + +static int venus_session_set_buffers(struct venus_inst *inst, + struct hfi_buffer_desc *bd) +{ + struct venus_hfi_device *hdev = to_hfi_priv(inst->core); + struct hfi_session_set_buffers_pkt *pkt; + u8 packet[IFACEQ_VAR_LARGE_PKT_SIZE]; + int ret; + + if (bd->buffer_type == HFI_BUFFER_INPUT) + return 0; + + pkt = (struct hfi_session_set_buffers_pkt *)packet; + + ret = pkt_session_set_buffers(pkt, inst, bd); + if (ret) + return ret; + + return venus_iface_cmdq_write(hdev, pkt, false); +} + +static int venus_session_unset_buffers(struct 
venus_inst *inst, + struct hfi_buffer_desc *bd) +{ + struct venus_hfi_device *hdev = to_hfi_priv(inst->core); + struct hfi_session_release_buffer_pkt *pkt; + u8 packet[IFACEQ_VAR_LARGE_PKT_SIZE]; + int ret; + + if (bd->buffer_type == HFI_BUFFER_INPUT) + return 0; + + pkt = (struct hfi_session_release_buffer_pkt *)packet; + + ret = pkt_session_unset_buffers(pkt, inst, bd); + if (ret) + return ret; + + return venus_iface_cmdq_write(hdev, pkt, true); +} + +static int venus_session_load_res(struct venus_inst *inst) +{ + return venus_session_cmd(inst, HFI_CMD_SESSION_LOAD_RESOURCES, true); +} + +static int venus_session_release_res(struct venus_inst *inst) +{ + return venus_session_cmd(inst, HFI_CMD_SESSION_RELEASE_RESOURCES, true); +} + +static int venus_session_parse_seq_hdr(struct venus_inst *inst, u32 seq_hdr, + u32 seq_hdr_len) +{ + struct venus_hfi_device *hdev = to_hfi_priv(inst->core); + struct hfi_session_parse_sequence_header_pkt *pkt; + u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE]; + int ret; + + pkt = (struct hfi_session_parse_sequence_header_pkt *)packet; + + ret = pkt_session_parse_seq_header(pkt, inst, seq_hdr, seq_hdr_len); + if (ret) + return ret; + + ret = venus_iface_cmdq_write(hdev, pkt, false); + if (ret) + return ret; + + return 0; +} + +static int venus_session_get_seq_hdr(struct venus_inst *inst, u32 seq_hdr, + u32 seq_hdr_len) +{ + struct venus_hfi_device *hdev = to_hfi_priv(inst->core); + struct hfi_session_get_sequence_header_pkt *pkt; + u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE]; + int ret; + + pkt = (struct hfi_session_get_sequence_header_pkt *)packet; + + ret = pkt_session_get_seq_hdr(pkt, inst, seq_hdr, seq_hdr_len); + if (ret) + return ret; + + return venus_iface_cmdq_write(hdev, pkt, false); +} + +static int venus_session_set_property(struct venus_inst *inst, u32 ptype, + void *pdata) +{ + struct venus_hfi_device *hdev = to_hfi_priv(inst->core); + struct hfi_session_set_property_pkt *pkt; + u8 packet[IFACEQ_VAR_LARGE_PKT_SIZE]; + int ret; + + pkt = (struct hfi_session_set_property_pkt *)packet; + + ret = pkt_session_set_property(pkt, inst, ptype, pdata); + if (ret == -ENOTSUPP) + return 0; + if (ret) + return ret; + + return venus_iface_cmdq_write(hdev, pkt, false); +} + +static int venus_session_get_property(struct venus_inst *inst, u32 ptype) +{ + struct venus_hfi_device *hdev = to_hfi_priv(inst->core); + struct hfi_session_get_property_pkt pkt; + int ret; + + ret = pkt_session_get_property(&pkt, inst, ptype); + if (ret) + return ret; + + return venus_iface_cmdq_write(hdev, &pkt, true); +} + +static int venus_resume(struct venus_core *core) +{ + struct venus_hfi_device *hdev = to_hfi_priv(core); + int ret = 0; + + mutex_lock(&hdev->lock); + + if (!hdev->suspended) + goto unlock; + + ret = venus_power_on(hdev); + +unlock: + if (!ret) + hdev->suspended = false; + + mutex_unlock(&hdev->lock); + + return ret; +} + +static int venus_suspend_1xx(struct venus_core *core) +{ + struct venus_hfi_device *hdev = to_hfi_priv(core); + struct device *dev = core->dev; + void __iomem *cpu_cs_base = hdev->core->cpu_cs_base; + u32 ctrl_status; + int ret; + + if (!hdev->power_enabled || hdev->suspended) + return 0; + + mutex_lock(&hdev->lock); + ret = venus_is_valid_state(hdev); + mutex_unlock(&hdev->lock); + + if (!ret) { + dev_err(dev, "bad state, cannot suspend\n"); + return -EINVAL; + } + + ret = venus_prepare_power_collapse(hdev, true); + if (ret) { + dev_err(dev, "prepare for power collapse fail (%d)\n", ret); + return ret; + } + + mutex_lock(&hdev->lock); + + if (hdev->last_packet_type 
!= HFI_CMD_SYS_PC_PREP) { + mutex_unlock(&hdev->lock); + return -EINVAL; + } + + ret = venus_are_queues_empty(hdev); + if (ret < 0 || !ret) { + mutex_unlock(&hdev->lock); + return -EINVAL; + } + + ctrl_status = readl(cpu_cs_base + CPU_CS_SCIACMDARG0); + if (!(ctrl_status & CPU_CS_SCIACMDARG0_PC_READY)) { + mutex_unlock(&hdev->lock); + return -EINVAL; + } + + ret = venus_power_off(hdev); + if (ret) { + mutex_unlock(&hdev->lock); + return ret; + } + + hdev->suspended = true; + + mutex_unlock(&hdev->lock); + + return 0; +} + +static bool venus_cpu_and_video_core_idle(struct venus_hfi_device *hdev) +{ + void __iomem *wrapper_base = hdev->core->wrapper_base; + void __iomem *wrapper_tz_base = hdev->core->wrapper_tz_base; + void __iomem *cpu_cs_base = hdev->core->cpu_cs_base; + u32 ctrl_status, cpu_status; + + if (IS_IRIS2(hdev->core) || IS_IRIS2_1(hdev->core)) + cpu_status = readl(wrapper_tz_base + WRAPPER_TZ_CPU_STATUS_V6); + else + cpu_status = readl(wrapper_base + WRAPPER_CPU_STATUS); + ctrl_status = readl(cpu_cs_base + CPU_CS_SCIACMDARG0); + + if (cpu_status & WRAPPER_CPU_STATUS_WFI && + ctrl_status & CPU_CS_SCIACMDARG0_INIT_IDLE_MSG_MASK) + return true; + + return false; +} + +static bool venus_cpu_idle_and_pc_ready(struct venus_hfi_device *hdev) +{ + void __iomem *wrapper_base = hdev->core->wrapper_base; + void __iomem *wrapper_tz_base = hdev->core->wrapper_tz_base; + void __iomem *cpu_cs_base = hdev->core->cpu_cs_base; + u32 ctrl_status, cpu_status; + + if (IS_IRIS2(hdev->core) || IS_IRIS2_1(hdev->core)) + cpu_status = readl(wrapper_tz_base + WRAPPER_TZ_CPU_STATUS_V6); + else + cpu_status = readl(wrapper_base + WRAPPER_CPU_STATUS); + ctrl_status = readl(cpu_cs_base + CPU_CS_SCIACMDARG0); + + if (cpu_status & WRAPPER_CPU_STATUS_WFI && + ctrl_status & CPU_CS_SCIACMDARG0_PC_READY) + return true; + + return false; +} + +static int venus_suspend_3xx(struct venus_core *core) +{ + struct venus_hfi_device *hdev = to_hfi_priv(core); + struct device *dev = core->dev; + void __iomem *cpu_cs_base = hdev->core->cpu_cs_base; + u32 ctrl_status; + bool val; + int ret; + + if (!hdev->power_enabled || hdev->suspended) + return 0; + + mutex_lock(&hdev->lock); + ret = venus_is_valid_state(hdev); + mutex_unlock(&hdev->lock); + + if (!ret) { + dev_err(dev, "bad state, cannot suspend\n"); + return -EINVAL; + } + + ctrl_status = readl(cpu_cs_base + CPU_CS_SCIACMDARG0); + if (ctrl_status & CPU_CS_SCIACMDARG0_PC_READY) + goto power_off; + + /* + * Power collapse sequence for Venus 3xx and 4xx versions: + * 1. Check for ARM9 and video core to be idle by checking WFI bit + * (bit 0) in CPU status register and by checking Idle (bit 30) in + * Control status register for video core. + * 2. Send a command to prepare for power collapse. + * 3. Check for WFI and PC_READY bits. 
+ */ + ret = readx_poll_timeout(venus_cpu_and_video_core_idle, hdev, val, val, + 1500, 100 * 1500); + if (ret) { + dev_err(dev, "wait for cpu and video core idle fail (%d)\n", ret); + return ret; + } + + ret = venus_prepare_power_collapse(hdev, false); + if (ret) { + dev_err(dev, "prepare for power collapse fail (%d)\n", ret); + return ret; + } + + ret = readx_poll_timeout(venus_cpu_idle_and_pc_ready, hdev, val, val, + 1500, 100 * 1500); + if (ret) + return ret; + +power_off: + mutex_lock(&hdev->lock); + + ret = venus_power_off(hdev); + if (ret) { + dev_err(dev, "venus_power_off (%d)\n", ret); + mutex_unlock(&hdev->lock); + return ret; + } + + hdev->suspended = true; + + mutex_unlock(&hdev->lock); + + return 0; +} + +static int venus_suspend(struct venus_core *core) +{ + if (IS_V3(core) || IS_V4(core) || IS_V6(core)) + return venus_suspend_3xx(core); + + return venus_suspend_1xx(core); +} + +static const struct hfi_ops venus_hfi_ops = { + .core_init = venus_core_init, + .core_deinit = venus_core_deinit, + .core_ping = venus_core_ping, + .core_trigger_ssr = venus_core_trigger_ssr, + + .session_init = venus_session_init, + .session_end = venus_session_end, + .session_abort = venus_session_abort, + .session_flush = venus_session_flush, + .session_start = venus_session_start, + .session_stop = venus_session_stop, + .session_continue = venus_session_continue, + .session_etb = venus_session_etb, + .session_ftb = venus_session_ftb, + .session_set_buffers = venus_session_set_buffers, + .session_unset_buffers = venus_session_unset_buffers, + .session_load_res = venus_session_load_res, + .session_release_res = venus_session_release_res, + .session_parse_seq_hdr = venus_session_parse_seq_hdr, + .session_get_seq_hdr = venus_session_get_seq_hdr, + .session_set_property = venus_session_set_property, + .session_get_property = venus_session_get_property, + + .resume = venus_resume, + .suspend = venus_suspend, + + .isr = venus_isr, + .isr_thread = venus_isr_thread, +}; + +void venus_hfi_destroy(struct venus_core *core) +{ + struct venus_hfi_device *hdev = to_hfi_priv(core); + + core->priv = NULL; + venus_interface_queues_release(hdev); + mutex_destroy(&hdev->lock); + kfree(hdev); + core->ops = NULL; +} + +int venus_hfi_create(struct venus_core *core) +{ + struct venus_hfi_device *hdev; + int ret; + + hdev = kzalloc(sizeof(*hdev), GFP_KERNEL); + if (!hdev) + return -ENOMEM; + + mutex_init(&hdev->lock); + + hdev->core = core; + hdev->suspended = true; + core->priv = hdev; + core->ops = &venus_hfi_ops; + + ret = venus_interface_queues_init(hdev); + if (ret) + goto err_kfree; + + return 0; + +err_kfree: + kfree(hdev); + core->priv = NULL; + core->ops = NULL; + return ret; +} + +void venus_hfi_queues_reinit(struct venus_core *core) +{ + struct venus_hfi_device *hdev = to_hfi_priv(core); + struct hfi_queue_table_header *tbl_hdr; + struct iface_queue *queue; + struct hfi_sfr *sfr; + unsigned int i; + + mutex_lock(&hdev->lock); + + for (i = 0; i < IFACEQ_NUM; i++) { + queue = &hdev->queues[i]; + queue->qhdr = + IFACEQ_GET_QHDR_START_ADDR(hdev->ifaceq_table.kva, i); + + venus_set_qhdr_defaults(queue->qhdr); + + queue->qhdr->start_addr = queue->qmem.da; + + if (i == IFACEQ_CMD_IDX) + queue->qhdr->type |= HFI_HOST_TO_CTRL_CMD_Q; + else if (i == IFACEQ_MSG_IDX) + queue->qhdr->type |= HFI_CTRL_TO_HOST_MSG_Q; + else if (i == IFACEQ_DBG_IDX) + queue->qhdr->type |= HFI_CTRL_TO_HOST_DBG_Q; + } + + tbl_hdr = hdev->ifaceq_table.kva; + tbl_hdr->version = 0; + tbl_hdr->size = IFACEQ_TABLE_SIZE; + tbl_hdr->qhdr0_offset = 
sizeof(struct hfi_queue_table_header); + tbl_hdr->qhdr_size = sizeof(struct hfi_queue_header); + tbl_hdr->num_q = IFACEQ_NUM; + tbl_hdr->num_active_q = IFACEQ_NUM; + + /* + * Set receive request to zero on debug queue as there is no + * need of interrupt from video hardware for debug messages + */ + queue = &hdev->queues[IFACEQ_DBG_IDX]; + queue->qhdr->rx_req = 0; + + sfr = hdev->sfr.kva; + sfr->buf_size = ALIGNED_SFR_SIZE; + + /* ensure table and queue header structs are settled in memory */ + wmb(); + + mutex_unlock(&hdev->lock); +} diff --git a/drivers/media/platform/qcom/venus/hfi_venus.h b/drivers/media/platform/qcom/venus/hfi_venus.h new file mode 100644 index 0000000000..1b656ef2bf --- /dev/null +++ b/drivers/media/platform/qcom/venus/hfi_venus.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. + * Copyright (C) 2017 Linaro Ltd. + */ +#ifndef __VENUS_HFI_VENUS_H__ +#define __VENUS_HFI_VENUS_H__ + +struct venus_core; + +void venus_hfi_destroy(struct venus_core *core); +int venus_hfi_create(struct venus_core *core); +void venus_hfi_queues_reinit(struct venus_core *core); + +#endif diff --git a/drivers/media/platform/qcom/venus/hfi_venus_io.h b/drivers/media/platform/qcom/venus/hfi_venus_io.h new file mode 100644 index 0000000000..9735a246ce --- /dev/null +++ b/drivers/media/platform/qcom/venus/hfi_venus_io.h @@ -0,0 +1,160 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. + * Copyright (C) 2017 Linaro Ltd. + */ +#ifndef __VENUS_HFI_VENUS_IO_H__ +#define __VENUS_HFI_VENUS_IO_H__ + +#define VBIF_BASE 0x80000 + +#define VBIF_AXI_HALT_CTRL0 0x208 +#define VBIF_AXI_HALT_CTRL1 0x20c + +#define VBIF_AXI_HALT_CTRL0_HALT_REQ BIT(0) +#define VBIF_AXI_HALT_CTRL1_HALT_ACK BIT(0) +#define VBIF_AXI_HALT_ACK_TIMEOUT_US 500000 + +#define CPU_BASE 0xc0000 + +#define CPU_CS_BASE (CPU_BASE + 0x12000) +#define CPU_IC_BASE (CPU_BASE + 0x1f000) +#define CPU_BASE_V6 0xa0000 +#define CPU_CS_BASE_V6 CPU_BASE_V6 +#define CPU_IC_BASE_V6 (CPU_BASE_V6 + 0x138) + +#define CPU_CS_A2HSOFTINTCLR 0x1c + +#define VIDC_CTRL_INIT 0x48 +#define VIDC_CTRL_INIT_RESERVED_BITS31_1_MASK 0xfffffffe +#define VIDC_CTRL_INIT_RESERVED_BITS31_1_SHIFT 1 +#define VIDC_CTRL_INIT_CTRL_MASK 0x1 +#define VIDC_CTRL_INIT_CTRL_SHIFT 0 + +/* HFI control status */ +#define CPU_CS_SCIACMDARG0 0x4c +#define CPU_CS_SCIACMDARG0_MASK 0xff +#define CPU_CS_SCIACMDARG0_SHIFT 0x0 +#define CPU_CS_SCIACMDARG0_ERROR_STATUS_MASK 0xfe +#define CPU_CS_SCIACMDARG0_ERROR_STATUS_SHIFT 0x1 +#define CPU_CS_SCIACMDARG0_INIT_STATUS_MASK 0x1 +#define CPU_CS_SCIACMDARG0_INIT_STATUS_SHIFT 0x0 +#define CPU_CS_SCIACMDARG0_PC_READY BIT(8) +#define CPU_CS_SCIACMDARG0_INIT_IDLE_MSG_MASK BIT(30) + +/* HFI queue table info */ +#define CPU_CS_SCIACMDARG1 0x50 + +/* HFI queue table address */ +#define CPU_CS_SCIACMDARG2 0x54 + +/* Venus cpu */ +#define CPU_CS_SCIACMDARG3 0x58 + +#define SFR_ADDR 0x5c +#define MMAP_ADDR 0x60 +#define UC_REGION_ADDR 0x64 +#define UC_REGION_SIZE 0x68 + +#define CPU_CS_H2XSOFTINTEN_V6 0x148 + +#define CPU_CS_X2RPMH_V6 0x168 +#define CPU_CS_X2RPMH_MASK0_BMSK_V6 0x1 +#define CPU_CS_X2RPMH_MASK0_SHFT_V6 0x0 +#define CPU_CS_X2RPMH_MASK1_BMSK_V6 0x2 +#define CPU_CS_X2RPMH_MASK1_SHFT_V6 0x1 +#define CPU_CS_X2RPMH_SWOVERRIDE_BMSK_V6 0x4 +#define CPU_CS_X2RPMH_SWOVERRIDE_SHFT_V6 0x3 + +/* Relative to CPU_IC_BASE */ +#define CPU_IC_SOFTINT 0x18 +#define CPU_IC_SOFTINT_V6 0x150 +#define 
CPU_IC_SOFTINT_H2A_MASK 0x8000 +#define CPU_IC_SOFTINT_H2A_SHIFT 0xf +#define CPU_IC_SOFTINT_H2A_SHIFT_V6 0x0 + +/* Venus wrapper */ +#define WRAPPER_BASE_V6 0x000b0000 +#define WRAPPER_BASE 0x000e0000 + +#define WRAPPER_HW_VERSION 0x00 +#define WRAPPER_HW_VERSION_MAJOR_VERSION_MASK 0x78000000 +#define WRAPPER_HW_VERSION_MAJOR_VERSION_SHIFT 28 +#define WRAPPER_HW_VERSION_MINOR_VERSION_MASK 0xfff0000 +#define WRAPPER_HW_VERSION_MINOR_VERSION_SHIFT 16 +#define WRAPPER_HW_VERSION_STEP_VERSION_MASK 0xffff + +#define WRAPPER_CLOCK_CONFIG 0x04 + +#define WRAPPER_INTR_STATUS 0x0c +#define WRAPPER_INTR_STATUS_A2HWD_MASK 0x10 +#define WRAPPER_INTR_STATUS_A2HWD_SHIFT 0x4 +#define WRAPPER_INTR_STATUS_A2H_MASK 0x4 +#define WRAPPER_INTR_STATUS_A2H_SHIFT 0x2 + +#define WRAPPER_INTR_MASK 0x10 +#define WRAPPER_INTR_MASK_A2HWD_BASK 0x10 +#define WRAPPER_INTR_MASK_A2HWD_SHIFT 0x4 +#define WRAPPER_INTR_MASK_A2HVCODEC_MASK 0x8 +#define WRAPPER_INTR_MASK_A2HVCODEC_SHIFT 0x3 +#define WRAPPER_INTR_MASK_A2HCPU_MASK 0x4 +#define WRAPPER_INTR_MASK_A2HCPU_SHIFT 0x2 + +#define WRAPPER_INTR_STATUS_A2HWD_MASK_V6 0x8 +#define WRAPPER_INTR_MASK_A2HWD_BASK_V6 0x8 + +#define WRAPPER_INTR_CLEAR 0x14 +#define WRAPPER_INTR_CLEAR_A2HWD_MASK 0x10 +#define WRAPPER_INTR_CLEAR_A2HWD_SHIFT 0x4 +#define WRAPPER_INTR_CLEAR_A2H_MASK 0x4 +#define WRAPPER_INTR_CLEAR_A2H_SHIFT 0x2 + +#define WRAPPER_POWER_STATUS 0x44 +#define WRAPPER_VDEC_VCODEC_POWER_CONTROL 0x48 +#define WRAPPER_VENC_VCODEC_POWER_CONTROL 0x4c +#define WRAPPER_DEBUG_BRIDGE_LPI_CONTROL_V6 0x54 +#define WRAPPER_DEBUG_BRIDGE_LPI_STATUS_V6 0x58 +#define WRAPPER_VDEC_VENC_AHB_BRIDGE_SYNC_RESET 0x64 + +#define WRAPPER_CPU_CLOCK_CONFIG 0x2000 +#define WRAPPER_CPU_AXI_HALT 0x2008 +#define WRAPPER_CPU_AXI_HALT_HALT BIT(16) +#define WRAPPER_CPU_AXI_HALT_STATUS 0x200c +#define WRAPPER_CPU_AXI_HALT_STATUS_IDLE BIT(24) + +#define WRAPPER_CPU_CGC_DIS 0x2010 +#define WRAPPER_CPU_STATUS 0x2014 +#define WRAPPER_CPU_STATUS_WFI BIT(0) +#define WRAPPER_SW_RESET 0x3000 +#define WRAPPER_CPA_START_ADDR 0x1020 +#define WRAPPER_CPA_END_ADDR 0x1024 +#define WRAPPER_FW_START_ADDR 0x1028 +#define WRAPPER_FW_END_ADDR 0x102C +#define WRAPPER_NONPIX_START_ADDR 0x1030 +#define WRAPPER_NONPIX_END_ADDR 0x1034 +#define WRAPPER_A9SS_SW_RESET 0x3000 +#define WRAPPER_A9SS_SW_RESET_BIT BIT(4) + +/* Venus 4xx */ +#define WRAPPER_VCODEC0_MMCC_POWER_STATUS 0x90 +#define WRAPPER_VCODEC0_MMCC_POWER_CONTROL 0x94 + +#define WRAPPER_VCODEC1_MMCC_POWER_STATUS 0x110 +#define WRAPPER_VCODEC1_MMCC_POWER_CONTROL 0x114 + +/* Venus 6xx */ +#define WRAPPER_CORE_POWER_STATUS_V6 0x80 +#define WRAPPER_CORE_POWER_CONTROL_V6 0x84 + +/* Wrapper TZ 6xx */ +#define WRAPPER_TZ_BASE_V6 0x000c0000 +#define WRAPPER_TZ_CPU_STATUS_V6 0x10 +#define WRAPPER_TZ_XTSS_SW_RESET 0x1000 +#define WRAPPER_XTSS_SW_RESET_BIT BIT(0) + +/* Venus AON */ +#define AON_BASE_V6 0x000e0000 +#define AON_WRAPPER_MVP_NOC_LPI_CONTROL 0x00 +#define AON_WRAPPER_MVP_NOC_LPI_STATUS 0x04 + +#endif diff --git a/drivers/media/platform/qcom/venus/pm_helpers.c b/drivers/media/platform/qcom/venus/pm_helpers.c new file mode 100644 index 0000000000..48c9084bb4 --- /dev/null +++ b/drivers/media/platform/qcom/venus/pm_helpers.c @@ -0,0 +1,1207 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2019 Linaro Ltd. 
+ * + * Author: Stanimir Varbanov <stanimir.varbanov@linaro.org> + */ +#include <linux/clk.h> +#include <linux/interconnect.h> +#include <linux/iopoll.h> +#include <linux/kernel.h> +#include <linux/pm_domain.h> +#include <linux/pm_opp.h> +#include <linux/pm_runtime.h> +#include <linux/reset.h> +#include <linux/types.h> +#include <media/v4l2-mem2mem.h> + +#include "core.h" +#include "hfi_parser.h" +#include "hfi_venus_io.h" +#include "pm_helpers.h" +#include "hfi_platform.h" + +static bool legacy_binding; + +static int core_clks_get(struct venus_core *core) +{ + const struct venus_resources *res = core->res; + struct device *dev = core->dev; + unsigned int i; + + for (i = 0; i < res->clks_num; i++) { + core->clks[i] = devm_clk_get(dev, res->clks[i]); + if (IS_ERR(core->clks[i])) + return PTR_ERR(core->clks[i]); + } + + return 0; +} + +static int core_clks_enable(struct venus_core *core) +{ + const struct venus_resources *res = core->res; + const struct freq_tbl *freq_tbl = core->res->freq_tbl; + unsigned int freq_tbl_size = core->res->freq_tbl_size; + unsigned long freq; + unsigned int i; + int ret; + + if (!freq_tbl) + return -EINVAL; + + freq = freq_tbl[freq_tbl_size - 1].freq; + + for (i = 0; i < res->clks_num; i++) { + if (IS_V6(core)) { + ret = clk_set_rate(core->clks[i], freq); + if (ret) + goto err; + } + + ret = clk_prepare_enable(core->clks[i]); + if (ret) + goto err; + } + + return 0; +err: + while (i--) + clk_disable_unprepare(core->clks[i]); + + return ret; +} + +static void core_clks_disable(struct venus_core *core) +{ + const struct venus_resources *res = core->res; + unsigned int i = res->clks_num; + + while (i--) + clk_disable_unprepare(core->clks[i]); +} + +static int core_clks_set_rate(struct venus_core *core, unsigned long freq) +{ + int ret; + + ret = dev_pm_opp_set_rate(core->dev, freq); + if (ret) + return ret; + + ret = clk_set_rate(core->vcodec0_clks[0], freq); + if (ret) + return ret; + + ret = clk_set_rate(core->vcodec1_clks[0], freq); + if (ret) + return ret; + + return 0; +} + +static int vcodec_clks_get(struct venus_core *core, struct device *dev, + struct clk **clks, const char * const *id) +{ + const struct venus_resources *res = core->res; + unsigned int i; + + for (i = 0; i < res->vcodec_clks_num; i++) { + if (!id[i]) + continue; + clks[i] = devm_clk_get(dev, id[i]); + if (IS_ERR(clks[i])) + return PTR_ERR(clks[i]); + } + + return 0; +} + +static int vcodec_clks_enable(struct venus_core *core, struct clk **clks) +{ + const struct venus_resources *res = core->res; + unsigned int i; + int ret; + + for (i = 0; i < res->vcodec_clks_num; i++) { + ret = clk_prepare_enable(clks[i]); + if (ret) + goto err; + } + + return 0; +err: + while (i--) + clk_disable_unprepare(clks[i]); + + return ret; +} + +static void vcodec_clks_disable(struct venus_core *core, struct clk **clks) +{ + const struct venus_resources *res = core->res; + unsigned int i = res->vcodec_clks_num; + + while (i--) + clk_disable_unprepare(clks[i]); +} + +static u32 load_per_instance(struct venus_inst *inst) +{ + u32 mbs; + + if (!inst || !(inst->state >= INST_INIT && inst->state < INST_STOP)) + return 0; + + mbs = (ALIGN(inst->width, 16) / 16) * (ALIGN(inst->height, 16) / 16); + + return mbs * inst->fps; +} + +static u32 load_per_type(struct venus_core *core, u32 session_type) +{ + struct venus_inst *inst = NULL; + u32 mbs_per_sec = 0; + + list_for_each_entry(inst, &core->instances, list) { + if (inst->session_type != session_type) + continue; + + mbs_per_sec += load_per_instance(inst); + } + + return 
mbs_per_sec; +} + +static void mbs_to_bw(struct venus_inst *inst, u32 mbs, u32 *avg, u32 *peak) +{ + const struct venus_resources *res = inst->core->res; + const struct bw_tbl *bw_tbl; + unsigned int num_rows, i; + + *avg = 0; + *peak = 0; + + if (mbs == 0) + return; + + if (inst->session_type == VIDC_SESSION_TYPE_ENC) { + num_rows = res->bw_tbl_enc_size; + bw_tbl = res->bw_tbl_enc; + } else if (inst->session_type == VIDC_SESSION_TYPE_DEC) { + num_rows = res->bw_tbl_dec_size; + bw_tbl = res->bw_tbl_dec; + } else { + return; + } + + if (!bw_tbl || num_rows == 0) + return; + + for (i = 0; i < num_rows; i++) { + if (i != 0 && mbs > bw_tbl[i].mbs_per_sec) + break; + + if (inst->dpb_fmt & HFI_COLOR_FORMAT_10_BIT_BASE) { + *avg = bw_tbl[i].avg_10bit; + *peak = bw_tbl[i].peak_10bit; + } else { + *avg = bw_tbl[i].avg; + *peak = bw_tbl[i].peak; + } + } +} + +static int load_scale_bw(struct venus_core *core) +{ + struct venus_inst *inst = NULL; + u32 mbs_per_sec, avg, peak, total_avg = 0, total_peak = 0; + + list_for_each_entry(inst, &core->instances, list) { + mbs_per_sec = load_per_instance(inst); + mbs_to_bw(inst, mbs_per_sec, &avg, &peak); + total_avg += avg; + total_peak += peak; + } + + /* + * keep minimum bandwidth vote for "video-mem" path, + * so that clks can be disabled during vdec_session_release(). + * Actual bandwidth drop will be done during device supend + * so that device can power down without any warnings. + */ + + if (!total_avg && !total_peak) + total_avg = kbps_to_icc(1000); + + dev_dbg(core->dev, VDBGL "total: avg_bw: %u, peak_bw: %u\n", + total_avg, total_peak); + + return icc_set_bw(core->video_path, total_avg, total_peak); +} + +static int load_scale_v1(struct venus_inst *inst) +{ + struct venus_core *core = inst->core; + const struct freq_tbl *table = core->res->freq_tbl; + unsigned int num_rows = core->res->freq_tbl_size; + unsigned long freq = table[0].freq; + struct device *dev = core->dev; + u32 mbs_per_sec; + unsigned int i; + int ret = 0; + + mutex_lock(&core->lock); + mbs_per_sec = load_per_type(core, VIDC_SESSION_TYPE_ENC) + + load_per_type(core, VIDC_SESSION_TYPE_DEC); + + if (mbs_per_sec > core->res->max_load) + dev_warn(dev, "HW is overloaded, needed: %d max: %d\n", + mbs_per_sec, core->res->max_load); + + if (!mbs_per_sec && num_rows > 1) { + freq = table[num_rows - 1].freq; + goto set_freq; + } + + for (i = 0; i < num_rows; i++) { + if (mbs_per_sec > table[i].load) + break; + freq = table[i].freq; + } + +set_freq: + + ret = core_clks_set_rate(core, freq); + if (ret) { + dev_err(dev, "failed to set clock rate %lu (%d)\n", + freq, ret); + goto exit; + } + + ret = load_scale_bw(core); + if (ret) { + dev_err(dev, "failed to set bandwidth (%d)\n", + ret); + goto exit; + } + +exit: + mutex_unlock(&core->lock); + return ret; +} + +static int core_get_v1(struct venus_core *core) +{ + int ret; + + ret = core_clks_get(core); + if (ret) + return ret; + + ret = devm_pm_opp_set_clkname(core->dev, "core"); + if (ret) + return ret; + + return 0; +} + +static void core_put_v1(struct venus_core *core) +{ +} + +static int core_power_v1(struct venus_core *core, int on) +{ + int ret = 0; + + if (on == POWER_ON) + ret = core_clks_enable(core); + else + core_clks_disable(core); + + return ret; +} + +static const struct venus_pm_ops pm_ops_v1 = { + .core_get = core_get_v1, + .core_put = core_put_v1, + .core_power = core_power_v1, + .load_scale = load_scale_v1, +}; + +static void +vcodec_control_v3(struct venus_core *core, u32 session_type, bool enable) +{ + void __iomem *ctrl; + + 
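+ /* 0 = keep the codec powered (hardware power collapse disabled) while its clocks are toggled below, 1 = re-enable hardware power collapse; callers bracket vcodec clock changes with an enable/disable pair */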
if (session_type == VIDC_SESSION_TYPE_DEC) + ctrl = core->wrapper_base + WRAPPER_VDEC_VCODEC_POWER_CONTROL; + else + ctrl = core->wrapper_base + WRAPPER_VENC_VCODEC_POWER_CONTROL; + + if (enable) + writel(0, ctrl); + else + writel(1, ctrl); +} + +static int vdec_get_v3(struct device *dev) +{ + struct venus_core *core = dev_get_drvdata(dev); + + return vcodec_clks_get(core, dev, core->vcodec0_clks, + core->res->vcodec0_clks); +} + +static int vdec_power_v3(struct device *dev, int on) +{ + struct venus_core *core = dev_get_drvdata(dev); + int ret = 0; + + vcodec_control_v3(core, VIDC_SESSION_TYPE_DEC, true); + + if (on == POWER_ON) + ret = vcodec_clks_enable(core, core->vcodec0_clks); + else + vcodec_clks_disable(core, core->vcodec0_clks); + + vcodec_control_v3(core, VIDC_SESSION_TYPE_DEC, false); + + return ret; +} + +static int venc_get_v3(struct device *dev) +{ + struct venus_core *core = dev_get_drvdata(dev); + + return vcodec_clks_get(core, dev, core->vcodec1_clks, + core->res->vcodec1_clks); +} + +static int venc_power_v3(struct device *dev, int on) +{ + struct venus_core *core = dev_get_drvdata(dev); + int ret = 0; + + vcodec_control_v3(core, VIDC_SESSION_TYPE_ENC, true); + + if (on == POWER_ON) + ret = vcodec_clks_enable(core, core->vcodec1_clks); + else + vcodec_clks_disable(core, core->vcodec1_clks); + + vcodec_control_v3(core, VIDC_SESSION_TYPE_ENC, false); + + return ret; +} + +static const struct venus_pm_ops pm_ops_v3 = { + .core_get = core_get_v1, + .core_put = core_put_v1, + .core_power = core_power_v1, + .vdec_get = vdec_get_v3, + .vdec_power = vdec_power_v3, + .venc_get = venc_get_v3, + .venc_power = venc_power_v3, + .load_scale = load_scale_v1, +}; + +static int vcodec_control_v4(struct venus_core *core, u32 coreid, bool enable) +{ + void __iomem *ctrl, *stat; + u32 val; + int ret; + + if (IS_V6(core)) { + ctrl = core->wrapper_base + WRAPPER_CORE_POWER_CONTROL_V6; + stat = core->wrapper_base + WRAPPER_CORE_POWER_STATUS_V6; + } else if (coreid == VIDC_CORE_ID_1) { + ctrl = core->wrapper_base + WRAPPER_VCODEC0_MMCC_POWER_CONTROL; + stat = core->wrapper_base + WRAPPER_VCODEC0_MMCC_POWER_STATUS; + } else { + ctrl = core->wrapper_base + WRAPPER_VCODEC1_MMCC_POWER_CONTROL; + stat = core->wrapper_base + WRAPPER_VCODEC1_MMCC_POWER_STATUS; + } + + if (enable) { + writel(0, ctrl); + + ret = readl_poll_timeout(stat, val, val & BIT(1), 1, 100); + if (ret) + return ret; + } else { + writel(1, ctrl); + + ret = readl_poll_timeout(stat, val, !(val & BIT(1)), 1, 100); + if (ret) + return ret; + } + + return 0; +} + +static int poweroff_coreid(struct venus_core *core, unsigned int coreid_mask) +{ + int ret; + + if (coreid_mask & VIDC_CORE_ID_1) { + ret = vcodec_control_v4(core, VIDC_CORE_ID_1, true); + if (ret) + return ret; + + vcodec_clks_disable(core, core->vcodec0_clks); + + ret = vcodec_control_v4(core, VIDC_CORE_ID_1, false); + if (ret) + return ret; + + ret = pm_runtime_put_sync(core->pmdomains[1]); + if (ret < 0) + return ret; + } + + if (coreid_mask & VIDC_CORE_ID_2) { + ret = vcodec_control_v4(core, VIDC_CORE_ID_2, true); + if (ret) + return ret; + + vcodec_clks_disable(core, core->vcodec1_clks); + + ret = vcodec_control_v4(core, VIDC_CORE_ID_2, false); + if (ret) + return ret; + + ret = pm_runtime_put_sync(core->pmdomains[2]); + if (ret < 0) + return ret; + } + + return 0; +} + +static int poweron_coreid(struct venus_core *core, unsigned int coreid_mask) +{ + int ret; + + if (coreid_mask & VIDC_CORE_ID_1) { + ret = pm_runtime_get_sync(core->pmdomains[1]); + if (ret < 0) + return 
ret; + + ret = vcodec_control_v4(core, VIDC_CORE_ID_1, true); + if (ret) + return ret; + + ret = vcodec_clks_enable(core, core->vcodec0_clks); + if (ret) + return ret; + + ret = vcodec_control_v4(core, VIDC_CORE_ID_1, false); + if (ret < 0) + return ret; + } + + if (coreid_mask & VIDC_CORE_ID_2) { + ret = pm_runtime_get_sync(core->pmdomains[2]); + if (ret < 0) + return ret; + + ret = vcodec_control_v4(core, VIDC_CORE_ID_2, true); + if (ret) + return ret; + + ret = vcodec_clks_enable(core, core->vcodec1_clks); + if (ret) + return ret; + + ret = vcodec_control_v4(core, VIDC_CORE_ID_2, false); + if (ret < 0) + return ret; + } + + return 0; +} + +static inline int power_save_mode_enable(struct venus_inst *inst, + bool enable) +{ + struct venc_controls *enc_ctr = &inst->controls.enc; + const u32 ptype = HFI_PROPERTY_CONFIG_VENC_PERF_MODE; + u32 venc_mode; + int ret = 0; + + if (inst->session_type != VIDC_SESSION_TYPE_ENC) + return 0; + + if (enc_ctr->bitrate_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_CQ) + enable = false; + + venc_mode = enable ? HFI_VENC_PERFMODE_POWER_SAVE : + HFI_VENC_PERFMODE_MAX_QUALITY; + + ret = hfi_session_set_property(inst, ptype, &venc_mode); + if (ret) + return ret; + + inst->flags = enable ? inst->flags | VENUS_LOW_POWER : + inst->flags & ~VENUS_LOW_POWER; + + return ret; +} + +static int move_core_to_power_save_mode(struct venus_core *core, + u32 core_id) +{ + struct venus_inst *inst = NULL; + + mutex_lock(&core->lock); + list_for_each_entry(inst, &core->instances, list) { + if (inst->clk_data.core_id == core_id && + inst->session_type == VIDC_SESSION_TYPE_ENC) + power_save_mode_enable(inst, true); + } + mutex_unlock(&core->lock); + return 0; +} + +static void +min_loaded_core(struct venus_inst *inst, u32 *min_coreid, u32 *min_load, bool low_power) +{ + u32 mbs_per_sec, load, core1_load = 0, core2_load = 0; + u32 cores_max = core_num_max(inst); + struct venus_core *core = inst->core; + struct venus_inst *inst_pos; + unsigned long vpp_freq; + u32 coreid; + + mutex_lock(&core->lock); + + list_for_each_entry(inst_pos, &core->instances, list) { + if (inst_pos == inst) + continue; + + if (inst_pos->state != INST_START) + continue; + + if (inst->session_type == VIDC_SESSION_TYPE_DEC) + vpp_freq = inst_pos->clk_data.vpp_freq; + else if (inst->session_type == VIDC_SESSION_TYPE_ENC) + vpp_freq = low_power ? inst_pos->clk_data.low_power_freq : + inst_pos->clk_data.vpp_freq; + else + continue; + + coreid = inst_pos->clk_data.core_id; + + mbs_per_sec = load_per_instance(inst_pos); + load = mbs_per_sec * vpp_freq; + + if ((coreid & VIDC_CORE_ID_3) == VIDC_CORE_ID_3) { + core1_load += load / 2; + core2_load += load / 2; + } else if (coreid & VIDC_CORE_ID_1) { + core1_load += load; + } else if (coreid & VIDC_CORE_ID_2) { + core2_load += load; + } + } + + *min_coreid = core1_load <= core2_load ? 
+ VIDC_CORE_ID_1 : VIDC_CORE_ID_2; + *min_load = min(core1_load, core2_load); + + if (cores_max < VIDC_CORE_ID_2 || core->res->vcodec_num < 2) { + *min_coreid = VIDC_CORE_ID_1; + *min_load = core1_load; + } + + mutex_unlock(&core->lock); +} + +static int decide_core(struct venus_inst *inst) +{ + const u32 ptype = HFI_PROPERTY_CONFIG_VIDEOCORES_USAGE; + struct venus_core *core = inst->core; + u32 min_coreid, min_load, cur_inst_load; + u32 min_lp_coreid, min_lp_load, cur_inst_lp_load; + struct hfi_videocores_usage_type cu; + unsigned long max_freq; + int ret = 0; + + if (legacy_binding) { + if (inst->session_type == VIDC_SESSION_TYPE_DEC) + cu.video_core_enable_mask = VIDC_CORE_ID_1; + else + cu.video_core_enable_mask = VIDC_CORE_ID_2; + + goto done; + } + + if (inst->clk_data.core_id != VIDC_CORE_ID_DEFAULT) + return 0; + + cur_inst_load = load_per_instance(inst); + cur_inst_load *= inst->clk_data.vpp_freq; + /*TODO : divide this inst->load by work_route */ + + cur_inst_lp_load = load_per_instance(inst); + cur_inst_lp_load *= inst->clk_data.low_power_freq; + /*TODO : divide this inst->load by work_route */ + + max_freq = core->res->freq_tbl[0].freq; + + min_loaded_core(inst, &min_coreid, &min_load, false); + min_loaded_core(inst, &min_lp_coreid, &min_lp_load, true); + + if (cur_inst_load + min_load <= max_freq) { + inst->clk_data.core_id = min_coreid; + cu.video_core_enable_mask = min_coreid; + } else if (cur_inst_lp_load + min_load <= max_freq) { + /* Move current instance to LP and return */ + inst->clk_data.core_id = min_coreid; + cu.video_core_enable_mask = min_coreid; + power_save_mode_enable(inst, true); + } else if (cur_inst_lp_load + min_lp_load <= max_freq) { + /* Move all instances to LP mode and return */ + inst->clk_data.core_id = min_lp_coreid; + cu.video_core_enable_mask = min_lp_coreid; + move_core_to_power_save_mode(core, min_lp_coreid); + } else { + dev_warn(core->dev, "HW can't support this load"); + return -EINVAL; + } + +done: + ret = hfi_session_set_property(inst, ptype, &cu); + if (ret) + return ret; + + return ret; +} + +static int acquire_core(struct venus_inst *inst) +{ + struct venus_core *core = inst->core; + unsigned int coreid_mask = 0; + + if (inst->core_acquired) + return 0; + + inst->core_acquired = true; + + if (inst->clk_data.core_id & VIDC_CORE_ID_1) { + if (core->core0_usage_count++) + return 0; + + coreid_mask = VIDC_CORE_ID_1; + } + + if (inst->clk_data.core_id & VIDC_CORE_ID_2) { + if (core->core1_usage_count++) + return 0; + + coreid_mask |= VIDC_CORE_ID_2; + } + + return poweron_coreid(core, coreid_mask); +} + +static int release_core(struct venus_inst *inst) +{ + struct venus_core *core = inst->core; + unsigned int coreid_mask = 0; + int ret; + + if (!inst->core_acquired) + return 0; + + if (inst->clk_data.core_id & VIDC_CORE_ID_1) { + if (--core->core0_usage_count) + goto done; + + coreid_mask = VIDC_CORE_ID_1; + } + + if (inst->clk_data.core_id & VIDC_CORE_ID_2) { + if (--core->core1_usage_count) + goto done; + + coreid_mask |= VIDC_CORE_ID_2; + } + + ret = poweroff_coreid(core, coreid_mask); + if (ret) + return ret; + +done: + inst->clk_data.core_id = VIDC_CORE_ID_DEFAULT; + inst->core_acquired = false; + return 0; +} + +static int coreid_power_v4(struct venus_inst *inst, int on) +{ + struct venus_core *core = inst->core; + int ret; + + if (legacy_binding) + return 0; + + if (on == POWER_ON) { + ret = decide_core(inst); + if (ret) + return ret; + + mutex_lock(&core->lock); + ret = acquire_core(inst); + mutex_unlock(&core->lock); + } else { + 
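+ /* POWER_OFF: release_core() drops this instance's usage count and powers the core(s) down once no instance uses them */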
mutex_lock(&core->lock); + ret = release_core(inst); + mutex_unlock(&core->lock); + } + + return ret; +} + +static int vdec_get_v4(struct device *dev) +{ + struct venus_core *core = dev_get_drvdata(dev); + + if (!legacy_binding) + return 0; + + return vcodec_clks_get(core, dev, core->vcodec0_clks, + core->res->vcodec0_clks); +} + +static void vdec_put_v4(struct device *dev) +{ + struct venus_core *core = dev_get_drvdata(dev); + unsigned int i; + + if (!legacy_binding) + return; + + for (i = 0; i < core->res->vcodec_clks_num; i++) + core->vcodec0_clks[i] = NULL; +} + +static int vdec_power_v4(struct device *dev, int on) +{ + struct venus_core *core = dev_get_drvdata(dev); + int ret; + + if (!legacy_binding) + return 0; + + ret = vcodec_control_v4(core, VIDC_CORE_ID_1, true); + if (ret) + return ret; + + if (on == POWER_ON) + ret = vcodec_clks_enable(core, core->vcodec0_clks); + else + vcodec_clks_disable(core, core->vcodec0_clks); + + vcodec_control_v4(core, VIDC_CORE_ID_1, false); + + return ret; +} + +static int venc_get_v4(struct device *dev) +{ + struct venus_core *core = dev_get_drvdata(dev); + + if (!legacy_binding) + return 0; + + return vcodec_clks_get(core, dev, core->vcodec1_clks, + core->res->vcodec1_clks); +} + +static void venc_put_v4(struct device *dev) +{ + struct venus_core *core = dev_get_drvdata(dev); + unsigned int i; + + if (!legacy_binding) + return; + + for (i = 0; i < core->res->vcodec_clks_num; i++) + core->vcodec1_clks[i] = NULL; +} + +static int venc_power_v4(struct device *dev, int on) +{ + struct venus_core *core = dev_get_drvdata(dev); + int ret; + + if (!legacy_binding) + return 0; + + ret = vcodec_control_v4(core, VIDC_CORE_ID_2, true); + if (ret) + return ret; + + if (on == POWER_ON) + ret = vcodec_clks_enable(core, core->vcodec1_clks); + else + vcodec_clks_disable(core, core->vcodec1_clks); + + vcodec_control_v4(core, VIDC_CORE_ID_2, false); + + return ret; +} + +static int vcodec_domains_get(struct venus_core *core) +{ + int ret; + struct device **opp_virt_dev; + struct device *dev = core->dev; + const struct venus_resources *res = core->res; + struct device *pd; + unsigned int i; + + if (!res->vcodec_pmdomains_num) + goto skip_pmdomains; + + for (i = 0; i < res->vcodec_pmdomains_num; i++) { + pd = dev_pm_domain_attach_by_name(dev, + res->vcodec_pmdomains[i]); + if (IS_ERR_OR_NULL(pd)) + return PTR_ERR(pd) ? 
: -ENODATA; + core->pmdomains[i] = pd; + } + +skip_pmdomains: + if (!core->res->opp_pmdomain) + return 0; + + /* Attach the power domain for setting performance state */ + ret = devm_pm_opp_attach_genpd(dev, res->opp_pmdomain, &opp_virt_dev); + if (ret) + goto opp_attach_err; + + core->opp_pmdomain = *opp_virt_dev; + core->opp_dl_venus = device_link_add(dev, core->opp_pmdomain, + DL_FLAG_RPM_ACTIVE | + DL_FLAG_PM_RUNTIME | + DL_FLAG_STATELESS); + if (!core->opp_dl_venus) { + ret = -ENODEV; + goto opp_attach_err; + } + + return 0; + +opp_attach_err: + for (i = 0; i < res->vcodec_pmdomains_num; i++) { + if (IS_ERR_OR_NULL(core->pmdomains[i])) + continue; + dev_pm_domain_detach(core->pmdomains[i], true); + } + + return ret; +} + +static void vcodec_domains_put(struct venus_core *core) +{ + const struct venus_resources *res = core->res; + unsigned int i; + + if (!res->vcodec_pmdomains_num) + goto skip_pmdomains; + + for (i = 0; i < res->vcodec_pmdomains_num; i++) { + if (IS_ERR_OR_NULL(core->pmdomains[i])) + continue; + dev_pm_domain_detach(core->pmdomains[i], true); + } + +skip_pmdomains: + if (!core->has_opp_table) + return; + + if (core->opp_dl_venus) + device_link_del(core->opp_dl_venus); +} + +static int core_resets_reset(struct venus_core *core) +{ + const struct venus_resources *res = core->res; + unsigned int i; + int ret; + + if (!res->resets_num) + return 0; + + for (i = 0; i < res->resets_num; i++) { + ret = reset_control_assert(core->resets[i]); + if (ret) + goto err; + + usleep_range(150, 250); + ret = reset_control_deassert(core->resets[i]); + if (ret) + goto err; + } + +err: + return ret; +} + +static int core_resets_get(struct venus_core *core) +{ + struct device *dev = core->dev; + const struct venus_resources *res = core->res; + unsigned int i; + int ret; + + if (!res->resets_num) + return 0; + + for (i = 0; i < res->resets_num; i++) { + core->resets[i] = + devm_reset_control_get_exclusive(dev, res->resets[i]); + if (IS_ERR(core->resets[i])) { + ret = PTR_ERR(core->resets[i]); + return ret; + } + } + + return 0; +} + +static int core_get_v4(struct venus_core *core) +{ + struct device *dev = core->dev; + const struct venus_resources *res = core->res; + int ret; + + ret = core_clks_get(core); + if (ret) + return ret; + + if (!res->vcodec_pmdomains_num) + legacy_binding = true; + + dev_info(dev, "%s legacy binding\n", legacy_binding ? 
"" : "non"); + + ret = vcodec_clks_get(core, dev, core->vcodec0_clks, res->vcodec0_clks); + if (ret) + return ret; + + ret = vcodec_clks_get(core, dev, core->vcodec1_clks, res->vcodec1_clks); + if (ret) + return ret; + + ret = core_resets_get(core); + if (ret) + return ret; + + if (legacy_binding) + return 0; + + ret = devm_pm_opp_set_clkname(dev, "core"); + if (ret) + return ret; + + ret = vcodec_domains_get(core); + if (ret) + return ret; + + if (core->res->opp_pmdomain) { + ret = devm_pm_opp_of_add_table(dev); + if (!ret) { + core->has_opp_table = true; + } else if (ret != -ENODEV) { + dev_err(dev, "invalid OPP table in device tree\n"); + return ret; + } + } + + return 0; +} + +static void core_put_v4(struct venus_core *core) +{ + if (legacy_binding) + return; + + vcodec_domains_put(core); +} + +static int core_power_v4(struct venus_core *core, int on) +{ + struct device *dev = core->dev; + struct device *pmctrl = core->pmdomains[0]; + int ret = 0; + + if (on == POWER_ON) { + if (pmctrl) { + ret = pm_runtime_resume_and_get(pmctrl); + if (ret < 0) { + return ret; + } + } + + ret = core_resets_reset(core); + if (ret) { + if (pmctrl) + pm_runtime_put_sync(pmctrl); + return ret; + } + + ret = core_clks_enable(core); + if (ret < 0 && pmctrl) + pm_runtime_put_sync(pmctrl); + } else { + /* Drop the performance state vote */ + if (core->opp_pmdomain) + dev_pm_opp_set_rate(dev, 0); + + core_clks_disable(core); + + ret = core_resets_reset(core); + + if (pmctrl) + pm_runtime_put_sync(pmctrl); + } + + return ret; +} + +static unsigned long calculate_inst_freq(struct venus_inst *inst, + unsigned long filled_len) +{ + unsigned long vpp_freq_per_mb = 0, vpp_freq = 0, vsp_freq = 0; + u32 fps = (u32)inst->fps; + u32 mbs_per_sec; + + mbs_per_sec = load_per_instance(inst); + + if (inst->state != INST_START) + return 0; + + if (inst->session_type == VIDC_SESSION_TYPE_ENC) { + vpp_freq_per_mb = inst->flags & VENUS_LOW_POWER ? 
+ inst->clk_data.low_power_freq : + inst->clk_data.vpp_freq; + + vpp_freq = mbs_per_sec * vpp_freq_per_mb; + } else { + vpp_freq = mbs_per_sec * inst->clk_data.vpp_freq; + } + + /* 21 / 20 is overhead factor */ + vpp_freq += vpp_freq / 20; + vsp_freq = mbs_per_sec * inst->clk_data.vsp_freq; + + /* 10 / 7 is overhead factor */ + if (inst->session_type == VIDC_SESSION_TYPE_ENC) + vsp_freq += (inst->controls.enc.bitrate * 10) / 7; + else + vsp_freq += ((fps * filled_len * 8) * 10) / 7; + + return max(vpp_freq, vsp_freq); +} + +static int load_scale_v4(struct venus_inst *inst) +{ + struct venus_core *core = inst->core; + const struct freq_tbl *table = core->res->freq_tbl; + unsigned int num_rows = core->res->freq_tbl_size; + struct device *dev = core->dev; + unsigned long freq = 0, freq_core1 = 0, freq_core2 = 0; + unsigned long filled_len = 0; + int i, ret = 0; + + for (i = 0; i < inst->num_input_bufs; i++) + filled_len = max(filled_len, inst->payloads[i]); + + if (inst->session_type == VIDC_SESSION_TYPE_DEC && !filled_len) + return ret; + + freq = calculate_inst_freq(inst, filled_len); + inst->clk_data.freq = freq; + + mutex_lock(&core->lock); + list_for_each_entry(inst, &core->instances, list) { + if (inst->clk_data.core_id == VIDC_CORE_ID_1) { + freq_core1 += inst->clk_data.freq; + } else if (inst->clk_data.core_id == VIDC_CORE_ID_2) { + freq_core2 += inst->clk_data.freq; + } else if (inst->clk_data.core_id == VIDC_CORE_ID_3) { + freq_core1 += inst->clk_data.freq; + freq_core2 += inst->clk_data.freq; + } + } + + freq = max(freq_core1, freq_core2); + + if (freq > table[0].freq) { + dev_dbg(dev, VDBGL "requested clock rate: %lu scaling clock rate : %lu\n", + freq, table[0].freq); + + freq = table[0].freq; + goto set_freq; + } + + for (i = num_rows - 1 ; i >= 0; i--) { + if (freq <= table[i].freq) { + freq = table[i].freq; + break; + } + } + +set_freq: + + ret = core_clks_set_rate(core, freq); + if (ret) { + dev_err(dev, "failed to set clock rate %lu (%d)\n", + freq, ret); + goto exit; + } + + ret = load_scale_bw(core); + if (ret) { + dev_err(dev, "failed to set bandwidth (%d)\n", + ret); + goto exit; + } + +exit: + mutex_unlock(&core->lock); + return ret; +} + +static const struct venus_pm_ops pm_ops_v4 = { + .core_get = core_get_v4, + .core_put = core_put_v4, + .core_power = core_power_v4, + .vdec_get = vdec_get_v4, + .vdec_put = vdec_put_v4, + .vdec_power = vdec_power_v4, + .venc_get = venc_get_v4, + .venc_put = venc_put_v4, + .venc_power = venc_power_v4, + .coreid_power = coreid_power_v4, + .load_scale = load_scale_v4, +}; + +const struct venus_pm_ops *venus_pm_get(enum hfi_version version) +{ + switch (version) { + case HFI_VERSION_1XX: + default: + return &pm_ops_v1; + case HFI_VERSION_3XX: + return &pm_ops_v3; + case HFI_VERSION_4XX: + case HFI_VERSION_6XX: + return &pm_ops_v4; + } + + return NULL; +} diff --git a/drivers/media/platform/qcom/venus/pm_helpers.h b/drivers/media/platform/qcom/venus/pm_helpers.h new file mode 100644 index 0000000000..a492c50c55 --- /dev/null +++ b/drivers/media/platform/qcom/venus/pm_helpers.h @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2019 Linaro Ltd. 
*/ +#ifndef __VENUS_PM_HELPERS_H__ +#define __VENUS_PM_HELPERS_H__ + +struct device; +struct venus_core; + +#define POWER_ON 1 +#define POWER_OFF 0 + +struct venus_pm_ops { + int (*core_get)(struct venus_core *core); + void (*core_put)(struct venus_core *core); + int (*core_power)(struct venus_core *core, int on); + + int (*vdec_get)(struct device *dev); + void (*vdec_put)(struct device *dev); + int (*vdec_power)(struct device *dev, int on); + + int (*venc_get)(struct device *dev); + void (*venc_put)(struct device *dev); + int (*venc_power)(struct device *dev, int on); + + int (*coreid_power)(struct venus_inst *inst, int on); + + int (*load_scale)(struct venus_inst *inst); +}; + +const struct venus_pm_ops *venus_pm_get(enum hfi_version version); + +static inline int venus_pm_load_scale(struct venus_inst *inst) +{ + struct venus_core *core = inst->core; + + if (!core->pm_ops || !core->pm_ops->load_scale) + return 0; + + return core->pm_ops->load_scale(inst); +} + +static inline int venus_pm_acquire_core(struct venus_inst *inst) +{ + struct venus_core *core = inst->core; + const struct venus_pm_ops *pm_ops = core->pm_ops; + int ret = 0; + + if (pm_ops && pm_ops->coreid_power) + ret = pm_ops->coreid_power(inst, POWER_ON); + + return ret; +} + +static inline int venus_pm_release_core(struct venus_inst *inst) +{ + struct venus_core *core = inst->core; + const struct venus_pm_ops *pm_ops = core->pm_ops; + int ret = 0; + + if (pm_ops && pm_ops->coreid_power) + ret = pm_ops->coreid_power(inst, POWER_OFF); + + return ret; +} + +#endif diff --git a/drivers/media/platform/qcom/venus/vdec.c b/drivers/media/platform/qcom/venus/vdec.c new file mode 100644 index 0000000000..dbf305cec1 --- /dev/null +++ b/drivers/media/platform/qcom/venus/vdec.c @@ -0,0 +1,1888 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. + * Copyright (C) 2017 Linaro Ltd. 
+ */ +#include <linux/clk.h> +#include <linux/module.h> +#include <linux/mod_devicetable.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/slab.h> +#include <media/v4l2-ioctl.h> +#include <media/v4l2-event.h> +#include <media/v4l2-ctrls.h> +#include <media/v4l2-mem2mem.h> +#include <media/videobuf2-dma-contig.h> + +#include "hfi_venus_io.h" +#include "hfi_parser.h" +#include "core.h" +#include "helpers.h" +#include "vdec.h" +#include "pm_helpers.h" + +/* + * Three resons to keep MPLANE formats (despite that the number of planes + * currently is one): + * - the MPLANE formats allow only one plane to be used + * - the downstream driver use MPLANE formats too + * - future firmware versions could add support for >1 planes + */ +static const struct venus_format vdec_formats[] = { + [VENUS_FMT_NV12] = { + .pixfmt = V4L2_PIX_FMT_NV12, + .num_planes = 1, + .type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE, + }, + [VENUS_FMT_QC08C] = { + .pixfmt = V4L2_PIX_FMT_QC08C, + .num_planes = 1, + .type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE, + }, + [VENUS_FMT_QC10C] = { + .pixfmt = V4L2_PIX_FMT_QC10C, + .num_planes = 1, + .type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE, + }, + [VENUS_FMT_P010] = { + .pixfmt = V4L2_PIX_FMT_P010, + .num_planes = 1, + .type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE, + }, + [VENUS_FMT_H264] = { + .pixfmt = V4L2_PIX_FMT_H264, + .num_planes = 1, + .type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE, + .flags = V4L2_FMT_FLAG_DYN_RESOLUTION, + }, + [VENUS_FMT_VP8] = { + .pixfmt = V4L2_PIX_FMT_VP8, + .num_planes = 1, + .type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE, + .flags = V4L2_FMT_FLAG_DYN_RESOLUTION, + }, + [VENUS_FMT_VP9] = { + .pixfmt = V4L2_PIX_FMT_VP9, + .num_planes = 1, + .type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE, + .flags = V4L2_FMT_FLAG_DYN_RESOLUTION, + }, + [VENUS_FMT_HEVC] = { + .pixfmt = V4L2_PIX_FMT_HEVC, + .num_planes = 1, + .type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE, + .flags = V4L2_FMT_FLAG_DYN_RESOLUTION, + }, + [VENUS_FMT_VC1_ANNEX_G] = { + .pixfmt = V4L2_PIX_FMT_VC1_ANNEX_G, + .num_planes = 1, + .type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE, + .flags = V4L2_FMT_FLAG_DYN_RESOLUTION, + }, + [VENUS_FMT_VC1_ANNEX_L] = { + .pixfmt = V4L2_PIX_FMT_VC1_ANNEX_L, + .num_planes = 1, + .type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE, + .flags = V4L2_FMT_FLAG_DYN_RESOLUTION, + }, + [VENUS_FMT_MPEG4] = { + .pixfmt = V4L2_PIX_FMT_MPEG4, + .num_planes = 1, + .type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE, + .flags = V4L2_FMT_FLAG_DYN_RESOLUTION, + }, + [VENUS_FMT_MPEG2] = { + .pixfmt = V4L2_PIX_FMT_MPEG2, + .num_planes = 1, + .type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE, + .flags = V4L2_FMT_FLAG_DYN_RESOLUTION, + }, + [VENUS_FMT_H263] = { + .pixfmt = V4L2_PIX_FMT_H263, + .num_planes = 1, + .type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE, + .flags = V4L2_FMT_FLAG_DYN_RESOLUTION, + }, + [VENUS_FMT_XVID] = { + .pixfmt = V4L2_PIX_FMT_XVID, + .num_planes = 1, + .type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE, + .flags = V4L2_FMT_FLAG_DYN_RESOLUTION, + }, + +}; + +static const struct venus_format * +find_format(struct venus_inst *inst, u32 pixfmt, u32 type) +{ + const struct venus_format *fmt = vdec_formats; + unsigned int size = ARRAY_SIZE(vdec_formats); + unsigned int i; + + for (i = 0; i < size; i++) { + if (fmt[i].pixfmt == pixfmt) + break; + } + + if (i == size || fmt[i].type != type) + return NULL; + + if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE && + !venus_helper_check_codec(inst, fmt[i].pixfmt)) + return NULL; + + if (V4L2_TYPE_IS_CAPTURE(type) && + !venus_helper_check_format(inst, fmt[i].pixfmt)) 
+ return NULL; + + if (V4L2_TYPE_IS_CAPTURE(type) && fmt[i].pixfmt == V4L2_PIX_FMT_QC10C && + !(inst->bit_depth == VIDC_BITDEPTH_10)) + return NULL; + + return &fmt[i]; +} + +static const struct venus_format * +find_format_by_index(struct venus_inst *inst, unsigned int index, u32 type) +{ + const struct venus_format *fmt = vdec_formats; + unsigned int size = ARRAY_SIZE(vdec_formats); + unsigned int i, k = 0; + + if (index > size) + return NULL; + + for (i = 0; i < size; i++) { + bool valid; + + if (fmt[i].type != type) + continue; + + if (V4L2_TYPE_IS_OUTPUT(type)) { + valid = venus_helper_check_codec(inst, fmt[i].pixfmt); + } else if (V4L2_TYPE_IS_CAPTURE(type)) { + valid = venus_helper_check_format(inst, fmt[i].pixfmt); + + if (fmt[i].pixfmt == V4L2_PIX_FMT_QC10C && + !(inst->bit_depth == VIDC_BITDEPTH_10)) + valid = false; + } + + if (k == index && valid) + break; + if (valid) + k++; + } + + if (i == size) + return NULL; + + return &fmt[i]; +} + +static const struct venus_format * +vdec_try_fmt_common(struct venus_inst *inst, struct v4l2_format *f) +{ + struct v4l2_pix_format_mplane *pixmp = &f->fmt.pix_mp; + struct v4l2_plane_pix_format *pfmt = pixmp->plane_fmt; + const struct venus_format *fmt; + u32 szimage; + + memset(pfmt[0].reserved, 0, sizeof(pfmt[0].reserved)); + memset(pixmp->reserved, 0, sizeof(pixmp->reserved)); + + fmt = find_format(inst, pixmp->pixelformat, f->type); + if (!fmt) { + if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) + pixmp->pixelformat = V4L2_PIX_FMT_NV12; + else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) + pixmp->pixelformat = V4L2_PIX_FMT_H264; + else + return NULL; + fmt = find_format(inst, pixmp->pixelformat, f->type); + if (!fmt) + return NULL; + } + + pixmp->width = clamp(pixmp->width, frame_width_min(inst), + frame_width_max(inst)); + pixmp->height = clamp(pixmp->height, frame_height_min(inst), + frame_height_max(inst)); + + if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) + pixmp->height = ALIGN(pixmp->height, 32); + + if (pixmp->field == V4L2_FIELD_ANY) + pixmp->field = V4L2_FIELD_NONE; + pixmp->num_planes = fmt->num_planes; + pixmp->flags = 0; + + szimage = venus_helper_get_framesz(pixmp->pixelformat, pixmp->width, + pixmp->height); + + if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { + unsigned int stride = pixmp->width; + + if (pixmp->pixelformat == V4L2_PIX_FMT_P010) + stride *= 2; + + pfmt[0].sizeimage = szimage; + pfmt[0].bytesperline = ALIGN(stride, 128); + } else { + pfmt[0].sizeimage = clamp_t(u32, pfmt[0].sizeimage, 0, SZ_8M); + pfmt[0].sizeimage = max(pfmt[0].sizeimage, szimage); + pfmt[0].bytesperline = 0; + } + + return fmt; +} + +static int vdec_try_fmt(struct file *file, void *fh, struct v4l2_format *f) +{ + struct venus_inst *inst = to_inst(file); + + vdec_try_fmt_common(inst, f); + + return 0; +} + +static int vdec_check_src_change(struct venus_inst *inst) +{ + int ret; + + if (inst->subscriptions & V4L2_EVENT_SOURCE_CHANGE && + inst->codec_state == VENUS_DEC_STATE_INIT && + !inst->reconfig) + return -EINVAL; + + if (inst->subscriptions & V4L2_EVENT_SOURCE_CHANGE) + return 0; + + /* + * The code snippet below is a workaround for backward compatibility + * with applications which doesn't support V4L2 events. It will be + * dropped in future once those applications are fixed. 
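+ * Until then, vdec_g_fmt() blocks below for up to 100 ms waiting for the resolution-change notification.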
+ */ + + if (inst->codec_state != VENUS_DEC_STATE_INIT) + goto done; + + ret = wait_event_timeout(inst->reconf_wait, inst->reconfig, + msecs_to_jiffies(100)); + if (!ret) + return -EINVAL; + + if (!(inst->codec_state == VENUS_DEC_STATE_CAPTURE_SETUP) || + !inst->reconfig) + dev_dbg(inst->core->dev, VDBGH "wrong state\n"); + +done: + return 0; +} + +static int vdec_g_fmt(struct file *file, void *fh, struct v4l2_format *f) +{ + struct venus_inst *inst = to_inst(file); + const struct venus_format *fmt = NULL; + struct v4l2_pix_format_mplane *pixmp = &f->fmt.pix_mp; + int ret; + + if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) + fmt = inst->fmt_cap; + else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) + fmt = inst->fmt_out; + + if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { + ret = vdec_check_src_change(inst); + if (ret) + return ret; + } + + pixmp->pixelformat = fmt->pixfmt; + + if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { + pixmp->width = inst->width; + pixmp->height = inst->height; + pixmp->colorspace = inst->colorspace; + pixmp->ycbcr_enc = inst->ycbcr_enc; + pixmp->quantization = inst->quantization; + pixmp->xfer_func = inst->xfer_func; + } else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { + pixmp->width = inst->out_width; + pixmp->height = inst->out_height; + } + + vdec_try_fmt_common(inst, f); + + return 0; +} + +static int vdec_s_fmt(struct file *file, void *fh, struct v4l2_format *f) +{ + struct venus_inst *inst = to_inst(file); + struct v4l2_pix_format_mplane *pixmp = &f->fmt.pix_mp; + struct v4l2_pix_format_mplane orig_pixmp; + const struct venus_format *fmt; + struct v4l2_format format; + u32 pixfmt_out = 0, pixfmt_cap = 0; + struct vb2_queue *q; + + q = v4l2_m2m_get_vq(inst->m2m_ctx, f->type); + if (!q) + return -EINVAL; + + if (vb2_is_busy(q)) + return -EBUSY; + + orig_pixmp = *pixmp; + + fmt = vdec_try_fmt_common(inst, f); + + if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { + pixfmt_out = pixmp->pixelformat; + pixfmt_cap = inst->fmt_cap->pixfmt; + } else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { + pixfmt_cap = pixmp->pixelformat; + pixfmt_out = inst->fmt_out->pixfmt; + } + + memset(&format, 0, sizeof(format)); + + format.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; + format.fmt.pix_mp.pixelformat = pixfmt_out; + format.fmt.pix_mp.width = orig_pixmp.width; + format.fmt.pix_mp.height = orig_pixmp.height; + vdec_try_fmt_common(inst, &format); + + if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { + inst->out_width = format.fmt.pix_mp.width; + inst->out_height = format.fmt.pix_mp.height; + inst->colorspace = pixmp->colorspace; + inst->ycbcr_enc = pixmp->ycbcr_enc; + inst->quantization = pixmp->quantization; + inst->xfer_func = pixmp->xfer_func; + inst->input_buf_size = pixmp->plane_fmt[0].sizeimage; + } + + memset(&format, 0, sizeof(format)); + + format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; + format.fmt.pix_mp.pixelformat = pixfmt_cap; + format.fmt.pix_mp.width = orig_pixmp.width; + format.fmt.pix_mp.height = orig_pixmp.height; + vdec_try_fmt_common(inst, &format); + + inst->width = format.fmt.pix_mp.width; + inst->height = format.fmt.pix_mp.height; + inst->crop.top = 0; + inst->crop.left = 0; + inst->crop.width = inst->width; + inst->crop.height = inst->height; + + if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) + inst->fmt_out = fmt; + else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { + inst->fmt_cap = fmt; + inst->output2_buf_size = + venus_helper_get_framesz(pixfmt_cap, orig_pixmp.width, orig_pixmp.height); + } + + return 
0; +} + +static int +vdec_g_selection(struct file *file, void *fh, struct v4l2_selection *s) +{ + struct venus_inst *inst = to_inst(file); + + if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE && + s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) + return -EINVAL; + + s->r.top = 0; + s->r.left = 0; + + switch (s->target) { + case V4L2_SEL_TGT_CROP_BOUNDS: + case V4L2_SEL_TGT_CROP_DEFAULT: + case V4L2_SEL_TGT_CROP: + if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) + return -EINVAL; + s->r.width = inst->out_width; + s->r.height = inst->out_height; + break; + case V4L2_SEL_TGT_COMPOSE_BOUNDS: + case V4L2_SEL_TGT_COMPOSE_PADDED: + if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) + return -EINVAL; + s->r.width = inst->width; + s->r.height = inst->height; + break; + case V4L2_SEL_TGT_COMPOSE_DEFAULT: + case V4L2_SEL_TGT_COMPOSE: + if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) + return -EINVAL; + s->r = inst->crop; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int +vdec_querycap(struct file *file, void *fh, struct v4l2_capability *cap) +{ + strscpy(cap->driver, "qcom-venus", sizeof(cap->driver)); + strscpy(cap->card, "Qualcomm Venus video decoder", sizeof(cap->card)); + strscpy(cap->bus_info, "platform:qcom-venus", sizeof(cap->bus_info)); + + return 0; +} + +static int vdec_enum_fmt(struct file *file, void *fh, struct v4l2_fmtdesc *f) +{ + struct venus_inst *inst = to_inst(file); + const struct venus_format *fmt; + + memset(f->reserved, 0, sizeof(f->reserved)); + + fmt = find_format_by_index(inst, f->index, f->type); + if (!fmt) + return -EINVAL; + + f->pixelformat = fmt->pixfmt; + f->flags = fmt->flags; + + return 0; +} + +static int vdec_s_parm(struct file *file, void *fh, struct v4l2_streamparm *a) +{ + struct venus_inst *inst = to_inst(file); + struct v4l2_captureparm *cap = &a->parm.capture; + struct v4l2_fract *timeperframe = &cap->timeperframe; + u64 us_per_frame, fps; + + if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE && + a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) + return -EINVAL; + + memset(cap->reserved, 0, sizeof(cap->reserved)); + if (!timeperframe->denominator) + timeperframe->denominator = inst->timeperframe.denominator; + if (!timeperframe->numerator) + timeperframe->numerator = inst->timeperframe.numerator; + cap->readbuffers = 0; + cap->extendedmode = 0; + cap->capability = V4L2_CAP_TIMEPERFRAME; + us_per_frame = timeperframe->numerator * (u64)USEC_PER_SEC; + do_div(us_per_frame, timeperframe->denominator); + + if (!us_per_frame) + return -EINVAL; + + fps = (u64)USEC_PER_SEC; + do_div(fps, us_per_frame); + + inst->fps = fps; + inst->timeperframe = *timeperframe; + + return 0; +} + +static int vdec_enum_framesizes(struct file *file, void *fh, + struct v4l2_frmsizeenum *fsize) +{ + struct venus_inst *inst = to_inst(file); + const struct venus_format *fmt; + + fmt = find_format(inst, fsize->pixel_format, + V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE); + if (!fmt) { + fmt = find_format(inst, fsize->pixel_format, + V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE); + if (!fmt) + return -EINVAL; + } + + if (fsize->index) + return -EINVAL; + + fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE; + + fsize->stepwise.min_width = frame_width_min(inst); + fsize->stepwise.max_width = frame_width_max(inst); + fsize->stepwise.step_width = frame_width_step(inst); + fsize->stepwise.min_height = frame_height_min(inst); + fsize->stepwise.max_height = frame_height_max(inst); + fsize->stepwise.step_height = frame_height_step(inst); + + return 0; +} + +static int vdec_subscribe_event(struct v4l2_fh *fh, + const struct 
v4l2_event_subscription *sub) +{ + struct venus_inst *inst = container_of(fh, struct venus_inst, fh); + int ret; + + switch (sub->type) { + case V4L2_EVENT_EOS: + return v4l2_event_subscribe(fh, sub, 2, NULL); + case V4L2_EVENT_SOURCE_CHANGE: + ret = v4l2_src_change_event_subscribe(fh, sub); + if (ret) + return ret; + inst->subscriptions |= V4L2_EVENT_SOURCE_CHANGE; + return 0; + case V4L2_EVENT_CTRL: + return v4l2_ctrl_subscribe_event(fh, sub); + default: + return -EINVAL; + } +} + +static int +vdec_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder_cmd *cmd) +{ + struct venus_inst *inst = to_inst(file); + struct vb2_queue *dst_vq; + struct hfi_frame_data fdata = {0}; + int ret; + + ret = v4l2_m2m_ioctl_try_decoder_cmd(file, fh, cmd); + if (ret) + return ret; + + mutex_lock(&inst->lock); + + if (cmd->cmd == V4L2_DEC_CMD_STOP) { + /* + * Implement V4L2_DEC_CMD_STOP by enqueue an empty buffer on + * decoder input to signal EOS. + */ + if (!(inst->streamon_out && inst->streamon_cap)) + goto unlock; + + fdata.buffer_type = HFI_BUFFER_INPUT; + fdata.flags |= HFI_BUFFERFLAG_EOS; + if (IS_V6(inst->core) && is_fw_rev_or_older(inst->core, 1, 0, 87)) + fdata.device_addr = 0; + else + fdata.device_addr = 0xdeadb000; + + ret = hfi_session_process_buf(inst, &fdata); + + if (!ret && inst->codec_state == VENUS_DEC_STATE_DECODING) { + inst->codec_state = VENUS_DEC_STATE_DRAIN; + inst->drain_active = true; + } + } else if (cmd->cmd == V4L2_DEC_CMD_START && + inst->codec_state == VENUS_DEC_STATE_STOPPED) { + dst_vq = v4l2_m2m_get_vq(inst->fh.m2m_ctx, + V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE); + vb2_clear_last_buffer_dequeued(dst_vq); + + inst->codec_state = VENUS_DEC_STATE_DECODING; + } + +unlock: + mutex_unlock(&inst->lock); + return ret; +} + +static const struct v4l2_ioctl_ops vdec_ioctl_ops = { + .vidioc_querycap = vdec_querycap, + .vidioc_enum_fmt_vid_cap = vdec_enum_fmt, + .vidioc_enum_fmt_vid_out = vdec_enum_fmt, + .vidioc_s_fmt_vid_cap_mplane = vdec_s_fmt, + .vidioc_s_fmt_vid_out_mplane = vdec_s_fmt, + .vidioc_g_fmt_vid_cap_mplane = vdec_g_fmt, + .vidioc_g_fmt_vid_out_mplane = vdec_g_fmt, + .vidioc_try_fmt_vid_cap_mplane = vdec_try_fmt, + .vidioc_try_fmt_vid_out_mplane = vdec_try_fmt, + .vidioc_g_selection = vdec_g_selection, + .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs, + .vidioc_querybuf = v4l2_m2m_ioctl_querybuf, + .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs, + .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf, + .vidioc_qbuf = v4l2_m2m_ioctl_qbuf, + .vidioc_expbuf = v4l2_m2m_ioctl_expbuf, + .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf, + .vidioc_streamon = v4l2_m2m_ioctl_streamon, + .vidioc_streamoff = v4l2_m2m_ioctl_streamoff, + .vidioc_s_parm = vdec_s_parm, + .vidioc_enum_framesizes = vdec_enum_framesizes, + .vidioc_subscribe_event = vdec_subscribe_event, + .vidioc_unsubscribe_event = v4l2_event_unsubscribe, + .vidioc_try_decoder_cmd = v4l2_m2m_ioctl_try_decoder_cmd, + .vidioc_decoder_cmd = vdec_decoder_cmd, +}; + +static int vdec_pm_get(struct venus_inst *inst) +{ + struct venus_core *core = inst->core; + struct device *dev = core->dev_dec; + int ret; + + mutex_lock(&core->pm_lock); + ret = pm_runtime_resume_and_get(dev); + mutex_unlock(&core->pm_lock); + + return ret; +} + +static int vdec_pm_put(struct venus_inst *inst, bool autosuspend) +{ + struct venus_core *core = inst->core; + struct device *dev = core->dev_dec; + int ret; + + mutex_lock(&core->pm_lock); + + if (autosuspend) + ret = pm_runtime_put_autosuspend(dev); + else + ret = pm_runtime_put_sync(dev); + + 
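+ /* the put is done under core->pm_lock, which serializes decoder runtime PM transitions with vdec_pm_get() and vdec_pm_get_put() */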
mutex_unlock(&core->pm_lock); + + return ret < 0 ? ret : 0; +} + +static int vdec_pm_get_put(struct venus_inst *inst) +{ + struct venus_core *core = inst->core; + struct device *dev = core->dev_dec; + int ret = 0; + + mutex_lock(&core->pm_lock); + + if (pm_runtime_suspended(dev)) { + ret = pm_runtime_resume_and_get(dev); + if (ret < 0) + goto error; + + ret = pm_runtime_put_autosuspend(dev); + } + +error: + mutex_unlock(&core->pm_lock); + + return ret < 0 ? ret : 0; +} + +static void vdec_pm_touch(struct venus_inst *inst) +{ + pm_runtime_mark_last_busy(inst->core->dev_dec); +} + +static int vdec_set_properties(struct venus_inst *inst) +{ + struct vdec_controls *ctr = &inst->controls.dec; + struct hfi_enable en = { .enable = 1 }; + u32 ptype, decode_order, conceal; + int ret; + + if (ctr->post_loop_deb_mode) { + ptype = HFI_PROPERTY_CONFIG_VDEC_POST_LOOP_DEBLOCKER; + ret = hfi_session_set_property(inst, ptype, &en); + if (ret) + return ret; + } + + if (ctr->display_delay_enable && ctr->display_delay == 0) { + ptype = HFI_PROPERTY_PARAM_VDEC_OUTPUT_ORDER; + decode_order = HFI_OUTPUT_ORDER_DECODE; + ret = hfi_session_set_property(inst, ptype, &decode_order); + if (ret) + return ret; + } + + /* Enabling sufficient sequence change support for VP9 */ + if (is_fw_rev_or_newer(inst->core, 5, 4, 51)) { + ptype = HFI_PROPERTY_PARAM_VDEC_ENABLE_SUFFICIENT_SEQCHANGE_EVENT; + ret = hfi_session_set_property(inst, ptype, &en); + if (ret) + return ret; + } + + ptype = HFI_PROPERTY_PARAM_VDEC_CONCEAL_COLOR; + conceal = ctr->conceal_color & 0xffff; + conceal |= ((ctr->conceal_color >> 16) & 0xffff) << 10; + conceal |= ((ctr->conceal_color >> 32) & 0xffff) << 20; + + ret = hfi_session_set_property(inst, ptype, &conceal); + if (ret) + return ret; + + return 0; +} + +static int vdec_set_work_route(struct venus_inst *inst) +{ + u32 ptype = HFI_PROPERTY_PARAM_WORK_ROUTE; + struct hfi_video_work_route wr; + + if (!(IS_IRIS2(inst->core) || IS_IRIS2_1(inst->core))) + return 0; + + wr.video_work_route = inst->core->res->num_vpp_pipes; + + return hfi_session_set_property(inst, ptype, &wr); +} + +#define is_ubwc_fmt(fmt) (!!((fmt) & HFI_COLOR_FORMAT_UBWC_BASE)) +#define is_10bit_ubwc_fmt(fmt) (!!((fmt) & HFI_COLOR_FORMAT_10_BIT_BASE & \ + HFI_COLOR_FORMAT_UBWC_BASE)) + + +static int vdec_output_conf(struct venus_inst *inst) +{ + struct venus_core *core = inst->core; + struct hfi_enable en = { .enable = 1 }; + struct hfi_buffer_requirements bufreq; + u32 width = inst->width; + u32 height = inst->height; + u32 out_fmt, out2_fmt; + bool ubwc = false; + u32 ptype; + int ret; + + ret = venus_helper_set_work_mode(inst); + if (ret) + return ret; + + if (core->res->hfi_version == HFI_VERSION_1XX) { + ptype = HFI_PROPERTY_PARAM_VDEC_CONTINUE_DATA_TRANSFER; + ret = hfi_session_set_property(inst, ptype, &en); + if (ret) + return ret; + } + + /* Force searching UBWC formats for bigger then HD resolutions */ + if (width > 1920 && height > ALIGN(1080, 32)) + ubwc = true; + + /* For Venus v4/v6 UBWC format is mandatory */ + if (IS_V4(core) || IS_V6(core)) + ubwc = true; + + ret = venus_helper_get_out_fmts(inst, inst->fmt_cap->pixfmt, &out_fmt, + &out2_fmt, ubwc); + if (ret) + return ret; + + inst->output_buf_size = + venus_helper_get_framesz_raw(out_fmt, width, height); + inst->output2_buf_size = + venus_helper_get_framesz_raw(out2_fmt, width, height); + + if (is_ubwc_fmt(out_fmt)) { + inst->opb_buftype = HFI_BUFFER_OUTPUT2; + inst->opb_fmt = out2_fmt; + inst->dpb_buftype = HFI_BUFFER_OUTPUT; + inst->dpb_fmt = out_fmt; + } else if 
(is_ubwc_fmt(out2_fmt) || is_10bit_ubwc_fmt(out_fmt)) { + inst->opb_buftype = HFI_BUFFER_OUTPUT; + inst->opb_fmt = out_fmt; + inst->dpb_buftype = HFI_BUFFER_OUTPUT2; + inst->dpb_fmt = out2_fmt; + } else { + inst->opb_buftype = HFI_BUFFER_OUTPUT; + inst->opb_fmt = out_fmt; + inst->dpb_buftype = 0; + inst->dpb_fmt = 0; + } + + ret = venus_helper_set_raw_format(inst, inst->opb_fmt, + inst->opb_buftype); + if (ret) + return ret; + + ret = venus_helper_set_format_constraints(inst); + if (ret) + return ret; + + if (inst->dpb_fmt) { + ret = venus_helper_set_multistream(inst, false, true); + if (ret) + return ret; + + ret = venus_helper_set_raw_format(inst, inst->dpb_fmt, + inst->dpb_buftype); + if (ret) + return ret; + + ret = venus_helper_set_output_resolution(inst, width, height, + HFI_BUFFER_OUTPUT2); + if (ret) + return ret; + } + + if (IS_V3(core) || IS_V4(core) || IS_V6(core)) { + ret = venus_helper_get_bufreq(inst, HFI_BUFFER_OUTPUT, &bufreq); + if (ret) + return ret; + + if (bufreq.size > inst->output_buf_size) + return -EINVAL; + + if (inst->dpb_fmt) { + ret = venus_helper_get_bufreq(inst, HFI_BUFFER_OUTPUT2, + &bufreq); + if (ret) + return ret; + + if (bufreq.size > inst->output2_buf_size) + return -EINVAL; + } + + if (inst->output2_buf_size) { + ret = venus_helper_set_bufsize(inst, + inst->output2_buf_size, + HFI_BUFFER_OUTPUT2); + if (ret) + return ret; + } + + if (inst->output_buf_size) { + ret = venus_helper_set_bufsize(inst, + inst->output_buf_size, + HFI_BUFFER_OUTPUT); + if (ret) + return ret; + } + } + + ret = venus_helper_set_dyn_bufmode(inst); + if (ret) + return ret; + + return 0; +} + +static int vdec_session_init(struct venus_inst *inst) +{ + int ret; + + ret = venus_helper_session_init(inst); + if (ret == -EALREADY) + return 0; + else if (ret) + return ret; + + ret = venus_helper_set_input_resolution(inst, frame_width_min(inst), + frame_height_min(inst)); + if (ret) + goto deinit; + + return 0; +deinit: + hfi_session_deinit(inst); + return ret; +} + +static int vdec_num_buffers(struct venus_inst *inst, unsigned int *in_num, + unsigned int *out_num) +{ + enum hfi_version ver = inst->core->res->hfi_version; + struct hfi_buffer_requirements bufreq; + int ret; + + *in_num = *out_num = 0; + + ret = venus_helper_get_bufreq(inst, HFI_BUFFER_INPUT, &bufreq); + if (ret) + return ret; + + *in_num = hfi_bufreq_get_count_min(&bufreq, ver); + + ret = venus_helper_get_bufreq(inst, HFI_BUFFER_OUTPUT, &bufreq); + if (ret) + return ret; + + *out_num = hfi_bufreq_get_count_min(&bufreq, ver); + + return 0; +} + +static int vdec_queue_setup(struct vb2_queue *q, + unsigned int *num_buffers, unsigned int *num_planes, + unsigned int sizes[], struct device *alloc_devs[]) +{ + struct venus_inst *inst = vb2_get_drv_priv(q); + struct venus_core *core = inst->core; + unsigned int in_num, out_num; + int ret = 0; + + if (*num_planes) { + unsigned int output_buf_size = venus_helper_get_opb_size(inst); + + if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE && + *num_planes != inst->fmt_out->num_planes) + return -EINVAL; + + if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE && + *num_planes != inst->fmt_cap->num_planes) + return -EINVAL; + + if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE && + sizes[0] < inst->input_buf_size) + return -EINVAL; + + if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE && + sizes[0] < output_buf_size) + return -EINVAL; + + return 0; + } + + if (test_bit(0, &core->sys_error)) { + if (inst->nonblock) + return -EAGAIN; + + ret = wait_event_interruptible(core->sys_err_done, + 
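+				/* interruptible wait for a pending system error recovery to finish; a signal aborts queue setup */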
!test_bit(0, &core->sys_error)); + if (ret) + return ret; + } + + ret = vdec_pm_get(inst); + if (ret) + return ret; + + ret = vdec_session_init(inst); + if (ret) + goto put_power; + + ret = vdec_num_buffers(inst, &in_num, &out_num); + if (ret) + goto put_power; + + ret = vdec_pm_put(inst, false); + if (ret) + return ret; + + switch (q->type) { + case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: + *num_planes = inst->fmt_out->num_planes; + sizes[0] = venus_helper_get_framesz(inst->fmt_out->pixfmt, + inst->out_width, + inst->out_height); + sizes[0] = max(sizes[0], inst->input_buf_size); + inst->input_buf_size = sizes[0]; + *num_buffers = max(*num_buffers, in_num); + inst->num_input_bufs = *num_buffers; + inst->num_output_bufs = out_num; + break; + case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: + *num_planes = inst->fmt_cap->num_planes; + sizes[0] = venus_helper_get_framesz(inst->fmt_cap->pixfmt, + inst->width, + inst->height); + inst->output_buf_size = sizes[0]; + *num_buffers = max(*num_buffers, out_num); + inst->num_output_bufs = *num_buffers; + + mutex_lock(&inst->lock); + if (inst->codec_state == VENUS_DEC_STATE_CAPTURE_SETUP) + inst->codec_state = VENUS_DEC_STATE_STOPPED; + mutex_unlock(&inst->lock); + break; + default: + ret = -EINVAL; + break; + } + + return ret; + +put_power: + vdec_pm_put(inst, false); + return ret; +} + +static int vdec_verify_conf(struct venus_inst *inst) +{ + enum hfi_version ver = inst->core->res->hfi_version; + struct hfi_buffer_requirements bufreq; + int ret; + + if (!inst->num_input_bufs || !inst->num_output_bufs) + return -EINVAL; + + ret = venus_helper_get_bufreq(inst, HFI_BUFFER_OUTPUT, &bufreq); + if (ret) + return ret; + + if (inst->num_output_bufs < bufreq.count_actual || + inst->num_output_bufs < hfi_bufreq_get_count_min(&bufreq, ver)) + return -EINVAL; + + ret = venus_helper_get_bufreq(inst, HFI_BUFFER_INPUT, &bufreq); + if (ret) + return ret; + + if (inst->num_input_bufs < hfi_bufreq_get_count_min(&bufreq, ver)) + return -EINVAL; + + return 0; +} + +static int vdec_start_capture(struct venus_inst *inst) +{ + int ret; + + if (!inst->streamon_out) + return 0; + + if (inst->codec_state == VENUS_DEC_STATE_DECODING) { + if (inst->reconfig) + goto reconfigure; + + venus_helper_queue_dpb_bufs(inst); + venus_helper_process_initial_cap_bufs(inst); + inst->streamon_cap = 1; + return 0; + } + + if (inst->codec_state != VENUS_DEC_STATE_STOPPED) + return 0; + +reconfigure: + ret = vdec_output_conf(inst); + if (ret) + return ret; + + ret = venus_helper_set_num_bufs(inst, inst->num_input_bufs, + VB2_MAX_FRAME, VB2_MAX_FRAME); + if (ret) + return ret; + + ret = venus_helper_intbufs_realloc(inst); + if (ret) + goto err; + + venus_pm_load_scale(inst); + + inst->next_buf_last = false; + + ret = venus_helper_alloc_dpb_bufs(inst); + if (ret) + goto err; + + ret = hfi_session_continue(inst); + if (ret) + goto free_dpb_bufs; + + ret = venus_helper_queue_dpb_bufs(inst); + if (ret) + goto free_dpb_bufs; + + ret = venus_helper_process_initial_cap_bufs(inst); + if (ret) + goto free_dpb_bufs; + + inst->codec_state = VENUS_DEC_STATE_DECODING; + + if (inst->drain_active) + inst->codec_state = VENUS_DEC_STATE_DRAIN; + + inst->streamon_cap = 1; + inst->sequence_cap = 0; + inst->reconfig = false; + inst->drain_active = false; + + return 0; + +free_dpb_bufs: + venus_helper_free_dpb_bufs(inst); +err: + return ret; +} + +static int vdec_start_output(struct venus_inst *inst) +{ + int ret; + + if (inst->codec_state == VENUS_DEC_STATE_SEEK) { + ret = venus_helper_process_initial_out_bufs(inst); + if 
(inst->next_buf_last) + inst->codec_state = VENUS_DEC_STATE_DRC; + else + inst->codec_state = VENUS_DEC_STATE_DECODING; + goto done; + } + + if (inst->codec_state == VENUS_DEC_STATE_INIT || + inst->codec_state == VENUS_DEC_STATE_CAPTURE_SETUP) { + ret = venus_helper_process_initial_out_bufs(inst); + goto done; + } + + if (inst->codec_state != VENUS_DEC_STATE_DEINIT) + return -EINVAL; + + venus_helper_init_instance(inst); + inst->sequence_out = 0; + inst->reconfig = false; + inst->next_buf_last = false; + + ret = vdec_set_properties(inst); + if (ret) + return ret; + + ret = vdec_set_work_route(inst); + if (ret) + return ret; + + ret = vdec_output_conf(inst); + if (ret) + return ret; + + ret = vdec_verify_conf(inst); + if (ret) + return ret; + + ret = venus_helper_set_num_bufs(inst, inst->num_input_bufs, + VB2_MAX_FRAME, VB2_MAX_FRAME); + if (ret) + return ret; + + ret = venus_helper_vb2_start_streaming(inst); + if (ret) + return ret; + + ret = venus_helper_process_initial_out_bufs(inst); + if (ret) + return ret; + + inst->codec_state = VENUS_DEC_STATE_INIT; + +done: + inst->streamon_out = 1; + return ret; +} + +static int vdec_start_streaming(struct vb2_queue *q, unsigned int count) +{ + struct venus_inst *inst = vb2_get_drv_priv(q); + int ret; + + mutex_lock(&inst->lock); + + if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { + ret = vdec_start_capture(inst); + } else { + ret = vdec_pm_get(inst); + if (ret) + goto error; + + ret = venus_pm_acquire_core(inst); + if (ret) + goto put_power; + + ret = vdec_pm_put(inst, true); + if (ret) + goto error; + + ret = vdec_start_output(inst); + } + + if (ret) + goto error; + + mutex_unlock(&inst->lock); + return 0; + +put_power: + vdec_pm_put(inst, false); +error: + venus_helper_buffers_done(inst, q->type, VB2_BUF_STATE_QUEUED); + mutex_unlock(&inst->lock); + return ret; +} + +static void vdec_cancel_dst_buffers(struct venus_inst *inst) +{ + struct vb2_v4l2_buffer *buf; + + while ((buf = v4l2_m2m_dst_buf_remove(inst->m2m_ctx))) + v4l2_m2m_buf_done(buf, VB2_BUF_STATE_ERROR); +} + +static int vdec_stop_capture(struct venus_inst *inst) +{ + int ret = 0; + + switch (inst->codec_state) { + case VENUS_DEC_STATE_DECODING: + ret = hfi_session_flush(inst, HFI_FLUSH_ALL, true); + fallthrough; + case VENUS_DEC_STATE_DRAIN: + inst->codec_state = VENUS_DEC_STATE_STOPPED; + inst->drain_active = false; + fallthrough; + case VENUS_DEC_STATE_SEEK: + vdec_cancel_dst_buffers(inst); + break; + case VENUS_DEC_STATE_DRC: + ret = hfi_session_flush(inst, HFI_FLUSH_OUTPUT, true); + inst->codec_state = VENUS_DEC_STATE_CAPTURE_SETUP; + venus_helper_free_dpb_bufs(inst); + break; + default: + break; + } + + return ret; +} + +static int vdec_stop_output(struct venus_inst *inst) +{ + int ret = 0; + + switch (inst->codec_state) { + case VENUS_DEC_STATE_DECODING: + case VENUS_DEC_STATE_DRAIN: + case VENUS_DEC_STATE_STOPPED: + case VENUS_DEC_STATE_DRC: + ret = hfi_session_flush(inst, HFI_FLUSH_ALL, true); + inst->codec_state = VENUS_DEC_STATE_SEEK; + break; + case VENUS_DEC_STATE_INIT: + case VENUS_DEC_STATE_CAPTURE_SETUP: + ret = hfi_session_flush(inst, HFI_FLUSH_INPUT, true); + break; + default: + break; + } + + return ret; +} + +static void vdec_stop_streaming(struct vb2_queue *q) +{ + struct venus_inst *inst = vb2_get_drv_priv(q); + int ret = -EINVAL; + + vdec_pm_get_put(inst); + + mutex_lock(&inst->lock); + + if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) + ret = vdec_stop_capture(inst); + else + ret = vdec_stop_output(inst); + + venus_helper_buffers_done(inst, q->type, 
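+				/* buffers still owned by the driver are handed back to userspace in the ERROR state */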
VB2_BUF_STATE_ERROR); + + inst->session_error = 0; + + if (ret) + goto unlock; + + if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) + inst->streamon_out = 0; + else + inst->streamon_cap = 0; + +unlock: + mutex_unlock(&inst->lock); +} + +static void vdec_session_release(struct venus_inst *inst) +{ + struct venus_core *core = inst->core; + int ret, abort = 0; + + vdec_pm_get(inst); + + mutex_lock(&inst->lock); + inst->codec_state = VENUS_DEC_STATE_DEINIT; + + ret = hfi_session_stop(inst); + abort = (ret && ret != -EINVAL) ? 1 : 0; + ret = hfi_session_unload_res(inst); + abort = (ret && ret != -EINVAL) ? 1 : 0; + ret = venus_helper_unregister_bufs(inst); + abort = (ret && ret != -EINVAL) ? 1 : 0; + ret = venus_helper_intbufs_free(inst); + abort = (ret && ret != -EINVAL) ? 1 : 0; + ret = hfi_session_deinit(inst); + abort = (ret && ret != -EINVAL) ? 1 : 0; + + if (inst->session_error || test_bit(0, &core->sys_error)) + abort = 1; + + if (abort) + hfi_session_abort(inst); + + venus_helper_free_dpb_bufs(inst); + venus_pm_load_scale(inst); + INIT_LIST_HEAD(&inst->registeredbufs); + mutex_unlock(&inst->lock); + + venus_pm_release_core(inst); + vdec_pm_put(inst, false); +} + +static int vdec_buf_init(struct vb2_buffer *vb) +{ + struct venus_inst *inst = vb2_get_drv_priv(vb->vb2_queue); + + inst->buf_count++; + + return venus_helper_vb2_buf_init(vb); +} + +static void vdec_buf_cleanup(struct vb2_buffer *vb) +{ + struct venus_inst *inst = vb2_get_drv_priv(vb->vb2_queue); + struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); + struct venus_buffer *buf = to_venus_buffer(vbuf); + + mutex_lock(&inst->lock); + if (vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) + if (!list_empty(&inst->registeredbufs)) + list_del_init(&buf->reg_list); + mutex_unlock(&inst->lock); + + inst->buf_count--; + if (!inst->buf_count) + vdec_session_release(inst); +} + +static void vdec_vb2_buf_queue(struct vb2_buffer *vb) +{ + struct venus_inst *inst = vb2_get_drv_priv(vb->vb2_queue); + struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); + static const struct v4l2_event eos = { .type = V4L2_EVENT_EOS }; + + vdec_pm_get_put(inst); + + mutex_lock(&inst->lock); + + if (inst->next_buf_last && V4L2_TYPE_IS_CAPTURE(vb->vb2_queue->type) && + inst->codec_state == VENUS_DEC_STATE_DRC) { + vbuf->flags |= V4L2_BUF_FLAG_LAST; + vbuf->sequence = inst->sequence_cap++; + vbuf->field = V4L2_FIELD_NONE; + vb2_set_plane_payload(vb, 0, 0); + v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_DONE); + v4l2_event_queue_fh(&inst->fh, &eos); + inst->next_buf_last = false; + mutex_unlock(&inst->lock); + return; + } + + venus_helper_vb2_buf_queue(vb); + mutex_unlock(&inst->lock); +} + +static const struct vb2_ops vdec_vb2_ops = { + .queue_setup = vdec_queue_setup, + .buf_init = vdec_buf_init, + .buf_cleanup = vdec_buf_cleanup, + .buf_prepare = venus_helper_vb2_buf_prepare, + .start_streaming = vdec_start_streaming, + .stop_streaming = vdec_stop_streaming, + .buf_queue = vdec_vb2_buf_queue, +}; + +static void vdec_buf_done(struct venus_inst *inst, unsigned int buf_type, + u32 tag, u32 bytesused, u32 data_offset, u32 flags, + u32 hfi_flags, u64 timestamp_us) +{ + enum vb2_buffer_state state = VB2_BUF_STATE_DONE; + struct vb2_v4l2_buffer *vbuf; + struct vb2_buffer *vb; + unsigned int type; + + vdec_pm_touch(inst); + + if (buf_type == HFI_BUFFER_INPUT) + type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; + else + type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; + + vbuf = venus_helper_find_buf(inst, type, tag); + if (!vbuf) { + venus_helper_change_dpb_owner(inst, vbuf, type, 
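+					      /* tag does not match any queued CAPTURE buffer, so treat it as a DPB buffer changing ownership */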
buf_type, tag); + return; + } + + vbuf->flags = flags; + vbuf->field = V4L2_FIELD_NONE; + vb = &vbuf->vb2_buf; + + if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { + vb2_set_plane_payload(vb, 0, bytesused); + vb->planes[0].data_offset = data_offset; + vb->timestamp = timestamp_us * NSEC_PER_USEC; + vbuf->sequence = inst->sequence_cap++; + + if (vbuf->flags & V4L2_BUF_FLAG_LAST) { + const struct v4l2_event ev = { .type = V4L2_EVENT_EOS }; + + v4l2_event_queue_fh(&inst->fh, &ev); + + if (inst->codec_state == VENUS_DEC_STATE_DRAIN) { + inst->drain_active = false; + inst->codec_state = VENUS_DEC_STATE_STOPPED; + } + } + + if (!bytesused) + state = VB2_BUF_STATE_ERROR; + } else { + vbuf->sequence = inst->sequence_out++; + } + + venus_helper_get_ts_metadata(inst, timestamp_us, vbuf); + + if (hfi_flags & HFI_BUFFERFLAG_READONLY) + venus_helper_acquire_buf_ref(vbuf); + + if (hfi_flags & HFI_BUFFERFLAG_DATACORRUPT) + state = VB2_BUF_STATE_ERROR; + + if (hfi_flags & HFI_BUFFERFLAG_DROP_FRAME) { + state = VB2_BUF_STATE_ERROR; + vb2_set_plane_payload(vb, 0, 0); + vb->timestamp = 0; + } + + v4l2_m2m_buf_done(vbuf, state); +} + +static void vdec_event_change(struct venus_inst *inst, + struct hfi_event_data *ev_data, bool sufficient) +{ + static const struct v4l2_event ev = { + .type = V4L2_EVENT_SOURCE_CHANGE, + .u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION }; + struct device *dev = inst->core->dev_dec; + struct v4l2_format format = {}; + + mutex_lock(&inst->lock); + + format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; + format.fmt.pix_mp.pixelformat = inst->fmt_cap->pixfmt; + format.fmt.pix_mp.width = ev_data->width; + format.fmt.pix_mp.height = ev_data->height; + + vdec_try_fmt_common(inst, &format); + + inst->width = format.fmt.pix_mp.width; + inst->height = format.fmt.pix_mp.height; + /* + * Some versions of the firmware do not report crop information for + * all codecs. For these cases, set the crop to the coded resolution. + */ + if (ev_data->input_crop.width > 0 && ev_data->input_crop.height > 0) { + inst->crop.left = ev_data->input_crop.left; + inst->crop.top = ev_data->input_crop.top; + inst->crop.width = ev_data->input_crop.width; + inst->crop.height = ev_data->input_crop.height; + } else { + inst->crop.left = 0; + inst->crop.top = 0; + inst->crop.width = ev_data->width; + inst->crop.height = ev_data->height; + } + + inst->fw_min_cnt = ev_data->buf_count; + /* overwriting this to 11 for vp9 due to fw bug */ + if (inst->hfi_codec == HFI_VIDEO_CODEC_VP9) + inst->fw_min_cnt = 11; + + inst->out_width = ev_data->width; + inst->out_height = ev_data->height; + + if (inst->bit_depth != ev_data->bit_depth) { + inst->bit_depth = ev_data->bit_depth; + if (inst->bit_depth == VIDC_BITDEPTH_10) + inst->fmt_cap = &vdec_formats[VENUS_FMT_P010]; + else + inst->fmt_cap = &vdec_formats[VENUS_FMT_NV12]; + } + + if (inst->pic_struct != ev_data->pic_struct) + inst->pic_struct = ev_data->pic_struct; + + dev_dbg(dev, VDBGM "event %s sufficient resources (%ux%u)\n", + sufficient ? "" : "not", ev_data->width, ev_data->height); + + switch (inst->codec_state) { + case VENUS_DEC_STATE_INIT: + inst->codec_state = VENUS_DEC_STATE_CAPTURE_SETUP; + break; + case VENUS_DEC_STATE_DECODING: + case VENUS_DEC_STATE_DRAIN: + inst->codec_state = VENUS_DEC_STATE_DRC; + break; + default: + break; + } + + /* + * The assumption is that the firmware have to return the last buffer + * before this event is received in the v4l2 driver. Also the firmware + * itself doesn't mark the last decoder output buffer with HFI EOS flag. 
+ */ + + if (inst->codec_state == VENUS_DEC_STATE_DRC) { + int ret; + + inst->next_buf_last = true; + + ret = hfi_session_flush(inst, HFI_FLUSH_OUTPUT, false); + if (ret) + dev_dbg(dev, VDBGH "flush output error %d\n", ret); + } + + inst->next_buf_last = true; + inst->reconfig = true; + v4l2_event_queue_fh(&inst->fh, &ev); + wake_up(&inst->reconf_wait); + + mutex_unlock(&inst->lock); +} + +static void vdec_event_notify(struct venus_inst *inst, u32 event, + struct hfi_event_data *data) +{ + struct venus_core *core = inst->core; + struct device *dev = core->dev_dec; + + vdec_pm_touch(inst); + + switch (event) { + case EVT_SESSION_ERROR: + inst->session_error = true; + venus_helper_vb2_queue_error(inst); + dev_err(dev, "dec: event session error %x\n", inst->error); + break; + case EVT_SYS_EVENT_CHANGE: + switch (data->event_type) { + case HFI_EVENT_DATA_SEQUENCE_CHANGED_SUFFICIENT_BUF_RESOURCES: + vdec_event_change(inst, data, true); + break; + case HFI_EVENT_DATA_SEQUENCE_CHANGED_INSUFFICIENT_BUF_RESOURCES: + vdec_event_change(inst, data, false); + break; + case HFI_EVENT_RELEASE_BUFFER_REFERENCE: + venus_helper_release_buf_ref(inst, data->tag); + break; + default: + break; + } + break; + default: + break; + } +} + +static void vdec_flush_done(struct venus_inst *inst) +{ + dev_dbg(inst->core->dev_dec, VDBGH "flush done\n"); +} + +static const struct hfi_inst_ops vdec_hfi_ops = { + .buf_done = vdec_buf_done, + .event_notify = vdec_event_notify, + .flush_done = vdec_flush_done, +}; + +static void vdec_inst_init(struct venus_inst *inst) +{ + inst->hfi_codec = HFI_VIDEO_CODEC_H264; + inst->fmt_out = &vdec_formats[VENUS_FMT_H264]; + inst->fmt_cap = &vdec_formats[VENUS_FMT_NV12]; + inst->width = frame_width_min(inst); + inst->height = ALIGN(frame_height_min(inst), 32); + inst->crop.left = 0; + inst->crop.top = 0; + inst->crop.width = inst->width; + inst->crop.height = inst->height; + inst->fw_min_cnt = 8; + inst->out_width = frame_width_min(inst); + inst->out_height = frame_height_min(inst); + inst->fps = 30; + inst->timeperframe.numerator = 1; + inst->timeperframe.denominator = 30; + inst->opb_buftype = HFI_BUFFER_OUTPUT; +} + +static void vdec_m2m_device_run(void *priv) +{ +} + +static const struct v4l2_m2m_ops vdec_m2m_ops = { + .device_run = vdec_m2m_device_run, + .job_abort = venus_helper_m2m_job_abort, +}; + +static int m2m_queue_init(void *priv, struct vb2_queue *src_vq, + struct vb2_queue *dst_vq) +{ + struct venus_inst *inst = priv; + int ret; + + src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; + src_vq->io_modes = VB2_MMAP | VB2_DMABUF; + src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY; + src_vq->ops = &vdec_vb2_ops; + src_vq->mem_ops = &vb2_dma_contig_memops; + src_vq->drv_priv = inst; + src_vq->buf_struct_size = sizeof(struct venus_buffer); + src_vq->allow_zero_bytesused = 1; + src_vq->min_buffers_needed = 0; + src_vq->dev = inst->core->dev; + src_vq->lock = &inst->ctx_q_lock; + ret = vb2_queue_init(src_vq); + if (ret) + return ret; + + dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; + dst_vq->io_modes = VB2_MMAP | VB2_DMABUF; + dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY; + dst_vq->ops = &vdec_vb2_ops; + dst_vq->mem_ops = &vb2_dma_contig_memops; + dst_vq->drv_priv = inst; + dst_vq->buf_struct_size = sizeof(struct venus_buffer); + dst_vq->allow_zero_bytesused = 1; + dst_vq->min_buffers_needed = 0; + dst_vq->dev = inst->core->dev; + dst_vq->lock = &inst->ctx_q_lock; + return vb2_queue_init(dst_vq); +} + +static int vdec_open(struct file *file) +{ + struct 
venus_core *core = video_drvdata(file); + struct venus_inst *inst; + int ret; + + inst = kzalloc(sizeof(*inst), GFP_KERNEL); + if (!inst) + return -ENOMEM; + + INIT_LIST_HEAD(&inst->dpbbufs); + INIT_LIST_HEAD(&inst->registeredbufs); + INIT_LIST_HEAD(&inst->internalbufs); + INIT_LIST_HEAD(&inst->list); + mutex_init(&inst->lock); + mutex_init(&inst->ctx_q_lock); + + inst->core = core; + inst->session_type = VIDC_SESSION_TYPE_DEC; + inst->num_output_bufs = 1; + inst->codec_state = VENUS_DEC_STATE_DEINIT; + inst->buf_count = 0; + inst->clk_data.core_id = VIDC_CORE_ID_DEFAULT; + inst->core_acquired = false; + inst->bit_depth = VIDC_BITDEPTH_8; + inst->pic_struct = HFI_INTERLACE_FRAME_PROGRESSIVE; + init_waitqueue_head(&inst->reconf_wait); + inst->nonblock = file->f_flags & O_NONBLOCK; + + venus_helper_init_instance(inst); + + ret = vdec_ctrl_init(inst); + if (ret) + goto err_free; + + ret = hfi_session_create(inst, &vdec_hfi_ops); + if (ret) + goto err_ctrl_deinit; + + vdec_inst_init(inst); + + ida_init(&inst->dpb_ids); + + /* + * create m2m device for every instance, the m2m context scheduling + * is made by firmware side so we do not need to care about. + */ + inst->m2m_dev = v4l2_m2m_init(&vdec_m2m_ops); + if (IS_ERR(inst->m2m_dev)) { + ret = PTR_ERR(inst->m2m_dev); + goto err_session_destroy; + } + + inst->m2m_ctx = v4l2_m2m_ctx_init(inst->m2m_dev, inst, m2m_queue_init); + if (IS_ERR(inst->m2m_ctx)) { + ret = PTR_ERR(inst->m2m_ctx); + goto err_m2m_release; + } + + v4l2_fh_init(&inst->fh, core->vdev_dec); + + inst->fh.ctrl_handler = &inst->ctrl_handler; + v4l2_fh_add(&inst->fh); + inst->fh.m2m_ctx = inst->m2m_ctx; + file->private_data = &inst->fh; + + return 0; + +err_m2m_release: + v4l2_m2m_release(inst->m2m_dev); +err_session_destroy: + hfi_session_destroy(inst); +err_ctrl_deinit: + vdec_ctrl_deinit(inst); +err_free: + kfree(inst); + return ret; +} + +static int vdec_close(struct file *file) +{ + struct venus_inst *inst = to_inst(file); + + vdec_pm_get(inst); + + v4l2_m2m_ctx_release(inst->m2m_ctx); + v4l2_m2m_release(inst->m2m_dev); + vdec_ctrl_deinit(inst); + ida_destroy(&inst->dpb_ids); + hfi_session_destroy(inst); + mutex_destroy(&inst->lock); + mutex_destroy(&inst->ctx_q_lock); + v4l2_fh_del(&inst->fh); + v4l2_fh_exit(&inst->fh); + + vdec_pm_put(inst, false); + + kfree(inst); + return 0; +} + +static const struct v4l2_file_operations vdec_fops = { + .owner = THIS_MODULE, + .open = vdec_open, + .release = vdec_close, + .unlocked_ioctl = video_ioctl2, + .poll = v4l2_m2m_fop_poll, + .mmap = v4l2_m2m_fop_mmap, +}; + +static int vdec_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct video_device *vdev; + struct venus_core *core; + int ret; + + if (!dev->parent) + return -EPROBE_DEFER; + + core = dev_get_drvdata(dev->parent); + if (!core) + return -EPROBE_DEFER; + + platform_set_drvdata(pdev, core); + + if (core->pm_ops->vdec_get) { + ret = core->pm_ops->vdec_get(dev); + if (ret) + return ret; + } + + vdev = video_device_alloc(); + if (!vdev) + return -ENOMEM; + + strscpy(vdev->name, "qcom-venus-decoder", sizeof(vdev->name)); + vdev->release = video_device_release; + vdev->fops = &vdec_fops; + vdev->ioctl_ops = &vdec_ioctl_ops; + vdev->vfl_dir = VFL_DIR_M2M; + vdev->v4l2_dev = &core->v4l2_dev; + vdev->device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING; + + ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1); + if (ret) + goto err_vdev_release; + + core->vdev_dec = vdev; + core->dev_dec = dev; + + video_set_drvdata(vdev, core); + 
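+	/* let the decoder idle for 2 seconds before a runtime suspend is attempted */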
pm_runtime_set_autosuspend_delay(dev, 2000); + pm_runtime_use_autosuspend(dev); + pm_runtime_enable(dev); + + return 0; + +err_vdev_release: + video_device_release(vdev); + return ret; +} + +static void vdec_remove(struct platform_device *pdev) +{ + struct venus_core *core = dev_get_drvdata(pdev->dev.parent); + + video_unregister_device(core->vdev_dec); + pm_runtime_disable(core->dev_dec); + + if (core->pm_ops->vdec_put) + core->pm_ops->vdec_put(core->dev_dec); +} + +static __maybe_unused int vdec_runtime_suspend(struct device *dev) +{ + struct venus_core *core = dev_get_drvdata(dev); + const struct venus_pm_ops *pm_ops = core->pm_ops; + int ret = 0; + + if (pm_ops->vdec_power) + ret = pm_ops->vdec_power(dev, POWER_OFF); + + return ret; +} + +static __maybe_unused int vdec_runtime_resume(struct device *dev) +{ + struct venus_core *core = dev_get_drvdata(dev); + const struct venus_pm_ops *pm_ops = core->pm_ops; + int ret = 0; + + if (pm_ops->vdec_power) + ret = pm_ops->vdec_power(dev, POWER_ON); + + return ret; +} + +static const struct dev_pm_ops vdec_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) + SET_RUNTIME_PM_OPS(vdec_runtime_suspend, vdec_runtime_resume, NULL) +}; + +static const struct of_device_id vdec_dt_match[] = { + { .compatible = "venus-decoder" }, + { } +}; +MODULE_DEVICE_TABLE(of, vdec_dt_match); + +static struct platform_driver qcom_venus_dec_driver = { + .probe = vdec_probe, + .remove_new = vdec_remove, + .driver = { + .name = "qcom-venus-decoder", + .of_match_table = vdec_dt_match, + .pm = &vdec_pm_ops, + }, +}; +module_platform_driver(qcom_venus_dec_driver); + +MODULE_ALIAS("platform:qcom-venus-decoder"); +MODULE_DESCRIPTION("Qualcomm Venus video decoder driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/media/platform/qcom/venus/vdec.h b/drivers/media/platform/qcom/venus/vdec.h new file mode 100644 index 0000000000..6b262d0bf5 --- /dev/null +++ b/drivers/media/platform/qcom/venus/vdec.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. + * Copyright (C) 2017 Linaro Ltd. + */ +#ifndef __VENUS_VDEC_H__ +#define __VENUS_VDEC_H__ + +struct venus_inst; + +int vdec_ctrl_init(struct venus_inst *inst); +void vdec_ctrl_deinit(struct venus_inst *inst); + +#endif diff --git a/drivers/media/platform/qcom/venus/vdec_ctrls.c b/drivers/media/platform/qcom/venus/vdec_ctrls.c new file mode 100644 index 0000000000..7e0f29bf7f --- /dev/null +++ b/drivers/media/platform/qcom/venus/vdec_ctrls.c @@ -0,0 +1,194 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. + * Copyright (C) 2017 Linaro Ltd. 
+ */ +#include <linux/types.h> +#include <media/v4l2-ctrls.h> + +#include "core.h" +#include "helpers.h" +#include "vdec.h" + +static int vdec_op_s_ctrl(struct v4l2_ctrl *ctrl) +{ + struct venus_inst *inst = ctrl_to_inst(ctrl); + struct vdec_controls *ctr = &inst->controls.dec; + + switch (ctrl->id) { + case V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER: + ctr->post_loop_deb_mode = ctrl->val; + break; + case V4L2_CID_MPEG_VIDEO_H264_PROFILE: + case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE: + case V4L2_CID_MPEG_VIDEO_VP8_PROFILE: + case V4L2_CID_MPEG_VIDEO_VP9_PROFILE: + ctr->profile = ctrl->val; + break; + case V4L2_CID_MPEG_VIDEO_H264_LEVEL: + case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL: + case V4L2_CID_MPEG_VIDEO_VP9_LEVEL: + ctr->level = ctrl->val; + break; + case V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY: + ctr->display_delay = ctrl->val; + break; + case V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY_ENABLE: + ctr->display_delay_enable = ctrl->val; + break; + case V4L2_CID_MPEG_VIDEO_DEC_CONCEAL_COLOR: + ctr->conceal_color = *ctrl->p_new.p_s64; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int vdec_op_g_volatile_ctrl(struct v4l2_ctrl *ctrl) +{ + struct venus_inst *inst = ctrl_to_inst(ctrl); + struct vdec_controls *ctr = &inst->controls.dec; + struct hfi_buffer_requirements bufreq; + enum hfi_version ver = inst->core->res->hfi_version; + u32 profile, level; + int ret; + + switch (ctrl->id) { + case V4L2_CID_MPEG_VIDEO_H264_PROFILE: + case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE: + case V4L2_CID_MPEG_VIDEO_VP8_PROFILE: + case V4L2_CID_MPEG_VIDEO_VP9_PROFILE: + ret = venus_helper_get_profile_level(inst, &profile, &level); + if (!ret) + ctr->profile = profile; + ctrl->val = ctr->profile; + break; + case V4L2_CID_MPEG_VIDEO_H264_LEVEL: + case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL: + case V4L2_CID_MPEG_VIDEO_VP9_LEVEL: + ret = venus_helper_get_profile_level(inst, &profile, &level); + if (!ret) + ctr->level = level; + ctrl->val = ctr->level; + break; + case V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER: + ctrl->val = ctr->post_loop_deb_mode; + break; + case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE: + ret = venus_helper_get_bufreq(inst, HFI_BUFFER_OUTPUT, &bufreq); + if (!ret) + ctrl->val = hfi_bufreq_get_count_min(&bufreq, ver); + break; + default: + return -EINVAL; + } + + return 0; +} + +static const struct v4l2_ctrl_ops vdec_ctrl_ops = { + .s_ctrl = vdec_op_s_ctrl, + .g_volatile_ctrl = vdec_op_g_volatile_ctrl, +}; + +int vdec_ctrl_init(struct venus_inst *inst) +{ + struct v4l2_ctrl *ctrl; + int ret; + + ret = v4l2_ctrl_handler_init(&inst->ctrl_handler, 12); + if (ret) + return ret; + + ctrl = v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &vdec_ctrl_ops, + V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE, + V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_CODING_EFFICIENCY, + ~((1 << V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE) | + (1 << V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_SIMPLE)), + V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE); + if (ctrl) + ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE; + + ctrl = v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &vdec_ctrl_ops, + V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL, + V4L2_MPEG_VIDEO_MPEG4_LEVEL_5, + 0, V4L2_MPEG_VIDEO_MPEG4_LEVEL_0); + if (ctrl) + ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE; + + ctrl = v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &vdec_ctrl_ops, + V4L2_CID_MPEG_VIDEO_H264_PROFILE, + V4L2_MPEG_VIDEO_H264_PROFILE_MULTIVIEW_HIGH, + ~((1 << V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE) | + (1 << V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE) | + (1 << V4L2_MPEG_VIDEO_H264_PROFILE_MAIN) | + (1 << 
V4L2_MPEG_VIDEO_H264_PROFILE_HIGH) | + (1 << V4L2_MPEG_VIDEO_H264_PROFILE_STEREO_HIGH) | + (1 << V4L2_MPEG_VIDEO_H264_PROFILE_MULTIVIEW_HIGH)), + V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE); + if (ctrl) + ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE; + + ctrl = v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &vdec_ctrl_ops, + V4L2_CID_MPEG_VIDEO_H264_LEVEL, + V4L2_MPEG_VIDEO_H264_LEVEL_5_1, + 0, V4L2_MPEG_VIDEO_H264_LEVEL_1_0); + if (ctrl) + ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE; + + ctrl = v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &vdec_ctrl_ops, + V4L2_CID_MPEG_VIDEO_VP8_PROFILE, + V4L2_MPEG_VIDEO_VP8_PROFILE_3, + 0, V4L2_MPEG_VIDEO_VP8_PROFILE_0); + if (ctrl) + ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE; + + ctrl = v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &vdec_ctrl_ops, + V4L2_CID_MPEG_VIDEO_VP9_PROFILE, + V4L2_MPEG_VIDEO_VP9_PROFILE_3, + 0, V4L2_MPEG_VIDEO_VP9_PROFILE_0); + if (ctrl) + ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE; + + ctrl = v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &vdec_ctrl_ops, + V4L2_CID_MPEG_VIDEO_VP9_LEVEL, + V4L2_MPEG_VIDEO_VP9_LEVEL_6_2, + 0, V4L2_MPEG_VIDEO_VP9_LEVEL_1_0); + if (ctrl) + ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE; + + v4l2_ctrl_new_std(&inst->ctrl_handler, &vdec_ctrl_ops, + V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER, 0, 1, 1, 0); + + ctrl = v4l2_ctrl_new_std(&inst->ctrl_handler, &vdec_ctrl_ops, + V4L2_CID_MIN_BUFFERS_FOR_CAPTURE, 1, 32, 1, 1); + if (ctrl) + ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE; + + v4l2_ctrl_new_std(&inst->ctrl_handler, &vdec_ctrl_ops, + V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY, + 0, 16383, 1, 0); + + v4l2_ctrl_new_std(&inst->ctrl_handler, &vdec_ctrl_ops, + V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY_ENABLE, + 0, 1, 1, 0); + + v4l2_ctrl_new_std(&inst->ctrl_handler, &vdec_ctrl_ops, + V4L2_CID_MPEG_VIDEO_DEC_CONCEAL_COLOR, 0, + 0xffffffffffffLL, 1, 0x8000800010LL); + + ret = inst->ctrl_handler.error; + if (ret) { + v4l2_ctrl_handler_free(&inst->ctrl_handler); + return ret; + } + + return 0; +} + +void vdec_ctrl_deinit(struct venus_inst *inst) +{ + v4l2_ctrl_handler_free(&inst->ctrl_handler); +} diff --git a/drivers/media/platform/qcom/venus/venc.c b/drivers/media/platform/qcom/venus/venc.c new file mode 100644 index 0000000000..44b13696cf --- /dev/null +++ b/drivers/media/platform/qcom/venus/venc.c @@ -0,0 +1,1657 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. + * Copyright (C) 2017 Linaro Ltd. 
+ */ +#include <linux/clk.h> +#include <linux/module.h> +#include <linux/mod_devicetable.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/slab.h> +#include <media/v4l2-mem2mem.h> +#include <media/videobuf2-dma-contig.h> +#include <media/v4l2-ioctl.h> +#include <media/v4l2-event.h> +#include <media/v4l2-ctrls.h> + +#include "hfi_venus_io.h" +#include "hfi_parser.h" +#include "core.h" +#include "helpers.h" +#include "venc.h" +#include "pm_helpers.h" + +#define NUM_B_FRAMES_MAX 4 + +/* + * Three resons to keep MPLANE formats (despite that the number of planes + * currently is one): + * - the MPLANE formats allow only one plane to be used + * - the downstream driver use MPLANE formats too + * - future firmware versions could add support for >1 planes + */ +static const struct venus_format venc_formats[] = { + [VENUS_FMT_NV12] = { + .pixfmt = V4L2_PIX_FMT_NV12, + .num_planes = 1, + .type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE, + }, + [VENUS_FMT_H264] = { + .pixfmt = V4L2_PIX_FMT_H264, + .num_planes = 1, + .type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE, + }, + [VENUS_FMT_VP8] = { + .pixfmt = V4L2_PIX_FMT_VP8, + .num_planes = 1, + .type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE, + }, + [VENUS_FMT_HEVC] = { + .pixfmt = V4L2_PIX_FMT_HEVC, + .num_planes = 1, + .type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE, + }, + [VENUS_FMT_MPEG4] = { + .pixfmt = V4L2_PIX_FMT_MPEG4, + .num_planes = 1, + .type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE, + }, + [VENUS_FMT_H263] = { + .pixfmt = V4L2_PIX_FMT_H263, + .num_planes = 1, + .type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE, + }, +}; + +static const struct venus_format * +find_format(struct venus_inst *inst, u32 pixfmt, u32 type) +{ + const struct venus_format *fmt = venc_formats; + unsigned int size = ARRAY_SIZE(venc_formats); + unsigned int i; + + for (i = 0; i < size; i++) { + if (fmt[i].pixfmt == pixfmt) + break; + } + + if (i == size || fmt[i].type != type) + return NULL; + + if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE && + !venus_helper_check_codec(inst, fmt[i].pixfmt)) + return NULL; + + return &fmt[i]; +} + +static const struct venus_format * +find_format_by_index(struct venus_inst *inst, unsigned int index, u32 type) +{ + const struct venus_format *fmt = venc_formats; + unsigned int size = ARRAY_SIZE(venc_formats); + unsigned int i, k = 0; + + if (index > size) + return NULL; + + for (i = 0; i < size; i++) { + bool valid; + + if (fmt[i].type != type) + continue; + valid = type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE || + venus_helper_check_codec(inst, fmt[i].pixfmt); + if (k == index && valid) + break; + if (valid) + k++; + } + + if (i == size) + return NULL; + + return &fmt[i]; +} + +static int venc_v4l2_to_hfi(int id, int value) +{ + switch (id) { + case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE: + switch (value) { + case V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC: + default: + return HFI_H264_ENTROPY_CAVLC; + case V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC: + return HFI_H264_ENTROPY_CABAC; + } + case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE: + switch (value) { + case V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED: + default: + return HFI_H264_DB_MODE_ALL_BOUNDARY; + case V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED: + return HFI_H264_DB_MODE_DISABLE; + case V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY: + return HFI_H264_DB_MODE_SKIP_SLICE_BOUNDARY; + } + } + + return 0; +} + +static int +venc_querycap(struct file *file, void *fh, struct v4l2_capability *cap) +{ + strscpy(cap->driver, "qcom-venus", sizeof(cap->driver)); + 
strscpy(cap->card, "Qualcomm Venus video encoder", sizeof(cap->card)); + strscpy(cap->bus_info, "platform:qcom-venus", sizeof(cap->bus_info)); + + return 0; +} + +static int venc_enum_fmt(struct file *file, void *fh, struct v4l2_fmtdesc *f) +{ + struct venus_inst *inst = to_inst(file); + const struct venus_format *fmt; + + fmt = find_format_by_index(inst, f->index, f->type); + + memset(f->reserved, 0, sizeof(f->reserved)); + + if (!fmt) + return -EINVAL; + + f->pixelformat = fmt->pixfmt; + + return 0; +} + +static const struct venus_format * +venc_try_fmt_common(struct venus_inst *inst, struct v4l2_format *f) +{ + struct v4l2_pix_format_mplane *pixmp = &f->fmt.pix_mp; + struct v4l2_plane_pix_format *pfmt = pixmp->plane_fmt; + const struct venus_format *fmt; + u32 sizeimage; + + memset(pfmt[0].reserved, 0, sizeof(pfmt[0].reserved)); + memset(pixmp->reserved, 0, sizeof(pixmp->reserved)); + + fmt = find_format(inst, pixmp->pixelformat, f->type); + if (!fmt) { + if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) + pixmp->pixelformat = V4L2_PIX_FMT_H264; + else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) + pixmp->pixelformat = V4L2_PIX_FMT_NV12; + else + return NULL; + fmt = find_format(inst, pixmp->pixelformat, f->type); + if (!fmt) + return NULL; + } + + pixmp->width = clamp(pixmp->width, frame_width_min(inst), + frame_width_max(inst)); + pixmp->height = clamp(pixmp->height, frame_height_min(inst), + frame_height_max(inst)); + + pixmp->width = ALIGN(pixmp->width, 128); + pixmp->height = ALIGN(pixmp->height, 32); + + pixmp->width = ALIGN(pixmp->width, 2); + pixmp->height = ALIGN(pixmp->height, 2); + + if (pixmp->field == V4L2_FIELD_ANY) + pixmp->field = V4L2_FIELD_NONE; + pixmp->num_planes = fmt->num_planes; + pixmp->flags = 0; + + sizeimage = venus_helper_get_framesz(pixmp->pixelformat, + pixmp->width, + pixmp->height); + pfmt[0].sizeimage = max(ALIGN(pfmt[0].sizeimage, SZ_4K), sizeimage); + + if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) + pfmt[0].bytesperline = ALIGN(pixmp->width, 128); + else + pfmt[0].bytesperline = 0; + + return fmt; +} + +static int venc_try_fmt(struct file *file, void *fh, struct v4l2_format *f) +{ + struct venus_inst *inst = to_inst(file); + + venc_try_fmt_common(inst, f); + + return 0; +} + +static int venc_s_fmt(struct file *file, void *fh, struct v4l2_format *f) +{ + struct venus_inst *inst = to_inst(file); + struct v4l2_pix_format_mplane *pixmp = &f->fmt.pix_mp; + struct v4l2_pix_format_mplane orig_pixmp; + const struct venus_format *fmt; + struct v4l2_format format; + u32 pixfmt_out = 0, pixfmt_cap = 0; + struct vb2_queue *q; + + q = v4l2_m2m_get_vq(inst->m2m_ctx, f->type); + if (!q) + return -EINVAL; + + if (vb2_is_busy(q)) + return -EBUSY; + + orig_pixmp = *pixmp; + + fmt = venc_try_fmt_common(inst, f); + if (!fmt) + return -EINVAL; + + if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { + pixfmt_out = pixmp->pixelformat; + pixfmt_cap = inst->fmt_cap->pixfmt; + } else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { + pixfmt_cap = pixmp->pixelformat; + pixfmt_out = inst->fmt_out->pixfmt; + } + + memset(&format, 0, sizeof(format)); + + format.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; + format.fmt.pix_mp.pixelformat = pixfmt_out; + format.fmt.pix_mp.width = orig_pixmp.width; + format.fmt.pix_mp.height = orig_pixmp.height; + venc_try_fmt_common(inst, &format); + + if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { + inst->out_width = format.fmt.pix_mp.width; + inst->out_height = format.fmt.pix_mp.height; + inst->colorspace = pixmp->colorspace; + 
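+		/* colorimetry is taken from the raw OUTPUT queue and reported back on the coded CAPTURE queue by venc_g_fmt() */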
inst->ycbcr_enc = pixmp->ycbcr_enc; + inst->quantization = pixmp->quantization; + inst->xfer_func = pixmp->xfer_func; + } + + memset(&format, 0, sizeof(format)); + + format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; + format.fmt.pix_mp.pixelformat = pixfmt_cap; + format.fmt.pix_mp.width = orig_pixmp.width; + format.fmt.pix_mp.height = orig_pixmp.height; + venc_try_fmt_common(inst, &format); + + inst->width = format.fmt.pix_mp.width; + inst->height = format.fmt.pix_mp.height; + + if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) + inst->fmt_out = fmt; + else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { + inst->fmt_cap = fmt; + inst->output_buf_size = pixmp->plane_fmt[0].sizeimage; + } + + return 0; +} + +static int venc_g_fmt(struct file *file, void *fh, struct v4l2_format *f) +{ + struct v4l2_pix_format_mplane *pixmp = &f->fmt.pix_mp; + struct venus_inst *inst = to_inst(file); + const struct venus_format *fmt; + + if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) + fmt = inst->fmt_cap; + else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) + fmt = inst->fmt_out; + else + return -EINVAL; + + pixmp->pixelformat = fmt->pixfmt; + + if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { + pixmp->width = inst->width; + pixmp->height = inst->height; + pixmp->colorspace = inst->colorspace; + pixmp->ycbcr_enc = inst->ycbcr_enc; + pixmp->quantization = inst->quantization; + pixmp->xfer_func = inst->xfer_func; + } else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { + pixmp->width = inst->out_width; + pixmp->height = inst->out_height; + } + + venc_try_fmt_common(inst, f); + + return 0; +} + +static int +venc_g_selection(struct file *file, void *fh, struct v4l2_selection *s) +{ + struct venus_inst *inst = to_inst(file); + + if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) + return -EINVAL; + + switch (s->target) { + case V4L2_SEL_TGT_CROP_DEFAULT: + case V4L2_SEL_TGT_CROP_BOUNDS: + s->r.width = inst->out_width; + s->r.height = inst->out_height; + break; + case V4L2_SEL_TGT_CROP: + s->r.width = inst->width; + s->r.height = inst->height; + break; + default: + return -EINVAL; + } + + s->r.top = 0; + s->r.left = 0; + + return 0; +} + +static int +venc_s_selection(struct file *file, void *fh, struct v4l2_selection *s) +{ + struct venus_inst *inst = to_inst(file); + + if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) + return -EINVAL; + + if (s->r.width > inst->out_width || + s->r.height > inst->out_height) + return -EINVAL; + + s->r.width = ALIGN(s->r.width, 2); + s->r.height = ALIGN(s->r.height, 2); + + switch (s->target) { + case V4L2_SEL_TGT_CROP: + s->r.top = 0; + s->r.left = 0; + inst->width = s->r.width; + inst->height = s->r.height; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int venc_s_parm(struct file *file, void *fh, struct v4l2_streamparm *a) +{ + struct venus_inst *inst = to_inst(file); + struct v4l2_outputparm *out = &a->parm.output; + struct v4l2_fract *timeperframe = &out->timeperframe; + u64 us_per_frame, fps; + + if (a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT && + a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) + return -EINVAL; + + memset(out->reserved, 0, sizeof(out->reserved)); + + if (!timeperframe->denominator) + timeperframe->denominator = inst->timeperframe.denominator; + if (!timeperframe->numerator) + timeperframe->numerator = inst->timeperframe.numerator; + + out->capability = V4L2_CAP_TIMEPERFRAME; + + us_per_frame = timeperframe->numerator * (u64)USEC_PER_SEC; + do_div(us_per_frame, timeperframe->denominator); + + if (!us_per_frame) + return -EINVAL; + + fps = 
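+	      /* e.g. a 1/30 timeperframe yields us_per_frame = 33333 and an effective fps of 30 */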
(u64)USEC_PER_SEC; + do_div(fps, us_per_frame); + + inst->timeperframe = *timeperframe; + inst->fps = fps; + + return 0; +} + +static int venc_g_parm(struct file *file, void *fh, struct v4l2_streamparm *a) +{ + struct venus_inst *inst = to_inst(file); + + if (a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT && + a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) + return -EINVAL; + + a->parm.output.capability |= V4L2_CAP_TIMEPERFRAME; + a->parm.output.timeperframe = inst->timeperframe; + + return 0; +} + +static int venc_enum_framesizes(struct file *file, void *fh, + struct v4l2_frmsizeenum *fsize) +{ + struct venus_inst *inst = to_inst(file); + const struct venus_format *fmt; + + fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE; + + fmt = find_format(inst, fsize->pixel_format, + V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE); + if (!fmt) { + fmt = find_format(inst, fsize->pixel_format, + V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE); + if (!fmt) + return -EINVAL; + } + + if (fsize->index) + return -EINVAL; + + fsize->stepwise.min_width = frame_width_min(inst); + fsize->stepwise.max_width = frame_width_max(inst); + fsize->stepwise.step_width = frame_width_step(inst); + fsize->stepwise.min_height = frame_height_min(inst); + fsize->stepwise.max_height = frame_height_max(inst); + fsize->stepwise.step_height = frame_height_step(inst); + + return 0; +} + +static int venc_enum_frameintervals(struct file *file, void *fh, + struct v4l2_frmivalenum *fival) +{ + struct venus_inst *inst = to_inst(file); + const struct venus_format *fmt; + unsigned int framerate_factor = 1; + + fival->type = V4L2_FRMIVAL_TYPE_STEPWISE; + + fmt = find_format(inst, fival->pixel_format, + V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE); + if (!fmt) { + fmt = find_format(inst, fival->pixel_format, + V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE); + if (!fmt) + return -EINVAL; + } + + if (fival->index) + return -EINVAL; + + if (!fival->width || !fival->height) + return -EINVAL; + + if (fival->width > frame_width_max(inst) || + fival->width < frame_width_min(inst) || + fival->height > frame_height_max(inst) || + fival->height < frame_height_min(inst)) + return -EINVAL; + + if (IS_V1(inst->core)) { + /* framerate is reported in 1/65535 fps unit */ + framerate_factor = (1 << 16); + } + + fival->stepwise.min.numerator = 1; + fival->stepwise.min.denominator = frate_max(inst) / framerate_factor; + fival->stepwise.max.numerator = 1; + fival->stepwise.max.denominator = frate_min(inst) / framerate_factor; + fival->stepwise.step.numerator = 1; + fival->stepwise.step.denominator = frate_max(inst) / framerate_factor; + + return 0; +} + +static int venc_subscribe_event(struct v4l2_fh *fh, + const struct v4l2_event_subscription *sub) +{ + switch (sub->type) { + case V4L2_EVENT_EOS: + return v4l2_event_subscribe(fh, sub, 2, NULL); + case V4L2_EVENT_CTRL: + return v4l2_ctrl_subscribe_event(fh, sub); + default: + return -EINVAL; + } +} + +static int +venc_encoder_cmd(struct file *file, void *fh, struct v4l2_encoder_cmd *cmd) +{ + struct venus_inst *inst = to_inst(file); + struct hfi_frame_data fdata = {0}; + int ret = 0; + + ret = v4l2_m2m_ioctl_try_encoder_cmd(file, fh, cmd); + if (ret) + return ret; + + mutex_lock(&inst->lock); + + if (cmd->cmd == V4L2_ENC_CMD_STOP && + inst->enc_state == VENUS_ENC_STATE_ENCODING) { + /* + * Implement V4L2_ENC_CMD_STOP by enqueue an empty buffer on + * encoder input to signal EOS. 
+ */ + if (!(inst->streamon_out && inst->streamon_cap)) + goto unlock; + + fdata.buffer_type = HFI_BUFFER_INPUT; + fdata.flags |= HFI_BUFFERFLAG_EOS; + fdata.device_addr = 0xdeadb000; + + ret = hfi_session_process_buf(inst, &fdata); + + inst->enc_state = VENUS_ENC_STATE_DRAIN; + } else if (cmd->cmd == V4L2_ENC_CMD_START) { + if (inst->enc_state == VENUS_ENC_STATE_DRAIN) { + ret = -EBUSY; + goto unlock; + } + if (inst->enc_state == VENUS_ENC_STATE_STOPPED) { + vb2_clear_last_buffer_dequeued(&inst->fh.m2m_ctx->cap_q_ctx.q); + inst->enc_state = VENUS_ENC_STATE_ENCODING; + } + } + +unlock: + mutex_unlock(&inst->lock); + return ret; +} + +static const struct v4l2_ioctl_ops venc_ioctl_ops = { + .vidioc_querycap = venc_querycap, + .vidioc_enum_fmt_vid_cap = venc_enum_fmt, + .vidioc_enum_fmt_vid_out = venc_enum_fmt, + .vidioc_s_fmt_vid_cap_mplane = venc_s_fmt, + .vidioc_s_fmt_vid_out_mplane = venc_s_fmt, + .vidioc_g_fmt_vid_cap_mplane = venc_g_fmt, + .vidioc_g_fmt_vid_out_mplane = venc_g_fmt, + .vidioc_try_fmt_vid_cap_mplane = venc_try_fmt, + .vidioc_try_fmt_vid_out_mplane = venc_try_fmt, + .vidioc_g_selection = venc_g_selection, + .vidioc_s_selection = venc_s_selection, + .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs, + .vidioc_querybuf = v4l2_m2m_ioctl_querybuf, + .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs, + .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf, + .vidioc_qbuf = v4l2_m2m_ioctl_qbuf, + .vidioc_expbuf = v4l2_m2m_ioctl_expbuf, + .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf, + .vidioc_streamon = v4l2_m2m_ioctl_streamon, + .vidioc_streamoff = v4l2_m2m_ioctl_streamoff, + .vidioc_s_parm = venc_s_parm, + .vidioc_g_parm = venc_g_parm, + .vidioc_enum_framesizes = venc_enum_framesizes, + .vidioc_enum_frameintervals = venc_enum_frameintervals, + .vidioc_subscribe_event = venc_subscribe_event, + .vidioc_unsubscribe_event = v4l2_event_unsubscribe, + .vidioc_try_encoder_cmd = v4l2_m2m_ioctl_try_encoder_cmd, + .vidioc_encoder_cmd = venc_encoder_cmd, +}; + +static int venc_pm_get(struct venus_inst *inst) +{ + struct venus_core *core = inst->core; + struct device *dev = core->dev_enc; + int ret; + + mutex_lock(&core->pm_lock); + ret = pm_runtime_resume_and_get(dev); + mutex_unlock(&core->pm_lock); + + return ret < 0 ? ret : 0; +} + +static int venc_pm_put(struct venus_inst *inst, bool autosuspend) +{ + struct venus_core *core = inst->core; + struct device *dev = core->dev_enc; + int ret; + + mutex_lock(&core->pm_lock); + + if (autosuspend) + ret = pm_runtime_put_autosuspend(dev); + else + ret = pm_runtime_put_sync(dev); + + mutex_unlock(&core->pm_lock); + + return ret < 0 ? ret : 0; +} + +static int venc_pm_get_put(struct venus_inst *inst) +{ + struct venus_core *core = inst->core; + struct device *dev = core->dev_enc; + int ret = 0; + + mutex_lock(&core->pm_lock); + + if (pm_runtime_suspended(dev)) { + ret = pm_runtime_resume_and_get(dev); + if (ret < 0) + goto error; + + ret = pm_runtime_put_autosuspend(dev); + } + +error: + mutex_unlock(&core->pm_lock); + + return ret < 0 ? 
ret : 0; +} + +static void venc_pm_touch(struct venus_inst *inst) +{ + pm_runtime_mark_last_busy(inst->core->dev_enc); +} + +static int venc_set_properties(struct venus_inst *inst) +{ + struct venc_controls *ctr = &inst->controls.enc; + struct hfi_intra_period intra_period; + struct hfi_framerate frate; + struct hfi_bitrate brate; + struct hfi_idr_period idrp; + struct hfi_quantization quant; + struct hfi_quantization_range quant_range; + struct hfi_quantization_range_v2 quant_range_v2; + struct hfi_enable en; + struct hfi_ltr_mode ltr_mode; + struct hfi_intra_refresh intra_refresh = {}; + u32 ptype, rate_control, bitrate; + u32 profile, level; + int ret; + + ret = venus_helper_set_work_mode(inst); + if (ret) + return ret; + + ptype = HFI_PROPERTY_CONFIG_FRAME_RATE; + frate.buffer_type = HFI_BUFFER_OUTPUT; + frate.framerate = inst->fps * (1 << 16); + + ret = hfi_session_set_property(inst, ptype, &frate); + if (ret) + return ret; + + if (inst->fmt_cap->pixfmt == V4L2_PIX_FMT_H264) { + struct hfi_h264_vui_timing_info info; + struct hfi_h264_entropy_control entropy; + struct hfi_h264_db_control deblock; + struct hfi_h264_8x8_transform h264_transform; + + ptype = HFI_PROPERTY_PARAM_VENC_H264_VUI_TIMING_INFO; + info.enable = 1; + info.fixed_framerate = 1; + info.time_scale = NSEC_PER_SEC; + + ret = hfi_session_set_property(inst, ptype, &info); + if (ret) + return ret; + + ptype = HFI_PROPERTY_PARAM_VENC_H264_ENTROPY_CONTROL; + entropy.entropy_mode = venc_v4l2_to_hfi( + V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE, + ctr->h264_entropy_mode); + entropy.cabac_model = HFI_H264_CABAC_MODEL_0; + + ret = hfi_session_set_property(inst, ptype, &entropy); + if (ret) + return ret; + + ptype = HFI_PROPERTY_PARAM_VENC_H264_DEBLOCK_CONTROL; + deblock.mode = venc_v4l2_to_hfi( + V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE, + ctr->h264_loop_filter_mode); + deblock.slice_alpha_offset = ctr->h264_loop_filter_alpha; + deblock.slice_beta_offset = ctr->h264_loop_filter_beta; + + ret = hfi_session_set_property(inst, ptype, &deblock); + if (ret) + return ret; + + ptype = HFI_PROPERTY_PARAM_VENC_H264_TRANSFORM_8X8; + h264_transform.enable_type = 0; + if (ctr->profile.h264 == V4L2_MPEG_VIDEO_H264_PROFILE_HIGH || + ctr->profile.h264 == V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_HIGH) + h264_transform.enable_type = ctr->h264_8x8_transform; + + ret = hfi_session_set_property(inst, ptype, &h264_transform); + if (ret) + return ret; + + } + + if (inst->fmt_cap->pixfmt == V4L2_PIX_FMT_H264 || + inst->fmt_cap->pixfmt == V4L2_PIX_FMT_HEVC) { + /* IDR periodicity, n: + * n = 0 - only the first I-frame is IDR frame + * n = 1 - all I-frames will be IDR frames + * n > 1 - every n-th I-frame will be IDR frame + */ + ptype = HFI_PROPERTY_CONFIG_VENC_IDR_PERIOD; + idrp.idr_period = 0; + ret = hfi_session_set_property(inst, ptype, &idrp); + if (ret) + return ret; + } + + if (inst->fmt_cap->pixfmt == V4L2_PIX_FMT_HEVC && + ctr->profile.hevc == V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10) { + struct hfi_hdr10_pq_sei hdr10; + unsigned int c; + + ptype = HFI_PROPERTY_PARAM_VENC_HDR10_PQ_SEI; + + for (c = 0; c < 3; c++) { + hdr10.mastering.display_primaries_x[c] = + ctr->mastering.display_primaries_x[c]; + hdr10.mastering.display_primaries_y[c] = + ctr->mastering.display_primaries_y[c]; + } + + hdr10.mastering.white_point_x = ctr->mastering.white_point_x; + hdr10.mastering.white_point_y = ctr->mastering.white_point_y; + hdr10.mastering.max_display_mastering_luminance = + ctr->mastering.max_display_mastering_luminance; + 
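+		/* remaining HDR10 static metadata: minimum mastering luminance and the content light level info */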
hdr10.mastering.min_display_mastering_luminance = + ctr->mastering.min_display_mastering_luminance; + + hdr10.cll.max_content_light = ctr->cll.max_content_light_level; + hdr10.cll.max_pic_average_light = + ctr->cll.max_pic_average_light_level; + + ret = hfi_session_set_property(inst, ptype, &hdr10); + if (ret) + return ret; + } + + if (ctr->num_b_frames) { + u32 max_num_b_frames = NUM_B_FRAMES_MAX; + + ptype = HFI_PROPERTY_PARAM_VENC_MAX_NUM_B_FRAMES; + ret = hfi_session_set_property(inst, ptype, &max_num_b_frames); + if (ret) + return ret; + } + + ptype = HFI_PROPERTY_CONFIG_VENC_INTRA_PERIOD; + intra_period.pframes = ctr->num_p_frames; + intra_period.bframes = ctr->num_b_frames; + + ret = hfi_session_set_property(inst, ptype, &intra_period); + if (ret) + return ret; + + if (!ctr->rc_enable) + rate_control = HFI_RATE_CONTROL_OFF; + else if (ctr->bitrate_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_VBR) + rate_control = ctr->frame_skip_mode ? HFI_RATE_CONTROL_VBR_VFR : + HFI_RATE_CONTROL_VBR_CFR; + else if (ctr->bitrate_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_CBR) + rate_control = ctr->frame_skip_mode ? HFI_RATE_CONTROL_CBR_VFR : + HFI_RATE_CONTROL_CBR_CFR; + else if (ctr->bitrate_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_CQ) + rate_control = HFI_RATE_CONTROL_CQ; + + ptype = HFI_PROPERTY_PARAM_VENC_RATE_CONTROL; + ret = hfi_session_set_property(inst, ptype, &rate_control); + if (ret) + return ret; + + if (rate_control == HFI_RATE_CONTROL_CQ && ctr->const_quality) { + struct hfi_heic_frame_quality quality = {}; + + ptype = HFI_PROPERTY_CONFIG_HEIC_FRAME_QUALITY; + quality.frame_quality = ctr->const_quality; + ret = hfi_session_set_property(inst, ptype, &quality); + if (ret) + return ret; + } + + if (!ctr->bitrate) + bitrate = 64000; + else + bitrate = ctr->bitrate; + + ptype = HFI_PROPERTY_CONFIG_VENC_TARGET_BITRATE; + brate.bitrate = bitrate; + brate.layer_id = 0; + + ret = hfi_session_set_property(inst, ptype, &brate); + if (ret) + return ret; + + if (inst->fmt_cap->pixfmt == V4L2_PIX_FMT_H264 || + inst->fmt_cap->pixfmt == V4L2_PIX_FMT_HEVC) { + ptype = HFI_PROPERTY_CONFIG_VENC_SYNC_FRAME_SEQUENCE_HEADER; + if (ctr->header_mode == V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE) + en.enable = 0; + else + en.enable = 1; + + ret = hfi_session_set_property(inst, ptype, &en); + if (ret) + return ret; + } + + if (!ctr->bitrate_peak) + bitrate *= 2; + else + bitrate = ctr->bitrate_peak; + + ptype = HFI_PROPERTY_CONFIG_VENC_MAX_BITRATE; + brate.bitrate = bitrate; + brate.layer_id = 0; + + ret = hfi_session_set_property(inst, ptype, &brate); + if (ret) + return ret; + + ptype = HFI_PROPERTY_PARAM_VENC_SESSION_QP; + if (inst->fmt_cap->pixfmt == V4L2_PIX_FMT_HEVC) { + quant.qp_i = ctr->hevc_i_qp; + quant.qp_p = ctr->hevc_p_qp; + quant.qp_b = ctr->hevc_b_qp; + } else { + quant.qp_i = ctr->h264_i_qp; + quant.qp_p = ctr->h264_p_qp; + quant.qp_b = ctr->h264_b_qp; + } + quant.layer_id = 0; + ret = hfi_session_set_property(inst, ptype, &quant); + if (ret) + return ret; + + if (inst->core->res->hfi_version == HFI_VERSION_4XX || + inst->core->res->hfi_version == HFI_VERSION_6XX) { + ptype = HFI_PROPERTY_PARAM_VENC_SESSION_QP_RANGE_V2; + + if (inst->fmt_cap->pixfmt == V4L2_PIX_FMT_HEVC) { + quant_range_v2.min_qp.qp_packed = ctr->hevc_min_qp; + quant_range_v2.max_qp.qp_packed = ctr->hevc_max_qp; + } else if (inst->fmt_cap->pixfmt == V4L2_PIX_FMT_VP8) { + quant_range_v2.min_qp.qp_packed = ctr->vp8_min_qp; + quant_range_v2.max_qp.qp_packed = ctr->vp8_max_qp; + } else { + quant_range_v2.min_qp.qp_packed = ctr->h264_min_qp; + 
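+			/* codecs other than HEVC and VP8 reuse the H.264 QP range controls */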
quant_range_v2.max_qp.qp_packed = ctr->h264_max_qp;
+ }
+
+ ret = hfi_session_set_property(inst, ptype, &quant_range_v2);
+ } else {
+ ptype = HFI_PROPERTY_PARAM_VENC_SESSION_QP_RANGE;
+
+ if (inst->fmt_cap->pixfmt == V4L2_PIX_FMT_HEVC) {
+ quant_range.min_qp = ctr->hevc_min_qp;
+ quant_range.max_qp = ctr->hevc_max_qp;
+ } else if (inst->fmt_cap->pixfmt == V4L2_PIX_FMT_VP8) {
+ quant_range.min_qp = ctr->vp8_min_qp;
+ quant_range.max_qp = ctr->vp8_max_qp;
+ } else {
+ quant_range.min_qp = ctr->h264_min_qp;
+ quant_range.max_qp = ctr->h264_max_qp;
+ }
+
+ quant_range.layer_id = 0;
+ ret = hfi_session_set_property(inst, ptype, &quant_range);
+ }
+
+ if (ret)
+ return ret;
+
+ ptype = HFI_PROPERTY_PARAM_VENC_LTRMODE;
+ ltr_mode.ltr_count = ctr->ltr_count;
+ ltr_mode.ltr_mode = HFI_LTR_MODE_MANUAL;
+ ltr_mode.trust_mode = 1;
+ ret = hfi_session_set_property(inst, ptype, &ltr_mode);
+ if (ret)
+ return ret;
+
+ switch (inst->hfi_codec) {
+ case HFI_VIDEO_CODEC_H264:
+ profile = ctr->profile.h264;
+ level = ctr->level.h264;
+ break;
+ case HFI_VIDEO_CODEC_MPEG4:
+ profile = ctr->profile.mpeg4;
+ level = ctr->level.mpeg4;
+ break;
+ case HFI_VIDEO_CODEC_VP8:
+ profile = ctr->profile.vp8;
+ level = 0;
+ break;
+ case HFI_VIDEO_CODEC_VP9:
+ profile = ctr->profile.vp9;
+ level = ctr->level.vp9;
+ break;
+ case HFI_VIDEO_CODEC_HEVC:
+ profile = ctr->profile.hevc;
+ level = ctr->level.hevc;
+ break;
+ case HFI_VIDEO_CODEC_MPEG2:
+ default:
+ profile = 0;
+ level = 0;
+ break;
+ }
+
+ ret = venus_helper_set_profile_level(inst, profile, level);
+ if (ret)
+ return ret;
+
+ if (inst->fmt_cap->pixfmt == V4L2_PIX_FMT_H264 ||
+ inst->fmt_cap->pixfmt == V4L2_PIX_FMT_HEVC) {
+ struct hfi_enable en = {};
+
+ ptype = HFI_PROPERTY_PARAM_VENC_H264_GENERATE_AUDNAL;
+
+ if (ctr->aud_enable)
+ en.enable = 1;
+
+ ret = hfi_session_set_property(inst, ptype, &en);
+ }
+
+ if ((inst->fmt_cap->pixfmt == V4L2_PIX_FMT_H264 ||
+ inst->fmt_cap->pixfmt == V4L2_PIX_FMT_HEVC) &&
+ (rate_control == HFI_RATE_CONTROL_CBR_VFR ||
+ rate_control == HFI_RATE_CONTROL_CBR_CFR)) {
+ intra_refresh.mode = HFI_INTRA_REFRESH_NONE;
+ intra_refresh.cir_mbs = 0;
+
+ if (ctr->intra_refresh_period) {
+ u32 mbs;
+
+ mbs = ALIGN(inst->width, 16) * ALIGN(inst->height, 16);
+ mbs /= 16 * 16;
+ if (mbs % ctr->intra_refresh_period)
+ mbs++;
+ mbs /= ctr->intra_refresh_period;
+
+ intra_refresh.cir_mbs = mbs;
+ if (ctr->intra_refresh_type ==
+ V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD_TYPE_CYCLIC)
+ intra_refresh.mode = HFI_INTRA_REFRESH_CYCLIC;
+ else
+ intra_refresh.mode = HFI_INTRA_REFRESH_RANDOM;
+ }
+
+ ptype = HFI_PROPERTY_PARAM_VENC_INTRA_REFRESH;
+
+ ret = hfi_session_set_property(inst, ptype, &intra_refresh);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int venc_init_session(struct venus_inst *inst)
+{
+ int ret;
+
+ ret = venus_helper_session_init(inst);
+ if (ret == -EALREADY)
+ return 0;
+ else if (ret)
+ return ret;
+
+ ret = venus_helper_set_stride(inst, inst->out_width,
+ inst->out_height);
+ if (ret)
+ goto deinit;
+
+ ret = venus_helper_set_input_resolution(inst, inst->width,
+ inst->height);
+ if (ret)
+ goto deinit;
+
+ ret = venus_helper_set_output_resolution(inst, inst->width,
+ inst->height,
+ HFI_BUFFER_OUTPUT);
+ if (ret)
+ goto deinit;
+
+ ret = venus_helper_set_color_format(inst, inst->fmt_out->pixfmt);
+ if (ret)
+ goto deinit;
+
+ ret = venc_set_properties(inst);
+ if (ret)
+ goto deinit;
+
+ return 0;
+deinit:
+ hfi_session_deinit(inst);
+ return ret;
+}
+
+static int venc_out_num_buffers(struct venus_inst *inst,
unsigned int *num) +{ + struct hfi_buffer_requirements bufreq; + int ret; + + ret = venus_helper_get_bufreq(inst, HFI_BUFFER_INPUT, &bufreq); + if (ret) + return ret; + + *num = bufreq.count_actual; + + return 0; +} + +static int venc_queue_setup(struct vb2_queue *q, + unsigned int *num_buffers, unsigned int *num_planes, + unsigned int sizes[], struct device *alloc_devs[]) +{ + struct venus_inst *inst = vb2_get_drv_priv(q); + struct venus_core *core = inst->core; + unsigned int num, min = 4; + int ret; + + if (*num_planes) { + if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE && + *num_planes != inst->fmt_out->num_planes) + return -EINVAL; + + if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE && + *num_planes != inst->fmt_cap->num_planes) + return -EINVAL; + + if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE && + sizes[0] < inst->input_buf_size) + return -EINVAL; + + if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE && + sizes[0] < inst->output_buf_size) + return -EINVAL; + + return 0; + } + + if (test_bit(0, &core->sys_error)) { + if (inst->nonblock) + return -EAGAIN; + + ret = wait_event_interruptible(core->sys_err_done, + !test_bit(0, &core->sys_error)); + if (ret) + return ret; + } + + ret = venc_pm_get(inst); + if (ret) + return ret; + + mutex_lock(&inst->lock); + ret = venc_init_session(inst); + mutex_unlock(&inst->lock); + + if (ret) + goto put_power; + + ret = venc_pm_put(inst, false); + if (ret) + return ret; + + switch (q->type) { + case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: + *num_planes = inst->fmt_out->num_planes; + + ret = venc_out_num_buffers(inst, &num); + if (ret) + break; + + num = max(num, min); + *num_buffers = max(*num_buffers, num); + inst->num_input_bufs = *num_buffers; + + sizes[0] = venus_helper_get_framesz(inst->fmt_out->pixfmt, + inst->out_width, + inst->out_height); + inst->input_buf_size = sizes[0]; + break; + case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: + *num_planes = inst->fmt_cap->num_planes; + *num_buffers = max(*num_buffers, min); + inst->num_output_bufs = *num_buffers; + sizes[0] = venus_helper_get_framesz(inst->fmt_cap->pixfmt, + inst->width, + inst->height); + sizes[0] = max(sizes[0], inst->output_buf_size); + inst->output_buf_size = sizes[0]; + break; + default: + ret = -EINVAL; + break; + } + + return ret; +put_power: + venc_pm_put(inst, false); + return ret; +} + +static int venc_buf_init(struct vb2_buffer *vb) +{ + struct venus_inst *inst = vb2_get_drv_priv(vb->vb2_queue); + + inst->buf_count++; + + return venus_helper_vb2_buf_init(vb); +} + +static void venc_release_session(struct venus_inst *inst) +{ + int ret; + + venc_pm_get(inst); + + mutex_lock(&inst->lock); + + ret = hfi_session_deinit(inst); + if (ret || inst->session_error) + hfi_session_abort(inst); + + mutex_unlock(&inst->lock); + + venus_pm_load_scale(inst); + INIT_LIST_HEAD(&inst->registeredbufs); + venus_pm_release_core(inst); + + venc_pm_put(inst, false); +} + +static void venc_buf_cleanup(struct vb2_buffer *vb) +{ + struct venus_inst *inst = vb2_get_drv_priv(vb->vb2_queue); + struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); + struct venus_buffer *buf = to_venus_buffer(vbuf); + + mutex_lock(&inst->lock); + if (vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) + if (!list_empty(&inst->registeredbufs)) + list_del_init(&buf->reg_list); + mutex_unlock(&inst->lock); + + inst->buf_count--; + if (!inst->buf_count) + venc_release_session(inst); +} + +static int venc_verify_conf(struct venus_inst *inst) +{ + enum hfi_version ver = inst->core->res->hfi_version; + struct hfi_buffer_requirements 
bufreq; + int ret; + + if (!inst->num_input_bufs || !inst->num_output_bufs) + return -EINVAL; + + ret = venus_helper_get_bufreq(inst, HFI_BUFFER_OUTPUT, &bufreq); + if (ret) + return ret; + + if (inst->num_output_bufs < bufreq.count_actual || + inst->num_output_bufs < hfi_bufreq_get_count_min(&bufreq, ver)) + return -EINVAL; + + ret = venus_helper_get_bufreq(inst, HFI_BUFFER_INPUT, &bufreq); + if (ret) + return ret; + + if (inst->num_input_bufs < bufreq.count_actual || + inst->num_input_bufs < hfi_bufreq_get_count_min(&bufreq, ver)) + return -EINVAL; + + return 0; +} + +static int venc_start_streaming(struct vb2_queue *q, unsigned int count) +{ + struct venus_inst *inst = vb2_get_drv_priv(q); + int ret; + + mutex_lock(&inst->lock); + + if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) + inst->streamon_out = 1; + else + inst->streamon_cap = 1; + + if (!(inst->streamon_out & inst->streamon_cap)) { + mutex_unlock(&inst->lock); + return 0; + } + + venus_helper_init_instance(inst); + + inst->sequence_cap = 0; + inst->sequence_out = 0; + + ret = venc_pm_get(inst); + if (ret) + goto error; + + ret = venus_pm_acquire_core(inst); + if (ret) + goto put_power; + + ret = venc_pm_put(inst, true); + if (ret) + goto error; + + ret = venc_set_properties(inst); + if (ret) + goto error; + + ret = venc_verify_conf(inst); + if (ret) + goto error; + + ret = venus_helper_set_num_bufs(inst, inst->num_input_bufs, + inst->num_output_bufs, 0); + if (ret) + goto error; + + ret = venus_helper_vb2_start_streaming(inst); + if (ret) + goto error; + + inst->enc_state = VENUS_ENC_STATE_ENCODING; + + mutex_unlock(&inst->lock); + + return 0; + +put_power: + venc_pm_put(inst, false); +error: + venus_helper_buffers_done(inst, q->type, VB2_BUF_STATE_QUEUED); + if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) + inst->streamon_out = 0; + else + inst->streamon_cap = 0; + mutex_unlock(&inst->lock); + return ret; +} + +static void venc_vb2_buf_queue(struct vb2_buffer *vb) +{ + struct venus_inst *inst = vb2_get_drv_priv(vb->vb2_queue); + struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); + + venc_pm_get_put(inst); + + mutex_lock(&inst->lock); + + if (inst->enc_state == VENUS_ENC_STATE_STOPPED) { + vbuf->sequence = inst->sequence_cap++; + vbuf->field = V4L2_FIELD_NONE; + vb2_set_plane_payload(vb, 0, 0); + v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_DONE); + mutex_unlock(&inst->lock); + return; + } + + venus_helper_vb2_buf_queue(vb); + mutex_unlock(&inst->lock); +} + +static const struct vb2_ops venc_vb2_ops = { + .queue_setup = venc_queue_setup, + .buf_init = venc_buf_init, + .buf_cleanup = venc_buf_cleanup, + .buf_prepare = venus_helper_vb2_buf_prepare, + .start_streaming = venc_start_streaming, + .stop_streaming = venus_helper_vb2_stop_streaming, + .buf_queue = venc_vb2_buf_queue, +}; + +static void venc_buf_done(struct venus_inst *inst, unsigned int buf_type, + u32 tag, u32 bytesused, u32 data_offset, u32 flags, + u32 hfi_flags, u64 timestamp_us) +{ + struct vb2_v4l2_buffer *vbuf; + struct vb2_buffer *vb; + unsigned int type; + + venc_pm_touch(inst); + + if (buf_type == HFI_BUFFER_INPUT) + type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; + else + type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; + + vbuf = venus_helper_find_buf(inst, type, tag); + if (!vbuf) + return; + + vbuf->flags = flags; + + if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { + vb = &vbuf->vb2_buf; + vb2_set_plane_payload(vb, 0, bytesused + data_offset); + vb->planes[0].data_offset = data_offset; + vb->timestamp = timestamp_us * NSEC_PER_USEC; + vbuf->sequence = 
inst->sequence_cap++; + if ((vbuf->flags & V4L2_BUF_FLAG_LAST) && + inst->enc_state == VENUS_ENC_STATE_DRAIN) { + inst->enc_state = VENUS_ENC_STATE_STOPPED; + } + } else { + vbuf->sequence = inst->sequence_out++; + } + + v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_DONE); +} + +static void venc_event_notify(struct venus_inst *inst, u32 event, + struct hfi_event_data *data) +{ + struct device *dev = inst->core->dev_enc; + + venc_pm_touch(inst); + + if (event == EVT_SESSION_ERROR) { + inst->session_error = true; + venus_helper_vb2_queue_error(inst); + dev_err(dev, "enc: event session error %x\n", inst->error); + } +} + +static const struct hfi_inst_ops venc_hfi_ops = { + .buf_done = venc_buf_done, + .event_notify = venc_event_notify, +}; + +static const struct v4l2_m2m_ops venc_m2m_ops = { + .device_run = venus_helper_m2m_device_run, + .job_abort = venus_helper_m2m_job_abort, +}; + +static int m2m_queue_init(void *priv, struct vb2_queue *src_vq, + struct vb2_queue *dst_vq) +{ + struct venus_inst *inst = priv; + int ret; + + src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; + src_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF; + src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY; + src_vq->ops = &venc_vb2_ops; + src_vq->mem_ops = &vb2_dma_contig_memops; + src_vq->drv_priv = inst; + src_vq->buf_struct_size = sizeof(struct venus_buffer); + src_vq->allow_zero_bytesused = 1; + src_vq->min_buffers_needed = 1; + src_vq->dev = inst->core->dev; + src_vq->lock = &inst->ctx_q_lock; + if (inst->core->res->hfi_version == HFI_VERSION_1XX) + src_vq->bidirectional = 1; + ret = vb2_queue_init(src_vq); + if (ret) + return ret; + + dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; + dst_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF; + dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY; + dst_vq->ops = &venc_vb2_ops; + dst_vq->mem_ops = &vb2_dma_contig_memops; + dst_vq->drv_priv = inst; + dst_vq->buf_struct_size = sizeof(struct venus_buffer); + dst_vq->allow_zero_bytesused = 1; + dst_vq->min_buffers_needed = 1; + dst_vq->dev = inst->core->dev; + dst_vq->lock = &inst->ctx_q_lock; + return vb2_queue_init(dst_vq); +} + +static void venc_inst_init(struct venus_inst *inst) +{ + inst->fmt_cap = &venc_formats[VENUS_FMT_H264]; + inst->fmt_out = &venc_formats[VENUS_FMT_NV12]; + inst->width = 1280; + inst->height = ALIGN(720, 32); + inst->out_width = 1280; + inst->out_height = 720; + inst->fps = 15; + inst->timeperframe.numerator = 1; + inst->timeperframe.denominator = 15; + inst->hfi_codec = HFI_VIDEO_CODEC_H264; +} + +static int venc_open(struct file *file) +{ + struct venus_core *core = video_drvdata(file); + struct venus_inst *inst; + int ret; + + inst = kzalloc(sizeof(*inst), GFP_KERNEL); + if (!inst) + return -ENOMEM; + + INIT_LIST_HEAD(&inst->dpbbufs); + INIT_LIST_HEAD(&inst->registeredbufs); + INIT_LIST_HEAD(&inst->internalbufs); + INIT_LIST_HEAD(&inst->list); + mutex_init(&inst->lock); + mutex_init(&inst->ctx_q_lock); + + inst->core = core; + inst->session_type = VIDC_SESSION_TYPE_ENC; + inst->clk_data.core_id = VIDC_CORE_ID_DEFAULT; + inst->core_acquired = false; + inst->nonblock = file->f_flags & O_NONBLOCK; + + if (inst->enc_state == VENUS_ENC_STATE_DEINIT) + inst->enc_state = VENUS_ENC_STATE_INIT; + + venus_helper_init_instance(inst); + + ret = venc_ctrl_init(inst); + if (ret) + goto err_free; + + ret = hfi_session_create(inst, &venc_hfi_ops); + if (ret) + goto err_ctrl_deinit; + + venc_inst_init(inst); + + /* + * create m2m device for every instance, the m2m context scheduling + * is made by 
firmware side so we do not need to care about. + */ + inst->m2m_dev = v4l2_m2m_init(&venc_m2m_ops); + if (IS_ERR(inst->m2m_dev)) { + ret = PTR_ERR(inst->m2m_dev); + goto err_session_destroy; + } + + inst->m2m_ctx = v4l2_m2m_ctx_init(inst->m2m_dev, inst, m2m_queue_init); + if (IS_ERR(inst->m2m_ctx)) { + ret = PTR_ERR(inst->m2m_ctx); + goto err_m2m_release; + } + + v4l2_fh_init(&inst->fh, core->vdev_enc); + + inst->fh.ctrl_handler = &inst->ctrl_handler; + v4l2_fh_add(&inst->fh); + inst->fh.m2m_ctx = inst->m2m_ctx; + file->private_data = &inst->fh; + + return 0; + +err_m2m_release: + v4l2_m2m_release(inst->m2m_dev); +err_session_destroy: + hfi_session_destroy(inst); +err_ctrl_deinit: + venc_ctrl_deinit(inst); +err_free: + kfree(inst); + return ret; +} + +static int venc_close(struct file *file) +{ + struct venus_inst *inst = to_inst(file); + + venc_pm_get(inst); + + v4l2_m2m_ctx_release(inst->m2m_ctx); + v4l2_m2m_release(inst->m2m_dev); + venc_ctrl_deinit(inst); + hfi_session_destroy(inst); + mutex_destroy(&inst->lock); + mutex_destroy(&inst->ctx_q_lock); + v4l2_fh_del(&inst->fh); + v4l2_fh_exit(&inst->fh); + + inst->enc_state = VENUS_ENC_STATE_DEINIT; + + venc_pm_put(inst, false); + + kfree(inst); + return 0; +} + +static const struct v4l2_file_operations venc_fops = { + .owner = THIS_MODULE, + .open = venc_open, + .release = venc_close, + .unlocked_ioctl = video_ioctl2, + .poll = v4l2_m2m_fop_poll, + .mmap = v4l2_m2m_fop_mmap, +}; + +static int venc_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct video_device *vdev; + struct venus_core *core; + int ret; + + if (!dev->parent) + return -EPROBE_DEFER; + + core = dev_get_drvdata(dev->parent); + if (!core) + return -EPROBE_DEFER; + + platform_set_drvdata(pdev, core); + + if (core->pm_ops->venc_get) { + ret = core->pm_ops->venc_get(dev); + if (ret) + return ret; + } + + vdev = video_device_alloc(); + if (!vdev) + return -ENOMEM; + + strscpy(vdev->name, "qcom-venus-encoder", sizeof(vdev->name)); + vdev->release = video_device_release; + vdev->fops = &venc_fops; + vdev->ioctl_ops = &venc_ioctl_ops; + vdev->vfl_dir = VFL_DIR_M2M; + vdev->v4l2_dev = &core->v4l2_dev; + vdev->device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING; + + ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1); + if (ret) + goto err_vdev_release; + + core->vdev_enc = vdev; + core->dev_enc = dev; + + video_set_drvdata(vdev, core); + pm_runtime_set_autosuspend_delay(dev, 2000); + pm_runtime_use_autosuspend(dev); + pm_runtime_enable(dev); + + return 0; + +err_vdev_release: + video_device_release(vdev); + return ret; +} + +static void venc_remove(struct platform_device *pdev) +{ + struct venus_core *core = dev_get_drvdata(pdev->dev.parent); + + video_unregister_device(core->vdev_enc); + pm_runtime_disable(core->dev_enc); + + if (core->pm_ops->venc_put) + core->pm_ops->venc_put(core->dev_enc); +} + +static __maybe_unused int venc_runtime_suspend(struct device *dev) +{ + struct venus_core *core = dev_get_drvdata(dev); + const struct venus_pm_ops *pm_ops = core->pm_ops; + int ret = 0; + + if (pm_ops->venc_power) + ret = pm_ops->venc_power(dev, POWER_OFF); + + return ret; +} + +static __maybe_unused int venc_runtime_resume(struct device *dev) +{ + struct venus_core *core = dev_get_drvdata(dev); + const struct venus_pm_ops *pm_ops = core->pm_ops; + int ret = 0; + + if (pm_ops->venc_power) + ret = pm_ops->venc_power(dev, POWER_ON); + + return ret; +} + +static const struct dev_pm_ops venc_pm_ops = { + 
SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) + SET_RUNTIME_PM_OPS(venc_runtime_suspend, venc_runtime_resume, NULL) +}; + +static const struct of_device_id venc_dt_match[] = { + { .compatible = "venus-encoder" }, + { } +}; +MODULE_DEVICE_TABLE(of, venc_dt_match); + +static struct platform_driver qcom_venus_enc_driver = { + .probe = venc_probe, + .remove_new = venc_remove, + .driver = { + .name = "qcom-venus-encoder", + .of_match_table = venc_dt_match, + .pm = &venc_pm_ops, + }, +}; +module_platform_driver(qcom_venus_enc_driver); + +MODULE_ALIAS("platform:qcom-venus-encoder"); +MODULE_DESCRIPTION("Qualcomm Venus video encoder driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/media/platform/qcom/venus/venc.h b/drivers/media/platform/qcom/venus/venc.h new file mode 100644 index 0000000000..4ea37fdcd9 --- /dev/null +++ b/drivers/media/platform/qcom/venus/venc.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. + * Copyright (C) 2017 Linaro Ltd. + */ +#ifndef __VENUS_VENC_H__ +#define __VENUS_VENC_H__ + +struct venus_inst; + +int venc_ctrl_init(struct venus_inst *inst); +void venc_ctrl_deinit(struct venus_inst *inst); + +#endif diff --git a/drivers/media/platform/qcom/venus/venc_ctrls.c b/drivers/media/platform/qcom/venus/venc_ctrls.c new file mode 100644 index 0000000000..d9d2a293f3 --- /dev/null +++ b/drivers/media/platform/qcom/venus/venc_ctrls.c @@ -0,0 +1,642 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. + * Copyright (C) 2017 Linaro Ltd. + */ +#include <linux/types.h> +#include <media/v4l2-ctrls.h> + +#include "core.h" +#include "venc.h" +#include "helpers.h" + +#define BITRATE_MIN 32000 +#define BITRATE_MAX 160000000 +#define BITRATE_DEFAULT 1000000 +#define BITRATE_DEFAULT_PEAK (BITRATE_DEFAULT * 2) +#define BITRATE_STEP 100 +#define SLICE_BYTE_SIZE_MAX 1024 +#define SLICE_BYTE_SIZE_MIN 1024 +#define SLICE_MB_SIZE_MAX 300 +#define AT_SLICE_BOUNDARY \ + V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY +#define MAX_LTR_FRAME_COUNT 4 + +static int venc_calc_bpframes(u32 gop_size, u32 conseq_b, u32 *bf, u32 *pf) +{ + u32 half = (gop_size - 1) >> 1; + u32 b, p, ratio; + bool found = false; + + if (!gop_size) + return -EINVAL; + + *bf = *pf = 0; + + if (!conseq_b) { + *pf = gop_size - 1; + return 0; + } + + b = p = half; + + for (; b <= gop_size - 1; b++, p--) { + if (b % p) + continue; + + ratio = b / p; + + if (ratio == conseq_b) { + found = true; + break; + } + + if (ratio > conseq_b) + break; + } + + if (!found) + return -EINVAL; + + if (b + p + 1 != gop_size) + return -EINVAL; + + *bf = b; + *pf = p; + + return 0; +} + +static int venc_op_s_ctrl(struct v4l2_ctrl *ctrl) +{ + struct venus_inst *inst = ctrl_to_inst(ctrl); + struct venc_controls *ctr = &inst->controls.enc; + struct hfi_enable en = { .enable = 1 }; + struct hfi_bitrate brate; + struct hfi_ltr_use ltr_use; + struct hfi_ltr_mark ltr_mark; + u32 bframes; + u32 ptype; + int ret; + + switch (ctrl->id) { + case V4L2_CID_MPEG_VIDEO_BITRATE_MODE: + ctr->bitrate_mode = ctrl->val; + break; + case V4L2_CID_MPEG_VIDEO_BITRATE: + ctr->bitrate = ctrl->val; + mutex_lock(&inst->lock); + if (inst->streamon_out && inst->streamon_cap) { + ptype = HFI_PROPERTY_CONFIG_VENC_TARGET_BITRATE; + brate.bitrate = ctr->bitrate; + brate.layer_id = 0; + + ret = hfi_session_set_property(inst, ptype, &brate); + if (ret) { + 
mutex_unlock(&inst->lock); + return ret; + } + } + mutex_unlock(&inst->lock); + break; + case V4L2_CID_MPEG_VIDEO_BITRATE_PEAK: + ctr->bitrate_peak = ctrl->val; + break; + case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE: + ctr->h264_entropy_mode = ctrl->val; + break; + case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE: + ctr->profile.mpeg4 = ctrl->val; + break; + case V4L2_CID_MPEG_VIDEO_H264_PROFILE: + ctr->profile.h264 = ctrl->val; + break; + case V4L2_CID_MPEG_VIDEO_HEVC_PROFILE: + ctr->profile.hevc = ctrl->val; + break; + case V4L2_CID_MPEG_VIDEO_VP8_PROFILE: + ctr->profile.vp8 = ctrl->val; + break; + case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL: + ctr->level.mpeg4 = ctrl->val; + break; + case V4L2_CID_MPEG_VIDEO_H264_LEVEL: + ctr->level.h264 = ctrl->val; + break; + case V4L2_CID_MPEG_VIDEO_HEVC_LEVEL: + ctr->level.hevc = ctrl->val; + break; + case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP: + ctr->h264_i_qp = ctrl->val; + break; + case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP: + ctr->h264_p_qp = ctrl->val; + break; + case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP: + ctr->h264_b_qp = ctrl->val; + break; + case V4L2_CID_MPEG_VIDEO_H264_MIN_QP: + ctr->h264_min_qp = ctrl->val; + break; + case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_MIN_QP: + ctr->h264_i_min_qp = ctrl->val; + break; + case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_MIN_QP: + ctr->h264_p_min_qp = ctrl->val; + break; + case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_MIN_QP: + ctr->h264_b_min_qp = ctrl->val; + break; + case V4L2_CID_MPEG_VIDEO_H264_MAX_QP: + ctr->h264_max_qp = ctrl->val; + break; + case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_MAX_QP: + ctr->h264_i_max_qp = ctrl->val; + break; + case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_MAX_QP: + ctr->h264_p_max_qp = ctrl->val; + break; + case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_MAX_QP: + ctr->h264_b_max_qp = ctrl->val; + break; + case V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_QP: + ctr->hevc_i_qp = ctrl->val; + break; + case V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_QP: + ctr->hevc_p_qp = ctrl->val; + break; + case V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_QP: + ctr->hevc_b_qp = ctrl->val; + break; + case V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP: + ctr->hevc_min_qp = ctrl->val; + break; + case V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_MIN_QP: + ctr->hevc_i_min_qp = ctrl->val; + break; + case V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_MIN_QP: + ctr->hevc_p_min_qp = ctrl->val; + break; + case V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_MIN_QP: + ctr->hevc_b_min_qp = ctrl->val; + break; + case V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP: + ctr->hevc_max_qp = ctrl->val; + break; + case V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_MAX_QP: + ctr->hevc_i_max_qp = ctrl->val; + break; + case V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_MAX_QP: + ctr->hevc_p_max_qp = ctrl->val; + break; + case V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_MAX_QP: + ctr->hevc_b_max_qp = ctrl->val; + break; + case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE: + ctr->multi_slice_mode = ctrl->val; + break; + case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES: + ctr->multi_slice_max_bytes = ctrl->val; + break; + case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB: + ctr->multi_slice_max_mb = ctrl->val; + break; + case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA: + ctr->h264_loop_filter_alpha = ctrl->val; + break; + case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA: + ctr->h264_loop_filter_beta = ctrl->val; + break; + case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE: + ctr->h264_loop_filter_mode = ctrl->val; + break; + case V4L2_CID_MPEG_VIDEO_HEADER_MODE: + ctr->header_mode = ctrl->val; + mutex_lock(&inst->lock); + if (inst->streamon_out && inst->streamon_cap) { + if (ctrl->val == V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE) + 
en.enable = 0;
+ else
+ en.enable = 1;
+ ptype = HFI_PROPERTY_CONFIG_VENC_SYNC_FRAME_SEQUENCE_HEADER;
+ ret = hfi_session_set_property(inst, ptype, &en);
+ if (ret) {
+ mutex_unlock(&inst->lock);
+ return ret;
+ }
+ }
+ mutex_unlock(&inst->lock);
+ break;
+ case V4L2_CID_MPEG_VIDEO_GOP_SIZE:
+ ret = venc_calc_bpframes(ctrl->val, ctr->num_b_frames, &bframes,
+ &ctr->num_p_frames);
+ if (ret)
+ return ret;
+
+ ctr->gop_size = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_I_PERIOD:
+ ctr->h264_i_period = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_VPX_MIN_QP:
+ ctr->vp8_min_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_VPX_MAX_QP:
+ ctr->vp8_max_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_B_FRAMES:
+ ret = venc_calc_bpframes(ctr->gop_size, ctrl->val, &bframes,
+ &ctr->num_p_frames);
+ if (ret)
+ return ret;
+
+ ctr->num_b_frames = bframes;
+ break;
+ case V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME:
+ mutex_lock(&inst->lock);
+ if (inst->streamon_out && inst->streamon_cap) {
+ ptype = HFI_PROPERTY_CONFIG_VENC_REQUEST_SYNC_FRAME;
+ ret = hfi_session_set_property(inst, ptype, &en);
+
+ if (ret) {
+ mutex_unlock(&inst->lock);
+ return ret;
+ }
+ }
+ mutex_unlock(&inst->lock);
+ break;
+ case V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE:
+ ctr->rc_enable = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_CONSTANT_QUALITY:
+ ctr->const_quality = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_FRAME_SKIP_MODE:
+ ctr->frame_skip_mode = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_BASELAYER_PRIORITY_ID:
+ ctr->base_priority_id = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_AU_DELIMITER:
+ ctr->aud_enable = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_LTR_COUNT:
+ ctr->ltr_count = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_FRAME_LTR_INDEX:
+ mutex_lock(&inst->lock);
+ if (inst->streamon_out && inst->streamon_cap) {
+ ptype = HFI_PROPERTY_CONFIG_VENC_MARKLTRFRAME;
+ ltr_mark.mark_frame = ctrl->val;
+ ret = hfi_session_set_property(inst, ptype, &ltr_mark);
+ if (ret) {
+ mutex_unlock(&inst->lock);
+ return ret;
+ }
+ }
+ mutex_unlock(&inst->lock);
+ break;
+ case V4L2_CID_MPEG_VIDEO_USE_LTR_FRAMES:
+ mutex_lock(&inst->lock);
+ if (inst->streamon_out && inst->streamon_cap) {
+ ptype = HFI_PROPERTY_CONFIG_VENC_USELTRFRAME;
+ ltr_use.ref_ltr = ctrl->val;
+ ltr_use.use_constrnt = true;
+ ltr_use.frames = 0;
+ ret = hfi_session_set_property(inst, ptype, &ltr_use);
+ if (ret) {
+ mutex_unlock(&inst->lock);
+ return ret;
+ }
+ }
+ mutex_unlock(&inst->lock);
+ break;
+ case V4L2_CID_COLORIMETRY_HDR10_CLL_INFO:
+ ctr->cll = *ctrl->p_new.p_hdr10_cll;
+ break;
+ case V4L2_CID_COLORIMETRY_HDR10_MASTERING_DISPLAY:
+ ctr->mastering = *ctrl->p_new.p_hdr10_mastering;
+ break;
+ case V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD_TYPE:
+ ctr->intra_refresh_type = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD:
+ ctr->intra_refresh_period = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM:
+ if (ctr->profile.h264 != V4L2_MPEG_VIDEO_H264_PROFILE_HIGH &&
+ ctr->profile.h264 != V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_HIGH)
+ return -EINVAL;
+
+ /*
+ * In video firmware, 8x8 transform is supported only for
+ * high profile(HP) and constrained high profile(CHP).
+ * If client wants to disable 8x8 transform for HP/CHP,
+ * it is better to set profile as main profile(MP).
+ * Because there is no difference between HP and MP
+ * if we disable 8x8 transform for HP.
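+ *
+ * For example, a session that selects HP but turns this control
+ * off produces the same stream the firmware would generate for
+ * MP, so requesting MP directly is the cleaner choice.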
+ */ + + + ctr->h264_8x8_transform = ctrl->val; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int venc_op_g_volatile_ctrl(struct v4l2_ctrl *ctrl) +{ + struct venus_inst *inst = ctrl_to_inst(ctrl); + struct hfi_buffer_requirements bufreq; + enum hfi_version ver = inst->core->res->hfi_version; + int ret; + + switch (ctrl->id) { + case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT: + ret = venus_helper_get_bufreq(inst, HFI_BUFFER_INPUT, &bufreq); + if (!ret) + ctrl->val = hfi_bufreq_get_count_min(&bufreq, ver); + break; + default: + return -EINVAL; + } + + return 0; +} + +static const struct v4l2_ctrl_ops venc_ctrl_ops = { + .s_ctrl = venc_op_s_ctrl, + .g_volatile_ctrl = venc_op_g_volatile_ctrl, +}; + +int venc_ctrl_init(struct venus_inst *inst) +{ + int ret; + struct v4l2_ctrl_hdr10_mastering_display p_hdr10_mastering = { + { 34000, 13250, 7500 }, + { 16000, 34500, 3000 }, 15635, 16450, 10000000, 500, + }; + struct v4l2_ctrl_hdr10_cll_info p_hdr10_cll = { 1000, 400 }; + + ret = v4l2_ctrl_handler_init(&inst->ctrl_handler, 59); + if (ret) + return ret; + + v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_BITRATE_MODE, + V4L2_MPEG_VIDEO_BITRATE_MODE_CBR, + ~((1 << V4L2_MPEG_VIDEO_BITRATE_MODE_VBR) | + (1 << V4L2_MPEG_VIDEO_BITRATE_MODE_CBR) | + (1 << V4L2_MPEG_VIDEO_BITRATE_MODE_CQ)), + V4L2_MPEG_VIDEO_BITRATE_MODE_VBR); + + v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE, + V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC, + 0, V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC); + + v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE, + V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_CODING_EFFICIENCY, + ~((1 << V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE) | + (1 << V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_SIMPLE)), + V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE); + + v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL, + V4L2_MPEG_VIDEO_MPEG4_LEVEL_5, + 0, V4L2_MPEG_VIDEO_MPEG4_LEVEL_0); + + v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_HEVC_PROFILE, + V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10, + ~((1 << V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN) | + (1 << V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_STILL_PICTURE) | + (1 << V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10)), + V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN); + + v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_HEVC_LEVEL, + V4L2_MPEG_VIDEO_HEVC_LEVEL_6_2, + 0, V4L2_MPEG_VIDEO_HEVC_LEVEL_1); + + v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_H264_PROFILE, + V4L2_MPEG_VIDEO_H264_PROFILE_MULTIVIEW_HIGH, + ~((1 << V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE) | + (1 << V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE) | + (1 << V4L2_MPEG_VIDEO_H264_PROFILE_MAIN) | + (1 << V4L2_MPEG_VIDEO_H264_PROFILE_HIGH) | + (1 << V4L2_MPEG_VIDEO_H264_PROFILE_STEREO_HIGH) | + (1 << V4L2_MPEG_VIDEO_H264_PROFILE_MULTIVIEW_HIGH)), + V4L2_MPEG_VIDEO_H264_PROFILE_HIGH); + + v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_H264_LEVEL, + V4L2_MPEG_VIDEO_H264_LEVEL_5_1, + 0, V4L2_MPEG_VIDEO_H264_LEVEL_1_0); + + v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE, + AT_SLICE_BOUNDARY, + 0, V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED); + + v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_HEADER_MODE, + V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME, + 
~((1 << V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE) | + (1 << V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME)), + V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME); + + v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE, + V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_BYTES, + 0, V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE); + + v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_VP8_PROFILE, + V4L2_MPEG_VIDEO_VP8_PROFILE_3, + 0, V4L2_MPEG_VIDEO_VP8_PROFILE_0); + + v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MIN_BUFFERS_FOR_OUTPUT, 4, 11, 1, 4); + + v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_BITRATE, BITRATE_MIN, BITRATE_MAX, + BITRATE_STEP, BITRATE_DEFAULT); + + v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_BITRATE_PEAK, BITRATE_MIN, BITRATE_MAX, + BITRATE_STEP, BITRATE_DEFAULT_PEAK); + + v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP, 1, 51, 1, 26); + + v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP, 1, 51, 1, 28); + + v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP, 1, 51, 1, 30); + + v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_H264_MIN_QP, 1, 51, 1, 1); + + v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_H264_I_FRAME_MIN_QP, 1, 51, 1, 1); + + v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM, 0, 1, 1, 1); + + v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_H264_P_FRAME_MIN_QP, 1, 51, 1, 1); + + v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_H264_B_FRAME_MIN_QP, 1, 51, 1, 1); + + v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_H264_MAX_QP, 1, 51, 1, 51); + + v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_H264_I_FRAME_MAX_QP, 1, 51, 1, 51); + + v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_H264_P_FRAME_MAX_QP, 1, 51, 1, 51); + + v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_H264_B_FRAME_MAX_QP, 1, 51, 1, 51); + + v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_QP, 1, 63, 1, 26); + + v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_QP, 1, 63, 1, 28); + + v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_QP, 1, 63, 1, 30); + + v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP, 1, 63, 1, 1); + + v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_MIN_QP, 1, 63, 1, 1); + + v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_MIN_QP, 1, 63, 1, 1); + + v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_MIN_QP, 1, 63, 1, 1); + + v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP, 1, 63, 1, 63); + + v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_MAX_QP, 1, 63, 1, 63); + + v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_MAX_QP, 1, 63, 1, 63); + + v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops, + 
V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_MAX_QP, 1, 63, 1, 63); + + v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES, SLICE_BYTE_SIZE_MIN, + SLICE_BYTE_SIZE_MAX, 1, SLICE_BYTE_SIZE_MIN); + + v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB, 1, + SLICE_MB_SIZE_MAX, 1, 1); + + v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA, -6, 6, 1, 0); + + v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA, -6, 6, 1, 0); + + v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_GOP_SIZE, 0, (1 << 16) - 1, 1, 30); + + v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_VPX_MIN_QP, 1, 128, 1, 1); + + v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_VPX_MAX_QP, 1, 128, 1, 128); + + v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_B_FRAMES, 0, 4, 1, 0); + + v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_H264_I_PERIOD, 0, (1 << 16) - 1, 1, 0); + + v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME, 0, 0, 0, 0); + + v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE, 0, 1, 1, 1); + + v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_CONSTANT_QUALITY, 0, 100, 1, 0); + + v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_FRAME_SKIP_MODE, + V4L2_MPEG_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT, + ~((1 << V4L2_MPEG_VIDEO_FRAME_SKIP_MODE_DISABLED) | + (1 << V4L2_MPEG_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT)), + V4L2_MPEG_VIDEO_FRAME_SKIP_MODE_DISABLED); + + v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_BASELAYER_PRIORITY_ID, 0, + 6, 1, 0); + + v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_AU_DELIMITER, 0, 1, 1, 0); + + v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_USE_LTR_FRAMES, 0, + ((1 << MAX_LTR_FRAME_COUNT) - 1), 0, 0); + + v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_LTR_COUNT, 0, + MAX_LTR_FRAME_COUNT, 1, 0); + + v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_FRAME_LTR_INDEX, 0, + (MAX_LTR_FRAME_COUNT - 1), 1, 0); + + v4l2_ctrl_new_std_compound(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_COLORIMETRY_HDR10_CLL_INFO, + v4l2_ctrl_ptr_create(&p_hdr10_cll)); + + v4l2_ctrl_new_std_compound(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_COLORIMETRY_HDR10_MASTERING_DISPLAY, + v4l2_ctrl_ptr_create((void *)&p_hdr10_mastering)); + + v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD_TYPE, + V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD_TYPE_CYCLIC, + 0, V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD_TYPE_RANDOM); + + v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD, 0, + ((4096 * 2304) >> 8), 1, 0); + + ret = inst->ctrl_handler.error; + if (ret) + goto err; + + ret = v4l2_ctrl_handler_setup(&inst->ctrl_handler); + if (ret) + goto err; + + return 0; +err: + v4l2_ctrl_handler_free(&inst->ctrl_handler); + return ret; +} + +void venc_ctrl_deinit(struct venus_inst *inst) +{ + v4l2_ctrl_handler_free(&inst->ctrl_handler); +} |
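
As a quick illustration of the GOP arithmetic implemented by venc_calc_bpframes() above, the user-space sketch below replays the same B/P split outside the kernel. calc_bpframes() and the sample GOP values are local to this example and are not part of the driver; the kernel's -EINVAL is mapped to a plain -1 here.

/* Standalone sketch of the GOP split done by venc_calc_bpframes(). */
#include <stdio.h>

static int calc_bpframes(unsigned int gop_size, unsigned int conseq_b,
                         unsigned int *bf, unsigned int *pf)
{
        unsigned int half = (gop_size - 1) >> 1;
        unsigned int b, p, ratio;
        int found = 0;

        if (!gop_size)
                return -1;

        *bf = *pf = 0;

        if (!conseq_b) {
                /* No B frames requested: everything after the I frame is P. */
                *pf = gop_size - 1;
                return 0;
        }

        b = p = half;

        /* Search for a b/p split whose ratio matches the requested run of B frames. */
        for (; b <= gop_size - 1; b++, p--) {
                if (b % p)
                        continue;

                ratio = b / p;
                if (ratio == conseq_b) {
                        found = 1;
                        break;
                }
                if (ratio > conseq_b)
                        break;
        }

        /* The split must also account for every frame of the GOP (I + P + B). */
        if (!found || b + p + 1 != gop_size)
                return -1;

        *bf = b;
        *pf = p;
        return 0;
}

int main(void)
{
        unsigned int bf, pf;

        /* GOP of 7 with two consecutive B frames: I B B P B B P. */
        if (!calc_bpframes(7, 2, &bf, &pf))
                printf("gop=7  conseq_b=2 -> bframes=%u pframes=%u\n", bf, pf);

        /* GOP of 30 with B frames disabled. */
        if (!calc_bpframes(30, 0, &bf, &pf))
                printf("gop=30 conseq_b=0 -> bframes=%u pframes=%u\n", bf, pf);

        return 0;
}

Running it prints bframes=4 pframes=2 for the first case (the I B B P B B P pattern) and bframes=0 pframes=29 for the second, which are the values the driver would feed into HFI_PROPERTY_CONFIG_VENC_INTRA_PERIOD for those control settings.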