author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
commit    ace9429bb58fd418f0c81d4c2835699bddf6bde6
tree      b2d64bc10158fdd5497876388cd68142ca374ed3
parent    Initial commit.
Adding upstream version 6.6.15.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/media/platform/verisilicon')
-rw-r--r--  drivers/media/platform/verisilicon/Kconfig                        |   54
-rw-r--r--  drivers/media/platform/verisilicon/Makefile                       |   41
-rw-r--r--  drivers/media/platform/verisilicon/hantro.h                       |  493
-rw-r--r--  drivers/media/platform/verisilicon/hantro_drv.c                   | 1238
-rw-r--r--  drivers/media/platform/verisilicon/hantro_g1.c                    |   39
-rw-r--r--  drivers/media/platform/verisilicon/hantro_g1_h264_dec.c           |  284
-rw-r--r--  drivers/media/platform/verisilicon/hantro_g1_mpeg2_dec.c          |  240
-rw-r--r--  drivers/media/platform/verisilicon/hantro_g1_regs.h               |  356
-rw-r--r--  drivers/media/platform/verisilicon/hantro_g1_vp8_dec.c            |  511
-rw-r--r--  drivers/media/platform/verisilicon/hantro_g2.c                    |   44
-rw-r--r--  drivers/media/platform/verisilicon/hantro_g2_hevc_dec.c           |  627
-rw-r--r--  drivers/media/platform/verisilicon/hantro_g2_regs.h               |  325
-rw-r--r--  drivers/media/platform/verisilicon/hantro_g2_vp9_dec.c            | 1014
-rw-r--r--  drivers/media/platform/verisilicon/hantro_h1_jpeg_enc.c           |  166
-rw-r--r--  drivers/media/platform/verisilicon/hantro_h1_regs.h               |  154
-rw-r--r--  drivers/media/platform/verisilicon/hantro_h264.c                  |  521
-rw-r--r--  drivers/media/platform/verisilicon/hantro_hevc.c                  |  279
-rw-r--r--  drivers/media/platform/verisilicon/hantro_hw.h                    |  543
-rw-r--r--  drivers/media/platform/verisilicon/hantro_jpeg.c                  |  348
-rw-r--r--  drivers/media/platform/verisilicon/hantro_jpeg.h                  |   15
-rw-r--r--  drivers/media/platform/verisilicon/hantro_mpeg2.c                 |   61
-rw-r--r--  drivers/media/platform/verisilicon/hantro_postproc.c              |  291
-rw-r--r--  drivers/media/platform/verisilicon/hantro_v4l2.c                  | 1022
-rw-r--r--  drivers/media/platform/verisilicon/hantro_v4l2.h                  |   34
-rw-r--r--  drivers/media/platform/verisilicon/hantro_vp8.c                   |  201
-rw-r--r--  drivers/media/platform/verisilicon/hantro_vp9.c                   |  240
-rw-r--r--  drivers/media/platform/verisilicon/hantro_vp9.h                   |  102
-rw-r--r--  drivers/media/platform/verisilicon/imx8m_vpu_hw.c                 |  402
-rw-r--r--  drivers/media/platform/verisilicon/rockchip_av1_entropymode.c     | 4424
-rw-r--r--  drivers/media/platform/verisilicon/rockchip_av1_entropymode.h     |  272
-rw-r--r--  drivers/media/platform/verisilicon/rockchip_av1_filmgrain.c       |  401
-rw-r--r--  drivers/media/platform/verisilicon/rockchip_av1_filmgrain.h       |   36
-rw-r--r--  drivers/media/platform/verisilicon/rockchip_vpu2_hw_h264_dec.c    |  491
-rw-r--r--  drivers/media/platform/verisilicon/rockchip_vpu2_hw_jpeg_enc.c    |  197
-rw-r--r--  drivers/media/platform/verisilicon/rockchip_vpu2_hw_mpeg2_dec.c   |  248
-rw-r--r--  drivers/media/platform/verisilicon/rockchip_vpu2_hw_vp8_dec.c     |  600
-rw-r--r--  drivers/media/platform/verisilicon/rockchip_vpu2_regs.h           |  600
-rw-r--r--  drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c   | 2232
-rw-r--r--  drivers/media/platform/verisilicon/rockchip_vpu981_regs.h         |  477
-rw-r--r--  drivers/media/platform/verisilicon/rockchip_vpu_hw.c              |  814
-rw-r--r--  drivers/media/platform/verisilicon/sama5d4_vdec_hw.c              |  128
-rw-r--r--  drivers/media/platform/verisilicon/sunxi_vpu_hw.c                 |  129
42 files changed, 20694 insertions, 0 deletions
diff --git a/drivers/media/platform/verisilicon/Kconfig b/drivers/media/platform/verisilicon/Kconfig
new file mode 100644
index 0000000000..e65b836b9d
--- /dev/null
+++ b/drivers/media/platform/verisilicon/Kconfig
@@ -0,0 +1,54 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+comment "Verisilicon media platform drivers"
+
+config VIDEO_HANTRO
+ tristate "Hantro VPU driver"
+ depends on ARCH_MXC || ARCH_ROCKCHIP || ARCH_AT91 || ARCH_SUNXI || COMPILE_TEST
+ depends on V4L_MEM2MEM_DRIVERS
+ depends on VIDEO_DEV
+ select MEDIA_CONTROLLER
+ select MEDIA_CONTROLLER_REQUEST_API
+ select VIDEOBUF2_DMA_CONTIG
+ select VIDEOBUF2_VMALLOC
+ select V4L2_MEM2MEM_DEV
+ select V4L2_H264
+ select V4L2_VP9
+ help
+ Support for the Hantro IP based Video Processing Units present on
+ Rockchip and NXP i.MX8M SoCs, which accelerate video and image
+ encoding and decoding.
+ To compile this driver as a module, choose M here: the module
+ will be called hantro-vpu.
+
+config VIDEO_HANTRO_IMX8M
+ bool "Hantro VPU i.MX8M support"
+ depends on VIDEO_HANTRO
+ depends on ARCH_MXC || COMPILE_TEST
+ default y
+ help
+ Enable support for i.MX8M SoCs.
+
+config VIDEO_HANTRO_SAMA5D4
+ bool "Hantro VDEC SAMA5D4 support"
+ depends on VIDEO_HANTRO
+ depends on ARCH_AT91 || COMPILE_TEST
+ default y
+ help
+ Enable support for Microchip SAMA5D4 SoCs.
+
+config VIDEO_HANTRO_ROCKCHIP
+ bool "Hantro VPU Rockchip support"
+ depends on VIDEO_HANTRO
+ depends on ARCH_ROCKCHIP || COMPILE_TEST
+ default y
+ help
+ Enable support for RK3288, RK3328, and RK3399 SoCs.
+
+config VIDEO_HANTRO_SUNXI
+ bool "Hantro VPU Allwinner support"
+ depends on VIDEO_HANTRO
+ depends on ARCH_SUNXI || COMPILE_TEST
+ default y
+ help
+ Enable support for H6 SoC.
diff --git a/drivers/media/platform/verisilicon/Makefile b/drivers/media/platform/verisilicon/Makefile
new file mode 100644
index 0000000000..6ad2ef8859
--- /dev/null
+++ b/drivers/media/platform/verisilicon/Makefile
@@ -0,0 +1,41 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_VIDEO_HANTRO) += hantro-vpu.o
+
+hantro-vpu-y += \
+ hantro_drv.o \
+ hantro_v4l2.o \
+ hantro_postproc.o \
+ hantro_h1_jpeg_enc.o \
+ hantro_g1.o \
+ hantro_g1_h264_dec.o \
+ hantro_g1_mpeg2_dec.o \
+ hantro_g1_vp8_dec.o \
+ hantro_g2.o \
+ hantro_g2_hevc_dec.o \
+ hantro_g2_vp9_dec.o \
+ rockchip_vpu2_hw_jpeg_enc.o \
+ rockchip_vpu2_hw_h264_dec.o \
+ rockchip_vpu2_hw_mpeg2_dec.o \
+ rockchip_vpu2_hw_vp8_dec.o \
+ rockchip_vpu981_hw_av1_dec.o \
+ rockchip_av1_filmgrain.o \
+ rockchip_av1_entropymode.o \
+ hantro_jpeg.o \
+ hantro_h264.o \
+ hantro_hevc.o \
+ hantro_mpeg2.o \
+ hantro_vp8.o \
+ hantro_vp9.o
+
+hantro-vpu-$(CONFIG_VIDEO_HANTRO_IMX8M) += \
+ imx8m_vpu_hw.o
+
+hantro-vpu-$(CONFIG_VIDEO_HANTRO_SAMA5D4) += \
+ sama5d4_vdec_hw.o
+
+hantro-vpu-$(CONFIG_VIDEO_HANTRO_ROCKCHIP) += \
+ rockchip_vpu_hw.o
+
+hantro-vpu-$(CONFIG_VIDEO_HANTRO_SUNXI) += \
+ sunxi_vpu_hw.o
diff --git a/drivers/media/platform/verisilicon/hantro.h b/drivers/media/platform/verisilicon/hantro.h
new file mode 100644
index 0000000000..77aee94895
--- /dev/null
+++ b/drivers/media/platform/verisilicon/hantro.h
@@ -0,0 +1,493 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Hantro VPU codec driver
+ *
+ * Copyright 2018 Google LLC.
+ * Tomasz Figa <tfiga@chromium.org>
+ *
+ * Based on s5p-mfc driver by Samsung Electronics Co., Ltd.
+ * Copyright (C) 2011 Samsung Electronics Co., Ltd.
+ */
+
+#ifndef HANTRO_H_
+#define HANTRO_H_
+
+#include <linux/platform_device.h>
+#include <linux/videodev2.h>
+#include <linux/wait.h>
+#include <linux/clk.h>
+#include <linux/reset.h>
+
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "hantro_hw.h"
+
+struct hantro_ctx;
+struct hantro_codec_ops;
+struct hantro_postproc_ops;
+
+#define HANTRO_JPEG_ENCODER BIT(0)
+#define HANTRO_ENCODERS 0x0000ffff
+#define HANTRO_MPEG2_DECODER BIT(16)
+#define HANTRO_VP8_DECODER BIT(17)
+#define HANTRO_H264_DECODER BIT(18)
+#define HANTRO_HEVC_DECODER BIT(19)
+#define HANTRO_VP9_DECODER BIT(20)
+#define HANTRO_AV1_DECODER BIT(21)
+#define HANTRO_DECODERS 0xffff0000
+
+/**
+ * struct hantro_irq - irq handler and name
+ *
+ * @name: irq name for device tree lookup
+ * @handler: interrupt handler
+ */
+struct hantro_irq {
+ const char *name;
+ irqreturn_t (*handler)(int irq, void *priv);
+};
+
+/**
+ * struct hantro_variant - information about VPU hardware variant
+ *
+ * @enc_offset: Offset from VPU base to encoder registers.
+ * @dec_offset: Offset from VPU base to decoder registers.
+ * @enc_fmts: Encoder formats.
+ * @num_enc_fmts: Number of encoder formats.
+ * @dec_fmts: Decoder formats.
+ * @num_dec_fmts: Number of decoder formats.
+ * @postproc_fmts: Post-processor formats.
+ * @num_postproc_fmts: Number of post-processor formats.
+ * @postproc_ops: Post-processor ops.
+ * @codec: Supported codecs
+ * @codec_ops: Codec ops.
+ * @init: Initialize hardware, optional.
+ * @runtime_resume: reenable hardware after power gating, optional.
+ * @irqs: array of irq names and interrupt handlers
+ * @num_irqs: number of irqs in the array
+ * @clk_names: array of clock names
+ * @num_clocks: number of clocks in the array
+ * @reg_names: array of register range names
+ * @num_regs: number of register range names in the array
+ * @double_buffer: core needs double buffering
+ * @legacy_regs: core uses legacy register set
+ * @late_postproc: postproc must be set up at the end of the job
+ */
+struct hantro_variant {
+ unsigned int enc_offset;
+ unsigned int dec_offset;
+ const struct hantro_fmt *enc_fmts;
+ unsigned int num_enc_fmts;
+ const struct hantro_fmt *dec_fmts;
+ unsigned int num_dec_fmts;
+ const struct hantro_fmt *postproc_fmts;
+ unsigned int num_postproc_fmts;
+ const struct hantro_postproc_ops *postproc_ops;
+ unsigned int codec;
+ const struct hantro_codec_ops *codec_ops;
+ int (*init)(struct hantro_dev *vpu);
+ int (*runtime_resume)(struct hantro_dev *vpu);
+ const struct hantro_irq *irqs;
+ int num_irqs;
+ const char * const *clk_names;
+ int num_clocks;
+ const char * const *reg_names;
+ int num_regs;
+ unsigned int double_buffer : 1;
+ unsigned int legacy_regs : 1;
+ unsigned int late_postproc : 1;
+};
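+
+/*
+ * Example (illustrative, hypothetical variant): a decode-only core with a
+ * single clock and a single interrupt could be described roughly as
+ * follows, assuming demo_dec_fmts, demo_codec_ops and demo_irqs are
+ * defined elsewhere:
+ *
+ *     static const char * const demo_clk_names[] = { "axi" };
+ *
+ *     static const struct hantro_variant demo_variant = {
+ *         .dec_fmts = demo_dec_fmts,
+ *         .num_dec_fmts = ARRAY_SIZE(demo_dec_fmts),
+ *         .codec = HANTRO_MPEG2_DECODER,
+ *         .codec_ops = demo_codec_ops,
+ *         .irqs = demo_irqs,
+ *         .num_irqs = ARRAY_SIZE(demo_irqs),
+ *         .clk_names = demo_clk_names,
+ *         .num_clocks = ARRAY_SIZE(demo_clk_names),
+ *     };
+ */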
+
+/**
+ * enum hantro_codec_mode - codec operating mode.
+ * @HANTRO_MODE_NONE: No operating mode. Used for RAW video formats.
+ * @HANTRO_MODE_JPEG_ENC: JPEG encoder.
+ * @HANTRO_MODE_H264_DEC: H264 decoder.
+ * @HANTRO_MODE_MPEG2_DEC: MPEG-2 decoder.
+ * @HANTRO_MODE_VP8_DEC: VP8 decoder.
+ * @HANTRO_MODE_HEVC_DEC: HEVC decoder.
+ * @HANTRO_MODE_VP9_DEC: VP9 decoder.
+ * @HANTRO_MODE_AV1_DEC: AV1 decoder
+ */
+enum hantro_codec_mode {
+ HANTRO_MODE_NONE = -1,
+ HANTRO_MODE_JPEG_ENC,
+ HANTRO_MODE_H264_DEC,
+ HANTRO_MODE_MPEG2_DEC,
+ HANTRO_MODE_VP8_DEC,
+ HANTRO_MODE_HEVC_DEC,
+ HANTRO_MODE_VP9_DEC,
+ HANTRO_MODE_AV1_DEC,
+};
+
+/*
+ * struct hantro_ctrl - helper type to declare supported controls
+ * @codec: codec id this control belongs to (HANTRO_JPEG_ENCODER, etc.)
+ * @cfg: control configuration
+ */
+struct hantro_ctrl {
+ unsigned int codec;
+ struct v4l2_ctrl_config cfg;
+};
+
+/*
+ * struct hantro_func - Hantro VPU functionality
+ *
+ * @id: processing functionality ID (can be
+ * %MEDIA_ENT_F_PROC_VIDEO_ENCODER or
+ * %MEDIA_ENT_F_PROC_VIDEO_DECODER)
+ * @vdev: &struct video_device that exposes the encoder or
+ * decoder functionality
+ * @source_pad: &struct media_pad with the source pad.
+ * @sink: &struct media_entity pointer with the sink entity
+ * @sink_pad: &struct media_pad with the sink pad.
+ * @proc: &struct media_entity pointer with the M2M device itself.
+ * @proc_pads: &struct media_pad with the @proc pads.
+ * @intf_devnode: &struct media_intf devnode pointer with the interface
+ * which controls the M2M device.
+ *
+ * Contains everything needed to attach the video device to the media device.
+ */
+struct hantro_func {
+ unsigned int id;
+ struct video_device vdev;
+ struct media_pad source_pad;
+ struct media_entity sink;
+ struct media_pad sink_pad;
+ struct media_entity proc;
+ struct media_pad proc_pads[2];
+ struct media_intf_devnode *intf_devnode;
+};
+
+static inline struct hantro_func *
+hantro_vdev_to_func(struct video_device *vdev)
+{
+ return container_of(vdev, struct hantro_func, vdev);
+}
+
+/**
+ * struct hantro_dev - driver data
+ * @v4l2_dev: V4L2 device to register video devices for.
+ * @m2m_dev: mem2mem device associated to this device.
+ * @mdev: media device associated to this device.
+ * @encoder: encoder functionality.
+ * @decoder: decoder functionality.
+ * @pdev: Pointer to VPU platform device.
+ * @dev: Pointer to device for convenient logging using
+ * dev_ macros.
+ * @clocks: Array of clock handles.
+ * @resets: Array of reset handles.
+ * @reg_bases: Mapped addresses of VPU registers.
+ * @enc_base: Mapped address of VPU encoder register for convenience.
+ * @dec_base: Mapped address of VPU decoder register for convenience.
+ * @ctrl_base: Mapped address of VPU control block.
+ * @vpu_mutex: Mutex to synchronize V4L2 calls.
+ * @irqlock: Spinlock to synchronize access to data structures
+ * shared with interrupt handlers.
+ * @variant: Hardware variant-specific parameters.
+ * @watchdog_work: Delayed work for hardware timeout handling.
+ */
+struct hantro_dev {
+ struct v4l2_device v4l2_dev;
+ struct v4l2_m2m_dev *m2m_dev;
+ struct media_device mdev;
+ struct hantro_func *encoder;
+ struct hantro_func *decoder;
+ struct platform_device *pdev;
+ struct device *dev;
+ struct clk_bulk_data *clocks;
+ struct reset_control *resets;
+ void __iomem **reg_bases;
+ void __iomem *enc_base;
+ void __iomem *dec_base;
+ void __iomem *ctrl_base;
+
+ struct mutex vpu_mutex; /* video_device lock */
+ spinlock_t irqlock;
+ const struct hantro_variant *variant;
+ struct delayed_work watchdog_work;
+};
+
+/**
+ * struct hantro_ctx - Context (instance) private data.
+ *
+ * @dev: VPU driver data to which the context belongs.
+ * @fh: V4L2 file handler.
+ * @is_encoder: Decoder or encoder context?
+ *
+ * @sequence_cap: Sequence counter for capture queue
+ * @sequence_out: Sequence counter for output queue
+ *
+ * @vpu_src_fmt: Descriptor of active source format.
+ * @src_fmt: V4L2 pixel format of active source format.
+ * @vpu_dst_fmt: Descriptor of active destination format.
+ * @dst_fmt: V4L2 pixel format of active destination format.
+ *
+ * @ctrl_handler: Control handler used to register controls.
+ * @jpeg_quality: User-specified JPEG compression quality.
+ * @bit_depth: Bit depth of current frame
+ * @need_postproc: Set to true if the bitstream features require the
+ * post-processor to be used.
+ *
+ * @codec_ops: Set of operations related to codec mode.
+ * @postproc: Post-processing context.
+ * @h264_dec: H.264-decoding context.
+ * @jpeg_enc: JPEG-encoding context.
+ * @mpeg2_dec: MPEG-2-decoding context.
+ * @vp8_dec: VP8-decoding context.
+ * @hevc_dec: HEVC-decoding context.
+ * @vp9_dec: VP9-decoding context.
+ * @av1_dec: AV1-decoding context.
+ */
+struct hantro_ctx {
+ struct hantro_dev *dev;
+ struct v4l2_fh fh;
+ bool is_encoder;
+
+ u32 sequence_cap;
+ u32 sequence_out;
+
+ const struct hantro_fmt *vpu_src_fmt;
+ struct v4l2_pix_format_mplane src_fmt;
+ const struct hantro_fmt *vpu_dst_fmt;
+ struct v4l2_pix_format_mplane dst_fmt;
+
+ struct v4l2_ctrl_handler ctrl_handler;
+ int jpeg_quality;
+ int bit_depth;
+
+ const struct hantro_codec_ops *codec_ops;
+ struct hantro_postproc_ctx postproc;
+ bool need_postproc;
+
+ /* Specific for particular codec modes. */
+ union {
+ struct hantro_h264_dec_hw_ctx h264_dec;
+ struct hantro_mpeg2_dec_hw_ctx mpeg2_dec;
+ struct hantro_vp8_dec_hw_ctx vp8_dec;
+ struct hantro_hevc_dec_hw_ctx hevc_dec;
+ struct hantro_vp9_dec_hw_ctx vp9_dec;
+ struct hantro_av1_dec_hw_ctx av1_dec;
+ };
+};
+
+/**
+ * struct hantro_fmt - information about supported video formats.
+ * @name: Human readable name of the format.
+ * @fourcc: FourCC code of the format. See V4L2_PIX_FMT_*.
+ * @codec_mode: Codec mode related to this format. See
+ * enum hantro_codec_mode.
+ * @header_size: Optional header size. Currently used by JPEG encoder.
+ * @max_depth: Maximum depth, for bitstream formats
+ * @enc_fmt: Format identifier for encoder registers.
+ * @frmsize: Supported range of frame sizes (only for bitstream formats).
+ * @postprocessed: Indicates if this format needs the post-processor.
+ * @match_depth: Indicates if format bit depth must match video bit depth
+ */
+struct hantro_fmt {
+ char *name;
+ u32 fourcc;
+ enum hantro_codec_mode codec_mode;
+ int header_size;
+ int max_depth;
+ enum hantro_enc_fmt enc_fmt;
+ struct v4l2_frmsize_stepwise frmsize;
+ bool postprocessed;
+ bool match_depth;
+};
+
+struct hantro_reg {
+ u32 base;
+ u32 shift;
+ u32 mask;
+};
+
+struct hantro_postproc_regs {
+ struct hantro_reg pipeline_en;
+ struct hantro_reg max_burst;
+ struct hantro_reg clk_gate;
+ struct hantro_reg out_swap32;
+ struct hantro_reg out_endian;
+ struct hantro_reg out_luma_base;
+ struct hantro_reg input_width;
+ struct hantro_reg input_height;
+ struct hantro_reg output_width;
+ struct hantro_reg output_height;
+ struct hantro_reg input_fmt;
+ struct hantro_reg output_fmt;
+ struct hantro_reg orig_width;
+ struct hantro_reg display_width;
+};
+
+struct hantro_vp9_decoded_buffer_info {
+ /* Info needed when the decoded frame serves as a reference frame. */
+ unsigned short width;
+ unsigned short height;
+ u32 bit_depth : 4;
+};
+
+struct hantro_decoded_buffer {
+ /* Must be the first field in this struct. */
+ struct v4l2_m2m_buffer base;
+
+ union {
+ struct hantro_vp9_decoded_buffer_info vp9;
+ };
+};
+
+/* Logging helpers */
+
+/**
+ * DOC: hantro_debug: Module parameter to control level of debugging messages.
+ *
+ * The level of debugging messages can be controlled by bits of the
+ * module parameter called "debug". The meaning of the particular
+ * bits is as follows:
+ *
+ * bit 0 - global information: mode, size, init, release
+ * bit 1 - each run start/result information
+ * bit 2 - contents of small controls from userspace
+ * bit 3 - contents of big controls from userspace
+ * bit 4 - detail fmt, ctrl, buffer q/dq information
+ * bit 5 - detail function enter/leave trace information
+ * bit 6 - register write/read information
+ */
+extern int hantro_debug;
+
+#define vpu_debug(level, fmt, args...) \
+ do { \
+ if (hantro_debug & BIT(level)) \
+ pr_info("%s:%d: " fmt, \
+ __func__, __LINE__, ##args); \
+ } while (0)
+
+#define vpu_err(fmt, args...) \
+ pr_err("%s:%d: " fmt, __func__, __LINE__, ##args)
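+
+/*
+ * Example (illustrative): with the module loaded with debug=0x41 (bits 0
+ * and 6 set), both calls below print; with debug=0x1 only the first does:
+ *
+ *     vpu_debug(0, "codec mode = %d\n", codec_mode);
+ *     vpu_debug(6, "0x%04x = 0x%08x\n", reg / 4, val);
+ */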
+
+/* Structure access helpers. */
+static __always_inline struct hantro_ctx *fh_to_ctx(struct v4l2_fh *fh)
+{
+ return container_of(fh, struct hantro_ctx, fh);
+}
+
+/* Register accessors. */
+static __always_inline void vepu_write_relaxed(struct hantro_dev *vpu,
+ u32 val, u32 reg)
+{
+ vpu_debug(6, "0x%04x = 0x%08x\n", reg / 4, val);
+ writel_relaxed(val, vpu->enc_base + reg);
+}
+
+static __always_inline void vepu_write(struct hantro_dev *vpu, u32 val, u32 reg)
+{
+ vpu_debug(6, "0x%04x = 0x%08x\n", reg / 4, val);
+ writel(val, vpu->enc_base + reg);
+}
+
+static __always_inline u32 vepu_read(struct hantro_dev *vpu, u32 reg)
+{
+ u32 val = readl(vpu->enc_base + reg);
+
+ vpu_debug(6, "0x%04x = 0x%08x\n", reg / 4, val);
+ return val;
+}
+
+static __always_inline void vdpu_write_relaxed(struct hantro_dev *vpu,
+ u32 val, u32 reg)
+{
+ vpu_debug(6, "0x%04x = 0x%08x\n", reg / 4, val);
+ writel_relaxed(val, vpu->dec_base + reg);
+}
+
+static __always_inline void vdpu_write(struct hantro_dev *vpu, u32 val, u32 reg)
+{
+ vpu_debug(6, "0x%04x = 0x%08x\n", reg / 4, val);
+ writel(val, vpu->dec_base + reg);
+}
+
+static __always_inline void hantro_write_addr(struct hantro_dev *vpu,
+ unsigned long offset,
+ dma_addr_t addr)
+{
+ vdpu_write(vpu, addr & 0xffffffff, offset);
+}
+
+static __always_inline u32 vdpu_read(struct hantro_dev *vpu, u32 reg)
+{
+ u32 val = readl(vpu->dec_base + reg);
+
+ vpu_debug(6, "0x%04x = 0x%08x\n", reg / 4, val);
+ return val;
+}
+
+static __always_inline u32 vdpu_read_mask(struct hantro_dev *vpu,
+ const struct hantro_reg *reg,
+ u32 val)
+{
+ u32 v;
+
+ v = vdpu_read(vpu, reg->base);
+ v &= ~(reg->mask << reg->shift);
+ v |= ((val & reg->mask) << reg->shift);
+ return v;
+}
+
+static __always_inline void hantro_reg_write(struct hantro_dev *vpu,
+ const struct hantro_reg *reg,
+ u32 val)
+{
+ vdpu_write(vpu, vdpu_read_mask(vpu, reg, val), reg->base);
+}
+
+static __always_inline void hantro_reg_write_relaxed(struct hantro_dev *vpu,
+ const struct hantro_reg *reg,
+ u32 val)
+{
+ vdpu_write_relaxed(vpu, vdpu_read_mask(vpu, reg, val), reg->base);
+}
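+
+/*
+ * Example (illustrative, hypothetical field): a 3-bit field occupying bits
+ * [7:5] of register 0x10 would be described as:
+ *
+ *     static const struct hantro_reg demo_field = {
+ *         .base = 0x10, .shift = 5, .mask = 0x7,
+ *     };
+ *
+ * hantro_reg_write(vpu, &demo_field, 4) then performs a read-modify-write:
+ * it reads register 0x10, clears bits [7:5] via ~(0x7 << 5), ORs in
+ * (4 & 0x7) << 5 and writes the result back, leaving all other bits of the
+ * register untouched.
+ */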
+
+void *hantro_get_ctrl(struct hantro_ctx *ctx, u32 id);
+dma_addr_t hantro_get_ref(struct hantro_ctx *ctx, u64 ts);
+
+static inline struct vb2_v4l2_buffer *
+hantro_get_src_buf(struct hantro_ctx *ctx)
+{
+ return v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+}
+
+static inline struct vb2_v4l2_buffer *
+hantro_get_dst_buf(struct hantro_ctx *ctx)
+{
+ return v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+}
+
+bool hantro_needs_postproc(const struct hantro_ctx *ctx,
+ const struct hantro_fmt *fmt);
+
+static inline dma_addr_t
+hantro_get_dec_buf_addr(struct hantro_ctx *ctx, struct vb2_buffer *vb)
+{
+ if (hantro_needs_postproc(ctx, ctx->vpu_dst_fmt))
+ return ctx->postproc.dec_q[vb->index].dma;
+ return vb2_dma_contig_plane_dma_addr(vb, 0);
+}
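+
+/*
+ * Note: when post-processing is active, the decoder core writes into an
+ * internal shadow buffer (ctx->postproc.dec_q[i]) and the post-processor
+ * produces the user-visible pixels in the vb2 buffer; otherwise the core
+ * writes straight into the vb2 buffer whose address is returned above.
+ */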
+
+static inline struct hantro_decoded_buffer *
+vb2_to_hantro_decoded_buf(struct vb2_buffer *buf)
+{
+ return container_of(buf, struct hantro_decoded_buffer, base.vb.vb2_buf);
+}
+
+void hantro_postproc_disable(struct hantro_ctx *ctx);
+void hantro_postproc_enable(struct hantro_ctx *ctx);
+void hantro_postproc_free(struct hantro_ctx *ctx);
+int hantro_postproc_alloc(struct hantro_ctx *ctx);
+int hanto_postproc_enum_framesizes(struct hantro_ctx *ctx,
+ struct v4l2_frmsizeenum *fsize);
+
+#endif /* HANTRO_H_ */
diff --git a/drivers/media/platform/verisilicon/hantro_drv.c b/drivers/media/platform/verisilicon/hantro_drv.c
new file mode 100644
index 0000000000..1874c97608
--- /dev/null
+++ b/drivers/media/platform/verisilicon/hantro_drv.c
@@ -0,0 +1,1238 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hantro VPU codec driver
+ *
+ * Copyright (C) 2018 Collabora, Ltd.
+ * Copyright 2018 Google LLC.
+ * Tomasz Figa <tfiga@chromium.org>
+ *
+ * Based on s5p-mfc driver by Samsung Electronics Co., Ltd.
+ * Copyright (C) 2011 Samsung Electronics Co., Ltd.
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/videodev2.h>
+#include <linux/workqueue.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-vmalloc.h>
+
+#include "hantro_v4l2.h"
+#include "hantro.h"
+#include "hantro_hw.h"
+
+#define DRIVER_NAME "hantro-vpu"
+
+int hantro_debug;
+module_param_named(debug, hantro_debug, int, 0644);
+MODULE_PARM_DESC(debug,
+ "Debug level - higher value produces more verbose messages");
+
+void *hantro_get_ctrl(struct hantro_ctx *ctx, u32 id)
+{
+ struct v4l2_ctrl *ctrl;
+
+ ctrl = v4l2_ctrl_find(&ctx->ctrl_handler, id);
+ return ctrl ? ctrl->p_cur.p : NULL;
+}
+
+dma_addr_t hantro_get_ref(struct hantro_ctx *ctx, u64 ts)
+{
+ struct vb2_queue *q = v4l2_m2m_get_dst_vq(ctx->fh.m2m_ctx);
+ struct vb2_buffer *buf;
+
+ buf = vb2_find_buffer(q, ts);
+ if (!buf)
+ return 0;
+ return hantro_get_dec_buf_addr(ctx, buf);
+}
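+
+/*
+ * Example (illustrative): a decoder backend typically resolves reference
+ * frames from timestamps carried in the controls and falls back to the
+ * current destination buffer when no match is queued, e.g. for VP9:
+ *
+ *     dma_addr_t ref = hantro_get_ref(ctx, dec_params->last_frame_ts);
+ *
+ *     if (!ref)
+ *         ref = hantro_get_dec_buf_addr(ctx, &dst->vb2_buf);
+ *
+ * A return value of 0 means no capture buffer with that timestamp exists.
+ */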
+
+static const struct v4l2_event hantro_eos_event = {
+ .type = V4L2_EVENT_EOS
+};
+
+static void hantro_job_finish_no_pm(struct hantro_dev *vpu,
+ struct hantro_ctx *ctx,
+ enum vb2_buffer_state result)
+{
+ struct vb2_v4l2_buffer *src, *dst;
+
+ src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+
+ if (WARN_ON(!src))
+ return;
+ if (WARN_ON(!dst))
+ return;
+
+ src->sequence = ctx->sequence_out++;
+ dst->sequence = ctx->sequence_cap++;
+
+ if (v4l2_m2m_is_last_draining_src_buf(ctx->fh.m2m_ctx, src)) {
+ dst->flags |= V4L2_BUF_FLAG_LAST;
+ v4l2_event_queue_fh(&ctx->fh, &hantro_eos_event);
+ v4l2_m2m_mark_stopped(ctx->fh.m2m_ctx);
+ }
+
+ v4l2_m2m_buf_done_and_job_finish(ctx->dev->m2m_dev, ctx->fh.m2m_ctx,
+ result);
+}
+
+static void hantro_job_finish(struct hantro_dev *vpu,
+ struct hantro_ctx *ctx,
+ enum vb2_buffer_state result)
+{
+ pm_runtime_mark_last_busy(vpu->dev);
+ pm_runtime_put_autosuspend(vpu->dev);
+
+ clk_bulk_disable(vpu->variant->num_clocks, vpu->clocks);
+
+ hantro_job_finish_no_pm(vpu, ctx, result);
+}
+
+void hantro_irq_done(struct hantro_dev *vpu,
+ enum vb2_buffer_state result)
+{
+ struct hantro_ctx *ctx =
+ v4l2_m2m_get_curr_priv(vpu->m2m_dev);
+
+ /*
+ * If cancel_delayed_work returns false
+ * the timeout expired. The watchdog is running,
+ * and will take care of finishing the job.
+ */
+ if (cancel_delayed_work(&vpu->watchdog_work)) {
+ if (result == VB2_BUF_STATE_DONE && ctx->codec_ops->done)
+ ctx->codec_ops->done(ctx);
+ hantro_job_finish(vpu, ctx, result);
+ }
+}
+
+void hantro_watchdog(struct work_struct *work)
+{
+ struct hantro_dev *vpu;
+ struct hantro_ctx *ctx;
+
+ vpu = container_of(to_delayed_work(work),
+ struct hantro_dev, watchdog_work);
+ ctx = v4l2_m2m_get_curr_priv(vpu->m2m_dev);
+ if (ctx) {
+ vpu_err("frame processing timed out!\n");
+ if (ctx->codec_ops->reset)
+ ctx->codec_ops->reset(ctx);
+ hantro_job_finish(vpu, ctx, VB2_BUF_STATE_ERROR);
+ }
+}
+
+void hantro_start_prepare_run(struct hantro_ctx *ctx)
+{
+ struct vb2_v4l2_buffer *src_buf;
+
+ src_buf = hantro_get_src_buf(ctx);
+ v4l2_ctrl_request_setup(src_buf->vb2_buf.req_obj.req,
+ &ctx->ctrl_handler);
+
+ if (!ctx->is_encoder && !ctx->dev->variant->late_postproc) {
+ if (hantro_needs_postproc(ctx, ctx->vpu_dst_fmt))
+ hantro_postproc_enable(ctx);
+ else
+ hantro_postproc_disable(ctx);
+ }
+}
+
+void hantro_end_prepare_run(struct hantro_ctx *ctx)
+{
+ struct vb2_v4l2_buffer *src_buf;
+
+ if (!ctx->is_encoder && ctx->dev->variant->late_postproc) {
+ if (hantro_needs_postproc(ctx, ctx->vpu_dst_fmt))
+ hantro_postproc_enable(ctx);
+ else
+ hantro_postproc_disable(ctx);
+ }
+
+ src_buf = hantro_get_src_buf(ctx);
+ v4l2_ctrl_request_complete(src_buf->vb2_buf.req_obj.req,
+ &ctx->ctrl_handler);
+
+ /* Kick the watchdog. */
+ schedule_delayed_work(&ctx->dev->watchdog_work,
+ msecs_to_jiffies(2000));
+}
+
+static void device_run(void *priv)
+{
+ struct hantro_ctx *ctx = priv;
+ struct vb2_v4l2_buffer *src, *dst;
+ int ret;
+
+ src = hantro_get_src_buf(ctx);
+ dst = hantro_get_dst_buf(ctx);
+
+ ret = pm_runtime_resume_and_get(ctx->dev->dev);
+ if (ret < 0)
+ goto err_cancel_job;
+
+ ret = clk_bulk_enable(ctx->dev->variant->num_clocks, ctx->dev->clocks);
+ if (ret)
+ goto err_cancel_job;
+
+ v4l2_m2m_buf_copy_metadata(src, dst, true);
+
+ if (ctx->codec_ops->run(ctx))
+ goto err_cancel_job;
+
+ return;
+
+err_cancel_job:
+ hantro_job_finish_no_pm(ctx->dev, ctx, VB2_BUF_STATE_ERROR);
+}
+
+static const struct v4l2_m2m_ops vpu_m2m_ops = {
+ .device_run = device_run,
+};
+
+static int
+queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq)
+{
+ struct hantro_ctx *ctx = priv;
+ int ret;
+
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ src_vq->drv_priv = ctx;
+ src_vq->ops = &hantro_queue_ops;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+
+ /*
+ * Driver does mostly sequential access, so sacrifice TLB efficiency
+ * for faster allocation. Also, no CPU access on the source queue,
+ * so no kernel mapping needed.
+ */
+ src_vq->dma_attrs = DMA_ATTR_ALLOC_SINGLE_PAGES |
+ DMA_ATTR_NO_KERNEL_MAPPING;
+ src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->lock = &ctx->dev->vpu_mutex;
+ src_vq->dev = ctx->dev->v4l2_dev.dev;
+ src_vq->supports_requests = true;
+
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ dst_vq->bidirectional = true;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+ dst_vq->dma_attrs = DMA_ATTR_ALLOC_SINGLE_PAGES;
+ /*
+ * The Kernel needs access to the JPEG destination buffer for the
+ * JPEG encoder to fill in the JPEG headers.
+ */
+ if (!ctx->is_encoder)
+ dst_vq->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
+
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ dst_vq->drv_priv = ctx;
+ dst_vq->ops = &hantro_queue_ops;
+ dst_vq->buf_struct_size = sizeof(struct hantro_decoded_buffer);
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->lock = &ctx->dev->vpu_mutex;
+ dst_vq->dev = ctx->dev->v4l2_dev.dev;
+
+ return vb2_queue_init(dst_vq);
+}
+
+static int hantro_try_ctrl(struct v4l2_ctrl *ctrl)
+{
+ if (ctrl->id == V4L2_CID_STATELESS_H264_SPS) {
+ const struct v4l2_ctrl_h264_sps *sps = ctrl->p_new.p_h264_sps;
+
+ if (sps->chroma_format_idc > 1)
+ /* Only 4:0:0 and 4:2:0 are supported */
+ return -EINVAL;
+ if (sps->bit_depth_luma_minus8 != sps->bit_depth_chroma_minus8)
+ /* Luma and chroma bit depth mismatch */
+ return -EINVAL;
+ if (sps->bit_depth_luma_minus8 != 0)
+ /* Only 8-bit is supported */
+ return -EINVAL;
+ } else if (ctrl->id == V4L2_CID_STATELESS_HEVC_SPS) {
+ const struct v4l2_ctrl_hevc_sps *sps = ctrl->p_new.p_hevc_sps;
+
+ if (sps->bit_depth_luma_minus8 != 0 && sps->bit_depth_luma_minus8 != 2)
+ /* Only 8-bit and 10-bit are supported */
+ return -EINVAL;
+ } else if (ctrl->id == V4L2_CID_STATELESS_VP9_FRAME) {
+ const struct v4l2_ctrl_vp9_frame *dec_params = ctrl->p_new.p_vp9_frame;
+
+ /* We only support profile 0 */
+ if (dec_params->profile != 0)
+ return -EINVAL;
+ } else if (ctrl->id == V4L2_CID_STATELESS_AV1_SEQUENCE) {
+ const struct v4l2_ctrl_av1_sequence *sequence = ctrl->p_new.p_av1_sequence;
+
+ if (sequence->bit_depth != 8 && sequence->bit_depth != 10)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int hantro_jpeg_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct hantro_ctx *ctx;
+
+ ctx = container_of(ctrl->handler,
+ struct hantro_ctx, ctrl_handler);
+
+ vpu_debug(1, "s_ctrl: id = %d, val = %d\n", ctrl->id, ctrl->val);
+
+ switch (ctrl->id) {
+ case V4L2_CID_JPEG_COMPRESSION_QUALITY:
+ ctx->jpeg_quality = ctrl->val;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int hantro_vp9_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct hantro_ctx *ctx;
+
+ ctx = container_of(ctrl->handler,
+ struct hantro_ctx, ctrl_handler);
+
+ switch (ctrl->id) {
+ case V4L2_CID_STATELESS_VP9_FRAME: {
+ int bit_depth = ctrl->p_new.p_vp9_frame->bit_depth;
+
+ if (ctx->bit_depth == bit_depth)
+ return 0;
+
+ return hantro_reset_raw_fmt(ctx, bit_depth, HANTRO_AUTO_POSTPROC);
+ }
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int hantro_hevc_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct hantro_ctx *ctx;
+
+ ctx = container_of(ctrl->handler,
+ struct hantro_ctx, ctrl_handler);
+
+ switch (ctrl->id) {
+ case V4L2_CID_STATELESS_HEVC_SPS: {
+ const struct v4l2_ctrl_hevc_sps *sps = ctrl->p_new.p_hevc_sps;
+ int bit_depth = sps->bit_depth_luma_minus8 + 8;
+
+ if (ctx->bit_depth == bit_depth)
+ return 0;
+
+ return hantro_reset_raw_fmt(ctx, bit_depth, HANTRO_AUTO_POSTPROC);
+ }
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int hantro_av1_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct hantro_ctx *ctx;
+
+ ctx = container_of(ctrl->handler,
+ struct hantro_ctx, ctrl_handler);
+
+ switch (ctrl->id) {
+ case V4L2_CID_STATELESS_AV1_SEQUENCE:
+ {
+ int bit_depth = ctrl->p_new.p_av1_sequence->bit_depth;
+ bool need_postproc = HANTRO_AUTO_POSTPROC;
+
+ if (ctrl->p_new.p_av1_sequence->flags
+ & V4L2_AV1_SEQUENCE_FLAG_FILM_GRAIN_PARAMS_PRESENT)
+ need_postproc = HANTRO_FORCE_POSTPROC;
+
+ if (ctx->bit_depth == bit_depth &&
+ ctx->need_postproc == need_postproc)
+ return 0;
+
+ return hantro_reset_raw_fmt(ctx, bit_depth, need_postproc);
+ }
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops hantro_ctrl_ops = {
+ .try_ctrl = hantro_try_ctrl,
+};
+
+static const struct v4l2_ctrl_ops hantro_jpeg_ctrl_ops = {
+ .s_ctrl = hantro_jpeg_s_ctrl,
+};
+
+static const struct v4l2_ctrl_ops hantro_vp9_ctrl_ops = {
+ .s_ctrl = hantro_vp9_s_ctrl,
+};
+
+static const struct v4l2_ctrl_ops hantro_hevc_ctrl_ops = {
+ .try_ctrl = hantro_try_ctrl,
+ .s_ctrl = hantro_hevc_s_ctrl,
+};
+
+static const struct v4l2_ctrl_ops hantro_av1_ctrl_ops = {
+ .try_ctrl = hantro_try_ctrl,
+ .s_ctrl = hantro_av1_s_ctrl,
+};
+
+#define HANTRO_JPEG_ACTIVE_MARKERS (V4L2_JPEG_ACTIVE_MARKER_APP0 | \
+ V4L2_JPEG_ACTIVE_MARKER_COM | \
+ V4L2_JPEG_ACTIVE_MARKER_DQT | \
+ V4L2_JPEG_ACTIVE_MARKER_DHT)
+
+static const struct hantro_ctrl controls[] = {
+ {
+ .codec = HANTRO_JPEG_ENCODER,
+ .cfg = {
+ .id = V4L2_CID_JPEG_COMPRESSION_QUALITY,
+ .min = 5,
+ .max = 100,
+ .step = 1,
+ .def = 50,
+ .ops = &hantro_jpeg_ctrl_ops,
+ },
+ }, {
+ .codec = HANTRO_JPEG_ENCODER,
+ .cfg = {
+ .id = V4L2_CID_JPEG_ACTIVE_MARKER,
+ .max = HANTRO_JPEG_ACTIVE_MARKERS,
+ .def = HANTRO_JPEG_ACTIVE_MARKERS,
+ /*
+ * Changing the set of active markers/segments also
+ * messes up the alignment of the JPEG header, which
+ * is needed to allow the hardware to write directly
+ * to the output buffer. Implementing this introduces
+ * a lot of complexity for little gain, as the enabled
+ * markers are already the minimum required set.
+ */
+ .flags = V4L2_CTRL_FLAG_READ_ONLY,
+ },
+ }, {
+ .codec = HANTRO_MPEG2_DECODER,
+ .cfg = {
+ .id = V4L2_CID_STATELESS_MPEG2_SEQUENCE,
+ },
+ }, {
+ .codec = HANTRO_MPEG2_DECODER,
+ .cfg = {
+ .id = V4L2_CID_STATELESS_MPEG2_PICTURE,
+ },
+ }, {
+ .codec = HANTRO_MPEG2_DECODER,
+ .cfg = {
+ .id = V4L2_CID_STATELESS_MPEG2_QUANTISATION,
+ },
+ }, {
+ .codec = HANTRO_VP8_DECODER,
+ .cfg = {
+ .id = V4L2_CID_STATELESS_VP8_FRAME,
+ },
+ }, {
+ .codec = HANTRO_H264_DECODER,
+ .cfg = {
+ .id = V4L2_CID_STATELESS_H264_DECODE_PARAMS,
+ },
+ }, {
+ .codec = HANTRO_H264_DECODER,
+ .cfg = {
+ .id = V4L2_CID_STATELESS_H264_SPS,
+ .ops = &hantro_ctrl_ops,
+ },
+ }, {
+ .codec = HANTRO_H264_DECODER,
+ .cfg = {
+ .id = V4L2_CID_STATELESS_H264_PPS,
+ },
+ }, {
+ .codec = HANTRO_H264_DECODER,
+ .cfg = {
+ .id = V4L2_CID_STATELESS_H264_SCALING_MATRIX,
+ },
+ }, {
+ .codec = HANTRO_H264_DECODER,
+ .cfg = {
+ .id = V4L2_CID_STATELESS_H264_DECODE_MODE,
+ .min = V4L2_STATELESS_H264_DECODE_MODE_FRAME_BASED,
+ .def = V4L2_STATELESS_H264_DECODE_MODE_FRAME_BASED,
+ .max = V4L2_STATELESS_H264_DECODE_MODE_FRAME_BASED,
+ },
+ }, {
+ .codec = HANTRO_H264_DECODER,
+ .cfg = {
+ .id = V4L2_CID_STATELESS_H264_START_CODE,
+ .min = V4L2_STATELESS_H264_START_CODE_ANNEX_B,
+ .def = V4L2_STATELESS_H264_START_CODE_ANNEX_B,
+ .max = V4L2_STATELESS_H264_START_CODE_ANNEX_B,
+ },
+ }, {
+ .codec = HANTRO_H264_DECODER,
+ .cfg = {
+ .id = V4L2_CID_MPEG_VIDEO_H264_PROFILE,
+ .min = V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE,
+ .max = V4L2_MPEG_VIDEO_H264_PROFILE_HIGH,
+ .menu_skip_mask =
+ BIT(V4L2_MPEG_VIDEO_H264_PROFILE_EXTENDED),
+ .def = V4L2_MPEG_VIDEO_H264_PROFILE_MAIN,
+ }
+ }, {
+ .codec = HANTRO_HEVC_DECODER,
+ .cfg = {
+ .id = V4L2_CID_STATELESS_HEVC_DECODE_MODE,
+ .min = V4L2_STATELESS_HEVC_DECODE_MODE_FRAME_BASED,
+ .max = V4L2_STATELESS_HEVC_DECODE_MODE_FRAME_BASED,
+ .def = V4L2_STATELESS_HEVC_DECODE_MODE_FRAME_BASED,
+ },
+ }, {
+ .codec = HANTRO_HEVC_DECODER,
+ .cfg = {
+ .id = V4L2_CID_STATELESS_HEVC_START_CODE,
+ .min = V4L2_STATELESS_HEVC_START_CODE_ANNEX_B,
+ .max = V4L2_STATELESS_HEVC_START_CODE_ANNEX_B,
+ .def = V4L2_STATELESS_HEVC_START_CODE_ANNEX_B,
+ },
+ }, {
+ .codec = HANTRO_HEVC_DECODER,
+ .cfg = {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_PROFILE,
+ .min = V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN,
+ .max = V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10,
+ .def = V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN,
+ },
+ }, {
+ .codec = HANTRO_HEVC_DECODER,
+ .cfg = {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_LEVEL,
+ .min = V4L2_MPEG_VIDEO_HEVC_LEVEL_1,
+ .max = V4L2_MPEG_VIDEO_HEVC_LEVEL_5_1,
+ },
+ }, {
+ .codec = HANTRO_HEVC_DECODER,
+ .cfg = {
+ .id = V4L2_CID_STATELESS_HEVC_SPS,
+ .ops = &hantro_hevc_ctrl_ops,
+ },
+ }, {
+ .codec = HANTRO_HEVC_DECODER,
+ .cfg = {
+ .id = V4L2_CID_STATELESS_HEVC_PPS,
+ },
+ }, {
+ .codec = HANTRO_HEVC_DECODER,
+ .cfg = {
+ .id = V4L2_CID_STATELESS_HEVC_DECODE_PARAMS,
+ },
+ }, {
+ .codec = HANTRO_HEVC_DECODER,
+ .cfg = {
+ .id = V4L2_CID_STATELESS_HEVC_SCALING_MATRIX,
+ },
+ }, {
+ .codec = HANTRO_VP9_DECODER,
+ .cfg = {
+ .id = V4L2_CID_STATELESS_VP9_FRAME,
+ .ops = &hantro_vp9_ctrl_ops,
+ },
+ }, {
+ .codec = HANTRO_VP9_DECODER,
+ .cfg = {
+ .id = V4L2_CID_STATELESS_VP9_COMPRESSED_HDR,
+ },
+ }, {
+ .codec = HANTRO_AV1_DECODER,
+ .cfg = {
+ .id = V4L2_CID_STATELESS_AV1_FRAME,
+ },
+ }, {
+ .codec = HANTRO_AV1_DECODER,
+ .cfg = {
+ .id = V4L2_CID_STATELESS_AV1_TILE_GROUP_ENTRY,
+ .dims = { V4L2_AV1_MAX_TILE_COUNT },
+ },
+ }, {
+ .codec = HANTRO_AV1_DECODER,
+ .cfg = {
+ .id = V4L2_CID_STATELESS_AV1_SEQUENCE,
+ .ops = &hantro_av1_ctrl_ops,
+ },
+ }, {
+ .codec = HANTRO_AV1_DECODER,
+ .cfg = {
+ .id = V4L2_CID_STATELESS_AV1_FILM_GRAIN,
+ },
+ },
+};
+
+static int hantro_ctrls_setup(struct hantro_dev *vpu,
+ struct hantro_ctx *ctx,
+ int allowed_codecs)
+{
+ int i, num_ctrls = ARRAY_SIZE(controls);
+
+ v4l2_ctrl_handler_init(&ctx->ctrl_handler, num_ctrls);
+
+ for (i = 0; i < num_ctrls; i++) {
+ if (!(allowed_codecs & controls[i].codec))
+ continue;
+
+ v4l2_ctrl_new_custom(&ctx->ctrl_handler,
+ &controls[i].cfg, NULL);
+ if (ctx->ctrl_handler.error) {
+ vpu_err("Adding control (%d) failed %d\n",
+ controls[i].cfg.id,
+ ctx->ctrl_handler.error);
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+ return ctx->ctrl_handler.error;
+ }
+ }
+ return v4l2_ctrl_handler_setup(&ctx->ctrl_handler);
+}
+
+/*
+ * V4L2 file operations.
+ */
+
+static int hantro_open(struct file *filp)
+{
+ struct hantro_dev *vpu = video_drvdata(filp);
+ struct video_device *vdev = video_devdata(filp);
+ struct hantro_func *func = hantro_vdev_to_func(vdev);
+ struct hantro_ctx *ctx;
+ int allowed_codecs, ret;
+
+ /*
+ * We do not need any extra locking here, because we operate only
+ * on local data here, except for reading a few fields from dev, which
+ * do not change through the device's lifetime (which is guaranteed by
+ * the reference on the module from open()) and V4L2 internal objects (such
+ * as vdev and ctx->fh), which have proper locking done in respective
+ * helper functions used here.
+ */
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->dev = vpu;
+ if (func->id == MEDIA_ENT_F_PROC_VIDEO_ENCODER) {
+ allowed_codecs = vpu->variant->codec & HANTRO_ENCODERS;
+ ctx->is_encoder = true;
+ } else if (func->id == MEDIA_ENT_F_PROC_VIDEO_DECODER) {
+ allowed_codecs = vpu->variant->codec & HANTRO_DECODERS;
+ ctx->is_encoder = false;
+ } else {
+ ret = -ENODEV;
+ goto err_ctx_free;
+ }
+
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(vpu->m2m_dev, ctx, queue_init);
+ if (IS_ERR(ctx->fh.m2m_ctx)) {
+ ret = PTR_ERR(ctx->fh.m2m_ctx);
+ goto err_ctx_free;
+ }
+
+ v4l2_fh_init(&ctx->fh, vdev);
+ filp->private_data = &ctx->fh;
+ v4l2_fh_add(&ctx->fh);
+
+ hantro_reset_fmts(ctx);
+
+ ret = hantro_ctrls_setup(vpu, ctx, allowed_codecs);
+ if (ret) {
+ vpu_err("Failed to set up controls\n");
+ goto err_fh_free;
+ }
+ ctx->fh.ctrl_handler = &ctx->ctrl_handler;
+
+ return 0;
+
+err_fh_free:
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+err_ctx_free:
+ kfree(ctx);
+ return ret;
+}
+
+static int hantro_release(struct file *filp)
+{
+ struct hantro_ctx *ctx =
+ container_of(filp->private_data, struct hantro_ctx, fh);
+
+ /*
+ * No need for extra locking because this was the last reference
+ * to this file.
+ */
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+ kfree(ctx);
+
+ return 0;
+}
+
+static const struct v4l2_file_operations hantro_fops = {
+ .owner = THIS_MODULE,
+ .open = hantro_open,
+ .release = hantro_release,
+ .poll = v4l2_m2m_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = v4l2_m2m_fop_mmap,
+};
+
+static const struct of_device_id of_hantro_match[] = {
+#ifdef CONFIG_VIDEO_HANTRO_ROCKCHIP
+ { .compatible = "rockchip,px30-vpu", .data = &px30_vpu_variant, },
+ { .compatible = "rockchip,rk3036-vpu", .data = &rk3036_vpu_variant, },
+ { .compatible = "rockchip,rk3066-vpu", .data = &rk3066_vpu_variant, },
+ { .compatible = "rockchip,rk3288-vpu", .data = &rk3288_vpu_variant, },
+ { .compatible = "rockchip,rk3328-vpu", .data = &rk3328_vpu_variant, },
+ { .compatible = "rockchip,rk3399-vpu", .data = &rk3399_vpu_variant, },
+ { .compatible = "rockchip,rk3568-vepu", .data = &rk3568_vepu_variant, },
+ { .compatible = "rockchip,rk3568-vpu", .data = &rk3568_vpu_variant, },
+ { .compatible = "rockchip,rk3588-av1-vpu", .data = &rk3588_vpu981_variant, },
+#endif
+#ifdef CONFIG_VIDEO_HANTRO_IMX8M
+ { .compatible = "nxp,imx8mm-vpu-g1", .data = &imx8mm_vpu_g1_variant, },
+ { .compatible = "nxp,imx8mq-vpu", .data = &imx8mq_vpu_variant, },
+ { .compatible = "nxp,imx8mq-vpu-g1", .data = &imx8mq_vpu_g1_variant },
+ { .compatible = "nxp,imx8mq-vpu-g2", .data = &imx8mq_vpu_g2_variant },
+#endif
+#ifdef CONFIG_VIDEO_HANTRO_SAMA5D4
+ { .compatible = "microchip,sama5d4-vdec", .data = &sama5d4_vdec_variant, },
+#endif
+#ifdef CONFIG_VIDEO_HANTRO_SUNXI
+ { .compatible = "allwinner,sun50i-h6-vpu-g2", .data = &sunxi_vpu_variant, },
+#endif
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_hantro_match);
+
+static int hantro_register_entity(struct media_device *mdev,
+ struct media_entity *entity,
+ const char *entity_name,
+ struct media_pad *pads, int num_pads,
+ int function, struct video_device *vdev)
+{
+ char *name;
+ int ret;
+
+ entity->obj_type = MEDIA_ENTITY_TYPE_BASE;
+ if (function == MEDIA_ENT_F_IO_V4L) {
+ entity->info.dev.major = VIDEO_MAJOR;
+ entity->info.dev.minor = vdev->minor;
+ }
+
+ name = devm_kasprintf(mdev->dev, GFP_KERNEL, "%s-%s", vdev->name,
+ entity_name);
+ if (!name)
+ return -ENOMEM;
+
+ entity->name = name;
+ entity->function = function;
+
+ ret = media_entity_pads_init(entity, num_pads, pads);
+ if (ret)
+ return ret;
+
+ ret = media_device_register_entity(mdev, entity);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int hantro_attach_func(struct hantro_dev *vpu,
+ struct hantro_func *func)
+{
+ struct media_device *mdev = &vpu->mdev;
+ struct media_link *link;
+ int ret;
+
+ /* Create the three entities with their pads */
+ func->source_pad.flags = MEDIA_PAD_FL_SOURCE;
+ ret = hantro_register_entity(mdev, &func->vdev.entity, "source",
+ &func->source_pad, 1, MEDIA_ENT_F_IO_V4L,
+ &func->vdev);
+ if (ret)
+ return ret;
+
+ func->proc_pads[0].flags = MEDIA_PAD_FL_SINK;
+ func->proc_pads[1].flags = MEDIA_PAD_FL_SOURCE;
+ ret = hantro_register_entity(mdev, &func->proc, "proc",
+ func->proc_pads, 2, func->id,
+ &func->vdev);
+ if (ret)
+ goto err_rel_entity0;
+
+ func->sink_pad.flags = MEDIA_PAD_FL_SINK;
+ ret = hantro_register_entity(mdev, &func->sink, "sink",
+ &func->sink_pad, 1, MEDIA_ENT_F_IO_V4L,
+ &func->vdev);
+ if (ret)
+ goto err_rel_entity1;
+
+ /* Connect the three entities */
+ ret = media_create_pad_link(&func->vdev.entity, 0, &func->proc, 0,
+ MEDIA_LNK_FL_IMMUTABLE |
+ MEDIA_LNK_FL_ENABLED);
+ if (ret)
+ goto err_rel_entity2;
+
+ ret = media_create_pad_link(&func->proc, 1, &func->sink, 0,
+ MEDIA_LNK_FL_IMMUTABLE |
+ MEDIA_LNK_FL_ENABLED);
+ if (ret)
+ goto err_rm_links0;
+
+ /* Create video interface */
+ func->intf_devnode = media_devnode_create(mdev, MEDIA_INTF_T_V4L_VIDEO,
+ 0, VIDEO_MAJOR,
+ func->vdev.minor);
+ if (!func->intf_devnode) {
+ ret = -ENOMEM;
+ goto err_rm_links1;
+ }
+
+ /* Connect the two DMA engines to the interface */
+ link = media_create_intf_link(&func->vdev.entity,
+ &func->intf_devnode->intf,
+ MEDIA_LNK_FL_IMMUTABLE |
+ MEDIA_LNK_FL_ENABLED);
+ if (!link) {
+ ret = -ENOMEM;
+ goto err_rm_devnode;
+ }
+
+ link = media_create_intf_link(&func->sink, &func->intf_devnode->intf,
+ MEDIA_LNK_FL_IMMUTABLE |
+ MEDIA_LNK_FL_ENABLED);
+ if (!link) {
+ ret = -ENOMEM;
+ goto err_rm_devnode;
+ }
+ return 0;
+
+err_rm_devnode:
+ media_devnode_remove(func->intf_devnode);
+
+err_rm_links1:
+ media_entity_remove_links(&func->sink);
+
+err_rm_links0:
+ media_entity_remove_links(&func->proc);
+ media_entity_remove_links(&func->vdev.entity);
+
+err_rel_entity2:
+ media_device_unregister_entity(&func->sink);
+
+err_rel_entity1:
+ media_device_unregister_entity(&func->proc);
+
+err_rel_entity0:
+ media_device_unregister_entity(&func->vdev.entity);
+ return ret;
+}
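+
+/*
+ * Resulting media topology (illustrative):
+ *
+ *     [vdev entity] --> [proc entity] --> [sink entity]
+ *        (source)       (en/decoder)         (I/O)
+ *
+ * with the V4L interface devnode linked to both the vdev and the sink
+ * entities.
+ */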
+
+static void hantro_detach_func(struct hantro_func *func)
+{
+ media_devnode_remove(func->intf_devnode);
+ media_entity_remove_links(&func->sink);
+ media_entity_remove_links(&func->proc);
+ media_entity_remove_links(&func->vdev.entity);
+ media_device_unregister_entity(&func->sink);
+ media_device_unregister_entity(&func->proc);
+ media_device_unregister_entity(&func->vdev.entity);
+}
+
+static int hantro_add_func(struct hantro_dev *vpu, unsigned int funcid)
+{
+ const struct of_device_id *match;
+ struct hantro_func *func;
+ struct video_device *vfd;
+ int ret;
+
+ match = of_match_node(of_hantro_match, vpu->dev->of_node);
+ func = devm_kzalloc(vpu->dev, sizeof(*func), GFP_KERNEL);
+ if (!func) {
+ v4l2_err(&vpu->v4l2_dev, "Failed to allocate video device\n");
+ return -ENOMEM;
+ }
+
+ func->id = funcid;
+
+ vfd = &func->vdev;
+ vfd->fops = &hantro_fops;
+ vfd->release = video_device_release_empty;
+ vfd->lock = &vpu->vpu_mutex;
+ vfd->v4l2_dev = &vpu->v4l2_dev;
+ vfd->vfl_dir = VFL_DIR_M2M;
+ vfd->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M_MPLANE;
+ vfd->ioctl_ops = &hantro_ioctl_ops;
+ snprintf(vfd->name, sizeof(vfd->name), "%s-%s", match->compatible,
+ funcid == MEDIA_ENT_F_PROC_VIDEO_ENCODER ? "enc" : "dec");
+
+ if (funcid == MEDIA_ENT_F_PROC_VIDEO_ENCODER) {
+ vpu->encoder = func;
+ v4l2_disable_ioctl(vfd, VIDIOC_TRY_DECODER_CMD);
+ v4l2_disable_ioctl(vfd, VIDIOC_DECODER_CMD);
+ } else {
+ vpu->decoder = func;
+ v4l2_disable_ioctl(vfd, VIDIOC_TRY_ENCODER_CMD);
+ v4l2_disable_ioctl(vfd, VIDIOC_ENCODER_CMD);
+ }
+
+ video_set_drvdata(vfd, vpu);
+
+ ret = video_register_device(vfd, VFL_TYPE_VIDEO, -1);
+ if (ret) {
+ v4l2_err(&vpu->v4l2_dev, "Failed to register video device\n");
+ return ret;
+ }
+
+ ret = hantro_attach_func(vpu, func);
+ if (ret) {
+ v4l2_err(&vpu->v4l2_dev,
+ "Failed to attach functionality to the media device\n");
+ goto err_unreg_dev;
+ }
+
+ v4l2_info(&vpu->v4l2_dev, "registered %s as /dev/video%d\n", vfd->name,
+ vfd->num);
+
+ return 0;
+
+err_unreg_dev:
+ video_unregister_device(vfd);
+ return ret;
+}
+
+static int hantro_add_enc_func(struct hantro_dev *vpu)
+{
+ if (!vpu->variant->enc_fmts)
+ return 0;
+
+ return hantro_add_func(vpu, MEDIA_ENT_F_PROC_VIDEO_ENCODER);
+}
+
+static int hantro_add_dec_func(struct hantro_dev *vpu)
+{
+ if (!vpu->variant->dec_fmts)
+ return 0;
+
+ return hantro_add_func(vpu, MEDIA_ENT_F_PROC_VIDEO_DECODER);
+}
+
+static void hantro_remove_func(struct hantro_dev *vpu,
+ unsigned int funcid)
+{
+ struct hantro_func *func;
+
+ if (funcid == MEDIA_ENT_F_PROC_VIDEO_ENCODER)
+ func = vpu->encoder;
+ else
+ func = vpu->decoder;
+
+ if (!func)
+ return;
+
+ hantro_detach_func(func);
+ video_unregister_device(&func->vdev);
+}
+
+static void hantro_remove_enc_func(struct hantro_dev *vpu)
+{
+ hantro_remove_func(vpu, MEDIA_ENT_F_PROC_VIDEO_ENCODER);
+}
+
+static void hantro_remove_dec_func(struct hantro_dev *vpu)
+{
+ hantro_remove_func(vpu, MEDIA_ENT_F_PROC_VIDEO_DECODER);
+}
+
+static const struct media_device_ops hantro_m2m_media_ops = {
+ .req_validate = vb2_request_validate,
+ .req_queue = v4l2_m2m_request_queue,
+};
+
+static int hantro_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *match;
+ struct hantro_dev *vpu;
+ int num_bases;
+ int i, ret;
+
+ vpu = devm_kzalloc(&pdev->dev, sizeof(*vpu), GFP_KERNEL);
+ if (!vpu)
+ return -ENOMEM;
+
+ vpu->dev = &pdev->dev;
+ vpu->pdev = pdev;
+ mutex_init(&vpu->vpu_mutex);
+ spin_lock_init(&vpu->irqlock);
+
+ match = of_match_node(of_hantro_match, pdev->dev.of_node);
+ vpu->variant = match->data;
+
+ /*
+ * Support for nxp,imx8mq-vpu is kept for backwards compatibility
+ * but it's deprecated. Please update your DTS file to use
+ * nxp,imx8mq-vpu-g1 or nxp,imx8mq-vpu-g2 instead.
+ */
+ if (of_device_is_compatible(pdev->dev.of_node, "nxp,imx8mq-vpu"))
+ dev_warn(&pdev->dev, "%s compatible is deprecated\n",
+ match->compatible);
+
+ INIT_DELAYED_WORK(&vpu->watchdog_work, hantro_watchdog);
+
+ vpu->clocks = devm_kcalloc(&pdev->dev, vpu->variant->num_clocks,
+ sizeof(*vpu->clocks), GFP_KERNEL);
+ if (!vpu->clocks)
+ return -ENOMEM;
+
+ if (vpu->variant->num_clocks > 1) {
+ for (i = 0; i < vpu->variant->num_clocks; i++)
+ vpu->clocks[i].id = vpu->variant->clk_names[i];
+
+ ret = devm_clk_bulk_get(&pdev->dev, vpu->variant->num_clocks,
+ vpu->clocks);
+ if (ret)
+ return ret;
+ } else {
+ /*
+ * If the driver has a single clk, chances are there will be no
+ * actual name in the DT bindings.
+ */
+ vpu->clocks[0].clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(vpu->clocks[0].clk))
+ return PTR_ERR(vpu->clocks[0].clk);
+ }
+
+ vpu->resets = devm_reset_control_array_get_optional_exclusive(&pdev->dev);
+ if (IS_ERR(vpu->resets))
+ return PTR_ERR(vpu->resets);
+
+ num_bases = vpu->variant->num_regs ?: 1;
+ vpu->reg_bases = devm_kcalloc(&pdev->dev, num_bases,
+ sizeof(*vpu->reg_bases), GFP_KERNEL);
+ if (!vpu->reg_bases)
+ return -ENOMEM;
+
+ for (i = 0; i < num_bases; i++) {
+ vpu->reg_bases[i] = vpu->variant->reg_names ?
+ devm_platform_ioremap_resource_byname(pdev, vpu->variant->reg_names[i]) :
+ devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(vpu->reg_bases[i]))
+ return PTR_ERR(vpu->reg_bases[i]);
+ }
+ vpu->enc_base = vpu->reg_bases[0] + vpu->variant->enc_offset;
+ vpu->dec_base = vpu->reg_bases[0] + vpu->variant->dec_offset;
+
+ /*
+ * TODO: Eventually allow taking advantage of full 64-bit address space.
+ * Until then we assume the MSB portion of buffers' base addresses is
+ * always 0 due to this masking operation.
+ */
+ ret = dma_set_coherent_mask(vpu->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(vpu->dev, "Could not set DMA coherent mask.\n");
+ return ret;
+ }
+ vb2_dma_contig_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));
+
+ for (i = 0; i < vpu->variant->num_irqs; i++) {
+ const char *irq_name;
+ int irq;
+
+ if (!vpu->variant->irqs[i].handler)
+ continue;
+
+ if (vpu->variant->num_irqs > 1) {
+ irq_name = vpu->variant->irqs[i].name;
+ irq = platform_get_irq_byname(vpu->pdev, irq_name);
+ } else {
+ /*
+ * If the driver has a single IRQ, chances are there
+ * will be no actual name in the DT bindings.
+ */
+ irq_name = "default";
+ irq = platform_get_irq(vpu->pdev, 0);
+ }
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_irq(vpu->dev, irq,
+ vpu->variant->irqs[i].handler, 0,
+ dev_name(vpu->dev), vpu);
+ if (ret) {
+ dev_err(vpu->dev, "Could not request %s IRQ.\n",
+ irq_name);
+ return ret;
+ }
+ }
+
+ if (vpu->variant->init) {
+ ret = vpu->variant->init(vpu);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to init VPU hardware\n");
+ return ret;
+ }
+ }
+
+ pm_runtime_set_autosuspend_delay(vpu->dev, 100);
+ pm_runtime_use_autosuspend(vpu->dev);
+ pm_runtime_enable(vpu->dev);
+
+ ret = reset_control_deassert(vpu->resets);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to deassert resets\n");
+ goto err_pm_disable;
+ }
+
+ ret = clk_bulk_prepare(vpu->variant->num_clocks, vpu->clocks);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to prepare clocks\n");
+ goto err_rst_assert;
+ }
+
+ ret = v4l2_device_register(&pdev->dev, &vpu->v4l2_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register v4l2 device\n");
+ goto err_clk_unprepare;
+ }
+ platform_set_drvdata(pdev, vpu);
+
+ vpu->m2m_dev = v4l2_m2m_init(&vpu_m2m_ops);
+ if (IS_ERR(vpu->m2m_dev)) {
+ v4l2_err(&vpu->v4l2_dev, "Failed to init mem2mem device\n");
+ ret = PTR_ERR(vpu->m2m_dev);
+ goto err_v4l2_unreg;
+ }
+
+ vpu->mdev.dev = vpu->dev;
+ strscpy(vpu->mdev.model, DRIVER_NAME, sizeof(vpu->mdev.model));
+ media_device_init(&vpu->mdev);
+ vpu->mdev.ops = &hantro_m2m_media_ops;
+ vpu->v4l2_dev.mdev = &vpu->mdev;
+
+ ret = hantro_add_enc_func(vpu);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register encoder\n");
+ goto err_m2m_rel;
+ }
+
+ ret = hantro_add_dec_func(vpu);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register decoder\n");
+ goto err_rm_enc_func;
+ }
+
+ ret = media_device_register(&vpu->mdev);
+ if (ret) {
+ v4l2_err(&vpu->v4l2_dev, "Failed to register mem2mem media device\n");
+ goto err_rm_dec_func;
+ }
+
+ return 0;
+
+err_rm_dec_func:
+ hantro_remove_dec_func(vpu);
+err_rm_enc_func:
+ hantro_remove_enc_func(vpu);
+err_m2m_rel:
+ media_device_cleanup(&vpu->mdev);
+ v4l2_m2m_release(vpu->m2m_dev);
+err_v4l2_unreg:
+ v4l2_device_unregister(&vpu->v4l2_dev);
+err_clk_unprepare:
+ clk_bulk_unprepare(vpu->variant->num_clocks, vpu->clocks);
+err_rst_assert:
+ reset_control_assert(vpu->resets);
+err_pm_disable:
+ pm_runtime_dont_use_autosuspend(vpu->dev);
+ pm_runtime_disable(vpu->dev);
+ return ret;
+}
+
+static void hantro_remove(struct platform_device *pdev)
+{
+ struct hantro_dev *vpu = platform_get_drvdata(pdev);
+
+ v4l2_info(&vpu->v4l2_dev, "Removing %s\n", pdev->name);
+
+ media_device_unregister(&vpu->mdev);
+ hantro_remove_dec_func(vpu);
+ hantro_remove_enc_func(vpu);
+ media_device_cleanup(&vpu->mdev);
+ v4l2_m2m_release(vpu->m2m_dev);
+ v4l2_device_unregister(&vpu->v4l2_dev);
+ clk_bulk_unprepare(vpu->variant->num_clocks, vpu->clocks);
+ reset_control_assert(vpu->resets);
+ pm_runtime_dont_use_autosuspend(vpu->dev);
+ pm_runtime_disable(vpu->dev);
+}
+
+#ifdef CONFIG_PM
+static int hantro_runtime_resume(struct device *dev)
+{
+ struct hantro_dev *vpu = dev_get_drvdata(dev);
+
+ if (vpu->variant->runtime_resume)
+ return vpu->variant->runtime_resume(vpu);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops hantro_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(NULL, hantro_runtime_resume, NULL)
+};
+
+static struct platform_driver hantro_driver = {
+ .probe = hantro_probe,
+ .remove_new = hantro_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = of_hantro_match,
+ .pm = &hantro_pm_ops,
+ },
+};
+module_platform_driver(hantro_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Alpha Lin <Alpha.Lin@Rock-Chips.com>");
+MODULE_AUTHOR("Tomasz Figa <tfiga@chromium.org>");
+MODULE_AUTHOR("Ezequiel Garcia <ezequiel@collabora.com>");
+MODULE_DESCRIPTION("Hantro VPU codec driver");
diff --git a/drivers/media/platform/verisilicon/hantro_g1.c b/drivers/media/platform/verisilicon/hantro_g1.c
new file mode 100644
index 0000000000..0ab1cee622
--- /dev/null
+++ b/drivers/media/platform/verisilicon/hantro_g1.c
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hantro VPU codec driver
+ *
+ * Copyright (C) 2018 Rockchip Electronics Co., Ltd.
+ * Jeffy Chen <jeffy.chen@rock-chips.com>
+ * Copyright (C) 2019 Pengutronix, Philipp Zabel <kernel@pengutronix.de>
+ * Copyright (C) 2021 Collabora Ltd, Emil Velikov <emil.velikov@collabora.com>
+ */
+
+#include "hantro.h"
+#include "hantro_g1_regs.h"
+
+irqreturn_t hantro_g1_irq(int irq, void *dev_id)
+{
+ struct hantro_dev *vpu = dev_id;
+ enum vb2_buffer_state state;
+ u32 status;
+
+ status = vdpu_read(vpu, G1_REG_INTERRUPT);
+ state = (status & G1_REG_INTERRUPT_DEC_RDY_INT) ?
+ VB2_BUF_STATE_DONE : VB2_BUF_STATE_ERROR;
+
+ vdpu_write(vpu, 0, G1_REG_INTERRUPT);
+ vdpu_write(vpu, G1_REG_CONFIG_DEC_CLK_GATE_E, G1_REG_CONFIG);
+
+ hantro_irq_done(vpu, state);
+
+ return IRQ_HANDLED;
+}
+
+void hantro_g1_reset(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+
+ vdpu_write(vpu, G1_REG_INTERRUPT_DEC_IRQ_DIS, G1_REG_INTERRUPT);
+ vdpu_write(vpu, G1_REG_CONFIG_DEC_CLK_GATE_E, G1_REG_CONFIG);
+ vdpu_write(vpu, 1, G1_REG_SOFT_RESET);
+}
diff --git a/drivers/media/platform/verisilicon/hantro_g1_h264_dec.c b/drivers/media/platform/verisilicon/hantro_g1_h264_dec.c
new file mode 100644
index 0000000000..9de7f05eff
--- /dev/null
+++ b/drivers/media/platform/verisilicon/hantro_g1_h264_dec.c
@@ -0,0 +1,284 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Rockchip RK3288 VPU codec driver
+ *
+ * Copyright (c) 2014 Rockchip Electronics Co., Ltd.
+ * Hertz Wong <hertz.wong@rock-chips.com>
+ * Herman Chen <herman.chen@rock-chips.com>
+ *
+ * Copyright (C) 2014 Google, Inc.
+ * Tomasz Figa <tfiga@chromium.org>
+ */
+
+#include <linux/types.h>
+#include <linux/sort.h>
+
+#include <media/v4l2-mem2mem.h>
+
+#include "hantro_g1_regs.h"
+#include "hantro_hw.h"
+#include "hantro_v4l2.h"
+
+static void set_params(struct hantro_ctx *ctx, struct vb2_v4l2_buffer *src_buf)
+{
+ const struct hantro_h264_dec_ctrls *ctrls = &ctx->h264_dec.ctrls;
+ const struct v4l2_ctrl_h264_decode_params *dec_param = ctrls->decode;
+ const struct v4l2_ctrl_h264_sps *sps = ctrls->sps;
+ const struct v4l2_ctrl_h264_pps *pps = ctrls->pps;
+ struct hantro_dev *vpu = ctx->dev;
+ u32 reg;
+
+ /* Decoder control register 0. */
+ reg = G1_REG_DEC_CTRL0_DEC_AXI_AUTO;
+ if (sps->flags & V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD)
+ reg |= G1_REG_DEC_CTRL0_SEQ_MBAFF_E;
+ if (sps->profile_idc > 66) {
+ reg |= G1_REG_DEC_CTRL0_PICORD_COUNT_E;
+ if (dec_param->nal_ref_idc)
+ reg |= G1_REG_DEC_CTRL0_WRITE_MVS_E;
+ }
+
+ if (!(sps->flags & V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY) &&
+ (sps->flags & V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD ||
+ dec_param->flags & V4L2_H264_DECODE_PARAM_FLAG_FIELD_PIC))
+ reg |= G1_REG_DEC_CTRL0_PIC_INTERLACE_E;
+ if (dec_param->flags & V4L2_H264_DECODE_PARAM_FLAG_FIELD_PIC)
+ reg |= G1_REG_DEC_CTRL0_PIC_FIELDMODE_E;
+ if (!(dec_param->flags & V4L2_H264_DECODE_PARAM_FLAG_BOTTOM_FIELD))
+ reg |= G1_REG_DEC_CTRL0_PIC_TOPFIELD_E;
+ vdpu_write_relaxed(vpu, reg, G1_REG_DEC_CTRL0);
+
+ /* Decoder control register 1. */
+ reg = G1_REG_DEC_CTRL1_PIC_MB_WIDTH(MB_WIDTH(ctx->src_fmt.width)) |
+ G1_REG_DEC_CTRL1_PIC_MB_HEIGHT_P(MB_HEIGHT(ctx->src_fmt.height)) |
+ G1_REG_DEC_CTRL1_REF_FRAMES(sps->max_num_ref_frames);
+ vdpu_write_relaxed(vpu, reg, G1_REG_DEC_CTRL1);
+
+ /* Decoder control register 2. */
+ reg = G1_REG_DEC_CTRL2_CH_QP_OFFSET(pps->chroma_qp_index_offset) |
+ G1_REG_DEC_CTRL2_CH_QP_OFFSET2(pps->second_chroma_qp_index_offset);
+
+ if (pps->flags & V4L2_H264_PPS_FLAG_SCALING_MATRIX_PRESENT)
+ reg |= G1_REG_DEC_CTRL2_TYPE1_QUANT_E;
+ if (!(sps->flags & V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY))
+ reg |= G1_REG_DEC_CTRL2_FIELDPIC_FLAG_E;
+ vdpu_write_relaxed(vpu, reg, G1_REG_DEC_CTRL2);
+
+ /* Decoder control register 3. */
+ reg = G1_REG_DEC_CTRL3_START_CODE_E |
+ G1_REG_DEC_CTRL3_INIT_QP(pps->pic_init_qp_minus26 + 26) |
+ G1_REG_DEC_CTRL3_STREAM_LEN(vb2_get_plane_payload(&src_buf->vb2_buf, 0));
+ vdpu_write_relaxed(vpu, reg, G1_REG_DEC_CTRL3);
+
+ /* Decoder control register 4. */
+ reg = G1_REG_DEC_CTRL4_FRAMENUM_LEN(sps->log2_max_frame_num_minus4 + 4) |
+ G1_REG_DEC_CTRL4_FRAMENUM(dec_param->frame_num) |
+ G1_REG_DEC_CTRL4_WEIGHT_BIPR_IDC(pps->weighted_bipred_idc);
+ if (pps->flags & V4L2_H264_PPS_FLAG_ENTROPY_CODING_MODE)
+ reg |= G1_REG_DEC_CTRL4_CABAC_E;
+ if (sps->flags & V4L2_H264_SPS_FLAG_DIRECT_8X8_INFERENCE)
+ reg |= G1_REG_DEC_CTRL4_DIR_8X8_INFER_E;
+ if (sps->profile_idc >= 100 && sps->chroma_format_idc == 0)
+ reg |= G1_REG_DEC_CTRL4_BLACKWHITE_E;
+ if (pps->flags & V4L2_H264_PPS_FLAG_WEIGHTED_PRED)
+ reg |= G1_REG_DEC_CTRL4_WEIGHT_PRED_E;
+ vdpu_write_relaxed(vpu, reg, G1_REG_DEC_CTRL4);
+
+ /* Decoder control register 5. */
+ reg = G1_REG_DEC_CTRL5_REFPIC_MK_LEN(dec_param->dec_ref_pic_marking_bit_size) |
+ G1_REG_DEC_CTRL5_IDR_PIC_ID(dec_param->idr_pic_id);
+ if (pps->flags & V4L2_H264_PPS_FLAG_CONSTRAINED_INTRA_PRED)
+ reg |= G1_REG_DEC_CTRL5_CONST_INTRA_E;
+ if (pps->flags & V4L2_H264_PPS_FLAG_DEBLOCKING_FILTER_CONTROL_PRESENT)
+ reg |= G1_REG_DEC_CTRL5_FILT_CTRL_PRES;
+ if (pps->flags & V4L2_H264_PPS_FLAG_REDUNDANT_PIC_CNT_PRESENT)
+ reg |= G1_REG_DEC_CTRL5_RDPIC_CNT_PRES;
+ if (pps->flags & V4L2_H264_PPS_FLAG_TRANSFORM_8X8_MODE)
+ reg |= G1_REG_DEC_CTRL5_8X8TRANS_FLAG_E;
+ if (dec_param->flags & V4L2_H264_DECODE_PARAM_FLAG_IDR_PIC)
+ reg |= G1_REG_DEC_CTRL5_IDR_PIC_E;
+ vdpu_write_relaxed(vpu, reg, G1_REG_DEC_CTRL5);
+
+ /* Decoder control register 6. */
+ reg = G1_REG_DEC_CTRL6_PPS_ID(pps->pic_parameter_set_id) |
+ G1_REG_DEC_CTRL6_REFIDX0_ACTIVE(pps->num_ref_idx_l0_default_active_minus1 + 1) |
+ G1_REG_DEC_CTRL6_REFIDX1_ACTIVE(pps->num_ref_idx_l1_default_active_minus1 + 1) |
+ G1_REG_DEC_CTRL6_POC_LENGTH(dec_param->pic_order_cnt_bit_size);
+ vdpu_write_relaxed(vpu, reg, G1_REG_DEC_CTRL6);
+
+ /* Error concealment register. */
+ vdpu_write_relaxed(vpu, 0, G1_REG_ERR_CONC);
+
+ /* Prediction filter tap register. */
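+ /*
+ * The values 1, -5 and 20 are the first half of the symmetric H.264
+ * 6-tap half-sample luma filter (1, -5, 20, 20, -5, 1); -5 is encoded
+ * as a 10-bit two's complement field.
+ */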
+ vdpu_write_relaxed(vpu,
+ G1_REG_PRED_FLT_PRED_BC_TAP_0_0(1) |
+ G1_REG_PRED_FLT_PRED_BC_TAP_0_1(-5 & 0x3ff) |
+ G1_REG_PRED_FLT_PRED_BC_TAP_0_2(20),
+ G1_REG_PRED_FLT);
+
+ /* Reference picture buffer control register. */
+ vdpu_write_relaxed(vpu, 0, G1_REG_REF_BUF_CTRL);
+
+ /* Reference picture buffer control register 2. */
+ vdpu_write_relaxed(vpu, G1_REG_REF_BUF_CTRL2_APF_THRESHOLD(8),
+ G1_REG_REF_BUF_CTRL2);
+}
+
+static void set_ref(struct hantro_ctx *ctx)
+{
+ const struct v4l2_h264_reference *b0_reflist, *b1_reflist, *p_reflist;
+ struct hantro_dev *vpu = ctx->dev;
+ int reg_num;
+ u32 reg;
+ int i;
+
+ vdpu_write_relaxed(vpu, ctx->h264_dec.dpb_valid, G1_REG_VALID_REF);
+ vdpu_write_relaxed(vpu, ctx->h264_dec.dpb_longterm, G1_REG_LT_REF);
+
+ /*
+ * Set up reference frame picture numbers.
+ *
+ * Each G1_REG_REF_PIC(x) register contains the numbers of two
+ * consecutive reference pictures.
+ */
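+ /*
+ * Illustrative example (hypothetical values): if DPB entries 0 and 1
+ * hold reference numbers 3 and 7, G1_REG_REF_PIC(0) is written with
+ * G1_REG_REF_PIC_REFER0_NBR(3) | G1_REG_REF_PIC_REFER1_NBR(7).
+ */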
+ for (i = 0; i < HANTRO_H264_DPB_SIZE; i += 2) {
+ reg = G1_REG_REF_PIC_REFER0_NBR(hantro_h264_get_ref_nbr(ctx, i)) |
+ G1_REG_REF_PIC_REFER1_NBR(hantro_h264_get_ref_nbr(ctx, i + 1));
+ vdpu_write_relaxed(vpu, reg, G1_REG_REF_PIC(i / 2));
+ }
+
+ b0_reflist = ctx->h264_dec.reflists.b0;
+ b1_reflist = ctx->h264_dec.reflists.b1;
+ p_reflist = ctx->h264_dec.reflists.p;
+
+ /*
+ * Each G1_REG_BD_REF_PIC(x) register contains three entries from
+ * each of the forward and backward picture lists.
+ */
+ reg_num = 0;
+ for (i = 0; i < 15; i += 3) {
+ reg = G1_REG_BD_REF_PIC_BINIT_RLIST_F0(b0_reflist[i].index) |
+ G1_REG_BD_REF_PIC_BINIT_RLIST_F1(b0_reflist[i + 1].index) |
+ G1_REG_BD_REF_PIC_BINIT_RLIST_F2(b0_reflist[i + 2].index) |
+ G1_REG_BD_REF_PIC_BINIT_RLIST_B0(b1_reflist[i].index) |
+ G1_REG_BD_REF_PIC_BINIT_RLIST_B1(b1_reflist[i + 1].index) |
+ G1_REG_BD_REF_PIC_BINIT_RLIST_B2(b1_reflist[i + 2].index);
+ vdpu_write_relaxed(vpu, reg, G1_REG_BD_REF_PIC(reg_num++));
+ }
+
+ /*
+ * The G1_REG_BD_P_REF_PIC register contains the last entries (index 15)
+ * of the forward and backward reference picture lists and the first 4
+ * entries of the P forward picture list.
+ */
+ reg = G1_REG_BD_P_REF_PIC_BINIT_RLIST_F15(b0_reflist[15].index) |
+ G1_REG_BD_P_REF_PIC_BINIT_RLIST_B15(b1_reflist[15].index) |
+ G1_REG_BD_P_REF_PIC_PINIT_RLIST_F0(p_reflist[0].index) |
+ G1_REG_BD_P_REF_PIC_PINIT_RLIST_F1(p_reflist[1].index) |
+ G1_REG_BD_P_REF_PIC_PINIT_RLIST_F2(p_reflist[2].index) |
+ G1_REG_BD_P_REF_PIC_PINIT_RLIST_F3(p_reflist[3].index);
+ vdpu_write_relaxed(vpu, reg, G1_REG_BD_P_REF_PIC);
+
+ /*
+ * Each G1_REG_FWD_PIC(x) register contains six consecutive
+ * entries of the P forward picture list, starting from index 4.
+ */
+ reg_num = 0;
+ for (i = 4; i < HANTRO_H264_DPB_SIZE; i += 6) {
+ reg = G1_REG_FWD_PIC_PINIT_RLIST_F0(p_reflist[i].index) |
+ G1_REG_FWD_PIC_PINIT_RLIST_F1(p_reflist[i + 1].index) |
+ G1_REG_FWD_PIC_PINIT_RLIST_F2(p_reflist[i + 2].index) |
+ G1_REG_FWD_PIC_PINIT_RLIST_F3(p_reflist[i + 3].index) |
+ G1_REG_FWD_PIC_PINIT_RLIST_F4(p_reflist[i + 4].index) |
+ G1_REG_FWD_PIC_PINIT_RLIST_F5(p_reflist[i + 5].index);
+ vdpu_write_relaxed(vpu, reg, G1_REG_FWD_PIC(reg_num++));
+ }
+
+ /* Set up addresses of DPB buffers. */
+ for (i = 0; i < HANTRO_H264_DPB_SIZE; i++) {
+ dma_addr_t dma_addr = hantro_h264_get_ref_buf(ctx, i);
+
+ vdpu_write_relaxed(vpu, dma_addr, G1_REG_ADDR_REF(i));
+ }
+}
+
+static void set_buffers(struct hantro_ctx *ctx, struct vb2_v4l2_buffer *src_buf)
+{
+ const struct hantro_h264_dec_ctrls *ctrls = &ctx->h264_dec.ctrls;
+ struct vb2_v4l2_buffer *dst_buf;
+ struct hantro_dev *vpu = ctx->dev;
+ dma_addr_t src_dma, dst_dma;
+ size_t offset = 0;
+
+ /* Source (stream) buffer. */
+ src_dma = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
+ vdpu_write_relaxed(vpu, src_dma, G1_REG_ADDR_STR);
+
+ /* Destination (decoded frame) buffer. */
+ dst_buf = hantro_get_dst_buf(ctx);
+ dst_dma = hantro_get_dec_buf_addr(ctx, &dst_buf->vb2_buf);
+ /* Adjust the DMA address to start at the second line for the bottom field */
+ if (ctrls->decode->flags & V4L2_H264_DECODE_PARAM_FLAG_BOTTOM_FIELD)
+ offset = ALIGN(ctx->src_fmt.width, MB_DIM);
+ vdpu_write_relaxed(vpu, dst_dma + offset, G1_REG_ADDR_DST);
+
+ /* Higher profiles require DMV buffer appended to reference frames. */
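+ /*
+ * Size note: 384 bytes per macroblock is a 16x16 luma block plus
+ * 4:2:0 chroma (16 * 16 * 3 / 2), so the motion vector data begins
+ * right after the decoded picture; monochrome content has no chroma,
+ * hence 256 (16 * 16) bytes per macroblock.
+ */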
+ if (ctrls->sps->profile_idc > 66 && ctrls->decode->nal_ref_idc) {
+ unsigned int bytes_per_mb = 384;
+
+ /* The DMV buffer for monochrome starts directly after the Y plane */
+ if (ctrls->sps->profile_idc >= 100 &&
+ ctrls->sps->chroma_format_idc == 0)
+ bytes_per_mb = 256;
+ offset = bytes_per_mb * MB_WIDTH(ctx->src_fmt.width) *
+ MB_HEIGHT(ctx->src_fmt.height);
+
+ /*
+ * The DMV buffer is split in two for field-encoded frames;
+ * adjust the offset for the bottom field.
+ */
+ if (ctrls->decode->flags & V4L2_H264_DECODE_PARAM_FLAG_BOTTOM_FIELD)
+ offset += 32 * MB_WIDTH(ctx->src_fmt.width) *
+ MB_HEIGHT(ctx->src_fmt.height);
+ vdpu_write_relaxed(vpu, dst_dma + offset, G1_REG_ADDR_DIR_MV);
+ }
+
+ /* Auxiliary buffer prepared in hantro_h264_dec_prepare_run(). */
+ vdpu_write_relaxed(vpu, ctx->h264_dec.priv.dma, G1_REG_ADDR_QTABLE);
+}
+
+int hantro_g1_h264_dec_run(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ struct vb2_v4l2_buffer *src_buf;
+ int ret;
+
+ /* Prepare the H264 decoder context. */
+ ret = hantro_h264_dec_prepare_run(ctx);
+ if (ret)
+ return ret;
+
+ /* Configure hardware registers. */
+ src_buf = hantro_get_src_buf(ctx);
+ set_params(ctx, src_buf);
+ set_ref(ctx);
+ set_buffers(ctx, src_buf);
+
+ hantro_end_prepare_run(ctx);
+
+ /* Start decoding! */
+ vdpu_write_relaxed(vpu,
+ G1_REG_CONFIG_DEC_AXI_RD_ID(0xffu) |
+ G1_REG_CONFIG_DEC_TIMEOUT_E |
+ G1_REG_CONFIG_DEC_OUT_ENDIAN |
+ G1_REG_CONFIG_DEC_STRENDIAN_E |
+ G1_REG_CONFIG_DEC_MAX_BURST(16) |
+ G1_REG_CONFIG_DEC_OUTSWAP32_E |
+ G1_REG_CONFIG_DEC_INSWAP32_E |
+ G1_REG_CONFIG_DEC_STRSWAP32_E |
+ G1_REG_CONFIG_DEC_CLK_GATE_E,
+ G1_REG_CONFIG);
+ vdpu_write(vpu, G1_REG_INTERRUPT_DEC_E, G1_REG_INTERRUPT);
+
+ return 0;
+}
diff --git a/drivers/media/platform/verisilicon/hantro_g1_mpeg2_dec.c b/drivers/media/platform/verisilicon/hantro_g1_mpeg2_dec.c
new file mode 100644
index 0000000000..9aea331e1a
--- /dev/null
+++ b/drivers/media/platform/verisilicon/hantro_g1_mpeg2_dec.c
@@ -0,0 +1,240 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hantro VPU codec driver
+ *
+ * Copyright (C) 2018 Rockchip Electronics Co., Ltd.
+ */
+
+#include <asm/unaligned.h>
+#include <linux/bitfield.h>
+#include <media/v4l2-mem2mem.h>
+#include "hantro.h"
+#include "hantro_hw.h"
+#include "hantro_g1_regs.h"
+
+#define G1_SWREG(nr) ((nr) * 4)
+
+#define G1_REG_RLC_VLC_BASE G1_SWREG(12)
+#define G1_REG_DEC_OUT_BASE G1_SWREG(13)
+#define G1_REG_REFER0_BASE G1_SWREG(14)
+#define G1_REG_REFER1_BASE G1_SWREG(15)
+#define G1_REG_REFER2_BASE G1_SWREG(16)
+#define G1_REG_REFER3_BASE G1_SWREG(17)
+#define G1_REG_QTABLE_BASE G1_SWREG(40)
+
+#define G1_REG_DEC_AXI_RD_ID(v) (((v) << 24) & GENMASK(31, 24))
+#define G1_REG_DEC_TIMEOUT_E(v) ((v) ? BIT(23) : 0)
+#define G1_REG_DEC_STRSWAP32_E(v) ((v) ? BIT(22) : 0)
+#define G1_REG_DEC_STRENDIAN_E(v) ((v) ? BIT(21) : 0)
+#define G1_REG_DEC_INSWAP32_E(v) ((v) ? BIT(20) : 0)
+#define G1_REG_DEC_OUTSWAP32_E(v) ((v) ? BIT(19) : 0)
+#define G1_REG_DEC_DATA_DISC_E(v) ((v) ? BIT(18) : 0)
+#define G1_REG_DEC_LATENCY(v) (((v) << 11) & GENMASK(16, 11))
+#define G1_REG_DEC_CLK_GATE_E(v) ((v) ? BIT(10) : 0)
+#define G1_REG_DEC_IN_ENDIAN(v) ((v) ? BIT(9) : 0)
+#define G1_REG_DEC_OUT_ENDIAN(v) ((v) ? BIT(8) : 0)
+#define G1_REG_DEC_ADV_PRE_DIS(v) ((v) ? BIT(6) : 0)
+#define G1_REG_DEC_SCMD_DIS(v) ((v) ? BIT(5) : 0)
+#define G1_REG_DEC_MAX_BURST(v) (((v) << 0) & GENMASK(4, 0))
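+
+/*
+ * Illustrative note: each helper above masks the value into its field,
+ * e.g. G1_REG_DEC_MAX_BURST(16) evaluates to (16 << 0) & GENMASK(4, 0)
+ * == 0x10, so an out-of-range value cannot spill into neighbouring
+ * bit fields.
+ */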
+
+#define G1_REG_DEC_MODE(v) (((v) << 28) & GENMASK(31, 28))
+#define G1_REG_RLC_MODE_E(v) ((v) ? BIT(27) : 0)
+#define G1_REG_PIC_INTERLACE_E(v) ((v) ? BIT(23) : 0)
+#define G1_REG_PIC_FIELDMODE_E(v) ((v) ? BIT(22) : 0)
+#define G1_REG_PIC_B_E(v) ((v) ? BIT(21) : 0)
+#define G1_REG_PIC_INTER_E(v) ((v) ? BIT(20) : 0)
+#define G1_REG_PIC_TOPFIELD_E(v) ((v) ? BIT(19) : 0)
+#define G1_REG_FWD_INTERLACE_E(v) ((v) ? BIT(18) : 0)
+#define G1_REG_FILTERING_DIS(v) ((v) ? BIT(14) : 0)
+#define G1_REG_WRITE_MVS_E(v) ((v) ? BIT(12) : 0)
+#define G1_REG_DEC_AXI_WR_ID(v) (((v) << 0) & GENMASK(7, 0))
+
+#define G1_REG_PIC_MB_WIDTH(v) (((v) << 23) & GENMASK(31, 23))
+#define G1_REG_PIC_MB_HEIGHT_P(v) (((v) << 11) & GENMASK(18, 11))
+#define G1_REG_ALT_SCAN_E(v) ((v) ? BIT(6) : 0)
+#define G1_REG_TOPFIELDFIRST_E(v) ((v) ? BIT(5) : 0)
+
+#define G1_REG_STRM_START_BIT(v) (((v) << 26) & GENMASK(31, 26))
+#define G1_REG_QSCALE_TYPE(v) ((v) ? BIT(24) : 0)
+#define G1_REG_CON_MV_E(v) ((v) ? BIT(4) : 0)
+#define G1_REG_INTRA_DC_PREC(v) (((v) << 2) & GENMASK(3, 2))
+#define G1_REG_INTRA_VLC_TAB(v) ((v) ? BIT(1) : 0)
+#define G1_REG_FRAME_PRED_DCT(v) ((v) ? BIT(0) : 0)
+
+#define G1_REG_INIT_QP(v) (((v) << 25) & GENMASK(30, 25))
+#define G1_REG_STREAM_LEN(v) (((v) << 0) & GENMASK(23, 0))
+
+#define G1_REG_ALT_SCAN_FLAG_E(v) ((v) ? BIT(19) : 0)
+#define G1_REG_FCODE_FWD_HOR(v) (((v) << 15) & GENMASK(18, 15))
+#define G1_REG_FCODE_FWD_VER(v) (((v) << 11) & GENMASK(14, 11))
+#define G1_REG_FCODE_BWD_HOR(v) (((v) << 7) & GENMASK(10, 7))
+#define G1_REG_FCODE_BWD_VER(v) (((v) << 3) & GENMASK(6, 3))
+#define G1_REG_MV_ACCURACY_FWD(v) ((v) ? BIT(2) : 0)
+#define G1_REG_MV_ACCURACY_BWD(v) ((v) ? BIT(1) : 0)
+
+#define G1_REG_STARTMB_X(v) (((v) << 23) & GENMASK(31, 23))
+#define G1_REG_STARTMB_Y(v) (((v) << 15) & GENMASK(22, 15))
+
+#define G1_REG_APF_THRESHOLD(v) (((v) << 0) & GENMASK(13, 0))
+
+static void
+hantro_g1_mpeg2_dec_set_quantisation(struct hantro_dev *vpu,
+ struct hantro_ctx *ctx)
+{
+ struct v4l2_ctrl_mpeg2_quantisation *q;
+
+ q = hantro_get_ctrl(ctx, V4L2_CID_STATELESS_MPEG2_QUANTISATION);
+ hantro_mpeg2_dec_copy_qtable(ctx->mpeg2_dec.qtable.cpu, q);
+ vdpu_write_relaxed(vpu, ctx->mpeg2_dec.qtable.dma, G1_REG_QTABLE_BASE);
+}
+
+static void
+hantro_g1_mpeg2_dec_set_buffers(struct hantro_dev *vpu, struct hantro_ctx *ctx,
+ struct vb2_buffer *src_buf,
+ struct vb2_buffer *dst_buf,
+ const struct v4l2_ctrl_mpeg2_sequence *seq,
+ const struct v4l2_ctrl_mpeg2_picture *pic)
+{
+ dma_addr_t forward_addr = 0, backward_addr = 0;
+ dma_addr_t current_addr, addr;
+
+ switch (pic->picture_coding_type) {
+ case V4L2_MPEG2_PIC_CODING_TYPE_B:
+ backward_addr = hantro_get_ref(ctx, pic->backward_ref_ts);
+ fallthrough;
+ case V4L2_MPEG2_PIC_CODING_TYPE_P:
+ forward_addr = hantro_get_ref(ctx, pic->forward_ref_ts);
+ }
+
+ /* Source bitstream buffer */
+ addr = vb2_dma_contig_plane_dma_addr(src_buf, 0);
+ vdpu_write_relaxed(vpu, addr, G1_REG_RLC_VLC_BASE);
+
+ /* Destination frame buffer */
+ addr = hantro_get_dec_buf_addr(ctx, dst_buf);
+ current_addr = addr;
+
+ if (pic->picture_structure == V4L2_MPEG2_PIC_BOTTOM_FIELD)
+ addr += ALIGN(ctx->dst_fmt.width, 16);
+ vdpu_write_relaxed(vpu, addr, G1_REG_DEC_OUT_BASE);
+
+ if (!forward_addr)
+ forward_addr = current_addr;
+ if (!backward_addr)
+ backward_addr = current_addr;
+
+ /* Set forward ref frame (top/bottom field) */
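+ /*
+ * Frame pictures, B pictures and the first field of an interlaced
+ * frame use the forward reference for both field registers; when
+ * decoding the second field of a frame, the field of the current
+ * frame that was decoded first serves as the nearer reference.
+ */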
+ if (pic->picture_structure == V4L2_MPEG2_PIC_FRAME ||
+ pic->picture_coding_type == V4L2_MPEG2_PIC_CODING_TYPE_B ||
+ (pic->picture_structure == V4L2_MPEG2_PIC_TOP_FIELD &&
+ pic->flags & V4L2_MPEG2_PIC_FLAG_TOP_FIELD_FIRST) ||
+ (pic->picture_structure == V4L2_MPEG2_PIC_BOTTOM_FIELD &&
+ !(pic->flags & V4L2_MPEG2_PIC_FLAG_TOP_FIELD_FIRST))) {
+ vdpu_write_relaxed(vpu, forward_addr, G1_REG_REFER0_BASE);
+ vdpu_write_relaxed(vpu, forward_addr, G1_REG_REFER1_BASE);
+ } else if (pic->picture_structure == V4L2_MPEG2_PIC_TOP_FIELD) {
+ vdpu_write_relaxed(vpu, forward_addr, G1_REG_REFER0_BASE);
+ vdpu_write_relaxed(vpu, current_addr, G1_REG_REFER1_BASE);
+ } else if (pic->picture_structure == V4L2_MPEG2_PIC_BOTTOM_FIELD) {
+ vdpu_write_relaxed(vpu, current_addr, G1_REG_REFER0_BASE);
+ vdpu_write_relaxed(vpu, forward_addr, G1_REG_REFER1_BASE);
+ }
+
+ /* Set backward ref frame (top/bottom field) */
+ vdpu_write_relaxed(vpu, backward_addr, G1_REG_REFER2_BASE);
+ vdpu_write_relaxed(vpu, backward_addr, G1_REG_REFER3_BASE);
+}
+
+int hantro_g1_mpeg2_dec_run(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
+ const struct v4l2_ctrl_mpeg2_sequence *seq;
+ const struct v4l2_ctrl_mpeg2_picture *pic;
+ u32 reg;
+
+ src_buf = hantro_get_src_buf(ctx);
+ dst_buf = hantro_get_dst_buf(ctx);
+
+ /* Apply request controls if any */
+ hantro_start_prepare_run(ctx);
+
+ seq = hantro_get_ctrl(ctx,
+ V4L2_CID_STATELESS_MPEG2_SEQUENCE);
+ pic = hantro_get_ctrl(ctx,
+ V4L2_CID_STATELESS_MPEG2_PICTURE);
+
+ reg = G1_REG_DEC_AXI_RD_ID(0) |
+ G1_REG_DEC_TIMEOUT_E(1) |
+ G1_REG_DEC_STRSWAP32_E(1) |
+ G1_REG_DEC_STRENDIAN_E(1) |
+ G1_REG_DEC_INSWAP32_E(1) |
+ G1_REG_DEC_OUTSWAP32_E(1) |
+ G1_REG_DEC_DATA_DISC_E(0) |
+ G1_REG_DEC_LATENCY(0) |
+ G1_REG_DEC_CLK_GATE_E(1) |
+ G1_REG_DEC_IN_ENDIAN(1) |
+ G1_REG_DEC_OUT_ENDIAN(1) |
+ G1_REG_DEC_ADV_PRE_DIS(0) |
+ G1_REG_DEC_SCMD_DIS(0) |
+ G1_REG_DEC_MAX_BURST(16);
+ vdpu_write_relaxed(vpu, reg, G1_SWREG(2));
+
+ reg = G1_REG_DEC_MODE(5) |
+ G1_REG_RLC_MODE_E(0) |
+ G1_REG_PIC_INTERLACE_E(!(seq->flags & V4L2_MPEG2_SEQ_FLAG_PROGRESSIVE)) |
+ G1_REG_PIC_FIELDMODE_E(pic->picture_structure != V4L2_MPEG2_PIC_FRAME) |
+ G1_REG_PIC_B_E(pic->picture_coding_type == V4L2_MPEG2_PIC_CODING_TYPE_B) |
+ G1_REG_PIC_INTER_E(pic->picture_coding_type != V4L2_MPEG2_PIC_CODING_TYPE_I) |
+ G1_REG_PIC_TOPFIELD_E(pic->picture_structure == V4L2_MPEG2_PIC_TOP_FIELD) |
+ G1_REG_FWD_INTERLACE_E(0) |
+ G1_REG_FILTERING_DIS(1) |
+ G1_REG_WRITE_MVS_E(0) |
+ G1_REG_DEC_AXI_WR_ID(0);
+ vdpu_write_relaxed(vpu, reg, G1_SWREG(3));
+
+ reg = G1_REG_PIC_MB_WIDTH(MB_WIDTH(ctx->dst_fmt.width)) |
+ G1_REG_PIC_MB_HEIGHT_P(MB_HEIGHT(ctx->dst_fmt.height)) |
+ G1_REG_ALT_SCAN_E(pic->flags & V4L2_MPEG2_PIC_FLAG_ALT_SCAN) |
+ G1_REG_TOPFIELDFIRST_E(pic->flags & V4L2_MPEG2_PIC_FLAG_TOP_FIELD_FIRST);
+ vdpu_write_relaxed(vpu, reg, G1_SWREG(4));
+
+ reg = G1_REG_STRM_START_BIT(0) |
+ G1_REG_QSCALE_TYPE(pic->flags & V4L2_MPEG2_PIC_FLAG_Q_SCALE_TYPE) |
+ G1_REG_CON_MV_E(pic->flags & V4L2_MPEG2_PIC_FLAG_CONCEALMENT_MV) |
+ G1_REG_INTRA_DC_PREC(pic->intra_dc_precision) |
+ G1_REG_INTRA_VLC_TAB(pic->flags & V4L2_MPEG2_PIC_FLAG_INTRA_VLC) |
+ G1_REG_FRAME_PRED_DCT(pic->flags & V4L2_MPEG2_PIC_FLAG_FRAME_PRED_DCT);
+ vdpu_write_relaxed(vpu, reg, G1_SWREG(5));
+
+ reg = G1_REG_INIT_QP(1) |
+ G1_REG_STREAM_LEN(vb2_get_plane_payload(&src_buf->vb2_buf, 0));
+ vdpu_write_relaxed(vpu, reg, G1_SWREG(6));
+
+ reg = G1_REG_ALT_SCAN_FLAG_E(pic->flags & V4L2_MPEG2_PIC_FLAG_ALT_SCAN) |
+ G1_REG_FCODE_FWD_HOR(pic->f_code[0][0]) |
+ G1_REG_FCODE_FWD_VER(pic->f_code[0][1]) |
+ G1_REG_FCODE_BWD_HOR(pic->f_code[1][0]) |
+ G1_REG_FCODE_BWD_VER(pic->f_code[1][1]) |
+ G1_REG_MV_ACCURACY_FWD(1) |
+ G1_REG_MV_ACCURACY_BWD(1);
+ vdpu_write_relaxed(vpu, reg, G1_SWREG(18));
+
+ reg = G1_REG_STARTMB_X(0) |
+ G1_REG_STARTMB_Y(0);
+ vdpu_write_relaxed(vpu, reg, G1_SWREG(48));
+
+ reg = G1_REG_APF_THRESHOLD(8);
+ vdpu_write_relaxed(vpu, reg, G1_SWREG(55));
+
+ hantro_g1_mpeg2_dec_set_quantisation(vpu, ctx);
+ hantro_g1_mpeg2_dec_set_buffers(vpu, ctx, &src_buf->vb2_buf,
+ &dst_buf->vb2_buf,
+ seq, pic);
+
+ hantro_end_prepare_run(ctx);
+
+ vdpu_write(vpu, G1_REG_INTERRUPT_DEC_E, G1_REG_INTERRUPT);
+
+ return 0;
+}
diff --git a/drivers/media/platform/verisilicon/hantro_g1_regs.h b/drivers/media/platform/verisilicon/hantro_g1_regs.h
new file mode 100644
index 0000000000..c623b3b0be
--- /dev/null
+++ b/drivers/media/platform/verisilicon/hantro_g1_regs.h
@@ -0,0 +1,356 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Hantro VPU codec driver
+ *
+ * Copyright 2018 Google LLC.
+ * Tomasz Figa <tfiga@chromium.org>
+ */
+
+#ifndef HANTRO_G1_REGS_H_
+#define HANTRO_G1_REGS_H_
+
+#define G1_SWREG(nr) ((nr) * 4)
+
+/* Decoder registers. */
+#define G1_REG_INTERRUPT 0x004
+#define G1_REG_INTERRUPT_DEC_PIC_INF BIT(24)
+#define G1_REG_INTERRUPT_DEC_TIMEOUT BIT(18)
+#define G1_REG_INTERRUPT_DEC_SLICE_INT BIT(17)
+#define G1_REG_INTERRUPT_DEC_ERROR_INT BIT(16)
+#define G1_REG_INTERRUPT_DEC_ASO_INT BIT(15)
+#define G1_REG_INTERRUPT_DEC_BUFFER_INT BIT(14)
+#define G1_REG_INTERRUPT_DEC_BUS_INT BIT(13)
+#define G1_REG_INTERRUPT_DEC_RDY_INT BIT(12)
+#define G1_REG_INTERRUPT_DEC_IRQ BIT(8)
+#define G1_REG_INTERRUPT_DEC_IRQ_DIS BIT(4)
+#define G1_REG_INTERRUPT_DEC_E BIT(0)
+#define G1_REG_CONFIG 0x008
+#define G1_REG_CONFIG_DEC_AXI_RD_ID(x) (((x) & 0xff) << 24)
+#define G1_REG_CONFIG_DEC_TIMEOUT_E BIT(23)
+#define G1_REG_CONFIG_DEC_STRSWAP32_E BIT(22)
+#define G1_REG_CONFIG_DEC_STRENDIAN_E BIT(21)
+#define G1_REG_CONFIG_DEC_INSWAP32_E BIT(20)
+#define G1_REG_CONFIG_DEC_OUTSWAP32_E BIT(19)
+#define G1_REG_CONFIG_DEC_DATA_DISC_E BIT(18)
+#define G1_REG_CONFIG_TILED_MODE_MSB BIT(17)
+#define G1_REG_CONFIG_DEC_OUT_TILED_E BIT(17)
+#define G1_REG_CONFIG_DEC_LATENCY(x) (((x) & 0x3f) << 11)
+#define G1_REG_CONFIG_DEC_CLK_GATE_E BIT(10)
+#define G1_REG_CONFIG_DEC_IN_ENDIAN BIT(9)
+#define G1_REG_CONFIG_DEC_OUT_ENDIAN BIT(8)
+#define G1_REG_CONFIG_PRIORITY_MODE(x) (((x) & 0x7) << 5)
+#define G1_REG_CONFIG_TILED_MODE_LSB BIT(7)
+#define G1_REG_CONFIG_DEC_ADV_PRE_DIS BIT(6)
+#define G1_REG_CONFIG_DEC_SCMD_DIS BIT(5)
+#define G1_REG_CONFIG_DEC_MAX_BURST(x) (((x) & 0x1f) << 0)
+#define G1_REG_DEC_CTRL0 0x00c
+#define G1_REG_DEC_CTRL0_DEC_MODE(x) (((x) & 0xf) << 28)
+#define G1_REG_DEC_CTRL0_RLC_MODE_E BIT(27)
+#define G1_REG_DEC_CTRL0_SKIP_MODE BIT(26)
+#define G1_REG_DEC_CTRL0_DIVX3_E BIT(25)
+#define G1_REG_DEC_CTRL0_PJPEG_E BIT(24)
+#define G1_REG_DEC_CTRL0_PIC_INTERLACE_E BIT(23)
+#define G1_REG_DEC_CTRL0_PIC_FIELDMODE_E BIT(22)
+#define G1_REG_DEC_CTRL0_PIC_B_E BIT(21)
+#define G1_REG_DEC_CTRL0_PIC_INTER_E BIT(20)
+#define G1_REG_DEC_CTRL0_PIC_TOPFIELD_E BIT(19)
+#define G1_REG_DEC_CTRL0_FWD_INTERLACE_E BIT(18)
+#define G1_REG_DEC_CTRL0_SORENSON_E BIT(17)
+#define G1_REG_DEC_CTRL0_REF_TOPFIELD_E BIT(16)
+#define G1_REG_DEC_CTRL0_DEC_OUT_DIS BIT(15)
+#define G1_REG_DEC_CTRL0_FILTERING_DIS BIT(14)
+#define G1_REG_DEC_CTRL0_WEBP_E BIT(13)
+#define G1_REG_DEC_CTRL0_MVC_E BIT(13)
+#define G1_REG_DEC_CTRL0_PIC_FIXED_QUANT BIT(13)
+#define G1_REG_DEC_CTRL0_WRITE_MVS_E BIT(12)
+#define G1_REG_DEC_CTRL0_REFTOPFIRST_E BIT(11)
+#define G1_REG_DEC_CTRL0_SEQ_MBAFF_E BIT(10)
+#define G1_REG_DEC_CTRL0_PICORD_COUNT_E BIT(9)
+#define G1_REG_DEC_CTRL0_DEC_AHB_HLOCK_E BIT(8)
+#define G1_REG_DEC_CTRL0_DEC_AXI_WR_ID(x) (((x) & 0xff) << 0)
+/* Set the AXI ID to 0xff to get an auto-generated ID and avoid possible conflicts */
+#define G1_REG_DEC_CTRL0_DEC_AXI_AUTO G1_REG_DEC_CTRL0_DEC_AXI_WR_ID(0xff)
+#define G1_REG_DEC_CTRL1 0x010
+#define G1_REG_DEC_CTRL1_PIC_MB_WIDTH(x) (((x) & 0x1ff) << 23)
+#define G1_REG_DEC_CTRL1_MB_WIDTH_OFF(x) (((x) & 0xf) << 19)
+#define G1_REG_DEC_CTRL1_PIC_MB_HEIGHT_P(x) (((x) & 0xff) << 11)
+#define G1_REG_DEC_CTRL1_MB_HEIGHT_OFF(x) (((x) & 0xf) << 7)
+#define G1_REG_DEC_CTRL1_ALT_SCAN_E BIT(6)
+#define G1_REG_DEC_CTRL1_TOPFIELDFIRST_E BIT(5)
+#define G1_REG_DEC_CTRL1_REF_FRAMES(x) (((x) & 0x1f) << 0)
+#define G1_REG_DEC_CTRL1_PIC_MB_W_EXT(x) (((x) & 0x7) << 3)
+#define G1_REG_DEC_CTRL1_PIC_MB_H_EXT(x) (((x) & 0x7) << 0)
+#define G1_REG_DEC_CTRL1_PIC_REFER_FLAG BIT(0)
+#define G1_REG_DEC_CTRL2 0x014
+#define G1_REG_DEC_CTRL2_STRM_START_BIT(x) (((x) & 0x3f) << 26)
+#define G1_REG_DEC_CTRL2_SYNC_MARKER_E BIT(25)
+#define G1_REG_DEC_CTRL2_TYPE1_QUANT_E BIT(24)
+#define G1_REG_DEC_CTRL2_CH_QP_OFFSET(x) (((x) & 0x1f) << 19)
+#define G1_REG_DEC_CTRL2_CH_QP_OFFSET2(x) (((x) & 0x1f) << 14)
+#define G1_REG_DEC_CTRL2_FIELDPIC_FLAG_E BIT(0)
+#define G1_REG_DEC_CTRL2_INTRADC_VLC_THR(x) (((x) & 0x7) << 16)
+#define G1_REG_DEC_CTRL2_VOP_TIME_INCR(x) (((x) & 0xffff) << 0)
+#define G1_REG_DEC_CTRL2_DQ_PROFILE BIT(24)
+#define G1_REG_DEC_CTRL2_DQBI_LEVEL BIT(23)
+#define G1_REG_DEC_CTRL2_RANGE_RED_FRM_E BIT(22)
+#define G1_REG_DEC_CTRL2_FAST_UVMC_E BIT(20)
+#define G1_REG_DEC_CTRL2_TRANSDCTAB BIT(17)
+#define G1_REG_DEC_CTRL2_TRANSACFRM(x) (((x) & 0x3) << 15)
+#define G1_REG_DEC_CTRL2_TRANSACFRM2(x) (((x) & 0x3) << 13)
+#define G1_REG_DEC_CTRL2_MB_MODE_TAB(x) (((x) & 0x7) << 10)
+#define G1_REG_DEC_CTRL2_MVTAB(x) (((x) & 0x7) << 7)
+#define G1_REG_DEC_CTRL2_CBPTAB(x) (((x) & 0x7) << 4)
+#define G1_REG_DEC_CTRL2_2MV_BLK_PAT_TAB(x) (((x) & 0x3) << 2)
+#define G1_REG_DEC_CTRL2_4MV_BLK_PAT_TAB(x) (((x) & 0x3) << 0)
+#define G1_REG_DEC_CTRL2_QSCALE_TYPE BIT(24)
+#define G1_REG_DEC_CTRL2_CON_MV_E BIT(4)
+#define G1_REG_DEC_CTRL2_INTRA_DC_PREC(x) (((x) & 0x3) << 2)
+#define G1_REG_DEC_CTRL2_INTRA_VLC_TAB BIT(1)
+#define G1_REG_DEC_CTRL2_FRAME_PRED_DCT BIT(0)
+#define G1_REG_DEC_CTRL2_JPEG_QTABLES(x) (((x) & 0x3) << 11)
+#define G1_REG_DEC_CTRL2_JPEG_MODE(x) (((x) & 0x7) << 8)
+#define G1_REG_DEC_CTRL2_JPEG_FILRIGHT_E BIT(7)
+#define G1_REG_DEC_CTRL2_JPEG_STREAM_ALL BIT(6)
+#define G1_REG_DEC_CTRL2_CR_AC_VLCTABLE BIT(5)
+#define G1_REG_DEC_CTRL2_CB_AC_VLCTABLE BIT(4)
+#define G1_REG_DEC_CTRL2_CR_DC_VLCTABLE BIT(3)
+#define G1_REG_DEC_CTRL2_CB_DC_VLCTABLE BIT(2)
+#define G1_REG_DEC_CTRL2_CR_DC_VLCTABLE3 BIT(1)
+#define G1_REG_DEC_CTRL2_CB_DC_VLCTABLE3 BIT(0)
+#define G1_REG_DEC_CTRL2_STRM1_START_BIT(x) (((x) & 0x3f) << 18)
+#define G1_REG_DEC_CTRL2_HUFFMAN_E BIT(17)
+#define G1_REG_DEC_CTRL2_MULTISTREAM_E BIT(16)
+#define G1_REG_DEC_CTRL2_BOOLEAN_VALUE(x) (((x) & 0xff) << 8)
+#define G1_REG_DEC_CTRL2_BOOLEAN_RANGE(x) (((x) & 0xff) << 0)
+#define G1_REG_DEC_CTRL2_ALPHA_OFFSET(x) (((x) & 0x1f) << 5)
+#define G1_REG_DEC_CTRL2_BETA_OFFSET(x) (((x) & 0x1f) << 0)
+#define G1_REG_DEC_CTRL3 0x018
+#define G1_REG_DEC_CTRL3_START_CODE_E BIT(31)
+#define G1_REG_DEC_CTRL3_INIT_QP(x) (((x) & 0x3f) << 25)
+#define G1_REG_DEC_CTRL3_CH_8PIX_ILEAV_E BIT(24)
+#define G1_REG_DEC_CTRL3_STREAM_LEN_EXT(x) (((x) & 0xff) << 24)
+#define G1_REG_DEC_CTRL3_STREAM_LEN(x) (((x) & 0xffffff) << 0)
+#define G1_REG_DEC_CTRL4 0x01c
+#define G1_REG_DEC_CTRL4_CABAC_E BIT(31)
+#define G1_REG_DEC_CTRL4_BLACKWHITE_E BIT(30)
+#define G1_REG_DEC_CTRL4_DIR_8X8_INFER_E BIT(29)
+#define G1_REG_DEC_CTRL4_WEIGHT_PRED_E BIT(28)
+#define G1_REG_DEC_CTRL4_WEIGHT_BIPR_IDC(x) (((x) & 0x3) << 26)
+#define G1_REG_DEC_CTRL4_AVS_H264_H_EXT BIT(25)
+#define G1_REG_DEC_CTRL4_FRAMENUM_LEN(x) (((x) & 0x1f) << 16)
+#define G1_REG_DEC_CTRL4_FRAMENUM(x) (((x) & 0xffff) << 0)
+#define G1_REG_DEC_CTRL4_BITPLANE0_E BIT(31)
+#define G1_REG_DEC_CTRL4_BITPLANE1_E BIT(30)
+#define G1_REG_DEC_CTRL4_BITPLANE2_E BIT(29)
+#define G1_REG_DEC_CTRL4_ALT_PQUANT(x) (((x) & 0x1f) << 24)
+#define G1_REG_DEC_CTRL4_DQ_EDGES(x) (((x) & 0xf) << 20)
+#define G1_REG_DEC_CTRL4_TTMBF BIT(19)
+#define G1_REG_DEC_CTRL4_PQINDEX(x) (((x) & 0x1f) << 14)
+#define G1_REG_DEC_CTRL4_VC1_HEIGHT_EXT BIT(13)
+#define G1_REG_DEC_CTRL4_BILIN_MC_E BIT(12)
+#define G1_REG_DEC_CTRL4_UNIQP_E BIT(11)
+#define G1_REG_DEC_CTRL4_HALFQP_E BIT(10)
+#define G1_REG_DEC_CTRL4_TTFRM(x) (((x) & 0x3) << 8)
+#define G1_REG_DEC_CTRL4_2ND_BYTE_EMUL_E BIT(7)
+#define G1_REG_DEC_CTRL4_DQUANT_E BIT(6)
+#define G1_REG_DEC_CTRL4_VC1_ADV_E BIT(5)
+#define G1_REG_DEC_CTRL4_PJPEG_FILDOWN_E BIT(26)
+#define G1_REG_DEC_CTRL4_PJPEG_WDIV8 BIT(25)
+#define G1_REG_DEC_CTRL4_PJPEG_HDIV8 BIT(24)
+#define G1_REG_DEC_CTRL4_PJPEG_AH(x) (((x) & 0xf) << 20)
+#define G1_REG_DEC_CTRL4_PJPEG_AL(x) (((x) & 0xf) << 16)
+#define G1_REG_DEC_CTRL4_PJPEG_SS(x) (((x) & 0xff) << 8)
+#define G1_REG_DEC_CTRL4_PJPEG_SE(x) (((x) & 0xff) << 0)
+#define G1_REG_DEC_CTRL4_DCT1_START_BIT(x) (((x) & 0x3f) << 26)
+#define G1_REG_DEC_CTRL4_DCT2_START_BIT(x) (((x) & 0x3f) << 20)
+#define G1_REG_DEC_CTRL4_CH_MV_RES BIT(13)
+#define G1_REG_DEC_CTRL4_INIT_DC_MATCH0(x) (((x) & 0x7) << 9)
+#define G1_REG_DEC_CTRL4_INIT_DC_MATCH1(x) (((x) & 0x7) << 6)
+#define G1_REG_DEC_CTRL4_VP7_VERSION BIT(5)
+#define G1_REG_DEC_CTRL5 0x020
+#define G1_REG_DEC_CTRL5_CONST_INTRA_E BIT(31)
+#define G1_REG_DEC_CTRL5_FILT_CTRL_PRES BIT(30)
+#define G1_REG_DEC_CTRL5_RDPIC_CNT_PRES BIT(29)
+#define G1_REG_DEC_CTRL5_8X8TRANS_FLAG_E BIT(28)
+#define G1_REG_DEC_CTRL5_REFPIC_MK_LEN(x) (((x) & 0x7ff) << 17)
+#define G1_REG_DEC_CTRL5_IDR_PIC_E BIT(16)
+#define G1_REG_DEC_CTRL5_IDR_PIC_ID(x) (((x) & 0xffff) << 0)
+#define G1_REG_DEC_CTRL5_MV_SCALEFACTOR(x) (((x) & 0xff) << 24)
+#define G1_REG_DEC_CTRL5_REF_DIST_FWD(x) (((x) & 0x1f) << 19)
+#define G1_REG_DEC_CTRL5_REF_DIST_BWD(x) (((x) & 0x1f) << 14)
+#define G1_REG_DEC_CTRL5_LOOP_FILT_LIMIT(x) (((x) & 0xf) << 14)
+#define G1_REG_DEC_CTRL5_VARIANCE_TEST_E BIT(13)
+#define G1_REG_DEC_CTRL5_MV_THRESHOLD(x) (((x) & 0x7) << 10)
+#define G1_REG_DEC_CTRL5_VAR_THRESHOLD(x) (((x) & 0x3ff) << 0)
+#define G1_REG_DEC_CTRL5_DIVX_IDCT_E BIT(8)
+#define G1_REG_DEC_CTRL5_DIVX3_SLICE_SIZE(x) (((x) & 0xff) << 0)
+#define G1_REG_DEC_CTRL5_PJPEG_REST_FREQ(x) (((x) & 0xffff) << 0)
+#define G1_REG_DEC_CTRL5_RV_PROFILE(x) (((x) & 0x3) << 30)
+#define G1_REG_DEC_CTRL5_RV_OSV_QUANT(x) (((x) & 0x3) << 28)
+#define G1_REG_DEC_CTRL5_RV_FWD_SCALE(x) (((x) & 0x3fff) << 14)
+#define G1_REG_DEC_CTRL5_RV_BWD_SCALE(x) (((x) & 0x3fff) << 0)
+#define G1_REG_DEC_CTRL5_INIT_DC_COMP0(x) (((x) & 0xffff) << 16)
+#define G1_REG_DEC_CTRL5_INIT_DC_COMP1(x) (((x) & 0xffff) << 0)
+#define G1_REG_DEC_CTRL6 0x024
+#define G1_REG_DEC_CTRL6_PPS_ID(x) (((x) & 0xff) << 24)
+#define G1_REG_DEC_CTRL6_REFIDX1_ACTIVE(x) (((x) & 0x1f) << 19)
+#define G1_REG_DEC_CTRL6_REFIDX0_ACTIVE(x) (((x) & 0x1f) << 14)
+#define G1_REG_DEC_CTRL6_POC_LENGTH(x) (((x) & 0xff) << 0)
+#define G1_REG_DEC_CTRL6_ICOMP0_E BIT(24)
+#define G1_REG_DEC_CTRL6_ISCALE0(x) (((x) & 0xff) << 16)
+#define G1_REG_DEC_CTRL6_ISHIFT0(x) (((x) & 0xffff) << 0)
+#define G1_REG_DEC_CTRL6_STREAM1_LEN(x) (((x) & 0xffffff) << 0)
+#define G1_REG_DEC_CTRL6_PIC_SLICE_AM(x) (((x) & 0x1fff) << 0)
+#define G1_REG_DEC_CTRL6_COEFFS_PART_AM(x) (((x) & 0xf) << 24)
+#define G1_REG_FWD_PIC(i) (0x028 + ((i) * 0x4))
+#define G1_REG_FWD_PIC_PINIT_RLIST_F5(x) (((x) & 0x1f) << 25)
+#define G1_REG_FWD_PIC_PINIT_RLIST_F4(x) (((x) & 0x1f) << 20)
+#define G1_REG_FWD_PIC_PINIT_RLIST_F3(x) (((x) & 0x1f) << 15)
+#define G1_REG_FWD_PIC_PINIT_RLIST_F2(x) (((x) & 0x1f) << 10)
+#define G1_REG_FWD_PIC_PINIT_RLIST_F1(x) (((x) & 0x1f) << 5)
+#define G1_REG_FWD_PIC_PINIT_RLIST_F0(x) (((x) & 0x1f) << 0)
+#define G1_REG_FWD_PIC1_ICOMP1_E BIT(24)
+#define G1_REG_FWD_PIC1_ISCALE1(x) (((x) & 0xff) << 16)
+#define G1_REG_FWD_PIC1_ISHIFT1(x) (((x) & 0xffff) << 0)
+#define G1_REG_FWD_PIC1_SEGMENT_BASE(x) ((x) << 0)
+#define G1_REG_FWD_PIC1_SEGMENT_UPD_E BIT(1)
+#define G1_REG_FWD_PIC1_SEGMENT_E BIT(0)
+#define G1_REG_DEC_CTRL7 0x02c
+#define G1_REG_DEC_CTRL7_PINIT_RLIST_F15(x) (((x) & 0x1f) << 25)
+#define G1_REG_DEC_CTRL7_PINIT_RLIST_F14(x) (((x) & 0x1f) << 20)
+#define G1_REG_DEC_CTRL7_PINIT_RLIST_F13(x) (((x) & 0x1f) << 15)
+#define G1_REG_DEC_CTRL7_PINIT_RLIST_F12(x) (((x) & 0x1f) << 10)
+#define G1_REG_DEC_CTRL7_PINIT_RLIST_F11(x) (((x) & 0x1f) << 5)
+#define G1_REG_DEC_CTRL7_PINIT_RLIST_F10(x) (((x) & 0x1f) << 0)
+#define G1_REG_DEC_CTRL7_ICOMP2_E BIT(24)
+#define G1_REG_DEC_CTRL7_ISCALE2(x) (((x) & 0xff) << 16)
+#define G1_REG_DEC_CTRL7_ISHIFT2(x) (((x) & 0xffff) << 0)
+#define G1_REG_DEC_CTRL7_DCT3_START_BIT(x) (((x) & 0x3f) << 24)
+#define G1_REG_DEC_CTRL7_DCT4_START_BIT(x) (((x) & 0x3f) << 18)
+#define G1_REG_DEC_CTRL7_DCT5_START_BIT(x) (((x) & 0x3f) << 12)
+#define G1_REG_DEC_CTRL7_DCT6_START_BIT(x) (((x) & 0x3f) << 6)
+#define G1_REG_DEC_CTRL7_DCT7_START_BIT(x) (((x) & 0x3f) << 0)
+#define G1_REG_ADDR_STR 0x030
+#define G1_REG_ADDR_DST 0x034
+#define G1_REG_ADDR_REF(i) (0x038 + ((i) * 0x4))
+#define G1_REG_ADDR_REF_FIELD_E BIT(1)
+#define G1_REG_ADDR_REF_TOPC_E BIT(0)
+#define G1_REG_REF_PIC(i) (0x078 + ((i) * 0x4))
+#define G1_REG_REF_PIC_FILT_TYPE_E BIT(31)
+#define G1_REG_REF_PIC_FILT_SHARPNESS(x) (((x) & 0x7) << 28)
+#define G1_REG_REF_PIC_MB_ADJ_0(x) (((x) & 0x7f) << 21)
+#define G1_REG_REF_PIC_MB_ADJ_1(x) (((x) & 0x7f) << 14)
+#define G1_REG_REF_PIC_MB_ADJ_2(x) (((x) & 0x7f) << 7)
+#define G1_REG_REF_PIC_MB_ADJ_3(x) (((x) & 0x7f) << 0)
+#define G1_REG_REF_PIC_REFER1_NBR(x) (((x) & 0xffff) << 16)
+#define G1_REG_REF_PIC_REFER0_NBR(x) (((x) & 0xffff) << 0)
+#define G1_REG_REF_PIC_LF_LEVEL_0(x) (((x) & 0x3f) << 18)
+#define G1_REG_REF_PIC_LF_LEVEL_1(x) (((x) & 0x3f) << 12)
+#define G1_REG_REF_PIC_LF_LEVEL_2(x) (((x) & 0x3f) << 6)
+#define G1_REG_REF_PIC_LF_LEVEL_3(x) (((x) & 0x3f) << 0)
+#define G1_REG_REF_PIC_QUANT_DELTA_0(x) (((x) & 0x1f) << 27)
+#define G1_REG_REF_PIC_QUANT_DELTA_1(x) (((x) & 0x1f) << 22)
+#define G1_REG_REF_PIC_QUANT_0(x) (((x) & 0x7ff) << 11)
+#define G1_REG_REF_PIC_QUANT_1(x) (((x) & 0x7ff) << 0)
+#define G1_REG_LT_REF 0x098
+#define G1_REG_VALID_REF 0x09c
+#define G1_REG_ADDR_QTABLE 0x0a0
+#define G1_REG_ADDR_DIR_MV 0x0a4
+#define G1_REG_BD_REF_PIC(i) (0x0a8 + ((i) * 0x4))
+#define G1_REG_BD_REF_PIC_BINIT_RLIST_B2(x) (((x) & 0x1f) << 25)
+#define G1_REG_BD_REF_PIC_BINIT_RLIST_F2(x) (((x) & 0x1f) << 20)
+#define G1_REG_BD_REF_PIC_BINIT_RLIST_B1(x) (((x) & 0x1f) << 15)
+#define G1_REG_BD_REF_PIC_BINIT_RLIST_F1(x) (((x) & 0x1f) << 10)
+#define G1_REG_BD_REF_PIC_BINIT_RLIST_B0(x) (((x) & 0x1f) << 5)
+#define G1_REG_BD_REF_PIC_BINIT_RLIST_F0(x) (((x) & 0x1f) << 0)
+#define G1_REG_BD_REF_PIC_PRED_TAP_2_M1(x) (((x) & 0x3) << 10)
+#define G1_REG_BD_REF_PIC_PRED_TAP_2_4(x) (((x) & 0x3) << 8)
+#define G1_REG_BD_REF_PIC_PRED_TAP_4_M1(x) (((x) & 0x3) << 6)
+#define G1_REG_BD_REF_PIC_PRED_TAP_4_4(x) (((x) & 0x3) << 4)
+#define G1_REG_BD_REF_PIC_PRED_TAP_6_M1(x) (((x) & 0x3) << 2)
+#define G1_REG_BD_REF_PIC_PRED_TAP_6_4(x) (((x) & 0x3) << 0)
+#define G1_REG_BD_REF_PIC_QUANT_DELTA_2(x) (((x) & 0x1f) << 27)
+#define G1_REG_BD_REF_PIC_QUANT_DELTA_3(x) (((x) & 0x1f) << 22)
+#define G1_REG_BD_REF_PIC_QUANT_2(x) (((x) & 0x7ff) << 11)
+#define G1_REG_BD_REF_PIC_QUANT_3(x) (((x) & 0x7ff) << 0)
+#define G1_REG_BD_P_REF_PIC 0x0bc
+#define G1_REG_BD_P_REF_PIC_QUANT_DELTA_4(x) (((x) & 0x1f) << 27)
+#define G1_REG_BD_P_REF_PIC_PINIT_RLIST_F3(x) (((x) & 0x1f) << 25)
+#define G1_REG_BD_P_REF_PIC_PINIT_RLIST_F2(x) (((x) & 0x1f) << 20)
+#define G1_REG_BD_P_REF_PIC_PINIT_RLIST_F1(x) (((x) & 0x1f) << 15)
+#define G1_REG_BD_P_REF_PIC_PINIT_RLIST_F0(x) (((x) & 0x1f) << 10)
+#define G1_REG_BD_P_REF_PIC_BINIT_RLIST_B15(x) (((x) & 0x1f) << 5)
+#define G1_REG_BD_P_REF_PIC_BINIT_RLIST_F15(x) (((x) & 0x1f) << 0)
+#define G1_REG_ERR_CONC 0x0c0
+#define G1_REG_ERR_CONC_STARTMB_X(x) (((x) & 0x1ff) << 23)
+#define G1_REG_ERR_CONC_STARTMB_Y(x) (((x) & 0xff) << 15)
+#define G1_REG_PRED_FLT 0x0c4
+#define G1_REG_PRED_FLT_PRED_BC_TAP_0_0(x) (((x) & 0x3ff) << 22)
+#define G1_REG_PRED_FLT_PRED_BC_TAP_0_1(x) (((x) & 0x3ff) << 12)
+#define G1_REG_PRED_FLT_PRED_BC_TAP_0_2(x) (((x) & 0x3ff) << 2)
+#define G1_REG_REF_BUF_CTRL 0x0cc
+#define G1_REG_REF_BUF_CTRL_REFBU_E BIT(31)
+#define G1_REG_REF_BUF_CTRL_REFBU_THR(x) (((x) & 0xfff) << 19)
+#define G1_REG_REF_BUF_CTRL_REFBU_PICID(x) (((x) & 0x1f) << 14)
+#define G1_REG_REF_BUF_CTRL_REFBU_EVAL_E BIT(13)
+#define G1_REG_REF_BUF_CTRL_REFBU_FPARMOD_E BIT(12)
+#define G1_REG_REF_BUF_CTRL_REFBU_Y_OFFSET(x) (((x) & 0x1ff) << 0)
+#define G1_REG_REF_BUF_CTRL2 0x0dc
+#define G1_REG_REF_BUF_CTRL2_REFBU2_BUF_E BIT(31)
+#define G1_REG_REF_BUF_CTRL2_REFBU2_THR(x) (((x) & 0xfff) << 19)
+#define G1_REG_REF_BUF_CTRL2_REFBU2_PICID(x) (((x) & 0x1f) << 14)
+#define G1_REG_REF_BUF_CTRL2_APF_THRESHOLD(x) (((x) & 0x3fff) << 0)
+#define G1_REG_SOFT_RESET 0x194
+
+/* Post-processor registers. */
+#define G1_REG_PP_INTERRUPT G1_SWREG(60)
+#define G1_REG_PP_READY_IRQ BIT(12)
+#define G1_REG_PP_IRQ BIT(8)
+#define G1_REG_PP_IRQ_DIS BIT(4)
+#define G1_REG_PP_PIPELINE_EN BIT(1)
+#define G1_REG_PP_EXTERNAL_TRIGGER BIT(0)
+#define G1_REG_PP_DEV_CONFIG G1_SWREG(61)
+#define G1_REG_PP_AXI_RD_ID(v) (((v) << 24) & GENMASK(31, 24))
+#define G1_REG_PP_AXI_WR_ID(v) (((v) << 16) & GENMASK(23, 16))
+#define G1_REG_PP_INSWAP32_E(v) ((v) ? BIT(10) : 0)
+#define G1_REG_PP_DATA_DISC_E(v) ((v) ? BIT(9) : 0)
+#define G1_REG_PP_CLK_GATE_E(v) ((v) ? BIT(8) : 0)
+#define G1_REG_PP_IN_ENDIAN(v) ((v) ? BIT(7) : 0)
+#define G1_REG_PP_OUT_ENDIAN(v) ((v) ? BIT(6) : 0)
+#define G1_REG_PP_OUTSWAP32_E(v) ((v) ? BIT(5) : 0)
+#define G1_REG_PP_MAX_BURST(v) (((v) << 0) & GENMASK(4, 0))
+#define G1_REG_PP_IN_LUMA_BASE G1_SWREG(63)
+#define G1_REG_PP_IN_CB_BASE G1_SWREG(64)
+#define G1_REG_PP_IN_CR_BASE G1_SWREG(65)
+#define G1_REG_PP_OUT_LUMA_BASE G1_SWREG(66)
+#define G1_REG_PP_OUT_CHROMA_BASE G1_SWREG(67)
+#define G1_REG_PP_CONTRAST_ADJUST G1_SWREG(68)
+#define G1_REG_PP_COLOR_CONVERSION G1_SWREG(69)
+#define G1_REG_PP_COLOR_CONVERSION0 G1_SWREG(70)
+#define G1_REG_PP_COLOR_CONVERSION1 G1_SWREG(71)
+#define G1_REG_PP_INPUT_SIZE G1_SWREG(72)
+#define G1_REG_PP_INPUT_SIZE_HEIGHT(v) (((v) << 9) & GENMASK(16, 9))
+#define G1_REG_PP_INPUT_SIZE_WIDTH(v) (((v) << 0) & GENMASK(8, 0))
+#define G1_REG_PP_SCALING0 G1_SWREG(79)
+#define G1_REG_PP_PADD_R(v) (((v) << 23) & GENMASK(27, 23))
+#define G1_REG_PP_PADD_G(v) (((v) << 18) & GENMASK(22, 18))
+#define G1_REG_PP_RANGEMAP_Y(v) ((v) ? BIT(31) : 0)
+#define G1_REG_PP_RANGEMAP_C(v) ((v) ? BIT(30) : 0)
+#define G1_REG_PP_YCBCR_RANGE(v) ((v) ? BIT(29) : 0)
+#define G1_REG_PP_RGB_16(v) ((v) ? BIT(28) : 0)
+#define G1_REG_PP_SCALING1 G1_SWREG(80)
+#define G1_REG_PP_PADD_B(v) (((v) << 18) & GENMASK(22, 18))
+#define G1_REG_PP_MASK_R G1_SWREG(82)
+#define G1_REG_PP_MASK_G G1_SWREG(83)
+#define G1_REG_PP_MASK_B G1_SWREG(84)
+#define G1_REG_PP_CONTROL G1_SWREG(85)
+#define G1_REG_PP_CONTROL_IN_FMT(v) (((v) << 29) & GENMASK(31, 29))
+#define G1_REG_PP_CONTROL_OUT_FMT(v) (((v) << 26) & GENMASK(28, 26))
+#define G1_REG_PP_CONTROL_OUT_HEIGHT(v) (((v) << 15) & GENMASK(25, 15))
+#define G1_REG_PP_CONTROL_OUT_WIDTH(v) (((v) << 4) & GENMASK(14, 4))
+#define G1_REG_PP_MASK1_ORIG_WIDTH G1_SWREG(88)
+#define G1_REG_PP_ORIG_WIDTH(v) (((v) << 23) & GENMASK(31, 23))
+#define G1_REG_PP_DISPLAY_WIDTH G1_SWREG(92)
+#define G1_REG_PP_FUSE G1_SWREG(99)
+
+#endif /* HANTRO_G1_REGS_H_ */
diff --git a/drivers/media/platform/verisilicon/hantro_g1_vp8_dec.c b/drivers/media/platform/verisilicon/hantro_g1_vp8_dec.c
new file mode 100644
index 0000000000..851eb67f19
--- /dev/null
+++ b/drivers/media/platform/verisilicon/hantro_g1_vp8_dec.c
@@ -0,0 +1,511 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hantro VP8 codec driver
+ *
+ * Copyright (C) 2019 Rockchip Electronics Co., Ltd.
+ * ZhiChao Yu <zhichao.yu@rock-chips.com>
+ *
+ * Copyright (C) 2019 Google, Inc.
+ * Tomasz Figa <tfiga@chromium.org>
+ */
+
+#include <media/v4l2-mem2mem.h>
+
+#include "hantro_hw.h"
+#include "hantro.h"
+#include "hantro_g1_regs.h"
+
+/* DCT partition base address regs */
+static const struct hantro_reg vp8_dec_dct_base[8] = {
+ { G1_REG_ADDR_STR, 0, 0xffffffff },
+ { G1_REG_ADDR_REF(8), 0, 0xffffffff },
+ { G1_REG_ADDR_REF(9), 0, 0xffffffff },
+ { G1_REG_ADDR_REF(10), 0, 0xffffffff },
+ { G1_REG_ADDR_REF(11), 0, 0xffffffff },
+ { G1_REG_ADDR_REF(12), 0, 0xffffffff },
+ { G1_REG_ADDR_REF(14), 0, 0xffffffff },
+ { G1_REG_ADDR_REF(15), 0, 0xffffffff },
+};
+
+/* Loop filter level regs */
+static const struct hantro_reg vp8_dec_lf_level[4] = {
+ { G1_REG_REF_PIC(2), 18, 0x3f },
+ { G1_REG_REF_PIC(2), 12, 0x3f },
+ { G1_REG_REF_PIC(2), 6, 0x3f },
+ { G1_REG_REF_PIC(2), 0, 0x3f },
+};
+
+/* Macroblock loop filter level adjustment regs */
+static const struct hantro_reg vp8_dec_mb_adj[4] = {
+ { G1_REG_REF_PIC(0), 21, 0x7f },
+ { G1_REG_REF_PIC(0), 14, 0x7f },
+ { G1_REG_REF_PIC(0), 7, 0x7f },
+ { G1_REG_REF_PIC(0), 0, 0x7f },
+};
+
+/* Reference frame adjustment regs */
+static const struct hantro_reg vp8_dec_ref_adj[4] = {
+ { G1_REG_REF_PIC(1), 21, 0x7f },
+ { G1_REG_REF_PIC(1), 14, 0x7f },
+ { G1_REG_REF_PIC(1), 7, 0x7f },
+ { G1_REG_REF_PIC(1), 0, 0x7f },
+};
+
+/* Quantizer */
+static const struct hantro_reg vp8_dec_quant[4] = {
+ { G1_REG_REF_PIC(3), 11, 0x7ff },
+ { G1_REG_REF_PIC(3), 0, 0x7ff },
+ { G1_REG_BD_REF_PIC(4), 11, 0x7ff },
+ { G1_REG_BD_REF_PIC(4), 0, 0x7ff },
+};
+
+/* Quantizer delta regs */
+static const struct hantro_reg vp8_dec_quant_delta[5] = {
+ { G1_REG_REF_PIC(3), 27, 0x1f },
+ { G1_REG_REF_PIC(3), 22, 0x1f },
+ { G1_REG_BD_REF_PIC(4), 27, 0x1f },
+ { G1_REG_BD_REF_PIC(4), 22, 0x1f },
+ { G1_REG_BD_P_REF_PIC, 27, 0x1f },
+};
+
+/* DCT partition start bits regs */
+static const struct hantro_reg vp8_dec_dct_start_bits[8] = {
+ { G1_REG_DEC_CTRL2, 26, 0x3f }, { G1_REG_DEC_CTRL4, 26, 0x3f },
+ { G1_REG_DEC_CTRL4, 20, 0x3f }, { G1_REG_DEC_CTRL7, 24, 0x3f },
+ { G1_REG_DEC_CTRL7, 18, 0x3f }, { G1_REG_DEC_CTRL7, 12, 0x3f },
+ { G1_REG_DEC_CTRL7, 6, 0x3f }, { G1_REG_DEC_CTRL7, 0, 0x3f },
+};
+
+/* Prediction filter tap regs */
+static const struct hantro_reg vp8_dec_pred_bc_tap[8][4] = {
+ {
+ { G1_REG_PRED_FLT, 22, 0x3ff },
+ { G1_REG_PRED_FLT, 12, 0x3ff },
+ { G1_REG_PRED_FLT, 2, 0x3ff },
+ { G1_REG_REF_PIC(4), 22, 0x3ff },
+ },
+ {
+ { G1_REG_REF_PIC(4), 12, 0x3ff },
+ { G1_REG_REF_PIC(4), 2, 0x3ff },
+ { G1_REG_REF_PIC(5), 22, 0x3ff },
+ { G1_REG_REF_PIC(5), 12, 0x3ff },
+ },
+ {
+ { G1_REG_REF_PIC(5), 2, 0x3ff },
+ { G1_REG_REF_PIC(6), 22, 0x3ff },
+ { G1_REG_REF_PIC(6), 12, 0x3ff },
+ { G1_REG_REF_PIC(6), 2, 0x3ff },
+ },
+ {
+ { G1_REG_REF_PIC(7), 22, 0x3ff },
+ { G1_REG_REF_PIC(7), 12, 0x3ff },
+ { G1_REG_REF_PIC(7), 2, 0x3ff },
+ { G1_REG_LT_REF, 22, 0x3ff },
+ },
+ {
+ { G1_REG_LT_REF, 12, 0x3ff },
+ { G1_REG_LT_REF, 2, 0x3ff },
+ { G1_REG_VALID_REF, 22, 0x3ff },
+ { G1_REG_VALID_REF, 12, 0x3ff },
+ },
+ {
+ { G1_REG_VALID_REF, 2, 0x3ff },
+ { G1_REG_BD_REF_PIC(0), 22, 0x3ff },
+ { G1_REG_BD_REF_PIC(0), 12, 0x3ff },
+ { G1_REG_BD_REF_PIC(0), 2, 0x3ff },
+ },
+ {
+ { G1_REG_BD_REF_PIC(1), 22, 0x3ff },
+ { G1_REG_BD_REF_PIC(1), 12, 0x3ff },
+ { G1_REG_BD_REF_PIC(1), 2, 0x3ff },
+ { G1_REG_BD_REF_PIC(2), 22, 0x3ff },
+ },
+ {
+ { G1_REG_BD_REF_PIC(2), 12, 0x3ff },
+ { G1_REG_BD_REF_PIC(2), 2, 0x3ff },
+ { G1_REG_BD_REF_PIC(3), 22, 0x3ff },
+ { G1_REG_BD_REF_PIC(3), 12, 0x3ff },
+ },
+};
+
+/*
+ * Set loop filters
+ */
+static void cfg_lf(struct hantro_ctx *ctx,
+ const struct v4l2_ctrl_vp8_frame *hdr)
+{
+ const struct v4l2_vp8_segment *seg = &hdr->segment;
+ const struct v4l2_vp8_loop_filter *lf = &hdr->lf;
+ struct hantro_dev *vpu = ctx->dev;
+ unsigned int i;
+ u32 reg;
+
+ if (!(seg->flags & V4L2_VP8_SEGMENT_FLAG_ENABLED)) {
+ hantro_reg_write(vpu, &vp8_dec_lf_level[0], lf->level);
+ } else if (seg->flags & V4L2_VP8_SEGMENT_FLAG_DELTA_VALUE_MODE) {
+ for (i = 0; i < 4; i++) {
+ u32 lf_level = clamp(lf->level + seg->lf_update[i],
+ 0, 63);
+
+ hantro_reg_write(vpu, &vp8_dec_lf_level[i], lf_level);
+ }
+ } else {
+ for (i = 0; i < 4; i++)
+ hantro_reg_write(vpu, &vp8_dec_lf_level[i],
+ seg->lf_update[i]);
+ }
+
+ reg = G1_REG_REF_PIC_FILT_SHARPNESS(lf->sharpness_level);
+ if (lf->flags & V4L2_VP8_LF_FILTER_TYPE_SIMPLE)
+ reg |= G1_REG_REF_PIC_FILT_TYPE_E;
+ vdpu_write_relaxed(vpu, reg, G1_REG_REF_PIC(0));
+
+ if (lf->flags & V4L2_VP8_LF_ADJ_ENABLE) {
+ for (i = 0; i < 4; i++) {
+ hantro_reg_write(vpu, &vp8_dec_mb_adj[i],
+ lf->mb_mode_delta[i]);
+ hantro_reg_write(vpu, &vp8_dec_ref_adj[i],
+ lf->ref_frm_delta[i]);
+ }
+ }
+}
+
+/*
+ * Set quantization parameters
+ */
+static void cfg_qp(struct hantro_ctx *ctx,
+ const struct v4l2_ctrl_vp8_frame *hdr)
+{
+ const struct v4l2_vp8_quantization *q = &hdr->quant;
+ const struct v4l2_vp8_segment *seg = &hdr->segment;
+ struct hantro_dev *vpu = ctx->dev;
+ unsigned int i;
+
+ if (!(seg->flags & V4L2_VP8_SEGMENT_FLAG_ENABLED)) {
+ hantro_reg_write(vpu, &vp8_dec_quant[0], q->y_ac_qi);
+ } else if (seg->flags & V4L2_VP8_SEGMENT_FLAG_DELTA_VALUE_MODE) {
+ for (i = 0; i < 4; i++) {
+ u32 quant = clamp(q->y_ac_qi + seg->quant_update[i],
+ 0, 127);
+
+ hantro_reg_write(vpu, &vp8_dec_quant[i], quant);
+ }
+ } else {
+ for (i = 0; i < 4; i++)
+ hantro_reg_write(vpu, &vp8_dec_quant[i],
+ seg->quant_update[i]);
+ }
+
+ hantro_reg_write(vpu, &vp8_dec_quant_delta[0], q->y_dc_delta);
+ hantro_reg_write(vpu, &vp8_dec_quant_delta[1], q->y2_dc_delta);
+ hantro_reg_write(vpu, &vp8_dec_quant_delta[2], q->y2_ac_delta);
+ hantro_reg_write(vpu, &vp8_dec_quant_delta[3], q->uv_dc_delta);
+ hantro_reg_write(vpu, &vp8_dec_quant_delta[4], q->uv_ac_delta);
+}
+
+/*
+ * Set control partition and DCT partition registers.
+ *
+ * VP8 frame stream data layout:
+ *
+ *                      first_part_size       partition_sizes[0]
+ *                             ^                      ^
+ * src_dma                     |                      |
+ * ^                    +------+-------+        +-----+-----+
+ * |                    | control part |        |     |     |
+ * +--------+----------+-----+---------+--------+-----------+-----+-----------+
+ * | tag 3B | extra 7B | hdr | mb_data | DCT sz | DCT part0 | ... | DCT partn |
+ * +--------+----------+-----+---------+--------+-----------+-----+-----------+
+ *                             |           |                                  |
+ *                             v           v                                  v
+ *                         mb_start   DCT size part                 src_dma_end
+ *                                   (num_dct-1)*3B
+ * Note:
+ *   1. only key frames have the extra 7 bytes
+ *   2. all offsets are based on src_dma
+ *   3. the number of DCT parts is 1, 2, 4 or 8
+ *   4. the addresses set in the VPU must be 64-bit aligned
+ */
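+/*
+ * Worked example (hypothetical numbers): for a key frame,
+ * first_part_offset is 10; with first_part_header_bits == 50 the
+ * macroblock data starts at bit 10 * 8 + 50 + 8 = 138, i.e. in byte 17.
+ * Rounding down to the previous 64-bit boundary gives byte 16 as the
+ * aligned base address, and 138 - 16 * 8 = 10 as the start-bit offset
+ * programmed into the hardware.
+ */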
+static void cfg_parts(struct hantro_ctx *ctx,
+ const struct v4l2_ctrl_vp8_frame *hdr)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ struct vb2_v4l2_buffer *vb2_src;
+ u32 first_part_offset = V4L2_VP8_FRAME_IS_KEY_FRAME(hdr) ? 10 : 3;
+ u32 mb_size, mb_offset_bytes, mb_offset_bits, mb_start_bits;
+ u32 dct_size_part_size, dct_part_offset;
+ struct hantro_reg reg;
+ dma_addr_t src_dma;
+ u32 dct_part_total_len = 0;
+ u32 count = 0;
+ unsigned int i;
+
+ vb2_src = hantro_get_src_buf(ctx);
+ src_dma = vb2_dma_contig_plane_dma_addr(&vb2_src->vb2_buf, 0);
+
+ /*
+ * Calculate the control partition mb data info:
+ * @first_part_header_bits: bit offset of the mb data from the
+ * first part start position
+ * @mb_offset_bits: bit offset of the mb data from the src_dma
+ * base address
+ * @mb_offset_bytes: byte offset of the mb data from the src_dma
+ * base address
+ * @mb_start_bits: bit offset of the mb data from its 64-bit
+ * aligned address
+ */
+ mb_offset_bits = first_part_offset * 8 +
+ hdr->first_part_header_bits + 8;
+ mb_offset_bytes = mb_offset_bits / 8;
+ mb_start_bits = mb_offset_bits -
+ (mb_offset_bytes & (~DEC_8190_ALIGN_MASK)) * 8;
+ mb_size = hdr->first_part_size -
+ (mb_offset_bytes - first_part_offset) +
+ (mb_offset_bytes & DEC_8190_ALIGN_MASK);
+
+ /* Macroblock data aligned base addr */
+ vdpu_write_relaxed(vpu, (mb_offset_bytes & (~DEC_8190_ALIGN_MASK))
+ + src_dma, G1_REG_ADDR_REF(13));
+
+ /* Macroblock data start bits */
+ reg.base = G1_REG_DEC_CTRL2;
+ reg.mask = 0x3f;
+ reg.shift = 18;
+ hantro_reg_write(vpu, &reg, mb_start_bits);
+
+ /* Macroblock aligned data length */
+ reg.base = G1_REG_DEC_CTRL6;
+ reg.mask = 0x3fffff;
+ reg.shift = 0;
+ hantro_reg_write(vpu, &reg, mb_size + 1);
+
+ /*
+ * Calculate DCT partition info
+ * @dct_size_part_size: size of the area holding the DCT partition
+ * sizes; every DCT partition except the last
+ * has a 3-byte size field
+ * @dct_part_offset: byte offset of the DCT partitions from the src_dma base address
+ * @dct_part_total_len: total size of all DCT parts
+ */
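+ /*
+ * Illustrative example (hypothetical sizes): with num_dct_parts == 4,
+ * the first three partitions each carry a 3-byte size field, so
+ * dct_size_part_size == 9 and the partition data itself begins
+ * 9 bytes past dct_part_offset.
+ */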
+ dct_size_part_size = (hdr->num_dct_parts - 1) * 3;
+ dct_part_offset = first_part_offset + hdr->first_part_size;
+ for (i = 0; i < hdr->num_dct_parts; i++)
+ dct_part_total_len += hdr->dct_part_sizes[i];
+ dct_part_total_len += dct_size_part_size;
+ dct_part_total_len += (dct_part_offset & DEC_8190_ALIGN_MASK);
+
+ /* Number of DCT partitions */
+ reg.base = G1_REG_DEC_CTRL6;
+ reg.mask = 0xf;
+ reg.shift = 24;
+ hantro_reg_write(vpu, &reg, hdr->num_dct_parts - 1);
+
+ /* DCT partition length */
+ vdpu_write_relaxed(vpu,
+ G1_REG_DEC_CTRL3_STREAM_LEN(dct_part_total_len),
+ G1_REG_DEC_CTRL3);
+
+ /* DCT partitions base address */
+ for (i = 0; i < hdr->num_dct_parts; i++) {
+ u32 byte_offset = dct_part_offset + dct_size_part_size + count;
+ u32 base_addr = byte_offset + src_dma;
+
+ hantro_reg_write(vpu, &vp8_dec_dct_base[i],
+ base_addr & (~DEC_8190_ALIGN_MASK));
+
+ hantro_reg_write(vpu, &vp8_dec_dct_start_bits[i],
+ (byte_offset & DEC_8190_ALIGN_MASK) * 8);
+
+ count += hdr->dct_part_sizes[i];
+ }
+}
+
+/*
+ * Set the prediction filter taps used by the normal 6-tap filters.
+ */
+static void cfg_tap(struct hantro_ctx *ctx,
+ const struct v4l2_ctrl_vp8_frame *hdr)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ struct hantro_reg reg;
+ u32 val = 0;
+ int i, j;
+
+ reg.base = G1_REG_BD_REF_PIC(3);
+ reg.mask = 0xf;
+
+ if ((hdr->version & 0x03) != 0)
+ return; /* Tap filter not used. */
+
+ for (i = 0; i < 8; i++) {
+ val = (hantro_vp8_dec_mc_filter[i][0] << 2) |
+ hantro_vp8_dec_mc_filter[i][5];
+
+ for (j = 0; j < 4; j++)
+ hantro_reg_write(vpu, &vp8_dec_pred_bc_tap[i][j],
+ hantro_vp8_dec_mc_filter[i][j + 1]);
+
+ switch (i) {
+ case 2:
+ reg.shift = 8;
+ break;
+ case 4:
+ reg.shift = 4;
+ break;
+ case 6:
+ reg.shift = 0;
+ break;
+ default:
+ continue;
+ }
+
+ hantro_reg_write(vpu, &reg, val);
+ }
+}
+
+static void cfg_ref(struct hantro_ctx *ctx,
+ const struct v4l2_ctrl_vp8_frame *hdr,
+ struct vb2_v4l2_buffer *vb2_dst)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ dma_addr_t ref;
+
+ ref = hantro_get_ref(ctx, hdr->last_frame_ts);
+ if (!ref) {
+ vpu_debug(0, "failed to find last frame ts=%llu\n",
+ hdr->last_frame_ts);
+ ref = vb2_dma_contig_plane_dma_addr(&vb2_dst->vb2_buf, 0);
+ }
+ vdpu_write_relaxed(vpu, ref, G1_REG_ADDR_REF(0));
+
+ ref = hantro_get_ref(ctx, hdr->golden_frame_ts);
+ if (!ref && hdr->golden_frame_ts)
+ vpu_debug(0, "failed to find golden frame ts=%llu\n",
+ hdr->golden_frame_ts);
+ if (!ref)
+ ref = vb2_dma_contig_plane_dma_addr(&vb2_dst->vb2_buf, 0);
+ if (hdr->flags & V4L2_VP8_FRAME_FLAG_SIGN_BIAS_GOLDEN)
+ ref |= G1_REG_ADDR_REF_TOPC_E;
+ vdpu_write_relaxed(vpu, ref, G1_REG_ADDR_REF(4));
+
+ ref = hantro_get_ref(ctx, hdr->alt_frame_ts);
+ if (!ref && hdr->alt_frame_ts)
+ vpu_debug(0, "failed to find alt frame ts=%llu\n",
+ hdr->alt_frame_ts);
+ if (!ref)
+ ref = vb2_dma_contig_plane_dma_addr(&vb2_dst->vb2_buf, 0);
+ if (hdr->flags & V4L2_VP8_FRAME_FLAG_SIGN_BIAS_ALT)
+ ref |= G1_REG_ADDR_REF_TOPC_E;
+ vdpu_write_relaxed(vpu, ref, G1_REG_ADDR_REF(5));
+}
+
+static void cfg_buffers(struct hantro_ctx *ctx,
+ const struct v4l2_ctrl_vp8_frame *hdr,
+ struct vb2_v4l2_buffer *vb2_dst)
+{
+ const struct v4l2_vp8_segment *seg = &hdr->segment;
+ struct hantro_dev *vpu = ctx->dev;
+ dma_addr_t dst_dma;
+ u32 reg;
+
+ /* Set probability table buffer address */
+ vdpu_write_relaxed(vpu, ctx->vp8_dec.prob_tbl.dma,
+ G1_REG_ADDR_QTABLE);
+
+ /* Set segment map address */
+ reg = G1_REG_FWD_PIC1_SEGMENT_BASE(ctx->vp8_dec.segment_map.dma);
+ if (seg->flags & V4L2_VP8_SEGMENT_FLAG_ENABLED) {
+ reg |= G1_REG_FWD_PIC1_SEGMENT_E;
+ if (seg->flags & V4L2_VP8_SEGMENT_FLAG_UPDATE_MAP)
+ reg |= G1_REG_FWD_PIC1_SEGMENT_UPD_E;
+ }
+ vdpu_write_relaxed(vpu, reg, G1_REG_FWD_PIC(0));
+
+ dst_dma = hantro_get_dec_buf_addr(ctx, &vb2_dst->vb2_buf);
+ vdpu_write_relaxed(vpu, dst_dma, G1_REG_ADDR_DST);
+}
+
+int hantro_g1_vp8_dec_run(struct hantro_ctx *ctx)
+{
+ const struct v4l2_ctrl_vp8_frame *hdr;
+ struct hantro_dev *vpu = ctx->dev;
+ struct vb2_v4l2_buffer *vb2_dst;
+ size_t height = ctx->dst_fmt.height;
+ size_t width = ctx->dst_fmt.width;
+ u32 mb_width, mb_height;
+ u32 reg;
+
+ hantro_start_prepare_run(ctx);
+
+ hdr = hantro_get_ctrl(ctx, V4L2_CID_STATELESS_VP8_FRAME);
+ if (WARN_ON(!hdr))
+ return -EINVAL;
+
+ /* Reset the segment_map buffer on key frames */
+ if (V4L2_VP8_FRAME_IS_KEY_FRAME(hdr) && ctx->vp8_dec.segment_map.cpu)
+ memset(ctx->vp8_dec.segment_map.cpu, 0,
+ ctx->vp8_dec.segment_map.size);
+
+ hantro_vp8_prob_update(ctx, hdr);
+
+ reg = G1_REG_CONFIG_DEC_TIMEOUT_E |
+ G1_REG_CONFIG_DEC_STRENDIAN_E |
+ G1_REG_CONFIG_DEC_INSWAP32_E |
+ G1_REG_CONFIG_DEC_STRSWAP32_E |
+ G1_REG_CONFIG_DEC_OUTSWAP32_E |
+ G1_REG_CONFIG_DEC_CLK_GATE_E |
+ G1_REG_CONFIG_DEC_IN_ENDIAN |
+ G1_REG_CONFIG_DEC_OUT_ENDIAN |
+ G1_REG_CONFIG_DEC_MAX_BURST(16);
+ vdpu_write_relaxed(vpu, reg, G1_REG_CONFIG);
+
+ reg = G1_REG_DEC_CTRL0_DEC_MODE(10) |
+ G1_REG_DEC_CTRL0_DEC_AXI_AUTO;
+ if (!V4L2_VP8_FRAME_IS_KEY_FRAME(hdr))
+ reg |= G1_REG_DEC_CTRL0_PIC_INTER_E;
+ if (!(hdr->flags & V4L2_VP8_FRAME_FLAG_MB_NO_SKIP_COEFF))
+ reg |= G1_REG_DEC_CTRL0_SKIP_MODE;
+ if (hdr->lf.level == 0)
+ reg |= G1_REG_DEC_CTRL0_FILTERING_DIS;
+ vdpu_write_relaxed(vpu, reg, G1_REG_DEC_CTRL0);
+
+ /* Frame dimensions */
+ mb_width = MB_WIDTH(width);
+ mb_height = MB_HEIGHT(height);
+ reg = G1_REG_DEC_CTRL1_PIC_MB_WIDTH(mb_width) |
+ G1_REG_DEC_CTRL1_PIC_MB_HEIGHT_P(mb_height) |
+ G1_REG_DEC_CTRL1_PIC_MB_W_EXT(mb_width >> 9) |
+ G1_REG_DEC_CTRL1_PIC_MB_H_EXT(mb_height >> 8);
+ vdpu_write_relaxed(vpu, reg, G1_REG_DEC_CTRL1);
+
+ /* Boolean decoder */
+ reg = G1_REG_DEC_CTRL2_BOOLEAN_RANGE(hdr->coder_state.range)
+ | G1_REG_DEC_CTRL2_BOOLEAN_VALUE(hdr->coder_state.value);
+ vdpu_write_relaxed(vpu, reg, G1_REG_DEC_CTRL2);
+
+ reg = 0;
+ if (hdr->version != 3)
+ reg |= G1_REG_DEC_CTRL4_VC1_HEIGHT_EXT;
+ if (hdr->version & 0x3)
+ reg |= G1_REG_DEC_CTRL4_BILIN_MC_E;
+ vdpu_write_relaxed(vpu, reg, G1_REG_DEC_CTRL4);
+
+ cfg_lf(ctx, hdr);
+ cfg_qp(ctx, hdr);
+ cfg_parts(ctx, hdr);
+ cfg_tap(ctx, hdr);
+
+ vb2_dst = hantro_get_dst_buf(ctx);
+ cfg_ref(ctx, hdr, vb2_dst);
+ cfg_buffers(ctx, hdr, vb2_dst);
+
+ hantro_end_prepare_run(ctx);
+
+ vdpu_write(vpu, G1_REG_INTERRUPT_DEC_E, G1_REG_INTERRUPT);
+
+ return 0;
+}
diff --git a/drivers/media/platform/verisilicon/hantro_g2.c b/drivers/media/platform/verisilicon/hantro_g2.c
new file mode 100644
index 0000000000..ee5f14c5f8
--- /dev/null
+++ b/drivers/media/platform/verisilicon/hantro_g2.c
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hantro VPU codec driver
+ *
+ * Copyright (C) 2021 Collabora Ltd, Andrzej Pietrasiewicz <andrzej.p@collabora.com>
+ */
+
+#include "hantro_hw.h"
+#include "hantro_g2_regs.h"
+
+void hantro_g2_check_idle(struct hantro_dev *vpu)
+{
+ int i;
+
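+ /*
+ * Check a few times: if a previous job is somehow still running,
+ * request an abort and mask its interrupt before the core is reused.
+ */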
+ for (i = 0; i < 3; i++) {
+ u32 status;
+
+ /* Make sure the VPU is idle */
+ status = vdpu_read(vpu, G2_REG_INTERRUPT);
+ if (status & G2_REG_INTERRUPT_DEC_E) {
+ dev_warn(vpu->dev, "device still running, aborting\n");
+ status |= G2_REG_INTERRUPT_DEC_ABORT_E | G2_REG_INTERRUPT_DEC_IRQ_DIS;
+ vdpu_write(vpu, status, G2_REG_INTERRUPT);
+ }
+ }
+}
+
+irqreturn_t hantro_g2_irq(int irq, void *dev_id)
+{
+ struct hantro_dev *vpu = dev_id;
+ enum vb2_buffer_state state;
+ u32 status;
+
+ status = vdpu_read(vpu, G2_REG_INTERRUPT);
+ state = (status & G2_REG_INTERRUPT_DEC_RDY_INT) ?
+ VB2_BUF_STATE_DONE : VB2_BUF_STATE_ERROR;
+
+ vdpu_write(vpu, 0, G2_REG_INTERRUPT);
+ vdpu_write(vpu, G2_REG_CONFIG_DEC_CLK_GATE_E, G2_REG_CONFIG);
+
+ hantro_irq_done(vpu, state);
+
+ return IRQ_HANDLED;
+}
diff --git a/drivers/media/platform/verisilicon/hantro_g2_hevc_dec.c b/drivers/media/platform/verisilicon/hantro_g2_hevc_dec.c
new file mode 100644
index 0000000000..a9d4ac84a8
--- /dev/null
+++ b/drivers/media/platform/verisilicon/hantro_g2_hevc_dec.c
@@ -0,0 +1,627 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hantro VPU HEVC codec driver
+ *
+ * Copyright (C) 2020 Safran Passenger Innovations LLC
+ */
+
+#include "hantro_hw.h"
+#include "hantro_g2_regs.h"
+
+#define G2_ALIGN 16
+
+static size_t hantro_hevc_chroma_offset(struct hantro_ctx *ctx)
+{
+ return ctx->dst_fmt.width * ctx->dst_fmt.height * ctx->bit_depth / 8;
+}
+
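+/*
+ * The motion vector buffer follows the decoded picture: the chroma
+ * offset above equals the luma plane size, so (cr_offset * 3) / 2 covers
+ * luma plus half-sized 4:2:0 chroma, aligned up to G2_ALIGN.
+ */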
+static size_t hantro_hevc_motion_vectors_offset(struct hantro_ctx *ctx)
+{
+ size_t cr_offset = hantro_hevc_chroma_offset(ctx);
+
+ return ALIGN((cr_offset * 3) / 2, G2_ALIGN);
+}
+
+static void prepare_tile_info_buffer(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ const struct hantro_hevc_dec_ctrls *ctrls = &ctx->hevc_dec.ctrls;
+ const struct v4l2_ctrl_hevc_pps *pps = ctrls->pps;
+ const struct v4l2_ctrl_hevc_sps *sps = ctrls->sps;
+ u16 *p = (u16 *)ctx->hevc_dec.tile_sizes.cpu;
+ unsigned int num_tile_rows = pps->num_tile_rows_minus1 + 1;
+ unsigned int num_tile_cols = pps->num_tile_columns_minus1 + 1;
+ unsigned int pic_width_in_ctbs, pic_height_in_ctbs;
+ unsigned int max_log2_ctb_size, ctb_size;
+ bool tiles_enabled, uniform_spacing;
+ u32 no_chroma = 0;
+
+ tiles_enabled = !!(pps->flags & V4L2_HEVC_PPS_FLAG_TILES_ENABLED);
+ uniform_spacing = !!(pps->flags & V4L2_HEVC_PPS_FLAG_UNIFORM_SPACING);
+
+ hantro_reg_write(vpu, &g2_tile_e, tiles_enabled);
+
+ max_log2_ctb_size = sps->log2_min_luma_coding_block_size_minus3 + 3 +
+ sps->log2_diff_max_min_luma_coding_block_size;
+ pic_width_in_ctbs = (sps->pic_width_in_luma_samples +
+ (1 << max_log2_ctb_size) - 1) >> max_log2_ctb_size;
+ pic_height_in_ctbs = (sps->pic_height_in_luma_samples + (1 << max_log2_ctb_size) - 1)
+ >> max_log2_ctb_size;
+ ctb_size = 1 << max_log2_ctb_size;
+
+ vpu_debug(1, "Preparing tile sizes buffer for %dx%d CTBs (CTB size %d)\n",
+ pic_width_in_ctbs, pic_height_in_ctbs, ctb_size);
+
+ if (tiles_enabled) {
+ unsigned int i, j, h;
+
+ vpu_debug(1, "Tiles enabled! %dx%d\n", num_tile_cols, num_tile_rows);
+
+ hantro_reg_write(vpu, &g2_num_tile_rows, num_tile_rows);
+ hantro_reg_write(vpu, &g2_num_tile_cols, num_tile_cols);
+
+ /* write width + height for each tile in pic */
+ if (!uniform_spacing) {
+ u32 tmp_w = 0, tmp_h = 0;
+
+ for (i = 0; i < num_tile_rows; i++) {
+ if (i == num_tile_rows - 1)
+ h = pic_height_in_ctbs - tmp_h;
+ else
+ h = pps->row_height_minus1[i] + 1;
+ tmp_h += h;
+ if (i == 0 && h == 1 && ctb_size == 16)
+ no_chroma = 1;
+ for (j = 0, tmp_w = 0; j < num_tile_cols - 1; j++) {
+ tmp_w += pps->column_width_minus1[j] + 1;
+ *p++ = pps->column_width_minus1[j] + 1;
+ *p++ = h;
+ if (i == 0 && h == 1 && ctb_size == 16)
+ no_chroma = 1;
+ }
+ /* last column */
+ *p++ = pic_width_in_ctbs - tmp_w;
+ *p++ = h;
+ }
+ } else { /* uniform spacing */
+ u32 tmp, prev_h, prev_w;
+
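+			/*
+			 * Uniform spacing spreads the CTBs as evenly as integer
+			 * division allows: boundary i sits at
+			 * (i + 1) * pic_height_in_ctbs / num_tile_rows. E.g. 10
+			 * CTB rows over 3 tile rows gives heights 3, 3 and 4.
+			 */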
+ for (i = 0, prev_h = 0; i < num_tile_rows; i++) {
+ tmp = (i + 1) * pic_height_in_ctbs / num_tile_rows;
+ h = tmp - prev_h;
+ prev_h = tmp;
+ if (i == 0 && h == 1 && ctb_size == 16)
+ no_chroma = 1;
+ for (j = 0, prev_w = 0; j < num_tile_cols; j++) {
+ tmp = (j + 1) * pic_width_in_ctbs / num_tile_cols;
+ *p++ = tmp - prev_w;
+ *p++ = h;
+ if (j == 0 &&
+ (pps->column_width_minus1[0] + 1) == 1 &&
+ ctb_size == 16)
+ no_chroma = 1;
+ prev_w = tmp;
+ }
+ }
+ }
+ } else {
+ hantro_reg_write(vpu, &g2_num_tile_rows, 1);
+ hantro_reg_write(vpu, &g2_num_tile_cols, 1);
+
+ /* There's one tile, with dimensions equal to pic size. */
+ p[0] = pic_width_in_ctbs;
+ p[1] = pic_height_in_ctbs;
+ }
+
+ if (no_chroma)
+ vpu_debug(1, "%s: no chroma!\n", __func__);
+}
+
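+/*
+ * Number of slice-header bits the hardware is told to skip: the sizes of
+ * pic_output_flag, colour_plane_id, pic_order_cnt_lsb and the reference
+ * picture set syntax, whose presence depends on the SPS/PPS flags and the
+ * decode parameters. The result is programmed into g2_hdr_skip_length.
+ */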
+static int compute_header_skip_length(struct hantro_ctx *ctx)
+{
+ const struct hantro_hevc_dec_ctrls *ctrls = &ctx->hevc_dec.ctrls;
+ const struct v4l2_ctrl_hevc_decode_params *decode_params = ctrls->decode_params;
+ const struct v4l2_ctrl_hevc_sps *sps = ctrls->sps;
+ const struct v4l2_ctrl_hevc_pps *pps = ctrls->pps;
+ int skip = 0;
+
+ if (pps->flags & V4L2_HEVC_PPS_FLAG_OUTPUT_FLAG_PRESENT)
+ /* size of pic_output_flag */
+ skip++;
+
+	if (sps->flags & V4L2_HEVC_SPS_FLAG_SEPARATE_COLOUR_PLANE)
+		/* size of colour_plane_id */
+		skip += 2;
+
+ if (!(decode_params->flags & V4L2_HEVC_DECODE_PARAM_FLAG_IDR_PIC)) {
+ /* size of pic_order_cnt_lsb */
+ skip += sps->log2_max_pic_order_cnt_lsb_minus4 + 4;
+
+ /* size of short_term_ref_pic_set_sps_flag */
+ skip++;
+
+ if (decode_params->short_term_ref_pic_set_size)
+ /* size of st_ref_pic_set( num_short_term_ref_pic_sets ) */
+ skip += decode_params->short_term_ref_pic_set_size;
+ else if (sps->num_short_term_ref_pic_sets > 1)
+ skip += fls(sps->num_short_term_ref_pic_sets - 1);
+
+ skip += decode_params->long_term_ref_pic_set_size;
+ }
+
+ return skip;
+}
+
+static void set_params(struct hantro_ctx *ctx)
+{
+ const struct hantro_hevc_dec_ctrls *ctrls = &ctx->hevc_dec.ctrls;
+ const struct v4l2_ctrl_hevc_sps *sps = ctrls->sps;
+ const struct v4l2_ctrl_hevc_pps *pps = ctrls->pps;
+ const struct v4l2_ctrl_hevc_decode_params *decode_params = ctrls->decode_params;
+ struct hantro_dev *vpu = ctx->dev;
+ u32 min_log2_cb_size, max_log2_ctb_size, min_cb_size, max_ctb_size;
+ u32 pic_width_in_min_cbs, pic_height_in_min_cbs;
+ u32 pic_width_aligned, pic_height_aligned;
+ u32 partial_ctb_x, partial_ctb_y;
+
+ hantro_reg_write(vpu, &g2_bit_depth_y_minus8, sps->bit_depth_luma_minus8);
+ hantro_reg_write(vpu, &g2_bit_depth_c_minus8, sps->bit_depth_chroma_minus8);
+
+ hantro_reg_write(vpu, &g2_hdr_skip_length, compute_header_skip_length(ctx));
+
+ min_log2_cb_size = sps->log2_min_luma_coding_block_size_minus3 + 3;
+ max_log2_ctb_size = min_log2_cb_size + sps->log2_diff_max_min_luma_coding_block_size;
+
+ hantro_reg_write(vpu, &g2_min_cb_size, min_log2_cb_size);
+ hantro_reg_write(vpu, &g2_max_cb_size, max_log2_ctb_size);
+
+ min_cb_size = 1 << min_log2_cb_size;
+ max_ctb_size = 1 << max_log2_ctb_size;
+
+ pic_width_in_min_cbs = sps->pic_width_in_luma_samples / min_cb_size;
+ pic_height_in_min_cbs = sps->pic_height_in_luma_samples / min_cb_size;
+ pic_width_aligned = ALIGN(sps->pic_width_in_luma_samples, max_ctb_size);
+ pic_height_aligned = ALIGN(sps->pic_height_in_luma_samples, max_ctb_size);
+
+	partial_ctb_x = sps->pic_width_in_luma_samples != pic_width_aligned;
+	partial_ctb_y = sps->pic_height_in_luma_samples != pic_height_aligned;
+
+ hantro_reg_write(vpu, &g2_partial_ctb_x, partial_ctb_x);
+ hantro_reg_write(vpu, &g2_partial_ctb_y, partial_ctb_y);
+
+ hantro_reg_write(vpu, &g2_pic_width_in_cbs, pic_width_in_min_cbs);
+ hantro_reg_write(vpu, &g2_pic_height_in_cbs, pic_height_in_min_cbs);
+
+ hantro_reg_write(vpu, &g2_pic_width_4x4,
+ (pic_width_in_min_cbs * min_cb_size) / 4);
+ hantro_reg_write(vpu, &g2_pic_height_4x4,
+ (pic_height_in_min_cbs * min_cb_size) / 4);
+
+ hantro_reg_write(vpu, &hevc_max_inter_hierdepth,
+ sps->max_transform_hierarchy_depth_inter);
+ hantro_reg_write(vpu, &hevc_max_intra_hierdepth,
+ sps->max_transform_hierarchy_depth_intra);
+ hantro_reg_write(vpu, &hevc_min_trb_size,
+ sps->log2_min_luma_transform_block_size_minus2 + 2);
+ hantro_reg_write(vpu, &hevc_max_trb_size,
+ sps->log2_min_luma_transform_block_size_minus2 + 2 +
+ sps->log2_diff_max_min_luma_transform_block_size);
+
+ hantro_reg_write(vpu, &g2_tempor_mvp_e,
+ !!(sps->flags & V4L2_HEVC_SPS_FLAG_SPS_TEMPORAL_MVP_ENABLED) &&
+ !(decode_params->flags & V4L2_HEVC_DECODE_PARAM_FLAG_IDR_PIC));
+ hantro_reg_write(vpu, &g2_strong_smooth_e,
+ !!(sps->flags & V4L2_HEVC_SPS_FLAG_STRONG_INTRA_SMOOTHING_ENABLED));
+ hantro_reg_write(vpu, &g2_asym_pred_e,
+ !!(sps->flags & V4L2_HEVC_SPS_FLAG_AMP_ENABLED));
+ hantro_reg_write(vpu, &g2_sao_e,
+ !!(sps->flags & V4L2_HEVC_SPS_FLAG_SAMPLE_ADAPTIVE_OFFSET));
+ hantro_reg_write(vpu, &g2_sign_data_hide,
+ !!(pps->flags & V4L2_HEVC_PPS_FLAG_SIGN_DATA_HIDING_ENABLED));
+
+ if (pps->flags & V4L2_HEVC_PPS_FLAG_CU_QP_DELTA_ENABLED) {
+ hantro_reg_write(vpu, &g2_cu_qpd_e, 1);
+ hantro_reg_write(vpu, &g2_max_cu_qpd_depth, pps->diff_cu_qp_delta_depth);
+ } else {
+ hantro_reg_write(vpu, &g2_cu_qpd_e, 0);
+ hantro_reg_write(vpu, &g2_max_cu_qpd_depth, 0);
+ }
+
+ hantro_reg_write(vpu, &g2_cb_qp_offset, pps->pps_cb_qp_offset);
+ hantro_reg_write(vpu, &g2_cr_qp_offset, pps->pps_cr_qp_offset);
+
+ hantro_reg_write(vpu, &g2_filt_offset_beta, pps->pps_beta_offset_div2);
+ hantro_reg_write(vpu, &g2_filt_offset_tc, pps->pps_tc_offset_div2);
+ hantro_reg_write(vpu, &g2_slice_hdr_ext_e,
+ !!(pps->flags & V4L2_HEVC_PPS_FLAG_SLICE_SEGMENT_HEADER_EXTENSION_PRESENT));
+ hantro_reg_write(vpu, &g2_slice_hdr_ext_bits, pps->num_extra_slice_header_bits);
+ hantro_reg_write(vpu, &g2_slice_chqp_present,
+ !!(pps->flags & V4L2_HEVC_PPS_FLAG_PPS_SLICE_CHROMA_QP_OFFSETS_PRESENT));
+ hantro_reg_write(vpu, &g2_weight_bipr_idc,
+ !!(pps->flags & V4L2_HEVC_PPS_FLAG_WEIGHTED_BIPRED));
+ hantro_reg_write(vpu, &g2_transq_bypass,
+ !!(pps->flags & V4L2_HEVC_PPS_FLAG_TRANSQUANT_BYPASS_ENABLED));
+ hantro_reg_write(vpu, &g2_list_mod_e,
+ !!(pps->flags & V4L2_HEVC_PPS_FLAG_LISTS_MODIFICATION_PRESENT));
+ hantro_reg_write(vpu, &g2_entropy_sync_e,
+ !!(pps->flags & V4L2_HEVC_PPS_FLAG_ENTROPY_CODING_SYNC_ENABLED));
+ hantro_reg_write(vpu, &g2_cabac_init_present,
+ !!(pps->flags & V4L2_HEVC_PPS_FLAG_CABAC_INIT_PRESENT));
+ hantro_reg_write(vpu, &g2_idr_pic_e,
+ !!(decode_params->flags & V4L2_HEVC_DECODE_PARAM_FLAG_IRAP_PIC));
+ hantro_reg_write(vpu, &hevc_parallel_merge,
+ pps->log2_parallel_merge_level_minus2 + 2);
+ hantro_reg_write(vpu, &g2_pcm_filt_d,
+ !!(sps->flags & V4L2_HEVC_SPS_FLAG_PCM_LOOP_FILTER_DISABLED));
+ hantro_reg_write(vpu, &g2_pcm_e,
+ !!(sps->flags & V4L2_HEVC_SPS_FLAG_PCM_ENABLED));
+ if (sps->flags & V4L2_HEVC_SPS_FLAG_PCM_ENABLED) {
+ hantro_reg_write(vpu, &g2_max_pcm_size,
+ sps->log2_diff_max_min_pcm_luma_coding_block_size +
+ sps->log2_min_pcm_luma_coding_block_size_minus3 + 3);
+ hantro_reg_write(vpu, &g2_min_pcm_size,
+ sps->log2_min_pcm_luma_coding_block_size_minus3 + 3);
+ hantro_reg_write(vpu, &g2_bit_depth_pcm_y,
+ sps->pcm_sample_bit_depth_luma_minus1 + 1);
+ hantro_reg_write(vpu, &g2_bit_depth_pcm_c,
+ sps->pcm_sample_bit_depth_chroma_minus1 + 1);
+ } else {
+ hantro_reg_write(vpu, &g2_max_pcm_size, 0);
+ hantro_reg_write(vpu, &g2_min_pcm_size, 0);
+ hantro_reg_write(vpu, &g2_bit_depth_pcm_y, 0);
+ hantro_reg_write(vpu, &g2_bit_depth_pcm_c, 0);
+ }
+
+ hantro_reg_write(vpu, &g2_start_code_e, 1);
+ hantro_reg_write(vpu, &g2_init_qp, pps->init_qp_minus26 + 26);
+ hantro_reg_write(vpu, &g2_weight_pred_e,
+ !!(pps->flags & V4L2_HEVC_PPS_FLAG_WEIGHTED_PRED));
+ hantro_reg_write(vpu, &g2_const_intra_e,
+ !!(pps->flags & V4L2_HEVC_PPS_FLAG_CONSTRAINED_INTRA_PRED));
+ hantro_reg_write(vpu, &g2_transform_skip,
+ !!(pps->flags & V4L2_HEVC_PPS_FLAG_TRANSFORM_SKIP_ENABLED));
+ hantro_reg_write(vpu, &g2_out_filtering_dis,
+ !!(pps->flags & V4L2_HEVC_PPS_FLAG_PPS_DISABLE_DEBLOCKING_FILTER));
+ hantro_reg_write(vpu, &g2_filt_ctrl_pres,
+ !!(pps->flags & V4L2_HEVC_PPS_FLAG_DEBLOCKING_FILTER_CONTROL_PRESENT));
+ hantro_reg_write(vpu, &g2_dependent_slice,
+ !!(pps->flags & V4L2_HEVC_PPS_FLAG_DEPENDENT_SLICE_SEGMENT_ENABLED));
+ hantro_reg_write(vpu, &g2_filter_override,
+ !!(pps->flags & V4L2_HEVC_PPS_FLAG_DEBLOCKING_FILTER_OVERRIDE_ENABLED));
+ hantro_reg_write(vpu, &g2_refidx0_active,
+ pps->num_ref_idx_l0_default_active_minus1 + 1);
+ hantro_reg_write(vpu, &g2_refidx1_active,
+ pps->num_ref_idx_l1_default_active_minus1 + 1);
+ hantro_reg_write(vpu, &g2_apf_threshold, 8);
+}
+
+static void set_ref_pic_list(struct hantro_ctx *ctx)
+{
+ const struct hantro_hevc_dec_ctrls *ctrls = &ctx->hevc_dec.ctrls;
+ struct hantro_dev *vpu = ctx->dev;
+ const struct v4l2_ctrl_hevc_decode_params *decode_params = ctrls->decode_params;
+ u32 list0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX] = {};
+ u32 list1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX] = {};
+ static const struct hantro_reg ref_pic_regs0[] = {
+ hevc_rlist_f0,
+ hevc_rlist_f1,
+ hevc_rlist_f2,
+ hevc_rlist_f3,
+ hevc_rlist_f4,
+ hevc_rlist_f5,
+ hevc_rlist_f6,
+ hevc_rlist_f7,
+ hevc_rlist_f8,
+ hevc_rlist_f9,
+ hevc_rlist_f10,
+ hevc_rlist_f11,
+ hevc_rlist_f12,
+ hevc_rlist_f13,
+ hevc_rlist_f14,
+ hevc_rlist_f15,
+ };
+ static const struct hantro_reg ref_pic_regs1[] = {
+ hevc_rlist_b0,
+ hevc_rlist_b1,
+ hevc_rlist_b2,
+ hevc_rlist_b3,
+ hevc_rlist_b4,
+ hevc_rlist_b5,
+ hevc_rlist_b6,
+ hevc_rlist_b7,
+ hevc_rlist_b8,
+ hevc_rlist_b9,
+ hevc_rlist_b10,
+ hevc_rlist_b11,
+ hevc_rlist_b12,
+ hevc_rlist_b13,
+ hevc_rlist_b14,
+ hevc_rlist_b15,
+ };
+ unsigned int i, j;
+
+ /* List 0 contains: short term before, short term after and long term */
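+	/*
+	 * Both lists concatenate the same three DPB index groups, with list 1
+	 * swapping the "before" and "after" short-term groups, mirroring the
+	 * HEVC reference list initialization. Entries then repeat cyclically
+	 * until all 16 register slots are filled.
+	 */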
+ j = 0;
+ for (i = 0; i < decode_params->num_poc_st_curr_before && j < ARRAY_SIZE(list0); i++)
+ list0[j++] = decode_params->poc_st_curr_before[i];
+ for (i = 0; i < decode_params->num_poc_st_curr_after && j < ARRAY_SIZE(list0); i++)
+ list0[j++] = decode_params->poc_st_curr_after[i];
+ for (i = 0; i < decode_params->num_poc_lt_curr && j < ARRAY_SIZE(list0); i++)
+ list0[j++] = decode_params->poc_lt_curr[i];
+
+ /* Fill the list, copying over and over */
+ i = 0;
+ while (j < ARRAY_SIZE(list0))
+ list0[j++] = list0[i++];
+
+ j = 0;
+ for (i = 0; i < decode_params->num_poc_st_curr_after && j < ARRAY_SIZE(list1); i++)
+ list1[j++] = decode_params->poc_st_curr_after[i];
+ for (i = 0; i < decode_params->num_poc_st_curr_before && j < ARRAY_SIZE(list1); i++)
+ list1[j++] = decode_params->poc_st_curr_before[i];
+ for (i = 0; i < decode_params->num_poc_lt_curr && j < ARRAY_SIZE(list1); i++)
+ list1[j++] = decode_params->poc_lt_curr[i];
+
+ i = 0;
+ while (j < ARRAY_SIZE(list1))
+ list1[j++] = list1[i++];
+
+ for (i = 0; i < V4L2_HEVC_DPB_ENTRIES_NUM_MAX; i++) {
+ hantro_reg_write(vpu, &ref_pic_regs0[i], list0[i]);
+ hantro_reg_write(vpu, &ref_pic_regs1[i], list1[i]);
+ }
+}
+
+static int set_ref(struct hantro_ctx *ctx)
+{
+ const struct hantro_hevc_dec_ctrls *ctrls = &ctx->hevc_dec.ctrls;
+ const struct v4l2_ctrl_hevc_pps *pps = ctrls->pps;
+ const struct v4l2_ctrl_hevc_decode_params *decode_params = ctrls->decode_params;
+ const struct v4l2_hevc_dpb_entry *dpb = decode_params->dpb;
+ dma_addr_t luma_addr, chroma_addr, mv_addr = 0;
+ struct hantro_dev *vpu = ctx->dev;
+ struct vb2_v4l2_buffer *vb2_dst;
+ struct hantro_decoded_buffer *dst;
+ size_t cr_offset = hantro_hevc_chroma_offset(ctx);
+ size_t mv_offset = hantro_hevc_motion_vectors_offset(ctx);
+ u32 max_ref_frames;
+ u16 dpb_longterm_e;
+ static const struct hantro_reg cur_poc[] = {
+ hevc_cur_poc_00,
+ hevc_cur_poc_01,
+ hevc_cur_poc_02,
+ hevc_cur_poc_03,
+ hevc_cur_poc_04,
+ hevc_cur_poc_05,
+ hevc_cur_poc_06,
+ hevc_cur_poc_07,
+ hevc_cur_poc_08,
+ hevc_cur_poc_09,
+ hevc_cur_poc_10,
+ hevc_cur_poc_11,
+ hevc_cur_poc_12,
+ hevc_cur_poc_13,
+ hevc_cur_poc_14,
+ hevc_cur_poc_15,
+ };
+ unsigned int i;
+
+ max_ref_frames = decode_params->num_poc_lt_curr +
+ decode_params->num_poc_st_curr_before +
+ decode_params->num_poc_st_curr_after;
+ /*
+ * Set max_ref_frames to non-zero to avoid HW hang when decoding
+ * badly marked I-frames.
+ */
+ max_ref_frames = max_ref_frames ? max_ref_frames : 1;
+ hantro_reg_write(vpu, &g2_num_ref_frames, max_ref_frames);
+ hantro_reg_write(vpu, &g2_filter_over_slices,
+ !!(pps->flags & V4L2_HEVC_PPS_FLAG_PPS_LOOP_FILTER_ACROSS_SLICES_ENABLED));
+ hantro_reg_write(vpu, &g2_filter_over_tiles,
+ !!(pps->flags & V4L2_HEVC_PPS_FLAG_LOOP_FILTER_ACROSS_TILES_ENABLED));
+
+	/* Write the POC difference between the current picture and each reference. */
+	for (i = 0; i < decode_params->num_active_dpb_entries && i < ARRAY_SIZE(cur_poc); i++) {
+		s8 poc_diff = decode_params->pic_order_cnt_val - dpb[i].pic_order_cnt_val;
+
+		hantro_reg_write(vpu, &cur_poc[i], poc_diff);
+	}
+
+ if (i < ARRAY_SIZE(cur_poc)) {
+ /*
+ * After the references, fill one entry pointing to itself,
+ * i.e. difference is zero.
+ */
+ hantro_reg_write(vpu, &cur_poc[i], 0);
+ i++;
+ }
+
+ /* Fill the rest with the current picture */
+ for (; i < ARRAY_SIZE(cur_poc); i++)
+ hantro_reg_write(vpu, &cur_poc[i], decode_params->pic_order_cnt_val);
+
+ set_ref_pic_list(ctx);
+
+ /* We will only keep the reference pictures that are still used */
+ hantro_hevc_ref_init(ctx);
+
+ /* Set up addresses of DPB buffers */
+ dpb_longterm_e = 0;
+ for (i = 0; i < decode_params->num_active_dpb_entries &&
+ i < (V4L2_HEVC_DPB_ENTRIES_NUM_MAX - 1); i++) {
+ luma_addr = hantro_hevc_get_ref_buf(ctx, dpb[i].pic_order_cnt_val);
+ if (!luma_addr)
+ return -ENOMEM;
+
+ chroma_addr = luma_addr + cr_offset;
+ mv_addr = luma_addr + mv_offset;
+
+ if (dpb[i].flags & V4L2_HEVC_DPB_ENTRY_LONG_TERM_REFERENCE)
+ dpb_longterm_e |= BIT(V4L2_HEVC_DPB_ENTRIES_NUM_MAX - 1 - i);
+
+ hantro_write_addr(vpu, G2_REF_LUMA_ADDR(i), luma_addr);
+ hantro_write_addr(vpu, G2_REF_CHROMA_ADDR(i), chroma_addr);
+ hantro_write_addr(vpu, G2_REF_MV_ADDR(i), mv_addr);
+ }
+
+ vb2_dst = hantro_get_dst_buf(ctx);
+ dst = vb2_to_hantro_decoded_buf(&vb2_dst->vb2_buf);
+ luma_addr = hantro_get_dec_buf_addr(ctx, &dst->base.vb.vb2_buf);
+ if (!luma_addr)
+ return -ENOMEM;
+
+ if (hantro_hevc_add_ref_buf(ctx, decode_params->pic_order_cnt_val, luma_addr))
+ return -EINVAL;
+
+ chroma_addr = luma_addr + cr_offset;
+ mv_addr = luma_addr + mv_offset;
+
+ hantro_write_addr(vpu, G2_REF_LUMA_ADDR(i), luma_addr);
+ hantro_write_addr(vpu, G2_REF_CHROMA_ADDR(i), chroma_addr);
+ hantro_write_addr(vpu, G2_REF_MV_ADDR(i++), mv_addr);
+
+ hantro_write_addr(vpu, G2_OUT_LUMA_ADDR, luma_addr);
+ hantro_write_addr(vpu, G2_OUT_CHROMA_ADDR, chroma_addr);
+ hantro_write_addr(vpu, G2_OUT_MV_ADDR, mv_addr);
+
+ for (; i < V4L2_HEVC_DPB_ENTRIES_NUM_MAX; i++) {
+ hantro_write_addr(vpu, G2_REF_LUMA_ADDR(i), 0);
+ hantro_write_addr(vpu, G2_REF_CHROMA_ADDR(i), 0);
+ hantro_write_addr(vpu, G2_REF_MV_ADDR(i), 0);
+ }
+
+ hantro_reg_write(vpu, &g2_refer_lterm_e, dpb_longterm_e);
+
+ return 0;
+}
+
+static void set_buffers(struct hantro_ctx *ctx)
+{
+ struct vb2_v4l2_buffer *src_buf;
+ struct hantro_dev *vpu = ctx->dev;
+ dma_addr_t src_dma;
+ u32 src_len, src_buf_len;
+
+ src_buf = hantro_get_src_buf(ctx);
+
+ /* Source (stream) buffer. */
+ src_dma = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
+ src_len = vb2_get_plane_payload(&src_buf->vb2_buf, 0);
+ src_buf_len = vb2_plane_size(&src_buf->vb2_buf, 0);
+
+ hantro_write_addr(vpu, G2_STREAM_ADDR, src_dma);
+ hantro_reg_write(vpu, &g2_stream_len, src_len);
+ hantro_reg_write(vpu, &g2_strm_buffer_len, src_buf_len);
+ hantro_reg_write(vpu, &g2_strm_start_offset, 0);
+ hantro_reg_write(vpu, &g2_write_mvs_e, 1);
+
+ hantro_write_addr(vpu, G2_TILE_SIZES_ADDR, ctx->hevc_dec.tile_sizes.dma);
+ hantro_write_addr(vpu, G2_TILE_FILTER_ADDR, ctx->hevc_dec.tile_filter.dma);
+ hantro_write_addr(vpu, G2_TILE_SAO_ADDR, ctx->hevc_dec.tile_sao.dma);
+ hantro_write_addr(vpu, G2_TILE_BSD_ADDR, ctx->hevc_dec.tile_bsd.dma);
+}
+
+static void prepare_scaling_list_buffer(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ const struct hantro_hevc_dec_ctrls *ctrls = &ctx->hevc_dec.ctrls;
+ const struct v4l2_ctrl_hevc_scaling_matrix *sc = ctrls->scaling;
+ const struct v4l2_ctrl_hevc_sps *sps = ctrls->sps;
+ u8 *p = ((u8 *)ctx->hevc_dec.scaling_lists.cpu);
+ unsigned int scaling_list_enabled;
+ unsigned int i, j, k;
+
+ scaling_list_enabled = !!(sps->flags & V4L2_HEVC_SPS_FLAG_SCALING_LIST_ENABLED);
+ hantro_reg_write(vpu, &g2_scaling_list_e, scaling_list_enabled);
+
+ if (!scaling_list_enabled)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(sc->scaling_list_dc_coef_16x16); i++)
+ *p++ = sc->scaling_list_dc_coef_16x16[i];
+
+ for (i = 0; i < ARRAY_SIZE(sc->scaling_list_dc_coef_32x32); i++)
+ *p++ = sc->scaling_list_dc_coef_32x32[i];
+
+ /* 128-bit boundary */
+ p += 8;
+
+	/*
+	 * Write the scaling lists column by column: the driver indexes the
+	 * control arrays row-major (size * row + column) and emits each
+	 * column's entries consecutively, i.e. transposed.
+	 */
+
+ for (i = 0; i < 6; i++)
+ for (j = 0; j < 4; j++)
+ for (k = 0; k < 4; k++)
+ *p++ = sc->scaling_list_4x4[i][4 * k + j];
+
+ for (i = 0; i < 6; i++)
+ for (j = 0; j < 8; j++)
+ for (k = 0; k < 8; k++)
+ *p++ = sc->scaling_list_8x8[i][8 * k + j];
+
+ for (i = 0; i < 6; i++)
+ for (j = 0; j < 8; j++)
+ for (k = 0; k < 8; k++)
+ *p++ = sc->scaling_list_16x16[i][8 * k + j];
+
+ for (i = 0; i < 2; i++)
+ for (j = 0; j < 8; j++)
+ for (k = 0; k < 8; k++)
+ *p++ = sc->scaling_list_32x32[i][8 * k + j];
+
+ hantro_write_addr(vpu, G2_HEVC_SCALING_LIST_ADDR, ctx->hevc_dec.scaling_lists.dma);
+}
+
+int hantro_g2_hevc_dec_run(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ int ret;
+
+ hantro_g2_check_idle(vpu);
+
+ /* Prepare HEVC decoder context. */
+ ret = hantro_hevc_dec_prepare_run(ctx);
+ if (ret)
+ return ret;
+
+ /* Configure hardware registers. */
+ set_params(ctx);
+
+ /* set reference pictures */
+ ret = set_ref(ctx);
+ if (ret)
+ return ret;
+
+ set_buffers(ctx);
+ prepare_tile_info_buffer(ctx);
+
+ prepare_scaling_list_buffer(ctx);
+
+ hantro_end_prepare_run(ctx);
+
+ hantro_reg_write(vpu, &g2_mode, HEVC_DEC_MODE);
+ hantro_reg_write(vpu, &g2_clk_gate_e, 1);
+
+ /* Don't disable output */
+ hantro_reg_write(vpu, &g2_out_dis, 0);
+
+ /* Don't compress buffers */
+ hantro_reg_write(vpu, &g2_ref_compress_bypass, 1);
+
+ /* Bus width and max burst */
+ hantro_reg_write(vpu, &g2_buswidth, BUS_WIDTH_128);
+ hantro_reg_write(vpu, &g2_max_burst, 16);
+
+ /* Swap */
+ hantro_reg_write(vpu, &g2_strm_swap, 0xf);
+ hantro_reg_write(vpu, &g2_dirmv_swap, 0xf);
+ hantro_reg_write(vpu, &g2_compress_swap, 0xf);
+
+ /* Start decoding! */
+ vdpu_write(vpu, G2_REG_INTERRUPT_DEC_E, G2_REG_INTERRUPT);
+
+ return 0;
+}
diff --git a/drivers/media/platform/verisilicon/hantro_g2_regs.h b/drivers/media/platform/verisilicon/hantro_g2_regs.h
new file mode 100644
index 0000000000..8260678359
--- /dev/null
+++ b/drivers/media/platform/verisilicon/hantro_g2_regs.h
@@ -0,0 +1,325 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021, Collabora
+ *
+ * Author: Benjamin Gaignard <benjamin.gaignard@collabora.com>
+ */
+
+#ifndef HANTRO_G2_REGS_H_
+#define HANTRO_G2_REGS_H_
+
+#include "hantro.h"
+
+#define G2_SWREG(nr) ((nr) * 4)
+
+#define G2_DEC_REG(b, s, m) \
+ ((const struct hantro_reg) { \
+ .base = G2_SWREG(b), \
+ .shift = s, \
+ .mask = m, \
+ })
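+
+/*
+ * Example: g2_mode below is G2_DEC_REG(3, 27, 0x1f), i.e. the 5-bit field
+ * occupying bits [31:27] of swreg3 (byte offset 3 * 4 = 0xc).
+ */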
+
+#define G2_REG_VERSION G2_SWREG(0)
+
+#define G2_REG_INTERRUPT G2_SWREG(1)
+#define G2_REG_INTERRUPT_DEC_RDY_INT BIT(12)
+#define G2_REG_INTERRUPT_DEC_ABORT_E BIT(5)
+#define G2_REG_INTERRUPT_DEC_IRQ_DIS BIT(4)
+#define G2_REG_INTERRUPT_DEC_E BIT(0)
+
+#define HEVC_DEC_MODE 0xc
+#define VP9_DEC_MODE 0xd
+
+#define BUS_WIDTH_32 0
+#define BUS_WIDTH_64 1
+#define BUS_WIDTH_128 2
+#define BUS_WIDTH_256 3
+
+#define g2_strm_swap G2_DEC_REG(2, 28, 0xf)
+#define g2_strm_swap_old G2_DEC_REG(2, 27, 0x1f)
+#define g2_pic_swap G2_DEC_REG(2, 22, 0x1f)
+#define g2_dirmv_swap G2_DEC_REG(2, 20, 0xf)
+#define g2_dirmv_swap_old G2_DEC_REG(2, 17, 0x1f)
+#define g2_tab0_swap_old G2_DEC_REG(2, 12, 0x1f)
+#define g2_tab1_swap_old G2_DEC_REG(2, 7, 0x1f)
+#define g2_tab2_swap_old G2_DEC_REG(2, 2, 0x1f)
+
+#define g2_mode G2_DEC_REG(3, 27, 0x1f)
+#define g2_compress_swap G2_DEC_REG(3, 20, 0xf)
+#define g2_ref_compress_bypass G2_DEC_REG(3, 17, 0x1)
+#define g2_out_rs_e G2_DEC_REG(3, 16, 0x1)
+#define g2_out_dis G2_DEC_REG(3, 15, 0x1)
+#define g2_out_filtering_dis G2_DEC_REG(3, 14, 0x1)
+#define g2_write_mvs_e G2_DEC_REG(3, 12, 0x1)
+#define g2_tab3_swap_old G2_DEC_REG(3, 7, 0x1f)
+#define g2_rscan_swap G2_DEC_REG(3, 2, 0x1f)
+
+#define g2_pic_width_in_cbs G2_DEC_REG(4, 19, 0x1fff)
+#define g2_pic_height_in_cbs G2_DEC_REG(4, 6, 0x1fff)
+#define g2_num_ref_frames G2_DEC_REG(4, 0, 0x1f)
+
+#define g2_start_bit G2_DEC_REG(5, 25, 0x7f)
+#define g2_scaling_list_e G2_DEC_REG(5, 24, 0x1)
+#define g2_cb_qp_offset G2_DEC_REG(5, 19, 0x1f)
+#define g2_cr_qp_offset G2_DEC_REG(5, 14, 0x1f)
+#define g2_sign_data_hide G2_DEC_REG(5, 12, 0x1)
+#define g2_tempor_mvp_e G2_DEC_REG(5, 11, 0x1)
+#define g2_max_cu_qpd_depth G2_DEC_REG(5, 5, 0x3f)
+#define g2_cu_qpd_e G2_DEC_REG(5, 4, 0x1)
+#define g2_pix_shift G2_DEC_REG(5, 0, 0xf)
+
+#define g2_stream_len G2_DEC_REG(6, 0, 0xffffffff)
+
+#define g2_cabac_init_present G2_DEC_REG(7, 31, 0x1)
+#define g2_weight_pred_e G2_DEC_REG(7, 28, 0x1)
+#define g2_weight_bipr_idc G2_DEC_REG(7, 26, 0x3)
+#define g2_filter_over_slices G2_DEC_REG(7, 25, 0x1)
+#define g2_filter_over_tiles G2_DEC_REG(7, 24, 0x1)
+#define g2_asym_pred_e G2_DEC_REG(7, 23, 0x1)
+#define g2_sao_e G2_DEC_REG(7, 22, 0x1)
+#define g2_pcm_filt_d G2_DEC_REG(7, 21, 0x1)
+#define g2_slice_chqp_present G2_DEC_REG(7, 20, 0x1)
+#define g2_dependent_slice G2_DEC_REG(7, 19, 0x1)
+#define g2_filter_override G2_DEC_REG(7, 18, 0x1)
+#define g2_strong_smooth_e G2_DEC_REG(7, 17, 0x1)
+#define g2_filt_offset_beta G2_DEC_REG(7, 12, 0x1f)
+#define g2_filt_offset_tc G2_DEC_REG(7, 7, 0x1f)
+#define g2_slice_hdr_ext_e G2_DEC_REG(7, 6, 0x1)
+#define g2_slice_hdr_ext_bits G2_DEC_REG(7, 3, 0x7)
+
+#define g2_const_intra_e G2_DEC_REG(8, 31, 0x1)
+#define g2_filt_ctrl_pres G2_DEC_REG(8, 30, 0x1)
+#define g2_bit_depth_y G2_DEC_REG(8, 21, 0xf)
+#define g2_bit_depth_c G2_DEC_REG(8, 17, 0xf)
+#define g2_idr_pic_e G2_DEC_REG(8, 16, 0x1)
+#define g2_bit_depth_pcm_y G2_DEC_REG(8, 12, 0xf)
+#define g2_bit_depth_pcm_c G2_DEC_REG(8, 8, 0xf)
+#define g2_bit_depth_y_minus8 G2_DEC_REG(8, 6, 0x3)
+#define g2_bit_depth_c_minus8 G2_DEC_REG(8, 4, 0x3)
+#define g2_rs_out_bit_depth G2_DEC_REG(8, 4, 0xf)
+#define g2_output_8_bits G2_DEC_REG(8, 3, 0x1)
+#define g2_output_format G2_DEC_REG(8, 0, 0x7)
+#define g2_pp_pix_shift G2_DEC_REG(8, 0, 0xf)
+
+#define g2_refidx1_active G2_DEC_REG(9, 19, 0x1f)
+#define g2_refidx0_active G2_DEC_REG(9, 14, 0x1f)
+#define g2_hdr_skip_length G2_DEC_REG(9, 0, 0x3fff)
+
+#define g2_start_code_e G2_DEC_REG(10, 31, 0x1)
+#define g2_init_qp_old G2_DEC_REG(10, 25, 0x3f)
+#define g2_init_qp G2_DEC_REG(10, 24, 0x7f)
+#define g2_num_tile_cols_old G2_DEC_REG(10, 20, 0x1f)
+#define g2_num_tile_cols G2_DEC_REG(10, 19, 0x1f)
+#define g2_num_tile_rows_old G2_DEC_REG(10, 15, 0x1f)
+#define g2_num_tile_rows G2_DEC_REG(10, 14, 0x1f)
+#define g2_tile_e G2_DEC_REG(10, 1, 0x1)
+#define g2_entropy_sync_e G2_DEC_REG(10, 0, 0x1)
+
+#define vp9_transform_mode G2_DEC_REG(11, 27, 0x7)
+#define vp9_filt_sharpness G2_DEC_REG(11, 21, 0x7)
+#define vp9_mcomp_filt_type G2_DEC_REG(11, 8, 0x7)
+#define vp9_high_prec_mv_e G2_DEC_REG(11, 7, 0x1)
+#define vp9_comp_pred_mode G2_DEC_REG(11, 4, 0x3)
+#define vp9_gref_sign_bias G2_DEC_REG(11, 2, 0x1)
+#define vp9_aref_sign_bias G2_DEC_REG(11, 0, 0x1)
+
+#define g2_refer_lterm_e G2_DEC_REG(12, 16, 0xffff)
+#define g2_min_cb_size G2_DEC_REG(12, 13, 0x7)
+#define g2_max_cb_size G2_DEC_REG(12, 10, 0x7)
+#define g2_min_pcm_size G2_DEC_REG(12, 7, 0x7)
+#define g2_max_pcm_size G2_DEC_REG(12, 4, 0x7)
+#define g2_pcm_e G2_DEC_REG(12, 3, 0x1)
+#define g2_transform_skip G2_DEC_REG(12, 2, 0x1)
+#define g2_transq_bypass G2_DEC_REG(12, 1, 0x1)
+#define g2_list_mod_e G2_DEC_REG(12, 0, 0x1)
+
+#define hevc_min_trb_size G2_DEC_REG(13, 13, 0x7)
+#define hevc_max_trb_size G2_DEC_REG(13, 10, 0x7)
+#define hevc_max_intra_hierdepth G2_DEC_REG(13, 7, 0x7)
+#define hevc_max_inter_hierdepth G2_DEC_REG(13, 4, 0x7)
+#define hevc_parallel_merge G2_DEC_REG(13, 0, 0xf)
+
+#define hevc_rlist_f0 G2_DEC_REG(14, 0, 0x1f)
+#define hevc_rlist_f1 G2_DEC_REG(14, 10, 0x1f)
+#define hevc_rlist_f2 G2_DEC_REG(14, 20, 0x1f)
+#define hevc_rlist_b0 G2_DEC_REG(14, 5, 0x1f)
+#define hevc_rlist_b1 G2_DEC_REG(14, 15, 0x1f)
+#define hevc_rlist_b2 G2_DEC_REG(14, 25, 0x1f)
+
+#define hevc_rlist_f3 G2_DEC_REG(15, 0, 0x1f)
+#define hevc_rlist_f4 G2_DEC_REG(15, 10, 0x1f)
+#define hevc_rlist_f5 G2_DEC_REG(15, 20, 0x1f)
+#define hevc_rlist_b3 G2_DEC_REG(15, 5, 0x1f)
+#define hevc_rlist_b4 G2_DEC_REG(15, 15, 0x1f)
+#define hevc_rlist_b5 G2_DEC_REG(15, 25, 0x1f)
+
+#define hevc_rlist_f6 G2_DEC_REG(16, 0, 0x1f)
+#define hevc_rlist_f7 G2_DEC_REG(16, 10, 0x1f)
+#define hevc_rlist_f8 G2_DEC_REG(16, 20, 0x1f)
+#define hevc_rlist_b6 G2_DEC_REG(16, 5, 0x1f)
+#define hevc_rlist_b7 G2_DEC_REG(16, 15, 0x1f)
+#define hevc_rlist_b8 G2_DEC_REG(16, 25, 0x1f)
+
+#define hevc_rlist_f9 G2_DEC_REG(17, 0, 0x1f)
+#define hevc_rlist_f10 G2_DEC_REG(17, 10, 0x1f)
+#define hevc_rlist_f11 G2_DEC_REG(17, 20, 0x1f)
+#define hevc_rlist_b9 G2_DEC_REG(17, 5, 0x1f)
+#define hevc_rlist_b10 G2_DEC_REG(17, 15, 0x1f)
+#define hevc_rlist_b11 G2_DEC_REG(17, 25, 0x1f)
+
+#define hevc_rlist_f12 G2_DEC_REG(18, 0, 0x1f)
+#define hevc_rlist_f13 G2_DEC_REG(18, 10, 0x1f)
+#define hevc_rlist_f14 G2_DEC_REG(18, 20, 0x1f)
+#define hevc_rlist_b12 G2_DEC_REG(18, 5, 0x1f)
+#define hevc_rlist_b13 G2_DEC_REG(18, 15, 0x1f)
+#define hevc_rlist_b14 G2_DEC_REG(18, 25, 0x1f)
+
+#define hevc_rlist_f15 G2_DEC_REG(19, 0, 0x1f)
+#define hevc_rlist_b15 G2_DEC_REG(19, 5, 0x1f)
+
+#define g2_partial_ctb_x G2_DEC_REG(20, 31, 0x1)
+#define g2_partial_ctb_y G2_DEC_REG(20, 30, 0x1)
+#define g2_pic_width_4x4 G2_DEC_REG(20, 16, 0xfff)
+#define g2_pic_height_4x4 G2_DEC_REG(20, 0, 0xfff)
+
+#define vp9_qp_delta_y_dc G2_DEC_REG(13, 23, 0x3f)
+#define vp9_qp_delta_ch_dc G2_DEC_REG(13, 17, 0x3f)
+#define vp9_qp_delta_ch_ac G2_DEC_REG(13, 11, 0x3f)
+#define vp9_last_sign_bias G2_DEC_REG(13, 10, 0x1)
+#define vp9_lossless_e G2_DEC_REG(13, 9, 0x1)
+#define vp9_comp_pred_var_ref1 G2_DEC_REG(13, 7, 0x3)
+#define vp9_comp_pred_var_ref0 G2_DEC_REG(13, 5, 0x3)
+#define vp9_comp_pred_fixed_ref G2_DEC_REG(13, 3, 0x3)
+#define vp9_segment_temp_upd_e G2_DEC_REG(13, 2, 0x1)
+#define vp9_segment_upd_e G2_DEC_REG(13, 1, 0x1)
+#define vp9_segment_e G2_DEC_REG(13, 0, 0x1)
+
+#define vp9_filt_level G2_DEC_REG(14, 18, 0x3f)
+#define vp9_refpic_seg0 G2_DEC_REG(14, 15, 0x7)
+#define vp9_skip_seg0 G2_DEC_REG(14, 14, 0x1)
+#define vp9_filt_level_seg0 G2_DEC_REG(14, 8, 0x3f)
+#define vp9_quant_seg0 G2_DEC_REG(14, 0, 0xff)
+
+#define vp9_refpic_seg1 G2_DEC_REG(15, 15, 0x7)
+#define vp9_skip_seg1 G2_DEC_REG(15, 14, 0x1)
+#define vp9_filt_level_seg1 G2_DEC_REG(15, 8, 0x3f)
+#define vp9_quant_seg1 G2_DEC_REG(15, 0, 0xff)
+
+#define vp9_refpic_seg2 G2_DEC_REG(16, 15, 0x7)
+#define vp9_skip_seg2 G2_DEC_REG(16, 14, 0x1)
+#define vp9_filt_level_seg2 G2_DEC_REG(16, 8, 0x3f)
+#define vp9_quant_seg2 G2_DEC_REG(16, 0, 0xff)
+
+#define vp9_refpic_seg3 G2_DEC_REG(17, 15, 0x7)
+#define vp9_skip_seg3 G2_DEC_REG(17, 14, 0x1)
+#define vp9_filt_level_seg3 G2_DEC_REG(17, 8, 0x3f)
+#define vp9_quant_seg3 G2_DEC_REG(17, 0, 0xff)
+
+#define vp9_refpic_seg4 G2_DEC_REG(18, 15, 0x7)
+#define vp9_skip_seg4 G2_DEC_REG(18, 14, 0x1)
+#define vp9_filt_level_seg4 G2_DEC_REG(18, 8, 0x3f)
+#define vp9_quant_seg4 G2_DEC_REG(18, 0, 0xff)
+
+#define vp9_refpic_seg5 G2_DEC_REG(19, 15, 0x7)
+#define vp9_skip_seg5 G2_DEC_REG(19, 14, 0x1)
+#define vp9_filt_level_seg5 G2_DEC_REG(19, 8, 0x3f)
+#define vp9_quant_seg5 G2_DEC_REG(19, 0, 0xff)
+
+#define hevc_cur_poc_00 G2_DEC_REG(46, 24, 0xff)
+#define hevc_cur_poc_01 G2_DEC_REG(46, 16, 0xff)
+#define hevc_cur_poc_02 G2_DEC_REG(46, 8, 0xff)
+#define hevc_cur_poc_03 G2_DEC_REG(46, 0, 0xff)
+
+#define hevc_cur_poc_04 G2_DEC_REG(47, 24, 0xff)
+#define hevc_cur_poc_05 G2_DEC_REG(47, 16, 0xff)
+#define hevc_cur_poc_06 G2_DEC_REG(47, 8, 0xff)
+#define hevc_cur_poc_07 G2_DEC_REG(47, 0, 0xff)
+
+#define hevc_cur_poc_08 G2_DEC_REG(48, 24, 0xff)
+#define hevc_cur_poc_09 G2_DEC_REG(48, 16, 0xff)
+#define hevc_cur_poc_10 G2_DEC_REG(48, 8, 0xff)
+#define hevc_cur_poc_11 G2_DEC_REG(48, 0, 0xff)
+
+#define hevc_cur_poc_12 G2_DEC_REG(49, 24, 0xff)
+#define hevc_cur_poc_13 G2_DEC_REG(49, 16, 0xff)
+#define hevc_cur_poc_14 G2_DEC_REG(49, 8, 0xff)
+#define hevc_cur_poc_15 G2_DEC_REG(49, 0, 0xff)
+
+#define vp9_refpic_seg6 G2_DEC_REG(31, 15, 0x7)
+#define vp9_skip_seg6 G2_DEC_REG(31, 14, 0x1)
+#define vp9_filt_level_seg6 G2_DEC_REG(31, 8, 0x3f)
+#define vp9_quant_seg6 G2_DEC_REG(31, 0, 0xff)
+
+#define vp9_refpic_seg7 G2_DEC_REG(32, 15, 0x7)
+#define vp9_skip_seg7 G2_DEC_REG(32, 14, 0x1)
+#define vp9_filt_level_seg7 G2_DEC_REG(32, 8, 0x3f)
+#define vp9_quant_seg7 G2_DEC_REG(32, 0, 0xff)
+
+#define vp9_lref_width G2_DEC_REG(33, 16, 0xffff)
+#define vp9_lref_height G2_DEC_REG(33, 0, 0xffff)
+
+#define vp9_gref_width G2_DEC_REG(34, 16, 0xffff)
+#define vp9_gref_height G2_DEC_REG(34, 0, 0xffff)
+
+#define vp9_aref_width G2_DEC_REG(35, 16, 0xffff)
+#define vp9_aref_height G2_DEC_REG(35, 0, 0xffff)
+
+#define vp9_lref_hor_scale G2_DEC_REG(36, 16, 0xffff)
+#define vp9_lref_ver_scale G2_DEC_REG(36, 0, 0xffff)
+
+#define vp9_gref_hor_scale G2_DEC_REG(37, 16, 0xffff)
+#define vp9_gref_ver_scale G2_DEC_REG(37, 0, 0xffff)
+
+#define vp9_aref_hor_scale G2_DEC_REG(38, 16, 0xffff)
+#define vp9_aref_ver_scale G2_DEC_REG(38, 0, 0xffff)
+
+#define vp9_filt_ref_adj_0 G2_DEC_REG(46, 24, 0x7f)
+#define vp9_filt_ref_adj_1 G2_DEC_REG(46, 16, 0x7f)
+#define vp9_filt_ref_adj_2 G2_DEC_REG(46, 8, 0x7f)
+#define vp9_filt_ref_adj_3 G2_DEC_REG(46, 0, 0x7f)
+
+#define vp9_filt_mb_adj_0 G2_DEC_REG(47, 24, 0x7f)
+#define vp9_filt_mb_adj_1 G2_DEC_REG(47, 16, 0x7f)
+#define vp9_filt_mb_adj_2 G2_DEC_REG(47, 8, 0x7f)
+#define vp9_filt_mb_adj_3 G2_DEC_REG(47, 0, 0x7f)
+
+#define g2_apf_threshold G2_DEC_REG(55, 0, 0xffff)
+
+#define g2_clk_gate_e G2_DEC_REG(58, 16, 0x1)
+#define g2_double_buffer_e G2_DEC_REG(58, 15, 0x1)
+#define g2_buswidth G2_DEC_REG(58, 8, 0x7)
+#define g2_max_burst G2_DEC_REG(58, 0, 0xff)
+
+#define g2_down_scale_e G2_DEC_REG(184, 7, 0x1)
+#define g2_down_scale_y G2_DEC_REG(184, 2, 0x3)
+#define g2_down_scale_x G2_DEC_REG(184, 0, 0x3)
+
+#define G2_REG_CONFIG G2_SWREG(58)
+#define G2_REG_CONFIG_DEC_CLK_GATE_E BIT(16)
+#define G2_REG_CONFIG_DEC_CLK_GATE_IDLE_E BIT(17)
+
+#define G2_OUT_LUMA_ADDR (G2_SWREG(65))
+#define G2_REF_LUMA_ADDR(i) (G2_SWREG(67) + ((i) * 0x8))
+#define G2_VP9_SEGMENT_WRITE_ADDR (G2_SWREG(79))
+#define G2_VP9_SEGMENT_READ_ADDR (G2_SWREG(81))
+#define G2_OUT_CHROMA_ADDR (G2_SWREG(99))
+#define G2_REF_CHROMA_ADDR(i) (G2_SWREG(101) + ((i) * 0x8))
+#define G2_OUT_MV_ADDR (G2_SWREG(133))
+#define G2_REF_MV_ADDR(i) (G2_SWREG(135) + ((i) * 0x8))
+#define G2_TILE_SIZES_ADDR (G2_SWREG(167))
+#define G2_STREAM_ADDR (G2_SWREG(169))
+#define G2_HEVC_SCALING_LIST_ADDR (G2_SWREG(171))
+#define G2_VP9_CTX_COUNT_ADDR (G2_SWREG(171))
+#define G2_VP9_PROBS_ADDR (G2_SWREG(173))
+#define G2_RS_OUT_LUMA_ADDR (G2_SWREG(175))
+#define G2_RS_OUT_CHROMA_ADDR (G2_SWREG(177))
+#define G2_TILE_FILTER_ADDR (G2_SWREG(179))
+#define G2_TILE_SAO_ADDR (G2_SWREG(181))
+#define G2_TILE_BSD_ADDR (G2_SWREG(183))
+#define G2_DS_DST (G2_SWREG(186))
+#define G2_DS_DST_CHR (G2_SWREG(188))
+
+#define g2_strm_buffer_len G2_DEC_REG(258, 0, 0xffffffff)
+#define g2_strm_start_offset G2_DEC_REG(259, 0, 0xffffffff)
+
+#endif
diff --git a/drivers/media/platform/verisilicon/hantro_g2_vp9_dec.c b/drivers/media/platform/verisilicon/hantro_g2_vp9_dec.c
new file mode 100644
index 0000000000..6fc4b55551
--- /dev/null
+++ b/drivers/media/platform/verisilicon/hantro_g2_vp9_dec.c
@@ -0,0 +1,1014 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hantro VP9 codec driver
+ *
+ * Copyright (C) 2021 Collabora Ltd.
+ */
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-contig.h>
+#include <media/videobuf2-v4l2.h>
+#include <linux/kernel.h>
+#include <linux/vmalloc.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/v4l2-vp9.h>
+
+#include "hantro.h"
+#include "hantro_vp9.h"
+#include "hantro_g2_regs.h"
+
+#define G2_ALIGN 16
+
+enum hantro_ref_frames {
+ INTRA_FRAME = 0,
+ LAST_FRAME = 1,
+ GOLDEN_FRAME = 2,
+ ALTREF_FRAME = 3,
+ MAX_REF_FRAMES = 4
+};
+
+static int start_prepare_run(struct hantro_ctx *ctx, const struct v4l2_ctrl_vp9_frame **dec_params)
+{
+ const struct v4l2_ctrl_vp9_compressed_hdr *prob_updates;
+ struct hantro_vp9_dec_hw_ctx *vp9_ctx = &ctx->vp9_dec;
+ struct v4l2_ctrl *ctrl;
+ unsigned int fctx_idx;
+
+	/* V4L2-specific preparation */
+ hantro_start_prepare_run(ctx);
+
+ ctrl = v4l2_ctrl_find(&ctx->ctrl_handler, V4L2_CID_STATELESS_VP9_FRAME);
+ if (WARN_ON(!ctrl))
+ return -EINVAL;
+ *dec_params = ctrl->p_cur.p;
+
+ ctrl = v4l2_ctrl_find(&ctx->ctrl_handler, V4L2_CID_STATELESS_VP9_COMPRESSED_HDR);
+ if (WARN_ON(!ctrl))
+ return -EINVAL;
+ prob_updates = ctrl->p_cur.p;
+ vp9_ctx->cur.tx_mode = prob_updates->tx_mode;
+
+ /*
+ * VP9-specific setup:
+ *
+ * by this point the userspace has done all parts of 6.2 uncompressed_header()
+ * except this fragment:
+ * if ( FrameIsIntra || error_resilient_mode ) {
+ * setup_past_independence ( )
+ * if ( frame_type == KEY_FRAME || error_resilient_mode == 1 ||
+ * reset_frame_context == 3 ) {
+ * for ( i = 0; i < 4; i ++ ) {
+ * save_probs( i )
+ * }
+ * } else if ( reset_frame_context == 2 ) {
+ * save_probs( frame_context_idx )
+ * }
+ * frame_context_idx = 0
+ * }
+ */
+ fctx_idx = v4l2_vp9_reset_frame_ctx(*dec_params, vp9_ctx->frame_context);
+ vp9_ctx->cur.frame_context_idx = fctx_idx;
+
+ /* 6.1 frame(sz): load_probs() and load_probs2() */
+ vp9_ctx->probability_tables = vp9_ctx->frame_context[fctx_idx];
+
+ /*
+ * The userspace has also performed 6.3 compressed_header(), but handling the
+ * probs in a special way. All probs which need updating, except MV-related,
+ * have been read from the bitstream and translated through inv_map_table[],
+ * but no 6.3.6 inv_recenter_nonneg(v, m) has been performed. The values passed
+ * by userspace are either translated values (there are no 0 values in
+ * inv_map_table[]), or zero to indicate no update. All MV-related probs which need
+ * updating have been read from the bitstream and (mv_prob << 1) | 1 has been
+ * performed. The values passed by userspace are either new values
+ * to replace old ones (the above mentioned shift and bitwise or never result in
+ * a zero) or zero to indicate no update.
+ * fw_update_probs() performs actual probs updates or leaves probs as-is
+ * for values for which a zero was passed from userspace.
+ */
+ v4l2_vp9_fw_update_probs(&vp9_ctx->probability_tables, prob_updates, *dec_params);
+
+ return 0;
+}
+
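+/*
+ * Same decoded buffer layout as on the HEVC side: luma plane first, chroma
+ * at width * height * bytes_per_pixel, and the motion vector buffer after
+ * both planes, aligned to G2_ALIGN. A 10-bit stream uses two bytes per
+ * pixel, so a 1280x720 10-bit frame places chroma at 1843200 bytes.
+ */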
+static size_t chroma_offset(const struct hantro_ctx *ctx,
+ const struct v4l2_ctrl_vp9_frame *dec_params)
+{
+ int bytes_per_pixel = dec_params->bit_depth == 8 ? 1 : 2;
+
+ return ctx->src_fmt.width * ctx->src_fmt.height * bytes_per_pixel;
+}
+
+static size_t mv_offset(const struct hantro_ctx *ctx,
+ const struct v4l2_ctrl_vp9_frame *dec_params)
+{
+ size_t cr_offset = chroma_offset(ctx, dec_params);
+
+ return ALIGN((cr_offset * 3) / 2, G2_ALIGN);
+}
+
+static struct hantro_decoded_buffer *
+get_ref_buf(struct hantro_ctx *ctx, struct vb2_v4l2_buffer *dst, u64 timestamp)
+{
+ struct v4l2_m2m_ctx *m2m_ctx = ctx->fh.m2m_ctx;
+ struct vb2_queue *cap_q = &m2m_ctx->cap_q_ctx.q;
+ struct vb2_buffer *buf;
+
+	/*
+	 * If the reference is unused or invalid, fall back to the current
+	 * destination buffer.
+	 */
+ buf = vb2_find_buffer(cap_q, timestamp);
+ if (!buf)
+ buf = &dst->vb2_buf;
+
+ return vb2_to_hantro_decoded_buf(buf);
+}
+
+static void update_dec_buf_info(struct hantro_decoded_buffer *buf,
+ const struct v4l2_ctrl_vp9_frame *dec_params)
+{
+ buf->vp9.width = dec_params->frame_width_minus_1 + 1;
+ buf->vp9.height = dec_params->frame_height_minus_1 + 1;
+ buf->vp9.bit_depth = dec_params->bit_depth;
+}
+
+static void update_ctx_cur_info(struct hantro_vp9_dec_hw_ctx *vp9_ctx,
+ struct hantro_decoded_buffer *buf,
+ const struct v4l2_ctrl_vp9_frame *dec_params)
+{
+ vp9_ctx->cur.valid = true;
+ vp9_ctx->cur.reference_mode = dec_params->reference_mode;
+ vp9_ctx->cur.interpolation_filter = dec_params->interpolation_filter;
+ vp9_ctx->cur.flags = dec_params->flags;
+ vp9_ctx->cur.timestamp = buf->base.vb.vb2_buf.timestamp;
+}
+
+static void config_output(struct hantro_ctx *ctx,
+ struct hantro_decoded_buffer *dst,
+ const struct v4l2_ctrl_vp9_frame *dec_params)
+{
+ dma_addr_t luma_addr, chroma_addr, mv_addr;
+
+ hantro_reg_write(ctx->dev, &g2_out_dis, 0);
+ if (!ctx->dev->variant->legacy_regs)
+ hantro_reg_write(ctx->dev, &g2_output_format, 0);
+
+ luma_addr = hantro_get_dec_buf_addr(ctx, &dst->base.vb.vb2_buf);
+ hantro_write_addr(ctx->dev, G2_OUT_LUMA_ADDR, luma_addr);
+
+ chroma_addr = luma_addr + chroma_offset(ctx, dec_params);
+ hantro_write_addr(ctx->dev, G2_OUT_CHROMA_ADDR, chroma_addr);
+
+ mv_addr = luma_addr + mv_offset(ctx, dec_params);
+ hantro_write_addr(ctx->dev, G2_OUT_MV_ADDR, mv_addr);
+}
+
+struct hantro_vp9_ref_reg {
+ const struct hantro_reg width;
+ const struct hantro_reg height;
+ const struct hantro_reg hor_scale;
+ const struct hantro_reg ver_scale;
+ u32 y_base;
+ u32 c_base;
+};
+
+static void config_ref(struct hantro_ctx *ctx,
+ struct hantro_decoded_buffer *dst,
+ const struct hantro_vp9_ref_reg *ref_reg,
+ const struct v4l2_ctrl_vp9_frame *dec_params,
+ u64 ref_ts)
+{
+ struct hantro_decoded_buffer *buf;
+ dma_addr_t luma_addr, chroma_addr;
+ u32 refw, refh;
+
+ buf = get_ref_buf(ctx, &dst->base.vb, ref_ts);
+ refw = buf->vp9.width;
+ refh = buf->vp9.height;
+
+ hantro_reg_write(ctx->dev, &ref_reg->width, refw);
+ hantro_reg_write(ctx->dev, &ref_reg->height, refh);
+
+ hantro_reg_write(ctx->dev, &ref_reg->hor_scale, (refw << 14) / dst->vp9.width);
+ hantro_reg_write(ctx->dev, &ref_reg->ver_scale, (refh << 14) / dst->vp9.height);
+
+ luma_addr = hantro_get_dec_buf_addr(ctx, &buf->base.vb.vb2_buf);
+ hantro_write_addr(ctx->dev, ref_reg->y_base, luma_addr);
+
+ chroma_addr = luma_addr + chroma_offset(ctx, dec_params);
+ hantro_write_addr(ctx->dev, ref_reg->c_base, chroma_addr);
+}
+
+static void config_ref_registers(struct hantro_ctx *ctx,
+ const struct v4l2_ctrl_vp9_frame *dec_params,
+ struct hantro_decoded_buffer *dst,
+ struct hantro_decoded_buffer *mv_ref)
+{
+ static const struct hantro_vp9_ref_reg ref_regs[] = {
+ {
+ /* Last */
+ .width = vp9_lref_width,
+ .height = vp9_lref_height,
+ .hor_scale = vp9_lref_hor_scale,
+ .ver_scale = vp9_lref_ver_scale,
+ .y_base = G2_REF_LUMA_ADDR(0),
+ .c_base = G2_REF_CHROMA_ADDR(0),
+ }, {
+ /* Golden */
+ .width = vp9_gref_width,
+ .height = vp9_gref_height,
+ .hor_scale = vp9_gref_hor_scale,
+ .ver_scale = vp9_gref_ver_scale,
+ .y_base = G2_REF_LUMA_ADDR(4),
+ .c_base = G2_REF_CHROMA_ADDR(4),
+ }, {
+ /* Altref */
+ .width = vp9_aref_width,
+ .height = vp9_aref_height,
+ .hor_scale = vp9_aref_hor_scale,
+ .ver_scale = vp9_aref_ver_scale,
+ .y_base = G2_REF_LUMA_ADDR(5),
+ .c_base = G2_REF_CHROMA_ADDR(5),
+ },
+ };
+ dma_addr_t mv_addr;
+
+ config_ref(ctx, dst, &ref_regs[0], dec_params, dec_params->last_frame_ts);
+ config_ref(ctx, dst, &ref_regs[1], dec_params, dec_params->golden_frame_ts);
+ config_ref(ctx, dst, &ref_regs[2], dec_params, dec_params->alt_frame_ts);
+
+ mv_addr = hantro_get_dec_buf_addr(ctx, &mv_ref->base.vb.vb2_buf) +
+ mv_offset(ctx, dec_params);
+ hantro_write_addr(ctx->dev, G2_REF_MV_ADDR(0), mv_addr);
+
+ hantro_reg_write(ctx->dev, &vp9_last_sign_bias,
+ dec_params->ref_frame_sign_bias & V4L2_VP9_SIGN_BIAS_LAST ? 1 : 0);
+
+ hantro_reg_write(ctx->dev, &vp9_gref_sign_bias,
+ dec_params->ref_frame_sign_bias & V4L2_VP9_SIGN_BIAS_GOLDEN ? 1 : 0);
+
+ hantro_reg_write(ctx->dev, &vp9_aref_sign_bias,
+ dec_params->ref_frame_sign_bias & V4L2_VP9_SIGN_BIAS_ALT ? 1 : 0);
+}
+
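+/*
+ * Partition `sbs` superblocks into `tiles` groups, placing boundary i at
+ * i * sbs / tiles as the VP9 spec does. For example, 10 superblock columns
+ * over 4 tile columns gives widths 2, 3, 2 and 3.
+ */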
+static void recompute_tile_info(unsigned short *tile_info, unsigned int tiles, unsigned int sbs)
+{
+ int i;
+ unsigned int accumulated = 0;
+ unsigned int next_accumulated;
+
+ for (i = 1; i <= tiles; ++i) {
+ next_accumulated = i * sbs / tiles;
+ *tile_info++ = next_accumulated - accumulated;
+ accumulated = next_accumulated;
+ }
+}
+
+static void
+recompute_tile_rc_info(struct hantro_ctx *ctx,
+ unsigned int tile_r, unsigned int tile_c,
+ unsigned int sbs_r, unsigned int sbs_c)
+{
+ struct hantro_vp9_dec_hw_ctx *vp9_ctx = &ctx->vp9_dec;
+
+ recompute_tile_info(vp9_ctx->tile_r_info, tile_r, sbs_r);
+ recompute_tile_info(vp9_ctx->tile_c_info, tile_c, sbs_c);
+
+ vp9_ctx->last_tile_r = tile_r;
+ vp9_ctx->last_tile_c = tile_c;
+ vp9_ctx->last_sbs_r = sbs_r;
+ vp9_ctx->last_sbs_c = sbs_c;
+}
+
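+/*
+ * Degenerate case: with more tile rows than superblock rows, the leading
+ * one or two tile rows come out empty. first_tile_row() returns the first
+ * row that actually carries superblocks; fill_tile_info() then folds the
+ * skipped rows' heights into the first emitted row.
+ */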
+static inline unsigned int first_tile_row(unsigned int tile_r, unsigned int sbs_r)
+{
+ if (tile_r == sbs_r + 1)
+ return 1;
+
+ if (tile_r == sbs_r + 2)
+ return 2;
+
+ return 0;
+}
+
+static void
+fill_tile_info(struct hantro_ctx *ctx,
+ unsigned int tile_r, unsigned int tile_c,
+ unsigned int sbs_r, unsigned int sbs_c,
+ unsigned short *tile_mem)
+{
+ struct hantro_vp9_dec_hw_ctx *vp9_ctx = &ctx->vp9_dec;
+ unsigned int i, j;
+ bool first = true;
+
+ for (i = first_tile_row(tile_r, sbs_r); i < tile_r; ++i) {
+ unsigned short r_info = vp9_ctx->tile_r_info[i];
+
+ if (first) {
+ if (i > 0)
+ r_info += vp9_ctx->tile_r_info[0];
+ if (i == 2)
+ r_info += vp9_ctx->tile_r_info[1];
+ first = false;
+ }
+ for (j = 0; j < tile_c; ++j) {
+ *tile_mem++ = vp9_ctx->tile_c_info[j];
+ *tile_mem++ = r_info;
+ }
+ }
+}
+
+static void
+config_tiles(struct hantro_ctx *ctx,
+ const struct v4l2_ctrl_vp9_frame *dec_params,
+ struct hantro_decoded_buffer *dst)
+{
+ struct hantro_vp9_dec_hw_ctx *vp9_ctx = &ctx->vp9_dec;
+ struct hantro_aux_buf *misc = &vp9_ctx->misc;
+ struct hantro_aux_buf *tile_edge = &vp9_ctx->tile_edge;
+ dma_addr_t addr;
+ unsigned short *tile_mem;
+ unsigned int rows, cols;
+
+ addr = misc->dma + vp9_ctx->tile_info_offset;
+ hantro_write_addr(ctx->dev, G2_TILE_SIZES_ADDR, addr);
+
+ tile_mem = misc->cpu + vp9_ctx->tile_info_offset;
+ if (dec_params->tile_cols_log2 || dec_params->tile_rows_log2) {
+ unsigned int tile_r = (1 << dec_params->tile_rows_log2);
+ unsigned int tile_c = (1 << dec_params->tile_cols_log2);
+ unsigned int sbs_r = hantro_vp9_num_sbs(dst->vp9.height);
+ unsigned int sbs_c = hantro_vp9_num_sbs(dst->vp9.width);
+
+ if (tile_r != vp9_ctx->last_tile_r || tile_c != vp9_ctx->last_tile_c ||
+ sbs_r != vp9_ctx->last_sbs_r || sbs_c != vp9_ctx->last_sbs_c)
+ recompute_tile_rc_info(ctx, tile_r, tile_c, sbs_r, sbs_c);
+
+ fill_tile_info(ctx, tile_r, tile_c, sbs_r, sbs_c, tile_mem);
+
+ cols = tile_c;
+ rows = tile_r;
+ hantro_reg_write(ctx->dev, &g2_tile_e, 1);
+ } else {
+ tile_mem[0] = hantro_vp9_num_sbs(dst->vp9.width);
+ tile_mem[1] = hantro_vp9_num_sbs(dst->vp9.height);
+
+ cols = 1;
+ rows = 1;
+ hantro_reg_write(ctx->dev, &g2_tile_e, 0);
+ }
+
+ if (ctx->dev->variant->legacy_regs) {
+ hantro_reg_write(ctx->dev, &g2_num_tile_cols_old, cols);
+ hantro_reg_write(ctx->dev, &g2_num_tile_rows_old, rows);
+ } else {
+ hantro_reg_write(ctx->dev, &g2_num_tile_cols, cols);
+ hantro_reg_write(ctx->dev, &g2_num_tile_rows, rows);
+ }
+
+ /* provide aux buffers even if no tiles are used */
+ addr = tile_edge->dma;
+ hantro_write_addr(ctx->dev, G2_TILE_FILTER_ADDR, addr);
+
+ addr = tile_edge->dma + vp9_ctx->bsd_ctrl_offset;
+ hantro_write_addr(ctx->dev, G2_TILE_BSD_ADDR, addr);
+}
+
+static void
+update_feat_and_flag(struct hantro_vp9_dec_hw_ctx *vp9_ctx,
+ const struct v4l2_vp9_segmentation *seg,
+ unsigned int feature,
+ unsigned int segid)
+{
+ u8 mask = V4L2_VP9_SEGMENT_FEATURE_ENABLED(feature);
+
+ vp9_ctx->feature_data[segid][feature] = seg->feature_data[segid][feature];
+ vp9_ctx->feature_enabled[segid] &= ~mask;
+ vp9_ctx->feature_enabled[segid] |= (seg->feature_enabled[segid] & mask);
+}
+
+static inline s16 clip3(s16 x, s16 y, s16 z)
+{
+ return (z < x) ? x : (z > y) ? y : z;
+}
+
+static s16 feat_val_clip3(s16 feat_val, s16 feature_data, bool absolute, u8 clip)
+{
+	if (absolute)
+		return feature_data;
+
+	/* Clamp to the feature's valid range (255 for ALT_Q, 63 for ALT_L). */
+	return clip3(0, clip, feat_val + feature_data);
+}
+
+static void config_segment(struct hantro_ctx *ctx, const struct v4l2_ctrl_vp9_frame *dec_params)
+{
+ struct hantro_vp9_dec_hw_ctx *vp9_ctx = &ctx->vp9_dec;
+ const struct v4l2_vp9_segmentation *seg;
+ s16 feat_val;
+ unsigned char feat_id;
+ unsigned int segid;
+ bool segment_enabled, absolute, update_data;
+
+ static const struct hantro_reg seg_regs[8][V4L2_VP9_SEG_LVL_MAX] = {
+ { vp9_quant_seg0, vp9_filt_level_seg0, vp9_refpic_seg0, vp9_skip_seg0 },
+ { vp9_quant_seg1, vp9_filt_level_seg1, vp9_refpic_seg1, vp9_skip_seg1 },
+ { vp9_quant_seg2, vp9_filt_level_seg2, vp9_refpic_seg2, vp9_skip_seg2 },
+ { vp9_quant_seg3, vp9_filt_level_seg3, vp9_refpic_seg3, vp9_skip_seg3 },
+ { vp9_quant_seg4, vp9_filt_level_seg4, vp9_refpic_seg4, vp9_skip_seg4 },
+ { vp9_quant_seg5, vp9_filt_level_seg5, vp9_refpic_seg5, vp9_skip_seg5 },
+ { vp9_quant_seg6, vp9_filt_level_seg6, vp9_refpic_seg6, vp9_skip_seg6 },
+ { vp9_quant_seg7, vp9_filt_level_seg7, vp9_refpic_seg7, vp9_skip_seg7 },
+ };
+
+ segment_enabled = !!(dec_params->seg.flags & V4L2_VP9_SEGMENTATION_FLAG_ENABLED);
+ hantro_reg_write(ctx->dev, &vp9_segment_e, segment_enabled);
+ hantro_reg_write(ctx->dev, &vp9_segment_upd_e,
+ !!(dec_params->seg.flags & V4L2_VP9_SEGMENTATION_FLAG_UPDATE_MAP));
+ hantro_reg_write(ctx->dev, &vp9_segment_temp_upd_e,
+ !!(dec_params->seg.flags & V4L2_VP9_SEGMENTATION_FLAG_TEMPORAL_UPDATE));
+
+ seg = &dec_params->seg;
+ absolute = !!(seg->flags & V4L2_VP9_SEGMENTATION_FLAG_ABS_OR_DELTA_UPDATE);
+ update_data = !!(seg->flags & V4L2_VP9_SEGMENTATION_FLAG_UPDATE_DATA);
+
+ for (segid = 0; segid < 8; ++segid) {
+ /* Quantizer segment feature */
+ feat_id = V4L2_VP9_SEG_LVL_ALT_Q;
+ feat_val = dec_params->quant.base_q_idx;
+ if (segment_enabled) {
+ if (update_data)
+ update_feat_and_flag(vp9_ctx, seg, feat_id, segid);
+ if (v4l2_vp9_seg_feat_enabled(vp9_ctx->feature_enabled, feat_id, segid))
+ feat_val = feat_val_clip3(feat_val,
+ vp9_ctx->feature_data[segid][feat_id],
+ absolute, 255);
+ }
+ hantro_reg_write(ctx->dev, &seg_regs[segid][feat_id], feat_val);
+
+ /* Loop filter segment feature */
+ feat_id = V4L2_VP9_SEG_LVL_ALT_L;
+ feat_val = dec_params->lf.level;
+ if (segment_enabled) {
+ if (update_data)
+ update_feat_and_flag(vp9_ctx, seg, feat_id, segid);
+ if (v4l2_vp9_seg_feat_enabled(vp9_ctx->feature_enabled, feat_id, segid))
+ feat_val = feat_val_clip3(feat_val,
+ vp9_ctx->feature_data[segid][feat_id],
+ absolute, 63);
+ }
+ hantro_reg_write(ctx->dev, &seg_regs[segid][feat_id], feat_val);
+
+ /* Reference frame segment feature */
+ feat_id = V4L2_VP9_SEG_LVL_REF_FRAME;
+ feat_val = 0;
+ if (segment_enabled) {
+ if (update_data)
+ update_feat_and_flag(vp9_ctx, seg, feat_id, segid);
+ if (!(dec_params->flags & V4L2_VP9_FRAME_FLAG_KEY_FRAME) &&
+ v4l2_vp9_seg_feat_enabled(vp9_ctx->feature_enabled, feat_id, segid))
+ feat_val = vp9_ctx->feature_data[segid][feat_id] + 1;
+ }
+ hantro_reg_write(ctx->dev, &seg_regs[segid][feat_id], feat_val);
+
+ /* Skip segment feature */
+ feat_id = V4L2_VP9_SEG_LVL_SKIP;
+ feat_val = 0;
+ if (segment_enabled) {
+ if (update_data)
+ update_feat_and_flag(vp9_ctx, seg, feat_id, segid);
+ feat_val = v4l2_vp9_seg_feat_enabled(vp9_ctx->feature_enabled,
+ feat_id, segid) ? 1 : 0;
+ }
+ hantro_reg_write(ctx->dev, &seg_regs[segid][feat_id], feat_val);
+ }
+}
+
+static void config_loop_filter(struct hantro_ctx *ctx, const struct v4l2_ctrl_vp9_frame *dec_params)
+{
+ bool d = dec_params->lf.flags & V4L2_VP9_LOOP_FILTER_FLAG_DELTA_ENABLED;
+
+ hantro_reg_write(ctx->dev, &vp9_filt_level, dec_params->lf.level);
+ hantro_reg_write(ctx->dev, &g2_out_filtering_dis, dec_params->lf.level == 0);
+ hantro_reg_write(ctx->dev, &vp9_filt_sharpness, dec_params->lf.sharpness);
+
+ hantro_reg_write(ctx->dev, &vp9_filt_ref_adj_0, d ? dec_params->lf.ref_deltas[0] : 0);
+ hantro_reg_write(ctx->dev, &vp9_filt_ref_adj_1, d ? dec_params->lf.ref_deltas[1] : 0);
+ hantro_reg_write(ctx->dev, &vp9_filt_ref_adj_2, d ? dec_params->lf.ref_deltas[2] : 0);
+ hantro_reg_write(ctx->dev, &vp9_filt_ref_adj_3, d ? dec_params->lf.ref_deltas[3] : 0);
+ hantro_reg_write(ctx->dev, &vp9_filt_mb_adj_0, d ? dec_params->lf.mode_deltas[0] : 0);
+ hantro_reg_write(ctx->dev, &vp9_filt_mb_adj_1, d ? dec_params->lf.mode_deltas[1] : 0);
+}
+
+static void config_picture_dimensions(struct hantro_ctx *ctx, struct hantro_decoded_buffer *dst)
+{
+ u32 pic_w_4x4, pic_h_4x4;
+
+ hantro_reg_write(ctx->dev, &g2_pic_width_in_cbs, (dst->vp9.width + 7) / 8);
+ hantro_reg_write(ctx->dev, &g2_pic_height_in_cbs, (dst->vp9.height + 7) / 8);
+ pic_w_4x4 = roundup(dst->vp9.width, 8) >> 2;
+ pic_h_4x4 = roundup(dst->vp9.height, 8) >> 2;
+ hantro_reg_write(ctx->dev, &g2_pic_width_4x4, pic_w_4x4);
+ hantro_reg_write(ctx->dev, &g2_pic_height_4x4, pic_h_4x4);
+}
+
+static void
+config_bit_depth(struct hantro_ctx *ctx, const struct v4l2_ctrl_vp9_frame *dec_params)
+{
+ if (ctx->dev->variant->legacy_regs) {
+ hantro_reg_write(ctx->dev, &g2_bit_depth_y, dec_params->bit_depth);
+ hantro_reg_write(ctx->dev, &g2_bit_depth_c, dec_params->bit_depth);
+ hantro_reg_write(ctx->dev, &g2_pix_shift, 0);
+ } else {
+ hantro_reg_write(ctx->dev, &g2_bit_depth_y_minus8, dec_params->bit_depth - 8);
+ hantro_reg_write(ctx->dev, &g2_bit_depth_c_minus8, dec_params->bit_depth - 8);
+ }
+}
+
+static inline bool is_lossless(const struct v4l2_vp9_quantization *quant)
+{
+ return quant->base_q_idx == 0 && quant->delta_q_uv_ac == 0 &&
+ quant->delta_q_uv_dc == 0 && quant->delta_q_y_dc == 0;
+}
+
+static void
+config_quant(struct hantro_ctx *ctx, const struct v4l2_ctrl_vp9_frame *dec_params)
+{
+ hantro_reg_write(ctx->dev, &vp9_qp_delta_y_dc, dec_params->quant.delta_q_y_dc);
+ hantro_reg_write(ctx->dev, &vp9_qp_delta_ch_dc, dec_params->quant.delta_q_uv_dc);
+ hantro_reg_write(ctx->dev, &vp9_qp_delta_ch_ac, dec_params->quant.delta_q_uv_ac);
+ hantro_reg_write(ctx->dev, &vp9_lossless_e, is_lossless(&dec_params->quant));
+}
+
+static u32
+hantro_interp_filter_from_v4l2(unsigned int interpolation_filter)
+{
+ switch (interpolation_filter) {
+ case V4L2_VP9_INTERP_FILTER_EIGHTTAP:
+ return 0x1;
+ case V4L2_VP9_INTERP_FILTER_EIGHTTAP_SMOOTH:
+ return 0;
+ case V4L2_VP9_INTERP_FILTER_EIGHTTAP_SHARP:
+ return 0x2;
+ case V4L2_VP9_INTERP_FILTER_BILINEAR:
+ return 0x3;
+ case V4L2_VP9_INTERP_FILTER_SWITCHABLE:
+ return 0x4;
+ }
+
+ return 0;
+}
+
+static void
+config_others(struct hantro_ctx *ctx, const struct v4l2_ctrl_vp9_frame *dec_params,
+ bool intra_only, bool resolution_change)
+{
+ struct hantro_vp9_dec_hw_ctx *vp9_ctx = &ctx->vp9_dec;
+
+ hantro_reg_write(ctx->dev, &g2_idr_pic_e, intra_only);
+
+ hantro_reg_write(ctx->dev, &vp9_transform_mode, vp9_ctx->cur.tx_mode);
+
+ hantro_reg_write(ctx->dev, &vp9_mcomp_filt_type, intra_only ?
+ 0 : hantro_interp_filter_from_v4l2(dec_params->interpolation_filter));
+
+ hantro_reg_write(ctx->dev, &vp9_high_prec_mv_e,
+ !!(dec_params->flags & V4L2_VP9_FRAME_FLAG_ALLOW_HIGH_PREC_MV));
+
+ hantro_reg_write(ctx->dev, &vp9_comp_pred_mode, dec_params->reference_mode);
+
+ hantro_reg_write(ctx->dev, &g2_tempor_mvp_e,
+ !(dec_params->flags & V4L2_VP9_FRAME_FLAG_ERROR_RESILIENT) &&
+ !(dec_params->flags & V4L2_VP9_FRAME_FLAG_KEY_FRAME) &&
+ !(vp9_ctx->last.flags & V4L2_VP9_FRAME_FLAG_KEY_FRAME) &&
+ !(dec_params->flags & V4L2_VP9_FRAME_FLAG_INTRA_ONLY) &&
+ !resolution_change &&
+ vp9_ctx->last.flags & V4L2_VP9_FRAME_FLAG_SHOW_FRAME
+ );
+
+ hantro_reg_write(ctx->dev, &g2_write_mvs_e,
+ !(dec_params->flags & V4L2_VP9_FRAME_FLAG_KEY_FRAME));
+}
+
+static void
+config_compound_reference(struct hantro_ctx *ctx,
+ const struct v4l2_ctrl_vp9_frame *dec_params)
+{
+ u32 comp_fixed_ref, comp_var_ref[2];
+ bool last_ref_frame_sign_bias;
+ bool golden_ref_frame_sign_bias;
+ bool alt_ref_frame_sign_bias;
+	bool comp_ref_allowed = false;
+
+ comp_fixed_ref = 0;
+ comp_var_ref[0] = 0;
+ comp_var_ref[1] = 0;
+
+ last_ref_frame_sign_bias = dec_params->ref_frame_sign_bias & V4L2_VP9_SIGN_BIAS_LAST;
+ golden_ref_frame_sign_bias = dec_params->ref_frame_sign_bias & V4L2_VP9_SIGN_BIAS_GOLDEN;
+ alt_ref_frame_sign_bias = dec_params->ref_frame_sign_bias & V4L2_VP9_SIGN_BIAS_ALT;
+
+ /* 6.3.12 Frame reference mode syntax */
+ comp_ref_allowed |= golden_ref_frame_sign_bias != last_ref_frame_sign_bias;
+ comp_ref_allowed |= alt_ref_frame_sign_bias != last_ref_frame_sign_bias;
+
+ if (comp_ref_allowed) {
+ if (last_ref_frame_sign_bias ==
+ golden_ref_frame_sign_bias) {
+ comp_fixed_ref = ALTREF_FRAME;
+ comp_var_ref[0] = LAST_FRAME;
+ comp_var_ref[1] = GOLDEN_FRAME;
+ } else if (last_ref_frame_sign_bias ==
+ alt_ref_frame_sign_bias) {
+ comp_fixed_ref = GOLDEN_FRAME;
+ comp_var_ref[0] = LAST_FRAME;
+ comp_var_ref[1] = ALTREF_FRAME;
+ } else {
+ comp_fixed_ref = LAST_FRAME;
+ comp_var_ref[0] = GOLDEN_FRAME;
+ comp_var_ref[1] = ALTREF_FRAME;
+ }
+ }
+
+ hantro_reg_write(ctx->dev, &vp9_comp_pred_fixed_ref, comp_fixed_ref);
+ hantro_reg_write(ctx->dev, &vp9_comp_pred_var_ref0, comp_var_ref[0]);
+ hantro_reg_write(ctx->dev, &vp9_comp_pred_var_ref1, comp_var_ref[1]);
+}
+
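+/*
+ * The frame context stores three probabilities per coefficient node, while
+ * the hardware layout reserves four bytes per node: copy the three values
+ * and zero the padding byte.
+ */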
+#define INNER_LOOP \
+do { \
+ for (m = 0; m < ARRAY_SIZE(adaptive->coef[0][0][0][0]); ++m) { \
+ memcpy(adaptive->coef[i][j][k][l][m], \
+ probs->coef[i][j][k][l][m], \
+ sizeof(probs->coef[i][j][k][l][m])); \
+ \
+ adaptive->coef[i][j][k][l][m][3] = 0; \
+ } \
+} while (0)
+
+static void config_probs(struct hantro_ctx *ctx, const struct v4l2_ctrl_vp9_frame *dec_params)
+{
+ struct hantro_vp9_dec_hw_ctx *vp9_ctx = &ctx->vp9_dec;
+ struct hantro_aux_buf *misc = &vp9_ctx->misc;
+ struct hantro_g2_all_probs *all_probs = misc->cpu;
+ struct hantro_g2_probs *adaptive;
+ struct hantro_g2_mv_probs *mv;
+ const struct v4l2_vp9_segmentation *seg = &dec_params->seg;
+ const struct v4l2_vp9_frame_context *probs = &vp9_ctx->probability_tables;
+ int i, j, k, l, m;
+
+ for (i = 0; i < ARRAY_SIZE(all_probs->kf_y_mode_prob); ++i)
+ for (j = 0; j < ARRAY_SIZE(all_probs->kf_y_mode_prob[0]); ++j) {
+ memcpy(all_probs->kf_y_mode_prob[i][j],
+ v4l2_vp9_kf_y_mode_prob[i][j],
+ ARRAY_SIZE(all_probs->kf_y_mode_prob[i][j]));
+
+ all_probs->kf_y_mode_prob_tail[i][j][0] =
+ v4l2_vp9_kf_y_mode_prob[i][j][8];
+ }
+
+ memcpy(all_probs->mb_segment_tree_probs, seg->tree_probs,
+ sizeof(all_probs->mb_segment_tree_probs));
+
+ memcpy(all_probs->segment_pred_probs, seg->pred_probs,
+ sizeof(all_probs->segment_pred_probs));
+
+ for (i = 0; i < ARRAY_SIZE(all_probs->kf_uv_mode_prob); ++i) {
+ memcpy(all_probs->kf_uv_mode_prob[i], v4l2_vp9_kf_uv_mode_prob[i],
+ ARRAY_SIZE(all_probs->kf_uv_mode_prob[i]));
+
+ all_probs->kf_uv_mode_prob_tail[i][0] = v4l2_vp9_kf_uv_mode_prob[i][8];
+ }
+
+ adaptive = &all_probs->probs;
+
+ for (i = 0; i < ARRAY_SIZE(adaptive->inter_mode); ++i) {
+ memcpy(adaptive->inter_mode[i], probs->inter_mode[i],
+ ARRAY_SIZE(probs->inter_mode[i]));
+
+ adaptive->inter_mode[i][3] = 0;
+ }
+
+ memcpy(adaptive->is_inter, probs->is_inter, sizeof(adaptive->is_inter));
+
+ for (i = 0; i < ARRAY_SIZE(adaptive->uv_mode); ++i) {
+ memcpy(adaptive->uv_mode[i], probs->uv_mode[i],
+ sizeof(adaptive->uv_mode[i]));
+ adaptive->uv_mode_tail[i][0] = probs->uv_mode[i][8];
+ }
+
+ memcpy(adaptive->tx8, probs->tx8, sizeof(adaptive->tx8));
+ memcpy(adaptive->tx16, probs->tx16, sizeof(adaptive->tx16));
+ memcpy(adaptive->tx32, probs->tx32, sizeof(adaptive->tx32));
+
+ for (i = 0; i < ARRAY_SIZE(adaptive->y_mode); ++i) {
+ memcpy(adaptive->y_mode[i], probs->y_mode[i],
+ ARRAY_SIZE(adaptive->y_mode[i]));
+
+ adaptive->y_mode_tail[i][0] = probs->y_mode[i][8];
+ }
+
+ for (i = 0; i < ARRAY_SIZE(adaptive->partition[0]); ++i) {
+ memcpy(adaptive->partition[0][i], v4l2_vp9_kf_partition_probs[i],
+ sizeof(v4l2_vp9_kf_partition_probs[i]));
+
+ adaptive->partition[0][i][3] = 0;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(adaptive->partition[1]); ++i) {
+ memcpy(adaptive->partition[1][i], probs->partition[i],
+ sizeof(probs->partition[i]));
+
+ adaptive->partition[1][i][3] = 0;
+ }
+
+ memcpy(adaptive->interp_filter, probs->interp_filter,
+ sizeof(adaptive->interp_filter));
+
+ memcpy(adaptive->comp_mode, probs->comp_mode, sizeof(adaptive->comp_mode));
+
+ memcpy(adaptive->skip, probs->skip, sizeof(adaptive->skip));
+
+ mv = &adaptive->mv;
+
+ memcpy(mv->joint, probs->mv.joint, sizeof(mv->joint));
+ memcpy(mv->sign, probs->mv.sign, sizeof(mv->sign));
+ memcpy(mv->class0_bit, probs->mv.class0_bit, sizeof(mv->class0_bit));
+ memcpy(mv->fr, probs->mv.fr, sizeof(mv->fr));
+ memcpy(mv->class0_hp, probs->mv.class0_hp, sizeof(mv->class0_hp));
+ memcpy(mv->hp, probs->mv.hp, sizeof(mv->hp));
+ memcpy(mv->classes, probs->mv.classes, sizeof(mv->classes));
+ memcpy(mv->class0_fr, probs->mv.class0_fr, sizeof(mv->class0_fr));
+ memcpy(mv->bits, probs->mv.bits, sizeof(mv->bits));
+
+ memcpy(adaptive->single_ref, probs->single_ref, sizeof(adaptive->single_ref));
+
+ memcpy(adaptive->comp_ref, probs->comp_ref, sizeof(adaptive->comp_ref));
+
+ for (i = 0; i < ARRAY_SIZE(adaptive->coef); ++i)
+ for (j = 0; j < ARRAY_SIZE(adaptive->coef[0]); ++j)
+ for (k = 0; k < ARRAY_SIZE(adaptive->coef[0][0]); ++k)
+ for (l = 0; l < ARRAY_SIZE(adaptive->coef[0][0][0]); ++l)
+ INNER_LOOP;
+
+ hantro_write_addr(ctx->dev, G2_VP9_PROBS_ADDR, misc->dma);
+}
+
+static void config_counts(struct hantro_ctx *ctx)
+{
+ struct hantro_vp9_dec_hw_ctx *vp9_dec = &ctx->vp9_dec;
+ struct hantro_aux_buf *misc = &vp9_dec->misc;
+ dma_addr_t addr = misc->dma + vp9_dec->ctx_counters_offset;
+
+ hantro_write_addr(ctx->dev, G2_VP9_CTX_COUNT_ADDR, addr);
+}
+
+static void config_seg_map(struct hantro_ctx *ctx,
+ const struct v4l2_ctrl_vp9_frame *dec_params,
+ bool intra_only, bool update_map)
+{
+ struct hantro_vp9_dec_hw_ctx *vp9_ctx = &ctx->vp9_dec;
+ struct hantro_aux_buf *segment_map = &vp9_ctx->segment_map;
+ dma_addr_t addr;
+
+ if (intra_only ||
+ (dec_params->flags & V4L2_VP9_FRAME_FLAG_ERROR_RESILIENT)) {
+ memset(segment_map->cpu, 0, segment_map->size);
+ memset(vp9_ctx->feature_data, 0, sizeof(vp9_ctx->feature_data));
+ memset(vp9_ctx->feature_enabled, 0, sizeof(vp9_ctx->feature_enabled));
+ }
+
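+	/*
+	 * The segment maps are double-buffered: the map of the previous frame
+	 * is read from one half of the buffer while the updated map is
+	 * written to the other half; active_segment selects the read half and
+	 * is flipped whenever the hardware updates the map.
+	 */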
+ addr = segment_map->dma + vp9_ctx->active_segment * vp9_ctx->segment_map_size;
+ hantro_write_addr(ctx->dev, G2_VP9_SEGMENT_READ_ADDR, addr);
+
+ addr = segment_map->dma + (1 - vp9_ctx->active_segment) * vp9_ctx->segment_map_size;
+ hantro_write_addr(ctx->dev, G2_VP9_SEGMENT_WRITE_ADDR, addr);
+
+ if (update_map)
+ vp9_ctx->active_segment = 1 - vp9_ctx->active_segment;
+}
+
+static void
+config_source(struct hantro_ctx *ctx, const struct v4l2_ctrl_vp9_frame *dec_params,
+ struct vb2_v4l2_buffer *vb2_src)
+{
+ dma_addr_t stream_base, tmp_addr;
+ unsigned int headres_size;
+ u32 src_len, start_bit, src_buf_len;
+
+ headres_size = dec_params->uncompressed_header_size
+ + dec_params->compressed_header_size;
+
+ stream_base = vb2_dma_contig_plane_dma_addr(&vb2_src->vb2_buf, 0);
+
+ tmp_addr = stream_base + headres_size;
+ if (ctx->dev->variant->legacy_regs)
+ hantro_write_addr(ctx->dev, G2_STREAM_ADDR, (tmp_addr & ~0xf));
+ else
+ hantro_write_addr(ctx->dev, G2_STREAM_ADDR, stream_base);
+
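+	/*
+	 * The stream address programmed into the hardware must be 16-byte
+	 * aligned; the offset of the first payload bit within that aligned
+	 * chunk is passed separately as a bit position.
+	 */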
+ start_bit = (tmp_addr & 0xf) * 8;
+ hantro_reg_write(ctx->dev, &g2_start_bit, start_bit);
+
+ src_len = vb2_get_plane_payload(&vb2_src->vb2_buf, 0);
+ src_len += start_bit / 8 - headres_size;
+ hantro_reg_write(ctx->dev, &g2_stream_len, src_len);
+
+ if (!ctx->dev->variant->legacy_regs) {
+ tmp_addr &= ~0xf;
+ hantro_reg_write(ctx->dev, &g2_strm_start_offset, tmp_addr - stream_base);
+ src_buf_len = vb2_plane_size(&vb2_src->vb2_buf, 0);
+ hantro_reg_write(ctx->dev, &g2_strm_buffer_len, src_buf_len);
+ }
+}
+
+static void
+config_registers(struct hantro_ctx *ctx, const struct v4l2_ctrl_vp9_frame *dec_params,
+ struct vb2_v4l2_buffer *vb2_src, struct vb2_v4l2_buffer *vb2_dst)
+{
+ struct hantro_decoded_buffer *dst, *last, *mv_ref;
+ struct hantro_vp9_dec_hw_ctx *vp9_ctx = &ctx->vp9_dec;
+ const struct v4l2_vp9_segmentation *seg;
+ bool intra_only, resolution_change;
+
+	/* Resolve the destination and reference buffers for this frame. */
+ dst = vb2_to_hantro_decoded_buf(&vb2_dst->vb2_buf);
+
+ if (vp9_ctx->last.valid)
+ last = get_ref_buf(ctx, &dst->base.vb, vp9_ctx->last.timestamp);
+ else
+ last = dst;
+
+ update_dec_buf_info(dst, dec_params);
+ update_ctx_cur_info(vp9_ctx, dst, dec_params);
+ seg = &dec_params->seg;
+
+ intra_only = !!(dec_params->flags &
+ (V4L2_VP9_FRAME_FLAG_KEY_FRAME |
+ V4L2_VP9_FRAME_FLAG_INTRA_ONLY));
+
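+	/*
+	 * Motion vectors from the previous frame may only be reused when that
+	 * frame is valid and the stream is neither intra-only nor error
+	 * resilient; otherwise the MV reference falls back to the current
+	 * frame.
+	 */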
+ if (!intra_only &&
+ !(dec_params->flags & V4L2_VP9_FRAME_FLAG_ERROR_RESILIENT) &&
+ vp9_ctx->last.valid)
+ mv_ref = last;
+ else
+ mv_ref = dst;
+
+ resolution_change = dst->vp9.width != last->vp9.width ||
+ dst->vp9.height != last->vp9.height;
+
+ /* configure basic registers */
+ hantro_reg_write(ctx->dev, &g2_mode, VP9_DEC_MODE);
+ if (!ctx->dev->variant->legacy_regs) {
+ hantro_reg_write(ctx->dev, &g2_strm_swap, 0xf);
+ hantro_reg_write(ctx->dev, &g2_dirmv_swap, 0xf);
+ hantro_reg_write(ctx->dev, &g2_compress_swap, 0xf);
+ hantro_reg_write(ctx->dev, &g2_ref_compress_bypass, 1);
+ } else {
+ hantro_reg_write(ctx->dev, &g2_strm_swap_old, 0x1f);
+ hantro_reg_write(ctx->dev, &g2_pic_swap, 0x10);
+ hantro_reg_write(ctx->dev, &g2_dirmv_swap_old, 0x10);
+ hantro_reg_write(ctx->dev, &g2_tab0_swap_old, 0x10);
+ hantro_reg_write(ctx->dev, &g2_tab1_swap_old, 0x10);
+ hantro_reg_write(ctx->dev, &g2_tab2_swap_old, 0x10);
+ hantro_reg_write(ctx->dev, &g2_tab3_swap_old, 0x10);
+ hantro_reg_write(ctx->dev, &g2_rscan_swap, 0x10);
+ }
+ hantro_reg_write(ctx->dev, &g2_buswidth, BUS_WIDTH_128);
+ hantro_reg_write(ctx->dev, &g2_max_burst, 16);
+ hantro_reg_write(ctx->dev, &g2_apf_threshold, 8);
+ hantro_reg_write(ctx->dev, &g2_clk_gate_e, 1);
+ hantro_reg_write(ctx->dev, &g2_max_cb_size, 6);
+ hantro_reg_write(ctx->dev, &g2_min_cb_size, 3);
+ if (ctx->dev->variant->double_buffer)
+ hantro_reg_write(ctx->dev, &g2_double_buffer_e, 1);
+
+ config_output(ctx, dst, dec_params);
+
+ if (!intra_only)
+ config_ref_registers(ctx, dec_params, dst, mv_ref);
+
+ config_tiles(ctx, dec_params, dst);
+ config_segment(ctx, dec_params);
+ config_loop_filter(ctx, dec_params);
+ config_picture_dimensions(ctx, dst);
+ config_bit_depth(ctx, dec_params);
+ config_quant(ctx, dec_params);
+ config_others(ctx, dec_params, intra_only, resolution_change);
+ config_compound_reference(ctx, dec_params);
+ config_probs(ctx, dec_params);
+ config_counts(ctx);
+ config_seg_map(ctx, dec_params, intra_only,
+ seg->flags & V4L2_VP9_SEGMENTATION_FLAG_UPDATE_MAP);
+ config_source(ctx, dec_params, vb2_src);
+}
+
+int hantro_g2_vp9_dec_run(struct hantro_ctx *ctx)
+{
+ const struct v4l2_ctrl_vp9_frame *decode_params;
+ struct vb2_v4l2_buffer *src;
+ struct vb2_v4l2_buffer *dst;
+ int ret;
+
+ hantro_g2_check_idle(ctx->dev);
+
+ ret = start_prepare_run(ctx, &decode_params);
+ if (ret) {
+ hantro_end_prepare_run(ctx);
+ return ret;
+ }
+
+ src = hantro_get_src_buf(ctx);
+ dst = hantro_get_dst_buf(ctx);
+
+ config_registers(ctx, decode_params, src, dst);
+
+ hantro_end_prepare_run(ctx);
+
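+	/* Kick the hardware; completion is signalled through the G2 IRQ. */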
+ vdpu_write(ctx->dev, G2_REG_INTERRUPT_DEC_E, G2_REG_INTERRUPT);
+
+ return 0;
+}
+
+#define copy_tx_and_skip(p1, p2) \
+do { \
+ memcpy((p1)->tx8, (p2)->tx8, sizeof((p1)->tx8)); \
+ memcpy((p1)->tx16, (p2)->tx16, sizeof((p1)->tx16)); \
+ memcpy((p1)->tx32, (p2)->tx32, sizeof((p1)->tx32)); \
+ memcpy((p1)->skip, (p2)->skip, sizeof((p1)->skip)); \
+} while (0)
+
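+/*
+ * Backward probability adaptation (VP9 spec 6.1.2, refresh_probs()): once a
+ * frame has been decoded, the symbol counters gathered by the hardware are
+ * used to adapt the probability tables, which are then stored back into the
+ * frame context selected by frame_context_idx.
+ */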
+void hantro_g2_vp9_dec_done(struct hantro_ctx *ctx)
+{
+ struct hantro_vp9_dec_hw_ctx *vp9_ctx = &ctx->vp9_dec;
+ unsigned int fctx_idx;
+
+ if (!(vp9_ctx->cur.flags & V4L2_VP9_FRAME_FLAG_REFRESH_FRAME_CTX))
+ goto out_update_last;
+
+ fctx_idx = vp9_ctx->cur.frame_context_idx;
+
+ if (!(vp9_ctx->cur.flags & V4L2_VP9_FRAME_FLAG_PARALLEL_DEC_MODE)) {
+ /* error_resilient_mode == 0 && frame_parallel_decoding_mode == 0 */
+ struct v4l2_vp9_frame_context *probs = &vp9_ctx->probability_tables;
+ bool frame_is_intra = vp9_ctx->cur.flags &
+ (V4L2_VP9_FRAME_FLAG_KEY_FRAME | V4L2_VP9_FRAME_FLAG_INTRA_ONLY);
+ struct tx_and_skip {
+ u8 tx8[2][1];
+ u8 tx16[2][2];
+ u8 tx32[2][3];
+ u8 skip[3];
+ } _tx_skip, *tx_skip = &_tx_skip;
+ struct v4l2_vp9_frame_symbol_counts *counts;
+ struct symbol_counts *hantro_cnts;
+ u32 tx16p[2][4];
+ int i;
+
+ /* buffer the forward-updated TX and skip probs */
+ if (frame_is_intra)
+ copy_tx_and_skip(tx_skip, probs);
+
+ /* 6.1.2 refresh_probs(): load_probs() and load_probs2() */
+ *probs = vp9_ctx->frame_context[fctx_idx];
+
+ /* if FrameIsIntra then undo the effect of load_probs2() */
+ if (frame_is_intra)
+ copy_tx_and_skip(probs, tx_skip);
+
+ counts = &vp9_ctx->cnts;
+ hantro_cnts = vp9_ctx->misc.cpu + vp9_ctx->ctx_counters_offset;
+ for (i = 0; i < ARRAY_SIZE(tx16p); ++i) {
+ memcpy(tx16p[i],
+ hantro_cnts->tx16x16_count[i],
+ sizeof(hantro_cnts->tx16x16_count[0]));
+ tx16p[i][3] = 0;
+ }
+ counts->tx16p = &tx16p;
+
+ v4l2_vp9_adapt_coef_probs(probs, counts,
+ !vp9_ctx->last.valid ||
+ vp9_ctx->last.flags & V4L2_VP9_FRAME_FLAG_KEY_FRAME,
+ frame_is_intra);
+
+ if (!frame_is_intra) {
+ /* load_probs2() already done */
+ u32 mv_mode[7][4];
+
+ for (i = 0; i < ARRAY_SIZE(mv_mode); ++i) {
+ mv_mode[i][0] = hantro_cnts->inter_mode_counts[i][1][0];
+ mv_mode[i][1] = hantro_cnts->inter_mode_counts[i][2][0];
+ mv_mode[i][2] = hantro_cnts->inter_mode_counts[i][0][0];
+ mv_mode[i][3] = hantro_cnts->inter_mode_counts[i][2][1];
+ }
+ counts->mv_mode = &mv_mode;
+ v4l2_vp9_adapt_noncoef_probs(&vp9_ctx->probability_tables, counts,
+ vp9_ctx->cur.reference_mode,
+ vp9_ctx->cur.interpolation_filter,
+ vp9_ctx->cur.tx_mode, vp9_ctx->cur.flags);
+ }
+ }
+
+ vp9_ctx->frame_context[fctx_idx] = vp9_ctx->probability_tables;
+
+out_update_last:
+ vp9_ctx->last = vp9_ctx->cur;
+}
diff --git a/drivers/media/platform/verisilicon/hantro_h1_jpeg_enc.c b/drivers/media/platform/verisilicon/hantro_h1_jpeg_enc.c
new file mode 100644
index 0000000000..12d69503d6
--- /dev/null
+++ b/drivers/media/platform/verisilicon/hantro_h1_jpeg_enc.c
@@ -0,0 +1,166 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hantro VPU codec driver
+ *
+ * Copyright (C) 2018 Rockchip Electronics Co., Ltd.
+ */
+
+#include <asm/unaligned.h>
+#include <media/v4l2-mem2mem.h>
+#include "hantro_jpeg.h"
+#include "hantro.h"
+#include "hantro_v4l2.h"
+#include "hantro_hw.h"
+#include "hantro_h1_regs.h"
+
+#define H1_JPEG_QUANT_TABLE_COUNT 16
+
+static void hantro_h1_set_src_img_ctrl(struct hantro_dev *vpu,
+ struct hantro_ctx *ctx)
+{
+ u32 overfill_r, overfill_b;
+ u32 reg;
+
+	/*
+	 * The format width and height are already macroblock aligned
+	 * by the .vidioc_s_fmt_vid_cap_mplane() callback. The destination
+	 * format width and height can be further modified by
+	 * .vidioc_s_selection(), and the width is 4-aligned.
+	 */
+ overfill_r = ctx->src_fmt.width - ctx->dst_fmt.width;
+ overfill_b = ctx->src_fmt.height - ctx->dst_fmt.height;
+
+ reg = H1_REG_IN_IMG_CTRL_ROW_LEN(ctx->src_fmt.width)
+ | H1_REG_IN_IMG_CTRL_OVRFLR_D4(overfill_r / 4)
+ | H1_REG_IN_IMG_CTRL_OVRFLB(overfill_b)
+ | H1_REG_IN_IMG_CTRL_FMT(ctx->vpu_src_fmt->enc_fmt);
+ vepu_write_relaxed(vpu, reg, H1_REG_IN_IMG_CTRL);
+}
+
+static void hantro_h1_jpeg_enc_set_buffers(struct hantro_dev *vpu,
+ struct hantro_ctx *ctx,
+ struct vb2_buffer *src_buf,
+ struct vb2_buffer *dst_buf)
+{
+ struct v4l2_pix_format_mplane *pix_fmt = &ctx->src_fmt;
+ dma_addr_t src[3];
+ u32 size_left;
+
+ size_left = vb2_plane_size(dst_buf, 0) - ctx->vpu_dst_fmt->header_size;
+ if (WARN_ON(vb2_plane_size(dst_buf, 0) < ctx->vpu_dst_fmt->header_size))
+ size_left = 0;
+
+ WARN_ON(pix_fmt->num_planes > 3);
+
+ vepu_write_relaxed(vpu, vb2_dma_contig_plane_dma_addr(dst_buf, 0) +
+ ctx->vpu_dst_fmt->header_size,
+ H1_REG_ADDR_OUTPUT_STREAM);
+ vepu_write_relaxed(vpu, size_left, H1_REG_STR_BUF_LIMIT);
+
+ if (pix_fmt->num_planes == 1) {
+ src[0] = vb2_dma_contig_plane_dma_addr(src_buf, 0);
+ /* single plane formats we supported are all interlaced */
+		/* The single-plane formats we support are all packed (interleaved). */
+ } else if (pix_fmt->num_planes == 2) {
+ src[0] = vb2_dma_contig_plane_dma_addr(src_buf, 0);
+ src[1] = vb2_dma_contig_plane_dma_addr(src_buf, 1);
+ vepu_write_relaxed(vpu, src[0], H1_REG_ADDR_IN_PLANE_0);
+ vepu_write_relaxed(vpu, src[1], H1_REG_ADDR_IN_PLANE_1);
+ } else {
+ src[0] = vb2_dma_contig_plane_dma_addr(src_buf, 0);
+ src[1] = vb2_dma_contig_plane_dma_addr(src_buf, 1);
+ src[2] = vb2_dma_contig_plane_dma_addr(src_buf, 2);
+ vepu_write_relaxed(vpu, src[0], H1_REG_ADDR_IN_PLANE_0);
+ vepu_write_relaxed(vpu, src[1], H1_REG_ADDR_IN_PLANE_1);
+ vepu_write_relaxed(vpu, src[2], H1_REG_ADDR_IN_PLANE_2);
+ }
+}
+
+static void
+hantro_h1_jpeg_enc_set_qtable(struct hantro_dev *vpu,
+ unsigned char *luma_qtable,
+ unsigned char *chroma_qtable)
+{
+ u32 reg, i;
+ __be32 *luma_qtable_p;
+ __be32 *chroma_qtable_p;
+
+ luma_qtable_p = (__be32 *)luma_qtable;
+ chroma_qtable_p = (__be32 *)chroma_qtable;
+
+ /*
+ * Quantization table registers must be written in contiguous blocks.
+ * DO NOT collapse the below two "for" loops into one.
+ */
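+	/*
+	 * Each register takes four quantization table bytes packed into a
+	 * big-endian word; get_unaligned_be32() builds that word regardless
+	 * of CPU endianness and table alignment.
+	 */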
+ for (i = 0; i < H1_JPEG_QUANT_TABLE_COUNT; i++) {
+ reg = get_unaligned_be32(&luma_qtable_p[i]);
+ vepu_write_relaxed(vpu, reg, H1_REG_JPEG_LUMA_QUAT(i));
+ }
+
+ for (i = 0; i < H1_JPEG_QUANT_TABLE_COUNT; i++) {
+ reg = get_unaligned_be32(&chroma_qtable_p[i]);
+ vepu_write_relaxed(vpu, reg, H1_REG_JPEG_CHROMA_QUAT(i));
+ }
+}
+
+int hantro_h1_jpeg_enc_run(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
+ struct hantro_jpeg_ctx jpeg_ctx;
+ u32 reg;
+
+ src_buf = hantro_get_src_buf(ctx);
+ dst_buf = hantro_get_dst_buf(ctx);
+
+ hantro_start_prepare_run(ctx);
+
+ memset(&jpeg_ctx, 0, sizeof(jpeg_ctx));
+ jpeg_ctx.buffer = vb2_plane_vaddr(&dst_buf->vb2_buf, 0);
+ jpeg_ctx.width = ctx->dst_fmt.width;
+ jpeg_ctx.height = ctx->dst_fmt.height;
+ jpeg_ctx.quality = ctx->jpeg_quality;
+ hantro_jpeg_header_assemble(&jpeg_ctx);
+
+ /* Switch to JPEG encoder mode before writing registers */
+ vepu_write_relaxed(vpu, H1_REG_ENC_CTRL_ENC_MODE_JPEG,
+ H1_REG_ENC_CTRL);
+
+ hantro_h1_set_src_img_ctrl(vpu, ctx);
+ hantro_h1_jpeg_enc_set_buffers(vpu, ctx, &src_buf->vb2_buf,
+ &dst_buf->vb2_buf);
+ hantro_h1_jpeg_enc_set_qtable(vpu, jpeg_ctx.hw_luma_qtable,
+ jpeg_ctx.hw_chroma_qtable);
+
+ reg = H1_REG_AXI_CTRL_OUTPUT_SWAP16
+ | H1_REG_AXI_CTRL_INPUT_SWAP16
+ | H1_REG_AXI_CTRL_BURST_LEN(16)
+ | H1_REG_AXI_CTRL_OUTPUT_SWAP32
+ | H1_REG_AXI_CTRL_INPUT_SWAP32
+ | H1_REG_AXI_CTRL_OUTPUT_SWAP8
+ | H1_REG_AXI_CTRL_INPUT_SWAP8;
+ /* Make sure that all registers are written at this point. */
+ vepu_write(vpu, reg, H1_REG_AXI_CTRL);
+
+ reg = H1_REG_ENC_CTRL_WIDTH(MB_WIDTH(ctx->src_fmt.width))
+ | H1_REG_ENC_CTRL_HEIGHT(MB_HEIGHT(ctx->src_fmt.height))
+ | H1_REG_ENC_CTRL_ENC_MODE_JPEG
+ | H1_REG_ENC_PIC_INTRA
+ | H1_REG_ENC_CTRL_EN_BIT;
+
+ hantro_end_prepare_run(ctx);
+
+ vepu_write(vpu, reg, H1_REG_ENC_CTRL);
+
+ return 0;
+}
+
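+/*
+ * After encoding, the stream length register reports the number of produced
+ * bits, so the payload is the CPU-assembled JPEG header plus that length
+ * converted to bytes.
+ */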
+void hantro_h1_jpeg_enc_done(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ u32 bytesused = vepu_read(vpu, H1_REG_STR_BUF_LIMIT) / 8;
+ struct vb2_v4l2_buffer *dst_buf = hantro_get_dst_buf(ctx);
+
+ vb2_set_plane_payload(&dst_buf->vb2_buf, 0,
+ ctx->vpu_dst_fmt->header_size + bytesused);
+}
diff --git a/drivers/media/platform/verisilicon/hantro_h1_regs.h b/drivers/media/platform/verisilicon/hantro_h1_regs.h
new file mode 100644
index 0000000000..30e7e7b920
--- /dev/null
+++ b/drivers/media/platform/verisilicon/hantro_h1_regs.h
@@ -0,0 +1,154 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Hantro VPU codec driver
+ *
+ * Copyright 2018 Google LLC.
+ * Tomasz Figa <tfiga@chromium.org>
+ */
+
+#ifndef HANTRO_H1_REGS_H_
+#define HANTRO_H1_REGS_H_
+
+/* Encoder registers. */
+#define H1_REG_INTERRUPT 0x004
+#define H1_REG_INTERRUPT_FRAME_RDY BIT(2)
+#define H1_REG_INTERRUPT_DIS_BIT BIT(1)
+#define H1_REG_INTERRUPT_BIT BIT(0)
+#define H1_REG_AXI_CTRL 0x008
+#define H1_REG_AXI_CTRL_OUTPUT_SWAP16 BIT(15)
+#define H1_REG_AXI_CTRL_INPUT_SWAP16 BIT(14)
+#define H1_REG_AXI_CTRL_BURST_LEN(x) ((x) << 8)
+#define H1_REG_AXI_CTRL_GATE_BIT BIT(4)
+#define H1_REG_AXI_CTRL_OUTPUT_SWAP32 BIT(3)
+#define H1_REG_AXI_CTRL_INPUT_SWAP32 BIT(2)
+#define H1_REG_AXI_CTRL_OUTPUT_SWAP8 BIT(1)
+#define H1_REG_AXI_CTRL_INPUT_SWAP8 BIT(0)
+#define H1_REG_ADDR_OUTPUT_STREAM 0x014
+#define H1_REG_ADDR_OUTPUT_CTRL 0x018
+#define H1_REG_ADDR_REF_LUMA 0x01c
+#define H1_REG_ADDR_REF_CHROMA 0x020
+#define H1_REG_ADDR_REC_LUMA 0x024
+#define H1_REG_ADDR_REC_CHROMA 0x028
+#define H1_REG_ADDR_IN_PLANE_0 0x02c
+#define H1_REG_ADDR_IN_PLANE_1 0x030
+#define H1_REG_ADDR_IN_PLANE_2 0x034
+#define H1_REG_ENC_CTRL 0x038
+#define H1_REG_ENC_CTRL_TIMEOUT_EN BIT(31)
+#define H1_REG_ENC_CTRL_NAL_MODE_BIT BIT(29)
+#define H1_REG_ENC_CTRL_WIDTH(w) ((w) << 19)
+#define H1_REG_ENC_CTRL_HEIGHT(h) ((h) << 10)
+#define H1_REG_ENC_PIC_INTER (0x0 << 3)
+#define H1_REG_ENC_PIC_INTRA (0x1 << 3)
+#define H1_REG_ENC_PIC_MVCINTER (0x2 << 3)
+#define H1_REG_ENC_CTRL_ENC_MODE_H264 (0x3 << 1)
+#define H1_REG_ENC_CTRL_ENC_MODE_JPEG (0x2 << 1)
+#define H1_REG_ENC_CTRL_ENC_MODE_VP8 (0x1 << 1)
+#define H1_REG_ENC_CTRL_EN_BIT BIT(0)
+#define H1_REG_IN_IMG_CTRL 0x03c
+#define H1_REG_IN_IMG_CTRL_ROW_LEN(x) ((x) << 12)
+#define H1_REG_IN_IMG_CTRL_OVRFLR_D4(x) ((x) << 10)
+#define H1_REG_IN_IMG_CTRL_OVRFLB(x) ((x) << 6)
+#define H1_REG_IN_IMG_CTRL_FMT(x) ((x) << 2)
+#define H1_REG_ENC_CTRL0 0x040
+#define H1_REG_ENC_CTRL0_INIT_QP(x) ((x) << 26)
+#define H1_REG_ENC_CTRL0_SLICE_ALPHA(x) ((x) << 22)
+#define H1_REG_ENC_CTRL0_SLICE_BETA(x) ((x) << 18)
+#define H1_REG_ENC_CTRL0_CHROMA_QP_OFFSET(x) ((x) << 13)
+#define H1_REG_ENC_CTRL0_FILTER_DIS(x) ((x) << 5)
+#define H1_REG_ENC_CTRL0_IDR_PICID(x) ((x) << 1)
+#define H1_REG_ENC_CTRL0_CONSTR_INTRA_PRED BIT(0)
+#define H1_REG_ENC_CTRL1 0x044
+#define H1_REG_ENC_CTRL1_PPS_ID(x) ((x) << 24)
+#define H1_REG_ENC_CTRL1_INTRA_PRED_MODE(x) ((x) << 16)
+#define H1_REG_ENC_CTRL1_FRAME_NUM(x) ((x))
+#define H1_REG_ENC_CTRL2 0x048
+#define H1_REG_ENC_CTRL2_DEBLOCKING_FILETER_MODE(x) ((x) << 30)
+#define H1_REG_ENC_CTRL2_H264_SLICE_SIZE(x) ((x) << 23)
+#define H1_REG_ENC_CTRL2_DISABLE_QUARTER_PIXMV BIT(22)
+#define H1_REG_ENC_CTRL2_TRANS8X8_MODE_EN BIT(21)
+#define H1_REG_ENC_CTRL2_CABAC_INIT_IDC(x) ((x) << 19)
+#define H1_REG_ENC_CTRL2_ENTROPY_CODING_MODE BIT(18)
+#define H1_REG_ENC_CTRL2_H264_INTER4X4_MODE BIT(17)
+#define H1_REG_ENC_CTRL2_H264_STREAM_MODE BIT(16)
+#define H1_REG_ENC_CTRL2_INTRA16X16_MODE(x) ((x))
+#define H1_REG_ENC_CTRL3 0x04c
+#define H1_REG_ENC_CTRL3_MUTIMV_EN BIT(30)
+#define H1_REG_ENC_CTRL3_MV_PENALTY_1_4P(x) ((x) << 20)
+#define H1_REG_ENC_CTRL3_MV_PENALTY_4P(x) ((x) << 10)
+#define H1_REG_ENC_CTRL3_MV_PENALTY_1P(x) ((x))
+#define H1_REG_ENC_CTRL4 0x050
+#define H1_REG_ENC_CTRL4_MV_PENALTY_16X8_8X16(x) ((x) << 20)
+#define H1_REG_ENC_CTRL4_MV_PENALTY_8X8(x) ((x) << 10)
+#define H1_REG_ENC_CTRL4_8X4_4X8(x) ((x))
+#define H1_REG_ENC_CTRL5 0x054
+#define H1_REG_ENC_CTRL5_MACROBLOCK_PENALTY(x) ((x) << 24)
+#define H1_REG_ENC_CTRL5_COMPLETE_SLICES(x) ((x) << 16)
+#define H1_REG_ENC_CTRL5_INTER_MODE(x) ((x))
+#define H1_REG_STR_HDR_REM_MSB 0x058
+#define H1_REG_STR_HDR_REM_LSB 0x05c
+#define H1_REG_STR_BUF_LIMIT 0x060
+#define H1_REG_MAD_CTRL 0x064
+#define H1_REG_MAD_CTRL_QP_ADJUST(x) ((x) << 28)
+#define H1_REG_MAD_CTRL_MAD_THREDHOLD(x) ((x) << 22)
+#define H1_REG_MAD_CTRL_QP_SUM_DIV2(x) ((x))
+#define H1_REG_ADDR_VP8_PROB_CNT 0x068
+#define H1_REG_QP_VAL 0x06c
+#define H1_REG_QP_VAL_LUM(x) ((x) << 26)
+#define H1_REG_QP_VAL_MAX(x) ((x) << 20)
+#define H1_REG_QP_VAL_MIN(x) ((x) << 14)
+#define H1_REG_QP_VAL_CHECKPOINT_DISTAN(x) ((x))
+#define H1_REG_VP8_QP_VAL(i) (0x06c + ((i) * 0x4))
+#define H1_REG_CHECKPOINT(i) (0x070 + ((i) * 0x4))
+#define H1_REG_CHECKPOINT_CHECK0(x) (((x) & 0xffff))
+#define H1_REG_CHECKPOINT_CHECK1(x) (((x) & 0xffff) << 16)
+#define H1_REG_CHECKPOINT_RESULT(x) ((((x) >> (16 - 16 \
+ * (i & 1))) & 0xffff) \
+ * 32)
+#define H1_REG_CHKPT_WORD_ERR(i) (0x084 + ((i) * 0x4))
+#define H1_REG_CHKPT_WORD_ERR_CHK0(x) (((x) & 0xffff))
+#define H1_REG_CHKPT_WORD_ERR_CHK1(x) (((x) & 0xffff) << 16)
+#define H1_REG_VP8_BOOL_ENC 0x08c
+#define H1_REG_CHKPT_DELTA_QP 0x090
+#define H1_REG_CHKPT_DELTA_QP_CHK0(x) (((x) & 0x0f) << 0)
+#define H1_REG_CHKPT_DELTA_QP_CHK1(x) (((x) & 0x0f) << 4)
+#define H1_REG_CHKPT_DELTA_QP_CHK2(x) (((x) & 0x0f) << 8)
+#define H1_REG_CHKPT_DELTA_QP_CHK3(x) (((x) & 0x0f) << 12)
+#define H1_REG_CHKPT_DELTA_QP_CHK4(x) (((x) & 0x0f) << 16)
+#define H1_REG_CHKPT_DELTA_QP_CHK5(x) (((x) & 0x0f) << 20)
+#define H1_REG_CHKPT_DELTA_QP_CHK6(x) (((x) & 0x0f) << 24)
+#define H1_REG_VP8_CTRL0 0x090
+#define H1_REG_RLC_CTRL 0x094
+#define H1_REG_RLC_CTRL_STR_OFFS_SHIFT 23
+#define H1_REG_RLC_CTRL_STR_OFFS_MASK (0x3f << 23)
+#define H1_REG_RLC_CTRL_RLC_SUM(x) ((x))
+#define H1_REG_MB_CTRL 0x098
+#define H1_REG_MB_CNT_OUT(x) (((x) & 0xffff))
+#define H1_REG_MB_CNT_SET(x) (((x) & 0xffff) << 16)
+#define H1_REG_ADDR_NEXT_PIC 0x09c
+#define H1_REG_JPEG_LUMA_QUAT(i) (0x100 + ((i) * 0x4))
+#define H1_REG_JPEG_CHROMA_QUAT(i) (0x140 + ((i) * 0x4))
+#define H1_REG_STABILIZATION_OUTPUT 0x0A0
+#define H1_REG_ADDR_CABAC_TBL 0x0cc
+#define H1_REG_ADDR_MV_OUT 0x0d0
+#define H1_REG_RGB_YUV_COEFF(i) (0x0d4 + ((i) * 0x4))
+#define H1_REG_RGB_MASK_MSB 0x0dc
+#define H1_REG_INTRA_AREA_CTRL 0x0e0
+#define H1_REG_CIR_INTRA_CTRL 0x0e4
+#define H1_REG_INTRA_SLICE_BITMAP(i) (0x0e8 + ((i) * 0x4))
+#define H1_REG_ADDR_VP8_DCT_PART(i) (0x0e8 + ((i) * 0x4))
+#define H1_REG_FIRST_ROI_AREA 0x0f0
+#define H1_REG_SECOND_ROI_AREA 0x0f4
+#define H1_REG_MVC_CTRL 0x0f8
+#define H1_REG_MVC_CTRL_MV16X16_FAVOR(x) ((x) << 28)
+#define H1_REG_VP8_INTRA_PENALTY(i) (0x100 + ((i) * 0x4))
+#define H1_REG_ADDR_VP8_SEG_MAP 0x11c
+#define H1_REG_VP8_SEG_QP(i) (0x120 + ((i) * 0x4))
+#define H1_REG_DMV_4P_1P_PENALTY(i) (0x180 + ((i) * 0x4))
+#define H1_REG_DMV_4P_1P_PENALTY_BIT(x, i) ((x) << (i) * 8)
+#define H1_REG_DMV_QPEL_PENALTY(i) (0x200 + ((i) * 0x4))
+#define H1_REG_DMV_QPEL_PENALTY_BIT(x, i) ((x) << (i) * 8)
+#define H1_REG_VP8_CTRL1 0x280
+#define H1_REG_VP8_BIT_COST_GOLDEN 0x284
+#define H1_REG_VP8_LOOP_FLT_DELTA(i) (0x288 + ((i) * 0x4))
+
+#endif /* HANTRO_H1_REGS_H_ */
diff --git a/drivers/media/platform/verisilicon/hantro_h264.c b/drivers/media/platform/verisilicon/hantro_h264.c
new file mode 100644
index 0000000000..4e9a0ecf5c
--- /dev/null
+++ b/drivers/media/platform/verisilicon/hantro_h264.c
@@ -0,0 +1,521 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Rockchip RK3288 VPU codec driver
+ *
+ * Copyright (c) 2014 Rockchip Electronics Co., Ltd.
+ * Hertz Wong <hertz.wong@rock-chips.com>
+ * Herman Chen <herman.chen@rock-chips.com>
+ *
+ * Copyright (C) 2014 Google, Inc.
+ * Tomasz Figa <tfiga@chromium.org>
+ */
+
+#include <linux/types.h>
+#include <media/v4l2-h264.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "hantro.h"
+#include "hantro_hw.h"
+
+/* Sizes in u32 units (the scaling list size is in bytes). */
+#define CABAC_INIT_BUFFER_SIZE (460 * 2)
+#define POC_BUFFER_SIZE 34
+#define SCALING_LIST_SIZE (6 * 16 + 2 * 64)
+
+/*
+ * For valid and long-term reference marking, the indexes are reversed, so
+ * bit 31 indicates the status of picture 0.
+ */
+#define REF_BIT(i) BIT(32 - 1 - (i))
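+/* For example, REF_BIT(0) == BIT(31) and REF_BIT(15) == BIT(16). */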
+
+/* Data structure describing auxiliary buffer format. */
+struct hantro_h264_dec_priv_tbl {
+ u32 cabac_table[CABAC_INIT_BUFFER_SIZE];
+ u32 poc[POC_BUFFER_SIZE];
+ u8 scaling_list[SCALING_LIST_SIZE];
+};
+
+/*
+ * Constant CABAC table.
+ * From drivers/media/platform/rk3288-vpu/rk3288_vpu_hw_h264d.c
+ * in https://chromium.googlesource.com/chromiumos/third_party/kernel,
+ * chromeos-3.14 branch.
+ */
+static const u32 h264_cabac_table[] = {
+ 0x14f10236, 0x034a14f1, 0x0236034a, 0xe47fe968, 0xfa35ff36, 0x07330000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x0029003f, 0x003f003f, 0xf7530456, 0x0061f948, 0x0d29033e, 0x000b0137,
+ 0x0045ef7f, 0xf3660052, 0xf94aeb6b, 0xe57fe17f, 0xe87fee5f, 0xe57feb72,
+ 0xe27fef7b, 0xf473f07a, 0xf573f43f, 0xfe44f154, 0xf368fd46, 0xf85df65a,
+ 0xe27fff4a, 0xfa61f95b, 0xec7ffc38, 0xfb52f94c, 0xea7df95d, 0xf557fd4d,
+ 0xfb47fc3f, 0xfc44f454, 0xf93ef941, 0x083d0538, 0xfe420140, 0x003dfe4e,
+ 0x01320734, 0x0a23002c, 0x0b26012d, 0x002e052c, 0x1f110133, 0x07321c13,
+ 0x10210e3e, 0xf36cf164, 0xf365f35b, 0xf45ef658, 0xf054f656, 0xf953f357,
+ 0xed5e0146, 0x0048fb4a, 0x123bf866, 0xf164005f, 0xfc4b0248, 0xf54bfd47,
+ 0x0f2ef345, 0x003e0041, 0x1525f148, 0x09391036, 0x003e0c48, 0x18000f09,
+ 0x08190d12, 0x0f090d13, 0x0a250c12, 0x061d1421, 0x0f1e042d, 0x013a003e,
+ 0x073d0c26, 0x0b2d0f27, 0x0b2a0d2c, 0x102d0c29, 0x0a311e22, 0x122a0a37,
+ 0x1133112e, 0x00591aed, 0x16ef1aef, 0x1ee71cec, 0x21e925e5, 0x21e928e4,
+ 0x26ef21f5, 0x28f129fa, 0x26012911, 0x1efa1b03, 0x1a1625f0, 0x23fc26f8,
+ 0x26fd2503, 0x26052a00, 0x23102716, 0x0e301b25, 0x153c0c44, 0x0261fd47,
+ 0xfa2afb32, 0xfd36fe3e, 0x003a013f, 0xfe48ff4a, 0xf75bfb43, 0xfb1bfd27,
+ 0xfe2c002e, 0xf040f844, 0xf64efa4d, 0xf656f45c, 0xf137f63c, 0xfa3efc41,
+ 0xf449f84c, 0xf950f758, 0xef6ef561, 0xec54f54f, 0xfa49fc4a, 0xf356f360,
+ 0xf561ed75, 0xf84efb21, 0xfc30fe35, 0xfd3ef347, 0xf64ff456, 0xf35af261,
+ 0x0000fa5d, 0xfa54f84f, 0x0042ff47, 0x003efe3c, 0xfe3bfb4b, 0xfd3efc3a,
+ 0xf742ff4f, 0x00470344, 0x0a2cf93e, 0x0f240e28, 0x101b0c1d, 0x012c1424,
+ 0x1220052a, 0x01300a3e, 0x112e0940, 0xf468f561, 0xf060f958, 0xf855f955,
+ 0xf755f358, 0x0442fd4d, 0xfd4cfa4c, 0x0a3aff4c, 0xff53f963, 0xf25f025f,
+ 0x004cfb4a, 0x0046f54b, 0x01440041, 0xf249033e, 0x043eff44, 0xf34b0b37,
+ 0x05400c46, 0x0f060613, 0x07100c0e, 0x120d0d0b, 0x0d0f0f10, 0x0c170d17,
+ 0x0f140e1a, 0x0e2c1128, 0x112f1811, 0x15151916, 0x1f1b161d, 0x13230e32,
+ 0x0a39073f, 0xfe4dfc52, 0xfd5e0945, 0xf46d24dd, 0x24de20e6, 0x25e22ce0,
+ 0x22ee22f1, 0x28f121f9, 0x23fb2100, 0x2602210d, 0x17230d3a, 0x1dfd1a00,
+ 0x161e1ff9, 0x23f122fd, 0x220324ff, 0x2205200b, 0x2305220c, 0x270b1e1d,
+ 0x221a1d27, 0x13421f15, 0x1f1f1932, 0xef78ec70, 0xee72f555, 0xf15cf259,
+ 0xe647f151, 0xf2500044, 0xf246e838, 0xe944e832, 0xf54a17f3, 0x1af328f1,
+ 0x31f22c03, 0x2d062c22, 0x21361352, 0xfd4bff17, 0x0122012b, 0x0036fe37,
+ 0x003d0140, 0x0044f75c, 0xf26af361, 0xf15af45a, 0xee58f649, 0xf74ff256,
+ 0xf649f646, 0xf645fb42, 0xf740fb3a, 0x023b15f6, 0x18f51cf8, 0x1cff1d03,
+ 0x1d092314, 0x1d240e43, 0x14f10236, 0x034a14f1, 0x0236034a, 0xe47fe968,
+ 0xfa35ff36, 0x07331721, 0x17021500, 0x01090031, 0xdb760539, 0xf34ef541,
+ 0x013e0c31, 0xfc491132, 0x1240092b, 0x1d001a43, 0x105a0968, 0xd27fec68,
+ 0x0143f34e, 0xf541013e, 0xfa56ef5f, 0xfa3d092d, 0xfd45fa51, 0xf5600637,
+ 0x0743fb56, 0x0258003a, 0xfd4cf65e, 0x05360445, 0xfd510058, 0xf943fb4a,
+ 0xfc4afb50, 0xf948013a, 0x0029003f, 0x003f003f, 0xf7530456, 0x0061f948,
+ 0x0d29033e, 0x002dfc4e, 0xfd60e57e, 0xe462e765, 0xe943e452, 0xec5ef053,
+ 0xea6eeb5b, 0xee66f35d, 0xe37ff95c, 0xfb59f960, 0xf36cfd2e, 0xff41ff39,
+ 0xf75dfd4a, 0xf75cf857, 0xe97e0536, 0x063c063b, 0x0645ff30, 0x0044fc45,
+ 0xf858fe55, 0xfa4eff4b, 0xf94d0236, 0x0532fd44, 0x0132062a, 0xfc51013f,
+ 0xfc460043, 0x0239fe4c, 0x0b230440, 0x013d0b23, 0x12190c18, 0x0d1d0d24,
+ 0xf65df949, 0xfe490d2e, 0x0931f964, 0x09350235, 0x0535fe3d, 0x00380038,
+ 0xf33ffb3c, 0xff3e0439, 0xfa450439, 0x0e270433, 0x0d440340, 0x013d093f,
+ 0x07321027, 0x052c0434, 0x0b30fb3c, 0xff3b003b, 0x1621052c, 0x0e2bff4e,
+ 0x003c0945, 0x0b1c0228, 0x032c0031, 0x002e022c, 0x0233002f, 0x0427023e,
+ 0x062e0036, 0x0336023a, 0x043f0633, 0x06390735, 0x06340637, 0x0b2d0e24,
+ 0x0835ff52, 0x0737fd4e, 0x0f2e161f, 0xff541907, 0x1ef91c03, 0x1c042000,
+ 0x22ff1e06, 0x1e062009, 0x1f131a1b, 0x1a1e2514, 0x1c221146, 0x0143053b,
+ 0x0943101e, 0x12201223, 0x161d181f, 0x1726122b, 0x14290b3f, 0x093b0940,
+ 0xff5efe59, 0xf76cfa4c, 0xfe2c002d, 0x0034fd40, 0xfe3bfc46, 0xfc4bf852,
+ 0xef66f74d, 0x0318002a, 0x00300037, 0xfa3bf947, 0xf453f557, 0xe277013a,
+ 0xfd1dff24, 0x0126022b, 0xfa37003a, 0x0040fd4a, 0xf65a0046, 0xfc1d051f,
+ 0x072a013b, 0xfe3afd48, 0xfd51f561, 0x003a0805, 0x0a0e0e12, 0x0d1b0228,
+ 0x003afd46, 0xfa4ff855, 0x0000f36a, 0xf06af657, 0xeb72ee6e, 0xf262ea6e,
+ 0xeb6aee67, 0xeb6be96c, 0xe670f660, 0xf45ffb5b, 0xf75dea5e, 0xfb560943,
+ 0xfc50f655, 0xff46073c, 0x093a053d, 0x0c320f32, 0x12311136, 0x0a29072e,
+ 0xff330731, 0x08340929, 0x062f0237, 0x0d290a2c, 0x06320535, 0x0d31043f,
+ 0x0640fe45, 0xfe3b0646, 0x0a2c091f, 0x0c2b0335, 0x0e220a26, 0xfd340d28,
+ 0x1120072c, 0x07260d32, 0x0a391a2b, 0x0e0b0b0e, 0x090b120b, 0x150917fe,
+ 0x20f120f1, 0x22eb27e9, 0x2adf29e1, 0x2ee426f4, 0x151d2de8, 0x35d330e6,
+ 0x41d52bed, 0x27f61e09, 0x121a141b, 0x0039f252, 0xfb4bed61, 0xdd7d1b00,
+ 0x1c001ffc, 0x1b062208, 0x1e0a1816, 0x21131620, 0x1a1f1529, 0x1a2c172f,
+ 0x10410e47, 0x083c063f, 0x11411518, 0x17141a17, 0x1b201c17, 0x1c181728,
+ 0x18201c1d, 0x172a1339, 0x1635163d, 0x0b560c28, 0x0b330e3b, 0xfc4ff947,
+ 0xfb45f746, 0xf842f644, 0xed49f445, 0xf046f143, 0xec3eed46, 0xf042ea41,
+ 0xec3f09fe, 0x1af721f7, 0x27f929fe, 0x2d033109, 0x2d1b243b, 0xfa42f923,
+ 0xf92af82d, 0xfb30f438, 0xfa3cfb3e, 0xf842f84c, 0xfb55fa51, 0xf64df951,
+ 0xef50ee49, 0xfc4af653, 0xf747f743, 0xff3df842, 0xf242003b, 0x023b15f3,
+ 0x21f227f9, 0x2efe3302, 0x3c063d11, 0x37222a3e, 0x14f10236, 0x034a14f1,
+ 0x0236034a, 0xe47fe968, 0xfa35ff36, 0x07331619, 0x22001000, 0xfe090429,
+ 0xe3760241, 0xfa47f34f, 0x05340932, 0xfd460a36, 0x1a221316, 0x28003902,
+ 0x29241a45, 0xd37ff165, 0xfc4cfa47, 0xf34f0534, 0x0645f35a, 0x0034082b,
+ 0xfe45fb52, 0xf660023b, 0x024bfd57, 0xfd640138, 0xfd4afa55, 0x003bfd51,
+ 0xf956fb5f, 0xff42ff4d, 0x0146fe56, 0xfb48003d, 0x0029003f, 0x003f003f,
+ 0xf7530456, 0x0061f948, 0x0d29033e, 0x0d0f0733, 0x0250d97f, 0xee5bef60,
+ 0xe651dd62, 0xe866e961, 0xe577e863, 0xeb6eee66, 0xdc7f0050, 0xfb59f95e,
+ 0xfc5c0027, 0x0041f154, 0xdd7ffe49, 0xf468f75b, 0xe17f0337, 0x07380737,
+ 0x083dfd35, 0x0044f94a, 0xf758f367, 0xf35bf759, 0xf25cf84c, 0xf457e96e,
+ 0xe869f64e, 0xec70ef63, 0xb27fba7f, 0xce7fd27f, 0xfc42fb4e, 0xfc47f848,
+ 0x023bff37, 0xf946fa4b, 0xf859de77, 0xfd4b2014, 0x1e16d47f, 0x0036fb3d,
+ 0x003aff3c, 0xfd3df843, 0xe754f24a, 0xfb410534, 0x0239003d, 0xf745f546,
+ 0x1237fc47, 0x003a073d, 0x09291219, 0x0920052b, 0x092f002c, 0x0033022e,
+ 0x1326fc42, 0x0f260c2a, 0x09220059, 0x042d0a1c, 0x0a1f21f5, 0x34d5120f,
+ 0x1c0023ea, 0x26e72200, 0x27ee20f4, 0x66a20000, 0x38f121fc, 0x1d0a25fb,
+ 0x33e327f7, 0x34de45c6, 0x43c12cfb, 0x200737e3, 0x20010000, 0x1b2421e7,
+ 0x22e224e4, 0x26e426e5, 0x22ee23f0, 0x22f220f8, 0x25fa2300, 0x1e0a1c12,
+ 0x1a191d29, 0x004b0248, 0x084d0e23, 0x121f1123, 0x151e112d, 0x142a122d,
+ 0x1b1a1036, 0x07421038, 0x0b490a43, 0xf674e970, 0xf147f93d, 0x0035fb42,
+ 0xf54df750, 0xf754f657, 0xde7feb65, 0xfd27fb35, 0xf93df54b, 0xf14def5b,
+ 0xe76be76f, 0xe47af54c, 0xf62cf634, 0xf639f73a, 0xf048f945, 0xfc45fb4a,
+ 0xf7560242, 0xf7220120, 0x0b1f0534, 0xfe37fe43, 0x0049f859, 0x03340704,
+ 0x0a081108, 0x10130325, 0xff3dfb49, 0xff46fc4e, 0x0000eb7e, 0xe97cec6e,
+ 0xe67ee77c, 0xef69e579, 0xe575ef66, 0xe675e574, 0xdf7af65f, 0xf264f85f,
+ 0xef6fe472, 0xfa59fe50, 0xfc52f755, 0xf851ff48, 0x05400143, 0x09380045,
+ 0x01450745, 0xf945fa43, 0xf04dfe40, 0x023dfa43, 0xfd400239, 0xfd41fd42,
+ 0x003e0933, 0xff42fe47, 0xfe4bff46, 0xf7480e3c, 0x1025002f, 0x12230b25,
+ 0x0c290a29, 0x02300c29, 0x0d29003b, 0x03321328, 0x03421232, 0x13fa12fa,
+ 0x0e001af4, 0x1ff021e7, 0x21ea25e4, 0x27e22ae2, 0x2fd62ddc, 0x31de29ef,
+ 0x200945b9, 0x3fc142c0, 0x4db636d9, 0x34dd29f6, 0x240028ff, 0x1e0e1c1a,
+ 0x17250c37, 0x0b4125df, 0x27dc28db, 0x26e22edf, 0x2ae228e8, 0x31e326f4,
+ 0x28f626fd, 0x2efb1f14, 0x1d1e192c, 0x0c300b31, 0x1a2d1616, 0x17161b15,
+ 0x21141a1c, 0x1e181b22, 0x122a1927, 0x12320c46, 0x15360e47, 0x0b531920,
+ 0x15311536, 0xfb55fa51, 0xf64df951, 0xef50ee49, 0xfc4af653, 0xf747f743,
+ 0xff3df842, 0xf242003b, 0x023b11f6, 0x20f32af7, 0x31fb3500, 0x4003440a,
+ 0x421b2f39, 0xfb470018, 0xff24fe2a, 0xfe34f739, 0xfa3ffc41, 0xfc43f952,
+ 0xfd51fd4c, 0xf948fa4e, 0xf448f244, 0xfd46fa4c, 0xfb42fb3e, 0x0039fc3d,
+ 0xf73c0136, 0x023a11f6, 0x20f32af7, 0x31fb3500, 0x4003440a, 0x421b2f39,
+ 0x14f10236, 0x034a14f1, 0x0236034a, 0xe47fe968, 0xfa35ff36, 0x07331d10,
+ 0x19000e00, 0xf633fd3e, 0xe5631a10, 0xfc55e866, 0x05390639, 0xef490e39,
+ 0x1428140a, 0x1d003600, 0x252a0c61, 0xe07fea75, 0xfe4afc55, 0xe8660539,
+ 0xfa5df258, 0xfa2c0437, 0xf559f167, 0xeb741339, 0x143a0454, 0x0660013f,
+ 0xfb55f36a, 0x053f064b, 0xfd5aff65, 0x0337fc4f, 0xfe4bf461, 0xf932013c,
+ 0x0029003f, 0x003f003f, 0xf7530456, 0x0061f948, 0x0d29033e, 0x0722f758,
+ 0xec7fdc7f, 0xef5bf25f, 0xe754e756, 0xf459ef5b, 0xe17ff24c, 0xee67f35a,
+ 0xdb7f0b50, 0x054c0254, 0x054efa37, 0x043df253, 0xdb7ffb4f, 0xf568f55b,
+ 0xe27f0041, 0xfe4f0048, 0xfc5cfa38, 0x0344f847, 0xf362fc56, 0xf458fb52,
+ 0xfd48fc43, 0xf848f059, 0xf745ff3b, 0x05420439, 0xfc47fe47, 0x023aff4a,
+ 0xfc2cff45, 0x003ef933, 0xfc2ffa2a, 0xfd29fa35, 0x084cf74e, 0xf5530934,
+ 0x0043fb5a, 0x0143f148, 0xfb4bf850, 0xeb53eb40, 0xf31fe740, 0xe35e094b,
+ 0x113ff84a, 0xfb23fe1b, 0x0d5b0341, 0xf945084d, 0xf642033e, 0xfd44ec51,
+ 0x001e0107, 0xfd17eb4a, 0x1042e97c, 0x11252cee, 0x32deea7f, 0x0427002a,
+ 0x07220b1d, 0x081f0625, 0x072a0328, 0x08210d2b, 0x0d24042f, 0x0337023a,
+ 0x063c082c, 0x0b2c0e2a, 0x07300438, 0x04340d25, 0x0931133a, 0x0a300c2d,
+ 0x00451421, 0x083f23ee, 0x21e71cfd, 0x180a1b00, 0x22f234d4, 0x27e81311,
+ 0x1f19241d, 0x1821220f, 0x1e141649, 0x1422131f, 0x1b2c1310, 0x0f240f24,
+ 0x151c1915, 0x1e141f0c, 0x1b10182a, 0x005d0e38, 0x0f391a26, 0xe87fe873,
+ 0xea52f73e, 0x0035003b, 0xf255f359, 0xf35ef55c, 0xe37feb64, 0xf239f443,
+ 0xf547f64d, 0xeb55f058, 0xe968f162, 0xdb7ff652, 0xf830f83d, 0xf842f946,
+ 0xf24bf64f, 0xf753f45c, 0xee6cfc4f, 0xea45f04b, 0xfe3a013a, 0xf34ef753,
+ 0xfc51f363, 0xf351fa26, 0xf33efa3a, 0xfe3bf049, 0xf64cf356, 0xf753f657,
+ 0x0000ea7f, 0xe77fe778, 0xe57fed72, 0xe975e776, 0xe675e871, 0xe476e178,
+ 0xdb7cf65e, 0xf166f663, 0xf36ace7f, 0xfb5c1139, 0xfb56f35e, 0xf45bfe4d,
+ 0x0047ff49, 0x0440f951, 0x05400f39, 0x01430044, 0xf6430144, 0x004d0240,
+ 0x0044fb4e, 0x0737053b, 0x02410e36, 0x0f2c053c, 0x0246fe4c, 0xee560c46,
+ 0x0540f446, 0x0b370538, 0x00450241, 0xfa4a0536, 0x0736fa4c, 0xf552fe4d,
+ 0xfe4d192a, 0x11f310f7, 0x11f41beb, 0x25e229d8, 0x2ad730d1, 0x27e02ed8,
+ 0x34cd2ed7, 0x34d92bed, 0x200b3dc9, 0x38d23ece, 0x51bd2dec, 0x23fe1c0f,
+ 0x22012701, 0x1e111426, 0x122d0f36, 0x004f24f0, 0x25f225ef, 0x2001220f,
+ 0x1d0f1819, 0x22161f10, 0x23121f1c, 0x2129241c, 0x1b2f153e, 0x121f131a,
+ 0x24181817, 0x1b10181e, 0x1f1d1629, 0x162a103c, 0x0f340e3c, 0x034ef07b,
+ 0x15351638, 0x193d1521, 0x1332113d, 0xfd4ef84a, 0xf748f648, 0xee4bf447,
+ 0xf53ffb46, 0xef4bf248, 0xf043f835, 0xf23bf734, 0xf54409fe, 0x1ef61ffc,
+ 0x21ff2107, 0x1f0c2517, 0x1f261440, 0xf747f925, 0xf82cf531, 0xf638f43b,
+ 0xf83ff743, 0xfa44f64f, 0xfd4ef84a, 0xf748f648, 0xee4bf447, 0xf53ffb46,
+ 0xef4bf248, 0xf043f835, 0xf23bf734, 0xf54409fe, 0x1ef61ffc, 0x21ff2107,
+ 0x1f0c2517, 0x1f261440
+};
+
+static void
+assemble_scaling_list(struct hantro_ctx *ctx)
+{
+ const struct hantro_h264_dec_ctrls *ctrls = &ctx->h264_dec.ctrls;
+ const struct v4l2_ctrl_h264_scaling_matrix *scaling = ctrls->scaling;
+ const struct v4l2_ctrl_h264_pps *pps = ctrls->pps;
+ const size_t num_list_4x4 = ARRAY_SIZE(scaling->scaling_list_4x4);
+ const size_t list_len_4x4 = ARRAY_SIZE(scaling->scaling_list_4x4[0]);
+ const size_t list_len_8x8 = ARRAY_SIZE(scaling->scaling_list_8x8[0]);
+ struct hantro_h264_dec_priv_tbl *tbl = ctx->h264_dec.priv.cpu;
+ u32 *dst = (u32 *)tbl->scaling_list;
+ const u32 *src;
+ int i, j;
+
+ if (!(pps->flags & V4L2_H264_PPS_FLAG_SCALING_MATRIX_PRESENT))
+ return;
+
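+	/*
+	 * The lists are copied with every 32-bit word byte-swapped, matching
+	 * the order in which the hardware consumes the scaling list bytes.
+	 */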
+ for (i = 0; i < num_list_4x4; i++) {
+ src = (u32 *)&scaling->scaling_list_4x4[i];
+ for (j = 0; j < list_len_4x4 / 4; j++)
+ *dst++ = swab32(src[j]);
+ }
+
+ /* Only Intra/Inter Y lists */
+ for (i = 0; i < 2; i++) {
+ src = (u32 *)&scaling->scaling_list_8x8[i];
+ for (j = 0; j < list_len_8x8 / 4; j++)
+ *dst++ = swab32(src[j]);
+ }
+}
+
+static void prepare_table(struct hantro_ctx *ctx)
+{
+ const struct hantro_h264_dec_ctrls *ctrls = &ctx->h264_dec.ctrls;
+ const struct v4l2_ctrl_h264_decode_params *dec_param = ctrls->decode;
+ const struct v4l2_ctrl_h264_sps *sps = ctrls->sps;
+ struct hantro_h264_dec_priv_tbl *tbl = ctx->h264_dec.priv.cpu;
+ const struct v4l2_h264_dpb_entry *dpb = ctx->h264_dec.dpb;
+ u32 dpb_longterm = 0;
+ u32 dpb_valid = 0;
+ int i;
+
+ for (i = 0; i < HANTRO_H264_DPB_SIZE; ++i) {
+ tbl->poc[i * 2] = dpb[i].top_field_order_cnt;
+ tbl->poc[i * 2 + 1] = dpb[i].bottom_field_order_cnt;
+
+ if (!(dpb[i].flags & V4L2_H264_DPB_ENTRY_FLAG_VALID))
+ continue;
+
+ /*
+ * Set up bit maps of valid and long term DPBs.
+ * NOTE: The bits are reversed, i.e. MSb is DPB 0. For frame
+		 * decoding, bits 31 to 16 are used, while for field decoding,
+ * all bits are used, with bit 31 being a top field, 30 a bottom
+ * field and so on.
+ */
+ if (dec_param->flags & V4L2_H264_DECODE_PARAM_FLAG_FIELD_PIC) {
+ if (dpb[i].fields & V4L2_H264_TOP_FIELD_REF)
+ dpb_valid |= REF_BIT(i * 2);
+
+ if (dpb[i].fields & V4L2_H264_BOTTOM_FIELD_REF)
+ dpb_valid |= REF_BIT(i * 2 + 1);
+
+ if (dpb[i].flags & V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM) {
+ dpb_longterm |= REF_BIT(i * 2);
+ dpb_longterm |= REF_BIT(i * 2 + 1);
+ }
+ } else {
+ dpb_valid |= REF_BIT(i);
+
+ if (dpb[i].flags & V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM)
+ dpb_longterm |= REF_BIT(i);
+ }
+ }
+ ctx->h264_dec.dpb_valid = dpb_valid;
+ ctx->h264_dec.dpb_longterm = dpb_longterm;
+
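+	/*
+	 * The last two POC entries describe the current picture: a single
+	 * order count when decoding a field picture or a non-MBAFF frame,
+	 * or both field order counts otherwise.
+	 */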
+ if ((dec_param->flags & V4L2_H264_DECODE_PARAM_FLAG_FIELD_PIC) ||
+ !(sps->flags & V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD)) {
+ tbl->poc[32] = ctx->h264_dec.cur_poc;
+ tbl->poc[33] = 0;
+ } else {
+ tbl->poc[32] = dec_param->top_field_order_cnt;
+ tbl->poc[33] = dec_param->bottom_field_order_cnt;
+ }
+
+ assemble_scaling_list(ctx);
+}
+
+static bool dpb_entry_match(const struct v4l2_h264_dpb_entry *a,
+ const struct v4l2_h264_dpb_entry *b)
+{
+ return a->reference_ts == b->reference_ts;
+}
+
+static void update_dpb(struct hantro_ctx *ctx)
+{
+ const struct v4l2_ctrl_h264_decode_params *dec_param;
+ DECLARE_BITMAP(new, ARRAY_SIZE(dec_param->dpb)) = { 0, };
+ DECLARE_BITMAP(used, ARRAY_SIZE(dec_param->dpb)) = { 0, };
+ unsigned int i, j;
+
+ dec_param = ctx->h264_dec.ctrls.decode;
+
+ /* Disable all entries by default. */
+ for (i = 0; i < ARRAY_SIZE(ctx->h264_dec.dpb); i++)
+ ctx->h264_dec.dpb[i].flags = 0;
+
+ /* Try to match new DPB entries with existing ones by their POCs. */
+ for (i = 0; i < ARRAY_SIZE(dec_param->dpb); i++) {
+ const struct v4l2_h264_dpb_entry *ndpb = &dec_param->dpb[i];
+
+ if (!(ndpb->flags & V4L2_H264_DPB_ENTRY_FLAG_VALID))
+ continue;
+
+ /*
+ * To cut off some comparisons, iterate only on target DPB
+ * entries which are not used yet.
+ */
+ for_each_clear_bit(j, used, ARRAY_SIZE(ctx->h264_dec.dpb)) {
+ struct v4l2_h264_dpb_entry *cdpb;
+
+ cdpb = &ctx->h264_dec.dpb[j];
+ if (!dpb_entry_match(cdpb, ndpb))
+ continue;
+
+ *cdpb = *ndpb;
+ set_bit(j, used);
+ break;
+ }
+
+ if (j == ARRAY_SIZE(ctx->h264_dec.dpb))
+ set_bit(i, new);
+ }
+
+ /* For entries that could not be matched, use remaining free slots. */
+ for_each_set_bit(i, new, ARRAY_SIZE(dec_param->dpb)) {
+ const struct v4l2_h264_dpb_entry *ndpb = &dec_param->dpb[i];
+ struct v4l2_h264_dpb_entry *cdpb;
+
+ /*
+		 * Both arrays are of the same size, so there is no way we can
+		 * run out of space in the target array unless something is
+		 * buggy.
+ */
+ j = find_first_zero_bit(used, ARRAY_SIZE(ctx->h264_dec.dpb));
+ if (WARN_ON(j >= ARRAY_SIZE(ctx->h264_dec.dpb)))
+ return;
+
+ cdpb = &ctx->h264_dec.dpb[j];
+ *cdpb = *ndpb;
+ set_bit(j, used);
+ }
+}
+
+dma_addr_t hantro_h264_get_ref_buf(struct hantro_ctx *ctx,
+ unsigned int dpb_idx)
+{
+ struct v4l2_h264_dpb_entry *dpb = ctx->h264_dec.dpb;
+ dma_addr_t dma_addr = 0;
+ s32 cur_poc = ctx->h264_dec.cur_poc;
+ u32 flags;
+
+ if (dpb[dpb_idx].flags & V4L2_H264_DPB_ENTRY_FLAG_ACTIVE)
+ dma_addr = hantro_get_ref(ctx, dpb[dpb_idx].reference_ts);
+
+ if (!dma_addr) {
+ struct vb2_v4l2_buffer *dst_buf;
+ struct vb2_buffer *buf;
+
+ /*
+		 * If a DPB entry is unused or invalid, the address of the
+		 * current destination buffer is returned instead.
+ */
+ dst_buf = hantro_get_dst_buf(ctx);
+ buf = &dst_buf->vb2_buf;
+ dma_addr = hantro_get_dec_buf_addr(ctx, buf);
+ }
+
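+	/*
+	 * Reference addresses are sufficiently aligned for the two low bits
+	 * to carry per-reference flags: bit 1 marks a field reference and
+	 * bit 0 is set when the top field order count is the one closer to
+	 * the current POC.
+	 */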
+ flags = dpb[dpb_idx].flags & V4L2_H264_DPB_ENTRY_FLAG_FIELD ? 0x2 : 0;
+ flags |= abs(dpb[dpb_idx].top_field_order_cnt - cur_poc) <
+ abs(dpb[dpb_idx].bottom_field_order_cnt - cur_poc) ?
+ 0x1 : 0;
+
+ return dma_addr | flags;
+}
+
+u16 hantro_h264_get_ref_nbr(struct hantro_ctx *ctx, unsigned int dpb_idx)
+{
+ const struct v4l2_h264_dpb_entry *dpb = &ctx->h264_dec.dpb[dpb_idx];
+
+ if (!(dpb->flags & V4L2_H264_DPB_ENTRY_FLAG_ACTIVE))
+ return 0;
+ return dpb->frame_num;
+}
+
+/*
+ * Removes all references with the same parity as the current picture from the
+ * reference list. The remaining list will have references with the opposite
+ * parity. This is effectively a deduplication of references since each buffer
+ * stores two fields. For this reason, each buffer is found twice in the
+ * reference list.
+ *
+ * This technique has been chosen through trial and error: this simple
+ * approach resulted in the highest conformance score. Note that it may yield
+ * worse quality when the opposite-parity reference field has been lost. If
+ * this becomes a problem in the future, it should be possible to add a
+ * preprocessing step that identifies unpaired fields and avoids removing
+ * them.
+ */
+static void deduplicate_reflist(struct v4l2_h264_reflist_builder *b,
+ struct v4l2_h264_reference *reflist)
+{
+ int write_idx = 0;
+ int i;
+
+ if (b->cur_pic_fields == V4L2_H264_FRAME_REF) {
+ write_idx = b->num_valid;
+ goto done;
+ }
+
+	for (i = 0; i < b->num_valid; i++) {
+		if (reflist[i].fields != b->cur_pic_fields)
+			reflist[write_idx++] = reflist[i];
+	}
+
+done:
+ /* Should not happen unless we have a bug in the reflist builder. */
+ if (WARN_ON(write_idx > 16))
+ write_idx = 16;
+
+	/* Clear the remaining entries; some streams fail otherwise. */
+ for (; write_idx < 16; write_idx++)
+ reflist[write_idx].index = 15;
+}
+
+int hantro_h264_dec_prepare_run(struct hantro_ctx *ctx)
+{
+ struct hantro_h264_dec_hw_ctx *h264_ctx = &ctx->h264_dec;
+ struct hantro_h264_dec_ctrls *ctrls = &h264_ctx->ctrls;
+ struct v4l2_h264_reflist_builder reflist_builder;
+
+ hantro_start_prepare_run(ctx);
+
+ ctrls->scaling =
+ hantro_get_ctrl(ctx, V4L2_CID_STATELESS_H264_SCALING_MATRIX);
+ if (WARN_ON(!ctrls->scaling))
+ return -EINVAL;
+
+ ctrls->decode =
+ hantro_get_ctrl(ctx, V4L2_CID_STATELESS_H264_DECODE_PARAMS);
+ if (WARN_ON(!ctrls->decode))
+ return -EINVAL;
+
+ ctrls->sps =
+ hantro_get_ctrl(ctx, V4L2_CID_STATELESS_H264_SPS);
+ if (WARN_ON(!ctrls->sps))
+ return -EINVAL;
+
+ ctrls->pps =
+ hantro_get_ctrl(ctx, V4L2_CID_STATELESS_H264_PPS);
+ if (WARN_ON(!ctrls->pps))
+ return -EINVAL;
+
+ /* Update the DPB with new refs. */
+ update_dpb(ctx);
+
+ /* Build the P/B{0,1} ref lists. */
+ v4l2_h264_init_reflist_builder(&reflist_builder, ctrls->decode,
+ ctrls->sps, ctx->h264_dec.dpb);
+ h264_ctx->cur_poc = reflist_builder.cur_pic_order_count;
+
+ /* Prepare data in memory. */
+ prepare_table(ctx);
+
+ v4l2_h264_build_p_ref_list(&reflist_builder, h264_ctx->reflists.p);
+ v4l2_h264_build_b_ref_lists(&reflist_builder, h264_ctx->reflists.b0,
+ h264_ctx->reflists.b1);
+
+ /*
+	 * Reduce the ref lists to at most 16 entries; in field mode the
+	 * Hantro hardware deduces the actual picture lists from the dpb_valid
+	 * and dpb_longterm bitmaps along with the current frame parity.
+ */
+ if (reflist_builder.cur_pic_fields != V4L2_H264_FRAME_REF) {
+ deduplicate_reflist(&reflist_builder, h264_ctx->reflists.p);
+ deduplicate_reflist(&reflist_builder, h264_ctx->reflists.b0);
+ deduplicate_reflist(&reflist_builder, h264_ctx->reflists.b1);
+ }
+
+ return 0;
+}
+
+void hantro_h264_dec_exit(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ struct hantro_h264_dec_hw_ctx *h264_dec = &ctx->h264_dec;
+ struct hantro_aux_buf *priv = &h264_dec->priv;
+
+ dma_free_coherent(vpu->dev, priv->size, priv->cpu, priv->dma);
+}
+
+int hantro_h264_dec_init(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ struct hantro_h264_dec_hw_ctx *h264_dec = &ctx->h264_dec;
+ struct hantro_aux_buf *priv = &h264_dec->priv;
+ struct hantro_h264_dec_priv_tbl *tbl;
+
+ priv->cpu = dma_alloc_coherent(vpu->dev, sizeof(*tbl), &priv->dma,
+ GFP_KERNEL);
+ if (!priv->cpu)
+ return -ENOMEM;
+
+ priv->size = sizeof(*tbl);
+ tbl = priv->cpu;
+ memcpy(tbl->cabac_table, h264_cabac_table, sizeof(tbl->cabac_table));
+
+ return 0;
+}
diff --git a/drivers/media/platform/verisilicon/hantro_hevc.c b/drivers/media/platform/verisilicon/hantro_hevc.c
new file mode 100644
index 0000000000..2c14330bc5
--- /dev/null
+++ b/drivers/media/platform/verisilicon/hantro_hevc.c
@@ -0,0 +1,279 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hantro VPU HEVC codec driver
+ *
+ * Copyright (C) 2020 Safran Passenger Innovations LLC
+ */
+
+#include <linux/types.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "hantro.h"
+#include "hantro_hw.h"
+
+#define VERT_FILTER_RAM_SIZE 8 /* bytes per pixel row */
+/*
+ * BSD control data of current picture at tile border
+ * 128 bits per 4x4 tile = 128/(8*4) bytes per row
+ */
+#define BSD_CTRL_RAM_SIZE 4 /* bytes per pixel row */
+/* tile border coefficients of filter */
+#define VERT_SAO_RAM_SIZE 48 /* bytes per pixel row */
+
+#define SCALING_LIST_SIZE (16 * 64)
+
+#define MAX_TILE_COLS 20
+#define MAX_TILE_ROWS 22
+
+void hantro_hevc_ref_init(struct hantro_ctx *ctx)
+{
+ struct hantro_hevc_dec_hw_ctx *hevc_dec = &ctx->hevc_dec;
+
+ hevc_dec->ref_bufs_used = 0;
+}
+
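+/*
+ * Reference buffers are tracked in a small POC-indexed table: each slot has
+ * a matching bit in ref_bufs_used, set while the buffer backs a reference
+ * picture of the current frame.
+ */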
+dma_addr_t hantro_hevc_get_ref_buf(struct hantro_ctx *ctx,
+ s32 poc)
+{
+ struct hantro_hevc_dec_hw_ctx *hevc_dec = &ctx->hevc_dec;
+ int i;
+
+ /* Find the reference buffer in already known ones */
+ for (i = 0; i < NUM_REF_PICTURES; i++) {
+ if (hevc_dec->ref_bufs_poc[i] == poc) {
+ hevc_dec->ref_bufs_used |= 1 << i;
+ return hevc_dec->ref_bufs[i].dma;
+ }
+ }
+
+ return 0;
+}
+
+int hantro_hevc_add_ref_buf(struct hantro_ctx *ctx, int poc, dma_addr_t addr)
+{
+ struct hantro_hevc_dec_hw_ctx *hevc_dec = &ctx->hevc_dec;
+ int i;
+
+ /* Add a new reference buffer */
+ for (i = 0; i < NUM_REF_PICTURES; i++) {
+ if (!(hevc_dec->ref_bufs_used & 1 << i)) {
+ hevc_dec->ref_bufs_used |= 1 << i;
+ hevc_dec->ref_bufs_poc[i] = poc;
+ hevc_dec->ref_bufs[i].dma = addr;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int tile_buffer_reallocate(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ struct hantro_hevc_dec_hw_ctx *hevc_dec = &ctx->hevc_dec;
+ const struct hantro_hevc_dec_ctrls *ctrls = &ctx->hevc_dec.ctrls;
+ const struct v4l2_ctrl_hevc_pps *pps = ctrls->pps;
+ const struct v4l2_ctrl_hevc_sps *sps = ctrls->sps;
+ unsigned int num_tile_cols = pps->num_tile_columns_minus1 + 1;
+ unsigned int height64 = (sps->pic_height_in_luma_samples + 63) & ~63;
+ unsigned int size;
+
+ if (num_tile_cols <= 1 ||
+ num_tile_cols <= hevc_dec->num_tile_cols_allocated)
+ return 0;
+
+ /* Need to reallocate due to tiles passed via PPS */
+ if (hevc_dec->tile_filter.cpu) {
+ dma_free_coherent(vpu->dev, hevc_dec->tile_filter.size,
+ hevc_dec->tile_filter.cpu,
+ hevc_dec->tile_filter.dma);
+ hevc_dec->tile_filter.cpu = NULL;
+ }
+
+ if (hevc_dec->tile_sao.cpu) {
+ dma_free_coherent(vpu->dev, hevc_dec->tile_sao.size,
+ hevc_dec->tile_sao.cpu,
+ hevc_dec->tile_sao.dma);
+ hevc_dec->tile_sao.cpu = NULL;
+ }
+
+ if (hevc_dec->tile_bsd.cpu) {
+ dma_free_coherent(vpu->dev, hevc_dec->tile_bsd.size,
+ hevc_dec->tile_bsd.cpu,
+ hevc_dec->tile_bsd.dma);
+ hevc_dec->tile_bsd.cpu = NULL;
+ }
+
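+	/*
+	 * The per-tile-column edge buffers scale with the 64-aligned picture
+	 * height, the number of internal tile column borders and, for the
+	 * filter and SAO buffers, the bit depth.
+	 */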
+ size = (VERT_FILTER_RAM_SIZE * height64 * (num_tile_cols - 1) * ctx->bit_depth) / 8;
+ hevc_dec->tile_filter.cpu = dma_alloc_coherent(vpu->dev, size,
+ &hevc_dec->tile_filter.dma,
+ GFP_KERNEL);
+ if (!hevc_dec->tile_filter.cpu)
+ return -ENOMEM;
+ hevc_dec->tile_filter.size = size;
+
+ size = (VERT_SAO_RAM_SIZE * height64 * (num_tile_cols - 1) * ctx->bit_depth) / 8;
+ hevc_dec->tile_sao.cpu = dma_alloc_coherent(vpu->dev, size,
+ &hevc_dec->tile_sao.dma,
+ GFP_KERNEL);
+ if (!hevc_dec->tile_sao.cpu)
+ goto err_free_tile_buffers;
+ hevc_dec->tile_sao.size = size;
+
+ size = BSD_CTRL_RAM_SIZE * height64 * (num_tile_cols - 1);
+ hevc_dec->tile_bsd.cpu = dma_alloc_coherent(vpu->dev, size,
+ &hevc_dec->tile_bsd.dma,
+ GFP_KERNEL);
+ if (!hevc_dec->tile_bsd.cpu)
+ goto err_free_sao_buffers;
+ hevc_dec->tile_bsd.size = size;
+
+ hevc_dec->num_tile_cols_allocated = num_tile_cols;
+
+ return 0;
+
+err_free_sao_buffers:
+ if (hevc_dec->tile_sao.cpu)
+ dma_free_coherent(vpu->dev, hevc_dec->tile_sao.size,
+ hevc_dec->tile_sao.cpu,
+ hevc_dec->tile_sao.dma);
+ hevc_dec->tile_sao.cpu = NULL;
+
+err_free_tile_buffers:
+ if (hevc_dec->tile_filter.cpu)
+ dma_free_coherent(vpu->dev, hevc_dec->tile_filter.size,
+ hevc_dec->tile_filter.cpu,
+ hevc_dec->tile_filter.dma);
+ hevc_dec->tile_filter.cpu = NULL;
+
+ return -ENOMEM;
+}
+
+static int hantro_hevc_validate_sps(struct hantro_ctx *ctx, const struct v4l2_ctrl_hevc_sps *sps)
+{
+ /*
+ * for tile pixel format check if the width and height match
+ * hardware constraints
+ */
+ if (ctx->vpu_dst_fmt->fourcc == V4L2_PIX_FMT_NV12_4L4) {
+ if (ctx->dst_fmt.width !=
+ ALIGN(sps->pic_width_in_luma_samples, ctx->vpu_dst_fmt->frmsize.step_width))
+ return -EINVAL;
+
+ if (ctx->dst_fmt.height !=
+ ALIGN(sps->pic_height_in_luma_samples, ctx->vpu_dst_fmt->frmsize.step_height))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int hantro_hevc_dec_prepare_run(struct hantro_ctx *ctx)
+{
+ struct hantro_hevc_dec_hw_ctx *hevc_ctx = &ctx->hevc_dec;
+ struct hantro_hevc_dec_ctrls *ctrls = &hevc_ctx->ctrls;
+ int ret;
+
+ hantro_start_prepare_run(ctx);
+
+ ctrls->decode_params =
+ hantro_get_ctrl(ctx, V4L2_CID_STATELESS_HEVC_DECODE_PARAMS);
+ if (WARN_ON(!ctrls->decode_params))
+ return -EINVAL;
+
+ ctrls->scaling =
+ hantro_get_ctrl(ctx, V4L2_CID_STATELESS_HEVC_SCALING_MATRIX);
+ if (WARN_ON(!ctrls->scaling))
+ return -EINVAL;
+
+ ctrls->sps =
+ hantro_get_ctrl(ctx, V4L2_CID_STATELESS_HEVC_SPS);
+ if (WARN_ON(!ctrls->sps))
+ return -EINVAL;
+
+ ret = hantro_hevc_validate_sps(ctx, ctrls->sps);
+ if (ret)
+ return ret;
+
+ ctrls->pps =
+ hantro_get_ctrl(ctx, V4L2_CID_STATELESS_HEVC_PPS);
+ if (WARN_ON(!ctrls->pps))
+ return -EINVAL;
+
+ ret = tile_buffer_reallocate(ctx);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+void hantro_hevc_dec_exit(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ struct hantro_hevc_dec_hw_ctx *hevc_dec = &ctx->hevc_dec;
+
+ if (hevc_dec->tile_sizes.cpu)
+ dma_free_coherent(vpu->dev, hevc_dec->tile_sizes.size,
+ hevc_dec->tile_sizes.cpu,
+ hevc_dec->tile_sizes.dma);
+ hevc_dec->tile_sizes.cpu = NULL;
+
+ if (hevc_dec->scaling_lists.cpu)
+ dma_free_coherent(vpu->dev, hevc_dec->scaling_lists.size,
+ hevc_dec->scaling_lists.cpu,
+ hevc_dec->scaling_lists.dma);
+ hevc_dec->scaling_lists.cpu = NULL;
+
+ if (hevc_dec->tile_filter.cpu)
+ dma_free_coherent(vpu->dev, hevc_dec->tile_filter.size,
+ hevc_dec->tile_filter.cpu,
+ hevc_dec->tile_filter.dma);
+ hevc_dec->tile_filter.cpu = NULL;
+
+ if (hevc_dec->tile_sao.cpu)
+ dma_free_coherent(vpu->dev, hevc_dec->tile_sao.size,
+ hevc_dec->tile_sao.cpu,
+ hevc_dec->tile_sao.dma);
+ hevc_dec->tile_sao.cpu = NULL;
+
+ if (hevc_dec->tile_bsd.cpu)
+ dma_free_coherent(vpu->dev, hevc_dec->tile_bsd.size,
+ hevc_dec->tile_bsd.cpu,
+ hevc_dec->tile_bsd.dma);
+ hevc_dec->tile_bsd.cpu = NULL;
+}
+
+int hantro_hevc_dec_init(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ struct hantro_hevc_dec_hw_ctx *hevc_dec = &ctx->hevc_dec;
+ unsigned int size;
+
+ memset(hevc_dec, 0, sizeof(*hevc_dec));
+
+ /*
+ * Maximum number of tiles times width and height (2 bytes each),
+	 * rounded up to the next 16-byte boundary, plus one extra 16-byte
+	 * chunk required by the hardware.
+ */
+ size = round_up(MAX_TILE_COLS * MAX_TILE_ROWS * 4 * sizeof(u16) + 16, 16);
+ hevc_dec->tile_sizes.cpu = dma_alloc_coherent(vpu->dev, size,
+ &hevc_dec->tile_sizes.dma,
+ GFP_KERNEL);
+ if (!hevc_dec->tile_sizes.cpu)
+ return -ENOMEM;
+
+ hevc_dec->tile_sizes.size = size;
+
+ hevc_dec->scaling_lists.cpu = dma_alloc_coherent(vpu->dev, SCALING_LIST_SIZE,
+ &hevc_dec->scaling_lists.dma,
+ GFP_KERNEL);
+ if (!hevc_dec->scaling_lists.cpu)
+ return -ENOMEM;
+
+ hevc_dec->scaling_lists.size = SCALING_LIST_SIZE;
+
+ hantro_hevc_ref_init(ctx);
+
+ return 0;
+}
diff --git a/drivers/media/platform/verisilicon/hantro_hw.h b/drivers/media/platform/verisilicon/hantro_hw.h
new file mode 100644
index 0000000000..7f33f7b07c
--- /dev/null
+++ b/drivers/media/platform/verisilicon/hantro_hw.h
@@ -0,0 +1,543 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Hantro VPU codec driver
+ *
+ * Copyright 2018 Google LLC.
+ * Tomasz Figa <tfiga@chromium.org>
+ */
+
+#ifndef HANTRO_HW_H_
+#define HANTRO_HW_H_
+
+#include <linux/interrupt.h>
+#include <linux/v4l2-controls.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-vp9.h>
+#include <media/videobuf2-core.h>
+
+#include "rockchip_av1_entropymode.h"
+#include "rockchip_av1_filmgrain.h"
+
+#define DEC_8190_ALIGN_MASK 0x07U
+
+#define MB_DIM 16
+#define TILE_MB_DIM 4
+#define MB_WIDTH(w) DIV_ROUND_UP(w, MB_DIM)
+#define MB_HEIGHT(h) DIV_ROUND_UP(h, MB_DIM)
+
+#define FMT_MIN_WIDTH 48
+#define FMT_MIN_HEIGHT 48
+#define FMT_HD_WIDTH 1280
+#define FMT_HD_HEIGHT 720
+#define FMT_FHD_WIDTH 1920
+#define FMT_FHD_HEIGHT 1088
+#define FMT_UHD_WIDTH 3840
+#define FMT_UHD_HEIGHT 2160
+#define FMT_4K_WIDTH 4096
+#define FMT_4K_HEIGHT 2304
+
+#define NUM_REF_PICTURES (V4L2_HEVC_DPB_ENTRIES_NUM_MAX + 1)
+
+#define AV1_MAX_FRAME_BUF_COUNT (V4L2_AV1_TOTAL_REFS_PER_FRAME + 1)
+
+struct hantro_dev;
+struct hantro_ctx;
+struct hantro_buf;
+struct hantro_variant;
+
+/**
+ * struct hantro_aux_buf - auxiliary DMA buffer for hardware data
+ *
+ * @cpu: CPU pointer to the buffer.
+ * @dma: DMA address of the buffer.
+ * @size: Size of the buffer.
+ * @attrs: Attributes of the DMA mapping.
+ */
+struct hantro_aux_buf {
+ void *cpu;
+ dma_addr_t dma;
+ size_t size;
+ unsigned long attrs;
+};
+
+/* Max. number of entries in the DPB (HW limitation). */
+#define HANTRO_H264_DPB_SIZE 16
+
+/**
+ * struct hantro_h264_dec_ctrls
+ *
+ * @decode: Decode params
+ * @scaling: Scaling info
+ * @sps: SPS info
+ * @pps: PPS info
+ */
+struct hantro_h264_dec_ctrls {
+ const struct v4l2_ctrl_h264_decode_params *decode;
+ const struct v4l2_ctrl_h264_scaling_matrix *scaling;
+ const struct v4l2_ctrl_h264_sps *sps;
+ const struct v4l2_ctrl_h264_pps *pps;
+};
+
+/**
+ * struct hantro_h264_dec_reflists
+ *
+ * @p: P reflist
+ * @b0: B0 reflist
+ * @b1: B1 reflist
+ */
+struct hantro_h264_dec_reflists {
+ struct v4l2_h264_reference p[V4L2_H264_REF_LIST_LEN];
+ struct v4l2_h264_reference b0[V4L2_H264_REF_LIST_LEN];
+ struct v4l2_h264_reference b1[V4L2_H264_REF_LIST_LEN];
+};
+
+/**
+ * struct hantro_h264_dec_hw_ctx
+ *
+ * @priv: Private auxiliary buffer for hardware.
+ * @dpb: DPB
+ * @reflists: P/B0/B1 reflists
+ * @ctrls: V4L2 controls attached to a run
+ * @dpb_longterm: DPB long-term
+ * @dpb_valid: DPB valid
+ * @cur_poc: Current picture order count
+ */
+struct hantro_h264_dec_hw_ctx {
+ struct hantro_aux_buf priv;
+ struct v4l2_h264_dpb_entry dpb[HANTRO_H264_DPB_SIZE];
+ struct hantro_h264_dec_reflists reflists;
+ struct hantro_h264_dec_ctrls ctrls;
+ u32 dpb_longterm;
+ u32 dpb_valid;
+ s32 cur_poc;
+};
+
+/**
+ * struct hantro_hevc_dec_ctrls
+ * @decode_params: Decode params
+ * @scaling: Scaling matrix
+ * @sps: SPS info
+ * @pps: PPS info
+ * @hevc_hdr_skip_length: the number of bits to skip in the slice segment
+ * header syntax after the 'slice type' token
+ */
+struct hantro_hevc_dec_ctrls {
+ const struct v4l2_ctrl_hevc_decode_params *decode_params;
+ const struct v4l2_ctrl_hevc_scaling_matrix *scaling;
+ const struct v4l2_ctrl_hevc_sps *sps;
+ const struct v4l2_ctrl_hevc_pps *pps;
+ u32 hevc_hdr_skip_length;
+};
+
+/**
+ * struct hantro_hevc_dec_hw_ctx
+ * @tile_sizes: Tile sizes buffer
+ * @tile_filter: Tile vertical filter buffer
+ * @tile_sao: Tile SAO buffer
+ * @tile_bsd: Tile BSD control buffer
+ * @ref_bufs: Internal reference buffers
+ * @scaling_lists: Scaling lists buffer
+ * @ref_bufs_poc: Internal reference buffers picture order count
+ * @ref_bufs_used: Bitfield of used reference buffers
+ * @ctrls: V4L2 controls attached to a run
+ * @num_tile_cols_allocated: number of tile columns the buffers were allocated for
+ */
+struct hantro_hevc_dec_hw_ctx {
+ struct hantro_aux_buf tile_sizes;
+ struct hantro_aux_buf tile_filter;
+ struct hantro_aux_buf tile_sao;
+ struct hantro_aux_buf tile_bsd;
+ struct hantro_aux_buf ref_bufs[NUM_REF_PICTURES];
+ struct hantro_aux_buf scaling_lists;
+ s32 ref_bufs_poc[NUM_REF_PICTURES];
+ u32 ref_bufs_used;
+ struct hantro_hevc_dec_ctrls ctrls;
+ unsigned int num_tile_cols_allocated;
+};
+
+/**
+ * struct hantro_mpeg2_dec_hw_ctx
+ *
+ * @qtable: Quantization table
+ */
+struct hantro_mpeg2_dec_hw_ctx {
+ struct hantro_aux_buf qtable;
+};
+
+/**
+ * struct hantro_vp8_dec_hw_ctx
+ *
+ * @segment_map: Segment map buffer.
+ * @prob_tbl: Probability table buffer.
+ */
+struct hantro_vp8_dec_hw_ctx {
+ struct hantro_aux_buf segment_map;
+ struct hantro_aux_buf prob_tbl;
+};
+
+/**
+ * struct hantro_vp9_frame_info - vp9 frame information
+ *
+ * @valid: frame info valid flag
+ * @frame_context_idx: index of frame context
+ * @reference_mode: inter prediction type
+ * @tx_mode: transform mode
+ * @interpolation_filter: filter selection for inter prediction
+ * @flags: frame flags
+ * @timestamp: frame timestamp
+ */
+struct hantro_vp9_frame_info {
+ u32 valid : 1;
+ u32 frame_context_idx : 2;
+ u32 reference_mode : 2;
+ u32 tx_mode : 3;
+ u32 interpolation_filter : 3;
+ u32 flags;
+ u64 timestamp;
+};
+
+#define MAX_SB_COLS 64
+#define MAX_SB_ROWS 34
+
+/**
+ * struct hantro_vp9_dec_hw_ctx - vp9 decoder hardware context
+ *
+ * @tile_edge: auxiliary DMA buffer for tile edge processing
+ * @segment_map: auxiliary DMA buffer for segment map
+ * @misc: auxiliary DMA buffer for tile info, probabilities and hw counters
+ * @cnts: vp9 library struct for abstracting hw counters access
+ * @probability_tables: VP9 probability tables implied by the spec
+ * @frame_context: VP9 frame contexts
+ * @cur: current frame information
+ * @last: last frame information
+ * @bsd_ctrl_offset: bsd offset into tile_edge
+ * @segment_map_size: size of segment map
+ * @ctx_counters_offset: hw counters offset into misc
+ * @tile_info_offset: tile info offset into misc
+ * @tile_r_info: per-tile information array
+ * @tile_c_info: per-tile information array
+ * @last_tile_r: last number of tile rows
+ * @last_tile_c: last number of tile cols
+ * @last_sbs_r: last number of superblock rows
+ * @last_sbs_c: last number of superblock cols
+ * @active_segment: index of the active segment (alternating between 0 and 1)
+ * @feature_enabled: segmentation feature enabled flags
+ * @feature_data: segmentation feature data
+ */
+struct hantro_vp9_dec_hw_ctx {
+ struct hantro_aux_buf tile_edge;
+ struct hantro_aux_buf segment_map;
+ struct hantro_aux_buf misc;
+ struct v4l2_vp9_frame_symbol_counts cnts;
+ struct v4l2_vp9_frame_context probability_tables;
+ struct v4l2_vp9_frame_context frame_context[4];
+ struct hantro_vp9_frame_info cur;
+ struct hantro_vp9_frame_info last;
+
+ unsigned int bsd_ctrl_offset;
+ unsigned int segment_map_size;
+ unsigned int ctx_counters_offset;
+ unsigned int tile_info_offset;
+
+ unsigned short tile_r_info[MAX_SB_ROWS];
+ unsigned short tile_c_info[MAX_SB_COLS];
+ unsigned int last_tile_r;
+ unsigned int last_tile_c;
+ unsigned int last_sbs_r;
+ unsigned int last_sbs_c;
+
+ unsigned int active_segment;
+ u8 feature_enabled[8];
+ s16 feature_data[8][4];
+};
+
+/**
+ * struct hantro_av1_dec_ctrls - av1 decoding control parameters
+ * @sequence: AV1 Sequence
+ * @tile_group_entry: AV1 Tile Group entry
+ * @frame: AV1 Frame Header OBU
+ * @film_grain: AV1 Film Grain
+ */
+struct hantro_av1_dec_ctrls {
+ const struct v4l2_ctrl_av1_sequence *sequence;
+ const struct v4l2_ctrl_av1_tile_group_entry *tile_group_entry;
+ const struct v4l2_ctrl_av1_frame *frame;
+ const struct v4l2_ctrl_av1_film_grain *film_grain;
+};
+
+struct hantro_av1_frame_ref {
+ int width;
+ int height;
+ int mi_cols;
+ int mi_rows;
+ u64 timestamp;
+ enum v4l2_av1_frame_type frame_type;
+ bool used;
+ u32 order_hint;
+ u32 order_hints[V4L2_AV1_TOTAL_REFS_PER_FRAME];
+ struct vb2_v4l2_buffer *vb2_ref;
+};
+
+/**
+ * struct hantro_av1_dec_hw_ctx - av1 decoder hardware context
+ * @db_data_col: db tile col data buffer
+ * @db_ctrl_col: db tile col ctrl buffer
+ * @cdef_col: cdef tile col buffer
+ * @sr_col: sr tile col buffer
+ * @lr_col: lr tile col buffer
+ * @global_model: global model buffer
+ * @tile_info: tile info buffer
+ * @segment: segmentation info buffer
+ * @film_grain: film grain buffer
+ * @prob_tbl: probability table
+ * @prob_tbl_out: probability table output
+ * @tile_buf: tile buffer
+ * @ctrls: V4L2 controls attached to a run
+ * @frame_refs: reference frames info slots
+ * @ref_frame_sign_bias: array of sign bias
+ * @num_tile_cols_allocated: number of allocated tiles
+ * @cdfs: current probabilities structure
+ * @cdfs_ndvc: current mv probabilities structure
+ * @default_cdfs: default probabilities structure
+ * @default_cdfs_ndvc: default mv probabilities structure
+ * @cdfs_last: stored probabilities structures
+ * @cdfs_last_ndvc: stored mv probabilities structures
+ * @current_frame_index: index of the current frame in the frame_refs array
+ */
+struct hantro_av1_dec_hw_ctx {
+ struct hantro_aux_buf db_data_col;
+ struct hantro_aux_buf db_ctrl_col;
+ struct hantro_aux_buf cdef_col;
+ struct hantro_aux_buf sr_col;
+ struct hantro_aux_buf lr_col;
+ struct hantro_aux_buf global_model;
+ struct hantro_aux_buf tile_info;
+ struct hantro_aux_buf segment;
+ struct hantro_aux_buf film_grain;
+ struct hantro_aux_buf prob_tbl;
+ struct hantro_aux_buf prob_tbl_out;
+ struct hantro_aux_buf tile_buf;
+ struct hantro_av1_dec_ctrls ctrls;
+ struct hantro_av1_frame_ref frame_refs[AV1_MAX_FRAME_BUF_COUNT];
+ u32 ref_frame_sign_bias[V4L2_AV1_TOTAL_REFS_PER_FRAME];
+ unsigned int num_tile_cols_allocated;
+ struct av1cdfs *cdfs;
+ struct mvcdfs *cdfs_ndvc;
+ struct av1cdfs default_cdfs;
+ struct mvcdfs default_cdfs_ndvc;
+ struct av1cdfs cdfs_last[NUM_REF_FRAMES];
+ struct mvcdfs cdfs_last_ndvc[NUM_REF_FRAMES];
+ int current_frame_index;
+};
+
+/**
+ * struct hantro_postproc_ctx - post-processor context
+ *
+ * @dec_q: Reference buffers, in decoder format.
+ */
+struct hantro_postproc_ctx {
+ struct hantro_aux_buf dec_q[VB2_MAX_FRAME];
+};
+
+/**
+ * struct hantro_postproc_ops - post-processor operations
+ *
+ * @enable: Enable the post-processor block. Optional.
+ * @disable: Disable the post-processor block. Optional.
+ * @enum_framesizes: Enumerate possible scaled output formats.
+ * Returns zero if OK, a negative value in error cases.
+ * Optional.
+ */
+struct hantro_postproc_ops {
+ void (*enable)(struct hantro_ctx *ctx);
+ void (*disable)(struct hantro_ctx *ctx);
+ int (*enum_framesizes)(struct hantro_ctx *ctx, struct v4l2_frmsizeenum *fsize);
+};
+
+/**
+ * struct hantro_codec_ops - codec mode specific operations
+ *
+ * @init: If needed, can be used for initialization.
+ * Optional and called from process context.
+ * @exit: If needed, can be used to undo the .init phase.
+ * Optional and called from process context.
+ * @run: Start single {en,de}coding job. Called from atomic context
+ * to indicate that a pair of buffers is ready and the hardware
+ * should be programmed and started. Returns zero if OK, a
+ * negative value in error cases.
+ * @done: Read back processing results and additional data from hardware.
+ * @reset: Reset the hardware in case of a timeout.
+ */
+struct hantro_codec_ops {
+ int (*init)(struct hantro_ctx *ctx);
+ void (*exit)(struct hantro_ctx *ctx);
+ int (*run)(struct hantro_ctx *ctx);
+ void (*done)(struct hantro_ctx *ctx);
+ void (*reset)(struct hantro_ctx *ctx);
+};
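+
+/*
+ * Illustrative wiring (sketch only): a G1-based MPEG-2 decoder would
+ * typically hook up the helpers declared later in this header like so:
+ *
+ *	static const struct hantro_codec_ops ops = {
+ *		.init = hantro_mpeg2_dec_init,
+ *		.exit = hantro_mpeg2_dec_exit,
+ *		.run = hantro_g1_mpeg2_dec_run,
+ *		.reset = hantro_g1_reset,
+ *	};
+ */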
+
+/**
+ * enum hantro_enc_fmt - source format ID for hardware registers.
+ *
+ * @ROCKCHIP_VPU_ENC_FMT_YUV420P: Y/CbCr 4:2:0 planar format
+ * @ROCKCHIP_VPU_ENC_FMT_YUV420SP: Y/CbCr 4:2:0 semi-planar format
+ * @ROCKCHIP_VPU_ENC_FMT_YUYV422: YUV 4:2:2 packed format (YUYV)
+ * @ROCKCHIP_VPU_ENC_FMT_UYVY422: YUV 4:2:2 packed format (UYVY)
+ */
+enum hantro_enc_fmt {
+ ROCKCHIP_VPU_ENC_FMT_YUV420P = 0,
+ ROCKCHIP_VPU_ENC_FMT_YUV420SP = 1,
+ ROCKCHIP_VPU_ENC_FMT_YUYV422 = 2,
+ ROCKCHIP_VPU_ENC_FMT_UYVY422 = 3,
+};
+
+extern const struct hantro_variant imx8mm_vpu_g1_variant;
+extern const struct hantro_variant imx8mq_vpu_g1_variant;
+extern const struct hantro_variant imx8mq_vpu_g2_variant;
+extern const struct hantro_variant imx8mq_vpu_variant;
+extern const struct hantro_variant px30_vpu_variant;
+extern const struct hantro_variant rk3036_vpu_variant;
+extern const struct hantro_variant rk3066_vpu_variant;
+extern const struct hantro_variant rk3288_vpu_variant;
+extern const struct hantro_variant rk3328_vpu_variant;
+extern const struct hantro_variant rk3399_vpu_variant;
+extern const struct hantro_variant rk3568_vepu_variant;
+extern const struct hantro_variant rk3568_vpu_variant;
+extern const struct hantro_variant rk3588_vpu981_variant;
+extern const struct hantro_variant sama5d4_vdec_variant;
+extern const struct hantro_variant sunxi_vpu_variant;
+
+extern const struct hantro_postproc_ops hantro_g1_postproc_ops;
+extern const struct hantro_postproc_ops hantro_g2_postproc_ops;
+extern const struct hantro_postproc_ops rockchip_vpu981_postproc_ops;
+
+extern const u32 hantro_vp8_dec_mc_filter[8][6];
+
+void hantro_watchdog(struct work_struct *work);
+void hantro_run(struct hantro_ctx *ctx);
+void hantro_irq_done(struct hantro_dev *vpu,
+ enum vb2_buffer_state result);
+void hantro_start_prepare_run(struct hantro_ctx *ctx);
+void hantro_end_prepare_run(struct hantro_ctx *ctx);
+
+irqreturn_t hantro_g1_irq(int irq, void *dev_id);
+void hantro_g1_reset(struct hantro_ctx *ctx);
+
+int hantro_h1_jpeg_enc_run(struct hantro_ctx *ctx);
+int rockchip_vpu2_jpeg_enc_run(struct hantro_ctx *ctx);
+void hantro_h1_jpeg_enc_done(struct hantro_ctx *ctx);
+void rockchip_vpu2_jpeg_enc_done(struct hantro_ctx *ctx);
+
+dma_addr_t hantro_h264_get_ref_buf(struct hantro_ctx *ctx,
+ unsigned int dpb_idx);
+u16 hantro_h264_get_ref_nbr(struct hantro_ctx *ctx,
+ unsigned int dpb_idx);
+int hantro_h264_dec_prepare_run(struct hantro_ctx *ctx);
+int rockchip_vpu2_h264_dec_run(struct hantro_ctx *ctx);
+int hantro_g1_h264_dec_run(struct hantro_ctx *ctx);
+int hantro_h264_dec_init(struct hantro_ctx *ctx);
+void hantro_h264_dec_exit(struct hantro_ctx *ctx);
+
+int hantro_hevc_dec_init(struct hantro_ctx *ctx);
+void hantro_hevc_dec_exit(struct hantro_ctx *ctx);
+int hantro_g2_hevc_dec_run(struct hantro_ctx *ctx);
+int hantro_hevc_dec_prepare_run(struct hantro_ctx *ctx);
+void hantro_hevc_ref_init(struct hantro_ctx *ctx);
+dma_addr_t hantro_hevc_get_ref_buf(struct hantro_ctx *ctx, s32 poc);
+int hantro_hevc_add_ref_buf(struct hantro_ctx *ctx, int poc, dma_addr_t addr);
+
+int rockchip_vpu981_av1_dec_init(struct hantro_ctx *ctx);
+void rockchip_vpu981_av1_dec_exit(struct hantro_ctx *ctx);
+int rockchip_vpu981_av1_dec_run(struct hantro_ctx *ctx);
+void rockchip_vpu981_av1_dec_done(struct hantro_ctx *ctx);
+
+static inline unsigned short hantro_vp9_num_sbs(unsigned short dimension)
+{
+	return DIV_ROUND_UP(dimension, 64);
+}
+
+static inline size_t
+hantro_vp9_mv_size(unsigned int width, unsigned int height)
+{
+ int num_ctbs;
+
+	/*
+	 * Each superblock can hold up to 64 blocks, and each block
+	 * needs a 16-byte motion vector.
+	 */
+ num_ctbs = hantro_vp9_num_sbs(width) * hantro_vp9_num_sbs(height);
+ return (num_ctbs * 64) * 16;
+}
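+
+/*
+ * Worked example (sketch): a 1920x1080 stream has 30 * 17 = 510
+ * superblocks, so the motion vector space amounts to
+ * 510 * 64 * 16 = 522240 bytes.
+ */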
+
+static inline size_t
+hantro_h264_mv_size(unsigned int width, unsigned int height)
+{
+ /*
+	 * A decoded 8-bit 4:2:0 NV12 frame may need up to 448 bytes of
+	 * memory per macroblock, plus an additional 32 bytes on
+	 * multi-core variants.
+ *
+ * The H264 decoder needs extra space on the output buffers
+ * to store motion vectors. This is needed for reference
+ * frames and only if the format is non-post-processed NV12.
+ *
+	 * The memory layout is as follows:
+ *
+ * +---------------------------+
+ * | Y-plane 256 bytes x MBs |
+ * +---------------------------+
+ * | UV-plane 128 bytes x MBs |
+ * +---------------------------+
+ * | MV buffer 64 bytes x MBs |
+ * +---------------------------+
+ * | MC sync 32 bytes |
+ * +---------------------------+
+ */
+ return 64 * MB_WIDTH(width) * MB_WIDTH(height) + 32;
+}
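+
+/*
+ * Worked example (sketch): a 1920x1080 stream covers 120x68 whole
+ * macroblocks, so the extra space amounts to
+ * 64 * 120 * 68 + 32 = 522272 bytes.
+ */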
+
+static inline size_t
+hantro_hevc_mv_size(unsigned int width, unsigned int height)
+{
+ /*
+ * A CTB can be 64x64, 32x32 or 16x16.
+	 * Allocate memory for the worst case: 16x16.
+ */
+ return width * height / 16;
+}
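+
+/*
+ * Worked example (sketch): for a 1920x1080 stream this reserves
+ * 1920 * 1080 / 16 = 129600 bytes.
+ */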
+
+static inline unsigned short hantro_av1_num_sbs(unsigned short dimension)
+{
+ return DIV_ROUND_UP(dimension, 64);
+}
+
+static inline size_t
+hantro_av1_mv_size(unsigned int width, unsigned int height)
+{
+ size_t num_sbs = hantro_av1_num_sbs(width) * hantro_av1_num_sbs(height);
+
+ return ALIGN(num_sbs * 384, 16) * 2 + 512;
+}
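+
+/*
+ * Worked example (sketch): a 1920x1080 stream has 30 * 17 = 510
+ * 64x64 superblocks, so this reserves
+ * ALIGN(510 * 384, 16) * 2 + 512 = 392192 bytes.
+ */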
+
+int hantro_g1_mpeg2_dec_run(struct hantro_ctx *ctx);
+int rockchip_vpu2_mpeg2_dec_run(struct hantro_ctx *ctx);
+void hantro_mpeg2_dec_copy_qtable(u8 *qtable,
+ const struct v4l2_ctrl_mpeg2_quantisation *ctrl);
+int hantro_mpeg2_dec_init(struct hantro_ctx *ctx);
+void hantro_mpeg2_dec_exit(struct hantro_ctx *ctx);
+
+int hantro_g1_vp8_dec_run(struct hantro_ctx *ctx);
+int rockchip_vpu2_vp8_dec_run(struct hantro_ctx *ctx);
+int hantro_vp8_dec_init(struct hantro_ctx *ctx);
+void hantro_vp8_dec_exit(struct hantro_ctx *ctx);
+void hantro_vp8_prob_update(struct hantro_ctx *ctx,
+ const struct v4l2_ctrl_vp8_frame *hdr);
+
+int hantro_g2_vp9_dec_run(struct hantro_ctx *ctx);
+void hantro_g2_vp9_dec_done(struct hantro_ctx *ctx);
+int hantro_vp9_dec_init(struct hantro_ctx *ctx);
+void hantro_vp9_dec_exit(struct hantro_ctx *ctx);
+void hantro_g2_check_idle(struct hantro_dev *vpu);
+irqreturn_t hantro_g2_irq(int irq, void *dev_id);
+
+#endif /* HANTRO_HW_H_ */
diff --git a/drivers/media/platform/verisilicon/hantro_jpeg.c b/drivers/media/platform/verisilicon/hantro_jpeg.c
new file mode 100644
index 0000000000..d07b1b449b
--- /dev/null
+++ b/drivers/media/platform/verisilicon/hantro_jpeg.c
@@ -0,0 +1,348 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) Collabora, Ltd.
+ *
+ * Based on GSPCA and CODA drivers:
+ * Copyright (C) Jean-Francois Moine (http://moinejf.free.fr)
+ * Copyright (C) 2014 Philipp Zabel, Pengutronix
+ */
+
+#include <linux/align.h>
+#include <linux/build_bug.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include "hantro_jpeg.h"
+#include "hantro.h"
+
+#define LUMA_QUANT_OFF 25
+#define CHROMA_QUANT_OFF 90
+#define HEIGHT_OFF 159
+#define WIDTH_OFF 161
+
+#define HUFF_LUMA_DC_OFF 178
+#define HUFF_LUMA_AC_OFF 211
+#define HUFF_CHROMA_DC_OFF 394
+#define HUFF_CHROMA_AC_OFF 427
+
+/*
+ * Default tables from JPEG ITU-T.81
+ * (ISO/IEC 10918-1) Annex K, tables K.1 and K.2.
+ */
+static const unsigned char luma_q_table[] = {
+ 0x10, 0x0b, 0x0a, 0x10, 0x18, 0x28, 0x33, 0x3d,
+ 0x0c, 0x0c, 0x0e, 0x13, 0x1a, 0x3a, 0x3c, 0x37,
+ 0x0e, 0x0d, 0x10, 0x18, 0x28, 0x39, 0x45, 0x38,
+ 0x0e, 0x11, 0x16, 0x1d, 0x33, 0x57, 0x50, 0x3e,
+ 0x12, 0x16, 0x25, 0x38, 0x44, 0x6d, 0x67, 0x4d,
+ 0x18, 0x23, 0x37, 0x40, 0x51, 0x68, 0x71, 0x5c,
+ 0x31, 0x40, 0x4e, 0x57, 0x67, 0x79, 0x78, 0x65,
+ 0x48, 0x5c, 0x5f, 0x62, 0x70, 0x64, 0x67, 0x63
+};
+
+static const unsigned char chroma_q_table[] = {
+ 0x11, 0x12, 0x18, 0x2f, 0x63, 0x63, 0x63, 0x63,
+ 0x12, 0x15, 0x1a, 0x42, 0x63, 0x63, 0x63, 0x63,
+ 0x18, 0x1a, 0x38, 0x63, 0x63, 0x63, 0x63, 0x63,
+ 0x2f, 0x42, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63,
+ 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63,
+ 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63,
+ 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63,
+ 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63
+};
+
+static const unsigned char zigzag[] = {
+ 0, 1, 8, 16, 9, 2, 3, 10,
+ 17, 24, 32, 25, 18, 11, 4, 5,
+ 12, 19, 26, 33, 40, 48, 41, 34,
+ 27, 20, 13, 6, 7, 14, 21, 28,
+ 35, 42, 49, 56, 57, 50, 43, 36,
+ 29, 22, 15, 23, 30, 37, 44, 51,
+ 58, 59, 52, 45, 38, 31, 39, 46,
+ 53, 60, 61, 54, 47, 55, 62, 63
+};
+
+static const u32 hw_reorder[] = {
+ 0, 8, 16, 24, 1, 9, 17, 25,
+ 32, 40, 48, 56, 33, 41, 49, 57,
+ 2, 10, 18, 26, 3, 11, 19, 27,
+ 34, 42, 50, 58, 35, 43, 51, 59,
+ 4, 12, 20, 28, 5, 13, 21, 29,
+ 36, 44, 52, 60, 37, 45, 53, 61,
+ 6, 14, 22, 30, 7, 15, 23, 31,
+ 38, 46, 54, 62, 39, 47, 55, 63
+};
+
+/* Huffman tables are shared with CODA */
+static const unsigned char luma_dc_table[] = {
+ 0x00, 0x01, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b,
+};
+
+static const unsigned char chroma_dc_table[] = {
+ 0x00, 0x03, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b,
+};
+
+static const unsigned char luma_ac_table[] = {
+ 0x00, 0x02, 0x01, 0x03, 0x03, 0x02, 0x04, 0x03,
+ 0x05, 0x05, 0x04, 0x04, 0x00, 0x00, 0x01, 0x7d,
+ 0x01, 0x02, 0x03, 0x00, 0x04, 0x11, 0x05, 0x12,
+ 0x21, 0x31, 0x41, 0x06, 0x13, 0x51, 0x61, 0x07,
+ 0x22, 0x71, 0x14, 0x32, 0x81, 0x91, 0xa1, 0x08,
+ 0x23, 0x42, 0xb1, 0xc1, 0x15, 0x52, 0xd1, 0xf0,
+ 0x24, 0x33, 0x62, 0x72, 0x82, 0x09, 0x0a, 0x16,
+ 0x17, 0x18, 0x19, 0x1a, 0x25, 0x26, 0x27, 0x28,
+ 0x29, 0x2a, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39,
+ 0x3a, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49,
+ 0x4a, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59,
+ 0x5a, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69,
+ 0x6a, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79,
+ 0x7a, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89,
+ 0x8a, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98,
+ 0x99, 0x9a, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7,
+ 0xa8, 0xa9, 0xaa, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6,
+ 0xb7, 0xb8, 0xb9, 0xba, 0xc2, 0xc3, 0xc4, 0xc5,
+ 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xd2, 0xd3, 0xd4,
+ 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xe1, 0xe2,
+ 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea,
+ 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8,
+ 0xf9, 0xfa,
+};
+
+static const unsigned char chroma_ac_table[] = {
+ 0x00, 0x02, 0x01, 0x02, 0x04, 0x04, 0x03, 0x04,
+ 0x07, 0x05, 0x04, 0x04, 0x00, 0x01, 0x02, 0x77,
+ 0x00, 0x01, 0x02, 0x03, 0x11, 0x04, 0x05, 0x21,
+ 0x31, 0x06, 0x12, 0x41, 0x51, 0x07, 0x61, 0x71,
+ 0x13, 0x22, 0x32, 0x81, 0x08, 0x14, 0x42, 0x91,
+ 0xa1, 0xb1, 0xc1, 0x09, 0x23, 0x33, 0x52, 0xf0,
+ 0x15, 0x62, 0x72, 0xd1, 0x0a, 0x16, 0x24, 0x34,
+ 0xe1, 0x25, 0xf1, 0x17, 0x18, 0x19, 0x1a, 0x26,
+ 0x27, 0x28, 0x29, 0x2a, 0x35, 0x36, 0x37, 0x38,
+ 0x39, 0x3a, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48,
+ 0x49, 0x4a, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58,
+ 0x59, 0x5a, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68,
+ 0x69, 0x6a, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78,
+ 0x79, 0x7a, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x92, 0x93, 0x94, 0x95, 0x96,
+ 0x97, 0x98, 0x99, 0x9a, 0xa2, 0xa3, 0xa4, 0xa5,
+ 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xb2, 0xb3, 0xb4,
+ 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xc2, 0xc3,
+ 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xd2,
+ 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda,
+ 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9,
+ 0xea, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8,
+ 0xf9, 0xfa,
+};
+
+/*
+ * For simplicity, we keep a pre-formatted JPEG header, and use fixed
+ * offsets to patch in the width, height, quantization tables, etc.
+ */
+static const unsigned char hantro_jpeg_header[] = {
+ /* SOI */
+ 0xff, 0xd8,
+
+ /* JFIF-APP0 */
+ 0xff, 0xe0, 0x00, 0x10, 0x4a, 0x46, 0x49, 0x46,
+ 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x00, 0x01,
+ 0x00, 0x00,
+
+ /* DQT */
+ 0xff, 0xdb, 0x00, 0x84,
+
+ 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ 0x01,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ /* SOF */
+ 0xff, 0xc0, 0x00, 0x11, 0x08, 0x00, 0xf0, 0x01,
+ 0x40, 0x03, 0x01, 0x22, 0x00, 0x02, 0x11, 0x01,
+ 0x03, 0x11, 0x01,
+
+ /* DHT */
+ 0xff, 0xc4, 0x00, 0x1f, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ /* DHT */
+ 0xff, 0xc4, 0x00, 0xb5, 0x10,
+
+ 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ /* DHT */
+ 0xff, 0xc4, 0x00, 0x1f, 0x01,
+
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ /* DHT */
+ 0xff, 0xc4, 0x00, 0xb5, 0x11,
+
+ 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ /* COM */
+ 0xff, 0xfe, 0x00, 0x03, 0x00,
+
+ /* SOS */
+ 0xff, 0xda, 0x00, 0x0c, 0x03, 0x01, 0x00, 0x02,
+ 0x11, 0x03, 0x11, 0x00, 0x3f, 0x00,
+};
+
+/*
+ * JPEG_HEADER_SIZE is used in other parts of the driver in lieu of
+ * "sizeof(hantro_jpeg_header)". The two must be equal.
+ */
+static_assert(sizeof(hantro_jpeg_header) == JPEG_HEADER_SIZE);
+
+/*
+ * hantro_jpeg_header is padded with a COM segment, so that the payload
+ * of the SOS segment (the entropy-encoded image scan), which should
+ * trail the whole header, is 8-byte aligned for the hardware to write
+ * to directly.
+ */
+static_assert(IS_ALIGNED(sizeof(hantro_jpeg_header), 8),
+ "Hantro JPEG header size needs to be 8-byte aligned.");
+
+static unsigned char jpeg_scale_qp(const unsigned char qp, int scale)
+{
+ unsigned int temp;
+
+ temp = DIV_ROUND_CLOSEST((unsigned int)qp * scale, 100);
+	if (temp == 0)
+ temp = 1;
+ if (temp > 255)
+ temp = 255;
+
+ return (unsigned char)temp;
+}
+
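+/*
+ * Scale one of the standard quantization tables and emit it twice:
+ * once in JPEG zigzag (bitstream) order for the file header, and once
+ * in the order the hardware expects its register writes.
+ */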
+static void
+jpeg_scale_quant_table(unsigned char *file_q_tab,
+ unsigned char *reordered_q_tab,
+ const unsigned char *tab, int scale)
+{
+ int i;
+
+ BUILD_BUG_ON(ARRAY_SIZE(zigzag) != JPEG_QUANT_SIZE);
+ BUILD_BUG_ON(ARRAY_SIZE(hw_reorder) != JPEG_QUANT_SIZE);
+
+ for (i = 0; i < JPEG_QUANT_SIZE; i++) {
+ file_q_tab[i] = jpeg_scale_qp(tab[zigzag[i]], scale);
+ reordered_q_tab[i] = jpeg_scale_qp(tab[hw_reorder[i]], scale);
+ }
+}
+
+static void jpeg_set_quality(struct hantro_jpeg_ctx *ctx)
+{
+ int scale;
+
+ /*
+ * Non-linear scaling factor:
+ * [5,50] -> [1000..100], [51,100] -> [98..0]
+ */
+ if (ctx->quality < 50)
+ scale = 5000 / ctx->quality;
+ else
+ scale = 200 - 2 * ctx->quality;
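+
+	/*
+	 * Sample values of the mapping above (sketch): quality 5 ->
+	 * scale 1000, 25 -> 200, 50 -> 100, 75 -> 50, 100 -> 0.
+	 */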
+
+ BUILD_BUG_ON(ARRAY_SIZE(luma_q_table) != JPEG_QUANT_SIZE);
+ BUILD_BUG_ON(ARRAY_SIZE(chroma_q_table) != JPEG_QUANT_SIZE);
+ BUILD_BUG_ON(ARRAY_SIZE(ctx->hw_luma_qtable) != JPEG_QUANT_SIZE);
+ BUILD_BUG_ON(ARRAY_SIZE(ctx->hw_chroma_qtable) != JPEG_QUANT_SIZE);
+
+ jpeg_scale_quant_table(ctx->buffer + LUMA_QUANT_OFF,
+ ctx->hw_luma_qtable, luma_q_table, scale);
+ jpeg_scale_quant_table(ctx->buffer + CHROMA_QUANT_OFF,
+ ctx->hw_chroma_qtable, chroma_q_table, scale);
+}
+
+void hantro_jpeg_header_assemble(struct hantro_jpeg_ctx *ctx)
+{
+ char *buf = ctx->buffer;
+
+ memcpy(buf, hantro_jpeg_header,
+ sizeof(hantro_jpeg_header));
+
+ buf[HEIGHT_OFF + 0] = ctx->height >> 8;
+ buf[HEIGHT_OFF + 1] = ctx->height;
+ buf[WIDTH_OFF + 0] = ctx->width >> 8;
+ buf[WIDTH_OFF + 1] = ctx->width;
+
+ memcpy(buf + HUFF_LUMA_DC_OFF, luma_dc_table, sizeof(luma_dc_table));
+ memcpy(buf + HUFF_LUMA_AC_OFF, luma_ac_table, sizeof(luma_ac_table));
+ memcpy(buf + HUFF_CHROMA_DC_OFF, chroma_dc_table,
+ sizeof(chroma_dc_table));
+ memcpy(buf + HUFF_CHROMA_AC_OFF, chroma_ac_table,
+ sizeof(chroma_ac_table));
+
+ jpeg_set_quality(ctx);
+}
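+
+/*
+ * Typical call site (illustrative sketch only; the real callers are the
+ * H1/VPU2 JPEG encode run handlers):
+ *
+ *	struct hantro_jpeg_ctx jpeg_ctx = {
+ *		.buffer = vb2_plane_vaddr(&dst_buf->vb2_buf, 0),
+ *		.width = ctx->dst_fmt.width,
+ *		.height = ctx->dst_fmt.height,
+ *		.quality = ctx->jpeg_quality,
+ *	};
+ *	hantro_jpeg_header_assemble(&jpeg_ctx);
+ */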
diff --git a/drivers/media/platform/verisilicon/hantro_jpeg.h b/drivers/media/platform/verisilicon/hantro_jpeg.h
new file mode 100644
index 0000000000..0b49d0b82c
--- /dev/null
+++ b/drivers/media/platform/verisilicon/hantro_jpeg.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#define JPEG_HEADER_SIZE 624
+#define JPEG_QUANT_SIZE 64
+
+struct hantro_jpeg_ctx {
+ int width;
+ int height;
+ int quality;
+ unsigned char *buffer;
+ unsigned char hw_luma_qtable[JPEG_QUANT_SIZE];
+ unsigned char hw_chroma_qtable[JPEG_QUANT_SIZE];
+};
+
+void hantro_jpeg_header_assemble(struct hantro_jpeg_ctx *ctx);
diff --git a/drivers/media/platform/verisilicon/hantro_mpeg2.c b/drivers/media/platform/verisilicon/hantro_mpeg2.c
new file mode 100644
index 0000000000..04e545eb0a
--- /dev/null
+++ b/drivers/media/platform/verisilicon/hantro_mpeg2.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hantro VPU codec driver
+ *
+ * Copyright (C) 2018 Rockchip Electronics Co., Ltd.
+ */
+
+#include "hantro.h"
+
+static const u8 zigzag[64] = {
+ 0, 1, 8, 16, 9, 2, 3, 10,
+ 17, 24, 32, 25, 18, 11, 4, 5,
+ 12, 19, 26, 33, 40, 48, 41, 34,
+ 27, 20, 13, 6, 7, 14, 21, 28,
+ 35, 42, 49, 56, 57, 50, 43, 36,
+ 29, 22, 15, 23, 30, 37, 44, 51,
+ 58, 59, 52, 45, 38, 31, 39, 46,
+ 53, 60, 61, 54, 47, 55, 62, 63
+};
+
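+/*
+ * De-zigzag the four 64-entry quantisation matrices from the V4L2
+ * control into one contiguous 256-byte buffer, laid out as follows:
+ *
+ *	qtable[  0.. 63]: intra matrix
+ *	qtable[ 64..127]: non-intra matrix
+ *	qtable[128..191]: chroma intra matrix
+ *	qtable[192..255]: chroma non-intra matrix
+ */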
+void hantro_mpeg2_dec_copy_qtable(u8 *qtable,
+ const struct v4l2_ctrl_mpeg2_quantisation *ctrl)
+{
+ int i, n;
+
+ if (!qtable || !ctrl)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(zigzag); i++) {
+ n = zigzag[i];
+ qtable[n + 0] = ctrl->intra_quantiser_matrix[i];
+ qtable[n + 64] = ctrl->non_intra_quantiser_matrix[i];
+ qtable[n + 128] = ctrl->chroma_intra_quantiser_matrix[i];
+ qtable[n + 192] = ctrl->chroma_non_intra_quantiser_matrix[i];
+ }
+}
+
+int hantro_mpeg2_dec_init(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+
+ ctx->mpeg2_dec.qtable.size = ARRAY_SIZE(zigzag) * 4;
+ ctx->mpeg2_dec.qtable.cpu =
+ dma_alloc_coherent(vpu->dev,
+ ctx->mpeg2_dec.qtable.size,
+ &ctx->mpeg2_dec.qtable.dma,
+ GFP_KERNEL);
+ if (!ctx->mpeg2_dec.qtable.cpu)
+ return -ENOMEM;
+ return 0;
+}
+
+void hantro_mpeg2_dec_exit(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+
+ dma_free_coherent(vpu->dev,
+ ctx->mpeg2_dec.qtable.size,
+ ctx->mpeg2_dec.qtable.cpu,
+ ctx->mpeg2_dec.qtable.dma);
+}
diff --git a/drivers/media/platform/verisilicon/hantro_postproc.c b/drivers/media/platform/verisilicon/hantro_postproc.c
new file mode 100644
index 0000000000..64d6fb852a
--- /dev/null
+++ b/drivers/media/platform/verisilicon/hantro_postproc.c
@@ -0,0 +1,291 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hantro G1 post-processor support
+ *
+ * Copyright (C) 2019 Collabora, Ltd.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/types.h>
+
+#include "hantro.h"
+#include "hantro_hw.h"
+#include "hantro_g1_regs.h"
+#include "hantro_g2_regs.h"
+#include "hantro_v4l2.h"
+
+#define HANTRO_PP_REG_WRITE(vpu, reg_name, val) \
+do { \
+	hantro_reg_write(vpu, \
+			 &hantro_g1_postproc_regs.reg_name, \
+			 val); \
+} while (0)
+
+#define HANTRO_PP_REG_WRITE_RELAXED(vpu, reg_name, val) \
+do { \
+	hantro_reg_write_relaxed(vpu, \
+				 &hantro_g1_postproc_regs.reg_name, \
+				 val); \
+} while (0)
+
+#define VPU_PP_IN_YUYV 0x0
+#define VPU_PP_IN_NV12 0x1
+#define VPU_PP_IN_YUV420 0x2
+#define VPU_PP_IN_YUV420_TILED	0x5
+#define VPU_PP_OUT_RGB 0x0
+#define VPU_PP_OUT_YUYV 0x3
+
+static const struct hantro_postproc_regs hantro_g1_postproc_regs = {
+ .pipeline_en = {G1_REG_PP_INTERRUPT, 1, 0x1},
+ .max_burst = {G1_REG_PP_DEV_CONFIG, 0, 0x1f},
+ .clk_gate = {G1_REG_PP_DEV_CONFIG, 1, 0x1},
+ .out_swap32 = {G1_REG_PP_DEV_CONFIG, 5, 0x1},
+ .out_endian = {G1_REG_PP_DEV_CONFIG, 6, 0x1},
+ .out_luma_base = {G1_REG_PP_OUT_LUMA_BASE, 0, 0xffffffff},
+ .input_width = {G1_REG_PP_INPUT_SIZE, 0, 0x1ff},
+ .input_height = {G1_REG_PP_INPUT_SIZE, 9, 0x1ff},
+ .output_width = {G1_REG_PP_CONTROL, 4, 0x7ff},
+ .output_height = {G1_REG_PP_CONTROL, 15, 0x7ff},
+ .input_fmt = {G1_REG_PP_CONTROL, 29, 0x7},
+ .output_fmt = {G1_REG_PP_CONTROL, 26, 0x7},
+ .orig_width = {G1_REG_PP_MASK1_ORIG_WIDTH, 23, 0x1ff},
+ .display_width = {G1_REG_PP_DISPLAY_WIDTH, 0, 0xfff},
+};
+
+bool hantro_needs_postproc(const struct hantro_ctx *ctx,
+ const struct hantro_fmt *fmt)
+{
+ if (ctx->is_encoder)
+ return false;
+
+ if (ctx->need_postproc)
+ return true;
+
+ return fmt->postprocessed;
+}
+
+static void hantro_postproc_g1_enable(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ struct vb2_v4l2_buffer *dst_buf;
+ u32 src_pp_fmt, dst_pp_fmt;
+ dma_addr_t dst_dma;
+
+ /* Turn on pipeline mode. Must be done first. */
+ HANTRO_PP_REG_WRITE(vpu, pipeline_en, 0x1);
+
+ src_pp_fmt = VPU_PP_IN_NV12;
+
+ switch (ctx->vpu_dst_fmt->fourcc) {
+ case V4L2_PIX_FMT_YUYV:
+ dst_pp_fmt = VPU_PP_OUT_YUYV;
+ break;
+ default:
+ WARN(1, "output format %d not supported by the post-processor, this wasn't expected.",
+ ctx->vpu_dst_fmt->fourcc);
+ dst_pp_fmt = 0;
+ break;
+ }
+
+ dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+ dst_dma = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
+
+ HANTRO_PP_REG_WRITE(vpu, clk_gate, 0x1);
+ HANTRO_PP_REG_WRITE(vpu, out_endian, 0x1);
+ HANTRO_PP_REG_WRITE(vpu, out_swap32, 0x1);
+ HANTRO_PP_REG_WRITE(vpu, max_burst, 16);
+ HANTRO_PP_REG_WRITE(vpu, out_luma_base, dst_dma);
+ HANTRO_PP_REG_WRITE(vpu, input_width, MB_WIDTH(ctx->dst_fmt.width));
+ HANTRO_PP_REG_WRITE(vpu, input_height, MB_HEIGHT(ctx->dst_fmt.height));
+ HANTRO_PP_REG_WRITE(vpu, input_fmt, src_pp_fmt);
+ HANTRO_PP_REG_WRITE(vpu, output_fmt, dst_pp_fmt);
+ HANTRO_PP_REG_WRITE(vpu, output_width, ctx->dst_fmt.width);
+ HANTRO_PP_REG_WRITE(vpu, output_height, ctx->dst_fmt.height);
+ HANTRO_PP_REG_WRITE(vpu, orig_width, MB_WIDTH(ctx->dst_fmt.width));
+ HANTRO_PP_REG_WRITE(vpu, display_width, ctx->dst_fmt.width);
+}
+
+static int down_scale_factor(struct hantro_ctx *ctx)
+{
+ if (ctx->src_fmt.width <= ctx->dst_fmt.width)
+ return 0;
+
+ return DIV_ROUND_CLOSEST(ctx->src_fmt.width, ctx->dst_fmt.width);
+}
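+
+/*
+ * Example (sketch): a 1920-pixel-wide source decoded to a 960-pixel-wide
+ * destination yields a factor of 2; an equal or larger destination
+ * width yields 0 (no downscaling).
+ */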
+
+static void hantro_postproc_g2_enable(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ struct vb2_v4l2_buffer *dst_buf;
+ int down_scale = down_scale_factor(ctx);
+ int out_depth;
+ size_t chroma_offset;
+ dma_addr_t dst_dma;
+
+ dst_buf = hantro_get_dst_buf(ctx);
+ dst_dma = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
+ chroma_offset = ctx->dst_fmt.plane_fmt[0].bytesperline *
+ ctx->dst_fmt.height;
+
+ if (down_scale) {
+ hantro_reg_write(vpu, &g2_down_scale_e, 1);
+ hantro_reg_write(vpu, &g2_down_scale_y, down_scale >> 2);
+ hantro_reg_write(vpu, &g2_down_scale_x, down_scale >> 2);
+ hantro_write_addr(vpu, G2_DS_DST, dst_dma);
+ hantro_write_addr(vpu, G2_DS_DST_CHR, dst_dma + (chroma_offset >> down_scale));
+ } else {
+ hantro_write_addr(vpu, G2_RS_OUT_LUMA_ADDR, dst_dma);
+ hantro_write_addr(vpu, G2_RS_OUT_CHROMA_ADDR, dst_dma + chroma_offset);
+ }
+
+ out_depth = hantro_get_format_depth(ctx->dst_fmt.pixelformat);
+ if (ctx->dev->variant->legacy_regs) {
+ u8 pp_shift = 0;
+
+ if (out_depth > 8)
+ pp_shift = 16 - out_depth;
+
+ hantro_reg_write(ctx->dev, &g2_rs_out_bit_depth, out_depth);
+ hantro_reg_write(ctx->dev, &g2_pp_pix_shift, pp_shift);
+ } else {
+ hantro_reg_write(vpu, &g2_output_8_bits, out_depth > 8 ? 0 : 1);
+ hantro_reg_write(vpu, &g2_output_format, out_depth > 8 ? 1 : 0);
+ }
+ hantro_reg_write(vpu, &g2_out_rs_e, 1);
+}
+
+static int hantro_postproc_g2_enum_framesizes(struct hantro_ctx *ctx,
+ struct v4l2_frmsizeenum *fsize)
+{
+	/*
+	 * The G2 scaler can downscale by a factor of 1, 2, 4 or 8;
+	 * fsize->index is used as the power-of-two divisor.
+	 */
+ if (fsize->index > 3)
+ return -EINVAL;
+
+ if (!ctx->src_fmt.width || !ctx->src_fmt.height)
+ return -EINVAL;
+
+ fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
+ fsize->discrete.width = ctx->src_fmt.width >> fsize->index;
+ fsize->discrete.height = ctx->src_fmt.height >> fsize->index;
+
+ return 0;
+}
+
+void hantro_postproc_free(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ unsigned int i;
+
+ for (i = 0; i < VB2_MAX_FRAME; ++i) {
+ struct hantro_aux_buf *priv = &ctx->postproc.dec_q[i];
+
+ if (priv->cpu) {
+ dma_free_attrs(vpu->dev, priv->size, priv->cpu,
+ priv->dma, priv->attrs);
+ priv->cpu = NULL;
+ }
+ }
+}
+
+int hantro_postproc_alloc(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ struct v4l2_m2m_ctx *m2m_ctx = ctx->fh.m2m_ctx;
+ struct vb2_queue *cap_queue = &m2m_ctx->cap_q_ctx.q;
+ unsigned int num_buffers = cap_queue->num_buffers;
+ struct v4l2_pix_format_mplane pix_mp;
+ const struct hantro_fmt *fmt;
+ unsigned int i, buf_size;
+
+ /* this should always pick native format */
+ fmt = hantro_get_default_fmt(ctx, false, ctx->bit_depth, HANTRO_AUTO_POSTPROC);
+ if (!fmt)
+ return -EINVAL;
+ v4l2_fill_pixfmt_mp(&pix_mp, fmt->fourcc, ctx->src_fmt.width,
+ ctx->src_fmt.height);
+
+ buf_size = pix_mp.plane_fmt[0].sizeimage;
+ if (ctx->vpu_src_fmt->fourcc == V4L2_PIX_FMT_H264_SLICE)
+ buf_size += hantro_h264_mv_size(pix_mp.width,
+ pix_mp.height);
+ else if (ctx->vpu_src_fmt->fourcc == V4L2_PIX_FMT_VP9_FRAME)
+ buf_size += hantro_vp9_mv_size(pix_mp.width,
+ pix_mp.height);
+ else if (ctx->vpu_src_fmt->fourcc == V4L2_PIX_FMT_HEVC_SLICE)
+ buf_size += hantro_hevc_mv_size(pix_mp.width,
+ pix_mp.height);
+ else if (ctx->vpu_src_fmt->fourcc == V4L2_PIX_FMT_AV1_FRAME)
+ buf_size += hantro_av1_mv_size(pix_mp.width,
+ pix_mp.height);
+
+ for (i = 0; i < num_buffers; ++i) {
+ struct hantro_aux_buf *priv = &ctx->postproc.dec_q[i];
+
+ /*
+ * The buffers on this queue are meant as intermediate
+ * buffers for the decoder, so no mapping is needed.
+ */
+ priv->attrs = DMA_ATTR_NO_KERNEL_MAPPING;
+ priv->cpu = dma_alloc_attrs(vpu->dev, buf_size, &priv->dma,
+ GFP_KERNEL, priv->attrs);
+ if (!priv->cpu)
+ return -ENOMEM;
+ priv->size = buf_size;
+ }
+ return 0;
+}
+
+static void hantro_postproc_g1_disable(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+
+ HANTRO_PP_REG_WRITE(vpu, pipeline_en, 0x0);
+}
+
+static void hantro_postproc_g2_disable(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+
+ hantro_reg_write(vpu, &g2_out_rs_e, 0);
+}
+
+void hantro_postproc_disable(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+
+ if (vpu->variant->postproc_ops && vpu->variant->postproc_ops->disable)
+ vpu->variant->postproc_ops->disable(ctx);
+}
+
+void hantro_postproc_enable(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+
+ if (vpu->variant->postproc_ops && vpu->variant->postproc_ops->enable)
+ vpu->variant->postproc_ops->enable(ctx);
+}
+
+int hanto_postproc_enum_framesizes(struct hantro_ctx *ctx,
+ struct v4l2_frmsizeenum *fsize)
+{
+ struct hantro_dev *vpu = ctx->dev;
+
+ if (vpu->variant->postproc_ops && vpu->variant->postproc_ops->enum_framesizes)
+ return vpu->variant->postproc_ops->enum_framesizes(ctx, fsize);
+
+ return -EINVAL;
+}
+
+const struct hantro_postproc_ops hantro_g1_postproc_ops = {
+ .enable = hantro_postproc_g1_enable,
+ .disable = hantro_postproc_g1_disable,
+};
+
+const struct hantro_postproc_ops hantro_g2_postproc_ops = {
+ .enable = hantro_postproc_g2_enable,
+ .disable = hantro_postproc_g2_disable,
+ .enum_framesizes = hantro_postproc_g2_enum_framesizes,
+};
diff --git a/drivers/media/platform/verisilicon/hantro_v4l2.c b/drivers/media/platform/verisilicon/hantro_v4l2.c
new file mode 100644
index 0000000000..db145519fc
--- /dev/null
+++ b/drivers/media/platform/verisilicon/hantro_v4l2.c
@@ -0,0 +1,1022 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hantro VPU codec driver
+ *
+ * Copyright (C) 2018 Collabora, Ltd.
+ * Copyright (C) 2018 Rockchip Electronics Co., Ltd.
+ * Alpha Lin <Alpha.Lin@rock-chips.com>
+ * Jeffy Chen <jeffy.chen@rock-chips.com>
+ *
+ * Copyright 2018 Google LLC.
+ * Tomasz Figa <tfiga@chromium.org>
+ *
+ * Based on s5p-mfc driver by Samsung Electronics Co., Ltd.
+ * Copyright (C) 2010-2011 Samsung Electronics Co., Ltd.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/videodev2.h>
+#include <linux/workqueue.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "hantro.h"
+#include "hantro_hw.h"
+#include "hantro_v4l2.h"
+
+#define HANTRO_DEFAULT_BIT_DEPTH 8
+
+static int hantro_set_fmt_out(struct hantro_ctx *ctx,
+ struct v4l2_pix_format_mplane *pix_mp,
+ bool need_postproc);
+static int hantro_set_fmt_cap(struct hantro_ctx *ctx,
+ struct v4l2_pix_format_mplane *pix_mp);
+
+static const struct hantro_fmt *
+hantro_get_formats(const struct hantro_ctx *ctx, unsigned int *num_fmts, bool need_postproc)
+{
+ const struct hantro_fmt *formats;
+
+ if (need_postproc) {
+ *num_fmts = 0;
+ return NULL;
+ }
+
+ if (ctx->is_encoder) {
+ formats = ctx->dev->variant->enc_fmts;
+ *num_fmts = ctx->dev->variant->num_enc_fmts;
+ } else {
+ formats = ctx->dev->variant->dec_fmts;
+ *num_fmts = ctx->dev->variant->num_dec_fmts;
+ }
+
+ return formats;
+}
+
+static const struct hantro_fmt *
+hantro_get_postproc_formats(const struct hantro_ctx *ctx,
+ unsigned int *num_fmts)
+{
+ struct hantro_dev *vpu = ctx->dev;
+
+ if (ctx->is_encoder || !vpu->variant->postproc_fmts) {
+ *num_fmts = 0;
+ return NULL;
+ }
+
+ *num_fmts = ctx->dev->variant->num_postproc_fmts;
+ return ctx->dev->variant->postproc_fmts;
+}
+
+int hantro_get_format_depth(u32 fourcc)
+{
+ switch (fourcc) {
+ case V4L2_PIX_FMT_P010:
+ case V4L2_PIX_FMT_P010_4L4:
+ case V4L2_PIX_FMT_NV15_4L4:
+ return 10;
+ default:
+ return 8;
+ }
+}
+
+static bool
+hantro_check_depth_match(const struct hantro_fmt *fmt, int bit_depth)
+{
+ int fmt_depth;
+
+ if (!fmt->match_depth && !fmt->postprocessed)
+ return true;
+
+ /* 0 means default depth, which is 8 */
+ if (!bit_depth)
+ bit_depth = HANTRO_DEFAULT_BIT_DEPTH;
+
+ fmt_depth = hantro_get_format_depth(fmt->fourcc);
+
+ /*
+ * Allow only downconversion for postproc formats for now.
+ * It may be possible to relax that on some HW.
+ */
+ if (!fmt->match_depth)
+ return fmt_depth <= bit_depth;
+
+ return fmt_depth == bit_depth;
+}
+
+static const struct hantro_fmt *
+hantro_find_format(const struct hantro_ctx *ctx, u32 fourcc)
+{
+ const struct hantro_fmt *formats;
+ unsigned int i, num_fmts;
+
+ formats = hantro_get_formats(ctx, &num_fmts, HANTRO_AUTO_POSTPROC);
+ for (i = 0; i < num_fmts; i++)
+ if (formats[i].fourcc == fourcc)
+ return &formats[i];
+
+ formats = hantro_get_postproc_formats(ctx, &num_fmts);
+ for (i = 0; i < num_fmts; i++)
+ if (formats[i].fourcc == fourcc)
+ return &formats[i];
+ return NULL;
+}
+
+const struct hantro_fmt *
+hantro_get_default_fmt(const struct hantro_ctx *ctx, bool bitstream,
+ int bit_depth, bool need_postproc)
+{
+ const struct hantro_fmt *formats;
+ unsigned int i, num_fmts;
+
+ formats = hantro_get_formats(ctx, &num_fmts, need_postproc);
+ for (i = 0; i < num_fmts; i++) {
+ if (bitstream == (formats[i].codec_mode !=
+ HANTRO_MODE_NONE) &&
+ hantro_check_depth_match(&formats[i], bit_depth))
+ return &formats[i];
+ }
+
+ formats = hantro_get_postproc_formats(ctx, &num_fmts);
+ for (i = 0; i < num_fmts; i++) {
+ if (bitstream == (formats[i].codec_mode !=
+ HANTRO_MODE_NONE) &&
+ hantro_check_depth_match(&formats[i], bit_depth))
+ return &formats[i];
+ }
+
+ return NULL;
+}
+
+static int vidioc_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct hantro_dev *vpu = video_drvdata(file);
+ struct video_device *vdev = video_devdata(file);
+
+ strscpy(cap->driver, vpu->dev->driver->name, sizeof(cap->driver));
+ strscpy(cap->card, vdev->name, sizeof(cap->card));
+ return 0;
+}
+
+static int vidioc_enum_framesizes(struct file *file, void *priv,
+ struct v4l2_frmsizeenum *fsize)
+{
+ struct hantro_ctx *ctx = fh_to_ctx(priv);
+ const struct hantro_fmt *fmt;
+
+ fmt = hantro_find_format(ctx, fsize->pixel_format);
+ if (!fmt) {
+ vpu_debug(0, "unsupported bitstream format (%08x)\n",
+ fsize->pixel_format);
+ return -EINVAL;
+ }
+
+	/* For non-coded formats, check if post-processing scaling is possible */
+ if (fmt->codec_mode == HANTRO_MODE_NONE) {
+ if (hantro_needs_postproc(ctx, fmt))
+ return hanto_postproc_enum_framesizes(ctx, fsize);
+ else
+ return -ENOTTY;
+ } else if (fsize->index != 0) {
+ vpu_debug(0, "invalid frame size index (expected 0, got %d)\n",
+ fsize->index);
+ return -EINVAL;
+ }
+
+ fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
+ fsize->stepwise = fmt->frmsize;
+
+ return 0;
+}
+
+static int vidioc_enum_fmt(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f, bool capture)
+{
+ struct hantro_ctx *ctx = fh_to_ctx(priv);
+ const struct hantro_fmt *fmt, *formats;
+ unsigned int num_fmts, i, j = 0;
+ bool skip_mode_none;
+
+ /*
+ * When dealing with an encoder:
+ * - on the capture side we want to filter out all MODE_NONE formats.
+ * - on the output side we want to filter out all formats that are
+ * not MODE_NONE.
+ * When dealing with a decoder:
+ * - on the capture side we want to filter out all formats that are
+ * not MODE_NONE.
+ * - on the output side we want to filter out all MODE_NONE formats.
+ */
+ skip_mode_none = capture == ctx->is_encoder;
+
+ formats = hantro_get_formats(ctx, &num_fmts, HANTRO_AUTO_POSTPROC);
+ for (i = 0; i < num_fmts; i++) {
+		bool mode_none = formats[i].codec_mode == HANTRO_MODE_NONE;
+
+		fmt = &formats[i];
+
+ if (skip_mode_none == mode_none)
+ continue;
+ if (!hantro_check_depth_match(fmt, ctx->bit_depth))
+ continue;
+ if (j == f->index) {
+ f->pixelformat = fmt->fourcc;
+ return 0;
+ }
+ ++j;
+ }
+
+	/*
+	 * Enumerate post-processed formats. As per the specification,
+	 * we enumerate these formats after natively decoded formats
+	 * as a hint for applications about the preferred format.
+	 */
+ if (!capture)
+ return -EINVAL;
+ formats = hantro_get_postproc_formats(ctx, &num_fmts);
+ for (i = 0; i < num_fmts; i++) {
+ fmt = &formats[i];
+
+ if (!hantro_check_depth_match(fmt, ctx->bit_depth))
+ continue;
+ if (j == f->index) {
+ f->pixelformat = fmt->fourcc;
+ return 0;
+ }
+ ++j;
+ }
+
+ return -EINVAL;
+}
+
+static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return vidioc_enum_fmt(file, priv, f, true);
+}
+
+static int vidioc_enum_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return vidioc_enum_fmt(file, priv, f, false);
+}
+
+static int vidioc_g_fmt_out_mplane(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
+ struct hantro_ctx *ctx = fh_to_ctx(priv);
+
+ vpu_debug(4, "f->type = %d\n", f->type);
+
+ *pix_mp = ctx->src_fmt;
+
+ return 0;
+}
+
+static int vidioc_g_fmt_cap_mplane(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
+ struct hantro_ctx *ctx = fh_to_ctx(priv);
+
+ vpu_debug(4, "f->type = %d\n", f->type);
+
+ *pix_mp = ctx->dst_fmt;
+
+ return 0;
+}
+
+static int hantro_try_fmt(const struct hantro_ctx *ctx,
+ struct v4l2_pix_format_mplane *pix_mp,
+ enum v4l2_buf_type type)
+{
+ const struct hantro_fmt *fmt;
+ const struct hantro_fmt *vpu_fmt;
+ bool capture = V4L2_TYPE_IS_CAPTURE(type);
+ bool coded;
+
+ coded = capture == ctx->is_encoder;
+
+ vpu_debug(4, "trying format %c%c%c%c\n",
+ (pix_mp->pixelformat & 0x7f),
+ (pix_mp->pixelformat >> 8) & 0x7f,
+ (pix_mp->pixelformat >> 16) & 0x7f,
+ (pix_mp->pixelformat >> 24) & 0x7f);
+
+ fmt = hantro_find_format(ctx, pix_mp->pixelformat);
+ if (!fmt) {
+		fmt = hantro_get_default_fmt(ctx, coded, HANTRO_DEFAULT_BIT_DEPTH, HANTRO_AUTO_POSTPROC);
+		if (!fmt)
+			return -EINVAL;
+		pix_mp->pixelformat = fmt->fourcc;
+ }
+
+ if (coded) {
+ pix_mp->num_planes = 1;
+ vpu_fmt = fmt;
+ } else if (ctx->is_encoder) {
+ vpu_fmt = hantro_find_format(ctx, ctx->dst_fmt.pixelformat);
+ } else {
+ /*
+ * Width/height on the CAPTURE end of a decoder are ignored and
+ * replaced by the OUTPUT ones.
+ */
+ pix_mp->width = ctx->src_fmt.width;
+ pix_mp->height = ctx->src_fmt.height;
+ vpu_fmt = fmt;
+ }
+
+ pix_mp->field = V4L2_FIELD_NONE;
+
+ v4l2_apply_frmsize_constraints(&pix_mp->width, &pix_mp->height,
+ &vpu_fmt->frmsize);
+
+ if (!coded) {
+ /* Fill remaining fields */
+ v4l2_fill_pixfmt_mp(pix_mp, fmt->fourcc, pix_mp->width,
+ pix_mp->height);
+ if (ctx->vpu_src_fmt->fourcc == V4L2_PIX_FMT_H264_SLICE &&
+ !hantro_needs_postproc(ctx, fmt))
+ pix_mp->plane_fmt[0].sizeimage +=
+ hantro_h264_mv_size(pix_mp->width,
+ pix_mp->height);
+ else if (ctx->vpu_src_fmt->fourcc == V4L2_PIX_FMT_VP9_FRAME &&
+ !hantro_needs_postproc(ctx, fmt))
+ pix_mp->plane_fmt[0].sizeimage +=
+ hantro_vp9_mv_size(pix_mp->width,
+ pix_mp->height);
+ else if (ctx->vpu_src_fmt->fourcc == V4L2_PIX_FMT_HEVC_SLICE &&
+ !hantro_needs_postproc(ctx, fmt))
+ pix_mp->plane_fmt[0].sizeimage +=
+ hantro_hevc_mv_size(pix_mp->width,
+ pix_mp->height);
+ else if (ctx->vpu_src_fmt->fourcc == V4L2_PIX_FMT_AV1_FRAME &&
+ !hantro_needs_postproc(ctx, fmt))
+ pix_mp->plane_fmt[0].sizeimage +=
+ hantro_av1_mv_size(pix_mp->width,
+ pix_mp->height);
+ } else if (!pix_mp->plane_fmt[0].sizeimage) {
+ /*
+ * For coded formats the application can specify
+ * sizeimage. If the application passes a zero sizeimage,
+ * let's default to the maximum frame size.
+ */
+ pix_mp->plane_fmt[0].sizeimage = fmt->header_size +
+ pix_mp->width * pix_mp->height * fmt->max_depth;
+ }
+
+ return 0;
+}
+
+static int vidioc_try_fmt_cap_mplane(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ return hantro_try_fmt(fh_to_ctx(priv), &f->fmt.pix_mp, f->type);
+}
+
+static int vidioc_try_fmt_out_mplane(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ return hantro_try_fmt(fh_to_ctx(priv), &f->fmt.pix_mp, f->type);
+}
+
+static void
+hantro_reset_fmt(struct v4l2_pix_format_mplane *fmt,
+ const struct hantro_fmt *vpu_fmt)
+{
+ memset(fmt, 0, sizeof(*fmt));
+
+ fmt->pixelformat = vpu_fmt->fourcc;
+ fmt->field = V4L2_FIELD_NONE;
+ fmt->colorspace = V4L2_COLORSPACE_JPEG;
+ fmt->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ fmt->quantization = V4L2_QUANTIZATION_DEFAULT;
+ fmt->xfer_func = V4L2_XFER_FUNC_DEFAULT;
+}
+
+static void
+hantro_reset_encoded_fmt(struct hantro_ctx *ctx)
+{
+ const struct hantro_fmt *vpu_fmt;
+ struct v4l2_pix_format_mplane fmt;
+
+ vpu_fmt = hantro_get_default_fmt(ctx, true, HANTRO_DEFAULT_BIT_DEPTH, HANTRO_AUTO_POSTPROC);
+ if (!vpu_fmt)
+ return;
+
+ hantro_reset_fmt(&fmt, vpu_fmt);
+ fmt.width = vpu_fmt->frmsize.min_width;
+ fmt.height = vpu_fmt->frmsize.min_height;
+ if (ctx->is_encoder)
+ hantro_set_fmt_cap(ctx, &fmt);
+ else
+ hantro_set_fmt_out(ctx, &fmt, HANTRO_AUTO_POSTPROC);
+}
+
+int
+hantro_reset_raw_fmt(struct hantro_ctx *ctx, int bit_depth, bool need_postproc)
+{
+ const struct hantro_fmt *raw_vpu_fmt;
+ struct v4l2_pix_format_mplane raw_fmt, *encoded_fmt;
+ int ret;
+
+ raw_vpu_fmt = hantro_get_default_fmt(ctx, false, bit_depth, need_postproc);
+ if (!raw_vpu_fmt)
+ return -EINVAL;
+
+ if (ctx->is_encoder) {
+ encoded_fmt = &ctx->dst_fmt;
+ ctx->vpu_src_fmt = raw_vpu_fmt;
+ } else {
+ encoded_fmt = &ctx->src_fmt;
+ }
+
+ hantro_reset_fmt(&raw_fmt, raw_vpu_fmt);
+ raw_fmt.width = encoded_fmt->width;
+ raw_fmt.height = encoded_fmt->height;
+ if (ctx->is_encoder)
+ ret = hantro_set_fmt_out(ctx, &raw_fmt, need_postproc);
+ else
+ ret = hantro_set_fmt_cap(ctx, &raw_fmt);
+
+ if (!ret) {
+ ctx->bit_depth = bit_depth;
+ ctx->need_postproc = need_postproc;
+ }
+
+ return ret;
+}
+
+void hantro_reset_fmts(struct hantro_ctx *ctx)
+{
+ hantro_reset_encoded_fmt(ctx);
+ hantro_reset_raw_fmt(ctx, HANTRO_DEFAULT_BIT_DEPTH, HANTRO_AUTO_POSTPROC);
+}
+
+static void
+hantro_update_requires_request(struct hantro_ctx *ctx, u32 fourcc)
+{
+ switch (fourcc) {
+ case V4L2_PIX_FMT_JPEG:
+ ctx->fh.m2m_ctx->out_q_ctx.q.requires_requests = false;
+ break;
+ case V4L2_PIX_FMT_MPEG2_SLICE:
+ case V4L2_PIX_FMT_VP8_FRAME:
+ case V4L2_PIX_FMT_H264_SLICE:
+ case V4L2_PIX_FMT_HEVC_SLICE:
+ case V4L2_PIX_FMT_VP9_FRAME:
+ ctx->fh.m2m_ctx->out_q_ctx.q.requires_requests = true;
+ break;
+ default:
+ break;
+ }
+}
+
+static void
+hantro_update_requires_hold_capture_buf(struct hantro_ctx *ctx, u32 fourcc)
+{
+ struct vb2_queue *vq;
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+
+ switch (fourcc) {
+ case V4L2_PIX_FMT_JPEG:
+ case V4L2_PIX_FMT_MPEG2_SLICE:
+ case V4L2_PIX_FMT_VP8_FRAME:
+ case V4L2_PIX_FMT_HEVC_SLICE:
+ case V4L2_PIX_FMT_VP9_FRAME:
+ vq->subsystem_flags &= ~(VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF);
+ break;
+ case V4L2_PIX_FMT_H264_SLICE:
+ vq->subsystem_flags |= VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF;
+ break;
+ default:
+ break;
+ }
+}
+
+static int hantro_set_fmt_out(struct hantro_ctx *ctx,
+ struct v4l2_pix_format_mplane *pix_mp,
+ bool need_postproc)
+{
+ struct vb2_queue *vq;
+ int ret;
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ ret = hantro_try_fmt(ctx, pix_mp, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ if (ret)
+ return ret;
+
+ if (!ctx->is_encoder) {
+ struct vb2_queue *peer_vq;
+
+ /*
+ * In order to support dynamic resolution change,
+ * the decoder admits a resolution change, as long
+ * as the pixelformat remains. Can't be done if streaming.
+ */
+ if (vb2_is_streaming(vq) || (vb2_is_busy(vq) &&
+ pix_mp->pixelformat != ctx->src_fmt.pixelformat))
+ return -EBUSY;
+ /*
+ * Since format change on the OUTPUT queue will reset
+ * the CAPTURE queue, we can't allow doing so
+ * when the CAPTURE queue has buffers allocated.
+ */
+ peer_vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ if (vb2_is_busy(peer_vq))
+ return -EBUSY;
+ } else {
+ /*
+ * The encoder doesn't admit a format change if
+ * there are OUTPUT buffers allocated.
+ */
+ if (vb2_is_busy(vq))
+ return -EBUSY;
+ }
+
+ ctx->vpu_src_fmt = hantro_find_format(ctx, pix_mp->pixelformat);
+ ctx->src_fmt = *pix_mp;
+
+ /*
+	 * Current raw format might have become invalid with the newly
+ * selected codec, so reset it to default just to be safe and
+ * keep internal driver state sane. User is mandated to set
+ * the raw format again after we return, so we don't need
+ * anything smarter.
+ * Note that hantro_reset_raw_fmt() also propagates size
+ * changes to the raw format.
+ */
+ if (!ctx->is_encoder)
+ hantro_reset_raw_fmt(ctx,
+ hantro_get_format_depth(pix_mp->pixelformat),
+ need_postproc);
+
+	/* Colorimetry information is always propagated. */
+ ctx->dst_fmt.colorspace = pix_mp->colorspace;
+ ctx->dst_fmt.ycbcr_enc = pix_mp->ycbcr_enc;
+ ctx->dst_fmt.xfer_func = pix_mp->xfer_func;
+ ctx->dst_fmt.quantization = pix_mp->quantization;
+
+ hantro_update_requires_request(ctx, pix_mp->pixelformat);
+ hantro_update_requires_hold_capture_buf(ctx, pix_mp->pixelformat);
+
+ vpu_debug(0, "OUTPUT codec mode: %d\n", ctx->vpu_src_fmt->codec_mode);
+ vpu_debug(0, "fmt - w: %d, h: %d\n",
+ pix_mp->width, pix_mp->height);
+ return 0;
+}
+
+static int hantro_set_fmt_cap(struct hantro_ctx *ctx,
+ struct v4l2_pix_format_mplane *pix_mp)
+{
+ struct vb2_queue *vq;
+ int ret;
+
+ /* Change not allowed if queue is busy. */
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ if (vb2_is_busy(vq))
+ return -EBUSY;
+
+ if (ctx->is_encoder) {
+ struct vb2_queue *peer_vq;
+
+ /*
+ * Since format change on the CAPTURE queue will reset
+ * the OUTPUT queue, we can't allow doing so
+ * when the OUTPUT queue has buffers allocated.
+ */
+ peer_vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ if (vb2_is_busy(peer_vq) &&
+ (pix_mp->pixelformat != ctx->dst_fmt.pixelformat ||
+ pix_mp->height != ctx->dst_fmt.height ||
+ pix_mp->width != ctx->dst_fmt.width))
+ return -EBUSY;
+ }
+
+ ret = hantro_try_fmt(ctx, pix_mp, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ if (ret)
+ return ret;
+
+ ctx->vpu_dst_fmt = hantro_find_format(ctx, pix_mp->pixelformat);
+ ctx->dst_fmt = *pix_mp;
+
+ /*
+	 * Current raw format might have become invalid with the newly
+ * selected codec, so reset it to default just to be safe and
+ * keep internal driver state sane. User is mandated to set
+ * the raw format again after we return, so we don't need
+ * anything smarter.
+ * Note that hantro_reset_raw_fmt() also propagates size
+ * changes to the raw format.
+ */
+ if (ctx->is_encoder)
+ hantro_reset_raw_fmt(ctx, HANTRO_DEFAULT_BIT_DEPTH, HANTRO_AUTO_POSTPROC);
+
+	/* Colorimetry information is always propagated. */
+ ctx->src_fmt.colorspace = pix_mp->colorspace;
+ ctx->src_fmt.ycbcr_enc = pix_mp->ycbcr_enc;
+ ctx->src_fmt.xfer_func = pix_mp->xfer_func;
+ ctx->src_fmt.quantization = pix_mp->quantization;
+
+ vpu_debug(0, "CAPTURE codec mode: %d\n", ctx->vpu_dst_fmt->codec_mode);
+ vpu_debug(0, "fmt - w: %d, h: %d\n",
+ pix_mp->width, pix_mp->height);
+
+ hantro_update_requires_request(ctx, pix_mp->pixelformat);
+
+ return 0;
+}
+
+static int
+vidioc_s_fmt_out_mplane(struct file *file, void *priv, struct v4l2_format *f)
+{
+ return hantro_set_fmt_out(fh_to_ctx(priv), &f->fmt.pix_mp, HANTRO_AUTO_POSTPROC);
+}
+
+static int
+vidioc_s_fmt_cap_mplane(struct file *file, void *priv, struct v4l2_format *f)
+{
+ return hantro_set_fmt_cap(fh_to_ctx(priv), &f->fmt.pix_mp);
+}
+
+static int vidioc_g_selection(struct file *file, void *priv,
+ struct v4l2_selection *sel)
+{
+ struct hantro_ctx *ctx = fh_to_ctx(priv);
+
+ /* Crop only supported on source. */
+ if (!ctx->is_encoder ||
+ sel->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ sel->r.top = 0;
+ sel->r.left = 0;
+ sel->r.width = ctx->src_fmt.width;
+ sel->r.height = ctx->src_fmt.height;
+ break;
+ case V4L2_SEL_TGT_CROP:
+ sel->r.top = 0;
+ sel->r.left = 0;
+ sel->r.width = ctx->dst_fmt.width;
+ sel->r.height = ctx->dst_fmt.height;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int vidioc_s_selection(struct file *file, void *priv,
+ struct v4l2_selection *sel)
+{
+ struct hantro_ctx *ctx = fh_to_ctx(priv);
+ struct v4l2_rect *rect = &sel->r;
+ struct vb2_queue *vq;
+
+ /* Crop only supported on source. */
+ if (!ctx->is_encoder ||
+ sel->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+
+ /* Change not allowed if the queue is streaming. */
+ vq = v4l2_m2m_get_src_vq(ctx->fh.m2m_ctx);
+ if (vb2_is_streaming(vq))
+ return -EBUSY;
+
+ if (sel->target != V4L2_SEL_TGT_CROP)
+ return -EINVAL;
+
+ /*
+ * We do not support offsets, and we can crop only inside
+ * right-most or bottom-most macroblocks.
+ */
+ if (rect->left != 0 || rect->top != 0 ||
+ round_up(rect->width, MB_DIM) != ctx->src_fmt.width ||
+ round_up(rect->height, MB_DIM) != ctx->src_fmt.height) {
+ /* Default to full frame for incorrect settings. */
+ rect->left = 0;
+ rect->top = 0;
+ rect->width = ctx->src_fmt.width;
+ rect->height = ctx->src_fmt.height;
+ } else {
+ /* We support widths aligned to 4 pixels and arbitrary heights. */
+ rect->width = round_up(rect->width, 4);
+ }
+
+ ctx->dst_fmt.width = rect->width;
+ ctx->dst_fmt.height = rect->height;
+
+ return 0;
+}
+
+static const struct v4l2_event hantro_eos_event = {
+ .type = V4L2_EVENT_EOS
+};
+
+static int vidioc_encoder_cmd(struct file *file, void *priv,
+ struct v4l2_encoder_cmd *ec)
+{
+ struct hantro_ctx *ctx = fh_to_ctx(priv);
+ int ret;
+
+ ret = v4l2_m2m_ioctl_try_encoder_cmd(file, priv, ec);
+ if (ret < 0)
+ return ret;
+
+ if (!vb2_is_streaming(v4l2_m2m_get_src_vq(ctx->fh.m2m_ctx)) ||
+ !vb2_is_streaming(v4l2_m2m_get_dst_vq(ctx->fh.m2m_ctx)))
+ return 0;
+
+ ret = v4l2_m2m_ioctl_encoder_cmd(file, priv, ec);
+ if (ret < 0)
+ return ret;
+
+ if (ec->cmd == V4L2_ENC_CMD_STOP &&
+ v4l2_m2m_has_stopped(ctx->fh.m2m_ctx))
+ v4l2_event_queue_fh(&ctx->fh, &hantro_eos_event);
+
+ if (ec->cmd == V4L2_ENC_CMD_START)
+ vb2_clear_last_buffer_dequeued(&ctx->fh.m2m_ctx->cap_q_ctx.q);
+
+ return 0;
+}
+
+const struct v4l2_ioctl_ops hantro_ioctl_ops = {
+ .vidioc_querycap = vidioc_querycap,
+ .vidioc_enum_framesizes = vidioc_enum_framesizes,
+
+ .vidioc_try_fmt_vid_cap_mplane = vidioc_try_fmt_cap_mplane,
+ .vidioc_try_fmt_vid_out_mplane = vidioc_try_fmt_out_mplane,
+ .vidioc_s_fmt_vid_out_mplane = vidioc_s_fmt_out_mplane,
+ .vidioc_s_fmt_vid_cap_mplane = vidioc_s_fmt_cap_mplane,
+ .vidioc_g_fmt_vid_out_mplane = vidioc_g_fmt_out_mplane,
+ .vidioc_g_fmt_vid_cap_mplane = vidioc_g_fmt_cap_mplane,
+ .vidioc_enum_fmt_vid_out = vidioc_enum_fmt_vid_out,
+ .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
+
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+
+ .vidioc_g_selection = vidioc_g_selection,
+ .vidioc_s_selection = vidioc_s_selection,
+
+ .vidioc_decoder_cmd = v4l2_m2m_ioctl_stateless_decoder_cmd,
+ .vidioc_try_decoder_cmd = v4l2_m2m_ioctl_stateless_try_decoder_cmd,
+
+ .vidioc_try_encoder_cmd = v4l2_m2m_ioctl_try_encoder_cmd,
+ .vidioc_encoder_cmd = vidioc_encoder_cmd,
+};
+
+static int
+hantro_queue_setup(struct vb2_queue *vq, unsigned int *num_buffers,
+ unsigned int *num_planes, unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct hantro_ctx *ctx = vb2_get_drv_priv(vq);
+ struct v4l2_pix_format_mplane *pixfmt;
+ int i;
+
+ switch (vq->type) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ pixfmt = &ctx->dst_fmt;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ pixfmt = &ctx->src_fmt;
+ break;
+ default:
+ vpu_err("invalid queue type: %d\n", vq->type);
+ return -EINVAL;
+ }
+
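+	/*
+	 * A non-zero *num_planes means the request comes from
+	 * VIDIOC_CREATE_BUFS: validate the caller-provided layout instead
+	 * of filling in the defaults.
+	 */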
+ if (*num_planes) {
+ if (*num_planes != pixfmt->num_planes)
+ return -EINVAL;
+ for (i = 0; i < pixfmt->num_planes; ++i)
+ if (sizes[i] < pixfmt->plane_fmt[i].sizeimage)
+ return -EINVAL;
+ return 0;
+ }
+
+ *num_planes = pixfmt->num_planes;
+ for (i = 0; i < pixfmt->num_planes; ++i)
+ sizes[i] = pixfmt->plane_fmt[i].sizeimage;
+ return 0;
+}
+
+static int
+hantro_buf_plane_check(struct vb2_buffer *vb,
+ struct v4l2_pix_format_mplane *pixfmt)
+{
+ unsigned int sz;
+ int i;
+
+ for (i = 0; i < pixfmt->num_planes; ++i) {
+ sz = pixfmt->plane_fmt[i].sizeimage;
+ vpu_debug(4, "plane %d size: %ld, sizeimage: %u\n",
+ i, vb2_plane_size(vb, i), sz);
+ if (vb2_plane_size(vb, i) < sz) {
+ vpu_err("plane %d is too small for output\n", i);
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+static int hantro_buf_prepare(struct vb2_buffer *vb)
+{
+ struct vb2_queue *vq = vb->vb2_queue;
+ struct hantro_ctx *ctx = vb2_get_drv_priv(vq);
+ struct v4l2_pix_format_mplane *pix_fmt;
+ int ret;
+
+ if (V4L2_TYPE_IS_OUTPUT(vq->type))
+ pix_fmt = &ctx->src_fmt;
+ else
+ pix_fmt = &ctx->dst_fmt;
+ ret = hantro_buf_plane_check(vb, pix_fmt);
+ if (ret)
+ return ret;
+	/*
+	 * The buffer's bytesused must be written by the driver for CAPTURE
+	 * buffers. (For OUTPUT buffers, if userspace passes 0 bytesused,
+	 * the v4l2-core sets it to the buffer length.)
+	 */
+ if (V4L2_TYPE_IS_CAPTURE(vq->type)) {
+ if (ctx->is_encoder)
+ vb2_set_plane_payload(vb, 0, 0);
+ else
+ vb2_set_plane_payload(vb, 0, pix_fmt->plane_fmt[0].sizeimage);
+ }
+
+ return 0;
+}
+
+static void hantro_buf_queue(struct vb2_buffer *vb)
+{
+ struct hantro_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+
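+	/*
+	 * Capture buffers queued after the last decoded buffer are
+	 * returned to userspace empty, flagged as last and followed by an
+	 * EOS event.
+	 */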
+ if (V4L2_TYPE_IS_CAPTURE(vb->vb2_queue->type) &&
+ vb2_is_streaming(vb->vb2_queue) &&
+ v4l2_m2m_dst_buf_is_last(ctx->fh.m2m_ctx)) {
+ unsigned int i;
+
+ for (i = 0; i < vb->num_planes; i++)
+ vb2_set_plane_payload(vb, i, 0);
+
+ vbuf->field = V4L2_FIELD_NONE;
+ vbuf->sequence = ctx->sequence_cap++;
+
+ v4l2_m2m_last_buffer_done(ctx->fh.m2m_ctx, vbuf);
+ v4l2_event_queue_fh(&ctx->fh, &hantro_eos_event);
+ return;
+ }
+
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
+}
+
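+/*
+ * A queue holds coded data if it is the CAPTURE queue of an encoder or
+ * the OUTPUT queue of a decoder, hence the XOR below.
+ */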
+static bool hantro_vq_is_coded(struct vb2_queue *q)
+{
+ struct hantro_ctx *ctx = vb2_get_drv_priv(q);
+
+ return ctx->is_encoder != V4L2_TYPE_IS_OUTPUT(q->type);
+}
+
+static int hantro_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct hantro_ctx *ctx = vb2_get_drv_priv(q);
+ int ret = 0;
+
+ v4l2_m2m_update_start_streaming_state(ctx->fh.m2m_ctx, q);
+
+ if (V4L2_TYPE_IS_OUTPUT(q->type))
+ ctx->sequence_out = 0;
+ else
+ ctx->sequence_cap = 0;
+
+ if (hantro_vq_is_coded(q)) {
+ enum hantro_codec_mode codec_mode;
+
+ if (V4L2_TYPE_IS_OUTPUT(q->type))
+ codec_mode = ctx->vpu_src_fmt->codec_mode;
+ else
+ codec_mode = ctx->vpu_dst_fmt->codec_mode;
+
+ vpu_debug(4, "Codec mode = %d\n", codec_mode);
+ ctx->codec_ops = &ctx->dev->variant->codec_ops[codec_mode];
+ if (ctx->codec_ops->init) {
+ ret = ctx->codec_ops->init(ctx);
+ if (ret)
+ return ret;
+ }
+
+ if (hantro_needs_postproc(ctx, ctx->vpu_dst_fmt)) {
+ ret = hantro_postproc_alloc(ctx);
+ if (ret)
+ goto err_codec_exit;
+ }
+ }
+ return ret;
+
+err_codec_exit:
+ if (ctx->codec_ops->exit)
+ ctx->codec_ops->exit(ctx);
+ return ret;
+}
+
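+/*
+ * Remove all buffers from the given queue and return them to userspace
+ * in the error state, completing any control requests attached to them.
+ */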
+static void
+hantro_return_bufs(struct vb2_queue *q,
+ struct vb2_v4l2_buffer *(*buf_remove)(struct v4l2_m2m_ctx *))
+{
+ struct hantro_ctx *ctx = vb2_get_drv_priv(q);
+
+ for (;;) {
+ struct vb2_v4l2_buffer *vbuf;
+
+ vbuf = buf_remove(ctx->fh.m2m_ctx);
+ if (!vbuf)
+ break;
+ v4l2_ctrl_request_complete(vbuf->vb2_buf.req_obj.req,
+ &ctx->ctrl_handler);
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
+ }
+}
+
+static void hantro_stop_streaming(struct vb2_queue *q)
+{
+ struct hantro_ctx *ctx = vb2_get_drv_priv(q);
+
+ if (hantro_vq_is_coded(q)) {
+ hantro_postproc_free(ctx);
+ if (ctx->codec_ops && ctx->codec_ops->exit)
+ ctx->codec_ops->exit(ctx);
+ }
+
+ /*
+ * The mem2mem framework calls v4l2_m2m_cancel_job before
+ * .stop_streaming, so there isn't any job running and
+ * it is safe to return all the buffers.
+ */
+ if (V4L2_TYPE_IS_OUTPUT(q->type))
+ hantro_return_bufs(q, v4l2_m2m_src_buf_remove);
+ else
+ hantro_return_bufs(q, v4l2_m2m_dst_buf_remove);
+
+ v4l2_m2m_update_stop_streaming_state(ctx->fh.m2m_ctx, q);
+
+ if (V4L2_TYPE_IS_OUTPUT(q->type) &&
+ v4l2_m2m_has_stopped(ctx->fh.m2m_ctx))
+ v4l2_event_queue_fh(&ctx->fh, &hantro_eos_event);
+}
+
+static void hantro_buf_request_complete(struct vb2_buffer *vb)
+{
+ struct hantro_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+ v4l2_ctrl_request_complete(vb->req_obj.req, &ctx->ctrl_handler);
+}
+
+static int hantro_buf_out_validate(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+
+ vbuf->field = V4L2_FIELD_NONE;
+ return 0;
+}
+
+const struct vb2_ops hantro_queue_ops = {
+ .queue_setup = hantro_queue_setup,
+ .buf_prepare = hantro_buf_prepare,
+ .buf_queue = hantro_buf_queue,
+ .buf_out_validate = hantro_buf_out_validate,
+ .buf_request_complete = hantro_buf_request_complete,
+ .start_streaming = hantro_start_streaming,
+ .stop_streaming = hantro_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
diff --git a/drivers/media/platform/verisilicon/hantro_v4l2.h b/drivers/media/platform/verisilicon/hantro_v4l2.h
new file mode 100644
index 0000000000..fca7e3a690
--- /dev/null
+++ b/drivers/media/platform/verisilicon/hantro_v4l2.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Hantro VPU codec driver
+ *
+ * Copyright (C) 2018 Rockchip Electronics Co., Ltd.
+ * Alpha Lin <Alpha.Lin@rock-chips.com>
+ * Jeffy Chen <jeffy.chen@rock-chips.com>
+ *
+ * Copyright 2018 Google LLC.
+ * Tomasz Figa <tfiga@chromium.org>
+ *
+ * Based on s5p-mfc driver by Samsung Electronics Co., Ltd.
+ * Copyright (C) 2011 Samsung Electronics Co., Ltd.
+ */
+
+#ifndef HANTRO_V4L2_H_
+#define HANTRO_V4L2_H_
+
+#include "hantro.h"
+
+#define HANTRO_FORCE_POSTPROC true
+#define HANTRO_AUTO_POSTPROC false
+
+extern const struct v4l2_ioctl_ops hantro_ioctl_ops;
+extern const struct vb2_ops hantro_queue_ops;
+
+int hantro_reset_raw_fmt(struct hantro_ctx *ctx, int bit_depth, bool need_postproc);
+void hantro_reset_fmts(struct hantro_ctx *ctx);
+int hantro_get_format_depth(u32 fourcc);
+const struct hantro_fmt *
+hantro_get_default_fmt(const struct hantro_ctx *ctx, bool bitstream,
+ int bit_depth, bool need_postproc);
+
+#endif /* HANTRO_V4L2_H_ */
diff --git a/drivers/media/platform/verisilicon/hantro_vp8.c b/drivers/media/platform/verisilicon/hantro_vp8.c
new file mode 100644
index 0000000000..381bc1d3bf
--- /dev/null
+++ b/drivers/media/platform/verisilicon/hantro_vp8.c
@@ -0,0 +1,201 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hantro VPU codec driver
+ *
+ * Copyright (C) 2018 Rockchip Electronics Co., Ltd.
+ */
+
+#include "hantro.h"
+
+/*
+ * VP8 probability table in the packed layout expected by the hardware.
+ */
+struct vp8_prob_tbl_packed {
+ u8 prob_mb_skip_false;
+ u8 prob_intra;
+ u8 prob_ref_last;
+ u8 prob_ref_golden;
+ u8 prob_segment[3];
+ u8 padding0;
+
+ u8 prob_luma_16x16_pred_mode[4];
+ u8 prob_chroma_pred_mode[3];
+ u8 padding1;
+
+ /* mv prob */
+ u8 prob_mv_context[2][V4L2_VP8_MV_PROB_CNT];
+ u8 padding2[2];
+
+ /* coeff probs */
+ u8 prob_coeffs[4][8][3][V4L2_VP8_COEFF_PROB_CNT];
+ u8 padding3[96];
+};
+
+/*
+ * Filter taps taken to 7-bit precision.
+ * Reference: RFC 6386, page 16, filters[8][6].
+ */
+const u32 hantro_vp8_dec_mc_filter[8][6] = {
+ { 0, 0, 128, 0, 0, 0 },
+ { 0, -6, 123, 12, -1, 0 },
+ { 2, -11, 108, 36, -8, 1 },
+ { 0, -9, 93, 50, -6, 0 },
+ { 3, -16, 77, 77, -16, 3 },
+ { 0, -6, 50, 93, -9, 0 },
+ { 1, -8, 36, 108, -11, 2 },
+ { 0, -1, 12, 123, -6, 0 }
+};
+
+void hantro_vp8_prob_update(struct hantro_ctx *ctx,
+ const struct v4l2_ctrl_vp8_frame *hdr)
+{
+ const struct v4l2_vp8_entropy *entropy = &hdr->entropy;
+ u32 i, j, k;
+ u8 *dst;
+
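+	/*
+	 * The table is packed in 8-byte groups (see struct
+	 * vp8_prob_tbl_packed), so dst is advanced in steps of 8 (or 4)
+	 * bytes below.
+	 */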
+ /* first probs */
+ dst = ctx->vp8_dec.prob_tbl.cpu;
+
+ dst[0] = hdr->prob_skip_false;
+ dst[1] = hdr->prob_intra;
+ dst[2] = hdr->prob_last;
+ dst[3] = hdr->prob_gf;
+ dst[4] = hdr->segment.segment_probs[0];
+ dst[5] = hdr->segment.segment_probs[1];
+ dst[6] = hdr->segment.segment_probs[2];
+ dst[7] = 0;
+
+ dst += 8;
+ dst[0] = entropy->y_mode_probs[0];
+ dst[1] = entropy->y_mode_probs[1];
+ dst[2] = entropy->y_mode_probs[2];
+ dst[3] = entropy->y_mode_probs[3];
+ dst[4] = entropy->uv_mode_probs[0];
+ dst[5] = entropy->uv_mode_probs[1];
+ dst[6] = entropy->uv_mode_probs[2];
+	dst[7] = 0; /* unused */
+
+ /* mv probs */
+ dst += 8;
+ dst[0] = entropy->mv_probs[0][0]; /* is short */
+ dst[1] = entropy->mv_probs[1][0];
+ dst[2] = entropy->mv_probs[0][1]; /* sign */
+ dst[3] = entropy->mv_probs[1][1];
+ dst[4] = entropy->mv_probs[0][8 + 9];
+ dst[5] = entropy->mv_probs[0][9 + 9];
+ dst[6] = entropy->mv_probs[1][8 + 9];
+ dst[7] = entropy->mv_probs[1][9 + 9];
+ dst += 8;
+ for (i = 0; i < 2; ++i) {
+ for (j = 0; j < 8; j += 4) {
+ dst[0] = entropy->mv_probs[i][j + 9 + 0];
+ dst[1] = entropy->mv_probs[i][j + 9 + 1];
+ dst[2] = entropy->mv_probs[i][j + 9 + 2];
+ dst[3] = entropy->mv_probs[i][j + 9 + 3];
+ dst += 4;
+ }
+ }
+ for (i = 0; i < 2; ++i) {
+ dst[0] = entropy->mv_probs[i][0 + 2];
+ dst[1] = entropy->mv_probs[i][1 + 2];
+ dst[2] = entropy->mv_probs[i][2 + 2];
+ dst[3] = entropy->mv_probs[i][3 + 2];
+ dst[4] = entropy->mv_probs[i][4 + 2];
+ dst[5] = entropy->mv_probs[i][5 + 2];
+ dst[6] = entropy->mv_probs[i][6 + 2];
+		dst[7] = 0; /* unused */
+ dst += 8;
+ }
+
+ /* coeff probs (header part) */
+ dst = ctx->vp8_dec.prob_tbl.cpu;
+ dst += (8 * 7);
+ for (i = 0; i < 4; ++i) {
+ for (j = 0; j < 8; ++j) {
+ for (k = 0; k < 3; ++k) {
+ dst[0] = entropy->coeff_probs[i][j][k][0];
+ dst[1] = entropy->coeff_probs[i][j][k][1];
+ dst[2] = entropy->coeff_probs[i][j][k][2];
+ dst[3] = entropy->coeff_probs[i][j][k][3];
+ dst += 4;
+ }
+ }
+ }
+
+ /* coeff probs (footer part) */
+ dst = ctx->vp8_dec.prob_tbl.cpu;
+ dst += (8 * 55);
+ for (i = 0; i < 4; ++i) {
+ for (j = 0; j < 8; ++j) {
+ for (k = 0; k < 3; ++k) {
+ dst[0] = entropy->coeff_probs[i][j][k][4];
+ dst[1] = entropy->coeff_probs[i][j][k][5];
+ dst[2] = entropy->coeff_probs[i][j][k][6];
+ dst[3] = entropy->coeff_probs[i][j][k][7];
+ dst[4] = entropy->coeff_probs[i][j][k][8];
+ dst[5] = entropy->coeff_probs[i][j][k][9];
+ dst[6] = entropy->coeff_probs[i][j][k][10];
+				dst[7] = 0; /* unused */
+ dst += 8;
+ }
+ }
+ }
+}
+
+int hantro_vp8_dec_init(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ struct hantro_aux_buf *aux_buf;
+ unsigned int mb_width, mb_height;
+ size_t segment_map_size;
+ int ret;
+
+	/* Segment map: 4 macroblocks per byte (2 bits each), rounded up to 64 bytes. */
+ mb_width = DIV_ROUND_UP(ctx->dst_fmt.width, 16);
+ mb_height = DIV_ROUND_UP(ctx->dst_fmt.height, 16);
+ segment_map_size = round_up(DIV_ROUND_UP(mb_width * mb_height, 4), 64);
+
+	/*
+	 * The DMA buffer for the segment map must be allocated at context
+	 * init time, and its contents must start out all-zero.
+	 */
+ aux_buf = &ctx->vp8_dec.segment_map;
+ aux_buf->size = segment_map_size;
+ aux_buf->cpu = dma_alloc_coherent(vpu->dev, aux_buf->size,
+ &aux_buf->dma, GFP_KERNEL);
+ if (!aux_buf->cpu)
+ return -ENOMEM;
+
+	/*
+	 * Allocate the probability table buffer; it needs 1208 bytes in
+	 * total, so a single 4K page is more than enough.
+	 */
+ aux_buf = &ctx->vp8_dec.prob_tbl;
+ aux_buf->size = sizeof(struct vp8_prob_tbl_packed);
+ aux_buf->cpu = dma_alloc_coherent(vpu->dev, aux_buf->size,
+ &aux_buf->dma, GFP_KERNEL);
+ if (!aux_buf->cpu) {
+ ret = -ENOMEM;
+ goto err_free_seg_map;
+ }
+
+ return 0;
+
+err_free_seg_map:
+ dma_free_coherent(vpu->dev, ctx->vp8_dec.segment_map.size,
+ ctx->vp8_dec.segment_map.cpu,
+ ctx->vp8_dec.segment_map.dma);
+
+ return ret;
+}
+
+void hantro_vp8_dec_exit(struct hantro_ctx *ctx)
+{
+ struct hantro_vp8_dec_hw_ctx *vp8_dec = &ctx->vp8_dec;
+ struct hantro_dev *vpu = ctx->dev;
+
+ dma_free_coherent(vpu->dev, vp8_dec->segment_map.size,
+ vp8_dec->segment_map.cpu, vp8_dec->segment_map.dma);
+ dma_free_coherent(vpu->dev, vp8_dec->prob_tbl.size,
+ vp8_dec->prob_tbl.cpu, vp8_dec->prob_tbl.dma);
+}
diff --git a/drivers/media/platform/verisilicon/hantro_vp9.c b/drivers/media/platform/verisilicon/hantro_vp9.c
new file mode 100644
index 0000000000..566cd376c0
--- /dev/null
+++ b/drivers/media/platform/verisilicon/hantro_vp9.c
@@ -0,0 +1,240 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hantro VP9 codec driver
+ *
+ * Copyright (C) 2021 Collabora Ltd.
+ */
+
+#include <linux/types.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "hantro.h"
+#include "hantro_hw.h"
+#include "hantro_vp9.h"
+
+#define POW2(x) (1 << (x))
+
+#define MAX_LOG2_TILE_COLUMNS 6
+#define MAX_NUM_TILE_COLS POW2(MAX_LOG2_TILE_COLUMNS)
+#define MAX_TILE_COLS 20
+#define MAX_TILE_ROWS 22
+
+static size_t hantro_vp9_tile_filter_size(unsigned int height)
+{
+ u32 h, height32, size;
+
+ h = roundup(height, 8);
+
+ height32 = roundup(h, 64);
+ size = 24 * height32 * (MAX_NUM_TILE_COLS - 1); /* luma: 8, chroma: 8 + 8 */
+
+ return size;
+}
+
+static size_t hantro_vp9_bsd_control_size(unsigned int height)
+{
+ u32 h, height32;
+
+ h = roundup(height, 8);
+ height32 = roundup(h, 64);
+
+ return 16 * (height32 / 4) * (MAX_NUM_TILE_COLS - 1);
+}
+
+static size_t hantro_vp9_segment_map_size(unsigned int width, unsigned int height)
+{
+ u32 w, h;
+ int num_ctbs;
+
+ w = roundup(width, 8);
+ h = roundup(height, 8);
+ num_ctbs = ((w + 63) / 64) * ((h + 63) / 64);
+
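+	/* 32 bytes of segment data are stored per 64x64 CTB. */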
+ return num_ctbs * 32;
+}
+
+static inline size_t hantro_vp9_prob_tab_size(void)
+{
+ return roundup(sizeof(struct hantro_g2_all_probs), 16);
+}
+
+static inline size_t hantro_vp9_count_tab_size(void)
+{
+ return roundup(sizeof(struct symbol_counts), 16);
+}
+
+static inline size_t hantro_vp9_tile_info_size(void)
+{
+ return roundup((MAX_TILE_COLS * MAX_TILE_ROWS * 4 * sizeof(u16) + 15 + 16) & ~0xf, 16);
+}
+
+static void *get_coeffs_arr(struct symbol_counts *cnts, int i, int j, int k, int l, int m)
+{
+ if (i == 0)
+ return &cnts->count_coeffs[j][k][l][m];
+
+ if (i == 1)
+ return &cnts->count_coeffs8x8[j][k][l][m];
+
+ if (i == 2)
+ return &cnts->count_coeffs16x16[j][k][l][m];
+
+ if (i == 3)
+ return &cnts->count_coeffs32x32[j][k][l][m];
+
+ return NULL;
+}
+
+static void *get_eobs1(struct symbol_counts *cnts, int i, int j, int k, int l, int m)
+{
+ if (i == 0)
+ return &cnts->count_coeffs[j][k][l][m][3];
+
+ if (i == 1)
+ return &cnts->count_coeffs8x8[j][k][l][m][3];
+
+ if (i == 2)
+ return &cnts->count_coeffs16x16[j][k][l][m][3];
+
+ if (i == 3)
+ return &cnts->count_coeffs32x32[j][k][l][m][3];
+
+ return NULL;
+}
+
+#define INNER_LOOP \
+ do { \
+ for (m = 0; m < ARRAY_SIZE(vp9_ctx->cnts.coeff[i][0][0][0]); ++m) { \
+ vp9_ctx->cnts.coeff[i][j][k][l][m] = \
+ get_coeffs_arr(cnts, i, j, k, l, m); \
+ vp9_ctx->cnts.eob[i][j][k][l][m][0] = \
+ &cnts->count_eobs[i][j][k][l][m]; \
+ vp9_ctx->cnts.eob[i][j][k][l][m][1] = \
+ get_eobs1(cnts, i, j, k, l, m); \
+ } \
+ } while (0)
+
+static void init_v4l2_vp9_count_tbl(struct hantro_ctx *ctx)
+{
+ struct hantro_vp9_dec_hw_ctx *vp9_ctx = &ctx->vp9_dec;
+ struct symbol_counts *cnts = vp9_ctx->misc.cpu + vp9_ctx->ctx_counters_offset;
+ int i, j, k, l, m;
+
+ vp9_ctx->cnts.partition = &cnts->partition_counts;
+ vp9_ctx->cnts.skip = &cnts->mbskip_count;
+ vp9_ctx->cnts.intra_inter = &cnts->intra_inter_count;
+ vp9_ctx->cnts.tx32p = &cnts->tx32x32_count;
+	/*
+	 * The G2 hardware uses tx16x16_count[2][3], while the API expects
+	 * tx16p[2][4], so the data must be explicitly copied into
+	 * vp9_ctx->cnts.tx16p when it is passed to the VP9 library
+	 * functions.
+	 */
+ vp9_ctx->cnts.tx8p = &cnts->tx8x8_count;
+
+ vp9_ctx->cnts.y_mode = &cnts->sb_ymode_counts;
+ vp9_ctx->cnts.uv_mode = &cnts->uv_mode_counts;
+ vp9_ctx->cnts.comp = &cnts->comp_inter_count;
+ vp9_ctx->cnts.comp_ref = &cnts->comp_ref_count;
+ vp9_ctx->cnts.single_ref = &cnts->single_ref_count;
+ vp9_ctx->cnts.filter = &cnts->switchable_interp_counts;
+ vp9_ctx->cnts.mv_joint = &cnts->mv_counts.joints;
+ vp9_ctx->cnts.sign = &cnts->mv_counts.sign;
+ vp9_ctx->cnts.classes = &cnts->mv_counts.classes;
+ vp9_ctx->cnts.class0 = &cnts->mv_counts.class0;
+ vp9_ctx->cnts.bits = &cnts->mv_counts.bits;
+ vp9_ctx->cnts.class0_fp = &cnts->mv_counts.class0_fp;
+ vp9_ctx->cnts.fp = &cnts->mv_counts.fp;
+ vp9_ctx->cnts.class0_hp = &cnts->mv_counts.class0_hp;
+ vp9_ctx->cnts.hp = &cnts->mv_counts.hp;
+
+ for (i = 0; i < ARRAY_SIZE(vp9_ctx->cnts.coeff); ++i)
+ for (j = 0; j < ARRAY_SIZE(vp9_ctx->cnts.coeff[i]); ++j)
+ for (k = 0; k < ARRAY_SIZE(vp9_ctx->cnts.coeff[i][0]); ++k)
+ for (l = 0; l < ARRAY_SIZE(vp9_ctx->cnts.coeff[i][0][0]); ++l)
+ INNER_LOOP;
+}
+
+int hantro_vp9_dec_init(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ const struct hantro_variant *variant = vpu->variant;
+ struct hantro_vp9_dec_hw_ctx *vp9_dec = &ctx->vp9_dec;
+ struct hantro_aux_buf *tile_edge = &vp9_dec->tile_edge;
+ struct hantro_aux_buf *segment_map = &vp9_dec->segment_map;
+ struct hantro_aux_buf *misc = &vp9_dec->misc;
+ u32 i, max_width, max_height, size;
+
+ if (variant->num_dec_fmts < 1)
+ return -EINVAL;
+
+ for (i = 0; i < variant->num_dec_fmts; ++i)
+ if (variant->dec_fmts[i].fourcc == V4L2_PIX_FMT_VP9_FRAME)
+ break;
+
+ if (i == variant->num_dec_fmts)
+ return -EINVAL;
+
+ max_width = vpu->variant->dec_fmts[i].frmsize.max_width;
+ max_height = vpu->variant->dec_fmts[i].frmsize.max_height;
+
+ size = hantro_vp9_tile_filter_size(max_height);
+ vp9_dec->bsd_ctrl_offset = size;
+ size += hantro_vp9_bsd_control_size(max_height);
+
+ tile_edge->cpu = dma_alloc_coherent(vpu->dev, size, &tile_edge->dma, GFP_KERNEL);
+ if (!tile_edge->cpu)
+ return -ENOMEM;
+
+ tile_edge->size = size;
+ memset(tile_edge->cpu, 0, size);
+
+ size = hantro_vp9_segment_map_size(max_width, max_height);
+ vp9_dec->segment_map_size = size;
+ size *= 2; /* we need two areas of this size, used alternately */
+
+ segment_map->cpu = dma_alloc_coherent(vpu->dev, size, &segment_map->dma, GFP_KERNEL);
+ if (!segment_map->cpu)
+ goto err_segment_map;
+
+ segment_map->size = size;
+ memset(segment_map->cpu, 0, size);
+
+ size = hantro_vp9_prob_tab_size();
+ vp9_dec->ctx_counters_offset = size;
+ size += hantro_vp9_count_tab_size();
+ vp9_dec->tile_info_offset = size;
+ size += hantro_vp9_tile_info_size();
+
+ misc->cpu = dma_alloc_coherent(vpu->dev, size, &misc->dma, GFP_KERNEL);
+ if (!misc->cpu)
+ goto err_misc;
+
+ misc->size = size;
+ memset(misc->cpu, 0, size);
+
+ init_v4l2_vp9_count_tbl(ctx);
+
+ return 0;
+
+err_misc:
+ dma_free_coherent(vpu->dev, segment_map->size, segment_map->cpu, segment_map->dma);
+
+err_segment_map:
+ dma_free_coherent(vpu->dev, tile_edge->size, tile_edge->cpu, tile_edge->dma);
+
+ return -ENOMEM;
+}
+
+void hantro_vp9_dec_exit(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ struct hantro_vp9_dec_hw_ctx *vp9_dec = &ctx->vp9_dec;
+ struct hantro_aux_buf *tile_edge = &vp9_dec->tile_edge;
+ struct hantro_aux_buf *segment_map = &vp9_dec->segment_map;
+ struct hantro_aux_buf *misc = &vp9_dec->misc;
+
+ dma_free_coherent(vpu->dev, misc->size, misc->cpu, misc->dma);
+ dma_free_coherent(vpu->dev, segment_map->size, segment_map->cpu, segment_map->dma);
+ dma_free_coherent(vpu->dev, tile_edge->size, tile_edge->cpu, tile_edge->dma);
+}
diff --git a/drivers/media/platform/verisilicon/hantro_vp9.h b/drivers/media/platform/verisilicon/hantro_vp9.h
new file mode 100644
index 0000000000..26b69275f0
--- /dev/null
+++ b/drivers/media/platform/verisilicon/hantro_vp9.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Hantro VP9 codec driver
+ *
+ * Copyright (C) 2021 Collabora Ltd.
+ */
+
+struct hantro_g2_mv_probs {
+ u8 joint[3];
+ u8 sign[2];
+ u8 class0_bit[2][1];
+ u8 fr[2][3];
+ u8 class0_hp[2];
+ u8 hp[2];
+ u8 classes[2][10];
+ u8 class0_fr[2][2][3];
+ u8 bits[2][10];
+};
+
+struct hantro_g2_probs {
+ u8 inter_mode[7][4];
+ u8 is_inter[4];
+ u8 uv_mode[10][8];
+ u8 tx8[2][1];
+ u8 tx16[2][2];
+ u8 tx32[2][3];
+ u8 y_mode_tail[4][1];
+ u8 y_mode[4][8];
+ u8 partition[2][16][4]; /* [keyframe][][], [inter][][] */
+ u8 uv_mode_tail[10][1];
+ u8 interp_filter[4][2];
+ u8 comp_mode[5];
+ u8 skip[3];
+
+ u8 pad1[1];
+
+ struct hantro_g2_mv_probs mv;
+
+ u8 single_ref[5][2];
+ u8 comp_ref[5];
+
+ u8 pad2[17];
+
+ u8 coef[4][2][2][6][6][4];
+};
+
+struct hantro_g2_all_probs {
+ u8 kf_y_mode_prob[10][10][8];
+
+ u8 kf_y_mode_prob_tail[10][10][1];
+ u8 ref_pred_probs[3];
+ u8 mb_segment_tree_probs[7];
+ u8 segment_pred_probs[3];
+ u8 ref_scores[4];
+ u8 prob_comppred[2];
+
+ u8 pad1[9];
+
+ u8 kf_uv_mode_prob[10][8];
+ u8 kf_uv_mode_prob_tail[10][1];
+
+ u8 pad2[6];
+
+ struct hantro_g2_probs probs;
+};
+
+struct mv_counts {
+ u32 joints[4];
+ u32 sign[2][2];
+ u32 classes[2][11];
+ u32 class0[2][2];
+ u32 bits[2][10][2];
+ u32 class0_fp[2][2][4];
+ u32 fp[2][4];
+ u32 class0_hp[2][2];
+ u32 hp[2][2];
+};
+
+struct symbol_counts {
+ u32 inter_mode_counts[7][3][2];
+ u32 sb_ymode_counts[4][10];
+ u32 uv_mode_counts[10][10];
+ u32 partition_counts[16][4];
+ u32 switchable_interp_counts[4][3];
+ u32 intra_inter_count[4][2];
+ u32 comp_inter_count[5][2];
+ u32 single_ref_count[5][2][2];
+ u32 comp_ref_count[5][2];
+ u32 tx32x32_count[2][4];
+ u32 tx16x16_count[2][3];
+ u32 tx8x8_count[2][2];
+ u32 mbskip_count[3][2];
+
+ struct mv_counts mv_counts;
+
+ u32 count_coeffs[2][2][6][6][4];
+ u32 count_coeffs8x8[2][2][6][6][4];
+ u32 count_coeffs16x16[2][2][6][6][4];
+ u32 count_coeffs32x32[2][2][6][6][4];
+
+ u32 count_eobs[4][2][2][6][6];
+};
diff --git a/drivers/media/platform/verisilicon/imx8m_vpu_hw.c b/drivers/media/platform/verisilicon/imx8m_vpu_hw.c
new file mode 100644
index 0000000000..f850d8bdde
--- /dev/null
+++ b/drivers/media/platform/verisilicon/imx8m_vpu_hw.c
@@ -0,0 +1,402 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hantro VPU codec driver
+ *
+ * Copyright (C) 2019 Pengutronix, Philipp Zabel <kernel@pengutronix.de>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+
+#include "hantro.h"
+#include "hantro_jpeg.h"
+#include "hantro_g1_regs.h"
+#include "hantro_g2_regs.h"
+
+#define CTRL_SOFT_RESET 0x00
+#define RESET_G1 BIT(1)
+#define RESET_G2 BIT(0)
+
+#define CTRL_CLOCK_ENABLE 0x04
+#define CLOCK_G1 BIT(1)
+#define CLOCK_G2 BIT(0)
+
+#define CTRL_G1_DEC_FUSE 0x08
+#define CTRL_G1_PP_FUSE 0x0c
+#define CTRL_G2_DEC_FUSE 0x10
+
+static void imx8m_soft_reset(struct hantro_dev *vpu, u32 reset_bits)
+{
+ u32 val;
+
+	/* Assert reset (the reset bits are active-low) */
+ val = readl(vpu->ctrl_base + CTRL_SOFT_RESET);
+ val &= ~reset_bits;
+ writel(val, vpu->ctrl_base + CTRL_SOFT_RESET);
+
+ udelay(2);
+
+	/* Release reset */
+ val = readl(vpu->ctrl_base + CTRL_SOFT_RESET);
+ val |= reset_bits;
+ writel(val, vpu->ctrl_base + CTRL_SOFT_RESET);
+}
+
+static void imx8m_clk_enable(struct hantro_dev *vpu, u32 clock_bits)
+{
+ u32 val;
+
+ val = readl(vpu->ctrl_base + CTRL_CLOCK_ENABLE);
+ val |= clock_bits;
+ writel(val, vpu->ctrl_base + CTRL_CLOCK_ENABLE);
+}
+
+static int imx8mq_runtime_resume(struct hantro_dev *vpu)
+{
+ int ret;
+
+ ret = clk_bulk_prepare_enable(vpu->variant->num_clocks, vpu->clocks);
+ if (ret) {
+ dev_err(vpu->dev, "Failed to enable clocks\n");
+ return ret;
+ }
+
+ imx8m_soft_reset(vpu, RESET_G1 | RESET_G2);
+ imx8m_clk_enable(vpu, CLOCK_G1 | CLOCK_G2);
+
+ /* Set values of the fuse registers */
+ writel(0xffffffff, vpu->ctrl_base + CTRL_G1_DEC_FUSE);
+ writel(0xffffffff, vpu->ctrl_base + CTRL_G1_PP_FUSE);
+ writel(0xffffffff, vpu->ctrl_base + CTRL_G2_DEC_FUSE);
+
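+	/* The clocks were only needed to program the reset and fuse registers. */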
+ clk_bulk_disable_unprepare(vpu->variant->num_clocks, vpu->clocks);
+
+ return 0;
+}
+
+/*
+ * Supported formats.
+ */
+
+static const struct hantro_fmt imx8m_vpu_postproc_fmts[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .codec_mode = HANTRO_MODE_NONE,
+ .postprocessed = true,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_UHD_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_UHD_HEIGHT,
+ .step_height = MB_DIM,
+ },
+ },
+};
+
+static const struct hantro_fmt imx8m_vpu_dec_fmts[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .codec_mode = HANTRO_MODE_NONE,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_UHD_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_UHD_HEIGHT,
+ .step_height = MB_DIM,
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_MPEG2_SLICE,
+ .codec_mode = HANTRO_MODE_MPEG2_DEC,
+ .max_depth = 2,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_FHD_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_FHD_HEIGHT,
+ .step_height = MB_DIM,
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_VP8_FRAME,
+ .codec_mode = HANTRO_MODE_VP8_DEC,
+ .max_depth = 2,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_UHD_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_UHD_HEIGHT,
+ .step_height = MB_DIM,
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_H264_SLICE,
+ .codec_mode = HANTRO_MODE_H264_DEC,
+ .max_depth = 2,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_UHD_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_UHD_HEIGHT,
+ .step_height = MB_DIM,
+ },
+ },
+};
+
+static const struct hantro_fmt imx8m_vpu_g2_postproc_fmts[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .codec_mode = HANTRO_MODE_NONE,
+ .match_depth = true,
+ .postprocessed = true,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_UHD_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_UHD_HEIGHT,
+ .step_height = MB_DIM,
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_P010,
+ .codec_mode = HANTRO_MODE_NONE,
+ .match_depth = true,
+ .postprocessed = true,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_UHD_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_UHD_HEIGHT,
+ .step_height = MB_DIM,
+ },
+ },
+};
+
+static const struct hantro_fmt imx8m_vpu_g2_dec_fmts[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_NV12_4L4,
+ .codec_mode = HANTRO_MODE_NONE,
+ .match_depth = true,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_UHD_WIDTH,
+ .step_width = TILE_MB_DIM,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_UHD_HEIGHT,
+ .step_height = TILE_MB_DIM,
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_P010_4L4,
+ .codec_mode = HANTRO_MODE_NONE,
+ .match_depth = true,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_UHD_WIDTH,
+ .step_width = TILE_MB_DIM,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_UHD_HEIGHT,
+ .step_height = TILE_MB_DIM,
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_HEVC_SLICE,
+ .codec_mode = HANTRO_MODE_HEVC_DEC,
+ .max_depth = 2,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_UHD_WIDTH,
+ .step_width = TILE_MB_DIM,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_UHD_HEIGHT,
+ .step_height = TILE_MB_DIM,
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_VP9_FRAME,
+ .codec_mode = HANTRO_MODE_VP9_DEC,
+ .max_depth = 2,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_UHD_WIDTH,
+ .step_width = TILE_MB_DIM,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_UHD_HEIGHT,
+ .step_height = TILE_MB_DIM,
+ },
+ },
+};
+
+static irqreturn_t imx8m_vpu_g1_irq(int irq, void *dev_id)
+{
+ struct hantro_dev *vpu = dev_id;
+ enum vb2_buffer_state state;
+ u32 status;
+
+ status = vdpu_read(vpu, G1_REG_INTERRUPT);
+ state = (status & G1_REG_INTERRUPT_DEC_RDY_INT) ?
+ VB2_BUF_STATE_DONE : VB2_BUF_STATE_ERROR;
+
+ vdpu_write(vpu, 0, G1_REG_INTERRUPT);
+ vdpu_write(vpu, G1_REG_CONFIG_DEC_CLK_GATE_E, G1_REG_CONFIG);
+
+ hantro_irq_done(vpu, state);
+
+ return IRQ_HANDLED;
+}
+
+static int imx8mq_vpu_hw_init(struct hantro_dev *vpu)
+{
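+	/* The control block is the last register range ("ctrl" in imx8mq_reg_names). */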
+ vpu->ctrl_base = vpu->reg_bases[vpu->variant->num_regs - 1];
+
+ return 0;
+}
+
+static void imx8m_vpu_g1_reset(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+
+ imx8m_soft_reset(vpu, RESET_G1);
+}
+
+/*
+ * Supported codec ops.
+ */
+
+static const struct hantro_codec_ops imx8mq_vpu_codec_ops[] = {
+ [HANTRO_MODE_MPEG2_DEC] = {
+ .run = hantro_g1_mpeg2_dec_run,
+ .reset = imx8m_vpu_g1_reset,
+ .init = hantro_mpeg2_dec_init,
+ .exit = hantro_mpeg2_dec_exit,
+ },
+ [HANTRO_MODE_VP8_DEC] = {
+ .run = hantro_g1_vp8_dec_run,
+ .reset = imx8m_vpu_g1_reset,
+ .init = hantro_vp8_dec_init,
+ .exit = hantro_vp8_dec_exit,
+ },
+ [HANTRO_MODE_H264_DEC] = {
+ .run = hantro_g1_h264_dec_run,
+ .reset = imx8m_vpu_g1_reset,
+ .init = hantro_h264_dec_init,
+ .exit = hantro_h264_dec_exit,
+ },
+};
+
+static const struct hantro_codec_ops imx8mq_vpu_g1_codec_ops[] = {
+ [HANTRO_MODE_MPEG2_DEC] = {
+ .run = hantro_g1_mpeg2_dec_run,
+ .init = hantro_mpeg2_dec_init,
+ .exit = hantro_mpeg2_dec_exit,
+ },
+ [HANTRO_MODE_VP8_DEC] = {
+ .run = hantro_g1_vp8_dec_run,
+ .init = hantro_vp8_dec_init,
+ .exit = hantro_vp8_dec_exit,
+ },
+ [HANTRO_MODE_H264_DEC] = {
+ .run = hantro_g1_h264_dec_run,
+ .init = hantro_h264_dec_init,
+ .exit = hantro_h264_dec_exit,
+ },
+};
+
+static const struct hantro_codec_ops imx8mq_vpu_g2_codec_ops[] = {
+ [HANTRO_MODE_HEVC_DEC] = {
+ .run = hantro_g2_hevc_dec_run,
+ .init = hantro_hevc_dec_init,
+ .exit = hantro_hevc_dec_exit,
+ },
+ [HANTRO_MODE_VP9_DEC] = {
+ .run = hantro_g2_vp9_dec_run,
+ .done = hantro_g2_vp9_dec_done,
+ .init = hantro_vp9_dec_init,
+ .exit = hantro_vp9_dec_exit,
+ },
+};
+
+/*
+ * VPU variants.
+ */
+
+static const struct hantro_irq imx8mq_irqs[] = {
+ { "g1", imx8m_vpu_g1_irq },
+};
+
+static const struct hantro_irq imx8mq_g2_irqs[] = {
+ { "g2", hantro_g2_irq },
+};
+
+static const char * const imx8mq_clk_names[] = { "g1", "g2", "bus" };
+static const char * const imx8mq_reg_names[] = { "g1", "g2", "ctrl" };
+static const char * const imx8mq_g1_clk_names[] = { "g1" };
+static const char * const imx8mq_g2_clk_names[] = { "g2" };
+
+const struct hantro_variant imx8mq_vpu_variant = {
+ .dec_fmts = imx8m_vpu_dec_fmts,
+ .num_dec_fmts = ARRAY_SIZE(imx8m_vpu_dec_fmts),
+ .postproc_fmts = imx8m_vpu_postproc_fmts,
+ .num_postproc_fmts = ARRAY_SIZE(imx8m_vpu_postproc_fmts),
+ .postproc_ops = &hantro_g1_postproc_ops,
+ .codec = HANTRO_MPEG2_DECODER | HANTRO_VP8_DECODER |
+ HANTRO_H264_DECODER,
+ .codec_ops = imx8mq_vpu_codec_ops,
+ .init = imx8mq_vpu_hw_init,
+ .runtime_resume = imx8mq_runtime_resume,
+ .irqs = imx8mq_irqs,
+ .num_irqs = ARRAY_SIZE(imx8mq_irqs),
+ .clk_names = imx8mq_clk_names,
+ .num_clocks = ARRAY_SIZE(imx8mq_clk_names),
+ .reg_names = imx8mq_reg_names,
+ .num_regs = ARRAY_SIZE(imx8mq_reg_names)
+};
+
+const struct hantro_variant imx8mq_vpu_g1_variant = {
+ .dec_fmts = imx8m_vpu_dec_fmts,
+ .num_dec_fmts = ARRAY_SIZE(imx8m_vpu_dec_fmts),
+ .postproc_fmts = imx8m_vpu_postproc_fmts,
+ .num_postproc_fmts = ARRAY_SIZE(imx8m_vpu_postproc_fmts),
+ .postproc_ops = &hantro_g1_postproc_ops,
+ .codec = HANTRO_MPEG2_DECODER | HANTRO_VP8_DECODER |
+ HANTRO_H264_DECODER,
+ .codec_ops = imx8mq_vpu_g1_codec_ops,
+ .irqs = imx8mq_irqs,
+ .num_irqs = ARRAY_SIZE(imx8mq_irqs),
+ .clk_names = imx8mq_g1_clk_names,
+ .num_clocks = ARRAY_SIZE(imx8mq_g1_clk_names),
+};
+
+const struct hantro_variant imx8mq_vpu_g2_variant = {
+ .dec_offset = 0x0,
+ .dec_fmts = imx8m_vpu_g2_dec_fmts,
+ .num_dec_fmts = ARRAY_SIZE(imx8m_vpu_g2_dec_fmts),
+ .postproc_fmts = imx8m_vpu_g2_postproc_fmts,
+ .num_postproc_fmts = ARRAY_SIZE(imx8m_vpu_g2_postproc_fmts),
+ .postproc_ops = &hantro_g2_postproc_ops,
+ .codec = HANTRO_HEVC_DECODER | HANTRO_VP9_DECODER,
+ .codec_ops = imx8mq_vpu_g2_codec_ops,
+ .irqs = imx8mq_g2_irqs,
+ .num_irqs = ARRAY_SIZE(imx8mq_g2_irqs),
+ .clk_names = imx8mq_g2_clk_names,
+ .num_clocks = ARRAY_SIZE(imx8mq_g2_clk_names),
+};
+
+const struct hantro_variant imx8mm_vpu_g1_variant = {
+ .dec_fmts = imx8m_vpu_dec_fmts,
+ .num_dec_fmts = ARRAY_SIZE(imx8m_vpu_dec_fmts),
+ .codec = HANTRO_MPEG2_DECODER | HANTRO_VP8_DECODER |
+ HANTRO_H264_DECODER,
+ .codec_ops = imx8mq_vpu_g1_codec_ops,
+ .irqs = imx8mq_irqs,
+ .num_irqs = ARRAY_SIZE(imx8mq_irqs),
+ .clk_names = imx8mq_g1_clk_names,
+ .num_clocks = ARRAY_SIZE(imx8mq_g1_clk_names),
+};
diff --git a/drivers/media/platform/verisilicon/rockchip_av1_entropymode.c b/drivers/media/platform/verisilicon/rockchip_av1_entropymode.c
new file mode 100644
index 0000000000..b1ae72ad67
--- /dev/null
+++ b/drivers/media/platform/verisilicon/rockchip_av1_entropymode.c
@@ -0,0 +1,4424 @@
+// SPDX-License-Identifier: BSD-2-Clause
+/*
+ * Copyright (c) 2016, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include "hantro.h"
+#include "rockchip_av1_entropymode.h"
+
+#define AOM_ICDF ICDF
+#define AOM_CDF2(a0) AOM_ICDF(a0)
+#define AOM_CDF3(a0, a1) \
+ AOM_ICDF(a0), AOM_ICDF(a1)
+#define AOM_CDF4(a0, a1, a2) \
+ AOM_ICDF(a0), AOM_ICDF(a1), AOM_ICDF(a2)
+#define AOM_CDF5(a0, a1, a2, a3) \
+ AOM_ICDF(a0), AOM_ICDF(a1), AOM_ICDF(a2), AOM_ICDF(a3)
+#define AOM_CDF6(a0, a1, a2, a3, a4) \
+ AOM_ICDF(a0), AOM_ICDF(a1), AOM_ICDF(a2), AOM_ICDF(a3), AOM_ICDF(a4)
+#define AOM_CDF7(a0, a1, a2, a3, a4, a5) \
+ AOM_ICDF(a0), AOM_ICDF(a1), AOM_ICDF(a2), \
+ AOM_ICDF(a3), AOM_ICDF(a4), AOM_ICDF(a5)
+#define AOM_CDF8(a0, a1, a2, a3, a4, a5, a6) \
+ AOM_ICDF(a0), AOM_ICDF(a1), AOM_ICDF(a2), \
+ AOM_ICDF(a3), AOM_ICDF(a4), AOM_ICDF(a5), AOM_ICDF(a6)
+#define AOM_CDF9(a0, a1, a2, a3, a4, a5, a6, a7) \
+ AOM_ICDF(a0), AOM_ICDF(a1), AOM_ICDF(a2), AOM_ICDF(a3), \
+ AOM_ICDF(a4), AOM_ICDF(a5), AOM_ICDF(a6), AOM_ICDF(a7)
+#define AOM_CDF10(a0, a1, a2, a3, a4, a5, a6, a7, a8) \
+ AOM_ICDF(a0), AOM_ICDF(a1), AOM_ICDF(a2), AOM_ICDF(a3), \
+ AOM_ICDF(a4), AOM_ICDF(a5), AOM_ICDF(a6), AOM_ICDF(a7), AOM_ICDF(a8)
+#define AOM_CDF11(a0, a1, a2, a3, a4, a5, a6, a7, a8, a9) \
+ AOM_ICDF(a0), AOM_ICDF(a1), AOM_ICDF(a2), AOM_ICDF(a3), AOM_ICDF(a4), \
+ AOM_ICDF(a5), AOM_ICDF(a6), AOM_ICDF(a7), AOM_ICDF(a8), AOM_ICDF(a9)
+#define AOM_CDF12(a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10) \
+ AOM_ICDF(a0), AOM_ICDF(a1), AOM_ICDF(a2), AOM_ICDF(a3), AOM_ICDF(a4), AOM_ICDF(a5), \
+ AOM_ICDF(a6), AOM_ICDF(a7), AOM_ICDF(a8), AOM_ICDF(a9), AOM_ICDF(a10)
+#define AOM_CDF13(a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11) \
+ AOM_ICDF(a0), AOM_ICDF(a1), AOM_ICDF(a2), AOM_ICDF(a3), AOM_ICDF(a4), AOM_ICDF(a5), \
+ AOM_ICDF(a6), AOM_ICDF(a7), AOM_ICDF(a8), AOM_ICDF(a9), AOM_ICDF(a10), AOM_ICDF(a11)
+#define AOM_CDF14(a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12) \
+ AOM_ICDF(a0), AOM_ICDF(a1), AOM_ICDF(a2), AOM_ICDF(a3), AOM_ICDF(a4), \
+ AOM_ICDF(a5), AOM_ICDF(a6), AOM_ICDF(a7), AOM_ICDF(a8), AOM_ICDF(a9), \
+ AOM_ICDF(a10), AOM_ICDF(a11), AOM_ICDF(a12)
+#define AOM_CDF15(a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13) \
+ AOM_ICDF(a0), AOM_ICDF(a1), AOM_ICDF(a2), AOM_ICDF(a3), AOM_ICDF(a4), \
+ AOM_ICDF(a5), AOM_ICDF(a6), AOM_ICDF(a7), AOM_ICDF(a8), AOM_ICDF(a9), \
+ AOM_ICDF(a10), AOM_ICDF(a11), AOM_ICDF(a12), AOM_ICDF(a13)
+#define AOM_CDF16(a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14) \
+ AOM_ICDF(a0), AOM_ICDF(a1), AOM_ICDF(a2), AOM_ICDF(a3), AOM_ICDF(a4), \
+ AOM_ICDF(a5), AOM_ICDF(a6), AOM_ICDF(a7), AOM_ICDF(a8), AOM_ICDF(a9), \
+ AOM_ICDF(a10), AOM_ICDF(a11), AOM_ICDF(a12), AOM_ICDF(a13), AOM_ICDF(a14)
+
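+/*
+ * Each AOM_CDFn() entry below lists the n - 1 cumulative probabilities
+ * of an n-symbol alphabet (15-bit, i.e. out of 32768); the final value
+ * is implicit.
+ */
+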
+static const u16 default_kf_y_mode_cdf
+ [KF_MODE_CONTEXTS][KF_MODE_CONTEXTS][CDF_SIZE(AV1_INTRA_MODES)] = {
+ {
+ {
+ AOM_CDF13(15588, 17027, 19338, 20218, 20682, 21110,
+ 21825, 23244, 24189, 28165, 29093, 30466)
+ },
+ {
+ AOM_CDF13(12016, 18066, 19516, 20303, 20719, 21444,
+ 21888, 23032, 24434, 28658, 30172, 31409)
+ },
+ {
+ AOM_CDF13(10052, 10771, 22296, 22788, 23055, 23239,
+ 24133, 25620, 26160, 29336, 29929, 31567)
+ },
+ {
+ AOM_CDF13(14091, 15406, 16442, 18808, 19136, 19546,
+ 19998, 22096, 24746, 29585, 30958, 32462)
+ },
+ {
+ AOM_CDF13(12122, 13265, 15603, 16501, 18609, 20033,
+ 22391, 25583, 26437, 30261, 31073, 32475)
+ }
+ },
+ {
+ {
+ AOM_CDF13(10023, 19585, 20848, 21440, 21832, 22760,
+ 23089, 24023, 25381, 29014, 30482, 31436)
+ },
+ {
+ AOM_CDF13(5983, 24099, 24560, 24886, 25066, 25795,
+ 25913, 26423, 27610, 29905, 31276, 31794)
+ },
+ {
+ AOM_CDF13(7444, 12781, 20177, 20728, 21077, 21607,
+ 22170, 23405, 24469, 27915, 29090, 30492)
+ },
+ {
+ AOM_CDF13(8537, 14689, 15432, 17087, 17408, 18172,
+ 18408, 19825, 24649, 29153, 31096, 32210)
+ },
+ {
+ AOM_CDF13(7543, 14231, 15496, 16195, 17905, 20717,
+ 21984, 24516, 26001, 29675, 30981, 31994)
+ }
+ },
+ {
+ {
+ AOM_CDF13(12613, 13591, 21383, 22004, 22312, 22577,
+ 23401, 25055, 25729, 29538, 30305, 32077)
+ },
+ {
+ AOM_CDF13(9687, 13470, 18506, 19230, 19604, 20147,
+ 20695, 22062, 23219, 27743, 29211, 30907)
+ },
+ {
+ AOM_CDF13(6183, 6505, 26024, 26252, 26366, 26434,
+ 27082, 28354, 28555, 30467, 30794, 32086)
+ },
+ {
+ AOM_CDF13(10718, 11734, 14954, 17224, 17565, 17924,
+ 18561, 21523, 23878, 28975, 30287, 32252)
+ },
+ {
+ AOM_CDF13(9194, 9858, 16501, 17263, 18424, 19171,
+ 21563, 25961, 26561, 30072, 30737, 32463)
+ }
+ },
+ {
+ {
+ AOM_CDF13(12602, 14399, 15488, 18381, 18778, 19315,
+ 19724, 21419, 25060, 29696, 30917, 32409)
+ },
+ {
+ AOM_CDF13(8203, 13821, 14524, 17105, 17439, 18131,
+ 18404, 19468, 25225, 29485, 31158, 32342)
+ },
+ {
+ AOM_CDF13(8451, 9731, 15004, 17643, 18012, 18425,
+ 19070, 21538, 24605, 29118, 30078, 32018)
+ },
+ {
+ AOM_CDF13(7714, 9048, 9516, 16667, 16817, 16994,
+ 17153, 18767, 26743, 30389, 31536, 32528)
+ },
+ {
+ AOM_CDF13(8843, 10280, 11496, 15317, 16652, 17943,
+ 19108, 22718, 25769, 29953, 30983, 32485)
+ }
+ },
+ {
+ {
+ AOM_CDF13(12578, 13671, 15979, 16834, 19075, 20913,
+ 22989, 25449, 26219, 30214, 31150, 32477)
+ },
+ {
+ AOM_CDF13(9563, 13626, 15080, 15892, 17756, 20863,
+ 22207, 24236, 25380, 29653, 31143, 32277)
+ },
+ {
+ AOM_CDF13(8356, 8901, 17616, 18256, 19350, 20106,
+ 22598, 25947, 26466, 29900, 30523, 32261)
+ },
+ {
+ AOM_CDF13(10835, 11815, 13124, 16042, 17018, 18039,
+ 18947, 22753, 24615, 29489, 30883, 32482)
+ },
+ {
+ AOM_CDF13(7618, 8288, 9859, 10509, 15386, 18657,
+ 22903, 28776, 29180, 31355, 31802, 32593)
+ }
+ }
+};
+
+static const u16 default_angle_delta_cdf[DIRECTIONAL_MODES]
+ [CDF_SIZE(2 * MAX_ANGLE_DELTA + 1)] = {
+ { AOM_CDF7(2180, 5032, 7567, 22776, 26989, 30217) },
+ { AOM_CDF7(2301, 5608, 8801, 23487, 26974, 30330) },
+ { AOM_CDF7(3780, 11018, 13699, 19354, 23083, 31286) },
+ { AOM_CDF7(4581, 11226, 15147, 17138, 21834, 28397) },
+ { AOM_CDF7(1737, 10927, 14509, 19588, 22745, 28823) },
+ { AOM_CDF7(2664, 10176, 12485, 17650, 21600, 30495) },
+ { AOM_CDF7(2240, 11096, 15453, 20341, 22561, 28917) },
+ { AOM_CDF7(3605, 10428, 12459, 17676, 21244, 30655) }
+};
+
+static const u16 default_if_y_mode_cdf[BLOCK_SIZE_GROUPS][CDF_SIZE(AV1_INTRA_MODES)] = {
+ {
+ AOM_CDF13(22801, 23489, 24293, 24756, 25601, 26123,
+ 26606, 27418, 27945, 29228, 29685, 30349)
+ },
+ {
+ AOM_CDF13(18673, 19845, 22631, 23318, 23950, 24649,
+ 25527, 27364, 28152, 29701, 29984, 30852)
+ },
+ {
+ AOM_CDF13(19770, 20979, 23396, 23939, 24241, 24654,
+ 25136, 27073, 27830, 29360, 29730, 30659)
+ },
+ {
+ AOM_CDF13(20155, 21301, 22838, 23178, 23261, 23533,
+ 23703, 24804, 25352, 26575, 27016, 28049)
+ }
+};
+
+static const u16 default_uv_mode_cdf[CFL_ALLOWED_TYPES]
+ [AV1_INTRA_MODES][CDF_SIZE(UV_INTRA_MODES)] = {
+ {
+ {
+ AOM_CDF13(22631, 24152, 25378, 25661, 25986, 26520,
+ 27055, 27923, 28244, 30059, 30941, 31961)
+ },
+ {
+ AOM_CDF13(9513, 26881, 26973, 27046, 27118, 27664,
+ 27739, 27824, 28359, 29505, 29800, 31796)
+ },
+ {
+ AOM_CDF13(9845, 9915, 28663, 28704, 28757, 28780,
+ 29198, 29822, 29854, 30764, 31777, 32029)
+ },
+ {
+ AOM_CDF13(13639, 13897, 14171, 25331, 25606, 25727,
+ 25953, 27148, 28577, 30612, 31355, 32493)
+ },
+ {
+ AOM_CDF13(9764, 9835, 9930, 9954, 25386, 27053,
+ 27958, 28148, 28243, 31101, 31744, 32363)
+ },
+ {
+ AOM_CDF13(11825, 13589, 13677, 13720, 15048, 29213,
+ 29301, 29458, 29711, 31161, 31441, 32550)
+ },
+ {
+ AOM_CDF13(14175, 14399, 16608, 16821, 17718, 17775,
+ 28551, 30200, 30245, 31837, 32342, 32667)
+ },
+ {
+ AOM_CDF13(12885, 13038, 14978, 15590, 15673, 15748,
+ 16176, 29128, 29267, 30643, 31961, 32461)
+ },
+ {
+ AOM_CDF13(12026, 13661, 13874, 15305, 15490, 15726,
+ 15995, 16273, 28443, 30388, 30767, 32416)
+ },
+ {
+ AOM_CDF13(19052, 19840, 20579, 20916, 21150, 21467,
+ 21885, 22719, 23174, 28861, 30379, 32175)
+ },
+ {
+ AOM_CDF13(18627, 19649, 20974, 21219, 21492, 21816,
+ 22199, 23119, 23527, 27053, 31397, 32148)
+ },
+ {
+ AOM_CDF13(17026, 19004, 19997, 20339, 20586, 21103,
+ 21349, 21907, 22482, 25896, 26541, 31819)
+ },
+ {
+ AOM_CDF13(12124, 13759, 14959, 14992, 15007, 15051,
+ 15078, 15166, 15255, 15753, 16039, 16606)
+ }
+ },
+ {
+ {
+ AOM_CDF14(10407, 11208, 12900, 13181, 13823, 14175,
+ 14899, 15656, 15986, 20086, 20995, 22455,
+ 24212)
+ },
+ {
+ AOM_CDF14(4532, 19780, 20057, 20215, 20428, 21071,
+ 21199, 21451, 22099, 24228, 24693, 27032,
+ 29472)
+ },
+ {
+ AOM_CDF14(5273, 5379, 20177, 20270, 20385, 20439,
+ 20949, 21695, 21774, 23138, 24256, 24703,
+ 26679)
+ },
+ {
+ AOM_CDF14(6740, 7167, 7662, 14152, 14536, 14785,
+ 15034, 16741, 18371, 21520, 22206, 23389,
+ 24182)
+ },
+ {
+ AOM_CDF14(4987, 5368, 5928, 6068, 19114, 20315, 21857,
+ 22253, 22411, 24911, 25380, 26027, 26376)
+ },
+ {
+ AOM_CDF14(5370, 6889, 7247, 7393, 9498, 21114, 21402,
+ 21753, 21981, 24780, 25386, 26517, 27176)
+ },
+ {
+ AOM_CDF14(4816, 4961, 7204, 7326, 8765, 8930, 20169,
+ 20682, 20803, 23188, 23763, 24455, 24940)
+ },
+ {
+ AOM_CDF14(6608, 6740, 8529, 9049, 9257, 9356, 9735,
+ 18827, 19059, 22336, 23204, 23964, 24793)
+ },
+ {
+ AOM_CDF14(5998, 7419, 7781, 8933, 9255, 9549, 9753,
+ 10417, 18898, 22494, 23139, 24764, 25989)
+ },
+ {
+ AOM_CDF14(10660, 11298, 12550, 12957, 13322, 13624,
+ 14040, 15004, 15534, 20714, 21789, 23443,
+ 24861)
+ },
+ {
+ AOM_CDF14(10522, 11530, 12552, 12963, 13378, 13779,
+ 14245, 15235, 15902, 20102, 22696, 23774,
+ 25838)
+ },
+ {
+ AOM_CDF14(10099, 10691, 12639, 13049, 13386, 13665,
+ 14125, 15163, 15636, 19676, 20474, 23519,
+ 25208)
+ },
+ {
+ AOM_CDF14(3144, 5087, 7382, 7504, 7593, 7690, 7801,
+ 8064, 8232, 9248, 9875, 10521, 29048)
+ }
+ }
+};
+
+static const u16 default_partition_cdf[13][16] = {
+ {
+ AOM_CDF4(19132, 25510, 30392), AOM_CDF4(13928, 19855, 28540),
+ AOM_CDF4(12522, 23679, 28629), AOM_CDF4(9896, 18783, 25853),
+ AOM_CDF2(11570), AOM_CDF2(16855), AOM_CDF3(9413, 22581)
+ },
+ {
+ AOM_CDF10(15597, 20929, 24571, 26706, 27664, 28821, 29601, 30571, 31902)
+ },
+ {
+ AOM_CDF10(7925, 11043, 16785, 22470, 23971, 25043, 26651, 28701, 29834)
+ },
+ {
+ AOM_CDF10(5414, 13269, 15111, 20488, 22360, 24500, 25537, 26336, 32117)
+ },
+ {
+ AOM_CDF10(2662, 6362, 8614, 20860, 23053, 24778, 26436, 27829, 31171)
+ },
+ {
+ AOM_CDF10(18462, 20920, 23124, 27647, 28227, 29049, 29519, 30178, 31544)
+ },
+ {
+ AOM_CDF10(7689, 9060, 12056, 24992, 25660, 26182, 26951, 28041, 29052)
+ },
+ {
+ AOM_CDF10(6015, 9009, 10062, 24544, 25409, 26545, 27071, 27526, 32047)
+ },
+ {
+ AOM_CDF10(1394, 2208, 2796, 28614, 29061, 29466, 29840, 30185, 31899)
+ },
+ {
+ AOM_CDF10(20137, 21547, 23078, 29566, 29837, 30261, 30524, 30892, 31724),
+ AOM_CDF8(27899, 28219, 28529, 32484, 32539, 32619, 32639)
+ },
+ {
+ AOM_CDF10(6732, 7490, 9497, 27944, 28250, 28515, 28969, 29630, 30104),
+ AOM_CDF8(6607, 6990, 8268, 32060, 32219, 32338, 32371)
+ },
+ {
+ AOM_CDF10(5945, 7663, 8348, 28683, 29117, 29749, 30064, 30298, 32238),
+ AOM_CDF8(5429, 6676, 7122, 32027, 32227, 32531, 32582)
+ },
+ {
+ AOM_CDF10(870, 1212, 1487, 31198, 31394, 31574, 31743, 31881, 32332),
+ AOM_CDF8(711, 966, 1172, 32448, 32538, 32617, 32664)
+ },
+};
+
+static const u16 default_intra_ext_tx0_cdf[EXTTX_SIZES][AV1_INTRA_MODES][8] = {
+ {
+ { AOM_CDF7(1535, 8035, 9461, 12751, 23467, 27825)},
+ { AOM_CDF7(564, 3335, 9709, 10870, 18143, 28094)},
+ { AOM_CDF7(672, 3247, 3676, 11982, 19415, 23127)},
+ { AOM_CDF7(5279, 13885, 15487, 18044, 23527, 30252)},
+ { AOM_CDF7(4423, 6074, 7985, 10416, 25693, 29298)},
+ { AOM_CDF7(1486, 4241, 9460, 10662, 16456, 27694)},
+ { AOM_CDF7(439, 2838, 3522, 6737, 18058, 23754)},
+ { AOM_CDF7(1190, 4233, 4855, 11670, 20281, 24377)},
+ { AOM_CDF7(1045, 4312, 8647, 10159, 18644, 29335)},
+ { AOM_CDF7(202, 3734, 4747, 7298, 17127, 24016)},
+ { AOM_CDF7(447, 4312, 6819, 8884, 16010, 23858)},
+ { AOM_CDF7(277, 4369, 5255, 8905, 16465, 22271)},
+ { AOM_CDF7(3409, 5436, 10599, 15599, 19687, 24040)},
+ },
+ {
+ { AOM_CDF7(1870, 13742, 14530, 16498, 23770, 27698)},
+ { AOM_CDF7(326, 8796, 14632, 15079, 19272, 27486)},
+ { AOM_CDF7(484, 7576, 7712, 14443, 19159, 22591)},
+ { AOM_CDF7(1126, 15340, 15895, 17023, 20896, 30279)},
+ { AOM_CDF7(655, 4854, 5249, 5913, 22099, 27138)},
+ { AOM_CDF7(1299, 6458, 8885, 9290, 14851, 25497)},
+ { AOM_CDF7(311, 5295, 5552, 6885, 16107, 22672)},
+ { AOM_CDF7(883, 8059, 8270, 11258, 17289, 21549)},
+ { AOM_CDF7(741, 7580, 9318, 10345, 16688, 29046)},
+ { AOM_CDF7(110, 7406, 7915, 9195, 16041, 23329)},
+ { AOM_CDF7(363, 7974, 9357, 10673, 15629, 24474)},
+ { AOM_CDF7(153, 7647, 8112, 9936, 15307, 19996)},
+ { AOM_CDF7(3511, 6332, 11165, 15335, 19323, 23594)},
+ },
+ {
+ { AOM_CDF7(4681, 9362, 14043, 18725, 23406, 28087)},
+ { AOM_CDF7(4681, 9362, 14043, 18725, 23406, 28087)},
+ { AOM_CDF7(4681, 9362, 14043, 18725, 23406, 28087)},
+ { AOM_CDF7(4681, 9362, 14043, 18725, 23406, 28087)},
+ { AOM_CDF7(4681, 9362, 14043, 18725, 23406, 28087)},
+ { AOM_CDF7(4681, 9362, 14043, 18725, 23406, 28087)},
+ { AOM_CDF7(4681, 9362, 14043, 18725, 23406, 28087)},
+ { AOM_CDF7(4681, 9362, 14043, 18725, 23406, 28087)},
+ { AOM_CDF7(4681, 9362, 14043, 18725, 23406, 28087)},
+ { AOM_CDF7(4681, 9362, 14043, 18725, 23406, 28087)},
+ { AOM_CDF7(4681, 9362, 14043, 18725, 23406, 28087)},
+ { AOM_CDF7(4681, 9362, 14043, 18725, 23406, 28087)},
+ { AOM_CDF7(4681, 9362, 14043, 18725, 23406, 28087)},
+ },
+ {
+ { AOM_CDF7(4681, 9362, 14043, 18725, 23406, 28087)},
+ { AOM_CDF7(4681, 9362, 14043, 18725, 23406, 28087)},
+ { AOM_CDF7(4681, 9362, 14043, 18725, 23406, 28087)},
+ { AOM_CDF7(4681, 9362, 14043, 18725, 23406, 28087)},
+ { AOM_CDF7(4681, 9362, 14043, 18725, 23406, 28087)},
+ { AOM_CDF7(4681, 9362, 14043, 18725, 23406, 28087)},
+ { AOM_CDF7(4681, 9362, 14043, 18725, 23406, 28087)},
+ { AOM_CDF7(4681, 9362, 14043, 18725, 23406, 28087)},
+ { AOM_CDF7(4681, 9362, 14043, 18725, 23406, 28087)},
+ { AOM_CDF7(4681, 9362, 14043, 18725, 23406, 28087)},
+ { AOM_CDF7(4681, 9362, 14043, 18725, 23406, 28087)},
+ { AOM_CDF7(4681, 9362, 14043, 18725, 23406, 28087)},
+ { AOM_CDF7(4681, 9362, 14043, 18725, 23406, 28087)},
+ },
+};
+
+static const u16 default_intra_ext_tx1_cdf[EXTTX_SIZES][AV1_INTRA_MODES][4] = {
+ {
+ { AOM_CDF5(6554, 13107, 19661, 26214)},
+ { AOM_CDF5(6554, 13107, 19661, 26214)},
+ { AOM_CDF5(6554, 13107, 19661, 26214)},
+ { AOM_CDF5(6554, 13107, 19661, 26214)},
+ { AOM_CDF5(6554, 13107, 19661, 26214)},
+ { AOM_CDF5(6554, 13107, 19661, 26214)},
+ { AOM_CDF5(6554, 13107, 19661, 26214)},
+ { AOM_CDF5(6554, 13107, 19661, 26214)},
+ { AOM_CDF5(6554, 13107, 19661, 26214)},
+ { AOM_CDF5(6554, 13107, 19661, 26214)},
+ { AOM_CDF5(6554, 13107, 19661, 26214)},
+ { AOM_CDF5(6554, 13107, 19661, 26214)},
+ { AOM_CDF5(6554, 13107, 19661, 26214)},
+ },
+ {
+ { AOM_CDF5(6554, 13107, 19661, 26214)},
+ { AOM_CDF5(6554, 13107, 19661, 26214)},
+ { AOM_CDF5(6554, 13107, 19661, 26214)},
+ { AOM_CDF5(6554, 13107, 19661, 26214)},
+ { AOM_CDF5(6554, 13107, 19661, 26214)},
+ { AOM_CDF5(6554, 13107, 19661, 26214)},
+ { AOM_CDF5(6554, 13107, 19661, 26214)},
+ { AOM_CDF5(6554, 13107, 19661, 26214)},
+ { AOM_CDF5(6554, 13107, 19661, 26214)},
+ { AOM_CDF5(6554, 13107, 19661, 26214)},
+ { AOM_CDF5(6554, 13107, 19661, 26214)},
+ { AOM_CDF5(6554, 13107, 19661, 26214)},
+ { AOM_CDF5(6554, 13107, 19661, 26214)},
+ },
+ {
+ { AOM_CDF5(1127, 12814, 22772, 27483)},
+ { AOM_CDF5(145, 6761, 11980, 26667)},
+ { AOM_CDF5(362, 5887, 11678, 16725)},
+ { AOM_CDF5(385, 15213, 18587, 30693)},
+ { AOM_CDF5(25, 2914, 23134, 27903)},
+ { AOM_CDF5(60, 4470, 11749, 23991)},
+ { AOM_CDF5(37, 3332, 14511, 21448)},
+ { AOM_CDF5(157, 6320, 13036, 17439)},
+ { AOM_CDF5(119, 6719, 12906, 29396)},
+ { AOM_CDF5(47, 5537, 12576, 21499)},
+ { AOM_CDF5(269, 6076, 11258, 23115)},
+ { AOM_CDF5(83, 5615, 12001, 17228)},
+ { AOM_CDF5(1968, 5556, 12023, 18547)},
+ },
+ {
+ { AOM_CDF5(6554, 13107, 19661, 26214)},
+ { AOM_CDF5(6554, 13107, 19661, 26214)},
+ { AOM_CDF5(6554, 13107, 19661, 26214)},
+ { AOM_CDF5(6554, 13107, 19661, 26214)},
+ { AOM_CDF5(6554, 13107, 19661, 26214)},
+ { AOM_CDF5(6554, 13107, 19661, 26214)},
+ { AOM_CDF5(6554, 13107, 19661, 26214)},
+ { AOM_CDF5(6554, 13107, 19661, 26214)},
+ { AOM_CDF5(6554, 13107, 19661, 26214)},
+ { AOM_CDF5(6554, 13107, 19661, 26214)},
+ { AOM_CDF5(6554, 13107, 19661, 26214)},
+ { AOM_CDF5(6554, 13107, 19661, 26214)},
+ { AOM_CDF5(6554, 13107, 19661, 26214)},
+ },
+};
+
+static const u16 default_inter_ext_tx_cdf[2][EXTTX_SIZES][EXT_TX_TYPES] = {
+ {
+ {
+ AOM_CDF16(4458, 5560, 7695, 9709, 13330, 14789, 17537, 20266,
+ 21504, 22848, 23934, 25474, 27727, 28915, 30631)
+ },
+ {
+ AOM_CDF16(1645, 2573, 4778, 5711, 7807, 8622, 10522, 15357, 17674,
+ 20408, 22517, 25010, 27116, 28856, 30749)
+ },
+ {
+ AOM_CDF16(2048, 4096, 6144, 8192, 10240, 12288, 14336, 16384,
+ 18432, 20480, 22528, 24576, 26624, 28672, 30720)
+ },
+ {
+ AOM_CDF16(2048, 4096, 6144, 8192, 10240, 12288, 14336, 16384,
+ 18432, 20480, 22528, 24576, 26624, 28672, 30720)
+ },
+ },
+ {
+ {
+ AOM_CDF12(2731, 5461, 8192, 10923, 13653, 16384, 19115, 21845,
+ 24576, 27307, 30037),
+ AOM_CDF2(16384)
+ },
+ {
+ AOM_CDF12(2731, 5461, 8192, 10923, 13653, 16384, 19115, 21845,
+ 24576, 27307, 30037),
+ AOM_CDF2(4167)
+ },
+ {
+ AOM_CDF12(770, 2421, 5225, 12907, 15819, 18927, 21561, 24089,
+ 26595, 28526, 30529),
+ AOM_CDF2(1998)
+ },
+ {
+ AOM_CDF12(2731, 5461, 8192, 10923, 13653, 16384, 19115, 21845,
+ 24576, 27307, 30037),
+ AOM_CDF2(748)
+ },
+ }
+};
+
+static const u16 default_cfl_sign_cdf[CDF_SIZE(CFL_JOINT_SIGNS)] = {
+ AOM_CDF8(1418, 2123, 13340, 18405, 26972, 28343, 32294)
+};
+
+static const u16 default_cfl_alpha_cdf[CFL_ALPHA_CONTEXTS][CDF_SIZE(CFL_ALPHABET_SIZE)] = {
+ {
+ AOM_CDF16(7637, 20719, 31401, 32481, 32657, 32688, 32692, 32696, 32700,
+ 32704, 32708, 32712, 32716, 32720, 32724)
+ },
+ {
+ AOM_CDF16(14365, 23603, 28135, 31168, 32167, 32395, 32487, 32573,
+ 32620, 32647, 32668, 32672, 32676, 32680, 32684)
+ },
+ {
+ AOM_CDF16(11532, 22380, 28445, 31360, 32349, 32523, 32584, 32649,
+ 32673, 32677, 32681, 32685, 32689, 32693, 32697)
+ },
+ {
+ AOM_CDF16(26990, 31402, 32282, 32571, 32692, 32696, 32700, 32704,
+ 32708, 32712, 32716, 32720, 32724, 32728, 32732)
+ },
+ {
+ AOM_CDF16(17248, 26058, 28904, 30608, 31305, 31877, 32126, 32321,
+ 32394, 32464, 32516, 32560, 32576, 32593, 32622)
+ },
+ {
+ AOM_CDF16(14738, 21678, 25779, 27901, 29024, 30302, 30980, 31843,
+ 32144, 32413, 32520, 32594, 32622, 32656, 32660)
+ }
+};
+
+static const u16 default_switchable_interp_cdf[SWITCHABLE_FILTER_CONTEXTS]
+ [CDF_SIZE(AV1_SWITCHABLE_FILTERS)] = {
+ { AOM_CDF3(31935, 32720) }, { AOM_CDF3(5568, 32719) },
+ { AOM_CDF3(422, 2938) }, { AOM_CDF3(28244, 32608) },
+ { AOM_CDF3(31206, 31953) }, { AOM_CDF3(4862, 32121) },
+ { AOM_CDF3(770, 1152) }, { AOM_CDF3(20889, 25637) },
+ { AOM_CDF3(31910, 32724) }, { AOM_CDF3(4120, 32712) },
+ { AOM_CDF3(305, 2247) }, { AOM_CDF3(27403, 32636) },
+ { AOM_CDF3(31022, 32009) }, { AOM_CDF3(2963, 32093) },
+ { AOM_CDF3(601, 943) }, { AOM_CDF3(14969, 21398) }
+};
+
+static const u16 default_newmv_cdf[NEWMV_MODE_CONTEXTS][CDF_SIZE(2)] = {
+ { AOM_CDF2(24035) }, { AOM_CDF2(16630) }, { AOM_CDF2(15339) },
+ { AOM_CDF2(8386) }, { AOM_CDF2(12222) }, { AOM_CDF2(4676) }
+};
+
+static const u16 default_zeromv_cdf[GLOBALMV_MODE_CONTEXTS][CDF_SIZE(2)] = {
+ { AOM_CDF2(2175) }, { AOM_CDF2(1054) }
+};
+
+static const u16 default_refmv_cdf[REFMV_MODE_CONTEXTS][CDF_SIZE(2)] = {
+ { AOM_CDF2(23974) }, { AOM_CDF2(24188) }, { AOM_CDF2(17848) },
+ { AOM_CDF2(28622) }, { AOM_CDF2(24312) }, { AOM_CDF2(19923) }
+};
+
+static const u16 default_drl_cdf[DRL_MODE_CONTEXTS][CDF_SIZE(2)] = {
+ { AOM_CDF2(13104) }, { AOM_CDF2(24560) }, { AOM_CDF2(18945) }
+};
+
+static const u16 default_inter_compound_mode_cdf[AV1_INTER_MODE_CONTEXTS]
+ [CDF_SIZE(INTER_COMPOUND_MODES)] = {
+ { AOM_CDF8(7760, 13823, 15808, 17641, 19156, 20666, 26891) },
+ { AOM_CDF8(10730, 19452, 21145, 22749, 24039, 25131, 28724) },
+ { AOM_CDF8(10664, 20221, 21588, 22906, 24295, 25387, 28436) },
+ { AOM_CDF8(13298, 16984, 20471, 24182, 25067, 25736, 26422) },
+ { AOM_CDF8(18904, 23325, 25242, 27432, 27898, 28258, 30758) },
+ { AOM_CDF8(10725, 17454, 20124, 22820, 24195, 25168, 26046) },
+ { AOM_CDF8(17125, 24273, 25814, 27492, 28214, 28704, 30592) },
+ { AOM_CDF8(13046, 23214, 24505, 25942, 27435, 28442, 29330) }
+};
+
+static const u16 default_interintra_cdf[BLOCK_SIZE_GROUPS][CDF_SIZE(2)] = {
+ { AOM_CDF2(16384) }, { AOM_CDF2(26887) }, { AOM_CDF2(27597) },
+ { AOM_CDF2(30237) }
+};
+
+static const u16 default_interintra_mode_cdf[BLOCK_SIZE_GROUPS][CDF_SIZE(INTERINTRA_MODES)] = {
+ { AOM_CDF4(8192, 16384, 24576) },
+ { AOM_CDF4(1875, 11082, 27332) },
+ { AOM_CDF4(2473, 9996, 26388) },
+ { AOM_CDF4(4238, 11537, 25926) }
+};
+
+static const u16 default_wedge_interintra_cdf[BLOCK_SIZES_ALL][CDF_SIZE(2)] = {
+ { AOM_CDF2(16384) }, { AOM_CDF2(16384) }, { AOM_CDF2(16384) },
+ { AOM_CDF2(20036) }, { AOM_CDF2(24957) }, { AOM_CDF2(26704) },
+ { AOM_CDF2(27530) }, { AOM_CDF2(29564) }, { AOM_CDF2(29444) },
+ { AOM_CDF2(26872) }, { AOM_CDF2(16384) }, { AOM_CDF2(16384) },
+ { AOM_CDF2(16384) }, { AOM_CDF2(16384) }, { AOM_CDF2(16384) },
+ { AOM_CDF2(16384) }, { AOM_CDF2(16384) }, { AOM_CDF2(16384) },
+ { AOM_CDF2(16384) }, { AOM_CDF2(16384) }, { AOM_CDF2(16384) },
+ { AOM_CDF2(16384) }
+};
+
+static const u16 default_compound_type_cdf[BLOCK_SIZES_ALL][CDF_SIZE(COMPOUND_TYPES - 1)] = {
+ { AOM_CDF2(16384) }, { AOM_CDF2(16384) }, { AOM_CDF2(16384) },
+ { AOM_CDF2(23431) },
+ { AOM_CDF2(13171) }, { AOM_CDF2(11470) }, { AOM_CDF2(9770) },
+ { AOM_CDF2(9100) },
+ { AOM_CDF2(8233) }, { AOM_CDF2(6172) }, { AOM_CDF2(16384) },
+ { AOM_CDF2(16384) },
+ { AOM_CDF2(16384) }, { AOM_CDF2(16384) }, { AOM_CDF2(16384) },
+ { AOM_CDF2(16384) },
+ { AOM_CDF2(16384) }, { AOM_CDF2(16384) }, { AOM_CDF2(11820) },
+ { AOM_CDF2(7701) },
+ { AOM_CDF2(16384) }, { AOM_CDF2(16384) }
+};
+
+static const u16 default_wedge_idx_cdf[BLOCK_SIZES_ALL][CDF_SIZE(16)] = {
+ {
+ AOM_CDF16(2048, 4096, 6144, 8192, 10240, 12288, 14336, 16384,
+ 18432, 20480, 22528, 24576, 26624, 28672, 30720)
+ },
+ {
+ AOM_CDF16(2048, 4096, 6144, 8192, 10240, 12288, 14336, 16384,
+ 18432, 20480, 22528, 24576, 26624, 28672, 30720)
+ },
+ {
+ AOM_CDF16(2048, 4096, 6144, 8192, 10240, 12288, 14336, 16384,
+ 18432, 20480, 22528, 24576, 26624, 28672, 30720)
+ },
+ {
+ AOM_CDF16(2438, 4440, 6599, 8663, 11005, 12874, 15751, 18094,
+ 20359, 22362, 24127, 25702, 27752, 29450, 31171)
+ },
+ {
+ AOM_CDF16(806, 3266, 6005, 6738, 7218, 7367, 7771, 14588, 16323,
+ 17367, 18452, 19422, 22839, 26127, 29629)
+ },
+ {
+ AOM_CDF16(2779, 3738, 4683, 7213, 7775, 8017, 8655, 14357, 17939,
+ 21332, 24520, 27470, 29456, 30529, 31656)
+ },
+ {
+ AOM_CDF16(1684, 3625, 5675, 7108, 9302, 11274, 14429, 17144, 19163,
+ 20961, 22884, 24471, 26719, 28714, 30877)
+ },
+ {
+ AOM_CDF16(1142, 3491, 6277, 7314, 8089, 8355, 9023, 13624, 15369,
+ 16730, 18114, 19313, 22521, 26012, 29550)
+ },
+ {
+ AOM_CDF16(2742, 4195, 5727, 8035, 8980, 9336, 10146, 14124, 17270,
+ 20533, 23434, 25972, 27944, 29570, 31416)
+ },
+ {
+ AOM_CDF16(1727, 3948, 6101, 7796, 9841, 12344, 15766, 18944, 20638,
+ 22038, 23963, 25311, 26988, 28766, 31012)
+ },
+ {
+ AOM_CDF16(2048, 4096, 6144, 8192, 10240, 12288, 14336, 16384, 18432,
+ 20480, 22528, 24576, 26624, 28672, 30720)
+ },
+ {
+ AOM_CDF16(2048, 4096, 6144, 8192, 10240, 12288, 14336, 16384, 18432,
+ 20480, 22528, 24576, 26624, 28672, 30720)
+ },
+ {
+ AOM_CDF16(2048, 4096, 6144, 8192, 10240, 12288, 14336, 16384, 18432,
+ 20480, 22528, 24576, 26624, 28672, 30720)
+ },
+ {
+ AOM_CDF16(2048, 4096, 6144, 8192, 10240, 12288, 14336, 16384, 18432,
+ 20480, 22528, 24576, 26624, 28672, 30720)
+ },
+ {
+ AOM_CDF16(2048, 4096, 6144, 8192, 10240, 12288, 14336, 16384, 18432,
+ 20480, 22528, 24576, 26624, 28672, 30720)
+ },
+ {
+ AOM_CDF16(2048, 4096, 6144, 8192, 10240, 12288, 14336, 16384, 18432,
+ 20480, 22528, 24576, 26624, 28672, 30720)
+ },
+ {
+ AOM_CDF16(2048, 4096, 6144, 8192, 10240, 12288, 14336, 16384, 18432,
+ 20480, 22528, 24576, 26624, 28672, 30720)
+ },
+ {
+ AOM_CDF16(2048, 4096, 6144, 8192, 10240, 12288, 14336, 16384, 18432,
+ 20480, 22528, 24576, 26624, 28672, 30720)
+ },
+ {
+ AOM_CDF16(154, 987, 1925, 2051, 2088, 2111, 2151, 23033, 23703, 24284,
+ 24985, 25684, 27259, 28883, 30911)
+ },
+ {
+ AOM_CDF16(1135, 1322, 1493, 2635, 2696, 2737, 2770, 21016, 22935,
+ 25057, 27251, 29173, 30089, 30960, 31933)
+ },
+ {
+ AOM_CDF16(2048, 4096, 6144, 8192, 10240, 12288, 14336, 16384, 18432,
+ 20480, 22528, 24576, 26624, 28672, 30720)
+ },
+ {
+ AOM_CDF16(2048, 4096, 6144, 8192, 10240, 12288, 14336, 16384, 18432,
+ 20480, 22528, 24576, 26624, 28672, 30720)
+ }
+};
+
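+/* Motion mode (SIMPLE_TRANSLATION, OBMC_CAUSAL, WARPED_CAUSAL) per block size. */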
+static const u16 default_motion_mode_cdf[BLOCK_SIZES_ALL][CDF_SIZE(MOTION_MODES)] = {
+ { AOM_CDF3(10923, 21845) }, { AOM_CDF3(10923, 21845) },
+ { AOM_CDF3(10923, 21845) }, { AOM_CDF3(7651, 24760) },
+ { AOM_CDF3(4738, 24765) }, { AOM_CDF3(5391, 25528) },
+ { AOM_CDF3(19419, 26810) }, { AOM_CDF3(5123, 23606) },
+ { AOM_CDF3(11606, 24308) }, { AOM_CDF3(26260, 29116) },
+ { AOM_CDF3(20360, 28062) }, { AOM_CDF3(21679, 26830) },
+ { AOM_CDF3(29516, 30701) }, { AOM_CDF3(28898, 30397) },
+ { AOM_CDF3(30878, 31335) }, { AOM_CDF3(32507, 32558) },
+ { AOM_CDF3(10923, 21845) }, { AOM_CDF3(10923, 21845) },
+ { AOM_CDF3(28799, 31390) }, { AOM_CDF3(26431, 30774) },
+ { AOM_CDF3(28973, 31594) }, { AOM_CDF3(29742, 31203) }
+};
+
+static const u16 default_obmc_cdf[BLOCK_SIZES_ALL][CDF_SIZE(2)] = {
+ { AOM_CDF2(16384) }, { AOM_CDF2(16384) }, { AOM_CDF2(16384) },
+ { AOM_CDF2(10437) },
+ { AOM_CDF2(9371) }, { AOM_CDF2(9301) }, { AOM_CDF2(17432) },
+ { AOM_CDF2(14423) },
+ { AOM_CDF2(15142) }, { AOM_CDF2(25817) }, { AOM_CDF2(22823) },
+ { AOM_CDF2(22083) },
+ { AOM_CDF2(30128) }, { AOM_CDF2(31014) }, { AOM_CDF2(31560) },
+ { AOM_CDF2(32638) },
+ { AOM_CDF2(16384) }, { AOM_CDF2(16384) }, { AOM_CDF2(23664) },
+ { AOM_CDF2(20901) },
+ { AOM_CDF2(24008) }, { AOM_CDF2(26879) }
+};
+
+static const u16 default_intra_inter_cdf[INTRA_INTER_CONTEXTS][CDF_SIZE(2)] = {
+ { AOM_CDF2(806) },
+ { AOM_CDF2(16662) },
+ { AOM_CDF2(20186) },
+ { AOM_CDF2(26538) }
+};
+
+static const u16 default_comp_inter_cdf[COMP_INTER_CONTEXTS][CDF_SIZE(2)] = {
+ { AOM_CDF2(26828) },
+ { AOM_CDF2(24035) },
+ { AOM_CDF2(12031) },
+ { AOM_CDF2(10640) },
+ { AOM_CDF2(2901) }
+};
+
+static const u16 default_comp_ref_type_cdf[COMP_REF_TYPE_CONTEXTS][CDF_SIZE(2)] = {
+ { AOM_CDF2(1198) },
+ { AOM_CDF2(2070) },
+ { AOM_CDF2(9166) },
+ { AOM_CDF2(7499) },
+ { AOM_CDF2(22475) }
+};
+
+static const u16 default_uni_comp_ref_cdf[UNI_COMP_REF_CONTEXTS]
+ [UNIDIR_COMP_REFS - 1][CDF_SIZE(2)] = {
+ { { AOM_CDF2(5284)}, { AOM_CDF2(3865)}, { AOM_CDF2(3128)} },
+ { { AOM_CDF2(23152)}, { AOM_CDF2(14173)}, { AOM_CDF2(15270)} },
+ { { AOM_CDF2(31774)}, { AOM_CDF2(25120)}, { AOM_CDF2(26710)} }
+};
+
+static const u16 default_single_ref_cdf[REF_CONTEXTS][SINGLE_REFS - 1][CDF_SIZE(2)] = {
+ {
+ { AOM_CDF2(4897)},
+ { AOM_CDF2(1555)},
+ { AOM_CDF2(4236)},
+ { AOM_CDF2(8650)},
+ { AOM_CDF2(904)},
+ { AOM_CDF2(1444)}
+ },
+ {
+ { AOM_CDF2(16973)},
+ { AOM_CDF2(16751)},
+ { AOM_CDF2(19647)},
+ { AOM_CDF2(24773)},
+ { AOM_CDF2(11014)},
+ { AOM_CDF2(15087)}
+ },
+ {
+ { AOM_CDF2(29744)},
+ { AOM_CDF2(30279)},
+ { AOM_CDF2(31194)},
+ { AOM_CDF2(31895)},
+ { AOM_CDF2(26875)},
+ { AOM_CDF2(30304)}
+ }
+};
+
+static const u16 default_comp_ref_cdf[REF_CONTEXTS][FWD_REFS - 1][CDF_SIZE(2)] = {
+ { { AOM_CDF2(4946)}, { AOM_CDF2(9468)}, { AOM_CDF2(1503)} },
+ { { AOM_CDF2(19891)}, { AOM_CDF2(22441)}, { AOM_CDF2(15160)} },
+ { { AOM_CDF2(30731)}, { AOM_CDF2(31059)}, { AOM_CDF2(27544)} }
+};
+
+static const u16 default_comp_bwdref_cdf[REF_CONTEXTS][BWD_REFS - 1][CDF_SIZE(2)] = {
+ { { AOM_CDF2(2235)}, { AOM_CDF2(1423)} },
+ { { AOM_CDF2(17182)}, { AOM_CDF2(15175)} },
+ { { AOM_CDF2(30606)}, { AOM_CDF2(30489)} }
+};
+
+static const u16 default_palette_y_size_cdf[PALETTE_BLOCK_SIZES][CDF_SIZE(PALETTE_SIZES)] = {
+ { AOM_CDF7(7952, 13000, 18149, 21478, 25527, 29241) },
+ { AOM_CDF7(7139, 11421, 16195, 19544, 23666, 28073) },
+ { AOM_CDF7(7788, 12741, 17325, 20500, 24315, 28530) },
+ { AOM_CDF7(8271, 14064, 18246, 21564, 25071, 28533) },
+ { AOM_CDF7(12725, 19180, 21863, 24839, 27535, 30120) },
+ { AOM_CDF7(9711, 14888, 16923, 21052, 25661, 27875) },
+ { AOM_CDF7(14940, 20797, 21678, 24186, 27033, 28999) }
+};
+
+static const u16 default_palette_uv_size_cdf[PALETTE_BLOCK_SIZES][CDF_SIZE(PALETTE_SIZES)] = {
+ { AOM_CDF7(8713, 19979, 27128, 29609, 31331, 32272) },
+ { AOM_CDF7(5839, 15573, 23581, 26947, 29848, 31700) },
+ { AOM_CDF7(4426, 11260, 17999, 21483, 25863, 29430) },
+ { AOM_CDF7(3228, 9464, 14993, 18089, 22523, 27420) },
+ { AOM_CDF7(3768, 8886, 13091, 17852, 22495, 27207) },
+ { AOM_CDF7(2464, 8451, 12861, 21632, 25525, 28555) },
+ { AOM_CDF7(1269, 5435, 10433, 18963, 21700, 25865) }
+};
+
+static const u16 default_palette_y_mode_cdf[PALETTE_BLOCK_SIZES]
+ [PALETTE_Y_MODE_CONTEXTS][CDF_SIZE(2)] = {
+ { { AOM_CDF2(31676)}, { AOM_CDF2(3419)}, { AOM_CDF2(1261)} },
+ { { AOM_CDF2(31912)}, { AOM_CDF2(2859)}, { AOM_CDF2(980)} },
+ { { AOM_CDF2(31823)}, { AOM_CDF2(3400)}, { AOM_CDF2(781)} },
+ { { AOM_CDF2(32030)}, { AOM_CDF2(3561)}, { AOM_CDF2(904)} },
+ { { AOM_CDF2(32309)}, { AOM_CDF2(7337)}, { AOM_CDF2(1462)} },
+ { { AOM_CDF2(32265)}, { AOM_CDF2(4015)}, { AOM_CDF2(1521)} },
+ { { AOM_CDF2(32450)}, { AOM_CDF2(7946)}, { AOM_CDF2(129)} }
+};
+
+static const u16 default_palette_uv_mode_cdf[PALETTE_UV_MODE_CONTEXTS][CDF_SIZE(2)] = {
+ { AOM_CDF2(32461) }, { AOM_CDF2(21488) }
+};
+
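+/*
+ * Palette color-index CDFs. Each 8-entry row packs the tables for two
+ * palette sizes (as noted below): e.g. AOM_CDF2() emits one value and
+ * AOM_CDF8() the remaining seven. The zeroes in the final row are padding.
+ */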
+static const u16 default_palette_y_color_index_cdf[PALETTE_IDX_CONTEXTS][8] = {
+ // Palette sizes 2 & 8
+ {
+ AOM_CDF2(28710),
+ AOM_CDF8(21689, 23883, 25163, 26352, 27506, 28827, 30195)
+ },
+ {
+ AOM_CDF2(16384),
+ AOM_CDF8(6892, 15385, 17840, 21606, 24287, 26753, 29204)
+ },
+ {
+ AOM_CDF2(10553),
+ AOM_CDF8(5651, 23182, 25042, 26518, 27982, 29392, 30900)
+ },
+ {
+ AOM_CDF2(27036),
+ AOM_CDF8(19349, 22578, 24418, 25994, 27524, 29031, 30448)
+ },
+ {
+ AOM_CDF2(31603),
+ AOM_CDF8(31028, 31270, 31504, 31705, 31927, 32153, 32392)
+ },
+ // Palette sizes 3 & 7
+ {
+ AOM_CDF3(27877, 30490),
+ AOM_CDF7(23105, 25199, 26464, 27684, 28931, 30318)
+ },
+ {
+ AOM_CDF3(11532, 25697),
+ AOM_CDF7(6950, 15447, 18952, 22681, 25567, 28563)
+ },
+ {
+ AOM_CDF3(6544, 30234),
+ AOM_CDF7(7560, 23474, 25490, 27203, 28921, 30708)
+ },
+ {
+ AOM_CDF3(23018, 28072),
+ AOM_CDF7(18544, 22373, 24457, 26195, 28119, 30045)
+ },
+ {
+ AOM_CDF3(31915, 32385),
+ AOM_CDF7(31198, 31451, 31670, 31882, 32123, 32391)
+ },
+ // Palette sizes 4 & 6
+ {
+ AOM_CDF4(25572, 28046, 30045),
+ AOM_CDF6(23132, 25407, 26970, 28435, 30073)
+ },
+ {
+ AOM_CDF4(9478, 21590, 27256),
+ AOM_CDF6(7443, 17242, 20717, 24762, 27982)
+ },
+ {
+ AOM_CDF4(7248, 26837, 29824),
+ AOM_CDF6(6300, 24862, 26944, 28784, 30671)
+ },
+ {
+ AOM_CDF4(19167, 24486, 28349),
+ AOM_CDF6(18916, 22895, 25267, 27435, 29652)
+ },
+ {
+ AOM_CDF4(31400, 31825, 32250),
+ AOM_CDF6(31270, 31550, 31808, 32059, 32353)
+ },
+ // Palette size 5
+ {
+ AOM_CDF5(24779, 26955, 28576, 30282),
+ AOM_CDF5(8669, 20364, 24073, 28093)
+ },
+ {
+ AOM_CDF5(4255, 27565, 29377, 31067),
+ AOM_CDF5(19864, 23674, 26716, 29530)
+ },
+ {
+ AOM_CDF5(31646, 31893, 32147, 32426),
+ 0, 0, 0, 0
+ }
+};
+
+static const u16 default_palette_uv_color_index_cdf[PALETTE_IDX_CONTEXTS][8] = {
+ // Palette sizes 2 & 8
+ {
+ AOM_CDF2(29089),
+ AOM_CDF8(21442, 23288, 24758, 26246, 27649, 28980, 30563)
+ },
+ {
+ AOM_CDF2(16384),
+ AOM_CDF8(5863, 14933, 17552, 20668, 23683, 26411, 29273)
+ },
+ {
+ AOM_CDF2(8713),
+ AOM_CDF8(3415, 25810, 26877, 27990, 29223, 30394, 31618)
+ },
+ {
+ AOM_CDF2(29257),
+ AOM_CDF8(17965, 20084, 22232, 23974, 26274, 28402, 30390)
+ },
+ {
+ AOM_CDF2(31610),
+ AOM_CDF8(31190, 31329, 31516, 31679, 31825, 32026, 32322)
+ },
+ // Palette sizes 3 & 7
+ {
+ AOM_CDF3(25257, 29145),
+ AOM_CDF7(21239, 23168, 25044, 26962, 28705, 30506)
+ },
+ {
+ AOM_CDF3(12287, 27293),
+ AOM_CDF7(6545, 15012, 18004, 21817, 25503, 28701)
+ },
+ {
+ AOM_CDF3(7033, 27960),
+ AOM_CDF7(3448, 26295, 27437, 28704, 30126, 31442)
+ },
+ {
+ AOM_CDF3(20145, 25405),
+ AOM_CDF7(15889, 18323, 21704, 24698, 26976, 29690)
+ },
+ {
+ AOM_CDF3(30608, 31639),
+ AOM_CDF7(30988, 31204, 31479, 31734, 31983, 32325)
+ },
+ // Palette sizes 4 & 6
+ {
+ AOM_CDF4(24210, 27175, 29903),
+ AOM_CDF6(22217, 24567, 26637, 28683, 30548)
+ },
+ {
+ AOM_CDF4(9888, 22386, 27214),
+ AOM_CDF6(7307, 16406, 19636, 24632, 28424)
+ },
+ {
+ AOM_CDF4(5901, 26053, 29293),
+ AOM_CDF6(4441, 25064, 26879, 28942, 30919)
+ },
+ {
+ AOM_CDF4(18318, 22152, 28333),
+ AOM_CDF6(17210, 20528, 23319, 26750, 29582)
+ },
+ {
+ AOM_CDF4(30459, 31136, 31926),
+ AOM_CDF6(30674, 30953, 31396, 31735, 32207)
+ },
+ // Palette size 5
+ {
+ AOM_CDF5(22980, 25479, 27781, 29986),
+ AOM_CDF5(8413, 21408, 24859, 28874)
+ },
+ {
+ AOM_CDF5(2257, 29449, 30594, 31598),
+ AOM_CDF5(19189, 21202, 25915, 28620)
+ },
+ {
+ AOM_CDF5(31844, 32044, 32281, 32518),
+ 0, 0, 0, 0
+ }
+};
+
+static const u16 default_txfm_partition_cdf[TXFM_PARTITION_CONTEXTS][CDF_SIZE(2)] = {
+ { AOM_CDF2(28581) }, { AOM_CDF2(23846) }, { AOM_CDF2(20847) },
+ { AOM_CDF2(24315) }, { AOM_CDF2(18196) }, { AOM_CDF2(12133) },
+ { AOM_CDF2(18791) }, { AOM_CDF2(10887) }, { AOM_CDF2(11005) },
+ { AOM_CDF2(27179) }, { AOM_CDF2(20004) }, { AOM_CDF2(11281) },
+ { AOM_CDF2(26549) }, { AOM_CDF2(19308) }, { AOM_CDF2(14224) },
+ { AOM_CDF2(28015) }, { AOM_CDF2(21546) }, { AOM_CDF2(14400) },
+ { AOM_CDF2(28165) }, { AOM_CDF2(22401) }, { AOM_CDF2(16088) }
+};
+
+static const u16 default_skip_cdfs[SKIP_CONTEXTS][CDF_SIZE(2)] = {
+ { AOM_CDF2(31671) }, { AOM_CDF2(16515) }, { AOM_CDF2(4576) }
+};
+
+static const u16 default_skip_mode_cdfs[SKIP_MODE_CONTEXTS][CDF_SIZE(2)] = {
+ { AOM_CDF2(32621) }, { AOM_CDF2(20708) }, { AOM_CDF2(8127) }
+};
+
+static const u16 default_compound_idx_cdfs[COMP_INDEX_CONTEXTS][CDF_SIZE(2)] = {
+ { AOM_CDF2(18244) }, { AOM_CDF2(12865) }, { AOM_CDF2(7053) },
+ { AOM_CDF2(13259) }, { AOM_CDF2(9334) }, { AOM_CDF2(4644) }
+};
+
+static const u16 default_comp_group_idx_cdfs[COMP_GROUP_IDX_CONTEXTS][CDF_SIZE(2)] = {
+ { AOM_CDF2(26607) }, { AOM_CDF2(22891) }, { AOM_CDF2(18840) },
+ { AOM_CDF2(24594) }, { AOM_CDF2(19934) }, { AOM_CDF2(22674) }
+};
+
+static const u16 default_intrabc_cdf[CDF_SIZE(2)] = { AOM_CDF2(30531) };
+
+static const u16 default_filter_intra_mode_cdf[CDF_SIZE(FILTER_INTRA_MODES)] = {
+ AOM_CDF5(8949, 12776, 17211, 29558)
+};
+
+static const u16 default_filter_intra_cdfs[BLOCK_SIZES_ALL][CDF_SIZE(2)] = {
+ { AOM_CDF2(4621) }, { AOM_CDF2(6743) }, { AOM_CDF2(5893) }, { AOM_CDF2(7866) },
+ { AOM_CDF2(12551) }, { AOM_CDF2(9394) }, { AOM_CDF2(12408) }, { AOM_CDF2(14301) },
+ { AOM_CDF2(12756) }, { AOM_CDF2(22343) }, { AOM_CDF2(16384) }, { AOM_CDF2(16384) },
+ { AOM_CDF2(16384) }, { AOM_CDF2(16384) }, { AOM_CDF2(16384) }, { AOM_CDF2(16384) },
+ { AOM_CDF2(12770) }, { AOM_CDF2(10368) }, { AOM_CDF2(20229) }, { AOM_CDF2(18101) },
+ { AOM_CDF2(16384) }, { AOM_CDF2(16384) }
+};
+
+static const u16 default_delta_q_cdf[CDF_SIZE(DELTA_Q_PROBS + 1)] = {
+ AOM_CDF4(28160, 32120, 32677)
+};
+
+static const u16 default_delta_lf_multi_cdf[FRAME_LF_COUNT][CDF_SIZE(DELTA_LF_PROBS + 1)] = {
+ { AOM_CDF4(28160, 32120, 32677) },
+ { AOM_CDF4(28160, 32120, 32677) },
+ { AOM_CDF4(28160, 32120, 32677) },
+ { AOM_CDF4(28160, 32120, 32677) }
+};
+
+static const u16 default_delta_lf_cdf[CDF_SIZE(DELTA_LF_PROBS + 1)] = {
+ AOM_CDF4(28160, 32120, 32677)
+};
+
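+/* 128 * 128 == 16384: temporal segment prediction starts out 50/50. */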
+static const u16 default_segment_pred_cdf[SEG_TEMPORAL_PRED_CTXS][CDF_SIZE(2)] = {
+ { AOM_CDF2(128 * 128) },
+ { AOM_CDF2(128 * 128) },
+ { AOM_CDF2(128 * 128) }
+};
+
+static const u16 default_spatial_pred_seg_tree_cdf[SPATIAL_PREDICTION_PROBS]
+ [CDF_SIZE(MAX_SEGMENTS)] = {
+ {
+ AOM_CDF8(5622, 7893, 16093, 18233, 27809, 28373, 32533),
+ },
+ {
+ AOM_CDF8(14274, 18230, 22557, 24935, 29980, 30851, 32344),
+ },
+ {
+ AOM_CDF8(27527, 28487, 28723, 28890, 32397, 32647, 32679),
+ },
+};
+
+static const u16 default_tx_size_cdf[MAX_TX_CATS]
+ [AV1_TX_SIZE_CONTEXTS][CDF_SIZE(MAX_TX_DEPTH + 1)] = {
+ {
+ { AOM_CDF2(19968)},
+ { AOM_CDF2(19968)},
+ { AOM_CDF2(24320)}
+ },
+ {
+ { AOM_CDF3(12272, 30172)},
+ { AOM_CDF3(12272, 30172)},
+ { AOM_CDF3(18677, 30848)}
+ },
+ {
+ { AOM_CDF3(12986, 15180)},
+ { AOM_CDF3(12986, 15180)},
+ { AOM_CDF3(24302, 25602)}
+ },
+ {
+ { AOM_CDF3(5782, 11475)},
+ { AOM_CDF3(5782, 11475)},
+ { AOM_CDF3(16803, 22759)}
+ },
+};
+
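+/*
+ * DC sign CDFs: the spec's 8-bit probabilities scaled by 128 into the
+ * 15-bit domain, identical for all four quantizer contexts.
+ */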
+static const u16 av1_default_dc_sign_cdfs[TOKEN_CDF_Q_CTXS]
+ [PLANE_TYPES][DC_SIGN_CONTEXTS][CDF_SIZE(2)] = {
+ {
+ {
+ { AOM_CDF2(128 * 125)},
+ { AOM_CDF2(128 * 102)},
+ { AOM_CDF2(128 * 147)},
+ },
+ {
+ { AOM_CDF2(128 * 119)},
+ { AOM_CDF2(128 * 101)},
+ { AOM_CDF2(128 * 135)},
+ }
+ },
+ {
+ {
+ { AOM_CDF2(128 * 125)},
+ { AOM_CDF2(128 * 102)},
+ { AOM_CDF2(128 * 147)},
+ },
+ {
+ { AOM_CDF2(128 * 119)},
+ { AOM_CDF2(128 * 101)},
+ { AOM_CDF2(128 * 135)},
+ }
+ },
+ {
+ {
+ { AOM_CDF2(128 * 125)},
+ { AOM_CDF2(128 * 102)},
+ { AOM_CDF2(128 * 147)},
+ },
+ {
+ { AOM_CDF2(128 * 119)},
+ { AOM_CDF2(128 * 101)},
+ { AOM_CDF2(128 * 135)},
+ }
+ },
+ {
+ {
+ { AOM_CDF2(128 * 125)},
+ { AOM_CDF2(128 * 102)},
+ { AOM_CDF2(128 * 147)},
+ },
+ {
+ { AOM_CDF2(128 * 119)},
+ { AOM_CDF2(128 * 101)},
+ { AOM_CDF2(128 * 135)},
+ }
+ },
+};
+
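+/*
+ * Default CDFs for the all-zero transform block flag (txb_skip), indexed
+ * by quantizer context, transform size and skip context.
+ */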
+static const u16 av1_default_txb_skip_cdfs[TOKEN_CDF_Q_CTXS]
+ [TX_SIZES][TXB_SKIP_CONTEXTS][CDF_SIZE(2)] = {
+ {
+ {
+ { AOM_CDF2(31849)},
+ { AOM_CDF2(5892)},
+ { AOM_CDF2(12112)},
+ { AOM_CDF2(21935)},
+ { AOM_CDF2(20289)},
+ { AOM_CDF2(27473)},
+ { AOM_CDF2(32487)},
+ { AOM_CDF2(7654)},
+ { AOM_CDF2(19473)},
+ { AOM_CDF2(29984)},
+ { AOM_CDF2(9961)},
+ { AOM_CDF2(30242)},
+ { AOM_CDF2(32117)}
+ },
+ {
+ { AOM_CDF2(31548)},
+ { AOM_CDF2(1549)},
+ { AOM_CDF2(10130)},
+ { AOM_CDF2(16656)},
+ { AOM_CDF2(18591)},
+ { AOM_CDF2(26308)},
+ { AOM_CDF2(32537)},
+ { AOM_CDF2(5403)},
+ { AOM_CDF2(18096)},
+ { AOM_CDF2(30003)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)}
+ },
+ {
+ { AOM_CDF2(29957)},
+ { AOM_CDF2(5391)},
+ { AOM_CDF2(18039)},
+ { AOM_CDF2(23566)},
+ { AOM_CDF2(22431)},
+ { AOM_CDF2(25822)},
+ { AOM_CDF2(32197)},
+ { AOM_CDF2(3778)},
+ { AOM_CDF2(15336)},
+ { AOM_CDF2(28981)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)}
+ },
+ {
+ { AOM_CDF2(17920)},
+ { AOM_CDF2(1818)},
+ { AOM_CDF2(7282)},
+ { AOM_CDF2(25273)},
+ { AOM_CDF2(10923)},
+ { AOM_CDF2(31554)},
+ { AOM_CDF2(32624)},
+ { AOM_CDF2(1366)},
+ { AOM_CDF2(15628)},
+ { AOM_CDF2(30462)},
+ { AOM_CDF2(146)},
+ { AOM_CDF2(5132)},
+ { AOM_CDF2(31657)}
+ },
+ {
+ { AOM_CDF2(6308)},
+ { AOM_CDF2(117)},
+ { AOM_CDF2(1638)},
+ { AOM_CDF2(2161)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(10923)},
+ { AOM_CDF2(30247)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)}
+ }
+ },
+ {
+ {
+ { AOM_CDF2(30371)},
+ { AOM_CDF2(7570)},
+ { AOM_CDF2(13155)},
+ { AOM_CDF2(20751)},
+ { AOM_CDF2(20969)},
+ { AOM_CDF2(27067)},
+ { AOM_CDF2(32013)},
+ { AOM_CDF2(5495)},
+ { AOM_CDF2(17942)},
+ { AOM_CDF2(28280)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)}
+ },
+ {
+ { AOM_CDF2(31782)},
+ { AOM_CDF2(1836)},
+ { AOM_CDF2(10689)},
+ { AOM_CDF2(17604)},
+ { AOM_CDF2(21622)},
+ { AOM_CDF2(27518)},
+ { AOM_CDF2(32399)},
+ { AOM_CDF2(4419)},
+ { AOM_CDF2(16294)},
+ { AOM_CDF2(28345)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)}
+ },
+ {
+ { AOM_CDF2(31901)},
+ { AOM_CDF2(10311)},
+ { AOM_CDF2(18047)},
+ { AOM_CDF2(24806)},
+ { AOM_CDF2(23288)},
+ { AOM_CDF2(27914)},
+ { AOM_CDF2(32296)},
+ { AOM_CDF2(4215)},
+ { AOM_CDF2(15756)},
+ { AOM_CDF2(28341)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)}
+ },
+ {
+ { AOM_CDF2(26726)},
+ { AOM_CDF2(1045)},
+ { AOM_CDF2(11703)},
+ { AOM_CDF2(20590)},
+ { AOM_CDF2(18554)},
+ { AOM_CDF2(25970)},
+ { AOM_CDF2(31938)},
+ { AOM_CDF2(5583)},
+ { AOM_CDF2(21313)},
+ { AOM_CDF2(29390)},
+ { AOM_CDF2(641)},
+ { AOM_CDF2(22265)},
+ { AOM_CDF2(31452)}
+ },
+ {
+ { AOM_CDF2(26584)},
+ { AOM_CDF2(188)},
+ { AOM_CDF2(8847)},
+ { AOM_CDF2(24519)},
+ { AOM_CDF2(22938)},
+ { AOM_CDF2(30583)},
+ { AOM_CDF2(32608)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)}
+ }
+ },
+ {
+ {
+ { AOM_CDF2(29614)},
+ { AOM_CDF2(9068)},
+ { AOM_CDF2(12924)},
+ { AOM_CDF2(19538)},
+ { AOM_CDF2(17737)},
+ { AOM_CDF2(24619)},
+ { AOM_CDF2(30642)},
+ { AOM_CDF2(4119)},
+ { AOM_CDF2(16026)},
+ { AOM_CDF2(25657)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)}
+ },
+ {
+ { AOM_CDF2(31957)},
+ { AOM_CDF2(3230)},
+ { AOM_CDF2(11153)},
+ { AOM_CDF2(18123)},
+ { AOM_CDF2(20143)},
+ { AOM_CDF2(26536)},
+ { AOM_CDF2(31986)},
+ { AOM_CDF2(3050)},
+ { AOM_CDF2(14603)},
+ { AOM_CDF2(25155)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)}
+ },
+ {
+ { AOM_CDF2(32363)},
+ { AOM_CDF2(10692)},
+ { AOM_CDF2(19090)},
+ { AOM_CDF2(24357)},
+ { AOM_CDF2(24442)},
+ { AOM_CDF2(28312)},
+ { AOM_CDF2(32169)},
+ { AOM_CDF2(3648)},
+ { AOM_CDF2(15690)},
+ { AOM_CDF2(26815)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)}
+ },
+ {
+ { AOM_CDF2(30669)},
+ { AOM_CDF2(3832)},
+ { AOM_CDF2(11663)},
+ { AOM_CDF2(18889)},
+ { AOM_CDF2(19782)},
+ { AOM_CDF2(23313)},
+ { AOM_CDF2(31330)},
+ { AOM_CDF2(5124)},
+ { AOM_CDF2(18719)},
+ { AOM_CDF2(28468)},
+ { AOM_CDF2(3082)},
+ { AOM_CDF2(20982)},
+ { AOM_CDF2(29443)}
+ },
+ {
+ { AOM_CDF2(28573)},
+ { AOM_CDF2(3183)},
+ { AOM_CDF2(17802)},
+ { AOM_CDF2(25977)},
+ { AOM_CDF2(26677)},
+ { AOM_CDF2(27832)},
+ { AOM_CDF2(32387)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)}
+ }
+ },
+ {
+ {
+ { AOM_CDF2(26887)},
+ { AOM_CDF2(6729)},
+ { AOM_CDF2(10361)},
+ { AOM_CDF2(17442)},
+ { AOM_CDF2(15045)},
+ { AOM_CDF2(22478)},
+ { AOM_CDF2(29072)},
+ { AOM_CDF2(2713)},
+ { AOM_CDF2(11861)},
+ { AOM_CDF2(20773)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)}
+ },
+ {
+ { AOM_CDF2(31903)},
+ { AOM_CDF2(2044)},
+ { AOM_CDF2(7528)},
+ { AOM_CDF2(14618)},
+ { AOM_CDF2(16182)},
+ { AOM_CDF2(24168)},
+ { AOM_CDF2(31037)},
+ { AOM_CDF2(2786)},
+ { AOM_CDF2(11194)},
+ { AOM_CDF2(20155)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)}
+ },
+ {
+ { AOM_CDF2(32510)},
+ { AOM_CDF2(8430)},
+ { AOM_CDF2(17318)},
+ { AOM_CDF2(24154)},
+ { AOM_CDF2(23674)},
+ { AOM_CDF2(28789)},
+ { AOM_CDF2(32139)},
+ { AOM_CDF2(3440)},
+ { AOM_CDF2(13117)},
+ { AOM_CDF2(22702)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)}
+ },
+ {
+ { AOM_CDF2(31671)},
+ { AOM_CDF2(2056)},
+ { AOM_CDF2(11746)},
+ { AOM_CDF2(16852)},
+ { AOM_CDF2(18635)},
+ { AOM_CDF2(24715)},
+ { AOM_CDF2(31484)},
+ { AOM_CDF2(4656)},
+ { AOM_CDF2(16074)},
+ { AOM_CDF2(24704)},
+ { AOM_CDF2(1806)},
+ { AOM_CDF2(14645)},
+ { AOM_CDF2(25336)}
+ },
+ {
+ { AOM_CDF2(31539)},
+ { AOM_CDF2(8433)},
+ { AOM_CDF2(20576)},
+ { AOM_CDF2(27904)},
+ { AOM_CDF2(27852)},
+ { AOM_CDF2(30026)},
+ { AOM_CDF2(32441)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)}
+ }
+ }
+};
+
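+/*
+ * CDFs for the extra bits that refine the end-of-block position within
+ * the class selected by the eob_multi* symbols below.
+ */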
+static const u16 av1_default_eob_extra_cdfs[TOKEN_CDF_Q_CTXS][TX_SIZES][PLANE_TYPES]
+ [EOB_COEF_CONTEXTS][CDF_SIZE(2)] = {
+ {
+ {
+ {
+ { AOM_CDF2(16961)},
+ { AOM_CDF2(17223)},
+ { AOM_CDF2(7621)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ },
+ {
+ { AOM_CDF2(19069)},
+ { AOM_CDF2(22525)},
+ { AOM_CDF2(13377)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ }
+ },
+ {
+ {
+ { AOM_CDF2(20401)},
+ { AOM_CDF2(17025)},
+ { AOM_CDF2(12845)},
+ { AOM_CDF2(12873)},
+ { AOM_CDF2(14094)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ },
+ {
+ { AOM_CDF2(20681)},
+ { AOM_CDF2(20701)},
+ { AOM_CDF2(15250)},
+ { AOM_CDF2(15017)},
+ { AOM_CDF2(14928)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ }
+ },
+ {
+ {
+ { AOM_CDF2(23905)},
+ { AOM_CDF2(17194)},
+ { AOM_CDF2(16170)},
+ { AOM_CDF2(17695)},
+ { AOM_CDF2(13826)},
+ { AOM_CDF2(15810)},
+ { AOM_CDF2(12036)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ },
+ {
+ { AOM_CDF2(23959)},
+ { AOM_CDF2(20799)},
+ { AOM_CDF2(19021)},
+ { AOM_CDF2(16203)},
+ { AOM_CDF2(17886)},
+ { AOM_CDF2(14144)},
+ { AOM_CDF2(12010)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ }
+ },
+ {
+ {
+ { AOM_CDF2(27399)},
+ { AOM_CDF2(16327)},
+ { AOM_CDF2(18071)},
+ { AOM_CDF2(19584)},
+ { AOM_CDF2(20721)},
+ { AOM_CDF2(18432)},
+ { AOM_CDF2(19560)},
+ { AOM_CDF2(10150)},
+ { AOM_CDF2(8805)},
+ },
+ {
+ { AOM_CDF2(24932)},
+ { AOM_CDF2(20833)},
+ { AOM_CDF2(12027)},
+ { AOM_CDF2(16670)},
+ { AOM_CDF2(19914)},
+ { AOM_CDF2(15106)},
+ { AOM_CDF2(17662)},
+ { AOM_CDF2(13783)},
+ { AOM_CDF2(28756)},
+ }
+ },
+ {
+ {
+ { AOM_CDF2(23406)},
+ { AOM_CDF2(21845)},
+ { AOM_CDF2(18432)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(17096)},
+ { AOM_CDF2(12561)},
+ { AOM_CDF2(17320)},
+ { AOM_CDF2(22395)},
+ { AOM_CDF2(21370)},
+ },
+ {
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ }
+ }
+ },
+ {
+ {
+ {
+ { AOM_CDF2(17471)},
+ { AOM_CDF2(20223)},
+ { AOM_CDF2(11357)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ },
+ {
+ { AOM_CDF2(20335)},
+ { AOM_CDF2(21667)},
+ { AOM_CDF2(14818)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ }
+ },
+ {
+ {
+ { AOM_CDF2(20430)},
+ { AOM_CDF2(20662)},
+ { AOM_CDF2(15367)},
+ { AOM_CDF2(16970)},
+ { AOM_CDF2(14657)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ },
+ {
+ { AOM_CDF2(22117)},
+ { AOM_CDF2(22028)},
+ { AOM_CDF2(18650)},
+ { AOM_CDF2(16042)},
+ { AOM_CDF2(15885)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ }
+ },
+ {
+ {
+ { AOM_CDF2(22409)},
+ { AOM_CDF2(21012)},
+ { AOM_CDF2(15650)},
+ { AOM_CDF2(17395)},
+ { AOM_CDF2(15469)},
+ { AOM_CDF2(20205)},
+ { AOM_CDF2(19511)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ },
+ {
+ { AOM_CDF2(24220)},
+ { AOM_CDF2(22480)},
+ { AOM_CDF2(17737)},
+ { AOM_CDF2(18916)},
+ { AOM_CDF2(19268)},
+ { AOM_CDF2(18412)},
+ { AOM_CDF2(18844)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ }
+ },
+ {
+ {
+ { AOM_CDF2(25991)},
+ { AOM_CDF2(20314)},
+ { AOM_CDF2(17731)},
+ { AOM_CDF2(19678)},
+ { AOM_CDF2(18649)},
+ { AOM_CDF2(17307)},
+ { AOM_CDF2(21798)},
+ { AOM_CDF2(17549)},
+ { AOM_CDF2(15630)},
+ },
+ {
+ { AOM_CDF2(26585)},
+ { AOM_CDF2(21469)},
+ { AOM_CDF2(20432)},
+ { AOM_CDF2(17735)},
+ { AOM_CDF2(19280)},
+ { AOM_CDF2(15235)},
+ { AOM_CDF2(20297)},
+ { AOM_CDF2(22471)},
+ { AOM_CDF2(28997)},
+ }
+ },
+ {
+ {
+ { AOM_CDF2(26605)},
+ { AOM_CDF2(11304)},
+ { AOM_CDF2(16726)},
+ { AOM_CDF2(16560)},
+ { AOM_CDF2(20866)},
+ { AOM_CDF2(23524)},
+ { AOM_CDF2(19878)},
+ { AOM_CDF2(13469)},
+ { AOM_CDF2(23084)},
+ },
+ {
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ }
+ }
+ },
+ {
+ {
+ {
+ { AOM_CDF2(18983)},
+ { AOM_CDF2(20512)},
+ { AOM_CDF2(14885)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ },
+ {
+ { AOM_CDF2(20090)},
+ { AOM_CDF2(19444)},
+ { AOM_CDF2(17286)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ }
+ },
+ {
+ {
+ { AOM_CDF2(19139)},
+ { AOM_CDF2(21487)},
+ { AOM_CDF2(18959)},
+ { AOM_CDF2(20910)},
+ { AOM_CDF2(19089)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ },
+ {
+ { AOM_CDF2(20536)},
+ { AOM_CDF2(20664)},
+ { AOM_CDF2(20625)},
+ { AOM_CDF2(19123)},
+ { AOM_CDF2(14862)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ }
+ },
+ {
+ {
+ { AOM_CDF2(19833)},
+ { AOM_CDF2(21502)},
+ { AOM_CDF2(17485)},
+ { AOM_CDF2(20267)},
+ { AOM_CDF2(18353)},
+ { AOM_CDF2(23329)},
+ { AOM_CDF2(21478)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ },
+ {
+ { AOM_CDF2(22041)},
+ { AOM_CDF2(23434)},
+ { AOM_CDF2(20001)},
+ { AOM_CDF2(20554)},
+ { AOM_CDF2(20951)},
+ { AOM_CDF2(20145)},
+ { AOM_CDF2(15562)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ }
+ },
+ {
+ {
+ { AOM_CDF2(23312)},
+ { AOM_CDF2(21607)},
+ { AOM_CDF2(16526)},
+ { AOM_CDF2(18957)},
+ { AOM_CDF2(18034)},
+ { AOM_CDF2(18934)},
+ { AOM_CDF2(24247)},
+ { AOM_CDF2(16921)},
+ { AOM_CDF2(17080)},
+ },
+ {
+ { AOM_CDF2(26579)},
+ { AOM_CDF2(24910)},
+ { AOM_CDF2(18637)},
+ { AOM_CDF2(19800)},
+ { AOM_CDF2(20388)},
+ { AOM_CDF2(9887)},
+ { AOM_CDF2(15642)},
+ { AOM_CDF2(30198)},
+ { AOM_CDF2(24721)},
+ }
+ },
+ {
+ {
+ { AOM_CDF2(26998)},
+ { AOM_CDF2(16737)},
+ { AOM_CDF2(17838)},
+ { AOM_CDF2(18922)},
+ { AOM_CDF2(19515)},
+ { AOM_CDF2(18636)},
+ { AOM_CDF2(17333)},
+ { AOM_CDF2(15776)},
+ { AOM_CDF2(22658)},
+ },
+ {
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ }
+ }
+ },
+ {
+ {
+ {
+ { AOM_CDF2(20177)},
+ { AOM_CDF2(20789)},
+ { AOM_CDF2(20262)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ },
+ {
+ { AOM_CDF2(21416)},
+ { AOM_CDF2(20855)},
+ { AOM_CDF2(23410)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ }
+ },
+ {
+ {
+ { AOM_CDF2(20238)},
+ { AOM_CDF2(21057)},
+ { AOM_CDF2(19159)},
+ { AOM_CDF2(22337)},
+ { AOM_CDF2(20159)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ },
+ {
+ { AOM_CDF2(20125)},
+ { AOM_CDF2(20559)},
+ { AOM_CDF2(21707)},
+ { AOM_CDF2(22296)},
+ { AOM_CDF2(17333)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ }
+ },
+ {
+ {
+ { AOM_CDF2(19941)},
+ { AOM_CDF2(20527)},
+ { AOM_CDF2(21470)},
+ { AOM_CDF2(22487)},
+ { AOM_CDF2(19558)},
+ { AOM_CDF2(22354)},
+ { AOM_CDF2(20331)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ },
+ {
+ { AOM_CDF2(22752)},
+ { AOM_CDF2(25006)},
+ { AOM_CDF2(22075)},
+ { AOM_CDF2(21576)},
+ { AOM_CDF2(17740)},
+ { AOM_CDF2(21690)},
+ { AOM_CDF2(19211)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ }
+ },
+ {
+ {
+ { AOM_CDF2(21442)},
+ { AOM_CDF2(22358)},
+ { AOM_CDF2(18503)},
+ { AOM_CDF2(20291)},
+ { AOM_CDF2(19945)},
+ { AOM_CDF2(21294)},
+ { AOM_CDF2(21178)},
+ { AOM_CDF2(19400)},
+ { AOM_CDF2(10556)},
+ },
+ {
+ { AOM_CDF2(24648)},
+ { AOM_CDF2(24949)},
+ { AOM_CDF2(20708)},
+ { AOM_CDF2(23905)},
+ { AOM_CDF2(20501)},
+ { AOM_CDF2(9558)},
+ { AOM_CDF2(9423)},
+ { AOM_CDF2(30365)},
+ { AOM_CDF2(19253)},
+ }
+ },
+ {
+ {
+ { AOM_CDF2(26064)},
+ { AOM_CDF2(22098)},
+ { AOM_CDF2(19613)},
+ { AOM_CDF2(20525)},
+ { AOM_CDF2(17595)},
+ { AOM_CDF2(16618)},
+ { AOM_CDF2(20497)},
+ { AOM_CDF2(18989)},
+ { AOM_CDF2(15513)},
+ },
+ {
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ { AOM_CDF2(16384)},
+ }
+ }
+ }
+};
+
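+/*
+ * End-of-block class CDFs for transforms with 16..1024 coefficients. The
+ * unnamed [2] dimension separates 2-D from 1-D transform classes.
+ */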
+static const u16 av1_default_eob_multi16_cdfs[TOKEN_CDF_Q_CTXS][PLANE_TYPES][2][4] = {
+ {
+ {
+ { AOM_CDF5(840, 1039, 1980, 4895)},
+ { AOM_CDF5(370, 671, 1883, 4471)}
+ },
+ {
+ { AOM_CDF5(3247, 4950, 9688, 14563)},
+ { AOM_CDF5(1904, 3354, 7763, 14647)}
+ }
+ },
+ {
+ {
+ { AOM_CDF5(2125, 2551, 5165, 8946)},
+ { AOM_CDF5(513, 765, 1859, 6339)}
+ },
+ {
+ { AOM_CDF5(7637, 9498, 14259, 19108)},
+ { AOM_CDF5(2497, 4096, 8866, 16993)}
+ }
+ },
+ {
+ {
+ { AOM_CDF5(4016, 4897, 8881, 14968)},
+ { AOM_CDF5(716, 1105, 2646, 10056)}
+ },
+ {
+ { AOM_CDF5(11139, 13270, 18241, 23566)},
+ { AOM_CDF5(3192, 5032, 10297, 19755)}
+ }
+ },
+ {
+ {
+ { AOM_CDF5(6708, 8958, 14746, 22133)},
+ { AOM_CDF5(1222, 2074, 4783, 15410)}
+ },
+ {
+ { AOM_CDF5(19575, 21766, 26044, 29709)},
+ { AOM_CDF5(7297, 10767, 19273, 28194)}
+ }
+ }
+};
+
+static const u16 av1_default_eob_multi32_cdfs[TOKEN_CDF_Q_CTXS][PLANE_TYPES][2][8] = {
+ {
+ {
+ { AOM_CDF6(400, 520, 977, 2102, 6542)},
+ { AOM_CDF6(210, 405, 1315, 3326, 7537)}
+ },
+ {
+ { AOM_CDF6(2636, 4273, 7588, 11794, 20401)},
+ { AOM_CDF6(1786, 3179, 6902, 11357, 19054)}
+ }
+ },
+ {
+ {
+ { AOM_CDF6(989, 1249, 2019, 4151, 10785)},
+ { AOM_CDF6(313, 441, 1099, 2917, 8562)}
+ },
+ {
+ { AOM_CDF6(8394, 10352, 13932, 18855, 26014)},
+ { AOM_CDF6(2578, 4124, 8181, 13670, 24234)}
+ }
+ },
+ {
+ {
+ { AOM_CDF6(2515, 3003, 4452, 8162, 16041)},
+ { AOM_CDF6(574, 821, 1836, 5089, 13128)}
+ },
+ {
+ { AOM_CDF6(13468, 16303, 20361, 25105, 29281)},
+ { AOM_CDF6(3542, 5502, 10415, 16760, 25644)}
+ }
+ },
+ {
+ {
+ { AOM_CDF6(4617, 5709, 8446, 13584, 23135)},
+ { AOM_CDF6(1156, 1702, 3675, 9274, 20539)}
+ },
+ {
+ { AOM_CDF6(22086, 24282, 27010, 29770, 31743)},
+ { AOM_CDF6(7699, 10897, 20891, 26926, 31628)}
+ }
+ }
+};
+
+static const u16 av1_default_eob_multi64_cdfs[TOKEN_CDF_Q_CTXS][PLANE_TYPES][2][8] = {
+ {
+ {
+ { AOM_CDF7(329, 498, 1101, 1784, 3265, 7758)},
+ { AOM_CDF7(335, 730, 1459, 5494, 8755, 12997)}
+ },
+ {
+ { AOM_CDF7(3505, 5304, 10086, 13814, 17684, 23370)},
+ { AOM_CDF7(1563, 2700, 4876, 10911, 14706, 22480)}
+ }
+ },
+ {
+ {
+ { AOM_CDF7(1260, 1446, 2253, 3712, 6652, 13369)},
+ { AOM_CDF7(401, 605, 1029, 2563, 5845, 12626)}
+ },
+ {
+ { AOM_CDF7(8609, 10612, 14624, 18714, 22614, 29024)},
+ { AOM_CDF7(1923, 3127, 5867, 9703, 14277, 27100)}
+ }
+ },
+ {
+ {
+ { AOM_CDF7(2374, 2772, 4583, 7276, 12288, 19706)},
+ { AOM_CDF7(497, 810, 1315, 3000, 7004, 15641)}
+ },
+ {
+ { AOM_CDF7(15050, 17126, 21410, 24886, 28156, 30726)},
+ { AOM_CDF7(4034, 6290, 10235, 14982, 21214, 28491)}
+ }
+ },
+ {
+ {
+ { AOM_CDF7(6307, 7541, 12060, 16358, 22553, 27865)},
+ { AOM_CDF7(1289, 2320, 3971, 7926, 14153, 24291)}
+ },
+ {
+ { AOM_CDF7(24212, 25708, 28268, 30035, 31307, 32049)},
+ { AOM_CDF7(8726, 12378, 19409, 26450, 30038, 32462)}
+ }
+ }
+};
+
+static const u16 av1_default_eob_multi128_cdfs[TOKEN_CDF_Q_CTXS][PLANE_TYPES][2][8] = {
+ {
+ {
+ { AOM_CDF8(219, 482, 1140, 2091, 3680, 6028, 12586)},
+ { AOM_CDF8(371, 699, 1254, 4830, 9479, 12562, 17497)}
+ },
+ {
+ { AOM_CDF8(5245, 7456, 12880, 15852, 20033, 23932, 27608)},
+ { AOM_CDF8(2054, 3472, 5869, 14232, 18242, 20590, 26752)}
+ }
+ },
+ {
+ {
+ { AOM_CDF8(685, 933, 1488, 2714, 4766, 8562, 19254)},
+ { AOM_CDF8(217, 352, 618, 2303, 5261, 9969, 17472)}
+ },
+ {
+ { AOM_CDF8(8045, 11200, 15497, 19595, 23948, 27408, 30938)},
+ { AOM_CDF8(2310, 4160, 7471, 14997, 17931, 20768, 30240)}
+ }
+ },
+ {
+ {
+ { AOM_CDF8(1366, 1738, 2527, 5016, 9355, 15797, 24643)},
+ { AOM_CDF8(354, 558, 944, 2760, 7287, 14037, 21779)}
+ },
+ {
+ { AOM_CDF8(13627, 16246, 20173, 24429, 27948, 30415, 31863)},
+ { AOM_CDF8(6275, 9889, 14769, 23164, 27988, 30493, 32272)}
+ }
+ },
+ {
+ {
+ { AOM_CDF8(3472, 4885, 7489, 12481, 18517, 24536, 29635)},
+ { AOM_CDF8(886, 1731, 3271, 8469, 15569, 22126, 28383)}
+ },
+ {
+ { AOM_CDF8(24313, 26062, 28385, 30107, 31217, 31898, 32345)},
+ { AOM_CDF8(9165, 13282, 21150, 30286, 31894, 32571, 32712)}
+ }
+ }
+};
+
+static const u16 av1_default_eob_multi256_cdfs[TOKEN_CDF_Q_CTXS][PLANE_TYPES][2][8] = {
+ {
+ {
+ { AOM_CDF9(310, 584, 1887, 3589, 6168, 8611, 11352, 15652)},
+ { AOM_CDF9(998, 1850, 2998, 5604, 17341, 19888, 22899, 25583)}
+ },
+ {
+ { AOM_CDF9(2520, 3240, 5952, 8870, 12577, 17558, 19954, 24168)},
+ { AOM_CDF9(2203, 4130, 7435, 10739, 20652, 23681, 25609, 27261)}
+ }
+ },
+ {
+ {
+ { AOM_CDF9(1448, 2109, 4151, 6263, 9329, 13260, 17944, 23300)},
+ { AOM_CDF9(399, 1019, 1749, 3038, 10444, 15546, 22739, 27294)}
+ },
+ {
+ { AOM_CDF9(6402, 8148, 12623, 15072, 18728, 22847, 26447, 29377)},
+ { AOM_CDF9(1674, 3252, 5734, 10159, 22397, 23802, 24821, 30940)}
+ }
+ },
+ {
+ {
+ { AOM_CDF9(3089, 3920, 6038, 9460, 14266, 19881, 25766, 29176)},
+ { AOM_CDF9(1084, 2358, 3488, 5122, 11483, 18103, 26023, 29799)}
+ },
+ {
+ { AOM_CDF9(11514, 13794, 17480, 20754, 24361, 27378, 29492, 31277)},
+ { AOM_CDF9(6571, 9610, 15516, 21826, 29092, 30829, 31842, 32708)}
+ }
+ },
+ {
+ {
+ { AOM_CDF9(5348, 7113, 11820, 15924, 22106, 26777, 30334, 31757)},
+ { AOM_CDF9(2453, 4474, 6307, 8777, 16474, 22975, 29000, 31547)}
+ },
+ {
+ { AOM_CDF9(23110, 24597, 27140, 28894, 30167, 30927, 31392, 32094)},
+ { AOM_CDF9(9998, 17661, 25178, 28097, 31308, 32038, 32403, 32695)}
+ }
+ }
+};
+
+static const u16 av1_default_eob_multi512_cdfs[TOKEN_CDF_Q_CTXS][PLANE_TYPES][2][16] = {
+ {
+ {
+ { AOM_CDF10(641, 983, 3707, 5430, 10234, 14958, 18788, 23412, 26061)},
+ { AOM_CDF10(3277, 6554, 9830, 13107, 16384, 19661, 22938, 26214, 29491)}
+ },
+ {
+ { AOM_CDF10(5095, 6446, 9996, 13354, 16017, 17986, 20919, 26129, 29140)},
+ { AOM_CDF10(3277, 6554, 9830, 13107, 16384, 19661, 22938, 26214, 29491)}
+ }
+ },
+ {
+ {
+ { AOM_CDF10(1230, 2278, 5035, 7776, 11871, 15346, 19590, 24584, 28749)},
+ { AOM_CDF10(3277, 6554, 9830, 13107, 16384, 19661, 22938, 26214, 29491)}
+ },
+ {
+ { AOM_CDF10(7265, 9979, 15819, 19250, 21780, 23846, 26478, 28396, 31811)},
+ { AOM_CDF10(3277, 6554, 9830, 13107, 16384, 19661, 22938, 26214, 29491)}
+ }
+ },
+ {
+ {
+ { AOM_CDF10(2624, 3936, 6480, 9686, 13979, 17726, 23267, 28410, 31078)},
+ { AOM_CDF10(3277, 6554, 9830, 13107, 16384, 19661, 22938, 26214, 29491)}
+ },
+ {
+ { AOM_CDF10(12015, 14769, 19588, 22052, 24222, 25812, 27300, 29219, 32114)},
+ { AOM_CDF10(3277, 6554, 9830, 13107, 16384, 19661, 22938, 26214, 29491)}
+ }
+ },
+ {
+ {
+ { AOM_CDF10(5927, 7809, 10923, 14597, 19439, 24135, 28456, 31142, 32060)},
+ { AOM_CDF10(3277, 6554, 9830, 13107, 16384, 19661, 22938, 26214, 29491)}
+ },
+ {
+ { AOM_CDF10(21093, 23043, 25742, 27658, 29097, 29716, 30073, 30820, 31956)},
+ { AOM_CDF10(3277, 6554, 9830, 13107, 16384, 19661, 22938, 26214, 29491)}
+ }
+ }
+};
+
+static const u16 av1_default_eob_multi1024_cdfs[TOKEN_CDF_Q_CTXS][PLANE_TYPES][2][16] = {
+ {
+ {
+ { AOM_CDF11(393, 421, 751, 1623, 3160,
+ 6352, 13345, 18047, 22571, 25830)},
+ { AOM_CDF11(2979, 5958, 8937, 11916, 14895,
+ 17873, 20852, 23831, 26810, 29789)}
+ },
+ {
+ { AOM_CDF11(1865, 1988, 2930, 4242, 10533,
+ 16538, 21354, 27255, 28546, 31784)},
+ { AOM_CDF11(2979, 5958, 8937, 11916, 14895,
+ 17873, 20852, 23831, 26810, 29789)}
+ }
+ },
+ {
+ {
+ { AOM_CDF11(696, 948, 3145, 5702, 9706,
+ 13217, 17851, 21856, 25692, 28034)},
+ { AOM_CDF11(2979, 5958, 8937, 11916, 14895,
+ 17873, 20852, 23831, 26810, 29789)}
+ },
+ {
+ { AOM_CDF11(2672, 3591, 9330, 17084, 22725,
+ 24284, 26527, 28027, 28377, 30876)},
+ { AOM_CDF11(2979, 5958, 8937, 11916, 14895,
+ 17873, 20852, 23831, 26810, 29789)}
+ }
+ },
+ {
+ {
+ { AOM_CDF11(2784, 3831, 7041, 10521, 14847,
+ 18844, 23155, 26682, 29229, 31045)},
+ { AOM_CDF11(2979, 5958, 8937, 11916, 14895,
+ 17873, 20852, 23831, 26810, 29789)}
+ },
+ {
+ { AOM_CDF11(9577, 12466, 17739, 20750, 22061,
+ 23215, 24601, 25483, 25843, 32056)},
+ { AOM_CDF11(2979, 5958, 8937, 11916, 14895,
+ 17873, 20852, 23831, 26810, 29789)}
+ }
+ },
+ {
+ {
+ { AOM_CDF11(6698, 8334, 11961, 15762, 20186,
+ 23862, 27434, 29326, 31082, 32050)},
+ { AOM_CDF11(2979, 5958, 8937, 11916, 14895,
+ 17873, 20852, 23831, 26810, 29789)}
+ },
+ {
+ { AOM_CDF11(20569, 22426, 25569, 26859, 28053,
+ 28913, 29486, 29724, 29807, 32570)},
+ { AOM_CDF11(2979, 5958, 8937, 11916, 14895,
+ 17873, 20852, 23831, 26810, 29789)}
+ }
+ }
+};
+
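+/*
+ * Coefficient base-range CDFs, indexed by quantizer context, transform
+ * size, plane type and level context. AOM_CDF4(8192, 16384, 24576) rows
+ * encode a uniform distribution.
+ */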
+static const u16 av1_default_coeff_lps_multi_cdfs[TOKEN_CDF_Q_CTXS]
+ [TX_SIZES][PLANE_TYPES][LEVEL_CONTEXTS][CDF_SIZE(BR_CDF_SIZE) + 1] = {
+ {
+ {
+ {
+ { AOM_CDF4(14298, 20718, 24174)}, { AOM_CDF4(12536, 19601, 23789)},
+ { AOM_CDF4(8712, 15051, 19503)}, { AOM_CDF4(6170, 11327, 15434)},
+ { AOM_CDF4(4742, 8926, 12538)}, { AOM_CDF4(3803, 7317, 10546)},
+ { AOM_CDF4(1696, 3317, 4871)}, { AOM_CDF4(14392, 19951, 22756)},
+ { AOM_CDF4(15978, 23218, 26818)}, { AOM_CDF4(12187, 19474, 23889)},
+ { AOM_CDF4(9176, 15640, 20259)}, { AOM_CDF4(7068, 12655, 17028)},
+ { AOM_CDF4(5656, 10442, 14472)}, { AOM_CDF4(2580, 4992, 7244)},
+ { AOM_CDF4(12136, 18049, 21426)}, { AOM_CDF4(13784, 20721, 24481)},
+ { AOM_CDF4(10836, 17621, 21900)}, { AOM_CDF4(8372, 14444, 18847)},
+ { AOM_CDF4(6523, 11779, 16000)}, { AOM_CDF4(5337, 9898, 13760)},
+ { AOM_CDF4(3034, 5860, 8462)}
+ },
+ {
+ { AOM_CDF4(15967, 22905, 26286)}, { AOM_CDF4(13534, 20654, 24579)},
+ { AOM_CDF4(9504, 16092, 20535)}, { AOM_CDF4(6975, 12568, 16903)},
+ { AOM_CDF4(5364, 10091, 14020)}, { AOM_CDF4(4357, 8370, 11857)},
+ { AOM_CDF4(2506, 4934, 7218)}, { AOM_CDF4(23032, 28815, 30936)},
+ { AOM_CDF4(19540, 26704, 29719)}, { AOM_CDF4(15158, 22969, 27097)},
+ { AOM_CDF4(11408, 18865, 23650)}, { AOM_CDF4(8885, 15448, 20250)},
+ { AOM_CDF4(7108, 12853, 17416)}, { AOM_CDF4(4231, 8041, 11480)},
+ { AOM_CDF4(19823, 26490, 29156)}, { AOM_CDF4(18890, 25929, 28932)},
+ { AOM_CDF4(15660, 23491, 27433)}, { AOM_CDF4(12147, 19776, 24488)},
+ { AOM_CDF4(9728, 16774, 21649)}, { AOM_CDF4(7919, 14277, 19066)},
+ { AOM_CDF4(5440, 10170, 14185)}
+ }
+ },
+ {
+ {
+ { AOM_CDF4(14406, 20862, 24414)}, { AOM_CDF4(11824, 18907, 23109)},
+ { AOM_CDF4(8257, 14393, 18803)}, { AOM_CDF4(5860, 10747, 14778)},
+ { AOM_CDF4(4475, 8486, 11984)}, { AOM_CDF4(3606, 6954, 10043)},
+ { AOM_CDF4(1736, 3410, 5048)}, { AOM_CDF4(14430, 20046, 22882)},
+ { AOM_CDF4(15593, 22899, 26709)}, { AOM_CDF4(12102, 19368, 23811)},
+ { AOM_CDF4(9059, 15584, 20262)}, { AOM_CDF4(6999, 12603, 17048)},
+ { AOM_CDF4(5684, 10497, 14553)}, { AOM_CDF4(2822, 5438, 7862)},
+ { AOM_CDF4(15785, 21585, 24359)}, { AOM_CDF4(18347, 25229, 28266)},
+ { AOM_CDF4(14974, 22487, 26389)}, { AOM_CDF4(11423, 18681, 23271)},
+ { AOM_CDF4(8863, 15350, 20008)}, { AOM_CDF4(7153, 12852, 17278)},
+ { AOM_CDF4(3707, 7036, 9982)}
+ },
+ {
+ { AOM_CDF4(15460, 21696, 25469)}, { AOM_CDF4(12170, 19249, 23191)},
+ { AOM_CDF4(8723, 15027, 19332)}, { AOM_CDF4(6428, 11704, 15874)},
+ { AOM_CDF4(4922, 9292, 13052)}, { AOM_CDF4(4139, 7695, 11010)},
+ { AOM_CDF4(2291, 4508, 6598)}, { AOM_CDF4(19856, 26920, 29828)},
+ { AOM_CDF4(17923, 25289, 28792)}, { AOM_CDF4(14278, 21968, 26297)},
+ { AOM_CDF4(10910, 18136, 22950)}, { AOM_CDF4(8423, 14815, 19627)},
+ { AOM_CDF4(6771, 12283, 16774)}, { AOM_CDF4(4074, 7750, 11081)},
+ { AOM_CDF4(19852, 26074, 28672)}, { AOM_CDF4(19371, 26110, 28989)},
+ { AOM_CDF4(16265, 23873, 27663)}, { AOM_CDF4(12758, 20378, 24952)},
+ { AOM_CDF4(10095, 17098, 21961)}, { AOM_CDF4(8250, 14628, 19451)},
+ { AOM_CDF4(5205, 9745, 13622)}
+ }
+ },
+ {
+ {
+ { AOM_CDF4(10563, 16233, 19763)}, { AOM_CDF4(9794, 16022, 19804)},
+ { AOM_CDF4(6750, 11945, 15759)}, { AOM_CDF4(4963, 9186, 12752)},
+ { AOM_CDF4(3845, 7435, 10627)}, { AOM_CDF4(3051, 6085, 8834)},
+ { AOM_CDF4(1311, 2596, 3830)}, { AOM_CDF4(11246, 16404, 19689)},
+ { AOM_CDF4(12315, 18911, 22731)}, { AOM_CDF4(10557, 17095, 21289)},
+ { AOM_CDF4(8136, 14006, 18249)}, { AOM_CDF4(6348, 11474, 15565)},
+ { AOM_CDF4(5196, 9655, 13400)}, { AOM_CDF4(2349, 4526, 6587)},
+ { AOM_CDF4(13337, 18730, 21569)}, { AOM_CDF4(19306, 26071, 28882)},
+ { AOM_CDF4(15952, 23540, 27254)}, { AOM_CDF4(12409, 19934, 24430)},
+ { AOM_CDF4(9760, 16706, 21389)}, { AOM_CDF4(8004, 14220, 18818)},
+ { AOM_CDF4(4138, 7794, 10961)}
+ },
+ {
+ { AOM_CDF4(10870, 16684, 20949)}, { AOM_CDF4(9664, 15230, 18680)},
+ { AOM_CDF4(6886, 12109, 15408)}, { AOM_CDF4(4825, 8900, 12305)},
+ { AOM_CDF4(3630, 7162, 10314)}, { AOM_CDF4(3036, 6429, 9387)},
+ { AOM_CDF4(1671, 3296, 4940)}, { AOM_CDF4(13819, 19159, 23026)},
+ { AOM_CDF4(11984, 19108, 23120)}, { AOM_CDF4(10690, 17210, 21663)},
+ { AOM_CDF4(7984, 14154, 18333)}, { AOM_CDF4(6868, 12294, 16124)},
+ { AOM_CDF4(5274, 8994, 12868)}, { AOM_CDF4(2988, 5771, 8424)},
+ { AOM_CDF4(19736, 26647, 29141)}, { AOM_CDF4(18933, 26070, 28984)},
+ { AOM_CDF4(15779, 23048, 27200)}, { AOM_CDF4(12638, 20061, 24532)},
+ { AOM_CDF4(10692, 17545, 22220)}, { AOM_CDF4(9217, 15251, 20054)},
+ { AOM_CDF4(5078, 9284, 12594)}
+ }
+ },
+ {
+ {
+ { AOM_CDF4(2331, 3662, 5244)}, { AOM_CDF4(2891, 4771, 6145)},
+ { AOM_CDF4(4598, 7623, 9729)}, { AOM_CDF4(3520, 6845, 9199)},
+ { AOM_CDF4(3417, 6119, 9324)}, { AOM_CDF4(2601, 5412, 7385)},
+ { AOM_CDF4(600, 1173, 1744)}, { AOM_CDF4(7672, 13286, 17469)},
+ { AOM_CDF4(4232, 7792, 10793)}, { AOM_CDF4(2915, 5317, 7397)},
+ { AOM_CDF4(2318, 4356, 6152)}, { AOM_CDF4(2127, 4000, 5554)},
+ { AOM_CDF4(1850, 3478, 5275)}, { AOM_CDF4(977, 1933, 2843)},
+ { AOM_CDF4(18280, 24387, 27989)}, { AOM_CDF4(15852, 22671, 26185)},
+ { AOM_CDF4(13845, 20951, 24789)}, { AOM_CDF4(11055, 17966, 22129)},
+ { AOM_CDF4(9138, 15422, 19801)}, { AOM_CDF4(7454, 13145, 17456)},
+ { AOM_CDF4(3370, 6393, 9013)}
+ },
+ {
+ { AOM_CDF4(5842, 9229, 10838)}, { AOM_CDF4(2313, 3491, 4276)},
+ { AOM_CDF4(2998, 6104, 7496)}, { AOM_CDF4(2420, 7447, 9868)},
+ { AOM_CDF4(3034, 8495, 10923)}, { AOM_CDF4(4076, 8937, 10975)},
+ { AOM_CDF4(1086, 2370, 3299)}, { AOM_CDF4(9714, 17254, 20444)},
+ { AOM_CDF4(8543, 13698, 17123)}, { AOM_CDF4(4918, 9007, 11910)},
+ { AOM_CDF4(4129, 7532, 10553)}, { AOM_CDF4(2364, 5533, 8058)},
+ { AOM_CDF4(1834, 3546, 5563)}, { AOM_CDF4(1473, 2908, 4133)},
+ { AOM_CDF4(15405, 21193, 25619)}, { AOM_CDF4(15691, 21952, 26561)},
+ { AOM_CDF4(12962, 19194, 24165)}, { AOM_CDF4(10272, 17855, 22129)},
+ { AOM_CDF4(8588, 15270, 20718)}, { AOM_CDF4(8682, 14669, 19500)},
+ { AOM_CDF4(4870, 9636, 13205)}
+ }
+ },
+ {
+ {
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}
+ },
+ {
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}
+ }
+ }
+ },
+ {
+ {
+ {
+ { AOM_CDF4(14995, 21341, 24749)}, { AOM_CDF4(13158, 20289, 24601)},
+ { AOM_CDF4(8941, 15326, 19876)}, { AOM_CDF4(6297, 11541, 15807)},
+ { AOM_CDF4(4817, 9029, 12776)}, { AOM_CDF4(3731, 7273, 10627)},
+ { AOM_CDF4(1847, 3617, 5354)}, { AOM_CDF4(14472, 19659, 22343)},
+ { AOM_CDF4(16806, 24162, 27533)}, { AOM_CDF4(12900, 20404, 24713)},
+ { AOM_CDF4(9411, 16112, 20797)}, { AOM_CDF4(7056, 12697, 17148)},
+ { AOM_CDF4(5544, 10339, 14460)}, { AOM_CDF4(2954, 5704, 8319)},
+ { AOM_CDF4(12464, 18071, 21354)}, { AOM_CDF4(15482, 22528, 26034)},
+ { AOM_CDF4(12070, 19269, 23624)}, { AOM_CDF4(8953, 15406, 20106)},
+ { AOM_CDF4(7027, 12730, 17220)}, { AOM_CDF4(5887, 10913, 15140)},
+ { AOM_CDF4(3793, 7278, 10447)}
+ },
+ {
+ { AOM_CDF4(15571, 22232, 25749)}, { AOM_CDF4(14506, 21575, 25374)},
+ { AOM_CDF4(10189, 17089, 21569)}, { AOM_CDF4(7316, 13301, 17915)},
+ { AOM_CDF4(5783, 10912, 15190)}, { AOM_CDF4(4760, 9155, 13088)},
+ { AOM_CDF4(2993, 5966, 8774)}, { AOM_CDF4(23424, 28903, 30778)},
+ { AOM_CDF4(20775, 27666, 30290)}, { AOM_CDF4(16474, 24410, 28299)},
+ { AOM_CDF4(12471, 20180, 24987)}, { AOM_CDF4(9410, 16487, 21439)},
+ { AOM_CDF4(7536, 13614, 18529)}, { AOM_CDF4(5048, 9586, 13549)},
+ { AOM_CDF4(21090, 27290, 29756)}, { AOM_CDF4(20796, 27402, 30026)},
+ { AOM_CDF4(17819, 25485, 28969)}, { AOM_CDF4(13860, 21909, 26462)},
+ { AOM_CDF4(11002, 18494, 23529)}, { AOM_CDF4(8953, 15929, 20897)},
+ { AOM_CDF4(6448, 11918, 16454)}
+ }
+ },
+ {
+ {
+ { AOM_CDF4(15999, 22208, 25449)}, { AOM_CDF4(13050, 19988, 24122)},
+ { AOM_CDF4(8594, 14864, 19378)}, { AOM_CDF4(6033, 11079, 15238)},
+ { AOM_CDF4(4554, 8683, 12347)}, { AOM_CDF4(3672, 7139, 10337)},
+ { AOM_CDF4(1900, 3771, 5576)}, { AOM_CDF4(15788, 21340, 23949)},
+ { AOM_CDF4(16825, 24235, 27758)}, { AOM_CDF4(12873, 20402, 24810)},
+ { AOM_CDF4(9590, 16363, 21094)}, { AOM_CDF4(7352, 13209, 17733)},
+ { AOM_CDF4(5960, 10989, 15184)}, { AOM_CDF4(3232, 6234, 9007)},
+ { AOM_CDF4(15761, 20716, 23224)}, { AOM_CDF4(19318, 25989, 28759)},
+ { AOM_CDF4(15529, 23094, 26929)}, { AOM_CDF4(11662, 18989, 23641)},
+ { AOM_CDF4(8955, 15568, 20366)}, { AOM_CDF4(7281, 13106, 17708)},
+ { AOM_CDF4(4248, 8059, 11440)}
+ },
+ {
+ { AOM_CDF4(14899, 21217, 24503)}, { AOM_CDF4(13519, 20283, 24047)},
+ { AOM_CDF4(9429, 15966, 20365)}, { AOM_CDF4(6700, 12355, 16652)},
+ { AOM_CDF4(5088, 9704, 13716)}, { AOM_CDF4(4243, 8154, 11731)},
+ { AOM_CDF4(2702, 5364, 7861)}, { AOM_CDF4(22745, 28388, 30454)},
+ { AOM_CDF4(20235, 27146, 29922)}, { AOM_CDF4(15896, 23715, 27637)},
+ { AOM_CDF4(11840, 19350, 24131)}, { AOM_CDF4(9122, 15932, 20880)},
+ { AOM_CDF4(7488, 13581, 18362)}, { AOM_CDF4(5114, 9568, 13370)},
+ { AOM_CDF4(20845, 26553, 28932)}, { AOM_CDF4(20981, 27372, 29884)},
+ { AOM_CDF4(17781, 25335, 28785)}, { AOM_CDF4(13760, 21708, 26297)},
+ { AOM_CDF4(10975, 18415, 23365)}, { AOM_CDF4(9045, 15789, 20686)},
+ { AOM_CDF4(6130, 11199, 15423)}
+ }
+ },
+ {
+ {
+ { AOM_CDF4(13549, 19724, 23158)}, { AOM_CDF4(11844, 18382, 22246)},
+ { AOM_CDF4(7919, 13619, 17773)}, { AOM_CDF4(5486, 10143, 13946)},
+ { AOM_CDF4(4166, 7983, 11324)}, { AOM_CDF4(3364, 6506, 9427)},
+ { AOM_CDF4(1598, 3160, 4674)}, { AOM_CDF4(15281, 20979, 23781)},
+ { AOM_CDF4(14939, 22119, 25952)}, { AOM_CDF4(11363, 18407, 22812)},
+ { AOM_CDF4(8609, 14857, 19370)}, { AOM_CDF4(6737, 12184, 16480)},
+ { AOM_CDF4(5506, 10263, 14262)}, { AOM_CDF4(2990, 5786, 8380)},
+ { AOM_CDF4(20249, 25253, 27417)}, { AOM_CDF4(21070, 27518, 30001)},
+ { AOM_CDF4(16854, 24469, 28074)}, { AOM_CDF4(12864, 20486, 25000)},
+ { AOM_CDF4(9962, 16978, 21778)}, { AOM_CDF4(8074, 14338, 19048)},
+ { AOM_CDF4(4494, 8479, 11906)}
+ },
+ {
+ { AOM_CDF4(13960, 19617, 22829)}, { AOM_CDF4(11150, 17341, 21228)},
+ { AOM_CDF4(7150, 12964, 17190)}, { AOM_CDF4(5331, 10002, 13867)},
+ { AOM_CDF4(4167, 7744, 11057)}, { AOM_CDF4(3480, 6629, 9646)},
+ { AOM_CDF4(1883, 3784, 5686)}, { AOM_CDF4(18752, 25660, 28912)},
+ { AOM_CDF4(16968, 24586, 28030)}, { AOM_CDF4(13520, 21055, 25313)},
+ { AOM_CDF4(10453, 17626, 22280)}, { AOM_CDF4(8386, 14505, 19116)},
+ { AOM_CDF4(6742, 12595, 17008)}, { AOM_CDF4(4273, 8140, 11499)},
+ { AOM_CDF4(22120, 27827, 30233)}, { AOM_CDF4(20563, 27358, 29895)},
+ { AOM_CDF4(17076, 24644, 28153)}, { AOM_CDF4(13362, 20942, 25309)},
+ { AOM_CDF4(10794, 17965, 22695)}, { AOM_CDF4(9014, 15652, 20319)},
+ { AOM_CDF4(5708, 10512, 14497)}
+ }
+ },
+ {
+ {
+ { AOM_CDF4(5705, 10930, 15725)}, { AOM_CDF4(7946, 12765, 16115)},
+ { AOM_CDF4(6801, 12123, 16226)}, { AOM_CDF4(5462, 10135, 14200)},
+ { AOM_CDF4(4189, 8011, 11507)}, { AOM_CDF4(3191, 6229, 9408)},
+ { AOM_CDF4(1057, 2137, 3212)}, { AOM_CDF4(10018, 17067, 21491)},
+ { AOM_CDF4(7380, 12582, 16453)}, { AOM_CDF4(6068, 10845, 14339)},
+ { AOM_CDF4(5098, 9198, 12555)}, { AOM_CDF4(4312, 8010, 11119)},
+ { AOM_CDF4(3700, 6966, 9781)}, { AOM_CDF4(1693, 3326, 4887)},
+ { AOM_CDF4(18757, 24930, 27774)}, { AOM_CDF4(17648, 24596, 27817)},
+ { AOM_CDF4(14707, 22052, 26026)}, { AOM_CDF4(11720, 18852, 23292)},
+ { AOM_CDF4(9357, 15952, 20525)}, { AOM_CDF4(7810, 13753, 18210)},
+ { AOM_CDF4(3879, 7333, 10328)}
+ },
+ {
+ { AOM_CDF4(8278, 13242, 15922)}, { AOM_CDF4(10547, 15867, 18919)},
+ { AOM_CDF4(9106, 15842, 20609)}, { AOM_CDF4(6833, 13007, 17218)},
+ { AOM_CDF4(4811, 9712, 13923)}, { AOM_CDF4(3985, 7352, 11128)},
+ { AOM_CDF4(1688, 3458, 5262)}, { AOM_CDF4(12951, 21861, 26510)},
+ { AOM_CDF4(9788, 16044, 20276)}, { AOM_CDF4(6309, 11244, 14870)},
+ { AOM_CDF4(5183, 9349, 12566)}, { AOM_CDF4(4389, 8229, 11492)},
+ { AOM_CDF4(3633, 6945, 10620)}, { AOM_CDF4(3600, 6847, 9907)},
+ { AOM_CDF4(21748, 28137, 30255)}, { AOM_CDF4(19436, 26581, 29560)},
+ { AOM_CDF4(16359, 24201, 27953)}, { AOM_CDF4(13961, 21693, 25871)},
+ { AOM_CDF4(11544, 18686, 23322)}, { AOM_CDF4(9372, 16462, 20952)},
+ { AOM_CDF4(6138, 11210, 15390)}
+ }
+ },
+ {
+ {
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}
+ },
+ {
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}
+ }
+ }
+ },
+ {
+ {
+ {
+ { AOM_CDF4(16138, 22223, 25509)}, { AOM_CDF4(15347, 22430, 26332)},
+ { AOM_CDF4(9614, 16736, 21332)}, { AOM_CDF4(6600, 12275, 16907)},
+ { AOM_CDF4(4811, 9424, 13547)}, { AOM_CDF4(3748, 7809, 11420)},
+ { AOM_CDF4(2254, 4587, 6890)}, { AOM_CDF4(15196, 20284, 23177)},
+ { AOM_CDF4(18317, 25469, 28451)}, { AOM_CDF4(13918, 21651, 25842)},
+ { AOM_CDF4(10052, 17150, 21995)}, { AOM_CDF4(7499, 13630, 18587)},
+ { AOM_CDF4(6158, 11417, 16003)}, { AOM_CDF4(4014, 7785, 11252)},
+ { AOM_CDF4(15048, 21067, 24384)}, { AOM_CDF4(18202, 25346, 28553)},
+ { AOM_CDF4(14302, 22019, 26356)}, { AOM_CDF4(10839, 18139, 23166)},
+ { AOM_CDF4(8715, 15744, 20806)}, { AOM_CDF4(7536, 13576, 18544)},
+ { AOM_CDF4(5413, 10335, 14498)}
+ },
+ {
+ { AOM_CDF4(17394, 24501, 27895)}, { AOM_CDF4(15889, 23420, 27185)},
+ { AOM_CDF4(11561, 19133, 23870)}, { AOM_CDF4(8285, 14812, 19844)},
+ { AOM_CDF4(6496, 12043, 16550)}, { AOM_CDF4(4771, 9574, 13677)},
+ { AOM_CDF4(3603, 6830, 10144)}, { AOM_CDF4(21656, 27704, 30200)},
+ { AOM_CDF4(21324, 27915, 30511)}, { AOM_CDF4(17327, 25336, 28997)},
+ { AOM_CDF4(13417, 21381, 26033)}, { AOM_CDF4(10132, 17425, 22338)},
+ { AOM_CDF4(8580, 15016, 19633)}, { AOM_CDF4(5694, 11477, 16411)},
+ { AOM_CDF4(24116, 29780, 31450)}, { AOM_CDF4(23853, 29695, 31591)},
+ { AOM_CDF4(20085, 27614, 30428)}, { AOM_CDF4(15326, 24335, 28575)},
+ { AOM_CDF4(11814, 19472, 24810)}, { AOM_CDF4(10221, 18611, 24767)},
+ { AOM_CDF4(7689, 14558, 20321)}
+ }
+ },
+ {
+ {
+ { AOM_CDF4(16214, 22380, 25770)}, { AOM_CDF4(14213, 21304, 25295)},
+ { AOM_CDF4(9213, 15823, 20455)}, { AOM_CDF4(6395, 11758, 16139)},
+ { AOM_CDF4(4779, 9187, 13066)}, { AOM_CDF4(3821, 7501, 10953)},
+ { AOM_CDF4(2293, 4567, 6795)}, { AOM_CDF4(15859, 21283, 23820)},
+ { AOM_CDF4(18404, 25602, 28726)}, { AOM_CDF4(14325, 21980, 26206)},
+ { AOM_CDF4(10669, 17937, 22720)}, { AOM_CDF4(8297, 14642, 19447)},
+ { AOM_CDF4(6746, 12389, 16893)}, { AOM_CDF4(4324, 8251, 11770)},
+ { AOM_CDF4(16532, 21631, 24475)}, { AOM_CDF4(20667, 27150, 29668)},
+ { AOM_CDF4(16728, 24510, 28175)}, { AOM_CDF4(12861, 20645, 25332)},
+ { AOM_CDF4(10076, 17361, 22417)}, { AOM_CDF4(8395, 14940, 19963)},
+ { AOM_CDF4(5731, 10683, 14912)}
+ },
+ {
+ { AOM_CDF4(14433, 21155, 24938)}, { AOM_CDF4(14658, 21716, 25545)},
+ { AOM_CDF4(9923, 16824, 21557)}, { AOM_CDF4(6982, 13052, 17721)},
+ { AOM_CDF4(5419, 10503, 15050)}, { AOM_CDF4(4852, 9162, 13014)},
+ { AOM_CDF4(3271, 6395, 9630)}, { AOM_CDF4(22210, 27833, 30109)},
+ { AOM_CDF4(20750, 27368, 29821)}, { AOM_CDF4(16894, 24828, 28573)},
+ { AOM_CDF4(13247, 21276, 25757)}, { AOM_CDF4(10038, 17265, 22563)},
+ { AOM_CDF4(8587, 14947, 20327)}, { AOM_CDF4(5645, 11371, 15252)},
+ { AOM_CDF4(22027, 27526, 29714)}, { AOM_CDF4(23098, 29146, 31221)},
+ { AOM_CDF4(19886, 27341, 30272)}, { AOM_CDF4(15609, 23747, 28046)},
+ { AOM_CDF4(11993, 20065, 24939)}, { AOM_CDF4(9637, 18267, 23671)},
+ { AOM_CDF4(7625, 13801, 19144)}
+ }
+ },
+ {
+ {
+ { AOM_CDF4(14438, 20798, 24089)}, { AOM_CDF4(12621, 19203, 23097)},
+ { AOM_CDF4(8177, 14125, 18402)}, { AOM_CDF4(5674, 10501, 14456)},
+ { AOM_CDF4(4236, 8239, 11733)}, { AOM_CDF4(3447, 6750, 9806)},
+ { AOM_CDF4(1986, 3950, 5864)}, { AOM_CDF4(16208, 22099, 24930)},
+ { AOM_CDF4(16537, 24025, 27585)}, { AOM_CDF4(12780, 20381, 24867)},
+ { AOM_CDF4(9767, 16612, 21416)}, { AOM_CDF4(7686, 13738, 18398)},
+ { AOM_CDF4(6333, 11614, 15964)}, { AOM_CDF4(3941, 7571, 10836)},
+ { AOM_CDF4(22819, 27422, 29202)}, { AOM_CDF4(22224, 28514, 30721)},
+ { AOM_CDF4(17660, 25433, 28913)}, { AOM_CDF4(13574, 21482, 26002)},
+ { AOM_CDF4(10629, 17977, 22938)}, { AOM_CDF4(8612, 15298, 20265)},
+ { AOM_CDF4(5607, 10491, 14596)}
+ },
+ {
+ { AOM_CDF4(13569, 19800, 23206)}, { AOM_CDF4(13128, 19924, 23869)},
+ { AOM_CDF4(8329, 14841, 19403)}, { AOM_CDF4(6130, 10976, 15057)},
+ { AOM_CDF4(4682, 8839, 12518)}, { AOM_CDF4(3656, 7409, 10588)},
+ { AOM_CDF4(2577, 5099, 7412)}, { AOM_CDF4(22427, 28684, 30585)},
+ { AOM_CDF4(20913, 27750, 30139)}, { AOM_CDF4(15840, 24109, 27834)},
+ { AOM_CDF4(12308, 20029, 24569)}, { AOM_CDF4(10216, 16785, 21458)},
+ { AOM_CDF4(8309, 14203, 19113)}, { AOM_CDF4(6043, 11168, 15307)},
+ { AOM_CDF4(23166, 28901, 30998)}, { AOM_CDF4(21899, 28405, 30751)},
+ { AOM_CDF4(18413, 26091, 29443)}, { AOM_CDF4(15233, 23114, 27352)},
+ { AOM_CDF4(12683, 20472, 25288)}, { AOM_CDF4(10702, 18259, 23409)},
+ { AOM_CDF4(8125, 14464, 19226)}
+ }
+ },
+ {
+ {
+ { AOM_CDF4(9040, 14786, 18360)}, { AOM_CDF4(9979, 15718, 19415)},
+ { AOM_CDF4(7913, 13918, 18311)}, { AOM_CDF4(5859, 10889, 15184)},
+ { AOM_CDF4(4593, 8677, 12510)}, { AOM_CDF4(3820, 7396, 10791)},
+ { AOM_CDF4(1730, 3471, 5192)}, { AOM_CDF4(11803, 18365, 22709)},
+ { AOM_CDF4(11419, 18058, 22225)}, { AOM_CDF4(9418, 15774, 20243)},
+ { AOM_CDF4(7539, 13325, 17657)}, { AOM_CDF4(6233, 11317, 15384)},
+ { AOM_CDF4(5137, 9656, 13545)}, { AOM_CDF4(2977, 5774, 8349)},
+ { AOM_CDF4(21207, 27246, 29640)}, { AOM_CDF4(19547, 26578, 29497)},
+ { AOM_CDF4(16169, 23871, 27690)}, { AOM_CDF4(12820, 20458, 25018)},
+ { AOM_CDF4(10224, 17332, 22214)}, { AOM_CDF4(8526, 15048, 19884)},
+ { AOM_CDF4(5037, 9410, 13118)}
+ },
+ {
+ { AOM_CDF4(12339, 17329, 20140)}, { AOM_CDF4(13505, 19895, 23225)},
+ { AOM_CDF4(9847, 16944, 21564)}, { AOM_CDF4(7280, 13256, 18348)},
+ { AOM_CDF4(4712, 10009, 14454)}, { AOM_CDF4(4361, 7914, 12477)},
+ { AOM_CDF4(2870, 5628, 7995)}, { AOM_CDF4(20061, 25504, 28526)},
+ { AOM_CDF4(15235, 22878, 26145)}, { AOM_CDF4(12985, 19958, 24155)},
+ { AOM_CDF4(9782, 16641, 21403)}, { AOM_CDF4(9456, 16360, 20760)},
+ { AOM_CDF4(6855, 12940, 18557)}, { AOM_CDF4(5661, 10564, 15002)},
+ { AOM_CDF4(25656, 30602, 31894)}, { AOM_CDF4(22570, 29107, 31092)},
+ { AOM_CDF4(18917, 26423, 29541)}, { AOM_CDF4(15940, 23649, 27754)},
+ { AOM_CDF4(12803, 20581, 25219)}, { AOM_CDF4(11082, 18695, 23376)},
+ { AOM_CDF4(7939, 14373, 19005)}
+ }
+ },
+ {
+ {
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}
+ },
+ {
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}
+ }
+ }
+ },
+ {
+ {
+ {
+ { AOM_CDF4(18315, 24289, 27551)}, { AOM_CDF4(16854, 24068, 27835)},
+ { AOM_CDF4(10140, 17927, 23173)}, { AOM_CDF4(6722, 12982, 18267)},
+ { AOM_CDF4(4661, 9826, 14706)}, { AOM_CDF4(3832, 8165, 12294)},
+ { AOM_CDF4(2795, 6098, 9245)}, { AOM_CDF4(17145, 23326, 26672)},
+ { AOM_CDF4(20733, 27680, 30308)}, { AOM_CDF4(16032, 24461, 28546)},
+ { AOM_CDF4(11653, 20093, 25081)}, { AOM_CDF4(9290, 16429, 22086)},
+ { AOM_CDF4(7796, 14598, 19982)}, { AOM_CDF4(6502, 12378, 17441)},
+ { AOM_CDF4(21681, 27732, 30320)}, { AOM_CDF4(22389, 29044, 31261)},
+ { AOM_CDF4(19027, 26731, 30087)}, { AOM_CDF4(14739, 23755, 28624)},
+ { AOM_CDF4(11358, 20778, 25511)}, { AOM_CDF4(10995, 18073, 24190)},
+ { AOM_CDF4(9162, 14990, 20617)}
+ },
+ {
+ { AOM_CDF4(21425, 27952, 30388)}, { AOM_CDF4(18062, 25838, 29034)},
+ { AOM_CDF4(11956, 19881, 24808)}, { AOM_CDF4(7718, 15000, 20980)},
+ { AOM_CDF4(5702, 11254, 16143)}, { AOM_CDF4(4898, 9088, 16864)},
+ { AOM_CDF4(3679, 6776, 11907)}, { AOM_CDF4(23294, 30160, 31663)},
+ { AOM_CDF4(24397, 29896, 31836)}, { AOM_CDF4(19245, 27128, 30593)},
+ { AOM_CDF4(13202, 19825, 26404)}, { AOM_CDF4(11578, 19297, 23957)},
+ { AOM_CDF4(8073, 13297, 21370)}, { AOM_CDF4(5461, 10923, 19745)},
+ { AOM_CDF4(27367, 30521, 31934)}, { AOM_CDF4(24904, 30671, 31940)},
+ { AOM_CDF4(23075, 28460, 31299)}, { AOM_CDF4(14400, 23658, 30417)},
+ { AOM_CDF4(13885, 23882, 28325)}, { AOM_CDF4(14746, 22938, 27853)},
+ { AOM_CDF4(5461, 16384, 27307)}
+ }
+ },
+ {
+ {
+ { AOM_CDF4(18274, 24813, 27890)}, { AOM_CDF4(15537, 23149, 27003)},
+ { AOM_CDF4(9449, 16740, 21827)}, { AOM_CDF4(6700, 12498, 17261)},
+ { AOM_CDF4(4988, 9866, 14198)}, { AOM_CDF4(4236, 8147, 11902)},
+ { AOM_CDF4(2867, 5860, 8654)}, { AOM_CDF4(17124, 23171, 26101)},
+ { AOM_CDF4(20396, 27477, 30148)}, { AOM_CDF4(16573, 24629, 28492)},
+ { AOM_CDF4(12749, 20846, 25674)}, { AOM_CDF4(10233, 17878, 22818)},
+ { AOM_CDF4(8525, 15332, 20363)}, { AOM_CDF4(6283, 11632, 16255)},
+ { AOM_CDF4(20466, 26511, 29286)}, { AOM_CDF4(23059, 29174, 31191)},
+ { AOM_CDF4(19481, 27263, 30241)}, { AOM_CDF4(15458, 23631, 28137)},
+ { AOM_CDF4(12416, 20608, 25693)}, { AOM_CDF4(10261, 18011, 23261)},
+ { AOM_CDF4(8016, 14655, 19666)}
+ },
+ {
+ { AOM_CDF4(17616, 24586, 28112)}, { AOM_CDF4(15809, 23299, 27155)},
+ { AOM_CDF4(10767, 18890, 23793)}, { AOM_CDF4(7727, 14255, 18865)},
+ { AOM_CDF4(6129, 11926, 16882)}, { AOM_CDF4(4482, 9704, 14861)},
+ { AOM_CDF4(3277, 7452, 11522)}, { AOM_CDF4(22956, 28551, 30730)},
+ { AOM_CDF4(22724, 28937, 30961)}, { AOM_CDF4(18467, 26324, 29580)},
+ { AOM_CDF4(13234, 20713, 25649)}, { AOM_CDF4(11181, 17592, 22481)},
+ { AOM_CDF4(8291, 18358, 24576)}, { AOM_CDF4(7568, 11881, 14984)},
+ { AOM_CDF4(24948, 29001, 31147)}, { AOM_CDF4(25674, 30619, 32151)},
+ { AOM_CDF4(20841, 26793, 29603)}, { AOM_CDF4(14669, 24356, 28666)},
+ { AOM_CDF4(11334, 23593, 28219)}, { AOM_CDF4(8922, 14762, 22873)},
+ { AOM_CDF4(8301, 13544, 20535)}
+ }
+ },
+ {
+ {
+ { AOM_CDF4(17113, 23733, 27081)}, { AOM_CDF4(14139, 21406, 25452)},
+ { AOM_CDF4(8552, 15002, 19776)}, { AOM_CDF4(5871, 11120, 15378)},
+ { AOM_CDF4(4455, 8616, 12253)}, { AOM_CDF4(3469, 6910, 10386)},
+ { AOM_CDF4(2255, 4553, 6782)}, { AOM_CDF4(18224, 24376, 27053)},
+ { AOM_CDF4(19290, 26710, 29614)}, { AOM_CDF4(14936, 22991, 27184)},
+ { AOM_CDF4(11238, 18951, 23762)}, { AOM_CDF4(8786, 15617, 20588)},
+ { AOM_CDF4(7317, 13228, 18003)}, { AOM_CDF4(5101, 9512, 13493)},
+ { AOM_CDF4(22639, 28222, 30210)}, { AOM_CDF4(23216, 29331, 31307)},
+ { AOM_CDF4(19075, 26762, 29895)}, { AOM_CDF4(15014, 23113, 27457)},
+ { AOM_CDF4(11938, 19857, 24752)}, { AOM_CDF4(9942, 17280, 22282)},
+ { AOM_CDF4(7167, 13144, 17752)}
+ },
+ {
+ { AOM_CDF4(15820, 22738, 26488)}, { AOM_CDF4(13530, 20885, 25216)},
+ { AOM_CDF4(8395, 15530, 20452)}, { AOM_CDF4(6574, 12321, 16380)},
+ { AOM_CDF4(5353, 10419, 14568)}, { AOM_CDF4(4613, 8446, 12381)},
+ { AOM_CDF4(3440, 7158, 9903)}, { AOM_CDF4(24247, 29051, 31224)},
+ { AOM_CDF4(22118, 28058, 30369)}, { AOM_CDF4(16498, 24768, 28389)},
+ { AOM_CDF4(12920, 21175, 26137)}, { AOM_CDF4(10730, 18619, 25352)},
+ { AOM_CDF4(10187, 16279, 22791)}, { AOM_CDF4(9310, 14631, 22127)},
+ { AOM_CDF4(24970, 30558, 32057)}, { AOM_CDF4(24801, 29942, 31698)},
+ { AOM_CDF4(22432, 28453, 30855)}, { AOM_CDF4(19054, 25680, 29580)},
+ { AOM_CDF4(14392, 23036, 28109)}, { AOM_CDF4(12495, 20947, 26650)},
+ { AOM_CDF4(12442, 20326, 26214)}
+ }
+ },
+ {
+ {
+ { AOM_CDF4(12162, 18785, 22648)}, { AOM_CDF4(12749, 19697, 23806)},
+ { AOM_CDF4(8580, 15297, 20346)}, { AOM_CDF4(6169, 11749, 16543)},
+ { AOM_CDF4(4836, 9391, 13448)}, { AOM_CDF4(3821, 7711, 11613)},
+ { AOM_CDF4(2228, 4601, 7070)}, { AOM_CDF4(16319, 24725, 28280)},
+ { AOM_CDF4(15698, 23277, 27168)}, { AOM_CDF4(12726, 20368, 25047)},
+ { AOM_CDF4(9912, 17015, 21976)}, { AOM_CDF4(7888, 14220, 19179)},
+ { AOM_CDF4(6777, 12284, 17018)}, { AOM_CDF4(4492, 8590, 12252)},
+ { AOM_CDF4(23249, 28904, 30947)}, { AOM_CDF4(21050, 27908, 30512)},
+ { AOM_CDF4(17440, 25340, 28949)}, { AOM_CDF4(14059, 22018, 26541)},
+ { AOM_CDF4(11288, 18903, 23898)}, { AOM_CDF4(9411, 16342, 21428)},
+ { AOM_CDF4(6278, 11588, 15944)}
+ },
+ {
+ { AOM_CDF4(13981, 20067, 23226)}, { AOM_CDF4(16922, 23580, 26783)},
+ { AOM_CDF4(11005, 19039, 24487)}, { AOM_CDF4(7389, 14218, 19798)},
+ { AOM_CDF4(5598, 11505, 17206)}, { AOM_CDF4(6090, 11213, 15659)},
+ { AOM_CDF4(3820, 7371, 10119)}, { AOM_CDF4(21082, 26925, 29675)},
+ { AOM_CDF4(21262, 28627, 31128)}, { AOM_CDF4(18392, 26454, 30437)},
+ { AOM_CDF4(14870, 22910, 27096)}, { AOM_CDF4(12620, 19484, 24908)},
+ { AOM_CDF4(9290, 16553, 22802)}, { AOM_CDF4(6668, 14288, 20004)},
+ { AOM_CDF4(27704, 31055, 31949)}, { AOM_CDF4(24709, 29978, 31788)},
+ { AOM_CDF4(21668, 29264, 31657)}, { AOM_CDF4(18295, 26968, 30074)},
+ { AOM_CDF4(16399, 24422, 29313)}, { AOM_CDF4(14347, 23026, 28104)},
+ { AOM_CDF4(12370, 19806, 24477)}
+ }
+ },
+ {
+ {
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}
+ },
+ {
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}
+ }
+ }
+ }
+};
+
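+/*
+ * Default CDFs for the AV1 coefficient base-level symbol, mirroring
+ * av1_default_coeff_base_multi_cdfs from libaom's av1/common/token_cdfs.h.
+ * Indexing appears to be [quantizer context][tx size][plane type
+ * (luma/chroma)][significance context]; each AOM_CDF4() entry carries the
+ * three cumulative probabilities (in 1/32768 units) of the 4-ary symbol,
+ * and the trailing "+ 1" array element presumably reserves room for the
+ * decoder's adaptation counter. AOM_CDF4(8192, 16384, 24576) is the
+ * uniform filler used for context slots that are apparently unused at a
+ * given transform size.
+ */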
+static const u16 av1_default_coeff_base_multi_cdfs
+ [TOKEN_CDF_Q_CTXS][TX_SIZES][PLANE_TYPES]
+ [SIG_COEF_CONTEXTS][CDF_SIZE(NUM_BASE_LEVELS + 2) + 1] = {
+ {
+ {
+ {
+ { AOM_CDF4(4034, 8930, 12727)}, { AOM_CDF4(18082, 29741, 31877)},
+ { AOM_CDF4(12596, 26124, 30493)}, { AOM_CDF4(9446, 21118, 27005)},
+ { AOM_CDF4(6308, 15141, 21279)}, { AOM_CDF4(2463, 6357, 9783)},
+ { AOM_CDF4(20667, 30546, 31929)}, { AOM_CDF4(13043, 26123, 30134)},
+ { AOM_CDF4(8151, 18757, 24778)}, { AOM_CDF4(5255, 12839, 18632)},
+ { AOM_CDF4(2820, 7206, 11161)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(15736, 27553, 30604)},
+ { AOM_CDF4(11210, 23794, 28787)}, { AOM_CDF4(5947, 13874, 19701)},
+ { AOM_CDF4(4215, 9323, 13891)}, { AOM_CDF4(2833, 6462, 10059)},
+ { AOM_CDF4(19605, 30393, 31582)}, { AOM_CDF4(13523, 26252, 30248)},
+ { AOM_CDF4(8446, 18622, 24512)}, { AOM_CDF4(3818, 10343, 15974)},
+ { AOM_CDF4(1481, 4117, 6796)}, { AOM_CDF4(22649, 31302, 32190)},
+ { AOM_CDF4(14829, 27127, 30449)}, { AOM_CDF4(8313, 17702, 23304)},
+ { AOM_CDF4(3022, 8301, 12786)}, { AOM_CDF4(1536, 4412, 7184)},
+ { AOM_CDF4(22354, 29774, 31372)}, { AOM_CDF4(14723, 25472, 29214)},
+ { AOM_CDF4(6673, 13745, 18662)}, { AOM_CDF4(2068, 5766, 9322)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)}
+ },
+ {
+ { AOM_CDF4(6302, 16444, 21761)}, { AOM_CDF4(23040, 31538, 32475)},
+ { AOM_CDF4(15196, 28452, 31496)}, { AOM_CDF4(10020, 22946, 28514)},
+ { AOM_CDF4(6533, 16862, 23501)}, { AOM_CDF4(3538, 9816, 15076)},
+ { AOM_CDF4(24444, 31875, 32525)}, { AOM_CDF4(15881, 28924, 31635)},
+ { AOM_CDF4(9922, 22873, 28466)}, { AOM_CDF4(6527, 16966, 23691)},
+ { AOM_CDF4(4114, 11303, 17220)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(20201, 30770, 32209)},
+ { AOM_CDF4(14754, 28071, 31258)}, { AOM_CDF4(8378, 20186, 26517)},
+ { AOM_CDF4(5916, 15299, 21978)}, { AOM_CDF4(4268, 11583, 17901)},
+ { AOM_CDF4(24361, 32025, 32581)}, { AOM_CDF4(18673, 30105, 31943)},
+ { AOM_CDF4(10196, 22244, 27576)}, { AOM_CDF4(5495, 14349, 20417)},
+ { AOM_CDF4(2676, 7415, 11498)}, { AOM_CDF4(24678, 31958, 32585)},
+ { AOM_CDF4(18629, 29906, 31831)}, { AOM_CDF4(9364, 20724, 26315)},
+ { AOM_CDF4(4641, 12318, 18094)}, { AOM_CDF4(2758, 7387, 11579)},
+ { AOM_CDF4(25433, 31842, 32469)}, { AOM_CDF4(18795, 29289, 31411)},
+ { AOM_CDF4(7644, 17584, 23592)}, { AOM_CDF4(3408, 9014, 15047)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)}
+ }
+ },
+ {
+ {
+ { AOM_CDF4(4536, 10072, 14001)}, { AOM_CDF4(25459, 31416, 32206)},
+ { AOM_CDF4(16605, 28048, 30818)}, { AOM_CDF4(11008, 22857, 27719)},
+ { AOM_CDF4(6915, 16268, 22315)}, { AOM_CDF4(2625, 6812, 10537)},
+ { AOM_CDF4(24257, 31788, 32499)}, { AOM_CDF4(16880, 29454, 31879)},
+ { AOM_CDF4(11958, 25054, 29778)}, { AOM_CDF4(7916, 18718, 25084)},
+ { AOM_CDF4(3383, 8777, 13446)}, { AOM_CDF4(22720, 31603, 32393)},
+ { AOM_CDF4(14960, 28125, 31335)}, { AOM_CDF4(9731, 22210, 27928)},
+ { AOM_CDF4(6304, 15832, 22277)}, { AOM_CDF4(2910, 7818, 12166)},
+ { AOM_CDF4(20375, 30627, 32131)}, { AOM_CDF4(13904, 27284, 30887)},
+ { AOM_CDF4(9368, 21558, 27144)}, { AOM_CDF4(5937, 14966, 21119)},
+ { AOM_CDF4(2667, 7225, 11319)}, { AOM_CDF4(23970, 31470, 32378)},
+ { AOM_CDF4(17173, 29734, 32018)}, { AOM_CDF4(12795, 25441, 29965)},
+ { AOM_CDF4(8981, 19680, 25893)}, { AOM_CDF4(4728, 11372, 16902)},
+ { AOM_CDF4(24287, 31797, 32439)}, { AOM_CDF4(16703, 29145, 31696)},
+ { AOM_CDF4(10833, 23554, 28725)}, { AOM_CDF4(6468, 16566, 23057)},
+ { AOM_CDF4(2415, 6562, 10278)}, { AOM_CDF4(26610, 32395, 32659)},
+ { AOM_CDF4(18590, 30498, 32117)}, { AOM_CDF4(12420, 25756, 29950)},
+ { AOM_CDF4(7639, 18746, 24710)}, { AOM_CDF4(3001, 8086, 12347)},
+ { AOM_CDF4(25076, 32064, 32580)}, { AOM_CDF4(17946, 30128, 32028)},
+ { AOM_CDF4(12024, 24985, 29378)}, { AOM_CDF4(7517, 18390, 24304)},
+ { AOM_CDF4(3243, 8781, 13331)}, { AOM_CDF4(8192, 16384, 24576)}
+ },
+ {
+ { AOM_CDF4(6037, 16771, 21957)}, { AOM_CDF4(24774, 31704, 32426)},
+ { AOM_CDF4(16830, 28589, 31056)}, { AOM_CDF4(10602, 22828, 27760)},
+ { AOM_CDF4(6733, 16829, 23071)}, { AOM_CDF4(3250, 8914, 13556)},
+ { AOM_CDF4(25582, 32220, 32668)}, { AOM_CDF4(18659, 30342, 32223)},
+ { AOM_CDF4(12546, 26149, 30515)}, { AOM_CDF4(8420, 20451, 26801)},
+ { AOM_CDF4(4636, 12420, 18344)}, { AOM_CDF4(27581, 32362, 32639)},
+ { AOM_CDF4(18987, 30083, 31978)}, { AOM_CDF4(11327, 24248, 29084)},
+ { AOM_CDF4(7264, 17719, 24120)}, { AOM_CDF4(3995, 10768, 16169)},
+ { AOM_CDF4(25893, 31831, 32487)}, { AOM_CDF4(16577, 28587, 31379)},
+ { AOM_CDF4(10189, 22748, 28182)}, { AOM_CDF4(6832, 17094, 23556)},
+ { AOM_CDF4(3708, 10110, 15334)}, { AOM_CDF4(25904, 32282, 32656)},
+ { AOM_CDF4(19721, 30792, 32276)}, { AOM_CDF4(12819, 26243, 30411)},
+ { AOM_CDF4(8572, 20614, 26891)}, { AOM_CDF4(5364, 14059, 20467)},
+ { AOM_CDF4(26580, 32438, 32677)}, { AOM_CDF4(20852, 31225, 32340)},
+ { AOM_CDF4(12435, 25700, 29967)}, { AOM_CDF4(8691, 20825, 26976)},
+ { AOM_CDF4(4446, 12209, 17269)}, { AOM_CDF4(27350, 32429, 32696)},
+ { AOM_CDF4(21372, 30977, 32272)}, { AOM_CDF4(12673, 25270, 29853)},
+ { AOM_CDF4(9208, 20925, 26640)}, { AOM_CDF4(5018, 13351, 18732)},
+ { AOM_CDF4(27351, 32479, 32713)}, { AOM_CDF4(21398, 31209, 32387)},
+ { AOM_CDF4(12162, 25047, 29842)}, { AOM_CDF4(7896, 18691, 25319)},
+ { AOM_CDF4(4670, 12882, 18881)}, { AOM_CDF4(8192, 16384, 24576)}
+ }
+ },
+ {
+ {
+ { AOM_CDF4(5487, 10460, 13708)}, { AOM_CDF4(21597, 28303, 30674)},
+ { AOM_CDF4(11037, 21953, 26476)}, { AOM_CDF4(8147, 17962, 22952)},
+ { AOM_CDF4(5242, 13061, 18532)}, { AOM_CDF4(1889, 5208, 8182)},
+ { AOM_CDF4(26774, 32133, 32590)}, { AOM_CDF4(17844, 29564, 31767)},
+ { AOM_CDF4(11690, 24438, 29171)}, { AOM_CDF4(7542, 18215, 24459)},
+ { AOM_CDF4(2993, 8050, 12319)}, { AOM_CDF4(28023, 32328, 32591)},
+ { AOM_CDF4(18651, 30126, 31954)}, { AOM_CDF4(12164, 25146, 29589)},
+ { AOM_CDF4(7762, 18530, 24771)}, { AOM_CDF4(3492, 9183, 13920)},
+ { AOM_CDF4(27591, 32008, 32491)}, { AOM_CDF4(17149, 28853, 31510)},
+ { AOM_CDF4(11485, 24003, 28860)}, { AOM_CDF4(7697, 18086, 24210)},
+ { AOM_CDF4(3075, 7999, 12218)}, { AOM_CDF4(28268, 32482, 32654)},
+ { AOM_CDF4(19631, 31051, 32404)}, { AOM_CDF4(13860, 27260, 31020)},
+ { AOM_CDF4(9605, 21613, 27594)}, { AOM_CDF4(4876, 12162, 17908)},
+ { AOM_CDF4(27248, 32316, 32576)}, { AOM_CDF4(18955, 30457, 32075)},
+ { AOM_CDF4(11824, 23997, 28795)}, { AOM_CDF4(7346, 18196, 24647)},
+ { AOM_CDF4(3403, 9247, 14111)}, { AOM_CDF4(29711, 32655, 32735)},
+ { AOM_CDF4(21169, 31394, 32417)}, { AOM_CDF4(13487, 27198, 30957)},
+ { AOM_CDF4(8828, 21683, 27614)}, { AOM_CDF4(4270, 11451, 17038)},
+ { AOM_CDF4(28708, 32578, 32731)}, { AOM_CDF4(20120, 31241, 32482)},
+ { AOM_CDF4(13692, 27550, 31321)}, { AOM_CDF4(9418, 22514, 28439)},
+ { AOM_CDF4(4999, 13283, 19462)}, { AOM_CDF4(8192, 16384, 24576)}
+ },
+ {
+ { AOM_CDF4(5673, 14302, 19711)}, { AOM_CDF4(26251, 30701, 31834)},
+ { AOM_CDF4(12782, 23783, 27803)}, { AOM_CDF4(9127, 20657, 25808)},
+ { AOM_CDF4(6368, 16208, 21462)}, { AOM_CDF4(2465, 7177, 10822)},
+ { AOM_CDF4(29961, 32563, 32719)}, { AOM_CDF4(18318, 29891, 31949)},
+ { AOM_CDF4(11361, 24514, 29357)}, { AOM_CDF4(7900, 19603, 25607)},
+ { AOM_CDF4(4002, 10590, 15546)}, { AOM_CDF4(29637, 32310, 32595)},
+ { AOM_CDF4(18296, 29913, 31809)}, { AOM_CDF4(10144, 21515, 26871)},
+ { AOM_CDF4(5358, 14322, 20394)}, { AOM_CDF4(3067, 8362, 13346)},
+ { AOM_CDF4(28652, 32470, 32676)}, { AOM_CDF4(17538, 30771, 32209)},
+ { AOM_CDF4(13924, 26882, 30494)}, { AOM_CDF4(10496, 22837, 27869)},
+ { AOM_CDF4(7236, 16396, 21621)}, { AOM_CDF4(30743, 32687, 32746)},
+ { AOM_CDF4(23006, 31676, 32489)}, { AOM_CDF4(14494, 27828, 31120)},
+ { AOM_CDF4(10174, 22801, 28352)}, { AOM_CDF4(6242, 15281, 21043)},
+ { AOM_CDF4(25817, 32243, 32720)}, { AOM_CDF4(18618, 31367, 32325)},
+ { AOM_CDF4(13997, 28318, 31878)}, { AOM_CDF4(12255, 26534, 31383)},
+ { AOM_CDF4(9561, 21588, 28450)}, { AOM_CDF4(28188, 32635, 32724)},
+ { AOM_CDF4(22060, 32365, 32728)}, { AOM_CDF4(18102, 30690, 32528)},
+ { AOM_CDF4(14196, 28864, 31999)}, { AOM_CDF4(12262, 25792, 30865)},
+ { AOM_CDF4(24176, 32109, 32628)}, { AOM_CDF4(18280, 29681, 31963)},
+ { AOM_CDF4(10205, 23703, 29664)}, { AOM_CDF4(7889, 20025, 27676)},
+ { AOM_CDF4(6060, 16743, 23970)}, { AOM_CDF4(8192, 16384, 24576)}
+ }
+ },
+ {
+ {
+ { AOM_CDF4(5141, 7096, 8260)}, { AOM_CDF4(27186, 29022, 29789)},
+ { AOM_CDF4(6668, 12568, 15682)}, { AOM_CDF4(2172, 6181, 8638)},
+ { AOM_CDF4(1126, 3379, 4531)}, { AOM_CDF4(443, 1361, 2254)},
+ { AOM_CDF4(26083, 31153, 32436)}, { AOM_CDF4(13486, 24603, 28483)},
+ { AOM_CDF4(6508, 14840, 19910)}, { AOM_CDF4(3386, 8800, 13286)},
+ { AOM_CDF4(1530, 4322, 7054)}, { AOM_CDF4(29639, 32080, 32548)},
+ { AOM_CDF4(15897, 27552, 30290)}, { AOM_CDF4(8588, 20047, 25383)},
+ { AOM_CDF4(4889, 13339, 19269)}, { AOM_CDF4(2240, 6871, 10498)},
+ { AOM_CDF4(28165, 32197, 32517)}, { AOM_CDF4(20735, 30427, 31568)},
+ { AOM_CDF4(14325, 24671, 27692)}, { AOM_CDF4(5119, 12554, 17805)},
+ { AOM_CDF4(1810, 5441, 8261)}, { AOM_CDF4(31212, 32724, 32748)},
+ { AOM_CDF4(23352, 31766, 32545)}, { AOM_CDF4(14669, 27570, 31059)},
+ { AOM_CDF4(8492, 20894, 27272)}, { AOM_CDF4(3644, 10194, 15204)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)}
+ },
+ {
+ { AOM_CDF4(2461, 7013, 9371)}, { AOM_CDF4(24749, 29600, 30986)},
+ { AOM_CDF4(9466, 19037, 22417)}, { AOM_CDF4(3584, 9280, 14400)},
+ { AOM_CDF4(1505, 3929, 5433)}, { AOM_CDF4(677, 1500, 2736)},
+ { AOM_CDF4(23987, 30702, 32117)}, { AOM_CDF4(13554, 24571, 29263)},
+ { AOM_CDF4(6211, 14556, 21155)}, { AOM_CDF4(3135, 10972, 15625)},
+ { AOM_CDF4(2435, 7127, 11427)}, { AOM_CDF4(31300, 32532, 32550)},
+ { AOM_CDF4(14757, 30365, 31954)}, { AOM_CDF4(4405, 11612, 18553)},
+ { AOM_CDF4(580, 4132, 7322)}, { AOM_CDF4(1695, 10169, 14124)},
+ { AOM_CDF4(30008, 32282, 32591)}, { AOM_CDF4(19244, 30108, 31748)},
+ { AOM_CDF4(11180, 24158, 29555)}, { AOM_CDF4(5650, 14972, 19209)},
+ { AOM_CDF4(2114, 5109, 8456)}, { AOM_CDF4(31856, 32716, 32748)},
+ { AOM_CDF4(23012, 31664, 32572)}, { AOM_CDF4(13694, 26656, 30636)},
+ { AOM_CDF4(8142, 19508, 26093)}, { AOM_CDF4(4253, 10955, 16724)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)}
+ }
+ },
+ {
+ {
+ { AOM_CDF4(601, 983, 1311)}, { AOM_CDF4(18725, 23406, 28087)},
+ { AOM_CDF4(5461, 8192, 10923)}, { AOM_CDF4(3781, 15124, 21425)},
+ { AOM_CDF4(2587, 7761, 12072)}, { AOM_CDF4(106, 458, 810)},
+ { AOM_CDF4(22282, 29710, 31894)}, { AOM_CDF4(8508, 20926, 25984)},
+ { AOM_CDF4(3726, 12713, 18083)}, { AOM_CDF4(1620, 7112, 10893)},
+ { AOM_CDF4(729, 2236, 3495)}, { AOM_CDF4(30163, 32474, 32684)},
+ { AOM_CDF4(18304, 30464, 32000)}, { AOM_CDF4(11443, 26526, 29647)},
+ { AOM_CDF4(6007, 15292, 21299)}, { AOM_CDF4(2234, 6703, 8937)},
+ { AOM_CDF4(30954, 32177, 32571)}, { AOM_CDF4(17363, 29562, 31076)},
+ { AOM_CDF4(9686, 22464, 27410)}, { AOM_CDF4(8192, 16384, 21390)},
+ { AOM_CDF4(1755, 8046, 11264)}, { AOM_CDF4(31168, 32734, 32748)},
+ { AOM_CDF4(22486, 31441, 32471)}, { AOM_CDF4(12833, 25627, 29738)},
+ { AOM_CDF4(6980, 17379, 23122)}, { AOM_CDF4(3111, 8887, 13479)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)}
+ },
+ {
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)}
+ }
+ }
+ },
+ {
+ {
+ {
+ { AOM_CDF4(6041, 11854, 15927)}, { AOM_CDF4(20326, 30905, 32251)},
+ { AOM_CDF4(14164, 26831, 30725)}, { AOM_CDF4(9760, 20647, 26585)},
+ { AOM_CDF4(6416, 14953, 21219)}, { AOM_CDF4(2966, 7151, 10891)},
+ { AOM_CDF4(23567, 31374, 32254)}, { AOM_CDF4(14978, 27416, 30946)},
+ { AOM_CDF4(9434, 20225, 26254)}, { AOM_CDF4(6658, 14558, 20535)},
+ { AOM_CDF4(3916, 8677, 12989)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(18088, 29545, 31587)},
+ { AOM_CDF4(13062, 25843, 30073)}, { AOM_CDF4(8940, 16827, 22251)},
+ { AOM_CDF4(7654, 13220, 17973)}, { AOM_CDF4(5733, 10316, 14456)},
+ { AOM_CDF4(22879, 31388, 32114)}, { AOM_CDF4(15215, 27993, 30955)},
+ { AOM_CDF4(9397, 19445, 24978)}, { AOM_CDF4(3442, 9813, 15344)},
+ { AOM_CDF4(1368, 3936, 6532)}, { AOM_CDF4(25494, 32033, 32406)},
+ { AOM_CDF4(16772, 27963, 30718)}, { AOM_CDF4(9419, 18165, 23260)},
+ { AOM_CDF4(2677, 7501, 11797)}, { AOM_CDF4(1516, 4344, 7170)},
+ { AOM_CDF4(26556, 31454, 32101)}, { AOM_CDF4(17128, 27035, 30108)},
+ { AOM_CDF4(8324, 15344, 20249)}, { AOM_CDF4(1903, 5696, 9469)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)}
+ },
+ {
+ { AOM_CDF4(8455, 19003, 24368)}, { AOM_CDF4(23563, 32021, 32604)},
+ { AOM_CDF4(16237, 29446, 31935)}, { AOM_CDF4(10724, 23999, 29358)},
+ { AOM_CDF4(6725, 17528, 24416)}, { AOM_CDF4(3927, 10927, 16825)},
+ { AOM_CDF4(26313, 32288, 32634)}, { AOM_CDF4(17430, 30095, 32095)},
+ { AOM_CDF4(11116, 24606, 29679)}, { AOM_CDF4(7195, 18384, 25269)},
+ { AOM_CDF4(4726, 12852, 19315)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(22822, 31648, 32483)},
+ { AOM_CDF4(16724, 29633, 31929)}, { AOM_CDF4(10261, 23033, 28725)},
+ { AOM_CDF4(7029, 17840, 24528)}, { AOM_CDF4(4867, 13886, 21502)},
+ { AOM_CDF4(25298, 31892, 32491)}, { AOM_CDF4(17809, 29330, 31512)},
+ { AOM_CDF4(9668, 21329, 26579)}, { AOM_CDF4(4774, 12956, 18976)},
+ { AOM_CDF4(2322, 7030, 11540)}, { AOM_CDF4(25472, 31920, 32543)},
+ { AOM_CDF4(17957, 29387, 31632)}, { AOM_CDF4(9196, 20593, 26400)},
+ { AOM_CDF4(4680, 12705, 19202)}, { AOM_CDF4(2917, 8456, 13436)},
+ { AOM_CDF4(26471, 32059, 32574)}, { AOM_CDF4(18458, 29783, 31909)},
+ { AOM_CDF4(8400, 19464, 25956)}, { AOM_CDF4(3812, 10973, 17206)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)}
+ }
+ },
+ {
+ {
+ { AOM_CDF4(6779, 13743, 17678)}, { AOM_CDF4(24806, 31797, 32457)},
+ { AOM_CDF4(17616, 29047, 31372)}, { AOM_CDF4(11063, 23175, 28003)},
+ { AOM_CDF4(6521, 16110, 22324)}, { AOM_CDF4(2764, 7504, 11654)},
+ { AOM_CDF4(25266, 32367, 32637)}, { AOM_CDF4(19054, 30553, 32175)},
+ { AOM_CDF4(12139, 25212, 29807)}, { AOM_CDF4(7311, 18162, 24704)},
+ { AOM_CDF4(3397, 9164, 14074)}, { AOM_CDF4(25988, 32208, 32522)},
+ { AOM_CDF4(16253, 28912, 31526)}, { AOM_CDF4(9151, 21387, 27372)},
+ { AOM_CDF4(5688, 14915, 21496)}, { AOM_CDF4(2717, 7627, 12004)},
+ { AOM_CDF4(23144, 31855, 32443)}, { AOM_CDF4(16070, 28491, 31325)},
+ { AOM_CDF4(8702, 20467, 26517)}, { AOM_CDF4(5243, 13956, 20367)},
+ { AOM_CDF4(2621, 7335, 11567)}, { AOM_CDF4(26636, 32340, 32630)},
+ { AOM_CDF4(19990, 31050, 32341)}, { AOM_CDF4(13243, 26105, 30315)},
+ { AOM_CDF4(8588, 19521, 25918)}, { AOM_CDF4(4717, 11585, 17304)},
+ { AOM_CDF4(25844, 32292, 32582)}, { AOM_CDF4(19090, 30635, 32097)},
+ { AOM_CDF4(11963, 24546, 28939)}, { AOM_CDF4(6218, 16087, 22354)},
+ { AOM_CDF4(2340, 6608, 10426)}, { AOM_CDF4(28046, 32576, 32694)},
+ { AOM_CDF4(21178, 31313, 32296)}, { AOM_CDF4(13486, 26184, 29870)},
+ { AOM_CDF4(7149, 17871, 23723)}, { AOM_CDF4(2833, 7958, 12259)},
+ { AOM_CDF4(27710, 32528, 32686)}, { AOM_CDF4(20674, 31076, 32268)},
+ { AOM_CDF4(12413, 24955, 29243)}, { AOM_CDF4(6676, 16927, 23097)},
+ { AOM_CDF4(2966, 8333, 12919)}, { AOM_CDF4(8192, 16384, 24576)}
+ },
+ {
+ { AOM_CDF4(8639, 19339, 24429)}, { AOM_CDF4(24404, 31837, 32525)},
+ { AOM_CDF4(16997, 29425, 31784)}, { AOM_CDF4(11253, 24234, 29149)},
+ { AOM_CDF4(6751, 17394, 24028)}, { AOM_CDF4(3490, 9830, 15191)},
+ { AOM_CDF4(26283, 32471, 32714)}, { AOM_CDF4(19599, 31168, 32442)},
+ { AOM_CDF4(13146, 26954, 30893)}, { AOM_CDF4(8214, 20588, 26890)},
+ { AOM_CDF4(4699, 13081, 19300)}, { AOM_CDF4(28212, 32458, 32669)},
+ { AOM_CDF4(18594, 30316, 32100)}, { AOM_CDF4(11219, 24408, 29234)},
+ { AOM_CDF4(6865, 17656, 24149)}, { AOM_CDF4(3678, 10362, 16006)},
+ { AOM_CDF4(25825, 32136, 32616)}, { AOM_CDF4(17313, 29853, 32021)},
+ { AOM_CDF4(11197, 24471, 29472)}, { AOM_CDF4(6947, 17781, 24405)},
+ { AOM_CDF4(3768, 10660, 16261)}, { AOM_CDF4(27352, 32500, 32706)},
+ { AOM_CDF4(20850, 31468, 32469)}, { AOM_CDF4(14021, 27707, 31133)},
+ { AOM_CDF4(8964, 21748, 27838)}, { AOM_CDF4(5437, 14665, 21187)},
+ { AOM_CDF4(26304, 32492, 32698)}, { AOM_CDF4(20409, 31380, 32385)},
+ { AOM_CDF4(13682, 27222, 30632)}, { AOM_CDF4(8974, 21236, 26685)},
+ { AOM_CDF4(4234, 11665, 16934)}, { AOM_CDF4(26273, 32357, 32711)},
+ { AOM_CDF4(20672, 31242, 32441)}, { AOM_CDF4(14172, 27254, 30902)},
+ { AOM_CDF4(9870, 21898, 27275)}, { AOM_CDF4(5164, 13506, 19270)},
+ { AOM_CDF4(26725, 32459, 32728)}, { AOM_CDF4(20991, 31442, 32527)},
+ { AOM_CDF4(13071, 26434, 30811)}, { AOM_CDF4(8184, 20090, 26742)},
+ { AOM_CDF4(4803, 13255, 19895)}, { AOM_CDF4(8192, 16384, 24576)}
+ }
+ },
+ {
+ {
+ { AOM_CDF4(7555, 14942, 18501)}, { AOM_CDF4(24410, 31178, 32287)},
+ { AOM_CDF4(14394, 26738, 30253)}, { AOM_CDF4(8413, 19554, 25195)},
+ { AOM_CDF4(4766, 12924, 18785)}, { AOM_CDF4(2029, 5806, 9207)},
+ { AOM_CDF4(26776, 32364, 32663)}, { AOM_CDF4(18732, 29967, 31931)},
+ { AOM_CDF4(11005, 23786, 28852)}, { AOM_CDF4(6466, 16909, 23510)},
+ { AOM_CDF4(3044, 8638, 13419)}, { AOM_CDF4(29208, 32582, 32704)},
+ { AOM_CDF4(20068, 30857, 32208)}, { AOM_CDF4(12003, 25085, 29595)},
+ { AOM_CDF4(6947, 17750, 24189)}, { AOM_CDF4(3245, 9103, 14007)},
+ { AOM_CDF4(27359, 32465, 32669)}, { AOM_CDF4(19421, 30614, 32174)},
+ { AOM_CDF4(11915, 25010, 29579)}, { AOM_CDF4(6950, 17676, 24074)},
+ { AOM_CDF4(3007, 8473, 13096)}, { AOM_CDF4(29002, 32676, 32735)},
+ { AOM_CDF4(22102, 31849, 32576)}, { AOM_CDF4(14408, 28009, 31405)},
+ { AOM_CDF4(9027, 21679, 27931)}, { AOM_CDF4(4694, 12678, 18748)},
+ { AOM_CDF4(28216, 32528, 32682)}, { AOM_CDF4(20849, 31264, 32318)},
+ { AOM_CDF4(12756, 25815, 29751)}, { AOM_CDF4(7565, 18801, 24923)},
+ { AOM_CDF4(3509, 9533, 14477)}, { AOM_CDF4(30133, 32687, 32739)},
+ { AOM_CDF4(23063, 31910, 32515)}, { AOM_CDF4(14588, 28051, 31132)},
+ { AOM_CDF4(9085, 21649, 27457)}, { AOM_CDF4(4261, 11654, 17264)},
+ { AOM_CDF4(29518, 32691, 32748)}, { AOM_CDF4(22451, 31959, 32613)},
+ { AOM_CDF4(14864, 28722, 31700)}, { AOM_CDF4(9695, 22964, 28716)},
+ { AOM_CDF4(4932, 13358, 19502)}, { AOM_CDF4(8192, 16384, 24576)}
+ },
+ {
+ { AOM_CDF4(6465, 16958, 21688)}, { AOM_CDF4(25199, 31514, 32360)},
+ { AOM_CDF4(14774, 27149, 30607)}, { AOM_CDF4(9257, 21438, 26972)},
+ { AOM_CDF4(5723, 15183, 21882)}, { AOM_CDF4(3150, 8879, 13731)},
+ { AOM_CDF4(26989, 32262, 32682)}, { AOM_CDF4(17396, 29937, 32085)},
+ { AOM_CDF4(11387, 24901, 29784)}, { AOM_CDF4(7289, 18821, 25548)},
+ { AOM_CDF4(3734, 10577, 16086)}, { AOM_CDF4(29728, 32501, 32695)},
+ { AOM_CDF4(17431, 29701, 31903)}, { AOM_CDF4(9921, 22826, 28300)},
+ { AOM_CDF4(5896, 15434, 22068)}, { AOM_CDF4(3430, 9646, 14757)},
+ { AOM_CDF4(28614, 32511, 32705)}, { AOM_CDF4(19364, 30638, 32263)},
+ { AOM_CDF4(13129, 26254, 30402)}, { AOM_CDF4(8754, 20484, 26440)},
+ { AOM_CDF4(4378, 11607, 17110)}, { AOM_CDF4(30292, 32671, 32744)},
+ { AOM_CDF4(21780, 31603, 32501)}, { AOM_CDF4(14314, 27829, 31291)},
+ { AOM_CDF4(9611, 22327, 28263)}, { AOM_CDF4(4890, 13087, 19065)},
+ { AOM_CDF4(25862, 32567, 32733)}, { AOM_CDF4(20794, 32050, 32567)},
+ { AOM_CDF4(17243, 30625, 32254)}, { AOM_CDF4(13283, 27628, 31474)},
+ { AOM_CDF4(9669, 22532, 28918)}, { AOM_CDF4(27435, 32697, 32748)},
+ { AOM_CDF4(24922, 32390, 32714)}, { AOM_CDF4(21449, 31504, 32536)},
+ { AOM_CDF4(16392, 29729, 31832)}, { AOM_CDF4(11692, 24884, 29076)},
+ { AOM_CDF4(24193, 32290, 32735)}, { AOM_CDF4(18909, 31104, 32563)},
+ { AOM_CDF4(12236, 26841, 31403)}, { AOM_CDF4(8171, 21840, 29082)},
+ { AOM_CDF4(7224, 17280, 25275)}, { AOM_CDF4(8192, 16384, 24576)}
+ }
+ },
+ {
+ {
+ { AOM_CDF4(3078, 6839, 9890)}, { AOM_CDF4(13837, 20450, 24479)},
+ { AOM_CDF4(5914, 14222, 19328)}, { AOM_CDF4(3866, 10267, 14762)},
+ { AOM_CDF4(2612, 7208, 11042)}, { AOM_CDF4(1067, 2991, 4776)},
+ { AOM_CDF4(25817, 31646, 32529)}, { AOM_CDF4(13708, 26338, 30385)},
+ { AOM_CDF4(7328, 18585, 24870)}, { AOM_CDF4(4691, 13080, 19276)},
+ { AOM_CDF4(1825, 5253, 8352)}, { AOM_CDF4(29386, 32315, 32624)},
+ { AOM_CDF4(17160, 29001, 31360)}, { AOM_CDF4(9602, 21862, 27396)},
+ { AOM_CDF4(5915, 15772, 22148)}, { AOM_CDF4(2786, 7779, 12047)},
+ { AOM_CDF4(29246, 32450, 32663)}, { AOM_CDF4(18696, 29929, 31818)},
+ { AOM_CDF4(10510, 23369, 28560)}, { AOM_CDF4(6229, 16499, 23125)},
+ { AOM_CDF4(2608, 7448, 11705)}, { AOM_CDF4(30753, 32710, 32748)},
+ { AOM_CDF4(21638, 31487, 32503)}, { AOM_CDF4(12937, 26854, 30870)},
+ { AOM_CDF4(8182, 20596, 26970)}, { AOM_CDF4(3637, 10269, 15497)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)}
+ },
+ {
+ { AOM_CDF4(5244, 12150, 16906)}, { AOM_CDF4(20486, 26858, 29701)},
+ { AOM_CDF4(7756, 18317, 23735)}, { AOM_CDF4(3452, 9256, 13146)},
+ { AOM_CDF4(2020, 5206, 8229)}, { AOM_CDF4(1801, 4993, 7903)},
+ { AOM_CDF4(27051, 31858, 32531)}, { AOM_CDF4(15988, 27531, 30619)},
+ { AOM_CDF4(9188, 21484, 26719)}, { AOM_CDF4(6273, 17186, 23800)},
+ { AOM_CDF4(3108, 9355, 14764)}, { AOM_CDF4(31076, 32520, 32680)},
+ { AOM_CDF4(18119, 30037, 31850)}, { AOM_CDF4(10244, 22969, 27472)},
+ { AOM_CDF4(4692, 14077, 19273)}, { AOM_CDF4(3694, 11677, 17556)},
+ { AOM_CDF4(30060, 32581, 32720)}, { AOM_CDF4(21011, 30775, 32120)},
+ { AOM_CDF4(11931, 24820, 29289)}, { AOM_CDF4(7119, 17662, 24356)},
+ { AOM_CDF4(3833, 10706, 16304)}, { AOM_CDF4(31954, 32731, 32748)},
+ { AOM_CDF4(23913, 31724, 32489)}, { AOM_CDF4(15520, 28060, 31286)},
+ { AOM_CDF4(11517, 23008, 28571)}, { AOM_CDF4(6193, 14508, 20629)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)}
+ }
+ },
+ {
+ {
+ { AOM_CDF4(1035, 2807, 4156)}, { AOM_CDF4(13162, 18138, 20939)},
+ { AOM_CDF4(2696, 6633, 8755)}, { AOM_CDF4(1373, 4161, 6853)},
+ { AOM_CDF4(1099, 2746, 4716)}, { AOM_CDF4(340, 1021, 1599)},
+ { AOM_CDF4(22826, 30419, 32135)}, { AOM_CDF4(10395, 21762, 26942)},
+ { AOM_CDF4(4726, 12407, 17361)}, { AOM_CDF4(2447, 7080, 10593)},
+ { AOM_CDF4(1227, 3717, 6011)}, { AOM_CDF4(28156, 31424, 31934)},
+ { AOM_CDF4(16915, 27754, 30373)}, { AOM_CDF4(9148, 20990, 26431)},
+ { AOM_CDF4(5950, 15515, 21148)}, { AOM_CDF4(2492, 7327, 11526)},
+ { AOM_CDF4(30602, 32477, 32670)}, { AOM_CDF4(20026, 29955, 31568)},
+ { AOM_CDF4(11220, 23628, 28105)}, { AOM_CDF4(6652, 17019, 22973)},
+ { AOM_CDF4(3064, 8536, 13043)}, { AOM_CDF4(31769, 32724, 32748)},
+ { AOM_CDF4(22230, 30887, 32373)}, { AOM_CDF4(12234, 25079, 29731)},
+ { AOM_CDF4(7326, 18816, 25353)}, { AOM_CDF4(3933, 10907, 16616)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)}
+ },
+ {
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)}
+ }
+ }
+ },
+ {
+ {
+ {
+ { AOM_CDF4(8896, 16227, 20630)}, { AOM_CDF4(23629, 31782, 32527)},
+ { AOM_CDF4(15173, 27755, 31321)}, { AOM_CDF4(10158, 21233, 27382)},
+ { AOM_CDF4(6420, 14857, 21558)}, { AOM_CDF4(3269, 8155, 12646)},
+ { AOM_CDF4(24835, 32009, 32496)}, { AOM_CDF4(16509, 28421, 31579)},
+ { AOM_CDF4(10957, 21514, 27418)}, { AOM_CDF4(7881, 15930, 22096)},
+ { AOM_CDF4(5388, 10960, 15918)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(20745, 30773, 32093)},
+ { AOM_CDF4(15200, 27221, 30861)}, { AOM_CDF4(13032, 20873, 25667)},
+ { AOM_CDF4(12285, 18663, 23494)}, { AOM_CDF4(11563, 17481, 21489)},
+ { AOM_CDF4(26260, 31982, 32320)}, { AOM_CDF4(15397, 28083, 31100)},
+ { AOM_CDF4(9742, 19217, 24824)}, { AOM_CDF4(3261, 9629, 15362)},
+ { AOM_CDF4(1480, 4322, 7499)}, { AOM_CDF4(27599, 32256, 32460)},
+ { AOM_CDF4(16857, 27659, 30774)}, { AOM_CDF4(9551, 18290, 23748)},
+ { AOM_CDF4(3052, 8933, 14103)}, { AOM_CDF4(2021, 5910, 9787)},
+ { AOM_CDF4(29005, 32015, 32392)}, { AOM_CDF4(17677, 27694, 30863)},
+ { AOM_CDF4(9204, 17356, 23219)}, { AOM_CDF4(2403, 7516, 12814)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)}
+ },
+ {
+ { AOM_CDF4(10808, 22056, 26896)}, { AOM_CDF4(25739, 32313, 32676)},
+ { AOM_CDF4(17288, 30203, 32221)}, { AOM_CDF4(11359, 24878, 29896)},
+ { AOM_CDF4(6949, 17767, 24893)}, { AOM_CDF4(4287, 11796, 18071)},
+ { AOM_CDF4(27880, 32521, 32705)}, { AOM_CDF4(19038, 31004, 32414)},
+ { AOM_CDF4(12564, 26345, 30768)}, { AOM_CDF4(8269, 19947, 26779)},
+ { AOM_CDF4(5674, 14657, 21674)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(25742, 32319, 32671)},
+ { AOM_CDF4(19557, 31164, 32454)}, { AOM_CDF4(13381, 26381, 30755)},
+ { AOM_CDF4(10101, 21466, 26722)}, { AOM_CDF4(9209, 19650, 26825)},
+ { AOM_CDF4(27107, 31917, 32432)}, { AOM_CDF4(18056, 28893, 31203)},
+ { AOM_CDF4(10200, 21434, 26764)}, { AOM_CDF4(4660, 12913, 19502)},
+ { AOM_CDF4(2368, 6930, 12504)}, { AOM_CDF4(26960, 32158, 32613)},
+ { AOM_CDF4(18628, 30005, 32031)}, { AOM_CDF4(10233, 22442, 28232)},
+ { AOM_CDF4(5471, 14630, 21516)}, { AOM_CDF4(3235, 10767, 17109)},
+ { AOM_CDF4(27696, 32440, 32692)}, { AOM_CDF4(20032, 31167, 32438)},
+ { AOM_CDF4(8700, 21341, 28442)}, { AOM_CDF4(5662, 14831, 21795)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)}
+ }
+ },
+ {
+ {
+ { AOM_CDF4(9704, 17294, 21132)}, { AOM_CDF4(26762, 32278, 32633)},
+ { AOM_CDF4(18382, 29620, 31819)}, { AOM_CDF4(10891, 23475, 28723)},
+ { AOM_CDF4(6358, 16583, 23309)}, { AOM_CDF4(3248, 9118, 14141)},
+ { AOM_CDF4(27204, 32573, 32699)}, { AOM_CDF4(19818, 30824, 32329)},
+ { AOM_CDF4(11772, 25120, 30041)}, { AOM_CDF4(6995, 18033, 25039)},
+ { AOM_CDF4(3752, 10442, 16098)}, { AOM_CDF4(27222, 32256, 32559)},
+ { AOM_CDF4(15356, 28399, 31475)}, { AOM_CDF4(8821, 20635, 27057)},
+ { AOM_CDF4(5511, 14404, 21239)}, { AOM_CDF4(2935, 8222, 13051)},
+ { AOM_CDF4(24875, 32120, 32529)}, { AOM_CDF4(15233, 28265, 31445)},
+ { AOM_CDF4(8605, 20570, 26932)}, { AOM_CDF4(5431, 14413, 21196)},
+ { AOM_CDF4(2994, 8341, 13223)}, { AOM_CDF4(28201, 32604, 32700)},
+ { AOM_CDF4(21041, 31446, 32456)}, { AOM_CDF4(13221, 26213, 30475)},
+ { AOM_CDF4(8255, 19385, 26037)}, { AOM_CDF4(4930, 12585, 18830)},
+ { AOM_CDF4(28768, 32448, 32627)}, { AOM_CDF4(19705, 30561, 32021)},
+ { AOM_CDF4(11572, 23589, 28220)}, { AOM_CDF4(5532, 15034, 21446)},
+ { AOM_CDF4(2460, 7150, 11456)}, { AOM_CDF4(29874, 32619, 32699)},
+ { AOM_CDF4(21621, 31071, 32201)}, { AOM_CDF4(12511, 24747, 28992)},
+ { AOM_CDF4(6281, 16395, 22748)}, { AOM_CDF4(3246, 9278, 14497)},
+ { AOM_CDF4(29715, 32625, 32712)}, { AOM_CDF4(20958, 31011, 32283)},
+ { AOM_CDF4(11233, 23671, 28806)}, { AOM_CDF4(6012, 16128, 22868)},
+ { AOM_CDF4(3427, 9851, 15414)}, { AOM_CDF4(8192, 16384, 24576)}
+ },
+ {
+ { AOM_CDF4(11016, 22111, 26794)}, { AOM_CDF4(25946, 32357, 32677)},
+ { AOM_CDF4(17890, 30452, 32252)}, { AOM_CDF4(11678, 25142, 29816)},
+ { AOM_CDF4(6720, 17534, 24584)}, { AOM_CDF4(4230, 11665, 17820)},
+ { AOM_CDF4(28400, 32623, 32747)}, { AOM_CDF4(21164, 31668, 32575)},
+ { AOM_CDF4(13572, 27388, 31182)}, { AOM_CDF4(8234, 20750, 27358)},
+ { AOM_CDF4(5065, 14055, 20897)}, { AOM_CDF4(28981, 32547, 32705)},
+ { AOM_CDF4(18681, 30543, 32239)}, { AOM_CDF4(10919, 24075, 29286)},
+ { AOM_CDF4(6431, 17199, 24077)}, { AOM_CDF4(3819, 10464, 16618)},
+ { AOM_CDF4(26870, 32467, 32693)}, { AOM_CDF4(19041, 30831, 32347)},
+ { AOM_CDF4(11794, 25211, 30016)}, { AOM_CDF4(6888, 18019, 24970)},
+ { AOM_CDF4(4370, 12363, 18992)}, { AOM_CDF4(29578, 32670, 32744)},
+ { AOM_CDF4(23159, 32007, 32613)}, { AOM_CDF4(15315, 28669, 31676)},
+ { AOM_CDF4(9298, 22607, 28782)}, { AOM_CDF4(6144, 15913, 22968)},
+ { AOM_CDF4(28110, 32499, 32669)}, { AOM_CDF4(21574, 30937, 32015)},
+ { AOM_CDF4(12759, 24818, 28727)}, { AOM_CDF4(6545, 16761, 23042)},
+ { AOM_CDF4(3649, 10597, 16833)}, { AOM_CDF4(28163, 32552, 32728)},
+ { AOM_CDF4(22101, 31469, 32464)}, { AOM_CDF4(13160, 25472, 30143)},
+ { AOM_CDF4(7303, 18684, 25468)}, { AOM_CDF4(5241, 13975, 20955)},
+ { AOM_CDF4(28400, 32631, 32744)}, { AOM_CDF4(22104, 31793, 32603)},
+ { AOM_CDF4(13557, 26571, 30846)}, { AOM_CDF4(7749, 19861, 26675)},
+ { AOM_CDF4(4873, 14030, 21234)}, { AOM_CDF4(8192, 16384, 24576)}
+ }
+ },
+ {
+ {
+ { AOM_CDF4(9800, 17635, 21073)}, { AOM_CDF4(26153, 31885, 32527)},
+ { AOM_CDF4(15038, 27852, 31006)}, { AOM_CDF4(8718, 20564, 26486)},
+ { AOM_CDF4(5128, 14076, 20514)}, { AOM_CDF4(2636, 7566, 11925)},
+ { AOM_CDF4(27551, 32504, 32701)}, { AOM_CDF4(18310, 30054, 32100)},
+ { AOM_CDF4(10211, 23420, 29082)}, { AOM_CDF4(6222, 16876, 23916)},
+ { AOM_CDF4(3462, 9954, 15498)}, { AOM_CDF4(29991, 32633, 32721)},
+ { AOM_CDF4(19883, 30751, 32201)}, { AOM_CDF4(11141, 24184, 29285)},
+ { AOM_CDF4(6420, 16940, 23774)}, { AOM_CDF4(3392, 9753, 15118)},
+ { AOM_CDF4(28465, 32616, 32712)}, { AOM_CDF4(19850, 30702, 32244)},
+ { AOM_CDF4(10983, 24024, 29223)}, { AOM_CDF4(6294, 16770, 23582)},
+ { AOM_CDF4(3244, 9283, 14509)}, { AOM_CDF4(30023, 32717, 32748)},
+ { AOM_CDF4(22940, 32032, 32626)}, { AOM_CDF4(14282, 27928, 31473)},
+ { AOM_CDF4(8562, 21327, 27914)}, { AOM_CDF4(4846, 13393, 19919)},
+ { AOM_CDF4(29981, 32590, 32695)}, { AOM_CDF4(20465, 30963, 32166)},
+ { AOM_CDF4(11479, 23579, 28195)}, { AOM_CDF4(5916, 15648, 22073)},
+ { AOM_CDF4(3031, 8605, 13398)}, { AOM_CDF4(31146, 32691, 32739)},
+ { AOM_CDF4(23106, 31724, 32444)}, { AOM_CDF4(13783, 26738, 30439)},
+ { AOM_CDF4(7852, 19468, 25807)}, { AOM_CDF4(3860, 11124, 16853)},
+ { AOM_CDF4(31014, 32724, 32748)}, { AOM_CDF4(23629, 32109, 32628)},
+ { AOM_CDF4(14747, 28115, 31403)}, { AOM_CDF4(8545, 21242, 27478)},
+ { AOM_CDF4(4574, 12781, 19067)}, { AOM_CDF4(8192, 16384, 24576)}
+ },
+ {
+ { AOM_CDF4(9185, 19694, 24688)}, { AOM_CDF4(26081, 31985, 32621)},
+ { AOM_CDF4(16015, 29000, 31787)}, { AOM_CDF4(10542, 23690, 29206)},
+ { AOM_CDF4(6732, 17945, 24677)}, { AOM_CDF4(3916, 11039, 16722)},
+ { AOM_CDF4(28224, 32566, 32744)}, { AOM_CDF4(19100, 31138, 32485)},
+ { AOM_CDF4(12528, 26620, 30879)}, { AOM_CDF4(7741, 20277, 26885)},
+ { AOM_CDF4(4566, 12845, 18990)}, { AOM_CDF4(29933, 32593, 32718)},
+ { AOM_CDF4(17670, 30333, 32155)}, { AOM_CDF4(10385, 23600, 28909)},
+ { AOM_CDF4(6243, 16236, 22407)}, { AOM_CDF4(3976, 10389, 16017)},
+ { AOM_CDF4(28377, 32561, 32738)}, { AOM_CDF4(19366, 31175, 32482)},
+ { AOM_CDF4(13327, 27175, 31094)}, { AOM_CDF4(8258, 20769, 27143)},
+ { AOM_CDF4(4703, 13198, 19527)}, { AOM_CDF4(31086, 32706, 32748)},
+ { AOM_CDF4(22853, 31902, 32583)}, { AOM_CDF4(14759, 28186, 31419)},
+ { AOM_CDF4(9284, 22382, 28348)}, { AOM_CDF4(5585, 15192, 21868)},
+ { AOM_CDF4(28291, 32652, 32746)}, { AOM_CDF4(19849, 32107, 32571)},
+ { AOM_CDF4(14834, 26818, 29214)}, { AOM_CDF4(10306, 22594, 28672)},
+ { AOM_CDF4(6615, 17384, 23384)}, { AOM_CDF4(28947, 32604, 32745)},
+ { AOM_CDF4(25625, 32289, 32646)}, { AOM_CDF4(18758, 28672, 31403)},
+ { AOM_CDF4(10017, 23430, 28523)}, { AOM_CDF4(6862, 15269, 22131)},
+ { AOM_CDF4(23933, 32509, 32739)}, { AOM_CDF4(19927, 31495, 32631)},
+ { AOM_CDF4(11903, 26023, 30621)}, { AOM_CDF4(7026, 20094, 27252)},
+ { AOM_CDF4(5998, 18106, 24437)}, { AOM_CDF4(8192, 16384, 24576)}
+ }
+ },
+ {
+ {
+ { AOM_CDF4(4456, 11274, 15533)}, { AOM_CDF4(21219, 29079, 31616)},
+ { AOM_CDF4(11173, 23774, 28567)}, { AOM_CDF4(7282, 18293, 24263)},
+ { AOM_CDF4(4890, 13286, 19115)}, { AOM_CDF4(1890, 5508, 8659)},
+ { AOM_CDF4(26651, 32136, 32647)}, { AOM_CDF4(14630, 28254, 31455)},
+ { AOM_CDF4(8716, 21287, 27395)}, { AOM_CDF4(5615, 15331, 22008)},
+ { AOM_CDF4(2675, 7700, 12150)}, { AOM_CDF4(29954, 32526, 32690)},
+ { AOM_CDF4(16126, 28982, 31633)}, { AOM_CDF4(9030, 21361, 27352)},
+ { AOM_CDF4(5411, 14793, 21271)}, { AOM_CDF4(2943, 8422, 13163)},
+ { AOM_CDF4(29539, 32601, 32730)}, { AOM_CDF4(18125, 30385, 32201)},
+ { AOM_CDF4(10422, 24090, 29468)}, { AOM_CDF4(6468, 17487, 24438)},
+ { AOM_CDF4(2970, 8653, 13531)}, { AOM_CDF4(30912, 32715, 32748)},
+ { AOM_CDF4(20666, 31373, 32497)}, { AOM_CDF4(12509, 26640, 30917)},
+ { AOM_CDF4(8058, 20629, 27290)}, { AOM_CDF4(4231, 12006, 18052)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)}
+ },
+ {
+ { AOM_CDF4(10202, 20633, 25484)}, { AOM_CDF4(27336, 31445, 32352)},
+ { AOM_CDF4(12420, 24384, 28552)}, { AOM_CDF4(7648, 18115, 23856)},
+ { AOM_CDF4(5662, 14341, 19902)}, { AOM_CDF4(3611, 10328, 15390)},
+ { AOM_CDF4(30945, 32616, 32736)}, { AOM_CDF4(18682, 30505, 32253)},
+ { AOM_CDF4(11513, 25336, 30203)}, { AOM_CDF4(7449, 19452, 26148)},
+ { AOM_CDF4(4482, 13051, 18886)}, { AOM_CDF4(32022, 32690, 32747)},
+ { AOM_CDF4(18578, 30501, 32146)}, { AOM_CDF4(11249, 23368, 28631)},
+ { AOM_CDF4(5645, 16958, 22158)}, { AOM_CDF4(5009, 11444, 16637)},
+ { AOM_CDF4(31357, 32710, 32748)}, { AOM_CDF4(21552, 31494, 32504)},
+ { AOM_CDF4(13891, 27677, 31340)}, { AOM_CDF4(9051, 22098, 28172)},
+ { AOM_CDF4(5190, 13377, 19486)}, { AOM_CDF4(32364, 32740, 32748)},
+ { AOM_CDF4(24839, 31907, 32551)}, { AOM_CDF4(17160, 28779, 31696)},
+ { AOM_CDF4(12452, 24137, 29602)}, { AOM_CDF4(6165, 15389, 22477)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)}
+ }
+ },
+ {
+ {
+ { AOM_CDF4(2575, 7281, 11077)}, { AOM_CDF4(14002, 20866, 25402)},
+ { AOM_CDF4(6343, 15056, 19658)}, { AOM_CDF4(4474, 11858, 17041)},
+ { AOM_CDF4(2865, 8299, 12534)}, { AOM_CDF4(1344, 3949, 6391)},
+ { AOM_CDF4(24720, 31239, 32459)}, { AOM_CDF4(12585, 25356, 29968)},
+ { AOM_CDF4(7181, 18246, 24444)}, { AOM_CDF4(5025, 13667, 19885)},
+ { AOM_CDF4(2521, 7304, 11605)}, { AOM_CDF4(29908, 32252, 32584)},
+ { AOM_CDF4(17421, 29156, 31575)}, { AOM_CDF4(9889, 22188, 27782)},
+ { AOM_CDF4(5878, 15647, 22123)}, { AOM_CDF4(2814, 8665, 13323)},
+ { AOM_CDF4(30183, 32568, 32713)}, { AOM_CDF4(18528, 30195, 32049)},
+ { AOM_CDF4(10982, 24606, 29657)}, { AOM_CDF4(6957, 18165, 25231)},
+ { AOM_CDF4(3508, 10118, 15468)}, { AOM_CDF4(31761, 32736, 32748)},
+ { AOM_CDF4(21041, 31328, 32546)}, { AOM_CDF4(12568, 26732, 31166)},
+ { AOM_CDF4(8052, 20720, 27733)}, { AOM_CDF4(4336, 12192, 18396)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)}
+ },
+ {
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)}
+ }
+ }
+ },
+ {
+ {
+ {
+ { AOM_CDF4(7062, 16472, 22319)}, { AOM_CDF4(24538, 32261, 32674)},
+ { AOM_CDF4(13675, 28041, 31779)}, { AOM_CDF4(8590, 20674, 27631)},
+ { AOM_CDF4(5685, 14675, 22013)}, { AOM_CDF4(3655, 9898, 15731)},
+ { AOM_CDF4(26493, 32418, 32658)}, { AOM_CDF4(16376, 29342, 32090)},
+ { AOM_CDF4(10594, 22649, 28970)}, { AOM_CDF4(8176, 17170, 24303)},
+ { AOM_CDF4(5605, 12694, 19139)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(23888, 31902, 32542)},
+ { AOM_CDF4(18612, 29687, 31987)}, { AOM_CDF4(16245, 24852, 29249)},
+ { AOM_CDF4(15765, 22608, 27559)}, { AOM_CDF4(19895, 24699, 27510)},
+ { AOM_CDF4(28401, 32212, 32457)}, { AOM_CDF4(15274, 27825, 30980)},
+ { AOM_CDF4(9364, 18128, 24332)}, { AOM_CDF4(2283, 8193, 15082)},
+ { AOM_CDF4(1228, 3972, 7881)}, { AOM_CDF4(29455, 32469, 32620)},
+ { AOM_CDF4(17981, 28245, 31388)}, { AOM_CDF4(10921, 20098, 26240)},
+ { AOM_CDF4(3743, 11829, 18657)}, { AOM_CDF4(2374, 9593, 15715)},
+ { AOM_CDF4(31068, 32466, 32635)}, { AOM_CDF4(20321, 29572, 31971)},
+ { AOM_CDF4(10771, 20255, 27119)}, { AOM_CDF4(2795, 10410, 17361)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)}
+ },
+ {
+ { AOM_CDF4(9320, 22102, 27840)}, { AOM_CDF4(27057, 32464, 32724)},
+ { AOM_CDF4(16331, 30268, 32309)}, { AOM_CDF4(10319, 23935, 29720)},
+ { AOM_CDF4(6189, 16448, 24106)}, { AOM_CDF4(3589, 10884, 18808)},
+ { AOM_CDF4(29026, 32624, 32748)}, { AOM_CDF4(19226, 31507, 32587)},
+ { AOM_CDF4(12692, 26921, 31203)}, { AOM_CDF4(7049, 19532, 27635)},
+ { AOM_CDF4(7727, 15669, 23252)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(28056, 32625, 32748)},
+ { AOM_CDF4(22383, 32075, 32669)}, { AOM_CDF4(15417, 27098, 31749)},
+ { AOM_CDF4(18127, 26493, 27190)}, { AOM_CDF4(5461, 16384, 21845)},
+ { AOM_CDF4(27982, 32091, 32584)}, { AOM_CDF4(19045, 29868, 31972)},
+ { AOM_CDF4(10397, 22266, 27932)}, { AOM_CDF4(5990, 13697, 21500)},
+ { AOM_CDF4(1792, 6912, 15104)}, { AOM_CDF4(28198, 32501, 32718)},
+ { AOM_CDF4(21534, 31521, 32569)}, { AOM_CDF4(11109, 25217, 30017)},
+ { AOM_CDF4(5671, 15124, 26151)}, { AOM_CDF4(4681, 14043, 18725)},
+ { AOM_CDF4(28688, 32580, 32741)}, { AOM_CDF4(22576, 32079, 32661)},
+ { AOM_CDF4(10627, 22141, 28340)}, { AOM_CDF4(9362, 14043, 28087)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)}
+ }
+ },
+ {
+ {
+ { AOM_CDF4(7754, 16948, 22142)}, { AOM_CDF4(25670, 32330, 32691)},
+ { AOM_CDF4(15663, 29225, 31994)}, { AOM_CDF4(9878, 23288, 29158)},
+ { AOM_CDF4(6419, 17088, 24336)}, { AOM_CDF4(3859, 11003, 17039)},
+ { AOM_CDF4(27562, 32595, 32725)}, { AOM_CDF4(17575, 30588, 32399)},
+ { AOM_CDF4(10819, 24838, 30309)}, { AOM_CDF4(7124, 18686, 25916)},
+ { AOM_CDF4(4479, 12688, 19340)}, { AOM_CDF4(28385, 32476, 32673)},
+ { AOM_CDF4(15306, 29005, 31938)}, { AOM_CDF4(8937, 21615, 28322)},
+ { AOM_CDF4(5982, 15603, 22786)}, { AOM_CDF4(3620, 10267, 16136)},
+ { AOM_CDF4(27280, 32464, 32667)}, { AOM_CDF4(15607, 29160, 32004)},
+ { AOM_CDF4(9091, 22135, 28740)}, { AOM_CDF4(6232, 16632, 24020)},
+ { AOM_CDF4(4047, 11377, 17672)}, { AOM_CDF4(29220, 32630, 32718)},
+ { AOM_CDF4(19650, 31220, 32462)}, { AOM_CDF4(13050, 26312, 30827)},
+ { AOM_CDF4(9228, 20870, 27468)}, { AOM_CDF4(6146, 15149, 21971)},
+ { AOM_CDF4(30169, 32481, 32623)}, { AOM_CDF4(17212, 29311, 31554)},
+ { AOM_CDF4(9911, 21311, 26882)}, { AOM_CDF4(4487, 13314, 20372)},
+ { AOM_CDF4(2570, 7772, 12889)}, { AOM_CDF4(30924, 32613, 32708)},
+ { AOM_CDF4(19490, 30206, 32107)}, { AOM_CDF4(11232, 23998, 29276)},
+ { AOM_CDF4(6769, 17955, 25035)}, { AOM_CDF4(4398, 12623, 19214)},
+ { AOM_CDF4(30609, 32627, 32722)}, { AOM_CDF4(19370, 30582, 32287)},
+ { AOM_CDF4(10457, 23619, 29409)}, { AOM_CDF4(6443, 17637, 24834)},
+ { AOM_CDF4(4645, 13236, 20106)}, { AOM_CDF4(8192, 16384, 24576)}
+ },
+ {
+ { AOM_CDF4(8626, 20271, 26216)}, { AOM_CDF4(26707, 32406, 32711)},
+ { AOM_CDF4(16999, 30329, 32286)}, { AOM_CDF4(11445, 25123, 30286)},
+ { AOM_CDF4(6411, 18828, 25601)}, { AOM_CDF4(6801, 12458, 20248)},
+ { AOM_CDF4(29918, 32682, 32748)}, { AOM_CDF4(20649, 31739, 32618)},
+ { AOM_CDF4(12879, 27773, 31581)}, { AOM_CDF4(7896, 21751, 28244)},
+ { AOM_CDF4(5260, 14870, 23698)}, { AOM_CDF4(29252, 32593, 32731)},
+ { AOM_CDF4(17072, 30460, 32294)}, { AOM_CDF4(10653, 24143, 29365)},
+ { AOM_CDF4(6536, 17490, 23983)}, { AOM_CDF4(4929, 13170, 20085)},
+ { AOM_CDF4(28137, 32518, 32715)}, { AOM_CDF4(18171, 30784, 32407)},
+ { AOM_CDF4(11437, 25436, 30459)}, { AOM_CDF4(7252, 18534, 26176)},
+ { AOM_CDF4(4126, 13353, 20978)}, { AOM_CDF4(31162, 32726, 32748)},
+ { AOM_CDF4(23017, 32222, 32701)}, { AOM_CDF4(15629, 29233, 32046)},
+ { AOM_CDF4(9387, 22621, 29480)}, { AOM_CDF4(6922, 17616, 25010)},
+ { AOM_CDF4(28838, 32265, 32614)}, { AOM_CDF4(19701, 30206, 31920)},
+ { AOM_CDF4(11214, 22410, 27933)}, { AOM_CDF4(5320, 14177, 23034)},
+ { AOM_CDF4(5049, 12881, 17827)}, { AOM_CDF4(27484, 32471, 32734)},
+ { AOM_CDF4(21076, 31526, 32561)}, { AOM_CDF4(12707, 26303, 31211)},
+ { AOM_CDF4(8169, 21722, 28219)}, { AOM_CDF4(6045, 19406, 27042)},
+ { AOM_CDF4(27753, 32572, 32745)}, { AOM_CDF4(20832, 31878, 32653)},
+ { AOM_CDF4(13250, 27356, 31674)}, { AOM_CDF4(7718, 21508, 29858)},
+ { AOM_CDF4(7209, 18350, 25559)}, { AOM_CDF4(8192, 16384, 24576)}
+ }
+ },
+ {
+ {
+ { AOM_CDF4(7876, 16901, 21741)}, { AOM_CDF4(24001, 31898, 32625)},
+ { AOM_CDF4(14529, 27959, 31451)}, { AOM_CDF4(8273, 20818, 27258)},
+ { AOM_CDF4(5278, 14673, 21510)}, { AOM_CDF4(2983, 8843, 14039)},
+ { AOM_CDF4(28016, 32574, 32732)}, { AOM_CDF4(17471, 30306, 32301)},
+ { AOM_CDF4(10224, 24063, 29728)}, { AOM_CDF4(6602, 17954, 25052)},
+ { AOM_CDF4(4002, 11585, 17759)}, { AOM_CDF4(30190, 32634, 32739)},
+ { AOM_CDF4(17497, 30282, 32270)}, { AOM_CDF4(10229, 23729, 29538)},
+ { AOM_CDF4(6344, 17211, 24440)}, { AOM_CDF4(3849, 11189, 17108)},
+ { AOM_CDF4(28570, 32583, 32726)}, { AOM_CDF4(17521, 30161, 32238)},
+ { AOM_CDF4(10153, 23565, 29378)}, { AOM_CDF4(6455, 17341, 24443)},
+ { AOM_CDF4(3907, 11042, 17024)}, { AOM_CDF4(30689, 32715, 32748)},
+ { AOM_CDF4(21546, 31840, 32610)}, { AOM_CDF4(13547, 27581, 31459)},
+ { AOM_CDF4(8912, 21757, 28309)}, { AOM_CDF4(5548, 15080, 22046)},
+ { AOM_CDF4(30783, 32540, 32685)}, { AOM_CDF4(17540, 29528, 31668)},
+ { AOM_CDF4(10160, 21468, 26783)}, { AOM_CDF4(4724, 13393, 20054)},
+ { AOM_CDF4(2702, 8174, 13102)}, { AOM_CDF4(31648, 32686, 32742)},
+ { AOM_CDF4(20954, 31094, 32337)}, { AOM_CDF4(12420, 25698, 30179)},
+ { AOM_CDF4(7304, 19320, 26248)}, { AOM_CDF4(4366, 12261, 18864)},
+ { AOM_CDF4(31581, 32723, 32748)}, { AOM_CDF4(21373, 31586, 32525)},
+ { AOM_CDF4(12744, 26625, 30885)}, { AOM_CDF4(7431, 20322, 26950)},
+ { AOM_CDF4(4692, 13323, 20111)}, { AOM_CDF4(8192, 16384, 24576)}
+ },
+ {
+ { AOM_CDF4(7833, 18369, 24095)}, { AOM_CDF4(26650, 32273, 32702)},
+ { AOM_CDF4(16371, 29961, 32191)}, { AOM_CDF4(11055, 24082, 29629)},
+ { AOM_CDF4(6892, 18644, 25400)}, { AOM_CDF4(5006, 13057, 19240)},
+ { AOM_CDF4(29834, 32666, 32748)}, { AOM_CDF4(19577, 31335, 32570)},
+ { AOM_CDF4(12253, 26509, 31122)}, { AOM_CDF4(7991, 20772, 27711)},
+ { AOM_CDF4(5677, 15910, 23059)}, { AOM_CDF4(30109, 32532, 32720)},
+ { AOM_CDF4(16747, 30166, 32252)}, { AOM_CDF4(10134, 23542, 29184)},
+ { AOM_CDF4(5791, 16176, 23556)}, { AOM_CDF4(4362, 10414, 17284)},
+ { AOM_CDF4(29492, 32626, 32748)}, { AOM_CDF4(19894, 31402, 32525)},
+ { AOM_CDF4(12942, 27071, 30869)}, { AOM_CDF4(8346, 21216, 27405)},
+ { AOM_CDF4(6572, 17087, 23859)}, { AOM_CDF4(32035, 32735, 32748)},
+ { AOM_CDF4(22957, 31838, 32618)}, { AOM_CDF4(14724, 28572, 31772)},
+ { AOM_CDF4(10364, 23999, 29553)}, { AOM_CDF4(7004, 18433, 25655)},
+ { AOM_CDF4(27528, 32277, 32681)}, { AOM_CDF4(16959, 31171, 32096)},
+ { AOM_CDF4(10486, 23593, 27962)}, { AOM_CDF4(8192, 16384, 23211)},
+ { AOM_CDF4(8937, 17873, 20852)}, { AOM_CDF4(27715, 32002, 32615)},
+ { AOM_CDF4(15073, 29491, 31676)}, { AOM_CDF4(11264, 24576, 28672)},
+ { AOM_CDF4(2341, 18725, 23406)}, { AOM_CDF4(7282, 18204, 25486)},
+ { AOM_CDF4(28547, 32213, 32657)}, { AOM_CDF4(20788, 29773, 32239)},
+ { AOM_CDF4(6780, 21469, 30508)}, { AOM_CDF4(5958, 14895, 23831)},
+ { AOM_CDF4(16384, 21845, 27307)}, { AOM_CDF4(8192, 16384, 24576)}
+ }
+ },
+ {
+ {
+ { AOM_CDF4(5992, 14304, 19765)}, { AOM_CDF4(22612, 31238, 32456)},
+ { AOM_CDF4(13456, 27162, 31087)}, { AOM_CDF4(8001, 20062, 26504)},
+ { AOM_CDF4(5168, 14105, 20764)}, { AOM_CDF4(2632, 7771, 12385)},
+ { AOM_CDF4(27034, 32344, 32709)}, { AOM_CDF4(15850, 29415, 31997)},
+ { AOM_CDF4(9494, 22776, 28841)}, { AOM_CDF4(6151, 16830, 23969)},
+ { AOM_CDF4(3461, 10039, 15722)}, { AOM_CDF4(30134, 32569, 32731)},
+ { AOM_CDF4(15638, 29422, 31945)}, { AOM_CDF4(9150, 21865, 28218)},
+ { AOM_CDF4(5647, 15719, 22676)}, { AOM_CDF4(3402, 9772, 15477)},
+ { AOM_CDF4(28530, 32586, 32735)}, { AOM_CDF4(17139, 30298, 32292)},
+ { AOM_CDF4(10200, 24039, 29685)}, { AOM_CDF4(6419, 17674, 24786)},
+ { AOM_CDF4(3544, 10225, 15824)}, { AOM_CDF4(31333, 32726, 32748)},
+ { AOM_CDF4(20618, 31487, 32544)}, { AOM_CDF4(12901, 27217, 31232)},
+ { AOM_CDF4(8624, 21734, 28171)}, { AOM_CDF4(5104, 14191, 20748)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)}
+ },
+ {
+ { AOM_CDF4(11206, 21090, 26561)}, { AOM_CDF4(28759, 32279, 32671)},
+ { AOM_CDF4(14171, 27952, 31569)}, { AOM_CDF4(9743, 22907, 29141)},
+ { AOM_CDF4(6871, 17886, 24868)}, { AOM_CDF4(4960, 13152, 19315)},
+ { AOM_CDF4(31077, 32661, 32748)}, { AOM_CDF4(19400, 31195, 32515)},
+ { AOM_CDF4(12752, 26858, 31040)}, { AOM_CDF4(8370, 22098, 28591)},
+ { AOM_CDF4(5457, 15373, 22298)}, { AOM_CDF4(31697, 32706, 32748)},
+ { AOM_CDF4(17860, 30657, 32333)}, { AOM_CDF4(12510, 24812, 29261)},
+ { AOM_CDF4(6180, 19124, 24722)}, { AOM_CDF4(5041, 13548, 17959)},
+ { AOM_CDF4(31552, 32716, 32748)}, { AOM_CDF4(21908, 31769, 32623)},
+ { AOM_CDF4(14470, 28201, 31565)}, { AOM_CDF4(9493, 22982, 28608)},
+ { AOM_CDF4(6858, 17240, 24137)}, { AOM_CDF4(32543, 32752, 32756)},
+ { AOM_CDF4(24286, 32097, 32666)}, { AOM_CDF4(15958, 29217, 32024)},
+ { AOM_CDF4(10207, 24234, 29958)}, { AOM_CDF4(6929, 18305, 25652)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)}
+ }
+ },
+ {
+ {
+ { AOM_CDF4(4137, 10847, 15682)}, { AOM_CDF4(17824, 27001, 30058)},
+ { AOM_CDF4(10204, 22796, 28291)}, { AOM_CDF4(6076, 15935, 22125)},
+ { AOM_CDF4(3852, 10937, 16816)}, { AOM_CDF4(2252, 6324, 10131)},
+ { AOM_CDF4(25840, 32016, 32662)}, { AOM_CDF4(15109, 28268, 31531)},
+ { AOM_CDF4(9385, 22231, 28340)}, { AOM_CDF4(6082, 16672, 23479)},
+ { AOM_CDF4(3318, 9427, 14681)}, { AOM_CDF4(30594, 32574, 32718)},
+ { AOM_CDF4(16836, 29552, 31859)}, { AOM_CDF4(9556, 22542, 28356)},
+ { AOM_CDF4(6305, 16725, 23540)}, { AOM_CDF4(3376, 9895, 15184)},
+ { AOM_CDF4(29383, 32617, 32745)}, { AOM_CDF4(18891, 30809, 32401)},
+ { AOM_CDF4(11688, 25942, 30687)}, { AOM_CDF4(7468, 19469, 26651)},
+ { AOM_CDF4(3909, 11358, 17012)}, { AOM_CDF4(31564, 32736, 32748)},
+ { AOM_CDF4(20906, 31611, 32600)}, { AOM_CDF4(13191, 27621, 31537)},
+ { AOM_CDF4(8768, 22029, 28676)}, { AOM_CDF4(5079, 14109, 20906)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)}
+ },
+ {
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
+ { AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)}
+ }
+ }
+ }
+};
+
+static const u16 av1_default_coeff_base_eob_multi_cdfs[TOKEN_CDF_Q_CTXS][TX_SIZES]
+ [PLANE_TYPES][SIG_COEF_CONTEXTS_EOB][CDF_SIZE(NUM_BASE_LEVELS + 1)] = {
+ {
+ {
+ {
+ { AOM_CDF3(17837, 29055)},
+ { AOM_CDF3(29600, 31446)},
+ { AOM_CDF3(30844, 31878)},
+ { AOM_CDF3(24926, 28948)}
+ },
+ {
+ { AOM_CDF3(21365, 30026)},
+ { AOM_CDF3(30512, 32423)},
+ { AOM_CDF3(31658, 32621)},
+ { AOM_CDF3(29630, 31881)}
+ }
+ },
+ {
+ {
+ { AOM_CDF3(5717, 26477)},
+ { AOM_CDF3(30491, 31703)},
+ { AOM_CDF3(31550, 32158)},
+ { AOM_CDF3(29648, 31491)}
+ },
+ {
+ { AOM_CDF3(12608, 27820)},
+ { AOM_CDF3(30680, 32225)},
+ { AOM_CDF3(30809, 32335)},
+ { AOM_CDF3(31299, 32423)}
+ }
+ },
+ {
+ {
+ { AOM_CDF3(1786, 12612)},
+ { AOM_CDF3(30663, 31625)},
+ { AOM_CDF3(32339, 32468)},
+ { AOM_CDF3(31148, 31833)}
+ },
+ {
+ { AOM_CDF3(18857, 23865)},
+ { AOM_CDF3(31428, 32428)},
+ { AOM_CDF3(31744, 32373)},
+ { AOM_CDF3(31775, 32526)}
+ }
+ },
+ {
+ {
+ { AOM_CDF3(1787, 2532)},
+ { AOM_CDF3(30832, 31662)},
+ { AOM_CDF3(31824, 32682)},
+ { AOM_CDF3(32133, 32569)}
+ },
+ {
+ { AOM_CDF3(13751, 22235)},
+ { AOM_CDF3(32089, 32409)},
+ { AOM_CDF3(27084, 27920)},
+ { AOM_CDF3(29291, 32594)}
+ }
+ },
+ {
+ {
+ { AOM_CDF3(1725, 3449)},
+ { AOM_CDF3(31102, 31935)},
+ { AOM_CDF3(32457, 32613)},
+ { AOM_CDF3(32412, 32649)}
+ },
+ {
+ { AOM_CDF3(10923, 21845)},
+ { AOM_CDF3(10923, 21845)},
+ { AOM_CDF3(10923, 21845)},
+ { AOM_CDF3(10923, 21845)}
+ }
+ }
+ },
+ {
+ {
+ {
+ { AOM_CDF3(17560, 29888)},
+ { AOM_CDF3(29671, 31549)},
+ { AOM_CDF3(31007, 32056)},
+ { AOM_CDF3(27286, 30006)}
+ },
+ {
+ { AOM_CDF3(26594, 31212)},
+ { AOM_CDF3(31208, 32582)},
+ { AOM_CDF3(31835, 32637)},
+ { AOM_CDF3(30595, 32206)}
+ }
+ },
+ {
+ {
+ { AOM_CDF3(15239, 29932)},
+ { AOM_CDF3(31315, 32095)},
+ { AOM_CDF3(32130, 32434)},
+ { AOM_CDF3(30864, 31996)}
+ },
+ {
+ { AOM_CDF3(26279, 30968)},
+ { AOM_CDF3(31142, 32495)},
+ { AOM_CDF3(31713, 32540)},
+ { AOM_CDF3(31929, 32594)}
+ }
+ },
+ {
+ {
+ { AOM_CDF3(2644, 25198)},
+ { AOM_CDF3(32038, 32451)},
+ { AOM_CDF3(32639, 32695)},
+ { AOM_CDF3(32166, 32518)}
+ },
+ {
+ { AOM_CDF3(17187, 27668)},
+ { AOM_CDF3(31714, 32550)},
+ { AOM_CDF3(32283, 32678)},
+ { AOM_CDF3(31930, 32563)}
+ }
+ },
+ {
+ {
+ { AOM_CDF3(1044, 2257)},
+ { AOM_CDF3(30755, 31923)},
+ { AOM_CDF3(32208, 32693)},
+ { AOM_CDF3(32244, 32615)}
+ },
+ {
+ { AOM_CDF3(21317, 26207)},
+ { AOM_CDF3(29133, 30868)},
+ { AOM_CDF3(29311, 31231)},
+ { AOM_CDF3(29657, 31087)}
+ }
+ },
+ {
+ {
+ { AOM_CDF3(478, 1834)},
+ { AOM_CDF3(31005, 31987)},
+ { AOM_CDF3(32317, 32724)},
+ { AOM_CDF3(30865, 32648)}
+ },
+ {
+ { AOM_CDF3(10923, 21845)},
+ { AOM_CDF3(10923, 21845)},
+ { AOM_CDF3(10923, 21845)},
+ { AOM_CDF3(10923, 21845)}
+ }
+ }
+ },
+ {
+ {
+ {
+ { AOM_CDF3(20092, 30774)},
+ { AOM_CDF3(30695, 32020)},
+ { AOM_CDF3(31131, 32103)},
+ { AOM_CDF3(28666, 30870)}
+ },
+ {
+ { AOM_CDF3(27258, 31095)},
+ { AOM_CDF3(31804, 32623)},
+ { AOM_CDF3(31763, 32528)},
+ { AOM_CDF3(31438, 32506)}
+ }
+ },
+ {
+ {
+ { AOM_CDF3(18049, 30489)},
+ { AOM_CDF3(31706, 32286)},
+ { AOM_CDF3(32163, 32473)},
+ { AOM_CDF3(31550, 32184)}
+ },
+ {
+ { AOM_CDF3(27116, 30842)},
+ { AOM_CDF3(31971, 32598)},
+ { AOM_CDF3(32088, 32576)},
+ { AOM_CDF3(32067, 32664)}
+ }
+ },
+ {
+ {
+ { AOM_CDF3(12854, 29093)},
+ { AOM_CDF3(32272, 32558)},
+ { AOM_CDF3(32667, 32729)},
+ { AOM_CDF3(32306, 32585)}
+ },
+ {
+ { AOM_CDF3(25476, 30366)},
+ { AOM_CDF3(32169, 32687)},
+ { AOM_CDF3(32479, 32689)},
+ { AOM_CDF3(31673, 32634)}
+ }
+ },
+ {
+ {
+ { AOM_CDF3(2809, 19301)},
+ { AOM_CDF3(32205, 32622)},
+ { AOM_CDF3(32338, 32730)},
+ { AOM_CDF3(31786, 32616)}
+ },
+ {
+ { AOM_CDF3(22737, 29105)},
+ { AOM_CDF3(30810, 32362)},
+ { AOM_CDF3(30014, 32627)},
+ { AOM_CDF3(30528, 32574)}
+ }
+ },
+ {
+ {
+ { AOM_CDF3(935, 3382)},
+ { AOM_CDF3(30789, 31909)},
+ { AOM_CDF3(32466, 32756)},
+ { AOM_CDF3(30860, 32513)}
+ },
+ {
+ { AOM_CDF3(10923, 21845)},
+ { AOM_CDF3(10923, 21845)},
+ { AOM_CDF3(10923, 21845)},
+ { AOM_CDF3(10923, 21845)}
+ }
+ }
+ },
+ {
+ {
+ {
+ { AOM_CDF3(22497, 31198)},
+ { AOM_CDF3(31715, 32495)},
+ { AOM_CDF3(31606, 32337)},
+ { AOM_CDF3(30388, 31990)}
+ },
+ {
+ { AOM_CDF3(27877, 31584)},
+ { AOM_CDF3(32170, 32728)},
+ { AOM_CDF3(32155, 32688)},
+ { AOM_CDF3(32219, 32702)}
+ }
+ },
+ {
+ {
+ { AOM_CDF3(21457, 31043)},
+ { AOM_CDF3(31951, 32483)},
+ { AOM_CDF3(32153, 32562)},
+ { AOM_CDF3(31473, 32215)}
+ },
+ {
+ { AOM_CDF3(27558, 31151)},
+ { AOM_CDF3(32020, 32640)},
+ { AOM_CDF3(32097, 32575)},
+ { AOM_CDF3(32242, 32719)}
+ }
+ },
+ {
+ {
+ { AOM_CDF3(19980, 30591)},
+ { AOM_CDF3(32219, 32597)},
+ { AOM_CDF3(32581, 32706)},
+ { AOM_CDF3(31803, 32287)}
+ },
+ {
+ { AOM_CDF3(26473, 30507)},
+ { AOM_CDF3(32431, 32723)},
+ { AOM_CDF3(32196, 32611)},
+ { AOM_CDF3(31588, 32528)}
+ }
+ },
+ {
+ {
+ { AOM_CDF3(24647, 30463)},
+ { AOM_CDF3(32412, 32695)},
+ { AOM_CDF3(32468, 32720)},
+ { AOM_CDF3(31269, 32523)}
+ },
+ {
+ { AOM_CDF3(28482, 31505)},
+ { AOM_CDF3(32152, 32701)},
+ { AOM_CDF3(31732, 32598)},
+ { AOM_CDF3(31767, 32712)}
+ }
+ },
+ {
+ {
+ { AOM_CDF3(12358, 24977)},
+ { AOM_CDF3(31331, 32385)},
+ { AOM_CDF3(32634, 32756)},
+ { AOM_CDF3(30411, 32548)}
+ },
+ {
+ { AOM_CDF3(10923, 21845)},
+ { AOM_CDF3(10923, 21845)},
+ { AOM_CDF3(10923, 21845)},
+ { AOM_CDF3(10923, 21845)}
+ }
+ }
+ }
+};
+
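+/*
+ * Default motion-vector CDFs. ICDF() stores each entry in inverted form
+ * (32768 - cumulative probability), the convention used throughout these
+ * tables.
+ */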
+static const u16 default_joint_cdf[] = { ICDF(4096), ICDF(11264), ICDF(19328)};
+static const u16 default_clsss_cdf[][10] = {
+ // Vertical component
+ {
+ ICDF(28672), ICDF(30976), ICDF(31858), ICDF(32320), ICDF(32551),
+ ICDF(32656), ICDF(32740), ICDF(32757), ICDF(32762), ICDF(32767)
+ },
+ // Horizontal component
+ {
+ ICDF(28672), ICDF(30976), ICDF(31858), ICDF(32320), ICDF(32551),
+ ICDF(32656), ICDF(32740), ICDF(32757), ICDF(32762), ICDF(32767)
+ }
+};
+
+static const u16 default_clsss0_fp_cdf[][2][3] = {
+ // Vertical component
+ {
+ { ICDF(16384), ICDF(24576), ICDF(26624)},
+ { ICDF(12288), ICDF(21248), ICDF(24128)}
+ },
+ // Horizontal component
+ {
+ { ICDF(16384), ICDF(24576), ICDF(26624)},
+ { ICDF(12288), ICDF(21248), ICDF(24128)}
+ }
+};
+
+static const u16 default_fp_cdf[][3] = {
+ // Vertical component
+ {
+ ICDF(8192), ICDF(17408), ICDF(21248)
+ },
+ // Horizontal component
+ {
+ ICDF(8192), ICDF(17408), ICDF(21248)
+ }
+};
+
+static const u16 default_sign_cdf[] = { ICDF(128 * 128), ICDF(128 * 128)};
+static const u16 default_class0_hp_cdf[] = { ICDF(160 * 128), ICDF(160 * 128)};
+static const u16 default_hp_cdf[] = { ICDF(128 * 128), ICDF(128 * 128)};
+static const u16 default_class0_cdf[] = { ICDF(216 * 128), ICDF(216 * 128)};
+static const u16 default_bits_cdf[][10] = {
+ {
+ ICDF(128 * 136), ICDF(128 * 140), ICDF(128 * 148), ICDF(128 * 160),
+ ICDF(128 * 176), ICDF(128 * 192), ICDF(128 * 224), ICDF(128 * 234),
+ ICDF(128 * 234), ICDF(128 * 240)
+ },
+ {
+ ICDF(128 * 136), ICDF(128 * 140), ICDF(128 * 148), ICDF(128 * 160),
+ ICDF(128 * 176), ICDF(128 * 192), ICDF(128 * 224), ICDF(128 * 234),
+ ICDF(128 * 234), ICDF(128 * 240)
+ }
+};
+
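+/*
+ * Map a base quantizer index (0..255) to one of the TOKEN_CDF_Q_CTXS
+ * buckets used to pick the quality-dependent default CDF tables above.
+ */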
+static int rockchip_av1_get_q_ctx(int q)
+{
+ if (q <= 20)
+ return 0;
+ if (q <= 60)
+ return 1;
+ if (q <= 120)
+ return 2;
+ return 3;
+}
+
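+/*
+ * Reset the coefficient-coding CDFs in @ptr (a struct av1cdfs) to the
+ * defaults matching the quality bucket of @base_qindex.
+ */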
+void rockchip_av1_default_coeff_probs(u32 base_qindex, void *ptr)
+{
+ struct av1cdfs *cdfs = (struct av1cdfs *)ptr;
+ const int index = rockchip_av1_get_q_ctx(base_qindex);
+
+ memcpy(cdfs->txb_skip_cdf, av1_default_txb_skip_cdfs[index],
+ sizeof(av1_default_txb_skip_cdfs[0]));
+ memcpy(cdfs->eob_extra_cdf, av1_default_eob_extra_cdfs[index],
+ sizeof(av1_default_eob_extra_cdfs[0]));
+ memcpy(cdfs->dc_sign_cdf, av1_default_dc_sign_cdfs[index],
+ sizeof(av1_default_dc_sign_cdfs[0]));
+ memcpy(cdfs->coeff_br_cdf, av1_default_coeff_lps_multi_cdfs[index],
+ sizeof(av1_default_coeff_lps_multi_cdfs[0]));
+ memcpy(cdfs->coeff_base_cdf, av1_default_coeff_base_multi_cdfs[index],
+ sizeof(av1_default_coeff_base_multi_cdfs[0]));
+ memcpy(cdfs->coeff_base_eob_cdf,
+ av1_default_coeff_base_eob_multi_cdfs[index],
+ sizeof(av1_default_coeff_base_eob_multi_cdfs[0]));
+ memcpy(cdfs->eob_flag_cdf16, av1_default_eob_multi16_cdfs[index],
+ sizeof(av1_default_eob_multi16_cdfs[0]));
+ memcpy(cdfs->eob_flag_cdf32, av1_default_eob_multi32_cdfs[index],
+ sizeof(av1_default_eob_multi32_cdfs[0]));
+ memcpy(cdfs->eob_flag_cdf64, av1_default_eob_multi64_cdfs[index],
+ sizeof(av1_default_eob_multi64_cdfs[0]));
+ memcpy(cdfs->eob_flag_cdf128, av1_default_eob_multi128_cdfs[index],
+ sizeof(av1_default_eob_multi128_cdfs[0]));
+ memcpy(cdfs->eob_flag_cdf256, av1_default_eob_multi256_cdfs[index],
+ sizeof(av1_default_eob_multi256_cdfs[0]));
+ memcpy(cdfs->eob_flag_cdf512, av1_default_eob_multi512_cdfs[index],
+ sizeof(av1_default_eob_multi512_cdfs[0]));
+ memcpy(cdfs->eob_flag_cdf1024, av1_default_eob_multi1024_cdfs[index],
+ sizeof(av1_default_eob_multi1024_cdfs[0]));
+}
+
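+/*
+ * Load the spec-default mode, partition and motion-vector CDFs into
+ * @cdfs and the intra block copy MV CDFs into @cdfs_ndvc; the
+ * quality-dependent coefficient CDFs are set separately above.
+ */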
+void rockchip_av1_set_default_cdfs(struct av1cdfs *cdfs,
+ struct mvcdfs *cdfs_ndvc)
+{
+ memcpy(cdfs->partition_cdf, default_partition_cdf,
+ sizeof(cdfs->partition_cdf));
+
+ memcpy(cdfs->tx_type_intra0_cdf, default_intra_ext_tx0_cdf,
+ sizeof(cdfs->tx_type_intra0_cdf));
+ memcpy(cdfs->tx_type_intra1_cdf, default_intra_ext_tx1_cdf,
+ sizeof(cdfs->tx_type_intra1_cdf));
+ memcpy(cdfs->tx_type_inter_cdf, default_inter_ext_tx_cdf,
+ sizeof(cdfs->tx_type_inter_cdf));
+
+ memcpy(cdfs->vartx_part_cdf, default_txfm_partition_cdf,
+ sizeof(cdfs->vartx_part_cdf));
+ memcpy(cdfs->mbskip_cdf, default_skip_cdfs, sizeof(cdfs->mbskip_cdf));
+ memcpy(cdfs->delta_q_cdf, default_delta_q_cdf,
+ sizeof(cdfs->delta_q_cdf));
+ memcpy(cdfs->delta_lf_multi_cdf, default_delta_lf_multi_cdf,
+ sizeof(cdfs->delta_lf_multi_cdf));
+ memcpy(cdfs->delta_lf_cdf, default_delta_lf_cdf,
+ sizeof(cdfs->delta_lf_cdf));
+
+ memcpy(cdfs->segment_pred_cdf, default_segment_pred_cdf,
+ sizeof(cdfs->segment_pred_cdf));
+
+ memcpy(cdfs->spatial_pred_seg_tree_cdf,
+ default_spatial_pred_seg_tree_cdf,
+ sizeof(cdfs->spatial_pred_seg_tree_cdf));
+
+ memcpy(cdfs->skip_mode_cdf, default_skip_mode_cdfs,
+ sizeof(cdfs->skip_mode_cdf));
+
+ memcpy(cdfs->tx_size_cdf, default_tx_size_cdf,
+ sizeof(cdfs->tx_size_cdf));
+
+ memcpy(cdfs->kf_ymode_cdf, default_kf_y_mode_cdf,
+ sizeof(cdfs->kf_ymode_cdf));
+ memcpy(cdfs->uv_mode_cdf, default_uv_mode_cdf,
+ sizeof(cdfs->uv_mode_cdf));
+ memcpy(cdfs->if_ymode_cdf, default_if_y_mode_cdf,
+ sizeof(cdfs->if_ymode_cdf));
+
+ memcpy(cdfs->intra_inter_cdf, default_intra_inter_cdf,
+ sizeof(cdfs->intra_inter_cdf));
+
+ memcpy(cdfs->comp_ref_cdf, default_comp_ref_cdf,
+ sizeof(cdfs->comp_ref_cdf));
+ memcpy(cdfs->comp_bwdref_cdf, default_comp_bwdref_cdf,
+ sizeof(cdfs->comp_bwdref_cdf));
+
+ memcpy(cdfs->comp_inter_cdf, default_comp_inter_cdf,
+ sizeof(cdfs->comp_inter_cdf));
+
+ memcpy(cdfs->single_ref_cdf, default_single_ref_cdf,
+ sizeof(cdfs->single_ref_cdf));
+ memcpy(cdfs->comp_ref_type_cdf, default_comp_ref_type_cdf,
+ sizeof(cdfs->comp_ref_type_cdf));
+ memcpy(cdfs->uni_comp_ref_cdf, default_uni_comp_ref_cdf,
+ sizeof(cdfs->uni_comp_ref_cdf));
+
+ memcpy(cdfs->newmv_cdf, default_newmv_cdf, sizeof(cdfs->newmv_cdf));
+ memcpy(cdfs->zeromv_cdf, default_zeromv_cdf, sizeof(cdfs->zeromv_cdf));
+ memcpy(cdfs->refmv_cdf, default_refmv_cdf, sizeof(cdfs->refmv_cdf));
+ memcpy(cdfs->drl_cdf, default_drl_cdf, sizeof(cdfs->drl_cdf));
+
+ memcpy(cdfs->interp_filter_cdf, default_switchable_interp_cdf,
+ sizeof(cdfs->interp_filter_cdf));
+
+ // Regular MV cdfs
+ memcpy(cdfs->mv_cdf.joint_cdf, default_joint_cdf,
+ sizeof(cdfs->mv_cdf.joint_cdf));
+ memcpy(cdfs->mv_cdf.sign_cdf, default_sign_cdf,
+ sizeof(cdfs->mv_cdf.sign_cdf));
+ memcpy(cdfs->mv_cdf.clsss_cdf, default_clsss_cdf,
+ sizeof(cdfs->mv_cdf.clsss_cdf));
+ memcpy(cdfs->mv_cdf.clsss0_fp_cdf, default_clsss0_fp_cdf,
+ sizeof(cdfs->mv_cdf.clsss0_fp_cdf));
+ memcpy(cdfs->mv_cdf.fp_cdf, default_fp_cdf,
+ sizeof(cdfs->mv_cdf.fp_cdf));
+ memcpy(cdfs->mv_cdf.class0_hp_cdf, default_class0_hp_cdf,
+ sizeof(cdfs->mv_cdf.class0_hp_cdf));
+ memcpy(cdfs->mv_cdf.hp_cdf, default_hp_cdf,
+ sizeof(cdfs->mv_cdf.hp_cdf));
+ memcpy(cdfs->mv_cdf.class0_cdf, default_class0_cdf,
+ sizeof(cdfs->mv_cdf.class0_cdf));
+ memcpy(cdfs->mv_cdf.bits_cdf, default_bits_cdf,
+ sizeof(cdfs->mv_cdf.bits_cdf));
+
+ // Intrabc cdfs
+ memcpy(cdfs_ndvc->joint_cdf, default_joint_cdf,
+ sizeof(cdfs_ndvc->joint_cdf));
+ memcpy(cdfs_ndvc->sign_cdf, default_sign_cdf,
+ sizeof(cdfs_ndvc->sign_cdf));
+ memcpy(cdfs_ndvc->clsss_cdf, default_clsss_cdf,
+ sizeof(cdfs_ndvc->clsss_cdf));
+ memcpy(cdfs_ndvc->clsss0_fp_cdf, default_clsss0_fp_cdf,
+ sizeof(cdfs_ndvc->clsss0_fp_cdf));
+ memcpy(cdfs_ndvc->fp_cdf, default_fp_cdf, sizeof(cdfs_ndvc->fp_cdf));
+ memcpy(cdfs_ndvc->class0_hp_cdf, default_class0_hp_cdf,
+ sizeof(cdfs_ndvc->class0_hp_cdf));
+ memcpy(cdfs_ndvc->hp_cdf, default_hp_cdf, sizeof(cdfs_ndvc->hp_cdf));
+ memcpy(cdfs_ndvc->class0_cdf, default_class0_cdf,
+ sizeof(cdfs_ndvc->class0_cdf));
+ memcpy(cdfs_ndvc->bits_cdf, default_bits_cdf,
+ sizeof(cdfs_ndvc->bits_cdf));
+
+ memcpy(cdfs->obmc_cdf, default_obmc_cdf, sizeof(cdfs->obmc_cdf));
+ memcpy(cdfs->motion_mode_cdf, default_motion_mode_cdf,
+ sizeof(cdfs->motion_mode_cdf));
+
+ memcpy(cdfs->inter_compound_mode_cdf, default_inter_compound_mode_cdf,
+ sizeof(cdfs->inter_compound_mode_cdf));
+ memcpy(cdfs->compound_type_cdf, default_compound_type_cdf,
+ sizeof(cdfs->compound_type_cdf));
+ memcpy(cdfs->interintra_cdf, default_interintra_cdf,
+ sizeof(cdfs->interintra_cdf));
+ memcpy(cdfs->interintra_mode_cdf, default_interintra_mode_cdf,
+ sizeof(cdfs->interintra_mode_cdf));
+ memcpy(cdfs->wedge_interintra_cdf, default_wedge_interintra_cdf,
+ sizeof(cdfs->wedge_interintra_cdf));
+ memcpy(cdfs->wedge_idx_cdf, default_wedge_idx_cdf,
+ sizeof(cdfs->wedge_idx_cdf));
+
+ memcpy(cdfs->palette_y_mode_cdf, default_palette_y_mode_cdf,
+ sizeof(cdfs->palette_y_mode_cdf));
+ memcpy(cdfs->palette_uv_mode_cdf, default_palette_uv_mode_cdf,
+ sizeof(cdfs->palette_uv_mode_cdf));
+ memcpy(cdfs->palette_y_size_cdf, default_palette_y_size_cdf,
+ sizeof(cdfs->palette_y_size_cdf));
+ memcpy(cdfs->palette_uv_size_cdf, default_palette_uv_size_cdf,
+ sizeof(cdfs->palette_uv_size_cdf));
+ memcpy(cdfs->palette_y_color_index_cdf,
+ default_palette_y_color_index_cdf,
+ sizeof(cdfs->palette_y_color_index_cdf));
+ memcpy(cdfs->palette_uv_color_index_cdf,
+ default_palette_uv_color_index_cdf,
+ sizeof(cdfs->palette_uv_color_index_cdf));
+
+ memcpy(cdfs->cfl_sign_cdf, default_cfl_sign_cdf,
+ sizeof(cdfs->cfl_sign_cdf));
+ memcpy(cdfs->cfl_alpha_cdf, default_cfl_alpha_cdf,
+ sizeof(cdfs->cfl_alpha_cdf));
+
+ memcpy(cdfs->intrabc_cdf, default_intrabc_cdf,
+ sizeof(cdfs->intrabc_cdf));
+ memcpy(cdfs->angle_delta_cdf, default_angle_delta_cdf,
+ sizeof(cdfs->angle_delta_cdf));
+ memcpy(cdfs->filter_intra_mode_cdf, default_filter_intra_mode_cdf,
+ sizeof(cdfs->filter_intra_mode_cdf));
+ memcpy(cdfs->filter_intra_cdf, default_filter_intra_cdfs,
+ sizeof(cdfs->filter_intra_cdf));
+ memcpy(cdfs->comp_group_idx_cdf, default_comp_group_idx_cdfs,
+ sizeof(cdfs->comp_group_idx_cdf));
+ memcpy(cdfs->compound_idx_cdf, default_compound_idx_cdfs,
+ sizeof(cdfs->compound_idx_cdf));
+}
+
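+/* Point the decoder at the CDFs saved alongside reference slot @ref_idx. */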
+void rockchip_av1_get_cdfs(struct hantro_ctx *ctx, u32 ref_idx)
+{
+ struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
+
+ av1_dec->cdfs = &av1_dec->cdfs_last[ref_idx];
+ av1_dec->cdfs_ndvc = &av1_dec->cdfs_last_ndvc[ref_idx];
+}
+
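+/*
+ * Save the active CDFs into every reference slot selected by
+ * @refresh_frame_flags, skipping a slot the active pointers already
+ * reference (the copy would be a no-op).
+ */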
+void rockchip_av1_store_cdfs(struct hantro_ctx *ctx,
+ u32 refresh_frame_flags)
+{
+ struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
+ int i;
+
+	for (i = 0; i < NUM_REF_FRAMES; i++) {
+		if ((refresh_frame_flags & (1 << i)) &&
+		    &av1_dec->cdfs_last[i] != av1_dec->cdfs) {
+			av1_dec->cdfs_last[i] = *av1_dec->cdfs;
+			av1_dec->cdfs_last_ndvc[i] = *av1_dec->cdfs_ndvc;
+		}
+	}
+}
diff --git a/drivers/media/platform/verisilicon/rockchip_av1_entropymode.h b/drivers/media/platform/verisilicon/rockchip_av1_entropymode.h
new file mode 100644
index 0000000000..bbf8424c7d
--- /dev/null
+++ b/drivers/media/platform/verisilicon/rockchip_av1_entropymode.h
@@ -0,0 +1,272 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _ROCKCHIP_AV1_ENTROPYMODE_H_
+#define _ROCKCHIP_AV1_ENTROPYMODE_H_
+
+#include <linux/types.h>
+
+struct hantro_ctx;
+
+#define AV1_INTER_MODE_CONTEXTS 15
+#define AV1_INTRA_MODES 13
+#define AV1_REF_CONTEXTS 3
+#define AV1_SWITCHABLE_FILTERS 3 /* number of switchable filters */
+#define AV1_TX_SIZE_CONTEXTS 3
+#define BLOCK_SIZE_GROUPS 4
+#define BR_CDF_SIZE 4
+#define BWD_REFS 3
+#define CFL_ALLOWED_TYPES 2
+#define CFL_ALPHA_CONTEXTS 6
+#define CFL_ALPHABET_SIZE 16
+#define CFL_JOINT_SIGNS 8
+#define CDF_SIZE(x) ((x) - 1)
+#define COMP_GROUP_IDX_CONTEXTS 7
+#define COMP_INDEX_CONTEXTS 6
+#define COMP_INTER_CONTEXTS 5
+#define COMP_REF_TYPE_CONTEXTS 5
+#define COMPOUND_TYPES 3
+#define DC_SIGN_CONTEXTS 3
+#define DELTA_LF_PROBS 3
+#define DELTA_Q_PROBS 3
+#define DIRECTIONAL_MODES 8
+#define DRL_MODE_CONTEXTS 3
+#define EOB_COEF_CONTEXTS 9
+#define EXT_TX_SIZES 3
+#define EXT_TX_TYPES 16
+#define EXTTX_SIZES 4
+#define FRAME_LF_COUNT 4
+#define FWD_REFS 4
+#define GLOBALMV_MODE_CONTEXTS 2
+#define ICDF(x) (32768U - (x))
+#define INTER_COMPOUND_MODES 8
+#define INTERINTRA_MODES 4
+#define INTRA_INTER_CONTEXTS 4
+#define KF_MODE_CONTEXTS 5
+#define LEVEL_CONTEXTS 21
+#define MAX_ANGLE_DELTA 3
+#define MAX_MB_SEGMENTS 8
+#define MAX_SEGMENTS 8
+#define MAX_TX_CATS 4
+#define MAX_TX_DEPTH 2
+#define MBSKIP_CONTEXTS 3
+#define MOTION_MODES 3
+#define MOTION_MODE_CONTEXTS 10
+#define NEWMV_MODE_CONTEXTS 6
+#define NUM_BASE_LEVELS 2
+#define NUM_REF_FRAMES 8
+#define PALETTE_BLOCK_SIZES 7
+#define PALETTE_IDX_CONTEXTS 18
+#define PALETTE_SIZES 7
+#define PALETTE_UV_MODE_CONTEXTS 2
+#define PALETTE_Y_MODE_CONTEXTS 3
+#define PARTITION_PLOFFSET 4
+#define NUM_PARTITION_CONTEXTS (4 * PARTITION_PLOFFSET)
+#define PLANE_TYPES 2
+#define PREDICTION_PROBS 3
+#define REF_CONTEXTS 5
+#define REFMV_MODE_CONTEXTS 9
+#define SEG_TEMPORAL_PRED_CTXS 3
+#define SIG_COEF_CONTEXTS 42
+#define SIG_COEF_CONTEXTS_EOB 4
+#define SINGLE_REFS 7
+#define SKIP_CONTEXTS 3
+#define SKIP_MODE_CONTEXTS 3
+#define SPATIAL_PREDICTION_PROBS 3
+#define SWITCHABLE_FILTER_CONTEXTS ((AV1_SWITCHABLE_FILTERS + 1) * 4)
+#define TOKEN_CDF_Q_CTXS 4
+#define TX_SIZES 5
+#define TX_SIZE_CONTEXTS 2
+#define TX_TYPES 4
+#define TXB_SKIP_CONTEXTS 13
+#define TXFM_PARTITION_CONTEXTS 22
+#define UNI_COMP_REF_CONTEXTS 3
+#define UNIDIR_COMP_REFS 4
+#define UV_INTRA_MODES 14
+#define VARTX_PART_CONTEXTS 22
+#define ZEROMV_MODE_CONTEXTS 2
+
+enum blocksizetype {
+ BLOCK_SIZE_AB4X4,
+ BLOCK_SIZE_SB4X8,
+ BLOCK_SIZE_SB8X4,
+ BLOCK_SIZE_SB8X8,
+ BLOCK_SIZE_SB8X16,
+ BLOCK_SIZE_SB16X8,
+ BLOCK_SIZE_MB16X16,
+ BLOCK_SIZE_SB16X32,
+ BLOCK_SIZE_SB32X16,
+ BLOCK_SIZE_SB32X32,
+ BLOCK_SIZE_SB32X64,
+ BLOCK_SIZE_SB64X32,
+ BLOCK_SIZE_SB64X64,
+ BLOCK_SIZE_SB64X128,
+ BLOCK_SIZE_SB128X64,
+ BLOCK_SIZE_SB128X128,
+ BLOCK_SIZE_SB4X16,
+ BLOCK_SIZE_SB16X4,
+ BLOCK_SIZE_SB8X32,
+ BLOCK_SIZE_SB32X8,
+ BLOCK_SIZE_SB16X64,
+ BLOCK_SIZE_SB64X16,
+ BLOCK_SIZE_TYPES,
+ BLOCK_SIZES_ALL = BLOCK_SIZE_TYPES
+};
+
+enum filterintramodetype {
+ FILTER_DC_PRED,
+ FILTER_V_PRED,
+ FILTER_H_PRED,
+ FILTER_D153_PRED,
+ FILTER_PAETH_PRED,
+ FILTER_INTRA_MODES,
+ FILTER_INTRA_UNUSED = 7
+};
+
+enum frametype {
+ KEY_FRAME = 0,
+ INTER_FRAME = 1,
+ NUM_FRAME_TYPES,
+};
+
+enum txsize {
+ TX_4X4 = 0,
+ TX_8X8 = 1,
+ TX_16X16 = 2,
+ TX_32X32 = 3,
+ TX_SIZE_MAX_SB,
+};
+
+enum { SIMPLE_TRANSLATION, OBMC_CAUSAL, MOTION_MODE_COUNT };
+
+enum mb_prediction_mode {
+ DC_PRED, /* average of above and left pixels */
+ V_PRED, /* vertical prediction */
+ H_PRED, /* horizontal prediction */
+ D45_PRED, /* Directional 45 deg prediction [anti-clockwise from 0 deg hor] */
+ D135_PRED, /* Directional 135 deg prediction [anti-clockwise from 0 deg hor] */
+ D117_PRED, /* Directional 112 deg prediction [anti-clockwise from 0 deg hor] */
+ D153_PRED, /* Directional 157 deg prediction [anti-clockwise from 0 deg hor] */
+ D27_PRED, /* Directional 22 deg prediction [anti-clockwise from 0 deg hor] */
+ D63_PRED, /* Directional 67 deg prediction [anti-clockwise from 0 deg hor] */
+ SMOOTH_PRED,
+ TM_PRED_AV1 = SMOOTH_PRED,
+ SMOOTH_V_PRED, // Vertical interpolation
+ SMOOTH_H_PRED, // Horizontal interpolation
+ TM_PRED, /* Truemotion prediction */
+ PAETH_PRED = TM_PRED,
+ NEARESTMV,
+ NEARMV,
+ ZEROMV,
+ NEWMV,
+ NEAREST_NEARESTMV,
+ NEAR_NEARMV,
+ NEAREST_NEWMV,
+ NEW_NEARESTMV,
+ NEAR_NEWMV,
+ NEW_NEARMV,
+ ZERO_ZEROMV,
+ NEW_NEWMV,
+ SPLITMV,
+ MB_MODE_COUNT
+};
+
+enum partitiontype {
+ PARTITION_NONE,
+ PARTITION_HORZ,
+ PARTITION_VERT,
+ PARTITION_SPLIT,
+ PARTITION_TYPES
+};
+
+struct mvcdfs {
+ u16 joint_cdf[3];
+ u16 sign_cdf[2];
+ u16 clsss_cdf[2][10];
+ u16 clsss0_fp_cdf[2][2][3];
+ u16 fp_cdf[2][3];
+ u16 class0_hp_cdf[2];
+ u16 hp_cdf[2];
+ u16 class0_cdf[2];
+ u16 bits_cdf[2][10];
+};
+
+struct av1cdfs {
+ u16 partition_cdf[13][16];
+ u16 kf_ymode_cdf[KF_MODE_CONTEXTS][KF_MODE_CONTEXTS][AV1_INTRA_MODES - 1];
+ u16 segment_pred_cdf[PREDICTION_PROBS];
+ u16 spatial_pred_seg_tree_cdf[SPATIAL_PREDICTION_PROBS][MAX_MB_SEGMENTS - 1];
+ u16 mbskip_cdf[MBSKIP_CONTEXTS];
+ u16 delta_q_cdf[DELTA_Q_PROBS];
+ u16 delta_lf_multi_cdf[FRAME_LF_COUNT][DELTA_LF_PROBS];
+ u16 delta_lf_cdf[DELTA_LF_PROBS];
+ u16 skip_mode_cdf[SKIP_MODE_CONTEXTS];
+ u16 vartx_part_cdf[VARTX_PART_CONTEXTS][1];
+ u16 tx_size_cdf[MAX_TX_CATS][AV1_TX_SIZE_CONTEXTS][MAX_TX_DEPTH];
+ u16 if_ymode_cdf[BLOCK_SIZE_GROUPS][AV1_INTRA_MODES - 1];
+ u16 uv_mode_cdf[2][AV1_INTRA_MODES][AV1_INTRA_MODES - 1 + 1];
+ u16 intra_inter_cdf[INTRA_INTER_CONTEXTS];
+ u16 comp_inter_cdf[COMP_INTER_CONTEXTS];
+ u16 single_ref_cdf[AV1_REF_CONTEXTS][SINGLE_REFS - 1];
+ u16 comp_ref_type_cdf[COMP_REF_TYPE_CONTEXTS][1];
+ u16 uni_comp_ref_cdf[UNI_COMP_REF_CONTEXTS][UNIDIR_COMP_REFS - 1][1];
+ u16 comp_ref_cdf[AV1_REF_CONTEXTS][FWD_REFS - 1];
+ u16 comp_bwdref_cdf[AV1_REF_CONTEXTS][BWD_REFS - 1];
+ u16 newmv_cdf[NEWMV_MODE_CONTEXTS];
+ u16 zeromv_cdf[ZEROMV_MODE_CONTEXTS];
+ u16 refmv_cdf[REFMV_MODE_CONTEXTS];
+ u16 drl_cdf[DRL_MODE_CONTEXTS];
+ u16 interp_filter_cdf[SWITCHABLE_FILTER_CONTEXTS][AV1_SWITCHABLE_FILTERS - 1];
+ struct mvcdfs mv_cdf;
+ u16 obmc_cdf[BLOCK_SIZE_TYPES];
+ u16 motion_mode_cdf[BLOCK_SIZE_TYPES][2];
+ u16 inter_compound_mode_cdf[AV1_INTER_MODE_CONTEXTS][INTER_COMPOUND_MODES - 1];
+ u16 compound_type_cdf[BLOCK_SIZE_TYPES][CDF_SIZE(COMPOUND_TYPES - 1)];
+ u16 interintra_cdf[BLOCK_SIZE_GROUPS];
+ u16 interintra_mode_cdf[BLOCK_SIZE_GROUPS][INTERINTRA_MODES - 1];
+ u16 wedge_interintra_cdf[BLOCK_SIZE_TYPES];
+ u16 wedge_idx_cdf[BLOCK_SIZE_TYPES][CDF_SIZE(16)];
+ u16 palette_y_mode_cdf[PALETTE_BLOCK_SIZES][PALETTE_Y_MODE_CONTEXTS][1];
+ u16 palette_uv_mode_cdf[PALETTE_UV_MODE_CONTEXTS][1];
+ u16 palette_y_size_cdf[PALETTE_BLOCK_SIZES][PALETTE_SIZES - 1];
+ u16 palette_uv_size_cdf[PALETTE_BLOCK_SIZES][PALETTE_SIZES - 1];
+ u16 cfl_sign_cdf[CFL_JOINT_SIGNS - 1];
+ u16 cfl_alpha_cdf[CFL_ALPHA_CONTEXTS][CFL_ALPHABET_SIZE - 1];
+ u16 intrabc_cdf[1];
+ u16 angle_delta_cdf[DIRECTIONAL_MODES][6];
+ u16 filter_intra_mode_cdf[FILTER_INTRA_MODES - 1];
+ u16 filter_intra_cdf[BLOCK_SIZES_ALL];
+ u16 comp_group_idx_cdf[COMP_GROUP_IDX_CONTEXTS][CDF_SIZE(2)];
+ u16 compound_idx_cdf[COMP_INDEX_CONTEXTS][CDF_SIZE(2)];
+ u16 dummy0[14];
+ // Palette index contexts; sizes 1/7, 2/6, 3/5 packed together
+ u16 palette_y_color_index_cdf[PALETTE_IDX_CONTEXTS][8];
+ u16 palette_uv_color_index_cdf[PALETTE_IDX_CONTEXTS][8];
+ u16 tx_type_intra0_cdf[EXTTX_SIZES][AV1_INTRA_MODES][8];
+ u16 tx_type_intra1_cdf[EXTTX_SIZES][AV1_INTRA_MODES][4];
+ u16 tx_type_inter_cdf[2][EXTTX_SIZES][EXT_TX_TYPES];
+ u16 txb_skip_cdf[TX_SIZES][TXB_SKIP_CONTEXTS][CDF_SIZE(2)];
+ u16 eob_extra_cdf[TX_SIZES][PLANE_TYPES][EOB_COEF_CONTEXTS][CDF_SIZE(2)];
+ u16 dummy1[5];
+ u16 eob_flag_cdf16[PLANE_TYPES][2][4];
+ u16 eob_flag_cdf32[PLANE_TYPES][2][8];
+ u16 eob_flag_cdf64[PLANE_TYPES][2][8];
+ u16 eob_flag_cdf128[PLANE_TYPES][2][8];
+ u16 eob_flag_cdf256[PLANE_TYPES][2][8];
+ u16 eob_flag_cdf512[PLANE_TYPES][2][16];
+ u16 eob_flag_cdf1024[PLANE_TYPES][2][16];
+ u16 coeff_base_eob_cdf[TX_SIZES][PLANE_TYPES][SIG_COEF_CONTEXTS_EOB][CDF_SIZE(3)];
+ u16 coeff_base_cdf[TX_SIZES][PLANE_TYPES][SIG_COEF_CONTEXTS][CDF_SIZE(4) + 1];
+ u16 dc_sign_cdf[PLANE_TYPES][DC_SIGN_CONTEXTS][CDF_SIZE(2)];
+ u16 dummy2[2];
+ u16 coeff_br_cdf[TX_SIZES][PLANE_TYPES][LEVEL_CONTEXTS][CDF_SIZE(BR_CDF_SIZE) + 1];
+ u16 dummy3[16];
+};
+
+void rockchip_av1_store_cdfs(struct hantro_ctx *ctx,
+ u32 refresh_frame_flags);
+void rockchip_av1_get_cdfs(struct hantro_ctx *ctx, u32 ref_idx);
+void rockchip_av1_set_default_cdfs(struct av1cdfs *cdfs,
+ struct mvcdfs *cdfs_ndvc);
+void rockchip_av1_default_coeff_probs(u32 base_qindex, void *ptr);
+
+#endif /* _ROCKCHIP_AV1_ENTROPYMODE_H_ */
diff --git a/drivers/media/platform/verisilicon/rockchip_av1_filmgrain.c b/drivers/media/platform/verisilicon/rockchip_av1_filmgrain.c
new file mode 100644
index 0000000000..f2ae84f0b4
--- /dev/null
+++ b/drivers/media/platform/verisilicon/rockchip_av1_filmgrain.c
@@ -0,0 +1,401 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR Apache-2.0)
+
+#include "rockchip_av1_filmgrain.h"
+
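+/*
+ * White noise template from the AV1 specification's film grain synthesis
+ * process; entries are indexed by the pseudo-random generator below.
+ */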
+static const s32 gaussian_sequence[2048] = {
+ 56, 568, -180, 172, 124, -84, 172, -64, -900, 24, 820,
+ 224, 1248, 996, 272, -8, -916, -388, -732, -104, -188, 800,
+ 112, -652, -320, -376, 140, -252, 492, -168, 44, -788, 588,
+ -584, 500, -228, 12, 680, 272, -476, 972, -100, 652, 368,
+ 432, -196, -720, -192, 1000, -332, 652, -136, -552, -604, -4,
+ 192, -220, -136, 1000, -52, 372, -96, -624, 124, -24, 396,
+ 540, -12, -104, 640, 464, 244, -208, -84, 368, -528, -740,
+ 248, -968, -848, 608, 376, -60, -292, -40, -156, 252, -292,
+ 248, 224, -280, 400, -244, 244, -60, 76, -80, 212, 532,
+ 340, 128, -36, 824, -352, -60, -264, -96, -612, 416, -704,
+ 220, -204, 640, -160, 1220, -408, 900, 336, 20, -336, -96,
+ -792, 304, 48, -28, -1232, -1172, -448, 104, -292, -520, 244,
+ 60, -948, 0, -708, 268, 108, 356, -548, 488, -344, -136,
+ 488, -196, -224, 656, -236, -1128, 60, 4, 140, 276, -676,
+ -376, 168, -108, 464, 8, 564, 64, 240, 308, -300, -400,
+ -456, -136, 56, 120, -408, -116, 436, 504, -232, 328, 844,
+ -164, -84, 784, -168, 232, -224, 348, -376, 128, 568, 96,
+ -1244, -288, 276, 848, 832, -360, 656, 464, -384, -332, -356,
+ 728, -388, 160, -192, 468, 296, 224, 140, -776, -100, 280,
+ 4, 196, 44, -36, -648, 932, 16, 1428, 28, 528, 808,
+ 772, 20, 268, 88, -332, -284, 124, -384, -448, 208, -228,
+ -1044, -328, 660, 380, -148, -300, 588, 240, 540, 28, 136,
+ -88, -436, 256, 296, -1000, 1400, 0, -48, 1056, -136, 264,
+ -528, -1108, 632, -484, -592, -344, 796, 124, -668, -768, 388,
+ 1296, -232, -188, -200, -288, -4, 308, 100, -168, 256, -500,
+ 204, -508, 648, -136, 372, -272, -120, -1004, -552, -548, -384,
+ 548, -296, 428, -108, -8, -912, -324, -224, -88, -112, -220,
+ -100, 996, -796, 548, 360, -216, 180, 428, -200, -212, 148,
+ 96, 148, 284, 216, -412, -320, 120, -300, -384, -604, -572,
+ -332, -8, -180, -176, 696, 116, -88, 628, 76, 44, -516,
+ 240, -208, -40, 100, -592, 344, -308, -452, -228, 20, 916,
+ -1752, -136, -340, -804, 140, 40, 512, 340, 248, 184, -492,
+ 896, -156, 932, -628, 328, -688, -448, -616, -752, -100, 560,
+ -1020, 180, -800, -64, 76, 576, 1068, 396, 660, 552, -108,
+ -28, 320, -628, 312, -92, -92, -472, 268, 16, 560, 516,
+ -672, -52, 492, -100, 260, 384, 284, 292, 304, -148, 88,
+ -152, 1012, 1064, -228, 164, -376, -684, 592, -392, 156, 196,
+ -524, -64, -884, 160, -176, 636, 648, 404, -396, -436, 864,
+ 424, -728, 988, -604, 904, -592, 296, -224, 536, -176, -920,
+ 436, -48, 1176, -884, 416, -776, -824, -884, 524, -548, -564,
+ -68, -164, -96, 692, 364, -692, -1012, -68, 260, -480, 876,
+ -1116, 452, -332, -352, 892, -1088, 1220, -676, 12, -292, 244,
+ 496, 372, -32, 280, 200, 112, -440, -96, 24, -644, -184,
+ 56, -432, 224, -980, 272, -260, 144, -436, 420, 356, 364,
+ -528, 76, 172, -744, -368, 404, -752, -416, 684, -688, 72,
+ 540, 416, 92, 444, 480, -72, -1416, 164, -1172, -68, 24,
+ 424, 264, 1040, 128, -912, -524, -356, 64, 876, -12, 4,
+ -88, 532, 272, -524, 320, 276, -508, 940, 24, -400, -120,
+ 756, 60, 236, -412, 100, 376, -484, 400, -100, -740, -108,
+ -260, 328, -268, 224, -200, -416, 184, -604, -564, -20, 296,
+ 60, 892, -888, 60, 164, 68, -760, 216, -296, 904, -336,
+ -28, 404, -356, -568, -208, -1480, -512, 296, 328, -360, -164,
+ -1560, -776, 1156, -428, 164, -504, -112, 120, -216, -148, -264,
+ 308, 32, 64, -72, 72, 116, 176, -64, -272, 460, -536,
+ -784, -280, 348, 108, -752, -132, 524, -540, -776, 116, -296,
+ -1196, -288, -560, 1040, -472, 116, -848, -1116, 116, 636, 696,
+ 284, -176, 1016, 204, -864, -648, -248, 356, 972, -584, -204,
+ 264, 880, 528, -24, -184, 116, 448, -144, 828, 524, 212,
+ -212, 52, 12, 200, 268, -488, -404, -880, 824, -672, -40,
+ 908, -248, 500, 716, -576, 492, -576, 16, 720, -108, 384,
+ 124, 344, 280, 576, -500, 252, 104, -308, 196, -188, -8,
+ 1268, 296, 1032, -1196, 436, 316, 372, -432, -200, -660, 704,
+ -224, 596, -132, 268, 32, -452, 884, 104, -1008, 424, -1348,
+ -280, 4, -1168, 368, 476, 696, 300, -8, 24, 180, -592,
+ -196, 388, 304, 500, 724, -160, 244, -84, 272, -256, -420,
+ 320, 208, -144, -156, 156, 364, 452, 28, 540, 316, 220,
+ -644, -248, 464, 72, 360, 32, -388, 496, -680, -48, 208,
+ -116, -408, 60, -604, -392, 548, -840, 784, -460, 656, -544,
+ -388, -264, 908, -800, -628, -612, -568, 572, -220, 164, 288,
+ -16, -308, 308, -112, -636, -760, 280, -668, 432, 364, 240,
+ -196, 604, 340, 384, 196, 592, -44, -500, 432, -580, -132,
+ 636, -76, 392, 4, -412, 540, 508, 328, -356, -36, 16,
+ -220, -64, -248, -60, 24, -192, 368, 1040, 92, -24, -1044,
+ -32, 40, 104, 148, 192, -136, -520, 56, -816, -224, 732,
+ 392, 356, 212, -80, -424, -1008, -324, 588, -1496, 576, 460,
+ -816, -848, 56, -580, -92, -1372, -112, -496, 200, 364, 52,
+ -140, 48, -48, -60, 84, 72, 40, 132, -356, -268, -104,
+ -284, -404, 732, -520, 164, -304, -540, 120, 328, -76, -460,
+ 756, 388, 588, 236, -436, -72, -176, -404, -316, -148, 716,
+ -604, 404, -72, -88, -888, -68, 944, 88, -220, -344, 960,
+ 472, 460, -232, 704, 120, 832, -228, 692, -508, 132, -476,
+ 844, -748, -364, -44, 1116, -1104, -1056, 76, 428, 552, -692,
+ 60, 356, 96, -384, -188, -612, -576, 736, 508, 892, 352,
+ -1132, 504, -24, -352, 324, 332, -600, -312, 292, 508, -144,
+ -8, 484, 48, 284, -260, -240, 256, -100, -292, -204, -44,
+ 472, -204, 908, -188, -1000, -256, 92, 1164, -392, 564, 356,
+ 652, -28, -884, 256, 484, -192, 760, -176, 376, -524, -452,
+ -436, 860, -736, 212, 124, 504, -476, 468, 76, -472, 552,
+ -692, -944, -620, 740, -240, 400, 132, 20, 192, -196, 264,
+ -668, -1012, -60, 296, -316, -828, 76, -156, 284, -768, -448,
+ -832, 148, 248, 652, 616, 1236, 288, -328, -400, -124, 588,
+ 220, 520, -696, 1032, 768, -740, -92, -272, 296, 448, -464,
+ 412, -200, 392, 440, -200, 264, -152, -260, 320, 1032, 216,
+ 320, -8, -64, 156, -1016, 1084, 1172, 536, 484, -432, 132,
+ 372, -52, -256, 84, 116, -352, 48, 116, 304, -384, 412,
+ 924, -300, 528, 628, 180, 648, 44, -980, -220, 1320, 48,
+ 332, 748, 524, -268, -720, 540, -276, 564, -344, -208, -196,
+ 436, 896, 88, -392, 132, 80, -964, -288, 568, 56, -48,
+ -456, 888, 8, 552, -156, -292, 948, 288, 128, -716, -292,
+ 1192, -152, 876, 352, -600, -260, -812, -468, -28, -120, -32,
+ -44, 1284, 496, 192, 464, 312, -76, -516, -380, -456, -1012,
+ -48, 308, -156, 36, 492, -156, -808, 188, 1652, 68, -120,
+ -116, 316, 160, -140, 352, 808, -416, 592, 316, -480, 56,
+ 528, -204, -568, 372, -232, 752, -344, 744, -4, 324, -416,
+ -600, 768, 268, -248, -88, -132, -420, -432, 80, -288, 404,
+ -316, -1216, -588, 520, -108, 92, -320, 368, -480, -216, -92,
+ 1688, -300, 180, 1020, -176, 820, -68, -228, -260, 436, -904,
+ 20, 40, -508, 440, -736, 312, 332, 204, 760, -372, 728,
+ 96, -20, -632, -520, -560, 336, 1076, -64, -532, 776, 584,
+ 192, 396, -728, -520, 276, -188, 80, -52, -612, -252, -48,
+ 648, 212, -688, 228, -52, -260, 428, -412, -272, -404, 180,
+ 816, -796, 48, 152, 484, -88, -216, 988, 696, 188, -528,
+ 648, -116, -180, 316, 476, 12, -564, 96, 476, -252, -364,
+ -376, -392, 556, -256, -576, 260, -352, 120, -16, -136, -260,
+ -492, 72, 556, 660, 580, 616, 772, 436, 424, -32, -324,
+ -1268, 416, -324, -80, 920, 160, 228, 724, 32, -516, 64,
+ 384, 68, -128, 136, 240, 248, -204, -68, 252, -932, -120,
+ -480, -628, -84, 192, 852, -404, -288, -132, 204, 100, 168,
+ -68, -196, -868, 460, 1080, 380, -80, 244, 0, 484, -888,
+ 64, 184, 352, 600, 460, 164, 604, -196, 320, -64, 588,
+ -184, 228, 12, 372, 48, -848, -344, 224, 208, -200, 484,
+ 128, -20, 272, -468, -840, 384, 256, -720, -520, -464, -580,
+ 112, -120, 644, -356, -208, -608, -528, 704, 560, -424, 392,
+ 828, 40, 84, 200, -152, 0, -144, 584, 280, -120, 80,
+ -556, -972, -196, -472, 724, 80, 168, -32, 88, 160, -688,
+ 0, 160, 356, 372, -776, 740, -128, 676, -248, -480, 4,
+ -364, 96, 544, 232, -1032, 956, 236, 356, 20, -40, 300,
+ 24, -676, -596, 132, 1120, -104, 532, -1096, 568, 648, 444,
+ 508, 380, 188, -376, -604, 1488, 424, 24, 756, -220, -192,
+ 716, 120, 920, 688, 168, 44, -460, 568, 284, 1144, 1160,
+ 600, 424, 888, 656, -356, -320, 220, 316, -176, -724, -188,
+ -816, -628, -348, -228, -380, 1012, -452, -660, 736, 928, 404,
+ -696, -72, -268, -892, 128, 184, -344, -780, 360, 336, 400,
+ 344, 428, 548, -112, 136, -228, -216, -820, -516, 340, 92,
+ -136, 116, -300, 376, -244, 100, -316, -520, -284, -12, 824,
+ 164, -548, -180, -128, 116, -924, -828, 268, -368, -580, 620,
+ 192, 160, 0, -1676, 1068, 424, -56, -360, 468, -156, 720,
+ 288, -528, 556, -364, 548, -148, 504, 316, 152, -648, -620,
+ -684, -24, -376, -384, -108, -920, -1032, 768, 180, -264, -508,
+ -1268, -260, -60, 300, -240, 988, 724, -376, -576, -212, -736,
+ 556, 192, 1092, -620, -880, 376, -56, -4, -216, -32, 836,
+ 268, 396, 1332, 864, -600, 100, 56, -412, -92, 356, 180,
+ 884, -468, -436, 292, -388, -804, -704, -840, 368, -348, 140,
+ -724, 1536, 940, 372, 112, -372, 436, -480, 1136, 296, -32,
+ -228, 132, -48, -220, 868, -1016, -60, -1044, -464, 328, 916,
+ 244, 12, -736, -296, 360, 468, -376, -108, -92, 788, 368,
+ -56, 544, 400, -672, -420, 728, 16, 320, 44, -284, -380,
+ -796, 488, 132, 204, -596, -372, 88, -152, -908, -636, -572,
+ -624, -116, -692, -200, -56, 276, -88, 484, -324, 948, 864,
+ 1000, -456, -184, -276, 292, -296, 156, 676, 320, 160, 908,
+ -84, -1236, -288, -116, 260, -372, -644, 732, -756, -96, 84,
+ 344, -520, 348, -688, 240, -84, 216, -1044, -136, -676, -396,
+ -1500, 960, -40, 176, 168, 1516, 420, -504, -344, -364, -360,
+ 1216, -940, -380, -212, 252, -660, -708, 484, -444, -152, 928,
+ -120, 1112, 476, -260, 560, -148, -344, 108, -196, 228, -288,
+ 504, 560, -328, -88, 288, -1008, 460, -228, 468, -836, -196,
+ 76, 388, 232, 412, -1168, -716, -644, 756, -172, -356, -504,
+ 116, 432, 528, 48, 476, -168, -608, 448, 160, -532, -272,
+ 28, -676, -12, 828, 980, 456, 520, 104, -104, 256, -344,
+ -4, -28, -368, -52, -524, -572, -556, -200, 768, 1124, -208,
+ -512, 176, 232, 248, -148, -888, 604, -600, -304, 804, -156,
+ -212, 488, -192, -804, -256, 368, -360, -916, -328, 228, -240,
+ -448, -472, 856, -556, -364, 572, -12, -156, -368, -340, 432,
+ 252, -752, -152, 288, 268, -580, -848, -592, 108, -76, 244,
+ 312, -716, 592, -80, 436, 360, 4, -248, 160, 516, 584,
+ 732, 44, -468, -280, -292, -156, -588, 28, 308, 912, 24,
+ 124, 156, 180, -252, 944, -924, -772, -520, -428, -624, 300,
+ -212, -1144, 32, -724, 800, -1128, -212, -1288, -848, 180, -416,
+ 440, 192, -576, -792, -76, -1080, 80, -532, -352, -132, 380,
+ -820, 148, 1112, 128, 164, 456, 700, -924, 144, -668, -384,
+ 648, -832, 508, 552, -52, -100, -656, 208, -568, 748, -88,
+ 680, 232, 300, 192, -408, -1012, -152, -252, -268, 272, -876,
+ -664, -648, -332, -136, 16, 12, 1152, -28, 332, -536, 320,
+ -672, -460, -316, 532, -260, 228, -40, 1052, -816, 180, 88,
+ -496, -556, -672, -368, 428, 92, 356, 404, -408, 252, 196,
+ -176, -556, 792, 268, 32, 372, 40, 96, -332, 328, 120,
+ 372, -900, -40, 472, -264, -592, 952, 128, 656, 112, 664,
+ -232, 420, 4, -344, -464, 556, 244, -416, -32, 252, 0,
+ -412, 188, -696, 508, -476, 324, -1096, 656, -312, 560, 264,
+ -136, 304, 160, -64, -580, 248, 336, -720, 560, -348, -288,
+ -276, -196, -500, 852, -544, -236, -1128, -992, -776, 116, 56,
+ 52, 860, 884, 212, -12, 168, 1020, 512, -552, 924, -148,
+ 716, 188, 164, -340, -520, -184, 880, -152, -680, -208, -1156,
+ -300, -528, -472, 364, 100, -744, -1056, -32, 540, 280, 144,
+ -676, -32, -232, -280, -224, 96, 568, -76, 172, 148, 148,
+ 104, 32, -296, -32, 788, -80, 32, -16, 280, 288, 944,
+ 428, -484
+};
+
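+/* Small local helpers for the grain synthesis routines below. */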
+static inline s32 clamp(s32 value, s32 low, s32 high)
+{
+ return value < low ? low : (value > high ? high : value);
+}
+
+static inline s32 round_power_of_two(const s32 val, s32 n)
+{
+ const s32 a = (s32)1 << (n - 1);
+
+ return (val + a) >> n;
+}
+
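+/*
+ * Seed the 16-bit grain PRNG; @luma_num mixes a per-plane constant into
+ * the high and low bytes of @seed.
+ */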
+static void rockchip_av1_init_random_generator(u8 luma_num, u16 seed,
+ u16 *random_register)
+{
+ u16 random_reg = seed;
+
+ random_reg ^= ((luma_num * 37 + 178) & 255) << 8;
+ random_reg ^= ((luma_num * 173 + 105) & 255);
+ *random_register = random_reg;
+}
+
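+/* Advance the 16-bit LFSR by one step (feedback taps at bits 0, 1, 3 and 12). */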
+static inline void rockchip_av1_update_random_register(u16 *random_register)
+{
+ u16 bit;
+ u16 random_reg = *random_register;
+
+ bit = ((random_reg >> 0) ^ (random_reg >> 1) ^ (random_reg >> 3) ^
+ (random_reg >> 12)) & 1;
+ *random_register = (random_reg >> 1) | (bit << 15);
+}
+
+static inline s32 rockchip_av1_get_random_number(u16 random_register)
+{
+ return (random_register >> 5) & ((1 << 11) - 1);
+}
+
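+/*
+ * Build the 73x82 luma grain template: fill it with scaled Gaussian noise,
+ * then run the auto-regressive filter defined by @ar_coeffs_y over it.
+ */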
+void rockchip_av1_generate_luma_grain_block(s32 (*luma_grain_block)[73][82],
+ s32 bitdepth,
+ u8 num_y_points,
+ s32 grain_scale_shift,
+ s32 ar_coeff_lag,
+ s32 (*ar_coeffs_y)[24],
+ s32 ar_coeff_shift,
+ s32 grain_min,
+ s32 grain_max,
+ u16 random_seed)
+{
+ s32 gauss_sec_shift = 12 - bitdepth + grain_scale_shift;
+ u16 grain_random_register = random_seed;
+ s32 i, j;
+
+ for (i = 0; i < 73; i++) {
+ for (j = 0; j < 82; j++) {
+			if (num_y_points > 0) {
+				s32 g;
+
+				rockchip_av1_update_random_register(&grain_random_register);
+				g = rockchip_av1_get_random_number(grain_random_register);
+				(*luma_grain_block)[i][j] =
+					round_power_of_two(gaussian_sequence[g],
+							   gauss_sec_shift);
+			} else {
+				(*luma_grain_block)[i][j] = 0;
+			}
+ }
+ }
+
+ for (i = 3; i < 73; i++)
+ for (j = 3; j < 82 - 3; j++) {
+ s32 pos = 0;
+ s32 wsum = 0;
+ s32 deltarow, deltacol;
+
+ for (deltarow = -ar_coeff_lag; deltarow <= 0;
+ deltarow++) {
+ for (deltacol = -ar_coeff_lag;
+ deltacol <= ar_coeff_lag; deltacol++) {
+ if (deltarow == 0 && deltacol == 0)
+ break;
+					wsum += (*ar_coeffs_y)[pos] *
+						(*luma_grain_block)[i + deltarow][j + deltacol];
+ ++pos;
+ }
+ }
+ (*luma_grain_block)[i][j] =
+ clamp((*luma_grain_block)[i][j] +
+ round_power_of_two(wsum, ar_coeff_shift),
+ grain_min, grain_max);
+ }
+}
+
+// Calculate chroma grain noise once per frame
+void rockchip_av1_generate_chroma_grain_block(s32 (*luma_grain_block)[73][82],
+ s32 (*cb_grain_block)[38][44],
+ s32 (*cr_grain_block)[38][44],
+ s32 bitdepth,
+ u8 num_y_points,
+ u8 num_cb_points,
+ u8 num_cr_points,
+ s32 grain_scale_shift,
+ s32 ar_coeff_lag,
+ s32 (*ar_coeffs_cb)[25],
+ s32 (*ar_coeffs_cr)[25],
+ s32 ar_coeff_shift,
+ s32 grain_min,
+ s32 grain_max,
+ u8 chroma_scaling_from_luma,
+ u16 random_seed)
+{
+ s32 gauss_sec_shift = 12 - bitdepth + grain_scale_shift;
+ u16 grain_random_register = 0;
+ s32 i, j;
+
+ rockchip_av1_init_random_generator(7, random_seed,
+ &grain_random_register);
+ for (i = 0; i < 38; i++) {
+ for (j = 0; j < 44; j++) {
+			if (num_cb_points || chroma_scaling_from_luma) {
+				s32 g;
+
+				rockchip_av1_update_random_register(&grain_random_register);
+				g = rockchip_av1_get_random_number(grain_random_register);
+				(*cb_grain_block)[i][j] =
+					round_power_of_two(gaussian_sequence[g],
+							   gauss_sec_shift);
+			} else {
+				(*cb_grain_block)[i][j] = 0;
+			}
+ }
+ }
+
+ rockchip_av1_init_random_generator(11, random_seed,
+ &grain_random_register);
+ for (i = 0; i < 38; i++) {
+ for (j = 0; j < 44; j++) {
+			if (num_cr_points || chroma_scaling_from_luma) {
+				s32 g;
+
+				rockchip_av1_update_random_register(&grain_random_register);
+				g = rockchip_av1_get_random_number(grain_random_register);
+				(*cr_grain_block)[i][j] =
+					round_power_of_two(gaussian_sequence[g],
+							   gauss_sec_shift);
+			} else {
+				(*cr_grain_block)[i][j] = 0;
+			}
+ }
+ }
+
+ for (i = 3; i < 38; i++) {
+ for (j = 3; j < 44 - 3; j++) {
+ s32 wsum_cb = 0;
+ s32 wsum_cr = 0;
+ s32 pos = 0;
+ s32 deltarow, deltacol;
+
+ for (deltarow = -ar_coeff_lag; deltarow <= 0;
+ deltarow++) {
+ for (deltacol = -ar_coeff_lag;
+ deltacol <= ar_coeff_lag; deltacol++) {
+ if (deltarow == 0 && deltacol == 0)
+ break;
+					wsum_cb += (*ar_coeffs_cb)[pos] *
+						(*cb_grain_block)[i + deltarow][j + deltacol];
+					wsum_cr += (*ar_coeffs_cr)[pos] *
+						(*cr_grain_block)[i + deltarow][j + deltacol];
+ ++pos;
+ }
+ }
+
+ if (num_y_points > 0) {
+ s32 av_luma = 0;
+ s32 luma_coord_y = (i << 1) - 3;
+ s32 luma_coord_x = (j << 1) - 3;
+
+				av_luma += (*luma_grain_block)[luma_coord_y][luma_coord_x];
+				av_luma += (*luma_grain_block)[luma_coord_y][luma_coord_x + 1];
+				av_luma += (*luma_grain_block)[luma_coord_y + 1][luma_coord_x];
+				av_luma += (*luma_grain_block)[luma_coord_y + 1][luma_coord_x + 1];
+ av_luma = round_power_of_two(av_luma, 2);
+
+				wsum_cb += (*ar_coeffs_cb)[pos] * av_luma;
+				wsum_cr += (*ar_coeffs_cr)[pos] * av_luma;
+ }
+
+ if (num_cb_points || chroma_scaling_from_luma) {
+ (*cb_grain_block)[i][j] =
+ clamp((*cb_grain_block)[i][j] +
+ round_power_of_two(wsum_cb, ar_coeff_shift),
+ grain_min, grain_max);
+ }
+ if (num_cr_points || chroma_scaling_from_luma) {
+ (*cr_grain_block)[i][j] =
+ clamp((*cr_grain_block)[i][j] +
+ round_power_of_two(wsum_cr, ar_coeff_shift),
+ grain_min, grain_max);
+ }
+ }
+ }
+}
diff --git a/drivers/media/platform/verisilicon/rockchip_av1_filmgrain.h b/drivers/media/platform/verisilicon/rockchip_av1_filmgrain.h
new file mode 100644
index 0000000000..31a8b7920c
--- /dev/null
+++ b/drivers/media/platform/verisilicon/rockchip_av1_filmgrain.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _ROCKCHIP_AV1_FILMGRAIN_H_
+#define _ROCKCHIP_AV1_FILMGRAIN_H_
+
+#include <linux/types.h>
+
+void rockchip_av1_generate_luma_grain_block(s32 (*luma_grain_block)[73][82],
+ s32 bitdepth,
+ u8 num_y_points,
+ s32 grain_scale_shift,
+ s32 ar_coeff_lag,
+ s32 (*ar_coeffs_y)[24],
+ s32 ar_coeff_shift,
+ s32 grain_min,
+ s32 grain_max,
+ u16 random_seed);
+
+void rockchip_av1_generate_chroma_grain_block(s32 (*luma_grain_block)[73][82],
+ s32 (*cb_grain_block)[38][44],
+ s32 (*cr_grain_block)[38][44],
+ s32 bitdepth,
+ u8 num_y_points,
+ u8 num_cb_points,
+ u8 num_cr_points,
+ s32 grain_scale_shift,
+ s32 ar_coeff_lag,
+ s32 (*ar_coeffs_cb)[25],
+ s32 (*ar_coeffs_cr)[25],
+ s32 ar_coeff_shift,
+ s32 grain_min,
+ s32 grain_max,
+ u8 chroma_scaling_from_luma,
+ u16 random_seed);
+
+#endif
diff --git a/drivers/media/platform/verisilicon/rockchip_vpu2_hw_h264_dec.c b/drivers/media/platform/verisilicon/rockchip_vpu2_hw_h264_dec.c
new file mode 100644
index 0000000000..46c1a83bcc
--- /dev/null
+++ b/drivers/media/platform/verisilicon/rockchip_vpu2_hw_h264_dec.c
@@ -0,0 +1,491 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hantro VPU codec driver
+ *
+ * Copyright (c) 2014 Rockchip Electronics Co., Ltd.
+ * Hertz Wong <hertz.wong@rock-chips.com>
+ * Herman Chen <herman.chen@rock-chips.com>
+ *
+ * Copyright (C) 2014 Google, Inc.
+ * Tomasz Figa <tfiga@chromium.org>
+ */
+
+#include <linux/types.h>
+#include <linux/sort.h>
+
+#include <media/v4l2-mem2mem.h>
+
+#include "hantro_hw.h"
+#include "hantro_v4l2.h"
+
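+/* Byte offset of 32-bit software register @nr. */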
+#define VDPU_SWREG(nr) ((nr) * 4)
+
+#define VDPU_REG_DEC_OUT_BASE VDPU_SWREG(63)
+#define VDPU_REG_RLC_VLC_BASE VDPU_SWREG(64)
+#define VDPU_REG_QTABLE_BASE VDPU_SWREG(61)
+#define VDPU_REG_DIR_MV_BASE VDPU_SWREG(62)
+#define VDPU_REG_REFER_BASE(i) (VDPU_SWREG(84 + (i)))
+#define VDPU_REG_DEC_E(v) ((v) ? BIT(0) : 0)
+
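+/* Field helpers: each macro places @v in its register field (bit flag or masked shift). */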
+#define VDPU_REG_DEC_ADV_PRE_DIS(v) ((v) ? BIT(11) : 0)
+#define VDPU_REG_DEC_SCMD_DIS(v) ((v) ? BIT(10) : 0)
+#define VDPU_REG_FILTERING_DIS(v) ((v) ? BIT(8) : 0)
+#define VDPU_REG_PIC_FIXED_QUANT(v) ((v) ? BIT(7) : 0)
+#define VDPU_REG_DEC_LATENCY(v) (((v) << 1) & GENMASK(6, 1))
+
+#define VDPU_REG_INIT_QP(v) (((v) << 25) & GENMASK(30, 25))
+#define VDPU_REG_STREAM_LEN(v) (((v) << 0) & GENMASK(23, 0))
+
+#define VDPU_REG_APF_THRESHOLD(v) (((v) << 17) & GENMASK(30, 17))
+#define VDPU_REG_STARTMB_X(v) (((v) << 8) & GENMASK(16, 8))
+#define VDPU_REG_STARTMB_Y(v) (((v) << 0) & GENMASK(7, 0))
+
+#define VDPU_REG_DEC_MODE(v) (((v) << 0) & GENMASK(3, 0))
+
+#define VDPU_REG_DEC_STRENDIAN_E(v) ((v) ? BIT(5) : 0)
+#define VDPU_REG_DEC_STRSWAP32_E(v) ((v) ? BIT(4) : 0)
+#define VDPU_REG_DEC_OUTSWAP32_E(v) ((v) ? BIT(3) : 0)
+#define VDPU_REG_DEC_INSWAP32_E(v) ((v) ? BIT(2) : 0)
+#define VDPU_REG_DEC_OUT_ENDIAN(v) ((v) ? BIT(1) : 0)
+#define VDPU_REG_DEC_IN_ENDIAN(v) ((v) ? BIT(0) : 0)
+
+#define VDPU_REG_DEC_DATA_DISC_E(v) ((v) ? BIT(22) : 0)
+#define VDPU_REG_DEC_MAX_BURST(v) (((v) << 16) & GENMASK(20, 16))
+#define VDPU_REG_DEC_AXI_WR_ID(v) (((v) << 8) & GENMASK(15, 8))
+#define VDPU_REG_DEC_AXI_RD_ID(v) (((v) << 0) & GENMASK(7, 0))
+
+#define VDPU_REG_START_CODE_E(v) ((v) ? BIT(22) : 0)
+#define VDPU_REG_CH_8PIX_ILEAV_E(v) ((v) ? BIT(21) : 0)
+#define VDPU_REG_RLC_MODE_E(v) ((v) ? BIT(20) : 0)
+#define VDPU_REG_PIC_INTERLACE_E(v) ((v) ? BIT(17) : 0)
+#define VDPU_REG_PIC_FIELDMODE_E(v) ((v) ? BIT(16) : 0)
+#define VDPU_REG_PIC_TOPFIELD_E(v) ((v) ? BIT(13) : 0)
+#define VDPU_REG_WRITE_MVS_E(v) ((v) ? BIT(10) : 0)
+#define VDPU_REG_SEQ_MBAFF_E(v) ((v) ? BIT(7) : 0)
+#define VDPU_REG_PICORD_COUNT_E(v) ((v) ? BIT(6) : 0)
+#define VDPU_REG_DEC_TIMEOUT_E(v) ((v) ? BIT(5) : 0)
+#define VDPU_REG_DEC_CLK_GATE_E(v) ((v) ? BIT(4) : 0)
+
+#define VDPU_REG_PRED_BC_TAP_0_0(v) (((v) << 22) & GENMASK(31, 22))
+#define VDPU_REG_PRED_BC_TAP_0_1(v) (((v) << 12) & GENMASK(21, 12))
+#define VDPU_REG_PRED_BC_TAP_0_2(v) (((v) << 2) & GENMASK(11, 2))
+
+#define VDPU_REG_REFBU_E(v) ((v) ? BIT(31) : 0)
+
+#define VDPU_REG_PINIT_RLIST_F9(v) (((v) << 25) & GENMASK(29, 25))
+#define VDPU_REG_PINIT_RLIST_F8(v) (((v) << 20) & GENMASK(24, 20))
+#define VDPU_REG_PINIT_RLIST_F7(v) (((v) << 15) & GENMASK(19, 15))
+#define VDPU_REG_PINIT_RLIST_F6(v) (((v) << 10) & GENMASK(14, 10))
+#define VDPU_REG_PINIT_RLIST_F5(v) (((v) << 5) & GENMASK(9, 5))
+#define VDPU_REG_PINIT_RLIST_F4(v) (((v) << 0) & GENMASK(4, 0))
+
+#define VDPU_REG_PINIT_RLIST_F15(v) (((v) << 25) & GENMASK(29, 25))
+#define VDPU_REG_PINIT_RLIST_F14(v) (((v) << 20) & GENMASK(24, 20))
+#define VDPU_REG_PINIT_RLIST_F13(v) (((v) << 15) & GENMASK(19, 15))
+#define VDPU_REG_PINIT_RLIST_F12(v) (((v) << 10) & GENMASK(14, 10))
+#define VDPU_REG_PINIT_RLIST_F11(v) (((v) << 5) & GENMASK(9, 5))
+#define VDPU_REG_PINIT_RLIST_F10(v) (((v) << 0) & GENMASK(4, 0))
+
+#define VDPU_REG_REFER1_NBR(v) (((v) << 16) & GENMASK(31, 16))
+#define VDPU_REG_REFER0_NBR(v) (((v) << 0) & GENMASK(15, 0))
+
+#define VDPU_REG_REFER3_NBR(v) (((v) << 16) & GENMASK(31, 16))
+#define VDPU_REG_REFER2_NBR(v) (((v) << 0) & GENMASK(15, 0))
+
+#define VDPU_REG_REFER5_NBR(v) (((v) << 16) & GENMASK(31, 16))
+#define VDPU_REG_REFER4_NBR(v) (((v) << 0) & GENMASK(15, 0))
+
+#define VDPU_REG_REFER7_NBR(v) (((v) << 16) & GENMASK(31, 16))
+#define VDPU_REG_REFER6_NBR(v) (((v) << 0) & GENMASK(15, 0))
+
+#define VDPU_REG_REFER9_NBR(v) (((v) << 16) & GENMASK(31, 16))
+#define VDPU_REG_REFER8_NBR(v) (((v) << 0) & GENMASK(15, 0))
+
+#define VDPU_REG_REFER11_NBR(v) (((v) << 16) & GENMASK(31, 16))
+#define VDPU_REG_REFER10_NBR(v) (((v) << 0) & GENMASK(15, 0))
+
+#define VDPU_REG_REFER13_NBR(v) (((v) << 16) & GENMASK(31, 16))
+#define VDPU_REG_REFER12_NBR(v) (((v) << 0) & GENMASK(15, 0))
+
+#define VDPU_REG_REFER15_NBR(v) (((v) << 16) & GENMASK(31, 16))
+#define VDPU_REG_REFER14_NBR(v) (((v) << 0) & GENMASK(15, 0))
+
+#define VDPU_REG_BINIT_RLIST_F5(v) (((v) << 25) & GENMASK(29, 25))
+#define VDPU_REG_BINIT_RLIST_F4(v) (((v) << 20) & GENMASK(24, 20))
+#define VDPU_REG_BINIT_RLIST_F3(v) (((v) << 15) & GENMASK(19, 15))
+#define VDPU_REG_BINIT_RLIST_F2(v) (((v) << 10) & GENMASK(14, 10))
+#define VDPU_REG_BINIT_RLIST_F1(v) (((v) << 5) & GENMASK(9, 5))
+#define VDPU_REG_BINIT_RLIST_F0(v) (((v) << 0) & GENMASK(4, 0))
+
+#define VDPU_REG_BINIT_RLIST_F11(v) (((v) << 25) & GENMASK(29, 25))
+#define VDPU_REG_BINIT_RLIST_F10(v) (((v) << 20) & GENMASK(24, 20))
+#define VDPU_REG_BINIT_RLIST_F9(v) (((v) << 15) & GENMASK(19, 15))
+#define VDPU_REG_BINIT_RLIST_F8(v) (((v) << 10) & GENMASK(14, 10))
+#define VDPU_REG_BINIT_RLIST_F7(v) (((v) << 5) & GENMASK(9, 5))
+#define VDPU_REG_BINIT_RLIST_F6(v) (((v) << 0) & GENMASK(4, 0))
+
+#define VDPU_REG_BINIT_RLIST_F15(v) (((v) << 15) & GENMASK(19, 15))
+#define VDPU_REG_BINIT_RLIST_F14(v) (((v) << 10) & GENMASK(14, 10))
+#define VDPU_REG_BINIT_RLIST_F13(v) (((v) << 5) & GENMASK(9, 5))
+#define VDPU_REG_BINIT_RLIST_F12(v) (((v) << 0) & GENMASK(4, 0))
+
+#define VDPU_REG_BINIT_RLIST_B5(v) (((v) << 25) & GENMASK(29, 25))
+#define VDPU_REG_BINIT_RLIST_B4(v) (((v) << 20) & GENMASK(24, 20))
+#define VDPU_REG_BINIT_RLIST_B3(v) (((v) << 15) & GENMASK(19, 15))
+#define VDPU_REG_BINIT_RLIST_B2(v) (((v) << 10) & GENMASK(14, 10))
+#define VDPU_REG_BINIT_RLIST_B1(v) (((v) << 5) & GENMASK(9, 5))
+#define VDPU_REG_BINIT_RLIST_B0(v) (((v) << 0) & GENMASK(4, 0))
+
+#define VDPU_REG_BINIT_RLIST_B11(v) (((v) << 25) & GENMASK(29, 25))
+#define VDPU_REG_BINIT_RLIST_B10(v) (((v) << 20) & GENMASK(24, 20))
+#define VDPU_REG_BINIT_RLIST_B9(v) (((v) << 15) & GENMASK(19, 15))
+#define VDPU_REG_BINIT_RLIST_B8(v) (((v) << 10) & GENMASK(14, 10))
+#define VDPU_REG_BINIT_RLIST_B7(v) (((v) << 5) & GENMASK(9, 5))
+#define VDPU_REG_BINIT_RLIST_B6(v) (((v) << 0) & GENMASK(4, 0))
+
+#define VDPU_REG_BINIT_RLIST_B15(v) (((v) << 15) & GENMASK(19, 15))
+#define VDPU_REG_BINIT_RLIST_B14(v) (((v) << 10) & GENMASK(14, 10))
+#define VDPU_REG_BINIT_RLIST_B13(v) (((v) << 5) & GENMASK(9, 5))
+#define VDPU_REG_BINIT_RLIST_B12(v) (((v) << 0) & GENMASK(4, 0))
+
+#define VDPU_REG_PINIT_RLIST_F3(v) (((v) << 15) & GENMASK(19, 15))
+#define VDPU_REG_PINIT_RLIST_F2(v) (((v) << 10) & GENMASK(14, 10))
+#define VDPU_REG_PINIT_RLIST_F1(v) (((v) << 5) & GENMASK(9, 5))
+#define VDPU_REG_PINIT_RLIST_F0(v) (((v) << 0) & GENMASK(4, 0))
+
+#define VDPU_REG_REFER_LTERM_E(v) (((v) << 0) & GENMASK(31, 0))
+
+#define VDPU_REG_REFER_VALID_E(v) (((v) << 0) & GENMASK(31, 0))
+
+#define VDPU_REG_STRM_START_BIT(v) (((v) << 0) & GENMASK(5, 0))
+
+#define VDPU_REG_CH_QP_OFFSET2(v) (((v) << 22) & GENMASK(26, 22))
+#define VDPU_REG_CH_QP_OFFSET(v) (((v) << 17) & GENMASK(21, 17))
+#define VDPU_REG_PIC_MB_HEIGHT_P(v) (((v) << 9) & GENMASK(16, 9))
+#define VDPU_REG_PIC_MB_WIDTH(v) (((v) << 0) & GENMASK(8, 0))
+
+#define VDPU_REG_WEIGHT_BIPR_IDC(v) (((v) << 16) & GENMASK(17, 16))
+#define VDPU_REG_REF_FRAMES(v) (((v) << 0) & GENMASK(4, 0))
+
+#define VDPU_REG_FILT_CTRL_PRES(v) ((v) ? BIT(31) : 0)
+#define VDPU_REG_RDPIC_CNT_PRES(v) ((v) ? BIT(30) : 0)
+#define VDPU_REG_FRAMENUM_LEN(v) (((v) << 16) & GENMASK(20, 16))
+#define VDPU_REG_FRAMENUM(v) (((v) << 0) & GENMASK(15, 0))
+
+#define VDPU_REG_REFPIC_MK_LEN(v) (((v) << 16) & GENMASK(26, 16))
+#define VDPU_REG_IDR_PIC_ID(v) (((v) << 0) & GENMASK(15, 0))
+
+#define VDPU_REG_PPS_ID(v) (((v) << 24) & GENMASK(31, 24))
+#define VDPU_REG_REFIDX1_ACTIVE(v) (((v) << 19) & GENMASK(23, 19))
+#define VDPU_REG_REFIDX0_ACTIVE(v) (((v) << 14) & GENMASK(18, 14))
+#define VDPU_REG_POC_LENGTH(v) (((v) << 0) & GENMASK(7, 0))
+
+#define VDPU_REG_IDR_PIC_E(v) ((v) ? BIT(8) : 0)
+#define VDPU_REG_DIR_8X8_INFER_E(v) ((v) ? BIT(7) : 0)
+#define VDPU_REG_BLACKWHITE_E(v) ((v) ? BIT(6) : 0)
+#define VDPU_REG_CABAC_E(v) ((v) ? BIT(5) : 0)
+#define VDPU_REG_WEIGHT_PRED_E(v) ((v) ? BIT(4) : 0)
+#define VDPU_REG_CONST_INTRA_E(v) ((v) ? BIT(3) : 0)
+#define VDPU_REG_8X8TRANS_FLAG_E(v) ((v) ? BIT(2) : 0)
+#define VDPU_REG_TYPE1_QUANT_E(v) ((v) ? BIT(1) : 0)
+#define VDPU_REG_FIELDPIC_FLAG_E(v) ((v) ? BIT(0) : 0)
+
+static void set_params(struct hantro_ctx *ctx, struct vb2_v4l2_buffer *src_buf)
+{
+ const struct hantro_h264_dec_ctrls *ctrls = &ctx->h264_dec.ctrls;
+ const struct v4l2_ctrl_h264_decode_params *dec_param = ctrls->decode;
+ const struct v4l2_ctrl_h264_sps *sps = ctrls->sps;
+ const struct v4l2_ctrl_h264_pps *pps = ctrls->pps;
+ struct hantro_dev *vpu = ctx->dev;
+ u32 reg;
+
+ reg = VDPU_REG_DEC_ADV_PRE_DIS(0) |
+ VDPU_REG_DEC_SCMD_DIS(0) |
+ VDPU_REG_FILTERING_DIS(0) |
+ VDPU_REG_PIC_FIXED_QUANT(0) |
+ VDPU_REG_DEC_LATENCY(0);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(50));
+
+ reg = VDPU_REG_INIT_QP(pps->pic_init_qp_minus26 + 26) |
+ VDPU_REG_STREAM_LEN(vb2_get_plane_payload(&src_buf->vb2_buf, 0));
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(51));
+
+ reg = VDPU_REG_APF_THRESHOLD(8) |
+ VDPU_REG_STARTMB_X(0) |
+ VDPU_REG_STARTMB_Y(0);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(52));
+
+ reg = VDPU_REG_DEC_MODE(0);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(53));
+
+ reg = VDPU_REG_DEC_STRENDIAN_E(1) |
+ VDPU_REG_DEC_STRSWAP32_E(1) |
+ VDPU_REG_DEC_OUTSWAP32_E(1) |
+ VDPU_REG_DEC_INSWAP32_E(1) |
+ VDPU_REG_DEC_OUT_ENDIAN(1) |
+ VDPU_REG_DEC_IN_ENDIAN(0);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(54));
+
+ reg = VDPU_REG_DEC_DATA_DISC_E(0) |
+ VDPU_REG_DEC_MAX_BURST(16) |
+ VDPU_REG_DEC_AXI_WR_ID(0) |
+ VDPU_REG_DEC_AXI_RD_ID(0xff);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(56));
+
+ reg = VDPU_REG_START_CODE_E(1) |
+ VDPU_REG_CH_8PIX_ILEAV_E(0) |
+ VDPU_REG_RLC_MODE_E(0) |
+ VDPU_REG_PIC_INTERLACE_E(!(sps->flags & V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY) &&
+ (sps->flags & V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD ||
+ dec_param->flags & V4L2_H264_DECODE_PARAM_FLAG_FIELD_PIC)) |
+ VDPU_REG_PIC_FIELDMODE_E(dec_param->flags & V4L2_H264_DECODE_PARAM_FLAG_FIELD_PIC) |
+ VDPU_REG_PIC_TOPFIELD_E(!(dec_param->flags & V4L2_H264_DECODE_PARAM_FLAG_BOTTOM_FIELD)) |
+ VDPU_REG_WRITE_MVS_E((sps->profile_idc > 66) && dec_param->nal_ref_idc) |
+ VDPU_REG_SEQ_MBAFF_E(sps->flags & V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD) |
+ VDPU_REG_PICORD_COUNT_E(sps->profile_idc > 66) |
+ VDPU_REG_DEC_TIMEOUT_E(1) |
+ VDPU_REG_DEC_CLK_GATE_E(1);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(57));
+
+ reg = VDPU_REG_PRED_BC_TAP_0_0(1) |
+ VDPU_REG_PRED_BC_TAP_0_1((u32)-5) |
+ VDPU_REG_PRED_BC_TAP_0_2(20);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(59));
+
+ reg = VDPU_REG_REFBU_E(0);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(65));
+
+ reg = VDPU_REG_STRM_START_BIT(0);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(109));
+
+ reg = VDPU_REG_CH_QP_OFFSET2(pps->second_chroma_qp_index_offset) |
+ VDPU_REG_CH_QP_OFFSET(pps->chroma_qp_index_offset) |
+ VDPU_REG_PIC_MB_HEIGHT_P(MB_HEIGHT(ctx->src_fmt.height)) |
+ VDPU_REG_PIC_MB_WIDTH(MB_WIDTH(ctx->src_fmt.width));
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(110));
+
+ reg = VDPU_REG_WEIGHT_BIPR_IDC(pps->weighted_bipred_idc) |
+ VDPU_REG_REF_FRAMES(sps->max_num_ref_frames);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(111));
+
+ reg = VDPU_REG_FILT_CTRL_PRES(pps->flags & V4L2_H264_PPS_FLAG_DEBLOCKING_FILTER_CONTROL_PRESENT) |
+ VDPU_REG_RDPIC_CNT_PRES(pps->flags & V4L2_H264_PPS_FLAG_REDUNDANT_PIC_CNT_PRESENT) |
+ VDPU_REG_FRAMENUM_LEN(sps->log2_max_frame_num_minus4 + 4) |
+ VDPU_REG_FRAMENUM(dec_param->frame_num);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(112));
+
+ reg = VDPU_REG_REFPIC_MK_LEN(dec_param->dec_ref_pic_marking_bit_size) |
+ VDPU_REG_IDR_PIC_ID(dec_param->idr_pic_id);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(113));
+
+ reg = VDPU_REG_PPS_ID(pps->pic_parameter_set_id) |
+ VDPU_REG_REFIDX1_ACTIVE(pps->num_ref_idx_l1_default_active_minus1 + 1) |
+ VDPU_REG_REFIDX0_ACTIVE(pps->num_ref_idx_l0_default_active_minus1 + 1) |
+ VDPU_REG_POC_LENGTH(dec_param->pic_order_cnt_bit_size);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(114));
+
+ reg = VDPU_REG_IDR_PIC_E(dec_param->flags & V4L2_H264_DECODE_PARAM_FLAG_IDR_PIC) |
+ VDPU_REG_DIR_8X8_INFER_E(sps->flags & V4L2_H264_SPS_FLAG_DIRECT_8X8_INFERENCE) |
+ VDPU_REG_BLACKWHITE_E(sps->profile_idc >= 100 && sps->chroma_format_idc == 0) |
+ VDPU_REG_CABAC_E(pps->flags & V4L2_H264_PPS_FLAG_ENTROPY_CODING_MODE) |
+ VDPU_REG_WEIGHT_PRED_E(pps->flags & V4L2_H264_PPS_FLAG_WEIGHTED_PRED) |
+ VDPU_REG_CONST_INTRA_E(pps->flags & V4L2_H264_PPS_FLAG_CONSTRAINED_INTRA_PRED) |
+ VDPU_REG_8X8TRANS_FLAG_E(pps->flags & V4L2_H264_PPS_FLAG_TRANSFORM_8X8_MODE) |
+ VDPU_REG_TYPE1_QUANT_E(pps->flags & V4L2_H264_PPS_FLAG_SCALING_MATRIX_PRESENT) |
+ VDPU_REG_FIELDPIC_FLAG_E(!(sps->flags & V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY));
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(115));
+}
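
A note on the field macros used throughout this file: each one both shifts and masks with GENMASK(), so an out-of-range value is silently truncated rather than spilling into a neighbouring field. A quick worked example with VDPU_REG_INIT_QP(), whose field occupies bits 30:25:

	u32 reg = VDPU_REG_INIT_QP(77);
	/*
	 * (77 << 25) & GENMASK(30, 25): only the low six bits of 77
	 * survive, so reg == 13 << 25 -- bit 6 of the value is dropped.
	 */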
+
+static void set_ref(struct hantro_ctx *ctx)
+{
+ const struct v4l2_h264_reference *b0_reflist, *b1_reflist, *p_reflist;
+ struct hantro_dev *vpu = ctx->dev;
+ u32 reg;
+ int i;
+
+ b0_reflist = ctx->h264_dec.reflists.b0;
+ b1_reflist = ctx->h264_dec.reflists.b1;
+ p_reflist = ctx->h264_dec.reflists.p;
+
+ reg = VDPU_REG_PINIT_RLIST_F9(p_reflist[9].index) |
+ VDPU_REG_PINIT_RLIST_F8(p_reflist[8].index) |
+ VDPU_REG_PINIT_RLIST_F7(p_reflist[7].index) |
+ VDPU_REG_PINIT_RLIST_F6(p_reflist[6].index) |
+ VDPU_REG_PINIT_RLIST_F5(p_reflist[5].index) |
+ VDPU_REG_PINIT_RLIST_F4(p_reflist[4].index);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(74));
+
+ reg = VDPU_REG_PINIT_RLIST_F15(p_reflist[15].index) |
+ VDPU_REG_PINIT_RLIST_F14(p_reflist[14].index) |
+ VDPU_REG_PINIT_RLIST_F13(p_reflist[13].index) |
+ VDPU_REG_PINIT_RLIST_F12(p_reflist[12].index) |
+ VDPU_REG_PINIT_RLIST_F11(p_reflist[11].index) |
+ VDPU_REG_PINIT_RLIST_F10(p_reflist[10].index);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(75));
+
+ reg = VDPU_REG_REFER1_NBR(hantro_h264_get_ref_nbr(ctx, 1)) |
+ VDPU_REG_REFER0_NBR(hantro_h264_get_ref_nbr(ctx, 0));
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(76));
+
+ reg = VDPU_REG_REFER3_NBR(hantro_h264_get_ref_nbr(ctx, 3)) |
+ VDPU_REG_REFER2_NBR(hantro_h264_get_ref_nbr(ctx, 2));
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(77));
+
+ reg = VDPU_REG_REFER5_NBR(hantro_h264_get_ref_nbr(ctx, 5)) |
+ VDPU_REG_REFER4_NBR(hantro_h264_get_ref_nbr(ctx, 4));
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(78));
+
+ reg = VDPU_REG_REFER7_NBR(hantro_h264_get_ref_nbr(ctx, 7)) |
+ VDPU_REG_REFER6_NBR(hantro_h264_get_ref_nbr(ctx, 6));
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(79));
+
+ reg = VDPU_REG_REFER9_NBR(hantro_h264_get_ref_nbr(ctx, 9)) |
+ VDPU_REG_REFER8_NBR(hantro_h264_get_ref_nbr(ctx, 8));
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(80));
+
+ reg = VDPU_REG_REFER11_NBR(hantro_h264_get_ref_nbr(ctx, 11)) |
+ VDPU_REG_REFER10_NBR(hantro_h264_get_ref_nbr(ctx, 10));
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(81));
+
+ reg = VDPU_REG_REFER13_NBR(hantro_h264_get_ref_nbr(ctx, 13)) |
+ VDPU_REG_REFER12_NBR(hantro_h264_get_ref_nbr(ctx, 12));
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(82));
+
+ reg = VDPU_REG_REFER15_NBR(hantro_h264_get_ref_nbr(ctx, 15)) |
+ VDPU_REG_REFER14_NBR(hantro_h264_get_ref_nbr(ctx, 14));
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(83));
+
+ reg = VDPU_REG_BINIT_RLIST_F5(b0_reflist[5].index) |
+ VDPU_REG_BINIT_RLIST_F4(b0_reflist[4].index) |
+ VDPU_REG_BINIT_RLIST_F3(b0_reflist[3].index) |
+ VDPU_REG_BINIT_RLIST_F2(b0_reflist[2].index) |
+ VDPU_REG_BINIT_RLIST_F1(b0_reflist[1].index) |
+ VDPU_REG_BINIT_RLIST_F0(b0_reflist[0].index);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(100));
+
+ reg = VDPU_REG_BINIT_RLIST_F11(b0_reflist[11].index) |
+ VDPU_REG_BINIT_RLIST_F10(b0_reflist[10].index) |
+ VDPU_REG_BINIT_RLIST_F9(b0_reflist[9].index) |
+ VDPU_REG_BINIT_RLIST_F8(b0_reflist[8].index) |
+ VDPU_REG_BINIT_RLIST_F7(b0_reflist[7].index) |
+ VDPU_REG_BINIT_RLIST_F6(b0_reflist[6].index);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(101));
+
+ reg = VDPU_REG_BINIT_RLIST_F15(b0_reflist[15].index) |
+ VDPU_REG_BINIT_RLIST_F14(b0_reflist[14].index) |
+ VDPU_REG_BINIT_RLIST_F13(b0_reflist[13].index) |
+ VDPU_REG_BINIT_RLIST_F12(b0_reflist[12].index);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(102));
+
+ reg = VDPU_REG_BINIT_RLIST_B5(b1_reflist[5].index) |
+ VDPU_REG_BINIT_RLIST_B4(b1_reflist[4].index) |
+ VDPU_REG_BINIT_RLIST_B3(b1_reflist[3].index) |
+ VDPU_REG_BINIT_RLIST_B2(b1_reflist[2].index) |
+ VDPU_REG_BINIT_RLIST_B1(b1_reflist[1].index) |
+ VDPU_REG_BINIT_RLIST_B0(b1_reflist[0].index);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(103));
+
+ reg = VDPU_REG_BINIT_RLIST_B11(b1_reflist[11].index) |
+ VDPU_REG_BINIT_RLIST_B10(b1_reflist[10].index) |
+ VDPU_REG_BINIT_RLIST_B9(b1_reflist[9].index) |
+ VDPU_REG_BINIT_RLIST_B8(b1_reflist[8].index) |
+ VDPU_REG_BINIT_RLIST_B7(b1_reflist[7].index) |
+ VDPU_REG_BINIT_RLIST_B6(b1_reflist[6].index);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(104));
+
+ reg = VDPU_REG_BINIT_RLIST_B15(b1_reflist[15].index) |
+ VDPU_REG_BINIT_RLIST_B14(b1_reflist[14].index) |
+ VDPU_REG_BINIT_RLIST_B13(b1_reflist[13].index) |
+ VDPU_REG_BINIT_RLIST_B12(b1_reflist[12].index);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(105));
+
+ reg = VDPU_REG_PINIT_RLIST_F3(p_reflist[3].index) |
+ VDPU_REG_PINIT_RLIST_F2(p_reflist[2].index) |
+ VDPU_REG_PINIT_RLIST_F1(p_reflist[1].index) |
+ VDPU_REG_PINIT_RLIST_F0(p_reflist[0].index);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(106));
+
+ reg = VDPU_REG_REFER_LTERM_E(ctx->h264_dec.dpb_longterm);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(107));
+
+ reg = VDPU_REG_REFER_VALID_E(ctx->h264_dec.dpb_valid);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(108));
+
+ /* Set up addresses of DPB buffers. */
+ for (i = 0; i < HANTRO_H264_DPB_SIZE; i++) {
+ dma_addr_t dma_addr = hantro_h264_get_ref_buf(ctx, i);
+
+ vdpu_write_relaxed(vpu, dma_addr, VDPU_REG_REFER_BASE(i));
+ }
+}
+
+static void set_buffers(struct hantro_ctx *ctx, struct vb2_v4l2_buffer *src_buf)
+{
+ const struct hantro_h264_dec_ctrls *ctrls = &ctx->h264_dec.ctrls;
+ struct vb2_v4l2_buffer *dst_buf;
+ struct hantro_dev *vpu = ctx->dev;
+ dma_addr_t src_dma, dst_dma;
+ size_t offset = 0;
+
+ /* Source (stream) buffer. */
+ src_dma = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
+ vdpu_write_relaxed(vpu, src_dma, VDPU_REG_RLC_VLC_BASE);
+
+ /* Destination (decoded frame) buffer. */
+ dst_buf = hantro_get_dst_buf(ctx);
+ dst_dma = hantro_get_dec_buf_addr(ctx, &dst_buf->vb2_buf);
+	/* Adjust the DMA address to the second line for the bottom field */
+ if (ctrls->decode->flags & V4L2_H264_DECODE_PARAM_FLAG_BOTTOM_FIELD)
+ offset = ALIGN(ctx->src_fmt.width, MB_DIM);
+ vdpu_write_relaxed(vpu, dst_dma + offset, VDPU_REG_DEC_OUT_BASE);
+
+	/* Higher profiles require a DMV buffer appended to reference frames. */
+ if (ctrls->sps->profile_idc > 66 && ctrls->decode->nal_ref_idc) {
+ unsigned int bytes_per_mb = 384;
+
+		/* The DMV buffer for monochrome starts directly after the Y-plane */
+ if (ctrls->sps->profile_idc >= 100 &&
+ ctrls->sps->chroma_format_idc == 0)
+ bytes_per_mb = 256;
+ offset = bytes_per_mb * MB_WIDTH(ctx->src_fmt.width) *
+ MB_HEIGHT(ctx->src_fmt.height);
+
+		/*
+		 * The DMV buffer is split in two for field-encoded frames;
+		 * adjust the offset for the bottom field.
+		 */
+ if (ctrls->decode->flags & V4L2_H264_DECODE_PARAM_FLAG_BOTTOM_FIELD)
+ offset += 32 * MB_WIDTH(ctx->src_fmt.width) *
+ MB_HEIGHT(ctx->src_fmt.height);
+ vdpu_write_relaxed(vpu, dst_dma + offset, VDPU_REG_DIR_MV_BASE);
+ }
+
+	/* Auxiliary buffer prepared in hantro_h264_dec_prepare_run(). */
+ vdpu_write_relaxed(vpu, ctx->h264_dec.priv.dma, VDPU_REG_QTABLE_BASE);
+}
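
The DMV offset computed above is simply the size of one reconstructed frame, since the motion-vector area is appended to each reference buffer. A worked example for a hypothetical 1920x1088 4:2:0 stream (384 bytes per macroblock: 256 luma plus 128 chroma for a 16x16 block):

	offset = 384 * MB_WIDTH(1920) * MB_HEIGHT(1088);
	/*     = 384 * 120 * 68 = 3133440 bytes */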
+
+int rockchip_vpu2_h264_dec_run(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ struct vb2_v4l2_buffer *src_buf;
+ u32 reg;
+ int ret;
+
+ /* Prepare the H264 decoder context. */
+ ret = hantro_h264_dec_prepare_run(ctx);
+ if (ret)
+ return ret;
+
+ src_buf = hantro_get_src_buf(ctx);
+ set_params(ctx, src_buf);
+ set_ref(ctx);
+ set_buffers(ctx, src_buf);
+
+ hantro_end_prepare_run(ctx);
+
+ /* Start decoding! */
+ reg = vdpu_read(vpu, VDPU_SWREG(57)) | VDPU_REG_DEC_E(1);
+ vdpu_write(vpu, reg, VDPU_SWREG(57));
+
+ return 0;
+}
diff --git a/drivers/media/platform/verisilicon/rockchip_vpu2_hw_jpeg_enc.c b/drivers/media/platform/verisilicon/rockchip_vpu2_hw_jpeg_enc.c
new file mode 100644
index 0000000000..8395c4d48d
--- /dev/null
+++ b/drivers/media/platform/verisilicon/rockchip_vpu2_hw_jpeg_enc.c
@@ -0,0 +1,197 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hantro VPU codec driver
+ *
+ * Copyright (C) 2018 Rockchip Electronics Co., Ltd.
+ *
+ * JPEG encoder
+ * ------------
+ * The VPU JPEG encoder produces JPEG baseline sequential format.
+ * The quantization coefficients are 8-bit values, complying with
+ * the baseline specification, so the driver must supply luma and
+ * chroma quantization tables. The hardware does entropy
+ * encoding using internal Huffman tables, as specified in the JPEG
+ * specification.
+ *
+ * In other words, only the luma and chroma quantization tables are
+ * required for the encoding operation.
+ *
+ * Quantization luma table values are written to registers
+ * VEPU_swreg_0-VEPU_swreg_15, and chroma table values to
+ * VEPU_swreg_16-VEPU_swreg_31. The values must be written in a
+ * hardware-specific order that is neither zigzag nor linear.
+ */
+
+#include <asm/unaligned.h>
+#include <media/v4l2-mem2mem.h>
+#include "hantro_jpeg.h"
+#include "hantro.h"
+#include "hantro_v4l2.h"
+#include "hantro_hw.h"
+#include "rockchip_vpu2_regs.h"
+
+#define VEPU_JPEG_QUANT_TABLE_COUNT 16
+
+static void rockchip_vpu2_set_src_img_ctrl(struct hantro_dev *vpu,
+ struct hantro_ctx *ctx)
+{
+ u32 overfill_r, overfill_b;
+ u32 reg;
+
+	/*
+	 * The format width and height are already macroblock aligned
+	 * by the .vidioc_s_fmt_vid_cap_mplane() callback. The destination
+	 * format width and height can be further modified by
+	 * .vidioc_s_selection(), and the width is kept 4-aligned.
+	 */
+ overfill_r = ctx->src_fmt.width - ctx->dst_fmt.width;
+ overfill_b = ctx->src_fmt.height - ctx->dst_fmt.height;
+
+ reg = VEPU_REG_IN_IMG_CTRL_ROW_LEN(ctx->src_fmt.width);
+ vepu_write_relaxed(vpu, reg, VEPU_REG_INPUT_LUMA_INFO);
+
+ reg = VEPU_REG_IN_IMG_CTRL_OVRFLR_D4(overfill_r / 4) |
+ VEPU_REG_IN_IMG_CTRL_OVRFLB(overfill_b);
+	/*
+	 * This register controls the input crop as an offset from the
+	 * right/bottom edge within the last macroblock. The right offset is
+	 * programmed in units of 4 pixels, so the crop must be aligned to
+	 * 4 pixels horizontally.
+	 */
+ vepu_write_relaxed(vpu, reg, VEPU_REG_ENC_OVER_FILL_STRM_OFFSET);
+
+ reg = VEPU_REG_IN_IMG_CTRL_FMT(ctx->vpu_src_fmt->enc_fmt);
+ vepu_write_relaxed(vpu, reg, VEPU_REG_ENC_CTRL1);
+}
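
To make the overfill computation concrete, consider a hypothetical 1080p capture: macroblock alignment pads the source format to 1920x1088, while the selection keeps the destination at 1920x1080:

	overfill_r = 1920 - 1920;	/* 0: the OVRFLR_D4 field gets 0 / 4 = 0 */
	overfill_b = 1088 - 1080;	/* 8: the OVRFLB field crops 8 bottom rows */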
+
+static void rockchip_vpu2_jpeg_enc_set_buffers(struct hantro_dev *vpu,
+ struct hantro_ctx *ctx,
+ struct vb2_buffer *src_buf,
+ struct vb2_buffer *dst_buf)
+{
+ struct v4l2_pix_format_mplane *pix_fmt = &ctx->src_fmt;
+ dma_addr_t src[3];
+ u32 size_left;
+
+ size_left = vb2_plane_size(dst_buf, 0) - ctx->vpu_dst_fmt->header_size;
+ if (WARN_ON(vb2_plane_size(dst_buf, 0) < ctx->vpu_dst_fmt->header_size))
+ size_left = 0;
+
+ WARN_ON(pix_fmt->num_planes > 3);
+
+ vepu_write_relaxed(vpu, vb2_dma_contig_plane_dma_addr(dst_buf, 0) +
+ ctx->vpu_dst_fmt->header_size,
+ VEPU_REG_ADDR_OUTPUT_STREAM);
+ vepu_write_relaxed(vpu, size_left, VEPU_REG_STR_BUF_LIMIT);
+
+ if (pix_fmt->num_planes == 1) {
+ src[0] = vb2_dma_contig_plane_dma_addr(src_buf, 0);
+ vepu_write_relaxed(vpu, src[0], VEPU_REG_ADDR_IN_PLANE_0);
+ } else if (pix_fmt->num_planes == 2) {
+ src[0] = vb2_dma_contig_plane_dma_addr(src_buf, 0);
+ src[1] = vb2_dma_contig_plane_dma_addr(src_buf, 1);
+ vepu_write_relaxed(vpu, src[0], VEPU_REG_ADDR_IN_PLANE_0);
+ vepu_write_relaxed(vpu, src[1], VEPU_REG_ADDR_IN_PLANE_1);
+ } else {
+ src[0] = vb2_dma_contig_plane_dma_addr(src_buf, 0);
+ src[1] = vb2_dma_contig_plane_dma_addr(src_buf, 1);
+ src[2] = vb2_dma_contig_plane_dma_addr(src_buf, 2);
+ vepu_write_relaxed(vpu, src[0], VEPU_REG_ADDR_IN_PLANE_0);
+ vepu_write_relaxed(vpu, src[1], VEPU_REG_ADDR_IN_PLANE_1);
+ vepu_write_relaxed(vpu, src[2], VEPU_REG_ADDR_IN_PLANE_2);
+ }
+}
+
+static void
+rockchip_vpu2_jpeg_enc_set_qtable(struct hantro_dev *vpu,
+ unsigned char *luma_qtable,
+ unsigned char *chroma_qtable)
+{
+ u32 reg, i;
+ __be32 *luma_qtable_p;
+ __be32 *chroma_qtable_p;
+
+ luma_qtable_p = (__be32 *)luma_qtable;
+ chroma_qtable_p = (__be32 *)chroma_qtable;
+
+ /*
+ * Quantization table registers must be written in contiguous blocks.
+ * DO NOT collapse the below two "for" loops into one.
+ */
+ for (i = 0; i < VEPU_JPEG_QUANT_TABLE_COUNT; i++) {
+ reg = get_unaligned_be32(&luma_qtable_p[i]);
+ vepu_write_relaxed(vpu, reg, VEPU_REG_JPEG_LUMA_QUAT(i));
+ }
+
+ for (i = 0; i < VEPU_JPEG_QUANT_TABLE_COUNT; i++) {
+ reg = get_unaligned_be32(&chroma_qtable_p[i]);
+ vepu_write_relaxed(vpu, reg, VEPU_REG_JPEG_CHROMA_QUAT(i));
+ }
+}
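
Each quantization table holds 64 8-bit coefficients, so the 16 register words written above carry four coefficients apiece. The get_unaligned_be32() call is just a safe way to pack consecutive bytes most-significant first; an equivalent open-coded form, with q standing for the byte array behind luma_qtable or chroma_qtable, would be:

	u32 word = ((u32)q[4 * i] << 24) | ((u32)q[4 * i + 1] << 16) |
		   ((u32)q[4 * i + 2] << 8) | (u32)q[4 * i + 3];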
+
+int rockchip_vpu2_jpeg_enc_run(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
+ struct hantro_jpeg_ctx jpeg_ctx;
+ u32 reg;
+
+ src_buf = hantro_get_src_buf(ctx);
+ dst_buf = hantro_get_dst_buf(ctx);
+
+ hantro_start_prepare_run(ctx);
+
+ memset(&jpeg_ctx, 0, sizeof(jpeg_ctx));
+ jpeg_ctx.buffer = vb2_plane_vaddr(&dst_buf->vb2_buf, 0);
+ if (!jpeg_ctx.buffer)
+ return -ENOMEM;
+
+ jpeg_ctx.width = ctx->dst_fmt.width;
+ jpeg_ctx.height = ctx->dst_fmt.height;
+ jpeg_ctx.quality = ctx->jpeg_quality;
+ hantro_jpeg_header_assemble(&jpeg_ctx);
+
+ /* Switch to JPEG encoder mode before writing registers */
+ vepu_write_relaxed(vpu, VEPU_REG_ENCODE_FORMAT_JPEG,
+ VEPU_REG_ENCODE_START);
+
+ rockchip_vpu2_set_src_img_ctrl(vpu, ctx);
+ rockchip_vpu2_jpeg_enc_set_buffers(vpu, ctx, &src_buf->vb2_buf,
+ &dst_buf->vb2_buf);
+ rockchip_vpu2_jpeg_enc_set_qtable(vpu, jpeg_ctx.hw_luma_qtable,
+ jpeg_ctx.hw_chroma_qtable);
+
+ reg = VEPU_REG_OUTPUT_SWAP32
+ | VEPU_REG_OUTPUT_SWAP16
+ | VEPU_REG_OUTPUT_SWAP8
+ | VEPU_REG_INPUT_SWAP8
+ | VEPU_REG_INPUT_SWAP16
+ | VEPU_REG_INPUT_SWAP32;
+ /* Make sure that all registers are written at this point. */
+ vepu_write(vpu, reg, VEPU_REG_DATA_ENDIAN);
+
+ reg = VEPU_REG_AXI_CTRL_BURST_LEN(16);
+ vepu_write_relaxed(vpu, reg, VEPU_REG_AXI_CTRL);
+
+ reg = VEPU_REG_MB_WIDTH(MB_WIDTH(ctx->src_fmt.width))
+ | VEPU_REG_MB_HEIGHT(MB_HEIGHT(ctx->src_fmt.height))
+ | VEPU_REG_FRAME_TYPE_INTRA
+ | VEPU_REG_ENCODE_FORMAT_JPEG
+ | VEPU_REG_ENCODE_ENABLE;
+
+ /* Kick the watchdog and start encoding */
+ hantro_end_prepare_run(ctx);
+ vepu_write(vpu, reg, VEPU_REG_ENCODE_START);
+
+ return 0;
+}
+
+void rockchip_vpu2_jpeg_enc_done(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ u32 bytesused = vepu_read(vpu, VEPU_REG_STR_BUF_LIMIT) / 8;
+ struct vb2_v4l2_buffer *dst_buf = hantro_get_dst_buf(ctx);
+
+ vb2_set_plane_payload(&dst_buf->vb2_buf, 0,
+ ctx->vpu_dst_fmt->header_size + bytesused);
+}
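
The division by 8 in rockchip_vpu2_jpeg_enc_done() implies that VEPU_REG_STR_BUF_LIMIT reads back the produced stream length in bits once encoding completes; that is an inference from this code rather than from published documentation. For example:

	u32 bits = vepu_read(vpu, VEPU_REG_STR_BUF_LIMIT);	/* e.g. 74560 */
	u32 bytesused = bits / 8;				/* 9320 bytes */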
diff --git a/drivers/media/platform/verisilicon/rockchip_vpu2_hw_mpeg2_dec.c b/drivers/media/platform/verisilicon/rockchip_vpu2_hw_mpeg2_dec.c
new file mode 100644
index 0000000000..b66737fab4
--- /dev/null
+++ b/drivers/media/platform/verisilicon/rockchip_vpu2_hw_mpeg2_dec.c
@@ -0,0 +1,248 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hantro VPU codec driver
+ *
+ * Copyright (C) 2018 Rockchip Electronics Co., Ltd.
+ */
+
+#include <asm/unaligned.h>
+#include <linux/bitfield.h>
+#include <media/v4l2-mem2mem.h>
+#include "hantro.h"
+#include "hantro_hw.h"
+
+#define VDPU_SWREG(nr) ((nr) * 4)
+
+#define VDPU_REG_DEC_OUT_BASE VDPU_SWREG(63)
+#define VDPU_REG_RLC_VLC_BASE VDPU_SWREG(64)
+#define VDPU_REG_QTABLE_BASE VDPU_SWREG(61)
+#define VDPU_REG_REFER0_BASE VDPU_SWREG(131)
+#define VDPU_REG_REFER2_BASE VDPU_SWREG(134)
+#define VDPU_REG_REFER3_BASE VDPU_SWREG(135)
+#define VDPU_REG_REFER1_BASE VDPU_SWREG(148)
+#define VDPU_REG_DEC_E(v) ((v) ? BIT(0) : 0)
+
+#define VDPU_REG_DEC_ADV_PRE_DIS(v) ((v) ? BIT(11) : 0)
+#define VDPU_REG_DEC_SCMD_DIS(v) ((v) ? BIT(10) : 0)
+#define VDPU_REG_FILTERING_DIS(v) ((v) ? BIT(8) : 0)
+#define VDPU_REG_DEC_LATENCY(v) (((v) << 1) & GENMASK(6, 1))
+
+#define VDPU_REG_INIT_QP(v) (((v) << 25) & GENMASK(30, 25))
+#define VDPU_REG_STREAM_LEN(v) (((v) << 0) & GENMASK(23, 0))
+
+#define VDPU_REG_APF_THRESHOLD(v) (((v) << 17) & GENMASK(30, 17))
+#define VDPU_REG_STARTMB_X(v) (((v) << 8) & GENMASK(16, 8))
+#define VDPU_REG_STARTMB_Y(v) (((v) << 0) & GENMASK(7, 0))
+
+#define VDPU_REG_DEC_MODE(v) (((v) << 0) & GENMASK(3, 0))
+
+#define VDPU_REG_DEC_STRENDIAN_E(v) ((v) ? BIT(5) : 0)
+#define VDPU_REG_DEC_STRSWAP32_E(v) ((v) ? BIT(4) : 0)
+#define VDPU_REG_DEC_OUTSWAP32_E(v) ((v) ? BIT(3) : 0)
+#define VDPU_REG_DEC_INSWAP32_E(v) ((v) ? BIT(2) : 0)
+#define VDPU_REG_DEC_OUT_ENDIAN(v) ((v) ? BIT(1) : 0)
+#define VDPU_REG_DEC_IN_ENDIAN(v) ((v) ? BIT(0) : 0)
+
+#define VDPU_REG_DEC_DATA_DISC_E(v) ((v) ? BIT(22) : 0)
+#define VDPU_REG_DEC_MAX_BURST(v) (((v) << 16) & GENMASK(20, 16))
+#define VDPU_REG_DEC_AXI_WR_ID(v) (((v) << 8) & GENMASK(15, 8))
+#define VDPU_REG_DEC_AXI_RD_ID(v) (((v) << 0) & GENMASK(7, 0))
+
+#define VDPU_REG_RLC_MODE_E(v) ((v) ? BIT(20) : 0)
+#define VDPU_REG_PIC_INTERLACE_E(v) ((v) ? BIT(17) : 0)
+#define VDPU_REG_PIC_FIELDMODE_E(v) ((v) ? BIT(16) : 0)
+#define VDPU_REG_PIC_B_E(v) ((v) ? BIT(15) : 0)
+#define VDPU_REG_PIC_INTER_E(v) ((v) ? BIT(14) : 0)
+#define VDPU_REG_PIC_TOPFIELD_E(v) ((v) ? BIT(13) : 0)
+#define VDPU_REG_FWD_INTERLACE_E(v) ((v) ? BIT(12) : 0)
+#define VDPU_REG_WRITE_MVS_E(v) ((v) ? BIT(10) : 0)
+#define VDPU_REG_DEC_TIMEOUT_E(v) ((v) ? BIT(5) : 0)
+#define VDPU_REG_DEC_CLK_GATE_E(v) ((v) ? BIT(4) : 0)
+
+#define VDPU_REG_PIC_MB_WIDTH(v) (((v) << 23) & GENMASK(31, 23))
+#define VDPU_REG_PIC_MB_HEIGHT_P(v) (((v) << 11) & GENMASK(18, 11))
+#define VDPU_REG_ALT_SCAN_E(v) ((v) ? BIT(6) : 0)
+#define VDPU_REG_TOPFIELDFIRST_E(v) ((v) ? BIT(5) : 0)
+
+#define VDPU_REG_STRM_START_BIT(v) (((v) << 26) & GENMASK(31, 26))
+#define VDPU_REG_QSCALE_TYPE(v) ((v) ? BIT(24) : 0)
+#define VDPU_REG_CON_MV_E(v) ((v) ? BIT(4) : 0)
+#define VDPU_REG_INTRA_DC_PREC(v) (((v) << 2) & GENMASK(3, 2))
+#define VDPU_REG_INTRA_VLC_TAB(v) ((v) ? BIT(1) : 0)
+#define VDPU_REG_FRAME_PRED_DCT(v) ((v) ? BIT(0) : 0)
+
+#define VDPU_REG_ALT_SCAN_FLAG_E(v) ((v) ? BIT(19) : 0)
+#define VDPU_REG_FCODE_FWD_HOR(v) (((v) << 15) & GENMASK(18, 15))
+#define VDPU_REG_FCODE_FWD_VER(v) (((v) << 11) & GENMASK(14, 11))
+#define VDPU_REG_FCODE_BWD_HOR(v) (((v) << 7) & GENMASK(10, 7))
+#define VDPU_REG_FCODE_BWD_VER(v) (((v) << 3) & GENMASK(6, 3))
+#define VDPU_REG_MV_ACCURACY_FWD(v) ((v) ? BIT(2) : 0)
+#define VDPU_REG_MV_ACCURACY_BWD(v) ((v) ? BIT(1) : 0)
+
+static void
+rockchip_vpu2_mpeg2_dec_set_quantisation(struct hantro_dev *vpu,
+ struct hantro_ctx *ctx)
+{
+ struct v4l2_ctrl_mpeg2_quantisation *q;
+
+ q = hantro_get_ctrl(ctx, V4L2_CID_STATELESS_MPEG2_QUANTISATION);
+ hantro_mpeg2_dec_copy_qtable(ctx->mpeg2_dec.qtable.cpu, q);
+ vdpu_write_relaxed(vpu, ctx->mpeg2_dec.qtable.dma, VDPU_REG_QTABLE_BASE);
+}
+
+static void
+rockchip_vpu2_mpeg2_dec_set_buffers(struct hantro_dev *vpu,
+ struct hantro_ctx *ctx,
+ struct vb2_buffer *src_buf,
+ struct vb2_buffer *dst_buf,
+ const struct v4l2_ctrl_mpeg2_sequence *seq,
+ const struct v4l2_ctrl_mpeg2_picture *pic)
+{
+ dma_addr_t forward_addr = 0, backward_addr = 0;
+ dma_addr_t current_addr, addr;
+
+ switch (pic->picture_coding_type) {
+ case V4L2_MPEG2_PIC_CODING_TYPE_B:
+ backward_addr = hantro_get_ref(ctx, pic->backward_ref_ts);
+ fallthrough;
+ case V4L2_MPEG2_PIC_CODING_TYPE_P:
+ forward_addr = hantro_get_ref(ctx, pic->forward_ref_ts);
+ }
+
+ /* Source bitstream buffer */
+ addr = vb2_dma_contig_plane_dma_addr(src_buf, 0);
+ vdpu_write_relaxed(vpu, addr, VDPU_REG_RLC_VLC_BASE);
+
+ /* Destination frame buffer */
+ addr = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
+ current_addr = addr;
+
+ if (pic->picture_structure == V4L2_MPEG2_PIC_BOTTOM_FIELD)
+ addr += ALIGN(ctx->dst_fmt.width, 16);
+ vdpu_write_relaxed(vpu, addr, VDPU_REG_DEC_OUT_BASE);
+
+ if (!forward_addr)
+ forward_addr = current_addr;
+ if (!backward_addr)
+ backward_addr = current_addr;
+
+ /* Set forward ref frame (top/bottom field) */
+ if (pic->picture_structure == V4L2_MPEG2_PIC_FRAME ||
+ pic->picture_coding_type == V4L2_MPEG2_PIC_CODING_TYPE_B ||
+ (pic->picture_structure == V4L2_MPEG2_PIC_TOP_FIELD &&
+ pic->flags & V4L2_MPEG2_PIC_TOP_FIELD) ||
+ (pic->picture_structure == V4L2_MPEG2_PIC_BOTTOM_FIELD &&
+ !(pic->flags & V4L2_MPEG2_PIC_TOP_FIELD))) {
+ vdpu_write_relaxed(vpu, forward_addr, VDPU_REG_REFER0_BASE);
+ vdpu_write_relaxed(vpu, forward_addr, VDPU_REG_REFER1_BASE);
+ } else if (pic->picture_structure == V4L2_MPEG2_PIC_TOP_FIELD) {
+ vdpu_write_relaxed(vpu, forward_addr, VDPU_REG_REFER0_BASE);
+ vdpu_write_relaxed(vpu, current_addr, VDPU_REG_REFER1_BASE);
+ } else if (pic->picture_structure == V4L2_MPEG2_PIC_BOTTOM_FIELD) {
+ vdpu_write_relaxed(vpu, current_addr, VDPU_REG_REFER0_BASE);
+ vdpu_write_relaxed(vpu, forward_addr, VDPU_REG_REFER1_BASE);
+ }
+
+ /* Set backward ref frame (top/bottom field) */
+ vdpu_write_relaxed(vpu, backward_addr, VDPU_REG_REFER2_BASE);
+ vdpu_write_relaxed(vpu, backward_addr, VDPU_REG_REFER3_BASE);
+}
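
For field pictures the two fields share one frame buffer with their lines interleaved, which is why the bottom-field output address above skips exactly one luma stride. With a hypothetical 720-pixel-wide frame:

	addr += ALIGN(720, 16);	/* = 720 bytes: line 1, where the bottom
				 * field's first line is stored */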
+
+int rockchip_vpu2_mpeg2_dec_run(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
+ const struct v4l2_ctrl_mpeg2_sequence *seq;
+ const struct v4l2_ctrl_mpeg2_picture *pic;
+ u32 reg;
+
+ src_buf = hantro_get_src_buf(ctx);
+ dst_buf = hantro_get_dst_buf(ctx);
+
+ hantro_start_prepare_run(ctx);
+
+ seq = hantro_get_ctrl(ctx,
+ V4L2_CID_STATELESS_MPEG2_SEQUENCE);
+ pic = hantro_get_ctrl(ctx,
+ V4L2_CID_STATELESS_MPEG2_PICTURE);
+
+ reg = VDPU_REG_DEC_ADV_PRE_DIS(0) |
+ VDPU_REG_DEC_SCMD_DIS(0) |
+ VDPU_REG_FILTERING_DIS(1) |
+ VDPU_REG_DEC_LATENCY(0);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(50));
+
+ reg = VDPU_REG_INIT_QP(1) |
+ VDPU_REG_STREAM_LEN(vb2_get_plane_payload(&src_buf->vb2_buf, 0));
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(51));
+
+ reg = VDPU_REG_APF_THRESHOLD(8) |
+ VDPU_REG_STARTMB_X(0) |
+ VDPU_REG_STARTMB_Y(0);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(52));
+
+ reg = VDPU_REG_DEC_MODE(5);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(53));
+
+ reg = VDPU_REG_DEC_STRENDIAN_E(1) |
+ VDPU_REG_DEC_STRSWAP32_E(1) |
+ VDPU_REG_DEC_OUTSWAP32_E(1) |
+ VDPU_REG_DEC_INSWAP32_E(1) |
+ VDPU_REG_DEC_OUT_ENDIAN(1) |
+ VDPU_REG_DEC_IN_ENDIAN(1);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(54));
+
+ reg = VDPU_REG_DEC_DATA_DISC_E(0) |
+ VDPU_REG_DEC_MAX_BURST(16) |
+ VDPU_REG_DEC_AXI_WR_ID(0) |
+ VDPU_REG_DEC_AXI_RD_ID(0);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(56));
+
+ reg = VDPU_REG_RLC_MODE_E(0) |
+ VDPU_REG_PIC_INTERLACE_E(!(seq->flags & V4L2_MPEG2_SEQ_FLAG_PROGRESSIVE)) |
+ VDPU_REG_PIC_FIELDMODE_E(pic->picture_structure != V4L2_MPEG2_PIC_FRAME) |
+ VDPU_REG_PIC_B_E(pic->picture_coding_type == V4L2_MPEG2_PIC_CODING_TYPE_B) |
+ VDPU_REG_PIC_INTER_E(pic->picture_coding_type != V4L2_MPEG2_PIC_CODING_TYPE_I) |
+ VDPU_REG_PIC_TOPFIELD_E(pic->picture_structure == V4L2_MPEG2_PIC_TOP_FIELD) |
+ VDPU_REG_FWD_INTERLACE_E(0) |
+ VDPU_REG_WRITE_MVS_E(0) |
+ VDPU_REG_DEC_TIMEOUT_E(1) |
+ VDPU_REG_DEC_CLK_GATE_E(1);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(57));
+
+ reg = VDPU_REG_PIC_MB_WIDTH(MB_WIDTH(ctx->dst_fmt.width)) |
+ VDPU_REG_PIC_MB_HEIGHT_P(MB_HEIGHT(ctx->dst_fmt.height)) |
+ VDPU_REG_ALT_SCAN_E(pic->flags & V4L2_MPEG2_PIC_FLAG_ALT_SCAN) |
+ VDPU_REG_TOPFIELDFIRST_E(pic->flags & V4L2_MPEG2_PIC_FLAG_TOP_FIELD_FIRST);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(120));
+
+ reg = VDPU_REG_STRM_START_BIT(0) |
+ VDPU_REG_QSCALE_TYPE(pic->flags & V4L2_MPEG2_PIC_FLAG_Q_SCALE_TYPE) |
+ VDPU_REG_CON_MV_E(pic->flags & V4L2_MPEG2_PIC_FLAG_CONCEALMENT_MV) |
+ VDPU_REG_INTRA_DC_PREC(pic->intra_dc_precision) |
+ VDPU_REG_INTRA_VLC_TAB(pic->flags & V4L2_MPEG2_PIC_FLAG_INTRA_VLC) |
+ VDPU_REG_FRAME_PRED_DCT(pic->flags & V4L2_MPEG2_PIC_FLAG_FRAME_PRED_DCT);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(122));
+
+ reg = VDPU_REG_ALT_SCAN_FLAG_E(pic->flags & V4L2_MPEG2_PIC_FLAG_ALT_SCAN) |
+ VDPU_REG_FCODE_FWD_HOR(pic->f_code[0][0]) |
+ VDPU_REG_FCODE_FWD_VER(pic->f_code[0][1]) |
+ VDPU_REG_FCODE_BWD_HOR(pic->f_code[1][0]) |
+ VDPU_REG_FCODE_BWD_VER(pic->f_code[1][1]) |
+ VDPU_REG_MV_ACCURACY_FWD(1) |
+ VDPU_REG_MV_ACCURACY_BWD(1);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(136));
+
+ rockchip_vpu2_mpeg2_dec_set_quantisation(vpu, ctx);
+
+ rockchip_vpu2_mpeg2_dec_set_buffers(vpu, ctx, &src_buf->vb2_buf,
+ &dst_buf->vb2_buf, seq, pic);
+
+ /* Kick the watchdog and start decoding */
+ hantro_end_prepare_run(ctx);
+
+ reg = vdpu_read(vpu, VDPU_SWREG(57)) | VDPU_REG_DEC_E(1);
+ vdpu_write(vpu, reg, VDPU_SWREG(57));
+
+ return 0;
+}
diff --git a/drivers/media/platform/verisilicon/rockchip_vpu2_hw_vp8_dec.c b/drivers/media/platform/verisilicon/rockchip_vpu2_hw_vp8_dec.c
new file mode 100644
index 0000000000..d079075448
--- /dev/null
+++ b/drivers/media/platform/verisilicon/rockchip_vpu2_hw_vp8_dec.c
@@ -0,0 +1,600 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Rockchip VPU codec vp8 decode driver
+ *
+ * Copyright (C) 2014 Rockchip Electronics Co., Ltd.
+ * ZhiChao Yu <zhichao.yu@rock-chips.com>
+ *
+ * Copyright (C) 2014 Google LLC.
+ * Tomasz Figa <tfiga@chromium.org>
+ *
+ * Copyright (C) 2015 Rockchip Electronics Co., Ltd.
+ * Alpha Lin <alpha.lin@rock-chips.com>
+ */
+
+#include <media/v4l2-mem2mem.h>
+
+#include "hantro_hw.h"
+#include "hantro.h"
+#include "hantro_g1_regs.h"
+
+#define VDPU_REG_DEC_CTRL0 0x0c8
+#define VDPU_REG_STREAM_LEN 0x0cc
+#define VDPU_REG_DEC_FORMAT 0x0d4
+#define VDPU_REG_DEC_CTRL0_DEC_MODE(x) (((x) & 0xf) << 0)
+#define VDPU_REG_DATA_ENDIAN 0x0d8
+#define VDPU_REG_CONFIG_DEC_STRENDIAN_E BIT(5)
+#define VDPU_REG_CONFIG_DEC_STRSWAP32_E BIT(4)
+#define VDPU_REG_CONFIG_DEC_OUTSWAP32_E BIT(3)
+#define VDPU_REG_CONFIG_DEC_INSWAP32_E BIT(2)
+#define VDPU_REG_CONFIG_DEC_OUT_ENDIAN BIT(1)
+#define VDPU_REG_CONFIG_DEC_IN_ENDIAN BIT(0)
+#define VDPU_REG_AXI_CTRL 0x0e0
+#define VDPU_REG_CONFIG_DEC_MAX_BURST(x) (((x) & 0x1f) << 16)
+#define VDPU_REG_EN_FLAGS 0x0e4
+#define VDPU_REG_DEC_CTRL0_PIC_INTER_E BIT(14)
+#define VDPU_REG_CONFIG_DEC_TIMEOUT_E BIT(5)
+#define VDPU_REG_CONFIG_DEC_CLK_GATE_E BIT(4)
+#define VDPU_REG_PRED_FLT 0x0ec
+#define VDPU_REG_ADDR_QTABLE 0x0f4
+#define VDPU_REG_ADDR_DST 0x0fc
+#define VDPU_REG_ADDR_STR 0x100
+#define VDPU_REG_VP8_PIC_MB_SIZE 0x1e0
+#define VDPU_REG_VP8_DCT_START_BIT 0x1e4
+#define VDPU_REG_DEC_CTRL4_VC1_HEIGHT_EXT BIT(13)
+#define VDPU_REG_DEC_CTRL4_BILIN_MC_E BIT(12)
+#define VDPU_REG_VP8_CTRL0 0x1e8
+#define VDPU_REG_VP8_DATA_VAL 0x1f0
+#define VDPU_REG_PRED_FLT7 0x1f4
+#define VDPU_REG_PRED_FLT8 0x1f8
+#define VDPU_REG_PRED_FLT9 0x1fc
+#define VDPU_REG_PRED_FLT10 0x200
+#define VDPU_REG_FILTER_LEVEL 0x204
+#define VDPU_REG_VP8_QUANTER0 0x208
+#define VDPU_REG_VP8_ADDR_REF0 0x20c
+#define VDPU_REG_FILTER_MB_ADJ 0x210
+#define VDPU_REG_REF_PIC_FILT_TYPE_E BIT(31)
+#define VDPU_REG_REF_PIC_FILT_SHARPNESS(x) (((x) & 0x7) << 28)
+#define VDPU_REG_FILTER_REF_ADJ 0x214
+#define VDPU_REG_VP8_ADDR_REF2_5(i) (0x218 + ((i) * 0x4))
+#define VDPU_REG_VP8_GREF_SIGN_BIAS BIT(0)
+#define VDPU_REG_VP8_AREF_SIGN_BIAS BIT(0)
+#define VDPU_REG_VP8_DCT_BASE(i) \
+ (0x230 + ((((i) < 5) ? (i) : ((i) + 1)) * 0x4))
+#define VDPU_REG_VP8_ADDR_CTRL_PART 0x244
+#define VDPU_REG_VP8_SEGMENT_VAL 0x254
+#define VDPU_REG_FWD_PIC1_SEGMENT_BASE(x) ((x) << 0)
+#define VDPU_REG_FWD_PIC1_SEGMENT_UPD_E BIT(1)
+#define VDPU_REG_FWD_PIC1_SEGMENT_E BIT(0)
+#define VDPU_REG_VP8_DCT_START_BIT2 0x258
+#define VDPU_REG_VP8_QUANTER1 0x25c
+#define VDPU_REG_VP8_QUANTER2 0x260
+#define VDPU_REG_PRED_FLT1 0x264
+#define VDPU_REG_PRED_FLT2 0x268
+#define VDPU_REG_PRED_FLT3 0x26c
+#define VDPU_REG_PRED_FLT4 0x270
+#define VDPU_REG_PRED_FLT5 0x274
+#define VDPU_REG_PRED_FLT6 0x278
+
+static const struct hantro_reg vp8_dec_dct_base[8] = {
+ { VDPU_REG_ADDR_STR, 0, 0xffffffff },
+ { VDPU_REG_VP8_DCT_BASE(0), 0, 0xffffffff },
+ { VDPU_REG_VP8_DCT_BASE(1), 0, 0xffffffff },
+ { VDPU_REG_VP8_DCT_BASE(2), 0, 0xffffffff },
+ { VDPU_REG_VP8_DCT_BASE(3), 0, 0xffffffff },
+ { VDPU_REG_VP8_DCT_BASE(4), 0, 0xffffffff },
+ { VDPU_REG_VP8_DCT_BASE(5), 0, 0xffffffff },
+ { VDPU_REG_VP8_DCT_BASE(6), 0, 0xffffffff },
+};
+
+static const struct hantro_reg vp8_dec_lf_level[4] = {
+ { VDPU_REG_FILTER_LEVEL, 18, 0x3f },
+ { VDPU_REG_FILTER_LEVEL, 12, 0x3f },
+ { VDPU_REG_FILTER_LEVEL, 6, 0x3f },
+ { VDPU_REG_FILTER_LEVEL, 0, 0x3f },
+};
+
+static const struct hantro_reg vp8_dec_mb_adj[4] = {
+ { VDPU_REG_FILTER_MB_ADJ, 21, 0x7f },
+ { VDPU_REG_FILTER_MB_ADJ, 14, 0x7f },
+ { VDPU_REG_FILTER_MB_ADJ, 7, 0x7f },
+ { VDPU_REG_FILTER_MB_ADJ, 0, 0x7f },
+};
+
+static const struct hantro_reg vp8_dec_ref_adj[4] = {
+ { VDPU_REG_FILTER_REF_ADJ, 21, 0x7f },
+ { VDPU_REG_FILTER_REF_ADJ, 14, 0x7f },
+ { VDPU_REG_FILTER_REF_ADJ, 7, 0x7f },
+ { VDPU_REG_FILTER_REF_ADJ, 0, 0x7f },
+};
+
+static const struct hantro_reg vp8_dec_quant[4] = {
+ { VDPU_REG_VP8_QUANTER0, 11, 0x7ff },
+ { VDPU_REG_VP8_QUANTER0, 0, 0x7ff },
+ { VDPU_REG_VP8_QUANTER1, 11, 0x7ff },
+ { VDPU_REG_VP8_QUANTER1, 0, 0x7ff },
+};
+
+static const struct hantro_reg vp8_dec_quant_delta[5] = {
+ { VDPU_REG_VP8_QUANTER0, 27, 0x1f },
+ { VDPU_REG_VP8_QUANTER0, 22, 0x1f },
+ { VDPU_REG_VP8_QUANTER1, 27, 0x1f },
+ { VDPU_REG_VP8_QUANTER1, 22, 0x1f },
+ { VDPU_REG_VP8_QUANTER2, 27, 0x1f },
+};
+
+static const struct hantro_reg vp8_dec_dct_start_bits[8] = {
+ { VDPU_REG_VP8_CTRL0, 26, 0x3f },
+ { VDPU_REG_VP8_DCT_START_BIT, 26, 0x3f },
+ { VDPU_REG_VP8_DCT_START_BIT, 20, 0x3f },
+ { VDPU_REG_VP8_DCT_START_BIT2, 24, 0x3f },
+ { VDPU_REG_VP8_DCT_START_BIT2, 18, 0x3f },
+ { VDPU_REG_VP8_DCT_START_BIT2, 12, 0x3f },
+ { VDPU_REG_VP8_DCT_START_BIT2, 6, 0x3f },
+ { VDPU_REG_VP8_DCT_START_BIT2, 0, 0x3f },
+};
+
+static const struct hantro_reg vp8_dec_pred_bc_tap[8][6] = {
+ {
+ { 0, 0, 0},
+ { VDPU_REG_PRED_FLT, 22, 0x3ff },
+ { VDPU_REG_PRED_FLT, 12, 0x3ff },
+ { VDPU_REG_PRED_FLT, 2, 0x3ff },
+ { VDPU_REG_PRED_FLT1, 22, 0x3ff },
+ { 0, 0, 0},
+ }, {
+ { 0, 0, 0},
+ { VDPU_REG_PRED_FLT1, 12, 0x3ff },
+ { VDPU_REG_PRED_FLT1, 2, 0x3ff },
+ { VDPU_REG_PRED_FLT2, 22, 0x3ff },
+ { VDPU_REG_PRED_FLT2, 12, 0x3ff },
+ { 0, 0, 0},
+ }, {
+ { VDPU_REG_PRED_FLT10, 10, 0x3 },
+ { VDPU_REG_PRED_FLT2, 2, 0x3ff },
+ { VDPU_REG_PRED_FLT3, 22, 0x3ff },
+ { VDPU_REG_PRED_FLT3, 12, 0x3ff },
+ { VDPU_REG_PRED_FLT3, 2, 0x3ff },
+ { VDPU_REG_PRED_FLT10, 8, 0x3},
+ }, {
+ { 0, 0, 0},
+ { VDPU_REG_PRED_FLT4, 22, 0x3ff },
+ { VDPU_REG_PRED_FLT4, 12, 0x3ff },
+ { VDPU_REG_PRED_FLT4, 2, 0x3ff },
+ { VDPU_REG_PRED_FLT5, 22, 0x3ff },
+ { 0, 0, 0},
+ }, {
+ { VDPU_REG_PRED_FLT10, 6, 0x3 },
+ { VDPU_REG_PRED_FLT5, 12, 0x3ff },
+ { VDPU_REG_PRED_FLT5, 2, 0x3ff },
+ { VDPU_REG_PRED_FLT6, 22, 0x3ff },
+ { VDPU_REG_PRED_FLT6, 12, 0x3ff },
+ { VDPU_REG_PRED_FLT10, 4, 0x3 },
+ }, {
+ { 0, 0, 0},
+ { VDPU_REG_PRED_FLT6, 2, 0x3ff },
+ { VDPU_REG_PRED_FLT7, 22, 0x3ff },
+ { VDPU_REG_PRED_FLT7, 12, 0x3ff },
+ { VDPU_REG_PRED_FLT7, 2, 0x3ff },
+ { 0, 0, 0},
+ }, {
+ { VDPU_REG_PRED_FLT10, 2, 0x3 },
+ { VDPU_REG_PRED_FLT8, 22, 0x3ff },
+ { VDPU_REG_PRED_FLT8, 12, 0x3ff },
+ { VDPU_REG_PRED_FLT8, 2, 0x3ff },
+ { VDPU_REG_PRED_FLT9, 22, 0x3ff },
+ { VDPU_REG_PRED_FLT10, 0, 0x3 },
+ }, {
+ { 0, 0, 0},
+ { VDPU_REG_PRED_FLT9, 12, 0x3ff },
+ { VDPU_REG_PRED_FLT9, 2, 0x3ff },
+ { VDPU_REG_PRED_FLT10, 22, 0x3ff },
+ { VDPU_REG_PRED_FLT10, 12, 0x3ff },
+ { 0, 0, 0},
+ },
+};
+
+static const struct hantro_reg vp8_dec_mb_start_bit = {
+ .base = VDPU_REG_VP8_CTRL0,
+ .shift = 18,
+ .mask = 0x3f
+};
+
+static const struct hantro_reg vp8_dec_mb_aligned_data_len = {
+ .base = VDPU_REG_VP8_DATA_VAL,
+ .shift = 0,
+ .mask = 0x3fffff
+};
+
+static const struct hantro_reg vp8_dec_num_dct_partitions = {
+ .base = VDPU_REG_VP8_DATA_VAL,
+ .shift = 24,
+ .mask = 0xf
+};
+
+static const struct hantro_reg vp8_dec_stream_len = {
+ .base = VDPU_REG_STREAM_LEN,
+ .shift = 0,
+ .mask = 0xffffff
+};
+
+static const struct hantro_reg vp8_dec_mb_width = {
+ .base = VDPU_REG_VP8_PIC_MB_SIZE,
+ .shift = 23,
+ .mask = 0x1ff
+};
+
+static const struct hantro_reg vp8_dec_mb_height = {
+ .base = VDPU_REG_VP8_PIC_MB_SIZE,
+ .shift = 11,
+ .mask = 0xff
+};
+
+static const struct hantro_reg vp8_dec_mb_width_ext = {
+ .base = VDPU_REG_VP8_PIC_MB_SIZE,
+ .shift = 3,
+ .mask = 0x7
+};
+
+static const struct hantro_reg vp8_dec_mb_height_ext = {
+ .base = VDPU_REG_VP8_PIC_MB_SIZE,
+ .shift = 0,
+ .mask = 0x7
+};
+
+static const struct hantro_reg vp8_dec_bool_range = {
+ .base = VDPU_REG_VP8_CTRL0,
+ .shift = 0,
+ .mask = 0xff
+};
+
+static const struct hantro_reg vp8_dec_bool_value = {
+ .base = VDPU_REG_VP8_CTRL0,
+ .shift = 8,
+ .mask = 0xff
+};
+
+static const struct hantro_reg vp8_dec_filter_disable = {
+ .base = VDPU_REG_DEC_CTRL0,
+ .shift = 8,
+ .mask = 1
+};
+
+static const struct hantro_reg vp8_dec_skip_mode = {
+ .base = VDPU_REG_DEC_CTRL0,
+ .shift = 9,
+ .mask = 1
+};
+
+static const struct hantro_reg vp8_dec_start_dec = {
+ .base = VDPU_REG_EN_FLAGS,
+ .shift = 0,
+ .mask = 1
+};
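
The hantro_reg tables above pair a register offset with a shift and an unshifted mask, so writing a field is a masked read-modify-write. The real helper is hantro_reg_write() from the shared driver headers; the sketch below is only an illustration of what such tables imply, not the actual implementation:

	static void field_write_sketch(struct hantro_dev *vpu,
				       const struct hantro_reg *r, u32 val)
	{
		u32 v = vdpu_read(vpu, r->base);

		v &= ~(r->mask << r->shift);		/* clear the field */
		v |= (val & r->mask) << r->shift;	/* insert the value */
		vdpu_write_relaxed(vpu, v, r->base);
	}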
+
+static void cfg_lf(struct hantro_ctx *ctx,
+ const struct v4l2_ctrl_vp8_frame *hdr)
+{
+ const struct v4l2_vp8_segment *seg = &hdr->segment;
+ const struct v4l2_vp8_loop_filter *lf = &hdr->lf;
+ struct hantro_dev *vpu = ctx->dev;
+ unsigned int i;
+ u32 reg;
+
+ if (!(seg->flags & V4L2_VP8_SEGMENT_FLAG_ENABLED)) {
+ hantro_reg_write(vpu, &vp8_dec_lf_level[0], lf->level);
+ } else if (seg->flags & V4L2_VP8_SEGMENT_FLAG_DELTA_VALUE_MODE) {
+ for (i = 0; i < 4; i++) {
+ u32 lf_level = clamp(lf->level + seg->lf_update[i],
+ 0, 63);
+
+ hantro_reg_write(vpu, &vp8_dec_lf_level[i], lf_level);
+ }
+ } else {
+ for (i = 0; i < 4; i++)
+ hantro_reg_write(vpu, &vp8_dec_lf_level[i],
+ seg->lf_update[i]);
+ }
+
+ reg = VDPU_REG_REF_PIC_FILT_SHARPNESS(lf->sharpness_level);
+ if (lf->flags & V4L2_VP8_LF_FILTER_TYPE_SIMPLE)
+ reg |= VDPU_REG_REF_PIC_FILT_TYPE_E;
+ vdpu_write_relaxed(vpu, reg, VDPU_REG_FILTER_MB_ADJ);
+
+ if (lf->flags & V4L2_VP8_LF_ADJ_ENABLE) {
+ for (i = 0; i < 4; i++) {
+ hantro_reg_write(vpu, &vp8_dec_mb_adj[i],
+ lf->mb_mode_delta[i]);
+ hantro_reg_write(vpu, &vp8_dec_ref_adj[i],
+ lf->ref_frm_delta[i]);
+ }
+ }
+}
+
+static void cfg_qp(struct hantro_ctx *ctx,
+ const struct v4l2_ctrl_vp8_frame *hdr)
+{
+ const struct v4l2_vp8_quantization *q = &hdr->quant;
+ const struct v4l2_vp8_segment *seg = &hdr->segment;
+ struct hantro_dev *vpu = ctx->dev;
+ unsigned int i;
+
+ if (!(seg->flags & V4L2_VP8_SEGMENT_FLAG_ENABLED)) {
+ hantro_reg_write(vpu, &vp8_dec_quant[0], q->y_ac_qi);
+ } else if (seg->flags & V4L2_VP8_SEGMENT_FLAG_DELTA_VALUE_MODE) {
+ for (i = 0; i < 4; i++) {
+ u32 quant = clamp(q->y_ac_qi + seg->quant_update[i],
+ 0, 127);
+
+ hantro_reg_write(vpu, &vp8_dec_quant[i], quant);
+ }
+ } else {
+ for (i = 0; i < 4; i++)
+ hantro_reg_write(vpu, &vp8_dec_quant[i],
+ seg->quant_update[i]);
+ }
+
+ hantro_reg_write(vpu, &vp8_dec_quant_delta[0], q->y_dc_delta);
+ hantro_reg_write(vpu, &vp8_dec_quant_delta[1], q->y2_dc_delta);
+ hantro_reg_write(vpu, &vp8_dec_quant_delta[2], q->y2_ac_delta);
+ hantro_reg_write(vpu, &vp8_dec_quant_delta[3], q->uv_dc_delta);
+ hantro_reg_write(vpu, &vp8_dec_quant_delta[4], q->uv_ac_delta);
+}
+
+static void cfg_parts(struct hantro_ctx *ctx,
+ const struct v4l2_ctrl_vp8_frame *hdr)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ struct vb2_v4l2_buffer *vb2_src;
+ u32 first_part_offset = V4L2_VP8_FRAME_IS_KEY_FRAME(hdr) ? 10 : 3;
+ u32 mb_size, mb_offset_bytes, mb_offset_bits, mb_start_bits;
+ u32 dct_size_part_size, dct_part_offset;
+ dma_addr_t src_dma;
+ u32 dct_part_total_len = 0;
+ u32 count = 0;
+ unsigned int i;
+
+ vb2_src = hantro_get_src_buf(ctx);
+ src_dma = vb2_dma_contig_plane_dma_addr(&vb2_src->vb2_buf, 0);
+
+	/*
+	 * Calculate control partition mb data info
+	 * @first_part_header_bits:	bit offset of the mb data from the
+	 *				first partition start position
+	 * @mb_offset_bits:		bit offset of the mb data from the
+	 *				src_dma base address
+	 * @mb_offset_bytes:		byte offset of the mb data from the
+	 *				src_dma base address
+	 * @mb_start_bits:		bit offset of the mb data from its
+	 *				64-bit aligned address
+	 */
+ mb_offset_bits = first_part_offset * 8 +
+ hdr->first_part_header_bits + 8;
+ mb_offset_bytes = mb_offset_bits / 8;
+ mb_start_bits = mb_offset_bits -
+ (mb_offset_bytes & (~DEC_8190_ALIGN_MASK)) * 8;
+ mb_size = hdr->first_part_size -
+ (mb_offset_bytes - first_part_offset) +
+ (mb_offset_bytes & DEC_8190_ALIGN_MASK);
+
+ /* Macroblock data aligned base addr */
+ vdpu_write_relaxed(vpu, (mb_offset_bytes & (~DEC_8190_ALIGN_MASK)) +
+ src_dma, VDPU_REG_VP8_ADDR_CTRL_PART);
+ hantro_reg_write(vpu, &vp8_dec_mb_start_bit, mb_start_bits);
+ hantro_reg_write(vpu, &vp8_dec_mb_aligned_data_len, mb_size);
+
+	/*
+	 * Calculate DCT partition info
+	 * @dct_size_part_size: size of the area holding the DCT partition
+	 *			sizes; every DCT partition except the last
+	 *			stores its size in 3 bytes
+	 * @dct_part_offset: byte offset of the DCT partitions from the
+	 *		     src_dma base address
+	 * @dct_part_total_len: total size of all DCT partitions
+	 */
+ dct_size_part_size = (hdr->num_dct_parts - 1) * 3;
+ dct_part_offset = first_part_offset + hdr->first_part_size;
+ for (i = 0; i < hdr->num_dct_parts; i++)
+ dct_part_total_len += hdr->dct_part_sizes[i];
+ dct_part_total_len += dct_size_part_size;
+ dct_part_total_len += (dct_part_offset & DEC_8190_ALIGN_MASK);
+
+ /* Number of DCT partitions */
+ hantro_reg_write(vpu, &vp8_dec_num_dct_partitions,
+ hdr->num_dct_parts - 1);
+
+ /* DCT partition length */
+ hantro_reg_write(vpu, &vp8_dec_stream_len, dct_part_total_len);
+
+ /* DCT partitions base address */
+ for (i = 0; i < hdr->num_dct_parts; i++) {
+ u32 byte_offset = dct_part_offset + dct_size_part_size + count;
+ u32 base_addr = byte_offset + src_dma;
+
+ hantro_reg_write(vpu, &vp8_dec_dct_base[i],
+ base_addr & (~DEC_8190_ALIGN_MASK));
+
+ hantro_reg_write(vpu, &vp8_dec_dct_start_bits[i],
+ (byte_offset & DEC_8190_ALIGN_MASK) * 8);
+
+ count += hdr->dct_part_sizes[i];
+ }
+}
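
Working through the control-partition math with hypothetical numbers helps: assume a keyframe (first_part_offset = 10), first_part_header_bits = 100, and DEC_8190_ALIGN_MASK = 0x7 (64-bit alignment, matching the "64-bit aligned address" wording above -- an assumption about the shared constant):

	mb_offset_bits  = 10 * 8 + 100 + 8;	/* = 188 */
	mb_offset_bytes = 188 / 8;		/* = 23 */
	/* aligned base: 23 & ~7 = 16 bytes past src_dma */
	mb_start_bits   = 188 - 16 * 8;		/* = 60 bits into the window */
	mb_size         = first_part_size - (23 - 10) + (23 & 7);
	/*              = first_part_size - 6 */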
+
+/*
+ * Prediction filter taps for the normal 6-tap filters.
+ */
+static void cfg_tap(struct hantro_ctx *ctx,
+ const struct v4l2_ctrl_vp8_frame *hdr)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ int i, j;
+
+ if ((hdr->version & 0x03) != 0)
+ return; /* Tap filter not used. */
+
+ for (i = 0; i < 8; i++) {
+ for (j = 0; j < 6; j++) {
+ if (vp8_dec_pred_bc_tap[i][j].base != 0)
+ hantro_reg_write(vpu,
+ &vp8_dec_pred_bc_tap[i][j],
+ hantro_vp8_dec_mc_filter[i][j]);
+ }
+ }
+}
+
+static void cfg_ref(struct hantro_ctx *ctx,
+ const struct v4l2_ctrl_vp8_frame *hdr,
+ struct vb2_v4l2_buffer *vb2_dst)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ dma_addr_t ref;
+
+ ref = hantro_get_ref(ctx, hdr->last_frame_ts);
+ if (!ref) {
+ vpu_debug(0, "failed to find last frame ts=%llu\n",
+ hdr->last_frame_ts);
+ ref = vb2_dma_contig_plane_dma_addr(&vb2_dst->vb2_buf, 0);
+ }
+ vdpu_write_relaxed(vpu, ref, VDPU_REG_VP8_ADDR_REF0);
+
+ ref = hantro_get_ref(ctx, hdr->golden_frame_ts);
+ if (!ref && hdr->golden_frame_ts)
+ vpu_debug(0, "failed to find golden frame ts=%llu\n",
+ hdr->golden_frame_ts);
+ if (!ref)
+ ref = vb2_dma_contig_plane_dma_addr(&vb2_dst->vb2_buf, 0);
+ if (hdr->flags & V4L2_VP8_FRAME_FLAG_SIGN_BIAS_GOLDEN)
+ ref |= VDPU_REG_VP8_GREF_SIGN_BIAS;
+ vdpu_write_relaxed(vpu, ref, VDPU_REG_VP8_ADDR_REF2_5(2));
+
+ ref = hantro_get_ref(ctx, hdr->alt_frame_ts);
+ if (!ref && hdr->alt_frame_ts)
+ vpu_debug(0, "failed to find alt frame ts=%llu\n",
+ hdr->alt_frame_ts);
+ if (!ref)
+ ref = vb2_dma_contig_plane_dma_addr(&vb2_dst->vb2_buf, 0);
+ if (hdr->flags & V4L2_VP8_FRAME_FLAG_SIGN_BIAS_ALT)
+ ref |= VDPU_REG_VP8_AREF_SIGN_BIAS;
+ vdpu_write_relaxed(vpu, ref, VDPU_REG_VP8_ADDR_REF2_5(3));
+}
+
+static void cfg_buffers(struct hantro_ctx *ctx,
+ const struct v4l2_ctrl_vp8_frame *hdr,
+ struct vb2_v4l2_buffer *vb2_dst)
+{
+ const struct v4l2_vp8_segment *seg = &hdr->segment;
+ struct hantro_dev *vpu = ctx->dev;
+ dma_addr_t dst_dma;
+ u32 reg;
+
+ /* Set probability table buffer address */
+ vdpu_write_relaxed(vpu, ctx->vp8_dec.prob_tbl.dma,
+ VDPU_REG_ADDR_QTABLE);
+
+ /* Set segment map address */
+ reg = VDPU_REG_FWD_PIC1_SEGMENT_BASE(ctx->vp8_dec.segment_map.dma);
+ if (seg->flags & V4L2_VP8_SEGMENT_FLAG_ENABLED) {
+ reg |= VDPU_REG_FWD_PIC1_SEGMENT_E;
+ if (seg->flags & V4L2_VP8_SEGMENT_FLAG_UPDATE_MAP)
+ reg |= VDPU_REG_FWD_PIC1_SEGMENT_UPD_E;
+ }
+ vdpu_write_relaxed(vpu, reg, VDPU_REG_VP8_SEGMENT_VAL);
+
+ /* set output frame buffer address */
+ dst_dma = vb2_dma_contig_plane_dma_addr(&vb2_dst->vb2_buf, 0);
+ vdpu_write_relaxed(vpu, dst_dma, VDPU_REG_ADDR_DST);
+}
+
+int rockchip_vpu2_vp8_dec_run(struct hantro_ctx *ctx)
+{
+ const struct v4l2_ctrl_vp8_frame *hdr;
+ struct hantro_dev *vpu = ctx->dev;
+ struct vb2_v4l2_buffer *vb2_dst;
+ size_t height = ctx->dst_fmt.height;
+ size_t width = ctx->dst_fmt.width;
+ u32 mb_width, mb_height;
+ u32 reg;
+
+ hantro_start_prepare_run(ctx);
+
+ hdr = hantro_get_ctrl(ctx, V4L2_CID_STATELESS_VP8_FRAME);
+ if (WARN_ON(!hdr))
+ return -EINVAL;
+
+ /* Reset segment_map buffer in keyframe */
+ if (V4L2_VP8_FRAME_IS_KEY_FRAME(hdr) && ctx->vp8_dec.segment_map.cpu)
+ memset(ctx->vp8_dec.segment_map.cpu, 0,
+ ctx->vp8_dec.segment_map.size);
+
+ hantro_vp8_prob_update(ctx, hdr);
+
+ /*
+ * Extensive testing shows that the hardware does not properly
+	 * clear the internal state from a previous decoding run. This
+ * causes corruption in decoded frames for multi-instance use cases.
+ * A soft reset before programming the registers has been found
+ * to resolve those problems.
+ */
+ ctx->codec_ops->reset(ctx);
+
+ reg = VDPU_REG_CONFIG_DEC_TIMEOUT_E
+ | VDPU_REG_CONFIG_DEC_CLK_GATE_E;
+ if (!V4L2_VP8_FRAME_IS_KEY_FRAME(hdr))
+ reg |= VDPU_REG_DEC_CTRL0_PIC_INTER_E;
+ vdpu_write_relaxed(vpu, reg, VDPU_REG_EN_FLAGS);
+
+ reg = VDPU_REG_CONFIG_DEC_STRENDIAN_E
+ | VDPU_REG_CONFIG_DEC_INSWAP32_E
+ | VDPU_REG_CONFIG_DEC_STRSWAP32_E
+ | VDPU_REG_CONFIG_DEC_OUTSWAP32_E
+ | VDPU_REG_CONFIG_DEC_IN_ENDIAN
+ | VDPU_REG_CONFIG_DEC_OUT_ENDIAN;
+ vdpu_write_relaxed(vpu, reg, VDPU_REG_DATA_ENDIAN);
+
+ reg = VDPU_REG_CONFIG_DEC_MAX_BURST(16);
+ vdpu_write_relaxed(vpu, reg, VDPU_REG_AXI_CTRL);
+
+ reg = VDPU_REG_DEC_CTRL0_DEC_MODE(10);
+ vdpu_write_relaxed(vpu, reg, VDPU_REG_DEC_FORMAT);
+
+ if (!(hdr->flags & V4L2_VP8_FRAME_FLAG_MB_NO_SKIP_COEFF))
+ hantro_reg_write(vpu, &vp8_dec_skip_mode, 1);
+ if (hdr->lf.level == 0)
+ hantro_reg_write(vpu, &vp8_dec_filter_disable, 1);
+
+ /* Frame dimensions */
+ mb_width = MB_WIDTH(width);
+ mb_height = MB_HEIGHT(height);
+
+ hantro_reg_write(vpu, &vp8_dec_mb_width, mb_width);
+ hantro_reg_write(vpu, &vp8_dec_mb_height, mb_height);
+ hantro_reg_write(vpu, &vp8_dec_mb_width_ext, mb_width >> 9);
+ hantro_reg_write(vpu, &vp8_dec_mb_height_ext, mb_height >> 8);
+
+ /* Boolean decoder */
+ hantro_reg_write(vpu, &vp8_dec_bool_range, hdr->coder_state.range);
+ hantro_reg_write(vpu, &vp8_dec_bool_value, hdr->coder_state.value);
+
+ reg = vdpu_read(vpu, VDPU_REG_VP8_DCT_START_BIT);
+ if (hdr->version != 3)
+ reg |= VDPU_REG_DEC_CTRL4_VC1_HEIGHT_EXT;
+ if (hdr->version & 0x3)
+ reg |= VDPU_REG_DEC_CTRL4_BILIN_MC_E;
+ vdpu_write_relaxed(vpu, reg, VDPU_REG_VP8_DCT_START_BIT);
+
+ cfg_lf(ctx, hdr);
+ cfg_qp(ctx, hdr);
+ cfg_parts(ctx, hdr);
+ cfg_tap(ctx, hdr);
+
+ vb2_dst = hantro_get_dst_buf(ctx);
+ cfg_ref(ctx, hdr, vb2_dst);
+ cfg_buffers(ctx, hdr, vb2_dst);
+
+ hantro_end_prepare_run(ctx);
+
+ hantro_reg_write(vpu, &vp8_dec_start_dec, 1);
+
+ return 0;
+}
diff --git a/drivers/media/platform/verisilicon/rockchip_vpu2_regs.h b/drivers/media/platform/verisilicon/rockchip_vpu2_regs.h
new file mode 100644
index 0000000000..49e4088954
--- /dev/null
+++ b/drivers/media/platform/verisilicon/rockchip_vpu2_regs.h
@@ -0,0 +1,600 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Hantro VPU codec driver
+ *
+ * Copyright (C) 2018 Rockchip Electronics Co., Ltd.
+ * Alpha Lin <alpha.lin@rock-chips.com>
+ */
+
+#ifndef ROCKCHIP_VPU2_REGS_H_
+#define ROCKCHIP_VPU2_REGS_H_
+
+/* Encoder registers. */
+#define VEPU_REG_VP8_QUT_1ST(i) (0x000 + ((i) * 0x24))
+#define VEPU_REG_VP8_QUT_DC_Y2(x) (((x) & 0x3fff) << 16)
+#define VEPU_REG_VP8_QUT_DC_Y1(x) (((x) & 0x3fff) << 0)
+#define VEPU_REG_VP8_QUT_2ND(i) (0x004 + ((i) * 0x24))
+#define VEPU_REG_VP8_QUT_AC_Y1(x) (((x) & 0x3fff) << 16)
+#define VEPU_REG_VP8_QUT_DC_CHR(x) (((x) & 0x3fff) << 0)
+#define VEPU_REG_VP8_QUT_3RD(i) (0x008 + ((i) * 0x24))
+#define VEPU_REG_VP8_QUT_AC_CHR(x) (((x) & 0x3fff) << 16)
+#define VEPU_REG_VP8_QUT_AC_Y2(x) (((x) & 0x3fff) << 0)
+#define VEPU_REG_VP8_QUT_4TH(i) (0x00c + ((i) * 0x24))
+#define VEPU_REG_VP8_QUT_ZB_DC_CHR(x) (((x) & 0x1ff) << 18)
+#define VEPU_REG_VP8_QUT_ZB_DC_Y2(x) (((x) & 0x1ff) << 9)
+#define VEPU_REG_VP8_QUT_ZB_DC_Y1(x) (((x) & 0x1ff) << 0)
+#define VEPU_REG_VP8_QUT_5TH(i) (0x010 + ((i) * 0x24))
+#define VEPU_REG_VP8_QUT_ZB_AC_CHR(x) (((x) & 0x1ff) << 18)
+#define VEPU_REG_VP8_QUT_ZB_AC_Y2(x) (((x) & 0x1ff) << 9)
+#define VEPU_REG_VP8_QUT_ZB_AC_Y1(x) (((x) & 0x1ff) << 0)
+#define VEPU_REG_VP8_QUT_6TH(i) (0x014 + ((i) * 0x24))
+#define VEPU_REG_VP8_QUT_RND_DC_CHR(x) (((x) & 0xff) << 16)
+#define VEPU_REG_VP8_QUT_RND_DC_Y2(x) (((x) & 0xff) << 8)
+#define VEPU_REG_VP8_QUT_RND_DC_Y1(x) (((x) & 0xff) << 0)
+#define VEPU_REG_VP8_QUT_7TH(i) (0x018 + ((i) * 0x24))
+#define VEPU_REG_VP8_QUT_RND_AC_CHR(x) (((x) & 0xff) << 16)
+#define VEPU_REG_VP8_QUT_RND_AC_Y2(x) (((x) & 0xff) << 8)
+#define VEPU_REG_VP8_QUT_RND_AC_Y1(x) (((x) & 0xff) << 0)
+#define VEPU_REG_VP8_QUT_8TH(i) (0x01c + ((i) * 0x24))
+#define VEPU_REG_VP8_SEG_FILTER_LEVEL(x) (((x) & 0x3f) << 25)
+#define VEPU_REG_VP8_DEQUT_DC_CHR(x) (((x) & 0xff) << 17)
+#define VEPU_REG_VP8_DEQUT_DC_Y2(x) (((x) & 0x1ff) << 8)
+#define VEPU_REG_VP8_DEQUT_DC_Y1(x) (((x) & 0xff) << 0)
+#define VEPU_REG_VP8_QUT_9TH(i) (0x020 + ((i) * 0x24))
+#define VEPU_REG_VP8_DEQUT_AC_CHR(x) (((x) & 0x1ff) << 18)
+#define VEPU_REG_VP8_DEQUT_AC_Y2(x) (((x) & 0x1ff) << 9)
+#define VEPU_REG_VP8_DEQUT_AC_Y1(x) (((x) & 0x1ff) << 0)
+#define VEPU_REG_ADDR_VP8_SEG_MAP 0x06c
+#define VEPU_REG_VP8_INTRA_4X4_PENALTY(i) (0x070 + ((i) * 0x4))
+#define VEPU_REG_VP8_INTRA_4X4_PENALTY_0(x) (((x) & 0xfff) << 0)
+#define VEPU_REG_VP8_INTRA_4x4_PENALTY_1(x) (((x) & 0xfff) << 16)
+#define VEPU_REG_VP8_INTRA_16X16_PENALTY(i) (0x084 + ((i) * 0x4))
+#define VEPU_REG_VP8_INTRA_16X16_PENALTY_0(x) (((x) & 0xfff) << 0)
+#define VEPU_REG_VP8_INTRA_16X16_PENALTY_1(x) (((x) & 0xfff) << 16)
+#define VEPU_REG_VP8_CONTROL 0x0a0
+#define VEPU_REG_VP8_LF_MODE_DELTA_BPRED(x) (((x) & 0x1f) << 24)
+#define VEPU_REG_VP8_LF_REF_DELTA_INTRA_MB(x) (((x) & 0x7f) << 16)
+#define VEPU_REG_VP8_INTER_TYPE_BIT_COST(x) (((x) & 0xfff) << 0)
+#define VEPU_REG_VP8_REF_FRAME_VAL 0x0a4
+#define VEPU_REG_VP8_COEF_DMV_PENALTY(x) (((x) & 0xfff) << 16)
+#define VEPU_REG_VP8_REF_FRAME(x) (((x) & 0xfff) << 0)
+#define VEPU_REG_VP8_LOOP_FILTER_REF_DELTA 0x0a8
+#define VEPU_REG_VP8_LF_REF_DELTA_ALT_REF(x) (((x) & 0x7f) << 16)
+#define VEPU_REG_VP8_LF_REF_DELTA_LAST_REF(x) (((x) & 0x7f) << 8)
+#define VEPU_REG_VP8_LF_REF_DELTA_GOLDEN(x) (((x) & 0x7f) << 0)
+#define VEPU_REG_VP8_LOOP_FILTER_MODE_DELTA 0x0ac
+#define VEPU_REG_VP8_LF_MODE_DELTA_SPLITMV(x) (((x) & 0x7f) << 16)
+#define VEPU_REG_VP8_LF_MODE_DELTA_ZEROMV(x) (((x) & 0x7f) << 8)
+#define VEPU_REG_VP8_LF_MODE_DELTA_NEWMV(x) (((x) & 0x7f) << 0)
+#define VEPU_REG_JPEG_LUMA_QUAT(i) (0x000 + ((i) * 0x4))
+#define VEPU_REG_JPEG_CHROMA_QUAT(i) (0x040 + ((i) * 0x4))
+#define VEPU_REG_INTRA_SLICE_BITMAP(i) (0x0b0 + ((i) * 0x4))
+#define VEPU_REG_ADDR_VP8_DCT_PART(i) (0x0b0 + ((i) * 0x4))
+#define VEPU_REG_INTRA_AREA_CTRL 0x0b8
+#define VEPU_REG_INTRA_AREA_TOP(x) (((x) & 0xff) << 24)
+#define VEPU_REG_INTRA_AREA_BOTTOM(x) (((x) & 0xff) << 16)
+#define VEPU_REG_INTRA_AREA_LEFT(x) (((x) & 0xff) << 8)
+#define VEPU_REG_INTRA_AREA_RIGHT(x) (((x) & 0xff) << 0)
+#define VEPU_REG_CIR_INTRA_CTRL 0x0bc
+#define VEPU_REG_CIR_INTRA_FIRST_MB(x) (((x) & 0xffff) << 16)
+#define VEPU_REG_CIR_INTRA_INTERVAL(x) (((x) & 0xffff) << 0)
+#define VEPU_REG_ADDR_IN_PLANE_0 0x0c0
+#define VEPU_REG_ADDR_IN_PLANE_1 0x0c4
+#define VEPU_REG_ADDR_IN_PLANE_2 0x0c8
+#define VEPU_REG_STR_HDR_REM_MSB 0x0cc
+#define VEPU_REG_STR_HDR_REM_LSB 0x0d0
+#define VEPU_REG_STR_BUF_LIMIT 0x0d4
+#define VEPU_REG_AXI_CTRL 0x0d8
+#define VEPU_REG_AXI_CTRL_READ_ID(x) (((x) & 0xff) << 24)
+#define VEPU_REG_AXI_CTRL_WRITE_ID(x) (((x) & 0xff) << 16)
+#define VEPU_REG_AXI_CTRL_BURST_LEN(x) (((x) & 0x3f) << 8)
+#define VEPU_REG_AXI_CTRL_INCREMENT_MODE(x) (((x) & 0x01) << 2)
+#define VEPU_REG_AXI_CTRL_BIRST_DISCARD(x) (((x) & 0x01) << 1)
+#define VEPU_REG_AXI_CTRL_BIRST_DISABLE BIT(0)
+#define VEPU_QP_ADJUST_MAD_DELTA_ROI 0x0dc
+#define VEPU_REG_ROI_QP_DELTA_1(x)		(((x) & 0xf) << 12)
+#define VEPU_REG_ROI_QP_DELTA_2(x)		(((x) & 0xf) << 8)
+#define VEPU_REG_MAD_QP_ADJUSTMENT(x)		(((x) & 0xf) << 0)
+#define VEPU_REG_ADDR_REF_LUMA 0x0e0
+#define VEPU_REG_ADDR_REF_CHROMA 0x0e4
+#define VEPU_REG_QP_SUM_DIV2 0x0e8
+#define VEPU_REG_QP_SUM(x) (((x) & 0x001fffff) * 2)
+#define VEPU_REG_ENC_CTRL0 0x0ec
+#define VEPU_REG_DISABLE_QUARTER_PIXEL_MV BIT(28)
+#define VEPU_REG_DEBLOCKING_FILTER_MODE(x) (((x) & 0x3) << 24)
+#define VEPU_REG_CABAC_INIT_IDC(x) (((x) & 0x3) << 21)
+#define VEPU_REG_ENTROPY_CODING_MODE BIT(20)
+#define VEPU_REG_H264_TRANS8X8_MODE BIT(17)
+#define VEPU_REG_H264_INTER4X4_MODE BIT(16)
+#define VEPU_REG_H264_STREAM_MODE BIT(15)
+#define VEPU_REG_H264_SLICE_SIZE(x) (((x) & 0x7f) << 8)
+#define VEPU_REG_ENC_OVER_FILL_STRM_OFFSET 0x0f0
+#define VEPU_REG_STREAM_START_OFFSET(x) (((x) & 0x3f) << 16)
+#define VEPU_REG_SKIP_MACROBLOCK_PENALTY(x) (((x) & 0xff) << 8)
+#define VEPU_REG_IN_IMG_CTRL_OVRFLR_D4(x) (((x) & 0x3) << 4)
+#define VEPU_REG_IN_IMG_CTRL_OVRFLB(x) (((x) & 0xf) << 0)
+#define VEPU_REG_INPUT_LUMA_INFO 0x0f4
+#define VEPU_REG_IN_IMG_CHROMA_OFFSET(x) (((x) & 0x7) << 20)
+#define VEPU_REG_IN_IMG_LUMA_OFFSET(x) (((x) & 0x7) << 16)
+#define VEPU_REG_IN_IMG_CTRL_ROW_LEN(x) (((x) & 0x3fff) << 0)
+#define VEPU_REG_RLC_SUM 0x0f8
+#define VEPU_REG_RLC_SUM_OUT(x) (((x) & 0x007fffff) * 4)
+#define VEPU_REG_SPLIT_PENALTY_4X4 0x0f8
+#define VEPU_REG_VP8_SPLIT_PENALTY_4X4(x)	(((x) & 0x1ff) << 19)
+#define VEPU_REG_ADDR_REC_LUMA 0x0fc
+#define VEPU_REG_ADDR_REC_CHROMA 0x100
+#define VEPU_REG_CHECKPOINT(i) (0x104 + ((i) * 0x4))
+#define VEPU_REG_CHECKPOINT_CHECK0(x) (((x) & 0xffff))
+#define VEPU_REG_CHECKPOINT_CHECK1(x) (((x) & 0xffff) << 16)
+#define VEPU_REG_CHECKPOINT_RESULT(x) \
+ ((((x) >> (16 - 16 * ((i) & 1))) & 0xffff) * 32)
+#define VEPU_REG_VP8_SEG0_QUANT_AC_Y1 0x104
+#define VEPU_REG_VP8_SEG0_RND_AC_Y1(x) (((x) & 0xff) << 23)
+#define VEPU_REG_VP8_SEG0_ZBIN_AC_Y1(x) (((x) & 0x1ff) << 14)
+#define VEPU_REG_VP8_SEG0_QUT_AC_Y1(x) (((x) & 0x3fff) << 0)
+#define VEPU_REG_VP8_SEG0_QUANT_DC_Y2 0x108
+#define VEPU_REG_VP8_SEG0_RND_DC_Y2(x) (((x) & 0xff) << 23)
+#define VEPU_REG_VP8_SEG0_ZBIN_DC_Y2(x) (((x) & 0x1ff) << 14)
+#define VEPU_REG_VP8_SEG0_QUT_DC_Y2(x) (((x) & 0x3fff) << 0)
+#define VEPU_REG_VP8_SEG0_QUANT_AC_Y2 0x10c
+#define VEPU_REG_VP8_SEG0_RND_AC_Y2(x) (((x) & 0xff) << 23)
+#define VEPU_REG_VP8_SEG0_ZBIN_AC_Y2(x) (((x) & 0x1ff) << 14)
+#define VEPU_REG_VP8_SEG0_QUT_AC_Y2(x) (((x) & 0x3fff) << 0)
+#define VEPU_REG_VP8_SEG0_QUANT_DC_CHR 0x110
+#define VEPU_REG_VP8_SEG0_RND_DC_CHR(x) (((x) & 0xff) << 23)
+#define VEPU_REG_VP8_SEG0_ZBIN_DC_CHR(x) (((x) & 0x1ff) << 14)
+#define VEPU_REG_VP8_SEG0_QUT_DC_CHR(x) (((x) & 0x3fff) << 0)
+#define VEPU_REG_VP8_SEG0_QUANT_AC_CHR 0x114
+#define VEPU_REG_VP8_SEG0_RND_AC_CHR(x) (((x) & 0xff) << 23)
+#define VEPU_REG_VP8_SEG0_ZBIN_AC_CHR(x) (((x) & 0x1ff) << 14)
+#define VEPU_REG_VP8_SEG0_QUT_AC_CHR(x) (((x) & 0x3fff) << 0)
+#define VEPU_REG_VP8_SEG0_QUANT_DQUT 0x118
+#define VEPU_REG_VP8_MV_REF_IDX1(x) (((x) & 0x03) << 26)
+#define VEPU_REG_VP8_SEG0_DQUT_DC_Y2(x) (((x) & 0x1ff) << 17)
+#define VEPU_REG_VP8_SEG0_DQUT_AC_Y1(x) (((x) & 0x1ff) << 8)
+#define VEPU_REG_VP8_SEG0_DQUT_DC_Y1(x) (((x) & 0xff) << 0)
+#define VEPU_REG_CHKPT_WORD_ERR(i) (0x118 + ((i) * 0x4))
+#define VEPU_REG_CHKPT_WORD_ERR_CHK0(x) (((x) & 0xffff))
+#define VEPU_REG_CHKPT_WORD_ERR_CHK1(x) (((x) & 0xffff) << 16)
+#define VEPU_REG_VP8_SEG0_QUANT_DQUT_1 0x11c
+#define VEPU_REG_VP8_SEGMENT_MAP_UPDATE BIT(30)
+#define VEPU_REG_VP8_SEGMENT_EN BIT(29)
+#define VEPU_REG_VP8_MV_REF_IDX2_EN BIT(28)
+#define VEPU_REG_VP8_MV_REF_IDX2(x) (((x) & 0x03) << 26)
+#define VEPU_REG_VP8_SEG0_DQUT_AC_CHR(x) (((x) & 0x1ff) << 17)
+#define VEPU_REG_VP8_SEG0_DQUT_DC_CHR(x) (((x) & 0xff) << 9)
+#define VEPU_REG_VP8_SEG0_DQUT_AC_Y2(x) (((x) & 0x1ff) << 0)
+#define VEPU_REG_VP8_BOOL_ENC_VALUE 0x120
+#define VEPU_REG_CHKPT_DELTA_QP 0x124
+#define VEPU_REG_CHKPT_DELTA_QP_CHK0(x) (((x) & 0x0f) << 0)
+#define VEPU_REG_CHKPT_DELTA_QP_CHK1(x) (((x) & 0x0f) << 4)
+#define VEPU_REG_CHKPT_DELTA_QP_CHK2(x) (((x) & 0x0f) << 8)
+#define VEPU_REG_CHKPT_DELTA_QP_CHK3(x) (((x) & 0x0f) << 12)
+#define VEPU_REG_CHKPT_DELTA_QP_CHK4(x) (((x) & 0x0f) << 16)
+#define VEPU_REG_CHKPT_DELTA_QP_CHK5(x) (((x) & 0x0f) << 20)
+#define VEPU_REG_CHKPT_DELTA_QP_CHK6(x) (((x) & 0x0f) << 24)
+#define VEPU_REG_VP8_ENC_CTRL2 0x124
+#define VEPU_REG_VP8_ZERO_MV_PENALTY_FOR_REF2(x) (((x) & 0xff) << 24)
+#define VEPU_REG_VP8_FILTER_SHARPNESS(x) (((x) & 0x07) << 21)
+#define VEPU_REG_VP8_FILTER_LEVEL(x) (((x) & 0x3f) << 15)
+#define VEPU_REG_VP8_DCT_PARTITION_CNT(x) (((x) & 0x03) << 13)
+#define VEPU_REG_VP8_BOOL_ENC_VALUE_BITS(x) (((x) & 0x1f) << 8)
+#define VEPU_REG_VP8_BOOL_ENC_RANGE(x) (((x) & 0xff) << 0)
+#define VEPU_REG_ENC_CTRL1 0x128
+#define VEPU_REG_MAD_THRESHOLD(x) (((x) & 0x3f) << 24)
+#define VEPU_REG_COMPLETED_SLICES(x) (((x) & 0xff) << 16)
+#define VEPU_REG_IN_IMG_CTRL_FMT(x) (((x) & 0xf) << 4)
+#define VEPU_REG_IN_IMG_ROTATE_MODE(x) (((x) & 0x3) << 2)
+#define VEPU_REG_SIZE_TABLE_PRESENT BIT(0)
+#define VEPU_REG_INTRA_INTER_MODE 0x12c
+#define VEPU_REG_INTRA16X16_MODE(x) (((x) & 0xffff) << 16)
+#define VEPU_REG_INTER_MODE(x) (((x) & 0xffff) << 0)
+#define VEPU_REG_ENC_CTRL2 0x130
+#define VEPU_REG_PPS_INIT_QP(x) (((x) & 0x3f) << 26)
+#define VEPU_REG_SLICE_FILTER_ALPHA(x) (((x) & 0xf) << 22)
+#define VEPU_REG_SLICE_FILTER_BETA(x) (((x) & 0xf) << 18)
+#define VEPU_REG_CHROMA_QP_OFFSET(x) (((x) & 0x1f) << 13)
+#define VEPU_REG_FILTER_DISABLE BIT(5)
+#define VEPU_REG_IDR_PIC_ID(x) (((x) & 0xf) << 1)
+#define VEPU_REG_CONSTRAINED_INTRA_PREDICTION BIT(0)
+#define VEPU_REG_ADDR_OUTPUT_STREAM 0x134
+#define VEPU_REG_ADDR_OUTPUT_CTRL 0x138
+#define VEPU_REG_ADDR_NEXT_PIC 0x13c
+#define VEPU_REG_ADDR_MV_OUT 0x140
+#define VEPU_REG_ADDR_CABAC_TBL 0x144
+#define VEPU_REG_ROI1 0x148
+#define VEPU_REG_ROI1_TOP_MB(x) (((x) & 0xff) << 24)
+#define VEPU_REG_ROI1_BOTTOM_MB(x) (((x) & 0xff) << 16)
+#define VEPU_REG_ROI1_LEFT_MB(x) (((x) & 0xff) << 8)
+#define VEPU_REG_ROI1_RIGHT_MB(x) (((x) & 0xff) << 0)
+#define VEPU_REG_ROI2 0x14c
+#define VEPU_REG_ROI2_TOP_MB(x) (((x) & 0xff) << 24)
+#define VEPU_REG_ROI2_BOTTOM_MB(x) (((x) & 0xff) << 16)
+#define VEPU_REG_ROI2_LEFT_MB(x) (((x) & 0xff) << 8)
+#define VEPU_REG_ROI2_RIGHT_MB(x) (((x) & 0xff) << 0)
+#define VEPU_REG_STABLE_MATRIX(i) (0x150 + ((i) * 0x4))
+#define VEPU_REG_STABLE_MOTION_SUM 0x174
+#define VEPU_REG_STABILIZATION_OUTPUT 0x178
+#define VEPU_REG_STABLE_MIN_VALUE(x) (((x) & 0xffffff) << 8)
+#define VEPU_REG_STABLE_MODE_SEL(x) (((x) & 0x3) << 6)
+#define VEPU_REG_STABLE_HOR_GMV(x) (((x) & 0x3f) << 0)
+#define VEPU_REG_RGB2YUV_CONVERSION_COEF1 0x17c
+#define VEPU_REG_RGB2YUV_CONVERSION_COEFB(x) (((x) & 0xffff) << 16)
+#define VEPU_REG_RGB2YUV_CONVERSION_COEFA(x) (((x) & 0xffff) << 0)
+#define VEPU_REG_RGB2YUV_CONVERSION_COEF2 0x180
+#define VEPU_REG_RGB2YUV_CONVERSION_COEFE(x) (((x) & 0xffff) << 16)
+#define VEPU_REG_RGB2YUV_CONVERSION_COEFC(x) (((x) & 0xffff) << 0)
+#define VEPU_REG_RGB2YUV_CONVERSION_COEF3 0x184
+#define VEPU_REG_RGB2YUV_CONVERSION_COEFF(x) (((x) & 0xffff) << 0)
+#define VEPU_REG_RGB_MASK_MSB 0x188
+#define VEPU_REG_RGB_MASK_B_MSB(x) (((x) & 0x1f) << 16)
+#define VEPU_REG_RGB_MASK_G_MSB(x) (((x) & 0x1f) << 8)
+#define VEPU_REG_RGB_MASK_R_MSB(x) (((x) & 0x1f) << 0)
+#define VEPU_REG_MV_PENALTY 0x18c
+#define VEPU_REG_1MV_PENALTY(x) (((x) & 0x3ff) << 21)
+#define VEPU_REG_QMV_PENALTY(x) (((x) & 0x3ff) << 11)
+#define VEPU_REG_4MV_PENALTY(x) (((x) & 0x3ff) << 1)
+#define VEPU_REG_SPLIT_MV_MODE_EN BIT(0)
+#define VEPU_REG_QP_VAL 0x190
+#define VEPU_REG_H264_LUMA_INIT_QP(x) (((x) & 0x3f) << 26)
+#define VEPU_REG_H264_QP_MAX(x) (((x) & 0x3f) << 20)
+#define VEPU_REG_H264_QP_MIN(x) (((x) & 0x3f) << 14)
+#define VEPU_REG_H264_CHKPT_DISTANCE(x) (((x) & 0xfff) << 0)
+#define VEPU_REG_VP8_SEG0_QUANT_DC_Y1 0x190
+#define VEPU_REG_VP8_SEG0_RND_DC_Y1(x) (((x) & 0xff) << 23)
+#define VEPU_REG_VP8_SEG0_ZBIN_DC_Y1(x) (((x) & 0x1ff) << 14)
+#define VEPU_REG_VP8_SEG0_QUT_DC_Y1(x) (((x) & 0x3fff) << 0)
+#define VEPU_REG_MVC_RELATE 0x198
+#define VEPU_REG_ZERO_MV_FAVOR_D2(x) (((x) & 0xf) << 20)
+#define VEPU_REG_PENALTY_4X4MV(x) (((x) & 0x1ff) << 11)
+#define VEPU_REG_MVC_VIEW_ID(x) (((x) & 0x7) << 8)
+#define VEPU_REG_MVC_ANCHOR_PIC_FLAG BIT(7)
+#define VEPU_REG_MVC_PRIORITY_ID(x) (((x) & 0x7) << 4)
+#define VEPU_REG_MVC_TEMPORAL_ID(x) (((x) & 0x7) << 1)
+#define VEPU_REG_MVC_INTER_VIEW_FLAG BIT(0)
+#define VEPU_REG_ENCODE_START 0x19c
+#define VEPU_REG_MB_HEIGHT(x) (((x) & 0x1ff) << 20)
+#define VEPU_REG_MB_WIDTH(x) (((x) & 0x1ff) << 8)
+#define VEPU_REG_FRAME_TYPE_INTER (0x0 << 6)
+#define VEPU_REG_FRAME_TYPE_INTRA (0x1 << 6)
+#define VEPU_REG_FRAME_TYPE_MVCINTER (0x2 << 6)
+#define VEPU_REG_ENCODE_FORMAT_JPEG (0x2 << 4)
+#define VEPU_REG_ENCODE_FORMAT_H264 (0x3 << 4)
+#define VEPU_REG_ENCODE_ENABLE BIT(0)
+#define VEPU_REG_MB_CTRL 0x1a0
+#define VEPU_REG_MB_CNT_OUT(x) (((x) & 0xffff) << 16)
+#define VEPU_REG_MB_CNT_SET(x) (((x) & 0xffff) << 0)
+#define VEPU_REG_DATA_ENDIAN 0x1a4
+#define VEPU_REG_INPUT_SWAP8 BIT(31)
+#define VEPU_REG_INPUT_SWAP16 BIT(30)
+#define VEPU_REG_INPUT_SWAP32 BIT(29)
+#define VEPU_REG_OUTPUT_SWAP8 BIT(28)
+#define VEPU_REG_OUTPUT_SWAP16 BIT(27)
+#define VEPU_REG_OUTPUT_SWAP32 BIT(26)
+#define VEPU_REG_TEST_IRQ BIT(24)
+#define VEPU_REG_TEST_COUNTER(x) (((x) & 0xf) << 20)
+#define VEPU_REG_TEST_REG BIT(19)
+#define VEPU_REG_TEST_MEMORY BIT(18)
+#define VEPU_REG_TEST_LEN(x) (((x) & 0x3ffff) << 0)
+#define VEPU_REG_ENC_CTRL3 0x1a8
+#define VEPU_REG_PPS_ID(x) (((x) & 0xff) << 24)
+#define VEPU_REG_INTRA_PRED_MODE(x) (((x) & 0xff) << 16)
+#define VEPU_REG_FRAME_NUM(x) (((x) & 0xffff) << 0)
+#define VEPU_REG_ENC_CTRL4 0x1ac
+#define VEPU_REG_MV_PENALTY_16X8_8X16(x) (((x) & 0x3ff) << 20)
+#define VEPU_REG_MV_PENALTY_8X8(x) (((x) & 0x3ff) << 10)
+#define VEPU_REG_MV_PENALTY_8X4_4X8(x) (((x) & 0x3ff) << 0)
+#define VEPU_REG_ADDR_VP8_PROB_CNT 0x1b0
+#define VEPU_REG_INTERRUPT 0x1b4
+#define VEPU_REG_INTERRUPT_NON BIT(28)
+#define VEPU_REG_MV_WRITE_EN BIT(24)
+#define VEPU_REG_RECON_WRITE_DIS BIT(20)
+#define VEPU_REG_INTERRUPT_SLICE_READY_EN BIT(16)
+#define VEPU_REG_CLK_GATING_EN BIT(12)
+#define VEPU_REG_INTERRUPT_TIMEOUT_EN BIT(10)
+#define VEPU_REG_INTERRUPT_RESET BIT(9)
+#define VEPU_REG_INTERRUPT_DIS_BIT BIT(8)
+#define VEPU_REG_INTERRUPT_TIMEOUT BIT(6)
+#define VEPU_REG_INTERRUPT_BUFFER_FULL BIT(5)
+#define VEPU_REG_INTERRUPT_BUS_ERROR BIT(4)
+#define VEPU_REG_INTERRUPT_FUSE BIT(3)
+#define VEPU_REG_INTERRUPT_SLICE_READY BIT(2)
+#define VEPU_REG_INTERRUPT_FRAME_READY BIT(1)
+#define VEPU_REG_INTERRUPT_BIT BIT(0)
+#define VEPU_REG_DMV_PENALTY_TBL(i) (0x1E0 + ((i) * 0x4))
+#define VEPU_REG_DMV_PENALTY_TABLE_BIT(x, i) ((x) << (i) * 8)
+#define VEPU_REG_DMV_Q_PIXEL_PENALTY_TBL(i) (0x260 + ((i) * 0x4))
+#define VEPU_REG_DMV_Q_PIXEL_PENALTY_TABLE_BIT(x, i) ((x) << (i) * 8)
+
+/* vpu decoder register */
+#define VDPU_REG_DEC_CTRL0			0x0c8 /* reg 50 */
+#define VDPU_REG_REF_BUF_CTRL2_REFBU2_PICID(x) (((x) & 0x1f) << 25)
+#define VDPU_REG_REF_BUF_CTRL2_REFBU2_THR(x) (((x) & 0xfff) << 13)
+#define VDPU_REG_CONFIG_TILED_MODE_LSB BIT(12)
+#define VDPU_REG_CONFIG_DEC_ADV_PRE_DIS BIT(11)
+#define VDPU_REG_CONFIG_DEC_SCMD_DIS BIT(10)
+#define VDPU_REG_DEC_CTRL0_SKIP_MODE BIT(9)
+#define VDPU_REG_DEC_CTRL0_FILTERING_DIS BIT(8)
+#define VDPU_REG_DEC_CTRL0_PIC_FIXED_QUANT BIT(7)
+#define VDPU_REG_CONFIG_DEC_LATENCY(x) (((x) & 0x3f) << 1)
+#define VDPU_REG_CONFIG_TILED_MODE_MSB(x) BIT(0)
+#define VDPU_REG_CONFIG_DEC_OUT_TILED_E BIT(0)
+#define VDPU_REG_STREAM_LEN 0x0cc
+#define VDPU_REG_DEC_CTRL3_INIT_QP(x) (((x) & 0x3f) << 25)
+#define VDPU_REG_DEC_STREAM_LEN_HI BIT(24)
+#define VDPU_REG_DEC_CTRL3_STREAM_LEN(x) (((x) & 0xffffff) << 0)
+#define VDPU_REG_ERROR_CONCEALMENT 0x0d0
+#define VDPU_REG_REF_BUF_CTRL2_APF_THRESHOLD(x) (((x) & 0x3fff) << 17)
+#define VDPU_REG_ERR_CONC_STARTMB_X(x) (((x) & 0x1ff) << 8)
+#define VDPU_REG_ERR_CONC_STARTMB_Y(x) (((x) & 0xff) << 0)
+#define VDPU_REG_DEC_FORMAT 0x0d4
+#define VDPU_REG_DEC_CTRL0_DEC_MODE(x) (((x) & 0xf) << 0)
+#define VDPU_REG_DATA_ENDIAN 0x0d8
+#define VDPU_REG_CONFIG_DEC_STRENDIAN_E BIT(5)
+#define VDPU_REG_CONFIG_DEC_STRSWAP32_E BIT(4)
+#define VDPU_REG_CONFIG_DEC_OUTSWAP32_E BIT(3)
+#define VDPU_REG_CONFIG_DEC_INSWAP32_E BIT(2)
+#define VDPU_REG_CONFIG_DEC_OUT_ENDIAN BIT(1)
+#define VDPU_REG_CONFIG_DEC_IN_ENDIAN BIT(0)
+#define VDPU_REG_INTERRUPT 0x0dc
+#define VDPU_REG_INTERRUPT_DEC_TIMEOUT BIT(13)
+#define VDPU_REG_INTERRUPT_DEC_ERROR_INT BIT(12)
+#define VDPU_REG_INTERRUPT_DEC_PIC_INF BIT(10)
+#define VDPU_REG_INTERRUPT_DEC_SLICE_INT BIT(9)
+#define VDPU_REG_INTERRUPT_DEC_ASO_INT BIT(8)
+#define VDPU_REG_INTERRUPT_DEC_BUFFER_INT BIT(6)
+#define VDPU_REG_INTERRUPT_DEC_BUS_INT BIT(5)
+#define VDPU_REG_INTERRUPT_DEC_RDY_INT BIT(4)
+#define VDPU_REG_INTERRUPT_DEC_IRQ_DIS BIT(1)
+#define VDPU_REG_INTERRUPT_DEC_IRQ BIT(0)
+#define VDPU_REG_AXI_CTRL 0x0e0
+#define VDPU_REG_AXI_DEC_SEL BIT(23)
+#define VDPU_REG_CONFIG_DEC_DATA_DISC_E BIT(22)
+#define VDPU_REG_PARAL_BUS_E(x) BIT(21)
+#define VDPU_REG_CONFIG_DEC_MAX_BURST(x) (((x) & 0x1f) << 16)
+#define VDPU_REG_DEC_CTRL0_DEC_AXI_WR_ID(x) (((x) & 0xff) << 8)
+#define VDPU_REG_CONFIG_DEC_AXI_RD_ID(x) (((x) & 0xff) << 0)
+#define VDPU_REG_EN_FLAGS 0x0e4
+#define VDPU_REG_AHB_HLOCK_E BIT(31)
+#define VDPU_REG_CACHE_E BIT(29)
+#define VDPU_REG_PREFETCH_SINGLE_CHANNEL_E BIT(28)
+#define VDPU_REG_INTRA_3_CYCLE_ENHANCE BIT(27)
+#define VDPU_REG_INTRA_DOUBLE_SPEED BIT(26)
+#define VDPU_REG_INTER_DOUBLE_SPEED BIT(25)
+#define VDPU_REG_DEC_CTRL3_START_CODE_E BIT(22)
+#define VDPU_REG_DEC_CTRL3_CH_8PIX_ILEAV_E BIT(21)
+#define VDPU_REG_DEC_CTRL0_RLC_MODE_E BIT(20)
+#define VDPU_REG_DEC_CTRL0_DIVX3_E BIT(19)
+#define VDPU_REG_DEC_CTRL0_PJPEG_E BIT(18)
+#define VDPU_REG_DEC_CTRL0_PIC_INTERLACE_E BIT(17)
+#define VDPU_REG_DEC_CTRL0_PIC_FIELDMODE_E BIT(16)
+#define VDPU_REG_DEC_CTRL0_PIC_B_E BIT(15)
+#define VDPU_REG_DEC_CTRL0_PIC_INTER_E BIT(14)
+#define VDPU_REG_DEC_CTRL0_PIC_TOPFIELD_E BIT(13)
+#define VDPU_REG_DEC_CTRL0_FWD_INTERLACE_E BIT(12)
+#define VDPU_REG_DEC_CTRL0_SORENSON_E BIT(11)
+#define VDPU_REG_DEC_CTRL0_WRITE_MVS_E BIT(10)
+#define VDPU_REG_DEC_CTRL0_REF_TOPFIELD_E BIT(9)
+#define VDPU_REG_DEC_CTRL0_REFTOPFIRST_E BIT(8)
+#define VDPU_REG_DEC_CTRL0_SEQ_MBAFF_E BIT(7)
+#define VDPU_REG_DEC_CTRL0_PICORD_COUNT_E BIT(6)
+#define VDPU_REG_CONFIG_DEC_TIMEOUT_E BIT(5)
+#define VDPU_REG_CONFIG_DEC_CLK_GATE_E BIT(4)
+#define VDPU_REG_DEC_CTRL0_DEC_OUT_DIS BIT(2)
+#define VDPU_REG_REF_BUF_CTRL2_REFBU2_BUF_E BIT(1)
+#define VDPU_REG_INTERRUPT_DEC_E BIT(0)
+#define VDPU_REG_SOFT_RESET 0x0e8
+#define VDPU_REG_PRED_FLT 0x0ec
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_0_0(x) (((x) & 0x3ff) << 22)
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_0_1(x) (((x) & 0x3ff) << 12)
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_0_2(x) (((x) & 0x3ff) << 2)
+#define VDPU_REG_ADDITIONAL_CHROMA_ADDRESS 0x0f0
+#define VDPU_REG_ADDR_QTABLE 0x0f4
+#define VDPU_REG_DIRECT_MV_ADDR 0x0f8
+#define VDPU_REG_ADDR_DST 0x0fc
+#define VDPU_REG_ADDR_STR 0x100
+#define VDPU_REG_REFBUF_RELATED 0x104
+#define VDPU_REG_FWD_PIC(i) (0x128 + ((i) * 0x4))
+#define VDPU_REG_FWD_PIC_PINIT_RLIST_F5(x) (((x) & 0x1f) << 25)
+#define VDPU_REG_FWD_PIC_PINIT_RLIST_F4(x) (((x) & 0x1f) << 20)
+#define VDPU_REG_FWD_PIC_PINIT_RLIST_F3(x) (((x) & 0x1f) << 15)
+#define VDPU_REG_FWD_PIC_PINIT_RLIST_F2(x) (((x) & 0x1f) << 10)
+#define VDPU_REG_FWD_PIC_PINIT_RLIST_F1(x) (((x) & 0x1f) << 5)
+#define VDPU_REG_FWD_PIC_PINIT_RLIST_F0(x) (((x) & 0x1f) << 0)
+#define VDPU_REG_REF_PIC(i) (0x130 + ((i) * 0x4))
+#define VDPU_REG_REF_PIC_REFER1_NBR(x) (((x) & 0xffff) << 16)
+#define VDPU_REG_REF_PIC_REFER0_NBR(x) (((x) & 0xffff) << 0)
+#define VDPU_REG_H264_ADDR_REF(i) (0x150 + ((i) * 0x4))
+#define VDPU_REG_ADDR_REF_FIELD_E BIT(1)
+#define VDPU_REG_ADDR_REF_TOPC_E BIT(0)
+#define VDPU_REG_INITIAL_REF_PIC_LIST0 0x190
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_F5(x) (((x) & 0x1f) << 25)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_F4(x) (((x) & 0x1f) << 20)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_F3(x) (((x) & 0x1f) << 15)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_F2(x) (((x) & 0x1f) << 10)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_F1(x) (((x) & 0x1f) << 5)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_F0(x) (((x) & 0x1f) << 0)
+#define VDPU_REG_INITIAL_REF_PIC_LIST1 0x194
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_F11(x) (((x) & 0x1f) << 25)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_F10(x) (((x) & 0x1f) << 20)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_F9(x) (((x) & 0x1f) << 15)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_F8(x) (((x) & 0x1f) << 10)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_F7(x) (((x) & 0x1f) << 5)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_F6(x) (((x) & 0x1f) << 0)
+#define VDPU_REG_INITIAL_REF_PIC_LIST2 0x198
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_F15(x) (((x) & 0x1f) << 15)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_F14(x) (((x) & 0x1f) << 10)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_F13(x) (((x) & 0x1f) << 5)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_F12(x) (((x) & 0x1f) << 0)
+#define VDPU_REG_INITIAL_REF_PIC_LIST3 0x19c
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_B5(x) (((x) & 0x1f) << 25)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_B4(x) (((x) & 0x1f) << 20)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_B3(x) (((x) & 0x1f) << 15)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_B2(x) (((x) & 0x1f) << 10)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_B1(x) (((x) & 0x1f) << 5)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_B0(x) (((x) & 0x1f) << 0)
+#define VDPU_REG_INITIAL_REF_PIC_LIST4 0x1a0
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_B11(x) (((x) & 0x1f) << 25)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_B10(x) (((x) & 0x1f) << 20)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_B9(x) (((x) & 0x1f) << 15)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_B8(x) (((x) & 0x1f) << 10)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_B7(x) (((x) & 0x1f) << 5)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_B6(x) (((x) & 0x1f) << 0)
+#define VDPU_REG_INITIAL_REF_PIC_LIST5 0x1a4
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_B15(x) (((x) & 0x1f) << 15)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_B14(x) (((x) & 0x1f) << 10)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_B13(x) (((x) & 0x1f) << 5)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_B12(x) (((x) & 0x1f) << 0)
+#define VDPU_REG_INITIAL_REF_PIC_LIST6 0x1a8
+#define VDPU_REG_BD_P_REF_PIC_PINIT_RLIST_F3(x) (((x) & 0x1f) << 15)
+#define VDPU_REG_BD_P_REF_PIC_PINIT_RLIST_F2(x) (((x) & 0x1f) << 10)
+#define VDPU_REG_BD_P_REF_PIC_PINIT_RLIST_F1(x) (((x) & 0x1f) << 5)
+#define VDPU_REG_BD_P_REF_PIC_PINIT_RLIST_F0(x) (((x) & 0x1f) << 0)
+#define VDPU_REG_LT_REF 0x1ac
+#define VDPU_REG_VALID_REF 0x1b0
+#define VDPU_REG_H264_PIC_MB_SIZE 0x1b8
+#define VDPU_REG_DEC_CTRL2_CH_QP_OFFSET2(x) (((x) & 0x1f) << 22)
+#define VDPU_REG_DEC_CTRL2_CH_QP_OFFSET(x) (((x) & 0x1f) << 17)
+#define VDPU_REG_DEC_CTRL1_PIC_MB_HEIGHT_P(x) (((x) & 0xff) << 9)
+#define VDPU_REG_DEC_CTRL1_PIC_MB_WIDTH(x) (((x) & 0x1ff) << 0)
+#define VDPU_REG_H264_CTRL 0x1bc
+#define VDPU_REG_DEC_CTRL4_WEIGHT_BIPR_IDC(x) (((x) & 0x3) << 16)
+#define VDPU_REG_DEC_CTRL1_REF_FRAMES(x) (((x) & 0x1f) << 0)
+#define VDPU_REG_CURRENT_FRAME 0x1c0
+#define VDPU_REG_DEC_CTRL5_FILT_CTRL_PRES BIT(31)
+#define VDPU_REG_DEC_CTRL5_RDPIC_CNT_PRES BIT(30)
+#define VDPU_REG_DEC_CTRL4_FRAMENUM_LEN(x) (((x) & 0x1f) << 16)
+#define VDPU_REG_DEC_CTRL4_FRAMENUM(x) (((x) & 0xffff) << 0)
+#define VDPU_REG_REF_FRAME 0x1c4
+#define VDPU_REG_DEC_CTRL5_REFPIC_MK_LEN(x) (((x) & 0x7ff) << 16)
+#define VDPU_REG_DEC_CTRL5_IDR_PIC_ID(x) (((x) & 0xffff) << 0)
+#define VDPU_REG_DEC_CTRL6 0x1c8
+#define VDPU_REG_DEC_CTRL6_PPS_ID(x) (((x) & 0xff) << 24)
+#define VDPU_REG_DEC_CTRL6_REFIDX1_ACTIVE(x) (((x) & 0x1f) << 19)
+#define VDPU_REG_DEC_CTRL6_REFIDX0_ACTIVE(x) (((x) & 0x1f) << 14)
+#define VDPU_REG_DEC_CTRL6_POC_LENGTH(x) (((x) & 0xff) << 0)
+#define VDPU_REG_ENABLE_FLAG 0x1cc
+#define VDPU_REG_DEC_CTRL5_IDR_PIC_E BIT(8)
+#define VDPU_REG_DEC_CTRL4_DIR_8X8_INFER_E BIT(7)
+#define VDPU_REG_DEC_CTRL4_BLACKWHITE_E BIT(6)
+#define VDPU_REG_DEC_CTRL4_CABAC_E BIT(5)
+#define VDPU_REG_DEC_CTRL4_WEIGHT_PRED_E BIT(4)
+#define VDPU_REG_DEC_CTRL5_CONST_INTRA_E BIT(3)
+#define VDPU_REG_DEC_CTRL5_8X8TRANS_FLAG_E BIT(2)
+#define VDPU_REG_DEC_CTRL2_TYPE1_QUANT_E BIT(1)
+#define VDPU_REG_DEC_CTRL2_FIELDPIC_FLAG_E BIT(0)
+#define VDPU_REG_VP8_PIC_MB_SIZE 0x1e0
+#define VDPU_REG_DEC_PIC_MB_WIDTH(x) (((x) & 0x1ff) << 23)
+#define VDPU_REG_DEC_MB_WIDTH_OFF(x) (((x) & 0xf) << 19)
+#define VDPU_REG_DEC_PIC_MB_HEIGHT_P(x) (((x) & 0xff) << 11)
+#define VDPU_REG_DEC_MB_HEIGHT_OFF(x) (((x) & 0xf) << 7)
+#define VDPU_REG_DEC_CTRL1_PIC_MB_W_EXT(x) (((x) & 0x7) << 3)
+#define VDPU_REG_DEC_CTRL1_PIC_MB_H_EXT(x) (((x) & 0x7) << 0)
+#define VDPU_REG_VP8_DCT_START_BIT 0x1e4
+#define VDPU_REG_DEC_CTRL4_DCT1_START_BIT(x) (((x) & 0x3f) << 26)
+#define VDPU_REG_DEC_CTRL4_DCT2_START_BIT(x) (((x) & 0x3f) << 20)
+#define VDPU_REG_DEC_CTRL4_VC1_HEIGHT_EXT BIT(13)
+#define VDPU_REG_DEC_CTRL4_BILIN_MC_E BIT(12)
+#define VDPU_REG_VP8_CTRL0 0x1e8
+#define VDPU_REG_DEC_CTRL2_STRM_START_BIT(x) (((x) & 0x3f) << 26)
+#define VDPU_REG_DEC_CTRL2_STRM1_START_BIT(x) (((x) & 0x3f) << 18)
+#define VDPU_REG_DEC_CTRL2_BOOLEAN_VALUE(x) (((x) & 0xff) << 8)
+#define VDPU_REG_DEC_CTRL2_BOOLEAN_RANGE(x) (((x) & 0xff) << 0)
+#define VDPU_REG_VP8_DATA_VAL 0x1f0
+#define VDPU_REG_DEC_CTRL6_COEFFS_PART_AM(x) (((x) & 0xf) << 24)
+#define VDPU_REG_DEC_CTRL6_STREAM1_LEN(x) (((x) & 0xffffff) << 0)
+#define VDPU_REG_PRED_FLT7 0x1f4
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_5_1(x) (((x) & 0x3ff) << 22)
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_5_2(x) (((x) & 0x3ff) << 12)
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_5_3(x) (((x) & 0x3ff) << 2)
+#define VDPU_REG_PRED_FLT8 0x1f8
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_6_0(x) (((x) & 0x3ff) << 22)
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_6_1(x) (((x) & 0x3ff) << 12)
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_6_2(x) (((x) & 0x3ff) << 2)
+#define VDPU_REG_PRED_FLT9 0x1fc
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_6_3(x) (((x) & 0x3ff) << 22)
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_7_0(x) (((x) & 0x3ff) << 12)
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_7_1(x) (((x) & 0x3ff) << 2)
+#define VDPU_REG_PRED_FLT10 0x200
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_7_2(x) (((x) & 0x3ff) << 22)
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_7_3(x) (((x) & 0x3ff) << 12)
+#define VDPU_REG_BD_REF_PIC_PRED_TAP_2_M1(x) (((x) & 0x3) << 10)
+#define VDPU_REG_BD_REF_PIC_PRED_TAP_2_4(x) (((x) & 0x3) << 8)
+#define VDPU_REG_BD_REF_PIC_PRED_TAP_4_M1(x) (((x) & 0x3) << 6)
+#define VDPU_REG_BD_REF_PIC_PRED_TAP_4_4(x) (((x) & 0x3) << 4)
+#define VDPU_REG_BD_REF_PIC_PRED_TAP_6_M1(x) (((x) & 0x3) << 2)
+#define VDPU_REG_BD_REF_PIC_PRED_TAP_6_4(x) (((x) & 0x3) << 0)
+#define VDPU_REG_FILTER_LEVEL 0x204
+#define VDPU_REG_REF_PIC_LF_LEVEL_0(x) (((x) & 0x3f) << 18)
+#define VDPU_REG_REF_PIC_LF_LEVEL_1(x) (((x) & 0x3f) << 12)
+#define VDPU_REG_REF_PIC_LF_LEVEL_2(x) (((x) & 0x3f) << 6)
+#define VDPU_REG_REF_PIC_LF_LEVEL_3(x) (((x) & 0x3f) << 0)
+#define VDPU_REG_VP8_QUANTER0 0x208
+#define VDPU_REG_REF_PIC_QUANT_DELTA_0(x) (((x) & 0x1f) << 27)
+#define VDPU_REG_REF_PIC_QUANT_DELTA_1(x) (((x) & 0x1f) << 22)
+#define VDPU_REG_REF_PIC_QUANT_0(x) (((x) & 0x7ff) << 11)
+#define VDPU_REG_REF_PIC_QUANT_1(x) (((x) & 0x7ff) << 0)
+#define VDPU_REG_VP8_ADDR_REF0 0x20c
+#define VDPU_REG_FILTER_MB_ADJ 0x210
+#define VDPU_REG_REF_PIC_FILT_TYPE_E BIT(31)
+#define VDPU_REG_REF_PIC_FILT_SHARPNESS(x) (((x) & 0x7) << 28)
+#define VDPU_REG_FILT_MB_ADJ_0(x) (((x) & 0x7f) << 21)
+#define VDPU_REG_FILT_MB_ADJ_1(x) (((x) & 0x7f) << 14)
+#define VDPU_REG_FILT_MB_ADJ_2(x) (((x) & 0x7f) << 7)
+#define VDPU_REG_FILT_MB_ADJ_3(x) (((x) & 0x7f) << 0)
+#define VDPU_REG_FILTER_REF_ADJ 0x214
+#define VDPU_REG_REF_PIC_ADJ_0(x) (((x) & 0x7f) << 21)
+#define VDPU_REG_REF_PIC_ADJ_1(x) (((x) & 0x7f) << 14)
+#define VDPU_REG_REF_PIC_ADJ_2(x) (((x) & 0x7f) << 7)
+#define VDPU_REG_REF_PIC_ADJ_3(x) (((x) & 0x7f) << 0)
+#define VDPU_REG_VP8_ADDR_REF2_5(i) (0x218 + ((i) * 0x4))
+#define VDPU_REG_VP8_GREF_SIGN_BIAS BIT(0)
+#define VDPU_REG_VP8_AREF_SIGN_BIAS BIT(0)
+#define VDPU_REG_VP8_DCT_BASE(i) (0x230 + ((i) * 0x4))
+#define VDPU_REG_VP8_ADDR_CTRL_PART 0x244
+#define VDPU_REG_VP8_ADDR_REF1 0x250
+#define VDPU_REG_VP8_SEGMENT_VAL 0x254
+#define VDPU_REG_FWD_PIC1_SEGMENT_BASE(x) ((x) << 0)
+#define VDPU_REG_FWD_PIC1_SEGMENT_UPD_E BIT(1)
+#define VDPU_REG_FWD_PIC1_SEGMENT_E BIT(0)
+#define VDPU_REG_VP8_DCT_START_BIT2 0x258
+#define VDPU_REG_DEC_CTRL7_DCT3_START_BIT(x) (((x) & 0x3f) << 24)
+#define VDPU_REG_DEC_CTRL7_DCT4_START_BIT(x) (((x) & 0x3f) << 18)
+#define VDPU_REG_DEC_CTRL7_DCT5_START_BIT(x) (((x) & 0x3f) << 12)
+#define VDPU_REG_DEC_CTRL7_DCT6_START_BIT(x) (((x) & 0x3f) << 6)
+#define VDPU_REG_DEC_CTRL7_DCT7_START_BIT(x) (((x) & 0x3f) << 0)
+#define VDPU_REG_VP8_QUANTER1 0x25c
+#define VDPU_REG_REF_PIC_QUANT_DELTA_2(x) (((x) & 0x1f) << 27)
+#define VDPU_REG_REF_PIC_QUANT_DELTA_3(x) (((x) & 0x1f) << 22)
+#define VDPU_REG_REF_PIC_QUANT_2(x) (((x) & 0x7ff) << 11)
+#define VDPU_REG_REF_PIC_QUANT_3(x) (((x) & 0x7ff) << 0)
+#define VDPU_REG_VP8_QUANTER2 0x260
+#define VDPU_REG_REF_PIC_QUANT_DELTA_4(x) (((x) & 0x1f) << 27)
+#define VDPU_REG_REF_PIC_QUANT_4(x) (((x) & 0x7ff) << 11)
+#define VDPU_REG_REF_PIC_QUANT_5(x) (((x) & 0x7ff) << 0)
+#define VDPU_REG_PRED_FLT1 0x264
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_0_3(x) (((x) & 0x3ff) << 22)
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_1_0(x) (((x) & 0x3ff) << 12)
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_1_1(x) (((x) & 0x3ff) << 2)
+#define VDPU_REG_PRED_FLT2 0x268
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_1_2(x) (((x) & 0x3ff) << 22)
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_1_3(x) (((x) & 0x3ff) << 12)
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_2_0(x) (((x) & 0x3ff) << 2)
+#define VDPU_REG_PRED_FLT3 0x26c
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_2_1(x) (((x) & 0x3ff) << 22)
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_2_2(x) (((x) & 0x3ff) << 12)
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_2_3(x) (((x) & 0x3ff) << 2)
+#define VDPU_REG_PRED_FLT4 0x270
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_3_0(x) (((x) & 0x3ff) << 22)
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_3_1(x) (((x) & 0x3ff) << 12)
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_3_2(x) (((x) & 0x3ff) << 2)
+#define VDPU_REG_PRED_FLT5 0x274
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_3_3(x) (((x) & 0x3ff) << 22)
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_4_0(x) (((x) & 0x3ff) << 12)
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_4_1(x) (((x) & 0x3ff) << 2)
+#define VDPU_REG_PRED_FLT6 0x278
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_4_2(x) (((x) & 0x3ff) << 22)
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_4_3(x) (((x) & 0x3ff) << 12)
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_5_0(x) (((x) & 0x3ff) << 2)
+
+#endif /* ROCKCHIP_VPU2_REGS_H_ */
diff --git a/drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c b/drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
new file mode 100644
index 0000000000..cc44838574
--- /dev/null
+++ b/drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c
@@ -0,0 +1,2232 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023, Collabora
+ *
+ * Author: Benjamin Gaignard <benjamin.gaignard@collabora.com>
+ */
+
+#include <media/v4l2-mem2mem.h>
+#include "hantro.h"
+#include "hantro_v4l2.h"
+#include "rockchip_vpu981_regs.h"
+
+#define AV1_DEC_MODE 17
+#define GM_GLOBAL_MODELS_PER_FRAME 7
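+/*
+ * Each of the 7 global-motion models is packed as six 32-bit warp
+ * parameters followed by four 16-bit shear parameters (alpha, beta,
+ * gamma, delta); see rockchip_vpu981_av1_dec_set_global_model().
+ */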
+#define GLOBAL_MODEL_TOTAL_SIZE (6 * 4 + 4 * 2)
+#define GLOBAL_MODEL_SIZE ALIGN(GM_GLOBAL_MODELS_PER_FRAME * GLOBAL_MODEL_TOTAL_SIZE, 2048)
+#define AV1_MAX_TILES 128
+#define AV1_TILE_INFO_SIZE (AV1_MAX_TILES * 16)
+#define AV1DEC_MAX_PIC_BUFFERS 24
+#define AV1_REF_SCALE_SHIFT 14
+#define AV1_INVALID_IDX -1
+#define MAX_FRAME_DISTANCE 31
+#define AV1_PRIMARY_REF_NONE 7
+#define AV1_TILE_SIZE ALIGN(32 * 128, 4096)
+/*
+ * These three values aren't defined in enum v4l2_av1_segment_feature
+ * because they are not part of the specification.
+ */
+#define V4L2_AV1_SEG_LVL_ALT_LF_Y_H 2
+#define V4L2_AV1_SEG_LVL_ALT_LF_U 3
+#define V4L2_AV1_SEG_LVL_ALT_LF_V 4
+
+#define SUPERRES_SCALE_BITS 3
+#define SCALE_NUMERATOR 8
+#define SUPERRES_SCALE_DENOMINATOR_MIN (SCALE_NUMERATOR + 1)
+
+#define RS_SUBPEL_BITS 6
+#define RS_SUBPEL_MASK ((1 << RS_SUBPEL_BITS) - 1)
+#define RS_SCALE_SUBPEL_BITS 14
+#define RS_SCALE_SUBPEL_MASK ((1 << RS_SCALE_SUBPEL_BITS) - 1)
+#define RS_SCALE_EXTRA_BITS (RS_SCALE_SUBPEL_BITS - RS_SUBPEL_BITS)
+#define RS_SCALE_EXTRA_OFF (1 << (RS_SCALE_EXTRA_BITS - 1))
+
+#define IS_INTRA(type) (((type) == V4L2_AV1_KEY_FRAME) || ((type) == V4L2_AV1_INTRA_ONLY_FRAME))
+
+#define LST_BUF_IDX (V4L2_AV1_REF_LAST_FRAME - V4L2_AV1_REF_LAST_FRAME)
+#define LST2_BUF_IDX (V4L2_AV1_REF_LAST2_FRAME - V4L2_AV1_REF_LAST_FRAME)
+#define LST3_BUF_IDX (V4L2_AV1_REF_LAST3_FRAME - V4L2_AV1_REF_LAST_FRAME)
+#define GLD_BUF_IDX (V4L2_AV1_REF_GOLDEN_FRAME - V4L2_AV1_REF_LAST_FRAME)
+#define BWD_BUF_IDX (V4L2_AV1_REF_BWDREF_FRAME - V4L2_AV1_REF_LAST_FRAME)
+#define ALT2_BUF_IDX (V4L2_AV1_REF_ALTREF2_FRAME - V4L2_AV1_REF_LAST_FRAME)
+#define ALT_BUF_IDX (V4L2_AV1_REF_ALTREF_FRAME - V4L2_AV1_REF_LAST_FRAME)
+
+#define DIV_LUT_PREC_BITS 14
+#define DIV_LUT_BITS 8
+#define DIV_LUT_NUM BIT(DIV_LUT_BITS)
+#define WARP_PARAM_REDUCE_BITS 6
+#define WARPEDMODEL_PREC_BITS 16
+
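+/* Rounding right shift: (value + 2^(n - 1)) >> n; signed variant below. */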
+#define AV1_DIV_ROUND_UP_POW2(value, n) \
+({ \
+ typeof(n) _n = n; \
+ typeof(value) _value = value; \
+ (_value + (BIT(_n) >> 1)) >> _n; \
+})
+
+#define AV1_DIV_ROUND_UP_POW2_SIGNED(value, n) \
+({ \
+ typeof(n) _n_ = n; \
+ typeof(value) _value_ = value; \
+ (((_value_) < 0) ? -AV1_DIV_ROUND_UP_POW2(-(_value_), (_n_)) \
+ : AV1_DIV_ROUND_UP_POW2((_value_), (_n_))); \
+})
+
+struct rockchip_av1_film_grain {
+ u8 scaling_lut_y[256];
+ u8 scaling_lut_cb[256];
+ u8 scaling_lut_cr[256];
+ s16 cropped_luma_grain_block[4096];
+ s16 cropped_chroma_grain_block[1024 * 2];
+};
+
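+/*
+ * Division lookup table matching the div_lut[] of the AV1
+ * specification: div_lut[f] = round(2^14 * 256 / (256 + f)), used by
+ * rockchip_vpu981_av1_dec_resolve_divisor_32() to replace a division
+ * with a multiply and shift.
+ */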
+static const short div_lut[DIV_LUT_NUM + 1] = {
+ 16384, 16320, 16257, 16194, 16132, 16070, 16009, 15948, 15888, 15828, 15768,
+ 15709, 15650, 15592, 15534, 15477, 15420, 15364, 15308, 15252, 15197, 15142,
+ 15087, 15033, 14980, 14926, 14873, 14821, 14769, 14717, 14665, 14614, 14564,
+ 14513, 14463, 14413, 14364, 14315, 14266, 14218, 14170, 14122, 14075, 14028,
+ 13981, 13935, 13888, 13843, 13797, 13752, 13707, 13662, 13618, 13574, 13530,
+ 13487, 13443, 13400, 13358, 13315, 13273, 13231, 13190, 13148, 13107, 13066,
+ 13026, 12985, 12945, 12906, 12866, 12827, 12788, 12749, 12710, 12672, 12633,
+ 12596, 12558, 12520, 12483, 12446, 12409, 12373, 12336, 12300, 12264, 12228,
+ 12193, 12157, 12122, 12087, 12053, 12018, 11984, 11950, 11916, 11882, 11848,
+ 11815, 11782, 11749, 11716, 11683, 11651, 11619, 11586, 11555, 11523, 11491,
+ 11460, 11429, 11398, 11367, 11336, 11305, 11275, 11245, 11215, 11185, 11155,
+ 11125, 11096, 11067, 11038, 11009, 10980, 10951, 10923, 10894, 10866, 10838,
+ 10810, 10782, 10755, 10727, 10700, 10673, 10645, 10618, 10592, 10565, 10538,
+ 10512, 10486, 10460, 10434, 10408, 10382, 10356, 10331, 10305, 10280, 10255,
+ 10230, 10205, 10180, 10156, 10131, 10107, 10082, 10058, 10034, 10010, 9986,
+ 9963, 9939, 9916, 9892, 9869, 9846, 9823, 9800, 9777, 9754, 9732,
+ 9709, 9687, 9664, 9642, 9620, 9598, 9576, 9554, 9533, 9511, 9489,
+ 9468, 9447, 9425, 9404, 9383, 9362, 9341, 9321, 9300, 9279, 9259,
+ 9239, 9218, 9198, 9178, 9158, 9138, 9118, 9098, 9079, 9059, 9039,
+ 9020, 9001, 8981, 8962, 8943, 8924, 8905, 8886, 8867, 8849, 8830,
+ 8812, 8793, 8775, 8756, 8738, 8720, 8702, 8684, 8666, 8648, 8630,
+ 8613, 8595, 8577, 8560, 8542, 8525, 8508, 8490, 8473, 8456, 8439,
+ 8422, 8405, 8389, 8372, 8355, 8339, 8322, 8306, 8289, 8273, 8257,
+ 8240, 8224, 8208, 8192,
+};
+
+static int rockchip_vpu981_get_frame_index(struct hantro_ctx *ctx, int ref)
+{
+ struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
+ struct hantro_av1_dec_ctrls *ctrls = &av1_dec->ctrls;
+ const struct v4l2_ctrl_av1_frame *frame = ctrls->frame;
+ u64 timestamp;
+ int i, idx = frame->ref_frame_idx[ref];
+
+ if (idx >= V4L2_AV1_TOTAL_REFS_PER_FRAME || idx < 0)
+ return AV1_INVALID_IDX;
+
+ timestamp = frame->reference_frame_ts[idx];
+ for (i = 0; i < AV1_MAX_FRAME_BUF_COUNT; i++) {
+ if (!av1_dec->frame_refs[i].used)
+ continue;
+ if (av1_dec->frame_refs[i].timestamp == timestamp)
+ return i;
+ }
+
+ return AV1_INVALID_IDX;
+}
+
+static int rockchip_vpu981_get_order_hint(struct hantro_ctx *ctx, int ref)
+{
+ struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
+ int idx = rockchip_vpu981_get_frame_index(ctx, ref);
+
+ if (idx != AV1_INVALID_IDX)
+ return av1_dec->frame_refs[idx].order_hint;
+
+ return 0;
+}
+
+static int rockchip_vpu981_av1_dec_frame_ref(struct hantro_ctx *ctx,
+ u64 timestamp)
+{
+ struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
+ struct hantro_av1_dec_ctrls *ctrls = &av1_dec->ctrls;
+ const struct v4l2_ctrl_av1_frame *frame = ctrls->frame;
+ int i;
+
+ for (i = 0; i < AV1_MAX_FRAME_BUF_COUNT; i++) {
+ int j;
+
+ if (av1_dec->frame_refs[i].used)
+ continue;
+
+ av1_dec->frame_refs[i].width = frame->frame_width_minus_1 + 1;
+ av1_dec->frame_refs[i].height = frame->frame_height_minus_1 + 1;
+ av1_dec->frame_refs[i].mi_cols = DIV_ROUND_UP(frame->frame_width_minus_1 + 1, 8);
+ av1_dec->frame_refs[i].mi_rows = DIV_ROUND_UP(frame->frame_height_minus_1 + 1, 8);
+ av1_dec->frame_refs[i].timestamp = timestamp;
+ av1_dec->frame_refs[i].frame_type = frame->frame_type;
+ av1_dec->frame_refs[i].order_hint = frame->order_hint;
+ if (!av1_dec->frame_refs[i].vb2_ref)
+ av1_dec->frame_refs[i].vb2_ref = hantro_get_dst_buf(ctx);
+
+ for (j = 0; j < V4L2_AV1_TOTAL_REFS_PER_FRAME; j++)
+ av1_dec->frame_refs[i].order_hints[j] = frame->order_hints[j];
+ av1_dec->frame_refs[i].used = true;
+ av1_dec->current_frame_index = i;
+
+ return i;
+ }
+
+ return AV1_INVALID_IDX;
+}
+
+static void rockchip_vpu981_av1_dec_frame_unref(struct hantro_ctx *ctx, int idx)
+{
+ struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
+
+ if (idx >= 0)
+ av1_dec->frame_refs[idx].used = false;
+}
+
+static void rockchip_vpu981_av1_dec_clean_refs(struct hantro_ctx *ctx)
+{
+ struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
+ struct hantro_av1_dec_ctrls *ctrls = &av1_dec->ctrls;
+
+ int ref, idx;
+
+ for (idx = 0; idx < AV1_MAX_FRAME_BUF_COUNT; idx++) {
+ u64 timestamp = av1_dec->frame_refs[idx].timestamp;
+ bool used = false;
+
+ if (!av1_dec->frame_refs[idx].used)
+ continue;
+
+ for (ref = 0; ref < V4L2_AV1_TOTAL_REFS_PER_FRAME; ref++) {
+ if (ctrls->frame->reference_frame_ts[ref] == timestamp)
+ used = true;
+ }
+
+ if (!used)
+ rockchip_vpu981_av1_dec_frame_unref(ctx, idx);
+ }
+}
+
+static size_t rockchip_vpu981_av1_dec_luma_size(struct hantro_ctx *ctx)
+{
+ return ctx->dst_fmt.width * ctx->dst_fmt.height * ctx->bit_depth / 8;
+}
+
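+/*
+ * Despite the name, this returns the combined luma + chroma size
+ * (i.e. the offset at which the motion-vector data starts), aligned
+ * to 64 bytes.
+ */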
+static size_t rockchip_vpu981_av1_dec_chroma_size(struct hantro_ctx *ctx)
+{
+ size_t cr_offset = rockchip_vpu981_av1_dec_luma_size(ctx);
+
+ return ALIGN((cr_offset * 3) / 2, 64);
+}
+
+static void rockchip_vpu981_av1_dec_tiles_free(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
+
+ if (av1_dec->db_data_col.cpu)
+ dma_free_coherent(vpu->dev, av1_dec->db_data_col.size,
+ av1_dec->db_data_col.cpu,
+ av1_dec->db_data_col.dma);
+ av1_dec->db_data_col.cpu = NULL;
+
+ if (av1_dec->db_ctrl_col.cpu)
+ dma_free_coherent(vpu->dev, av1_dec->db_ctrl_col.size,
+ av1_dec->db_ctrl_col.cpu,
+ av1_dec->db_ctrl_col.dma);
+ av1_dec->db_ctrl_col.cpu = NULL;
+
+ if (av1_dec->cdef_col.cpu)
+ dma_free_coherent(vpu->dev, av1_dec->cdef_col.size,
+ av1_dec->cdef_col.cpu, av1_dec->cdef_col.dma);
+ av1_dec->cdef_col.cpu = NULL;
+
+ if (av1_dec->sr_col.cpu)
+ dma_free_coherent(vpu->dev, av1_dec->sr_col.size,
+ av1_dec->sr_col.cpu, av1_dec->sr_col.dma);
+ av1_dec->sr_col.cpu = NULL;
+
+ if (av1_dec->lr_col.cpu)
+ dma_free_coherent(vpu->dev, av1_dec->lr_col.size,
+ av1_dec->lr_col.cpu, av1_dec->lr_col.dma);
+ av1_dec->lr_col.cpu = NULL;
+}
+
+static int rockchip_vpu981_av1_dec_tiles_reallocate(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
+ struct hantro_av1_dec_ctrls *ctrls = &av1_dec->ctrls;
+ unsigned int num_tile_cols = 1 << ctrls->tile_group_entry->tile_col;
+ unsigned int height = ALIGN(ctrls->frame->frame_height_minus_1 + 1, 64);
+ unsigned int height_in_sb = height / 64;
+ unsigned int stripe_num = ((height + 8) + 63) / 64;
+ size_t size;
+
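+	/*
+	 * The per-tile-column buffers only grow; keep the current ones
+	 * if they are already large enough.
+	 */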
+ if (av1_dec->db_data_col.size >=
+ ALIGN(height * 12 * ctx->bit_depth / 8, 128) * num_tile_cols)
+ return 0;
+
+ rockchip_vpu981_av1_dec_tiles_free(ctx);
+
+ size = ALIGN(height * 12 * ctx->bit_depth / 8, 128) * num_tile_cols;
+ av1_dec->db_data_col.cpu = dma_alloc_coherent(vpu->dev, size,
+ &av1_dec->db_data_col.dma,
+ GFP_KERNEL);
+ if (!av1_dec->db_data_col.cpu)
+ goto buffer_allocation_error;
+ av1_dec->db_data_col.size = size;
+
+ size = ALIGN(height * 2 * 16 / 4, 128) * num_tile_cols;
+ av1_dec->db_ctrl_col.cpu = dma_alloc_coherent(vpu->dev, size,
+ &av1_dec->db_ctrl_col.dma,
+ GFP_KERNEL);
+ if (!av1_dec->db_ctrl_col.cpu)
+ goto buffer_allocation_error;
+ av1_dec->db_ctrl_col.size = size;
+
+ size = ALIGN(height_in_sb * 44 * ctx->bit_depth * 16 / 8, 128) * num_tile_cols;
+ av1_dec->cdef_col.cpu = dma_alloc_coherent(vpu->dev, size,
+ &av1_dec->cdef_col.dma,
+ GFP_KERNEL);
+ if (!av1_dec->cdef_col.cpu)
+ goto buffer_allocation_error;
+ av1_dec->cdef_col.size = size;
+
+ size = ALIGN(height_in_sb * (3040 + 1280), 128) * num_tile_cols;
+ av1_dec->sr_col.cpu = dma_alloc_coherent(vpu->dev, size,
+ &av1_dec->sr_col.dma,
+ GFP_KERNEL);
+ if (!av1_dec->sr_col.cpu)
+ goto buffer_allocation_error;
+ av1_dec->sr_col.size = size;
+
+ size = ALIGN(stripe_num * 1536 * ctx->bit_depth / 8, 128) * num_tile_cols;
+ av1_dec->lr_col.cpu = dma_alloc_coherent(vpu->dev, size,
+ &av1_dec->lr_col.dma,
+ GFP_KERNEL);
+ if (!av1_dec->lr_col.cpu)
+ goto buffer_allocation_error;
+ av1_dec->lr_col.size = size;
+
+ av1_dec->num_tile_cols_allocated = num_tile_cols;
+ return 0;
+
+buffer_allocation_error:
+ rockchip_vpu981_av1_dec_tiles_free(ctx);
+ return -ENOMEM;
+}
+
+void rockchip_vpu981_av1_dec_exit(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
+
+ if (av1_dec->global_model.cpu)
+ dma_free_coherent(vpu->dev, av1_dec->global_model.size,
+ av1_dec->global_model.cpu,
+ av1_dec->global_model.dma);
+ av1_dec->global_model.cpu = NULL;
+
+ if (av1_dec->tile_info.cpu)
+ dma_free_coherent(vpu->dev, av1_dec->tile_info.size,
+ av1_dec->tile_info.cpu,
+ av1_dec->tile_info.dma);
+ av1_dec->tile_info.cpu = NULL;
+
+ if (av1_dec->film_grain.cpu)
+ dma_free_coherent(vpu->dev, av1_dec->film_grain.size,
+ av1_dec->film_grain.cpu,
+ av1_dec->film_grain.dma);
+ av1_dec->film_grain.cpu = NULL;
+
+ if (av1_dec->prob_tbl.cpu)
+ dma_free_coherent(vpu->dev, av1_dec->prob_tbl.size,
+ av1_dec->prob_tbl.cpu, av1_dec->prob_tbl.dma);
+ av1_dec->prob_tbl.cpu = NULL;
+
+ if (av1_dec->prob_tbl_out.cpu)
+ dma_free_coherent(vpu->dev, av1_dec->prob_tbl_out.size,
+ av1_dec->prob_tbl_out.cpu,
+ av1_dec->prob_tbl_out.dma);
+ av1_dec->prob_tbl_out.cpu = NULL;
+
+ if (av1_dec->tile_buf.cpu)
+ dma_free_coherent(vpu->dev, av1_dec->tile_buf.size,
+ av1_dec->tile_buf.cpu, av1_dec->tile_buf.dma);
+ av1_dec->tile_buf.cpu = NULL;
+
+ rockchip_vpu981_av1_dec_tiles_free(ctx);
+}
+
+int rockchip_vpu981_av1_dec_init(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
+
+ memset(av1_dec, 0, sizeof(*av1_dec));
+
+ av1_dec->global_model.cpu = dma_alloc_coherent(vpu->dev, GLOBAL_MODEL_SIZE,
+ &av1_dec->global_model.dma,
+ GFP_KERNEL);
+ if (!av1_dec->global_model.cpu)
+ return -ENOMEM;
+ av1_dec->global_model.size = GLOBAL_MODEL_SIZE;
+
+ av1_dec->tile_info.cpu = dma_alloc_coherent(vpu->dev, AV1_MAX_TILES,
+ &av1_dec->tile_info.dma,
+ GFP_KERNEL);
+ if (!av1_dec->tile_info.cpu)
+ return -ENOMEM;
+ av1_dec->tile_info.size = AV1_MAX_TILES;
+
+ av1_dec->film_grain.cpu = dma_alloc_coherent(vpu->dev,
+ ALIGN(sizeof(struct rockchip_av1_film_grain), 2048),
+ &av1_dec->film_grain.dma,
+ GFP_KERNEL);
+ if (!av1_dec->film_grain.cpu)
+ return -ENOMEM;
+ av1_dec->film_grain.size = ALIGN(sizeof(struct rockchip_av1_film_grain), 2048);
+
+ av1_dec->prob_tbl.cpu = dma_alloc_coherent(vpu->dev,
+ ALIGN(sizeof(struct av1cdfs), 2048),
+ &av1_dec->prob_tbl.dma,
+ GFP_KERNEL);
+ if (!av1_dec->prob_tbl.cpu)
+ return -ENOMEM;
+ av1_dec->prob_tbl.size = ALIGN(sizeof(struct av1cdfs), 2048);
+
+ av1_dec->prob_tbl_out.cpu = dma_alloc_coherent(vpu->dev,
+ ALIGN(sizeof(struct av1cdfs), 2048),
+ &av1_dec->prob_tbl_out.dma,
+ GFP_KERNEL);
+ if (!av1_dec->prob_tbl_out.cpu)
+ return -ENOMEM;
+ av1_dec->prob_tbl_out.size = ALIGN(sizeof(struct av1cdfs), 2048);
+ av1_dec->cdfs = &av1_dec->default_cdfs;
+ av1_dec->cdfs_ndvc = &av1_dec->default_cdfs_ndvc;
+
+ rockchip_av1_set_default_cdfs(av1_dec->cdfs, av1_dec->cdfs_ndvc);
+
+ av1_dec->tile_buf.cpu = dma_alloc_coherent(vpu->dev,
+ AV1_TILE_SIZE,
+ &av1_dec->tile_buf.dma,
+ GFP_KERNEL);
+ if (!av1_dec->tile_buf.cpu)
+ return -ENOMEM;
+ av1_dec->tile_buf.size = AV1_TILE_SIZE;
+
+ return 0;
+}
+
+static int rockchip_vpu981_av1_dec_prepare_run(struct hantro_ctx *ctx)
+{
+ struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
+ struct hantro_av1_dec_ctrls *ctrls = &av1_dec->ctrls;
+
+ ctrls->sequence = hantro_get_ctrl(ctx, V4L2_CID_STATELESS_AV1_SEQUENCE);
+ if (WARN_ON(!ctrls->sequence))
+ return -EINVAL;
+
+ ctrls->tile_group_entry =
+ hantro_get_ctrl(ctx, V4L2_CID_STATELESS_AV1_TILE_GROUP_ENTRY);
+ if (WARN_ON(!ctrls->tile_group_entry))
+ return -EINVAL;
+
+ ctrls->frame = hantro_get_ctrl(ctx, V4L2_CID_STATELESS_AV1_FRAME);
+ if (WARN_ON(!ctrls->frame))
+ return -EINVAL;
+
+ ctrls->film_grain =
+ hantro_get_ctrl(ctx, V4L2_CID_STATELESS_AV1_FILM_GRAIN);
+
+ return rockchip_vpu981_av1_dec_tiles_reallocate(ctx);
+}
+
+static inline int rockchip_vpu981_av1_dec_get_msb(u32 n)
+{
+ if (n == 0)
+ return 0;
+ return 31 ^ __builtin_clz(n);
+}
+
+static short rockchip_vpu981_av1_dec_resolve_divisor_32(u32 d, short *shift)
+{
+ int f;
+ u64 e;
+
+ *shift = rockchip_vpu981_av1_dec_get_msb(d);
+ /* e is obtained from D after resetting the most significant 1 bit. */
+ e = d - ((u32)1 << *shift);
+ /* Get the most significant DIV_LUT_BITS (8) bits of e into f */
+ if (*shift > DIV_LUT_BITS)
+ f = AV1_DIV_ROUND_UP_POW2(e, *shift - DIV_LUT_BITS);
+ else
+ f = e << (DIV_LUT_BITS - *shift);
+ if (f > DIV_LUT_NUM)
+ return -1;
+ *shift += DIV_LUT_PREC_BITS;
+ /* Use f as lookup into the precomputed table of multipliers */
+ return div_lut[f];
+}
+
+static void
+rockchip_vpu981_av1_dec_get_shear_params(const u32 *params, s64 *alpha,
+ s64 *beta, s64 *gamma, s64 *delta)
+{
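+	/* The global-motion params are signed fixed-point values carried in u32. */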
+	const int *mat = (const int *)params;
+ short shift;
+ short y;
+ long long gv, dv;
+
+ if (mat[2] <= 0)
+ return;
+
+ *alpha = clamp_val(mat[2] - (1 << WARPEDMODEL_PREC_BITS), S16_MIN, S16_MAX);
+ *beta = clamp_val(mat[3], S16_MIN, S16_MAX);
+
+ y = rockchip_vpu981_av1_dec_resolve_divisor_32(abs(mat[2]), &shift) * (mat[2] < 0 ? -1 : 1);
+
+ gv = ((long long)mat[4] * (1 << WARPEDMODEL_PREC_BITS)) * y;
+
+ *gamma = clamp_val((int)AV1_DIV_ROUND_UP_POW2_SIGNED(gv, shift), S16_MIN, S16_MAX);
+
+ dv = ((long long)mat[3] * mat[4]) * y;
+ *delta = clamp_val(mat[5] -
+ (int)AV1_DIV_ROUND_UP_POW2_SIGNED(dv, shift) - (1 << WARPEDMODEL_PREC_BITS),
+ S16_MIN, S16_MAX);
+
+ *alpha = AV1_DIV_ROUND_UP_POW2_SIGNED(*alpha, WARP_PARAM_REDUCE_BITS)
+ * (1 << WARP_PARAM_REDUCE_BITS);
+ *beta = AV1_DIV_ROUND_UP_POW2_SIGNED(*beta, WARP_PARAM_REDUCE_BITS)
+ * (1 << WARP_PARAM_REDUCE_BITS);
+ *gamma = AV1_DIV_ROUND_UP_POW2_SIGNED(*gamma, WARP_PARAM_REDUCE_BITS)
+ * (1 << WARP_PARAM_REDUCE_BITS);
+ *delta = AV1_DIV_ROUND_UP_POW2_SIGNED(*delta, WARP_PARAM_REDUCE_BITS)
+ * (1 << WARP_PARAM_REDUCE_BITS);
+}
+
+static void rockchip_vpu981_av1_dec_set_global_model(struct hantro_ctx *ctx)
+{
+ struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
+ struct hantro_av1_dec_ctrls *ctrls = &av1_dec->ctrls;
+ const struct v4l2_ctrl_av1_frame *frame = ctrls->frame;
+ const struct v4l2_av1_global_motion *gm = &frame->global_motion;
+ u8 *dst = av1_dec->global_model.cpu;
+ struct hantro_dev *vpu = ctx->dev;
+ int ref_frame, i;
+
+ memset(dst, 0, GLOBAL_MODEL_SIZE);
+ for (ref_frame = 0; ref_frame < V4L2_AV1_REFS_PER_FRAME; ++ref_frame) {
+ s64 alpha = 0, beta = 0, gamma = 0, delta = 0;
+
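+		/*
+		 * Pack the six 32-bit warp parameters; the hardware
+		 * appears to expect parameters 2 and 3 swapped relative
+		 * to the V4L2 ordering.
+		 */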
+ for (i = 0; i < 6; ++i) {
+ if (i == 2)
+ *(s32 *)dst =
+ gm->params[V4L2_AV1_REF_LAST_FRAME + ref_frame][3];
+ else if (i == 3)
+ *(s32 *)dst =
+ gm->params[V4L2_AV1_REF_LAST_FRAME + ref_frame][2];
+ else
+ *(s32 *)dst =
+ gm->params[V4L2_AV1_REF_LAST_FRAME + ref_frame][i];
+ dst += 4;
+ }
+
+ if (gm->type[V4L2_AV1_REF_LAST_FRAME + ref_frame] <= V4L2_AV1_WARP_MODEL_AFFINE)
+ rockchip_vpu981_av1_dec_get_shear_params(&gm->params[V4L2_AV1_REF_LAST_FRAME + ref_frame][0],
+ &alpha, &beta, &gamma, &delta);
+
+ *(s16 *)dst = alpha;
+ dst += 2;
+ *(s16 *)dst = beta;
+ dst += 2;
+ *(s16 *)dst = gamma;
+ dst += 2;
+ *(s16 *)dst = delta;
+ dst += 2;
+ }
+
+ hantro_write_addr(vpu, AV1_GLOBAL_MODEL, av1_dec->global_model.dma);
+}
+
+static int rockchip_vpu981_av1_tile_log2(int target)
+{
+ int k;
+
+ /*
+ * returns the smallest value for k such that 1 << k is greater
+ * than or equal to target
+ */
+	for (k = 0; (1 << k) < target; k++)
+		;
+
+ return k;
+}
+
+static void rockchip_vpu981_av1_dec_set_tile_info(struct hantro_ctx *ctx)
+{
+ struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
+ struct hantro_av1_dec_ctrls *ctrls = &av1_dec->ctrls;
+ const struct v4l2_av1_tile_info *tile_info = &ctrls->frame->tile_info;
+ const struct v4l2_ctrl_av1_tile_group_entry *group_entry =
+ ctrls->tile_group_entry;
+ int context_update_y =
+ tile_info->context_update_tile_id / tile_info->tile_cols;
+ int context_update_x =
+ tile_info->context_update_tile_id % tile_info->tile_cols;
+ int context_update_tile_id =
+ context_update_x * tile_info->tile_rows + context_update_y;
+ u8 *dst = av1_dec->tile_info.cpu;
+ struct hantro_dev *vpu = ctx->dev;
+ int tile0, tile1;
+
+ memset(dst, 0, av1_dec->tile_info.size);
+
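+	/*
+	 * Each tile gets a 16-byte record: its width and height in
+	 * superblock units (zero-padded to 4 bytes each), then the byte
+	 * offsets of the start and end of its data in the tile group.
+	 */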
+ for (tile0 = 0; tile0 < tile_info->tile_cols; tile0++) {
+ for (tile1 = 0; tile1 < tile_info->tile_rows; tile1++) {
+ int tile_id = tile1 * tile_info->tile_cols + tile0;
+ u32 start, end;
+ u32 y0 =
+ tile_info->height_in_sbs_minus_1[tile1] + 1;
+ u32 x0 = tile_info->width_in_sbs_minus_1[tile0] + 1;
+
+ /* tile size in SB units (width,height) */
+ *dst++ = x0;
+ *dst++ = 0;
+ *dst++ = 0;
+ *dst++ = 0;
+ *dst++ = y0;
+ *dst++ = 0;
+ *dst++ = 0;
+ *dst++ = 0;
+
+ /* tile start position */
+ start = group_entry[tile_id].tile_offset - group_entry[0].tile_offset;
+ *dst++ = start & 255;
+ *dst++ = (start >> 8) & 255;
+ *dst++ = (start >> 16) & 255;
+ *dst++ = (start >> 24) & 255;
+
+ /* number of bytes in tile data */
+ end = start + group_entry[tile_id].tile_size;
+ *dst++ = end & 255;
+ *dst++ = (end >> 8) & 255;
+ *dst++ = (end >> 16) & 255;
+ *dst++ = (end >> 24) & 255;
+ }
+ }
+
+ hantro_reg_write(vpu, &av1_multicore_expect_context_update, !!(context_update_x == 0));
+ hantro_reg_write(vpu, &av1_tile_enable,
+ !!((tile_info->tile_cols > 1) || (tile_info->tile_rows > 1)));
+ hantro_reg_write(vpu, &av1_num_tile_cols_8k, tile_info->tile_cols);
+ hantro_reg_write(vpu, &av1_num_tile_rows_8k, tile_info->tile_rows);
+ hantro_reg_write(vpu, &av1_context_update_tile_id, context_update_tile_id);
+ hantro_reg_write(vpu, &av1_tile_transpose, 1);
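+	/*
+	 * Tiles are walked transposed (column-major), which is why
+	 * context_update_tile_id above is recomputed as
+	 * x * tile_rows + y.
+	 */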
+ if (rockchip_vpu981_av1_tile_log2(tile_info->tile_cols) ||
+ rockchip_vpu981_av1_tile_log2(tile_info->tile_rows))
+ hantro_reg_write(vpu, &av1_dec_tile_size_mag, tile_info->tile_size_bytes - 1);
+ else
+ hantro_reg_write(vpu, &av1_dec_tile_size_mag, 3);
+
+ hantro_write_addr(vpu, AV1_TILE_BASE, av1_dec->tile_info.dma);
+}
+
+static int rockchip_vpu981_av1_dec_get_dist(struct hantro_ctx *ctx,
+ int a, int b)
+{
+ struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
+ struct hantro_av1_dec_ctrls *ctrls = &av1_dec->ctrls;
+ int bits = ctrls->sequence->order_hint_bits - 1;
+ int diff, m;
+
+ if (!ctrls->sequence->order_hint_bits)
+ return 0;
+
+ diff = a - b;
+ m = 1 << bits;
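+	/*
+	 * Sign-extend the wrapped delta into [-m, m), as in the AV1
+	 * spec's get_relative_dist().
+	 */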
+ diff = (diff & (m - 1)) - (diff & m);
+
+ return diff;
+}
+
+static void rockchip_vpu981_av1_dec_set_frame_sign_bias(struct hantro_ctx *ctx)
+{
+ struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
+ struct hantro_av1_dec_ctrls *ctrls = &av1_dec->ctrls;
+ const struct v4l2_ctrl_av1_frame *frame = ctrls->frame;
+ const struct v4l2_ctrl_av1_sequence *sequence = ctrls->sequence;
+ int i;
+
+ if (!sequence->order_hint_bits || IS_INTRA(frame->frame_type)) {
+ for (i = 0; i < V4L2_AV1_TOTAL_REFS_PER_FRAME; i++)
+ av1_dec->ref_frame_sign_bias[i] = 0;
+
+ return;
+ }
+	/*
+	 * A reference gets sign bias 1 when its order hint is ahead of
+	 * the current frame, i.e. it is a backward reference.
+	 */
+ for (i = 0; i < V4L2_AV1_TOTAL_REFS_PER_FRAME - 1; i++) {
+ if (rockchip_vpu981_get_frame_index(ctx, i) >= 0) {
+ int rel_off =
+ rockchip_vpu981_av1_dec_get_dist(ctx,
+ rockchip_vpu981_get_order_hint(ctx, i),
+ frame->order_hint);
+ av1_dec->ref_frame_sign_bias[i + 1] = (rel_off <= 0) ? 0 : 1;
+ }
+ }
+}
+
+static bool
+rockchip_vpu981_av1_dec_set_ref(struct hantro_ctx *ctx, int ref, int idx,
+ int width, int height)
+{
+ struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
+ struct hantro_av1_dec_ctrls *ctrls = &av1_dec->ctrls;
+ const struct v4l2_ctrl_av1_frame *frame = ctrls->frame;
+ struct hantro_dev *vpu = ctx->dev;
+ struct hantro_decoded_buffer *dst;
+ dma_addr_t luma_addr, chroma_addr, mv_addr = 0;
+ size_t cr_offset = rockchip_vpu981_av1_dec_luma_size(ctx);
+ size_t mv_offset = rockchip_vpu981_av1_dec_chroma_size(ctx);
+ int cur_width = frame->frame_width_minus_1 + 1;
+ int cur_height = frame->frame_height_minus_1 + 1;
+ int scale_width =
+ ((width << AV1_REF_SCALE_SHIFT) + cur_width / 2) / cur_width;
+ int scale_height =
+ ((height << AV1_REF_SCALE_SHIFT) + cur_height / 2) / cur_height;
+
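+	/*
+	 * Reference-to-current scaling ratios in Q14 fixed point; a
+	 * ratio of exactly 1 << AV1_REF_SCALE_SHIFT means no rescaling
+	 * is needed (see the return value below).
+	 */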
+ switch (ref) {
+ case 0:
+ hantro_reg_write(vpu, &av1_ref0_height, height);
+ hantro_reg_write(vpu, &av1_ref0_width, width);
+ hantro_reg_write(vpu, &av1_ref0_ver_scale, scale_width);
+ hantro_reg_write(vpu, &av1_ref0_hor_scale, scale_height);
+ break;
+ case 1:
+ hantro_reg_write(vpu, &av1_ref1_height, height);
+ hantro_reg_write(vpu, &av1_ref1_width, width);
+ hantro_reg_write(vpu, &av1_ref1_ver_scale, scale_width);
+ hantro_reg_write(vpu, &av1_ref1_hor_scale, scale_height);
+ break;
+ case 2:
+ hantro_reg_write(vpu, &av1_ref2_height, height);
+ hantro_reg_write(vpu, &av1_ref2_width, width);
+ hantro_reg_write(vpu, &av1_ref2_ver_scale, scale_width);
+ hantro_reg_write(vpu, &av1_ref2_hor_scale, scale_height);
+ break;
+ case 3:
+ hantro_reg_write(vpu, &av1_ref3_height, height);
+ hantro_reg_write(vpu, &av1_ref3_width, width);
+ hantro_reg_write(vpu, &av1_ref3_ver_scale, scale_width);
+ hantro_reg_write(vpu, &av1_ref3_hor_scale, scale_height);
+ break;
+ case 4:
+ hantro_reg_write(vpu, &av1_ref4_height, height);
+ hantro_reg_write(vpu, &av1_ref4_width, width);
+ hantro_reg_write(vpu, &av1_ref4_ver_scale, scale_width);
+ hantro_reg_write(vpu, &av1_ref4_hor_scale, scale_height);
+ break;
+ case 5:
+ hantro_reg_write(vpu, &av1_ref5_height, height);
+ hantro_reg_write(vpu, &av1_ref5_width, width);
+ hantro_reg_write(vpu, &av1_ref5_ver_scale, scale_width);
+ hantro_reg_write(vpu, &av1_ref5_hor_scale, scale_height);
+ break;
+ case 6:
+ hantro_reg_write(vpu, &av1_ref6_height, height);
+ hantro_reg_write(vpu, &av1_ref6_width, width);
+ hantro_reg_write(vpu, &av1_ref6_ver_scale, scale_width);
+ hantro_reg_write(vpu, &av1_ref6_hor_scale, scale_height);
+ break;
+ default:
+ pr_warn("AV1 invalid reference frame index\n");
+ }
+
+ dst = vb2_to_hantro_decoded_buf(&av1_dec->frame_refs[idx].vb2_ref->vb2_buf);
+ luma_addr = hantro_get_dec_buf_addr(ctx, &dst->base.vb.vb2_buf);
+ chroma_addr = luma_addr + cr_offset;
+ mv_addr = luma_addr + mv_offset;
+
+ hantro_write_addr(vpu, AV1_REFERENCE_Y(ref), luma_addr);
+ hantro_write_addr(vpu, AV1_REFERENCE_CB(ref), chroma_addr);
+ hantro_write_addr(vpu, AV1_REFERENCE_MV(ref), mv_addr);
+
+ return (scale_width != (1 << AV1_REF_SCALE_SHIFT)) ||
+ (scale_height != (1 << AV1_REF_SCALE_SHIFT));
+}
+
+static void rockchip_vpu981_av1_dec_set_sign_bias(struct hantro_ctx *ctx,
+ int ref, int val)
+{
+ struct hantro_dev *vpu = ctx->dev;
+
+ switch (ref) {
+ case 0:
+ hantro_reg_write(vpu, &av1_ref0_sign_bias, val);
+ break;
+ case 1:
+ hantro_reg_write(vpu, &av1_ref1_sign_bias, val);
+ break;
+ case 2:
+ hantro_reg_write(vpu, &av1_ref2_sign_bias, val);
+ break;
+ case 3:
+ hantro_reg_write(vpu, &av1_ref3_sign_bias, val);
+ break;
+ case 4:
+ hantro_reg_write(vpu, &av1_ref4_sign_bias, val);
+ break;
+ case 5:
+ hantro_reg_write(vpu, &av1_ref5_sign_bias, val);
+ break;
+ case 6:
+ hantro_reg_write(vpu, &av1_ref6_sign_bias, val);
+ break;
+ default:
+ pr_warn("AV1 invalid sign bias index\n");
+ break;
+ }
+}
+
+static void rockchip_vpu981_av1_dec_set_segmentation(struct hantro_ctx *ctx)
+{
+ struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
+ struct hantro_av1_dec_ctrls *ctrls = &av1_dec->ctrls;
+ const struct v4l2_ctrl_av1_frame *frame = ctrls->frame;
+ const struct v4l2_av1_segmentation *seg = &frame->segmentation;
+ u32 segval[V4L2_AV1_MAX_SEGMENTS][V4L2_AV1_SEG_LVL_MAX] = { 0 };
+ struct hantro_dev *vpu = ctx->dev;
+ u8 segsign = 0, preskip_segid = 0, last_active_seg = 0, i, j;
+
+ if (!!(seg->flags & V4L2_AV1_SEGMENTATION_FLAG_ENABLED) &&
+ frame->primary_ref_frame < V4L2_AV1_REFS_PER_FRAME) {
+ int idx = rockchip_vpu981_get_frame_index(ctx, frame->primary_ref_frame);
+
+ if (idx >= 0) {
+ dma_addr_t luma_addr, mv_addr = 0;
+			struct hantro_decoded_buffer *ref_buf;
+ size_t mv_offset = rockchip_vpu981_av1_dec_chroma_size(ctx);
+
+			ref_buf = vb2_to_hantro_decoded_buf(&av1_dec->frame_refs[idx].vb2_ref->vb2_buf);
+			luma_addr = hantro_get_dec_buf_addr(ctx, &ref_buf->base.vb.vb2_buf);
+ mv_addr = luma_addr + mv_offset;
+
+ hantro_write_addr(vpu, AV1_SEGMENTATION, mv_addr);
+ hantro_reg_write(vpu, &av1_use_temporal3_mvs, 1);
+ }
+ }
+
+ hantro_reg_write(vpu, &av1_segment_temp_upd_e,
+ !!(seg->flags & V4L2_AV1_SEGMENTATION_FLAG_TEMPORAL_UPDATE));
+ hantro_reg_write(vpu, &av1_segment_upd_e,
+ !!(seg->flags & V4L2_AV1_SEGMENTATION_FLAG_UPDATE_MAP));
+ hantro_reg_write(vpu, &av1_segment_e,
+ !!(seg->flags & V4L2_AV1_SEGMENTATION_FLAG_ENABLED));
+
+ hantro_reg_write(vpu, &av1_error_resilient,
+ !!(frame->flags & V4L2_AV1_FRAME_FLAG_ERROR_RESILIENT_MODE));
+
+ if (IS_INTRA(frame->frame_type) ||
+ !!(frame->flags & V4L2_AV1_FRAME_FLAG_ERROR_RESILIENT_MODE)) {
+ hantro_reg_write(vpu, &av1_use_temporal3_mvs, 0);
+ }
+
+ if (seg->flags & V4L2_AV1_SEGMENTATION_FLAG_ENABLED) {
+ int s;
+
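+		/*
+		 * The ALT_Q delta is programmed as a magnitude with a
+		 * per-segment sign bit collected in segsign; the loop-filter
+		 * deltas below are programmed as clamped magnitudes.
+		 */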
+ for (s = 0; s < V4L2_AV1_MAX_SEGMENTS; s++) {
+ if (seg->feature_enabled[s] &
+ V4L2_AV1_SEGMENT_FEATURE_ENABLED(V4L2_AV1_SEG_LVL_ALT_Q)) {
+ segval[s][V4L2_AV1_SEG_LVL_ALT_Q] =
+ clamp(abs(seg->feature_data[s][V4L2_AV1_SEG_LVL_ALT_Q]),
+ 0, 255);
+ segsign |=
+ (seg->feature_data[s][V4L2_AV1_SEG_LVL_ALT_Q] < 0) << s;
+ }
+
+ if (seg->feature_enabled[s] &
+ V4L2_AV1_SEGMENT_FEATURE_ENABLED(V4L2_AV1_SEG_LVL_ALT_LF_Y_V))
+ segval[s][V4L2_AV1_SEG_LVL_ALT_LF_Y_V] =
+ clamp(abs(seg->feature_data[s][V4L2_AV1_SEG_LVL_ALT_LF_Y_V]),
+ -63, 63);
+
+ if (seg->feature_enabled[s] &
+ V4L2_AV1_SEGMENT_FEATURE_ENABLED(V4L2_AV1_SEG_LVL_ALT_LF_Y_H))
+ segval[s][V4L2_AV1_SEG_LVL_ALT_LF_Y_H] =
+ clamp(abs(seg->feature_data[s][V4L2_AV1_SEG_LVL_ALT_LF_Y_H]),
+ -63, 63);
+
+ if (seg->feature_enabled[s] &
+ V4L2_AV1_SEGMENT_FEATURE_ENABLED(V4L2_AV1_SEG_LVL_ALT_LF_U))
+ segval[s][V4L2_AV1_SEG_LVL_ALT_LF_U] =
+ clamp(abs(seg->feature_data[s][V4L2_AV1_SEG_LVL_ALT_LF_U]),
+ -63, 63);
+
+ if (seg->feature_enabled[s] &
+ V4L2_AV1_SEGMENT_FEATURE_ENABLED(V4L2_AV1_SEG_LVL_ALT_LF_V))
+ segval[s][V4L2_AV1_SEG_LVL_ALT_LF_V] =
+ clamp(abs(seg->feature_data[s][V4L2_AV1_SEG_LVL_ALT_LF_V]),
+ -63, 63);
+
+ if (frame->frame_type && seg->feature_enabled[s] &
+ V4L2_AV1_SEGMENT_FEATURE_ENABLED(V4L2_AV1_SEG_LVL_REF_FRAME))
+ segval[s][V4L2_AV1_SEG_LVL_REF_FRAME]++;
+
+ if (seg->feature_enabled[s] &
+ V4L2_AV1_SEGMENT_FEATURE_ENABLED(V4L2_AV1_SEG_LVL_REF_SKIP))
+ segval[s][V4L2_AV1_SEG_LVL_REF_SKIP] = 1;
+
+ if (seg->feature_enabled[s] &
+ V4L2_AV1_SEGMENT_FEATURE_ENABLED(V4L2_AV1_SEG_LVL_REF_GLOBALMV))
+ segval[s][V4L2_AV1_SEG_LVL_REF_GLOBALMV] = 1;
+ }
+ }
+
+ for (i = 0; i < V4L2_AV1_MAX_SEGMENTS; i++) {
+ for (j = 0; j < V4L2_AV1_SEG_LVL_MAX; j++) {
+ if (seg->feature_enabled[i]
+ & V4L2_AV1_SEGMENT_FEATURE_ENABLED(j)) {
+ preskip_segid |= (j >= V4L2_AV1_SEG_LVL_REF_FRAME);
+ last_active_seg = max(i, last_active_seg);
+ }
+ }
+ }
+
+ hantro_reg_write(vpu, &av1_last_active_seg, last_active_seg);
+ hantro_reg_write(vpu, &av1_preskip_segid, preskip_segid);
+
+ hantro_reg_write(vpu, &av1_seg_quant_sign, segsign);
+
+	/*
+	 * Write QP, filter levels, ref frame and skip for every segment; each
+	 * segment has its own dedicated registers, hence the unrolled writes.
+	 */
+ hantro_reg_write(vpu, &av1_quant_seg0,
+ segval[0][V4L2_AV1_SEG_LVL_ALT_Q]);
+ hantro_reg_write(vpu, &av1_filt_level_delta0_seg0,
+ segval[0][V4L2_AV1_SEG_LVL_ALT_LF_Y_V]);
+ hantro_reg_write(vpu, &av1_filt_level_delta1_seg0,
+ segval[0][V4L2_AV1_SEG_LVL_ALT_LF_Y_H]);
+ hantro_reg_write(vpu, &av1_filt_level_delta2_seg0,
+ segval[0][V4L2_AV1_SEG_LVL_ALT_LF_U]);
+ hantro_reg_write(vpu, &av1_filt_level_delta3_seg0,
+ segval[0][V4L2_AV1_SEG_LVL_ALT_LF_V]);
+ hantro_reg_write(vpu, &av1_refpic_seg0,
+ segval[0][V4L2_AV1_SEG_LVL_REF_FRAME]);
+ hantro_reg_write(vpu, &av1_skip_seg0,
+ segval[0][V4L2_AV1_SEG_LVL_REF_SKIP]);
+ hantro_reg_write(vpu, &av1_global_mv_seg0,
+ segval[0][V4L2_AV1_SEG_LVL_REF_GLOBALMV]);
+
+ hantro_reg_write(vpu, &av1_quant_seg1,
+ segval[1][V4L2_AV1_SEG_LVL_ALT_Q]);
+ hantro_reg_write(vpu, &av1_filt_level_delta0_seg1,
+ segval[1][V4L2_AV1_SEG_LVL_ALT_LF_Y_V]);
+ hantro_reg_write(vpu, &av1_filt_level_delta1_seg1,
+ segval[1][V4L2_AV1_SEG_LVL_ALT_LF_Y_H]);
+ hantro_reg_write(vpu, &av1_filt_level_delta2_seg1,
+ segval[1][V4L2_AV1_SEG_LVL_ALT_LF_U]);
+ hantro_reg_write(vpu, &av1_filt_level_delta3_seg1,
+ segval[1][V4L2_AV1_SEG_LVL_ALT_LF_V]);
+ hantro_reg_write(vpu, &av1_refpic_seg1,
+ segval[1][V4L2_AV1_SEG_LVL_REF_FRAME]);
+ hantro_reg_write(vpu, &av1_skip_seg1,
+ segval[1][V4L2_AV1_SEG_LVL_REF_SKIP]);
+ hantro_reg_write(vpu, &av1_global_mv_seg1,
+ segval[1][V4L2_AV1_SEG_LVL_REF_GLOBALMV]);
+
+ hantro_reg_write(vpu, &av1_quant_seg2,
+ segval[2][V4L2_AV1_SEG_LVL_ALT_Q]);
+ hantro_reg_write(vpu, &av1_filt_level_delta0_seg2,
+ segval[2][V4L2_AV1_SEG_LVL_ALT_LF_Y_V]);
+ hantro_reg_write(vpu, &av1_filt_level_delta1_seg2,
+ segval[2][V4L2_AV1_SEG_LVL_ALT_LF_Y_H]);
+ hantro_reg_write(vpu, &av1_filt_level_delta2_seg2,
+ segval[2][V4L2_AV1_SEG_LVL_ALT_LF_U]);
+ hantro_reg_write(vpu, &av1_filt_level_delta3_seg2,
+ segval[2][V4L2_AV1_SEG_LVL_ALT_LF_V]);
+ hantro_reg_write(vpu, &av1_refpic_seg2,
+ segval[2][V4L2_AV1_SEG_LVL_REF_FRAME]);
+ hantro_reg_write(vpu, &av1_skip_seg2,
+ segval[2][V4L2_AV1_SEG_LVL_REF_SKIP]);
+ hantro_reg_write(vpu, &av1_global_mv_seg2,
+ segval[2][V4L2_AV1_SEG_LVL_REF_GLOBALMV]);
+
+ hantro_reg_write(vpu, &av1_quant_seg3,
+ segval[3][V4L2_AV1_SEG_LVL_ALT_Q]);
+ hantro_reg_write(vpu, &av1_filt_level_delta0_seg3,
+ segval[3][V4L2_AV1_SEG_LVL_ALT_LF_Y_V]);
+ hantro_reg_write(vpu, &av1_filt_level_delta1_seg3,
+ segval[3][V4L2_AV1_SEG_LVL_ALT_LF_Y_H]);
+ hantro_reg_write(vpu, &av1_filt_level_delta2_seg3,
+ segval[3][V4L2_AV1_SEG_LVL_ALT_LF_U]);
+ hantro_reg_write(vpu, &av1_filt_level_delta3_seg3,
+ segval[3][V4L2_AV1_SEG_LVL_ALT_LF_V]);
+ hantro_reg_write(vpu, &av1_refpic_seg3,
+ segval[3][V4L2_AV1_SEG_LVL_REF_FRAME]);
+ hantro_reg_write(vpu, &av1_skip_seg3,
+ segval[3][V4L2_AV1_SEG_LVL_REF_SKIP]);
+ hantro_reg_write(vpu, &av1_global_mv_seg3,
+ segval[3][V4L2_AV1_SEG_LVL_REF_GLOBALMV]);
+
+ hantro_reg_write(vpu, &av1_quant_seg4,
+ segval[4][V4L2_AV1_SEG_LVL_ALT_Q]);
+ hantro_reg_write(vpu, &av1_filt_level_delta0_seg4,
+ segval[4][V4L2_AV1_SEG_LVL_ALT_LF_Y_V]);
+ hantro_reg_write(vpu, &av1_filt_level_delta1_seg4,
+ segval[4][V4L2_AV1_SEG_LVL_ALT_LF_Y_H]);
+ hantro_reg_write(vpu, &av1_filt_level_delta2_seg4,
+ segval[4][V4L2_AV1_SEG_LVL_ALT_LF_U]);
+ hantro_reg_write(vpu, &av1_filt_level_delta3_seg4,
+ segval[4][V4L2_AV1_SEG_LVL_ALT_LF_V]);
+ hantro_reg_write(vpu, &av1_refpic_seg4,
+ segval[4][V4L2_AV1_SEG_LVL_REF_FRAME]);
+ hantro_reg_write(vpu, &av1_skip_seg4,
+ segval[4][V4L2_AV1_SEG_LVL_REF_SKIP]);
+ hantro_reg_write(vpu, &av1_global_mv_seg4,
+ segval[4][V4L2_AV1_SEG_LVL_REF_GLOBALMV]);
+
+ hantro_reg_write(vpu, &av1_quant_seg5,
+ segval[5][V4L2_AV1_SEG_LVL_ALT_Q]);
+ hantro_reg_write(vpu, &av1_filt_level_delta0_seg5,
+ segval[5][V4L2_AV1_SEG_LVL_ALT_LF_Y_V]);
+ hantro_reg_write(vpu, &av1_filt_level_delta1_seg5,
+ segval[5][V4L2_AV1_SEG_LVL_ALT_LF_Y_H]);
+ hantro_reg_write(vpu, &av1_filt_level_delta2_seg5,
+ segval[5][V4L2_AV1_SEG_LVL_ALT_LF_U]);
+ hantro_reg_write(vpu, &av1_filt_level_delta3_seg5,
+ segval[5][V4L2_AV1_SEG_LVL_ALT_LF_V]);
+ hantro_reg_write(vpu, &av1_refpic_seg5,
+ segval[5][V4L2_AV1_SEG_LVL_REF_FRAME]);
+ hantro_reg_write(vpu, &av1_skip_seg5,
+ segval[5][V4L2_AV1_SEG_LVL_REF_SKIP]);
+ hantro_reg_write(vpu, &av1_global_mv_seg5,
+ segval[5][V4L2_AV1_SEG_LVL_REF_GLOBALMV]);
+
+ hantro_reg_write(vpu, &av1_quant_seg6,
+ segval[6][V4L2_AV1_SEG_LVL_ALT_Q]);
+ hantro_reg_write(vpu, &av1_filt_level_delta0_seg6,
+ segval[6][V4L2_AV1_SEG_LVL_ALT_LF_Y_V]);
+ hantro_reg_write(vpu, &av1_filt_level_delta1_seg6,
+ segval[6][V4L2_AV1_SEG_LVL_ALT_LF_Y_H]);
+ hantro_reg_write(vpu, &av1_filt_level_delta2_seg6,
+ segval[6][V4L2_AV1_SEG_LVL_ALT_LF_U]);
+ hantro_reg_write(vpu, &av1_filt_level_delta3_seg6,
+ segval[6][V4L2_AV1_SEG_LVL_ALT_LF_V]);
+ hantro_reg_write(vpu, &av1_refpic_seg6,
+ segval[6][V4L2_AV1_SEG_LVL_REF_FRAME]);
+ hantro_reg_write(vpu, &av1_skip_seg6,
+ segval[6][V4L2_AV1_SEG_LVL_REF_SKIP]);
+ hantro_reg_write(vpu, &av1_global_mv_seg6,
+ segval[6][V4L2_AV1_SEG_LVL_REF_GLOBALMV]);
+
+ hantro_reg_write(vpu, &av1_quant_seg7,
+ segval[7][V4L2_AV1_SEG_LVL_ALT_Q]);
+ hantro_reg_write(vpu, &av1_filt_level_delta0_seg7,
+ segval[7][V4L2_AV1_SEG_LVL_ALT_LF_Y_V]);
+ hantro_reg_write(vpu, &av1_filt_level_delta1_seg7,
+ segval[7][V4L2_AV1_SEG_LVL_ALT_LF_Y_H]);
+ hantro_reg_write(vpu, &av1_filt_level_delta2_seg7,
+ segval[7][V4L2_AV1_SEG_LVL_ALT_LF_U]);
+ hantro_reg_write(vpu, &av1_filt_level_delta3_seg7,
+ segval[7][V4L2_AV1_SEG_LVL_ALT_LF_V]);
+ hantro_reg_write(vpu, &av1_refpic_seg7,
+ segval[7][V4L2_AV1_SEG_LVL_REF_FRAME]);
+ hantro_reg_write(vpu, &av1_skip_seg7,
+ segval[7][V4L2_AV1_SEG_LVL_REF_SKIP]);
+ hantro_reg_write(vpu, &av1_global_mv_seg7,
+ segval[7][V4L2_AV1_SEG_LVL_REF_GLOBALMV]);
+}
+
+static bool rockchip_vpu981_av1_dec_is_lossless(struct hantro_ctx *ctx)
+{
+ struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
+ struct hantro_av1_dec_ctrls *ctrls = &av1_dec->ctrls;
+ const struct v4l2_ctrl_av1_frame *frame = ctrls->frame;
+ const struct v4l2_av1_segmentation *segmentation = &frame->segmentation;
+ const struct v4l2_av1_quantization *quantization = &frame->quantization;
+ int i;
+
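+	/*
+	 * A frame is lossless only if every segment resolves to qindex 0 and
+	 * all DC/AC quantizer deltas are 0 (CodedLossless in the AV1 spec).
+	 */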
+ for (i = 0; i < V4L2_AV1_MAX_SEGMENTS; i++) {
+ int qindex = quantization->base_q_idx;
+
+ if (segmentation->feature_enabled[i] &
+ V4L2_AV1_SEGMENT_FEATURE_ENABLED(V4L2_AV1_SEG_LVL_ALT_Q)) {
+ qindex += segmentation->feature_data[i][V4L2_AV1_SEG_LVL_ALT_Q];
+ }
+ qindex = clamp(qindex, 0, 255);
+
+ if (qindex ||
+ quantization->delta_q_y_dc ||
+ quantization->delta_q_u_dc ||
+ quantization->delta_q_u_ac ||
+ quantization->delta_q_v_dc ||
+ quantization->delta_q_v_ac)
+ return false;
+ }
+ return true;
+}
+
+static void rockchip_vpu981_av1_dec_set_loopfilter(struct hantro_ctx *ctx)
+{
+ struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
+ struct hantro_av1_dec_ctrls *ctrls = &av1_dec->ctrls;
+ const struct v4l2_ctrl_av1_frame *frame = ctrls->frame;
+ const struct v4l2_av1_loop_filter *loop_filter = &frame->loop_filter;
+ bool filtering_dis = (loop_filter->level[0] == 0) && (loop_filter->level[1] == 0);
+ struct hantro_dev *vpu = ctx->dev;
+
+ hantro_reg_write(vpu, &av1_filtering_dis, filtering_dis);
+ hantro_reg_write(vpu, &av1_filt_level_base_gt32, loop_filter->level[0] > 32);
+ hantro_reg_write(vpu, &av1_filt_sharpness, loop_filter->sharpness);
+
+ hantro_reg_write(vpu, &av1_filt_level0, loop_filter->level[0]);
+ hantro_reg_write(vpu, &av1_filt_level1, loop_filter->level[1]);
+ hantro_reg_write(vpu, &av1_filt_level2, loop_filter->level[2]);
+ hantro_reg_write(vpu, &av1_filt_level3, loop_filter->level[3]);
+
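+	/*
+	 * Reference and mode loop-filter deltas only apply when delta updates
+	 * are enabled and the frame is neither lossless nor coded with intra
+	 * block copy; otherwise program them as zero.
+	 */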
+ if (loop_filter->flags & V4L2_AV1_LOOP_FILTER_FLAG_DELTA_ENABLED &&
+ !rockchip_vpu981_av1_dec_is_lossless(ctx) &&
+ !(frame->flags & V4L2_AV1_FRAME_FLAG_ALLOW_INTRABC)) {
+ hantro_reg_write(vpu, &av1_filt_ref_adj_0,
+ loop_filter->ref_deltas[0]);
+ hantro_reg_write(vpu, &av1_filt_ref_adj_1,
+ loop_filter->ref_deltas[1]);
+ hantro_reg_write(vpu, &av1_filt_ref_adj_2,
+ loop_filter->ref_deltas[2]);
+ hantro_reg_write(vpu, &av1_filt_ref_adj_3,
+ loop_filter->ref_deltas[3]);
+ hantro_reg_write(vpu, &av1_filt_ref_adj_4,
+ loop_filter->ref_deltas[4]);
+ hantro_reg_write(vpu, &av1_filt_ref_adj_5,
+ loop_filter->ref_deltas[5]);
+ hantro_reg_write(vpu, &av1_filt_ref_adj_6,
+ loop_filter->ref_deltas[6]);
+ hantro_reg_write(vpu, &av1_filt_ref_adj_7,
+ loop_filter->ref_deltas[7]);
+ hantro_reg_write(vpu, &av1_filt_mb_adj_0,
+ loop_filter->mode_deltas[0]);
+ hantro_reg_write(vpu, &av1_filt_mb_adj_1,
+ loop_filter->mode_deltas[1]);
+ } else {
+ hantro_reg_write(vpu, &av1_filt_ref_adj_0, 0);
+ hantro_reg_write(vpu, &av1_filt_ref_adj_1, 0);
+ hantro_reg_write(vpu, &av1_filt_ref_adj_2, 0);
+ hantro_reg_write(vpu, &av1_filt_ref_adj_3, 0);
+ hantro_reg_write(vpu, &av1_filt_ref_adj_4, 0);
+ hantro_reg_write(vpu, &av1_filt_ref_adj_5, 0);
+ hantro_reg_write(vpu, &av1_filt_ref_adj_6, 0);
+ hantro_reg_write(vpu, &av1_filt_ref_adj_7, 0);
+ hantro_reg_write(vpu, &av1_filt_mb_adj_0, 0);
+ hantro_reg_write(vpu, &av1_filt_mb_adj_1, 0);
+ }
+
+ hantro_write_addr(vpu, AV1_DB_DATA_COL, av1_dec->db_data_col.dma);
+ hantro_write_addr(vpu, AV1_DB_CTRL_COL, av1_dec->db_ctrl_col.dma);
+}
+
+static void rockchip_vpu981_av1_dec_update_prob(struct hantro_ctx *ctx)
+{
+ struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
+ struct hantro_av1_dec_ctrls *ctrls = &av1_dec->ctrls;
+ const struct v4l2_ctrl_av1_frame *frame = ctrls->frame;
+ bool frame_is_intra = IS_INTRA(frame->frame_type);
+ struct av1cdfs *out_cdfs = (struct av1cdfs *)av1_dec->prob_tbl_out.cpu;
+ int i;
+
+ if (frame->flags & V4L2_AV1_FRAME_FLAG_DISABLE_FRAME_END_UPDATE_CDF)
+ return;
+
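+	/*
+	 * Copy the CDFs updated during decoding into every slot named in
+	 * refresh_frame_flags; a single store call covers all flagged slots,
+	 * so the loop only needs the first set bit. Intra frames keep the
+	 * stored inter MV CDFs and save the intrabc MV CDFs separately.
+	 */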
+ for (i = 0; i < NUM_REF_FRAMES; i++) {
+ if (frame->refresh_frame_flags & BIT(i)) {
+ struct mvcdfs stored_mv_cdf;
+
+ rockchip_av1_get_cdfs(ctx, i);
+ stored_mv_cdf = av1_dec->cdfs->mv_cdf;
+ *av1_dec->cdfs = *out_cdfs;
+ if (frame_is_intra) {
+ av1_dec->cdfs->mv_cdf = stored_mv_cdf;
+ *av1_dec->cdfs_ndvc = out_cdfs->mv_cdf;
+ }
+ rockchip_av1_store_cdfs(ctx,
+ frame->refresh_frame_flags);
+ break;
+ }
+ }
+}
+
+void rockchip_vpu981_av1_dec_done(struct hantro_ctx *ctx)
+{
+ rockchip_vpu981_av1_dec_update_prob(ctx);
+}
+
+static void rockchip_vpu981_av1_dec_set_prob(struct hantro_ctx *ctx)
+{
+ struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
+ struct hantro_av1_dec_ctrls *ctrls = &av1_dec->ctrls;
+ const struct v4l2_ctrl_av1_frame *frame = ctrls->frame;
+ const struct v4l2_av1_quantization *quantization = &frame->quantization;
+ struct hantro_dev *vpu = ctx->dev;
+ bool error_resilient_mode =
+ !!(frame->flags & V4L2_AV1_FRAME_FLAG_ERROR_RESILIENT_MODE);
+ bool frame_is_intra = IS_INTRA(frame->frame_type);
+
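+	/*
+	 * Start from the default CDFs (seeded by base_q_idx) when no usable
+	 * primary reference exists; otherwise inherit the CDFs stored for
+	 * the primary reference frame.
+	 */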
+ if (error_resilient_mode || frame_is_intra ||
+ frame->primary_ref_frame == AV1_PRIMARY_REF_NONE) {
+ av1_dec->cdfs = &av1_dec->default_cdfs;
+ av1_dec->cdfs_ndvc = &av1_dec->default_cdfs_ndvc;
+ rockchip_av1_default_coeff_probs(quantization->base_q_idx,
+ av1_dec->cdfs);
+ } else {
+ rockchip_av1_get_cdfs(ctx, frame->ref_frame_idx[frame->primary_ref_frame]);
+ }
+ rockchip_av1_store_cdfs(ctx, frame->refresh_frame_flags);
+
+ memcpy(av1_dec->prob_tbl.cpu, av1_dec->cdfs, sizeof(struct av1cdfs));
+
+ if (frame_is_intra) {
+ int mv_offset = offsetof(struct av1cdfs, mv_cdf);
+ /* Overwrite MV context area with intrabc MV context */
+ memcpy(av1_dec->prob_tbl.cpu + mv_offset, av1_dec->cdfs_ndvc,
+ sizeof(struct mvcdfs));
+ }
+
+ hantro_write_addr(vpu, AV1_PROP_TABLE_OUT, av1_dec->prob_tbl_out.dma);
+ hantro_write_addr(vpu, AV1_PROP_TABLE, av1_dec->prob_tbl.dma);
+}
+
+static void
+rockchip_vpu981_av1_dec_init_scaling_function(const u8 *values, const u8 *scaling,
+ u8 num_points, u8 *scaling_lut)
+{
+ int i, point;
+
+ if (num_points == 0) {
+ memset(scaling_lut, 0, 256);
+ return;
+ }
+
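+	/*
+	 * Piecewise-linear interpolation between consecutive points in 16.16
+	 * fixed point; past the last point the final scaling value is
+	 * replicated up to index 255.
+	 */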
+ for (point = 0; point < num_points - 1; point++) {
+ int x;
+ s32 delta_y = scaling[point + 1] - scaling[point];
+ s32 delta_x = values[point + 1] - values[point];
+ s64 delta =
+ delta_x ? delta_y * ((65536 + (delta_x >> 1)) /
+ delta_x) : 0;
+
+ for (x = 0; x < delta_x; x++) {
+ scaling_lut[values[point] + x] =
+ scaling[point] +
+ (s32)((x * delta + 32768) >> 16);
+ }
+ }
+
+ for (i = values[num_points - 1]; i < 256; i++)
+ scaling_lut[i] = scaling[num_points - 1];
+}
+
+static void rockchip_vpu981_av1_dec_set_fgs(struct hantro_ctx *ctx)
+{
+ struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
+ struct hantro_av1_dec_ctrls *ctrls = &av1_dec->ctrls;
+ const struct v4l2_ctrl_av1_film_grain *film_grain = ctrls->film_grain;
+ struct rockchip_av1_film_grain *fgmem = av1_dec->film_grain.cpu;
+ struct hantro_dev *vpu = ctx->dev;
+ bool scaling_from_luma =
+ !!(film_grain->flags & V4L2_AV1_FILM_GRAIN_FLAG_CHROMA_SCALING_FROM_LUMA);
+ s32 (*ar_coeffs_y)[24];
+ s32 (*ar_coeffs_cb)[25];
+ s32 (*ar_coeffs_cr)[25];
+ s32 (*luma_grain_block)[73][82];
+ s32 (*cb_grain_block)[38][44];
+ s32 (*cr_grain_block)[38][44];
+ s32 ar_coeff_lag, ar_coeff_shift;
+ s32 grain_scale_shift, bitdepth;
+ s32 grain_center, grain_min, grain_max;
+ int i, j;
+
+ hantro_reg_write(vpu, &av1_apply_grain, 0);
+
+ if (!(film_grain->flags & V4L2_AV1_FILM_GRAIN_FLAG_APPLY_GRAIN)) {
+ hantro_reg_write(vpu, &av1_num_y_points_b, 0);
+ hantro_reg_write(vpu, &av1_num_cb_points_b, 0);
+ hantro_reg_write(vpu, &av1_num_cr_points_b, 0);
+ hantro_reg_write(vpu, &av1_scaling_shift, 0);
+ hantro_reg_write(vpu, &av1_cb_mult, 0);
+ hantro_reg_write(vpu, &av1_cb_luma_mult, 0);
+ hantro_reg_write(vpu, &av1_cb_offset, 0);
+ hantro_reg_write(vpu, &av1_cr_mult, 0);
+ hantro_reg_write(vpu, &av1_cr_luma_mult, 0);
+ hantro_reg_write(vpu, &av1_cr_offset, 0);
+ hantro_reg_write(vpu, &av1_overlap_flag, 0);
+ hantro_reg_write(vpu, &av1_clip_to_restricted_range, 0);
+ hantro_reg_write(vpu, &av1_chroma_scaling_from_luma, 0);
+ hantro_reg_write(vpu, &av1_random_seed, 0);
+ hantro_write_addr(vpu, AV1_FILM_GRAIN, 0);
+ return;
+ }
+
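+	/*
+	 * The grain templates are synthesized on the CPU; the scratch blocks
+	 * are far too large for the stack, so take them from the heap.
+	 */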
+	ar_coeffs_y = kcalloc(24, sizeof(s32), GFP_KERNEL);
+	ar_coeffs_cb = kcalloc(25, sizeof(s32), GFP_KERNEL);
+	ar_coeffs_cr = kcalloc(25, sizeof(s32), GFP_KERNEL);
+	luma_grain_block = kcalloc(73 * 82, sizeof(s32), GFP_KERNEL);
+	cb_grain_block = kcalloc(38 * 44, sizeof(s32), GFP_KERNEL);
+	cr_grain_block = kcalloc(38 * 44, sizeof(s32), GFP_KERNEL);
+
+ if (!ar_coeffs_y || !ar_coeffs_cb || !ar_coeffs_cr ||
+ !luma_grain_block || !cb_grain_block || !cr_grain_block) {
+		pr_warn("Failed to allocate film grain parameter buffers\n");
+ goto alloc_fail;
+ }
+
+ hantro_reg_write(vpu, &av1_apply_grain, 1);
+
+ hantro_reg_write(vpu, &av1_num_y_points_b,
+ film_grain->num_y_points > 0);
+ hantro_reg_write(vpu, &av1_num_cb_points_b,
+ film_grain->num_cb_points > 0);
+ hantro_reg_write(vpu, &av1_num_cr_points_b,
+ film_grain->num_cr_points > 0);
+ hantro_reg_write(vpu, &av1_scaling_shift,
+ film_grain->grain_scaling_minus_8 + 8);
+
+ if (!scaling_from_luma) {
+ hantro_reg_write(vpu, &av1_cb_mult, film_grain->cb_mult - 128);
+ hantro_reg_write(vpu, &av1_cb_luma_mult, film_grain->cb_luma_mult - 128);
+ hantro_reg_write(vpu, &av1_cb_offset, film_grain->cb_offset - 256);
+ hantro_reg_write(vpu, &av1_cr_mult, film_grain->cr_mult - 128);
+ hantro_reg_write(vpu, &av1_cr_luma_mult, film_grain->cr_luma_mult - 128);
+ hantro_reg_write(vpu, &av1_cr_offset, film_grain->cr_offset - 256);
+ } else {
+ hantro_reg_write(vpu, &av1_cb_mult, 0);
+ hantro_reg_write(vpu, &av1_cb_luma_mult, 0);
+ hantro_reg_write(vpu, &av1_cb_offset, 0);
+ hantro_reg_write(vpu, &av1_cr_mult, 0);
+ hantro_reg_write(vpu, &av1_cr_luma_mult, 0);
+ hantro_reg_write(vpu, &av1_cr_offset, 0);
+ }
+
+ hantro_reg_write(vpu, &av1_overlap_flag,
+ !!(film_grain->flags & V4L2_AV1_FILM_GRAIN_FLAG_OVERLAP));
+ hantro_reg_write(vpu, &av1_clip_to_restricted_range,
+ !!(film_grain->flags & V4L2_AV1_FILM_GRAIN_FLAG_CLIP_TO_RESTRICTED_RANGE));
+ hantro_reg_write(vpu, &av1_chroma_scaling_from_luma, scaling_from_luma);
+ hantro_reg_write(vpu, &av1_random_seed, film_grain->grain_seed);
+
+ rockchip_vpu981_av1_dec_init_scaling_function(film_grain->point_y_value,
+ film_grain->point_y_scaling,
+ film_grain->num_y_points,
+ fgmem->scaling_lut_y);
+
+ if (film_grain->flags &
+ V4L2_AV1_FILM_GRAIN_FLAG_CHROMA_SCALING_FROM_LUMA) {
+ memcpy(fgmem->scaling_lut_cb, fgmem->scaling_lut_y,
+ sizeof(*fgmem->scaling_lut_y) * 256);
+ memcpy(fgmem->scaling_lut_cr, fgmem->scaling_lut_y,
+ sizeof(*fgmem->scaling_lut_y) * 256);
+ } else {
+ rockchip_vpu981_av1_dec_init_scaling_function
+ (film_grain->point_cb_value, film_grain->point_cb_scaling,
+ film_grain->num_cb_points, fgmem->scaling_lut_cb);
+ rockchip_vpu981_av1_dec_init_scaling_function
+ (film_grain->point_cr_value, film_grain->point_cr_scaling,
+ film_grain->num_cr_points, fgmem->scaling_lut_cr);
+ }
+
+ for (i = 0; i < V4L2_AV1_AR_COEFFS_SIZE; i++) {
+ if (i < 24)
+ (*ar_coeffs_y)[i] = film_grain->ar_coeffs_y_plus_128[i] - 128;
+ (*ar_coeffs_cb)[i] = film_grain->ar_coeffs_cb_plus_128[i] - 128;
+ (*ar_coeffs_cr)[i] = film_grain->ar_coeffs_cr_plus_128[i] - 128;
+ }
+
+ ar_coeff_lag = film_grain->ar_coeff_lag;
+ ar_coeff_shift = film_grain->ar_coeff_shift_minus_6 + 6;
+ grain_scale_shift = film_grain->grain_scale_shift;
+ bitdepth = ctx->bit_depth;
+ grain_center = 128 << (bitdepth - 8);
+ grain_min = 0 - grain_center;
+ grain_max = (256 << (bitdepth - 8)) - 1 - grain_center;
+
+ rockchip_av1_generate_luma_grain_block(luma_grain_block, bitdepth,
+ film_grain->num_y_points, grain_scale_shift,
+ ar_coeff_lag, ar_coeffs_y, ar_coeff_shift,
+ grain_min, grain_max, film_grain->grain_seed);
+
+ rockchip_av1_generate_chroma_grain_block(luma_grain_block, cb_grain_block,
+ cr_grain_block, bitdepth,
+ film_grain->num_y_points,
+ film_grain->num_cb_points,
+ film_grain->num_cr_points,
+ grain_scale_shift, ar_coeff_lag, ar_coeffs_cb,
+ ar_coeffs_cr, ar_coeff_shift, grain_min,
+ grain_max,
+ scaling_from_luma,
+ film_grain->grain_seed);
+
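+	/*
+	 * Crop the synthesized templates: a 64x64 window (9-sample border)
+	 * from the 82x73 luma block, and 32x32 windows (6-sample border)
+	 * from the 44x38 chroma blocks, with Cb/Cr samples interleaved.
+	 */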
+ for (i = 0; i < 64; i++) {
+ for (j = 0; j < 64; j++)
+ fgmem->cropped_luma_grain_block[i * 64 + j] =
+ (*luma_grain_block)[i + 9][j + 9];
+ }
+
+ for (i = 0; i < 32; i++) {
+ for (j = 0; j < 32; j++) {
+ fgmem->cropped_chroma_grain_block[i * 64 + 2 * j] =
+ (*cb_grain_block)[i + 6][j + 6];
+ fgmem->cropped_chroma_grain_block[i * 64 + 2 * j + 1] =
+ (*cr_grain_block)[i + 6][j + 6];
+ }
+ }
+
+ hantro_write_addr(vpu, AV1_FILM_GRAIN, av1_dec->film_grain.dma);
+
+alloc_fail:
+ kfree(ar_coeffs_y);
+ kfree(ar_coeffs_cb);
+ kfree(ar_coeffs_cr);
+ kfree(luma_grain_block);
+ kfree(cb_grain_block);
+ kfree(cr_grain_block);
+}
+
+static void rockchip_vpu981_av1_dec_set_cdef(struct hantro_ctx *ctx)
+{
+ struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
+ struct hantro_av1_dec_ctrls *ctrls = &av1_dec->ctrls;
+ const struct v4l2_ctrl_av1_frame *frame = ctrls->frame;
+ const struct v4l2_av1_cdef *cdef = &frame->cdef;
+ struct hantro_dev *vpu = ctx->dev;
+ u32 luma_pri_strength = 0;
+ u16 luma_sec_strength = 0;
+ u32 chroma_pri_strength = 0;
+ u16 chroma_sec_strength = 0;
+ int i;
+
+ hantro_reg_write(vpu, &av1_cdef_bits, cdef->bits);
+ hantro_reg_write(vpu, &av1_cdef_damping, cdef->damping_minus_3);
+
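+	/*
+	 * Pack the strengths for all filter presets: 4 bits per primary
+	 * strength, 2 bits per secondary strength. The spec's secondary
+	 * strength of 4 is encoded as 3 to fit in 2 bits.
+	 */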
+ for (i = 0; i < BIT(cdef->bits); i++) {
+ luma_pri_strength |= cdef->y_pri_strength[i] << (i * 4);
+ if (cdef->y_sec_strength[i] == 4)
+ luma_sec_strength |= 3 << (i * 2);
+ else
+ luma_sec_strength |= cdef->y_sec_strength[i] << (i * 2);
+
+ chroma_pri_strength |= cdef->uv_pri_strength[i] << (i * 4);
+ if (cdef->uv_sec_strength[i] == 4)
+ chroma_sec_strength |= 3 << (i * 2);
+ else
+ chroma_sec_strength |= cdef->uv_sec_strength[i] << (i * 2);
+ }
+
+ hantro_reg_write(vpu, &av1_cdef_luma_primary_strength,
+ luma_pri_strength);
+ hantro_reg_write(vpu, &av1_cdef_luma_secondary_strength,
+ luma_sec_strength);
+ hantro_reg_write(vpu, &av1_cdef_chroma_primary_strength,
+ chroma_pri_strength);
+ hantro_reg_write(vpu, &av1_cdef_chroma_secondary_strength,
+ chroma_sec_strength);
+
+ hantro_write_addr(vpu, AV1_CDEF_COL, av1_dec->cdef_col.dma);
+}
+
+static void rockchip_vpu981_av1_dec_set_lr(struct hantro_ctx *ctx)
+{
+ struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
+ struct hantro_av1_dec_ctrls *ctrls = &av1_dec->ctrls;
+ const struct v4l2_ctrl_av1_frame *frame = ctrls->frame;
+ const struct v4l2_av1_loop_restoration *loop_restoration =
+ &frame->loop_restoration;
+ struct hantro_dev *vpu = ctx->dev;
+ u16 lr_type = 0, lr_unit_size = 0;
+ u8 restoration_unit_size[V4L2_AV1_NUM_PLANES_MAX] = { 3, 3, 3 };
+ int i;
+
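+	/*
+	 * Encode the per-plane restoration unit size (2 bits each). Without
+	 * loop restoration all planes use the maximum code of 3; otherwise
+	 * the code follows lr_unit_shift, reduced by lr_uv_shift for chroma.
+	 */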
+ if (loop_restoration->flags & V4L2_AV1_LOOP_RESTORATION_FLAG_USES_LR) {
+ restoration_unit_size[0] = 1 + loop_restoration->lr_unit_shift;
+ restoration_unit_size[1] =
+ 1 + loop_restoration->lr_unit_shift - loop_restoration->lr_uv_shift;
+ restoration_unit_size[2] =
+ 1 + loop_restoration->lr_unit_shift - loop_restoration->lr_uv_shift;
+ }
+
+ for (i = 0; i < V4L2_AV1_NUM_PLANES_MAX; i++) {
+ lr_type |=
+ loop_restoration->frame_restoration_type[i] << (i * 2);
+ lr_unit_size |= restoration_unit_size[i] << (i * 2);
+ }
+
+ hantro_reg_write(vpu, &av1_lr_type, lr_type);
+ hantro_reg_write(vpu, &av1_lr_unit_size, lr_unit_size);
+ hantro_write_addr(vpu, AV1_LR_COL, av1_dec->lr_col.dma);
+}
+
+static void rockchip_vpu981_av1_dec_set_superres_params(struct hantro_ctx *ctx)
+{
+ struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
+ struct hantro_av1_dec_ctrls *ctrls = &av1_dec->ctrls;
+ const struct v4l2_ctrl_av1_frame *frame = ctrls->frame;
+ struct hantro_dev *vpu = ctx->dev;
+ u8 superres_scale_denominator = SCALE_NUMERATOR;
+ int superres_luma_step = RS_SCALE_SUBPEL_BITS;
+ int superres_chroma_step = RS_SCALE_SUBPEL_BITS;
+ int superres_luma_step_invra = RS_SCALE_SUBPEL_BITS;
+ int superres_chroma_step_invra = RS_SCALE_SUBPEL_BITS;
+ int superres_init_luma_subpel_x = 0;
+ int superres_init_chroma_subpel_x = 0;
+ int superres_is_scaled = 0;
+	int min_w = min_t(u32, 16, frame->upscaled_width);
+ int upscaled_luma, downscaled_luma;
+ int downscaled_chroma, upscaled_chroma;
+ int step_luma, step_chroma;
+ int err_luma, err_chroma;
+ int initial_luma, initial_chroma;
+ int width = 0;
+
+ if (frame->flags & V4L2_AV1_FRAME_FLAG_USE_SUPERRES)
+ superres_scale_denominator = frame->superres_denom;
+
+ if (superres_scale_denominator <= SCALE_NUMERATOR)
+ goto set_regs;
+
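+	/*
+	 * Derive the downscaled (coded) width with round-to-nearest, then
+	 * compute the fixed-point horizontal steps and initial subpel
+	 * offsets the upscaler needs for luma and 4:2:0-subsampled chroma.
+	 */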
+ width = (frame->upscaled_width * SCALE_NUMERATOR +
+ (superres_scale_denominator / 2)) / superres_scale_denominator;
+
+ if (width < min_w)
+ width = min_w;
+
+ if (width == frame->upscaled_width)
+ goto set_regs;
+
+ superres_is_scaled = 1;
+ upscaled_luma = frame->upscaled_width;
+ downscaled_luma = width;
+ downscaled_chroma = (downscaled_luma + 1) >> 1;
+ upscaled_chroma = (upscaled_luma + 1) >> 1;
+ step_luma =
+ ((downscaled_luma << RS_SCALE_SUBPEL_BITS) +
+ (upscaled_luma / 2)) / upscaled_luma;
+ step_chroma =
+ ((downscaled_chroma << RS_SCALE_SUBPEL_BITS) +
+ (upscaled_chroma / 2)) / upscaled_chroma;
+ err_luma =
+ (upscaled_luma * step_luma)
+ - (downscaled_luma << RS_SCALE_SUBPEL_BITS);
+ err_chroma =
+ (upscaled_chroma * step_chroma)
+ - (downscaled_chroma << RS_SCALE_SUBPEL_BITS);
+ initial_luma =
+ ((-((upscaled_luma - downscaled_luma) << (RS_SCALE_SUBPEL_BITS - 1))
+ + upscaled_luma / 2)
+ / upscaled_luma + (1 << (RS_SCALE_EXTRA_BITS - 1)) - err_luma / 2)
+ & RS_SCALE_SUBPEL_MASK;
+ initial_chroma =
+ ((-((upscaled_chroma - downscaled_chroma) << (RS_SCALE_SUBPEL_BITS - 1))
+ + upscaled_chroma / 2)
+ / upscaled_chroma + (1 << (RS_SCALE_EXTRA_BITS - 1)) - err_chroma / 2)
+ & RS_SCALE_SUBPEL_MASK;
+ superres_luma_step = step_luma;
+ superres_chroma_step = step_chroma;
+ superres_luma_step_invra =
+ ((upscaled_luma << RS_SCALE_SUBPEL_BITS) + (downscaled_luma / 2))
+ / downscaled_luma;
+ superres_chroma_step_invra =
+ ((upscaled_chroma << RS_SCALE_SUBPEL_BITS) + (downscaled_chroma / 2))
+ / downscaled_chroma;
+ superres_init_luma_subpel_x = initial_luma;
+ superres_init_chroma_subpel_x = initial_chroma;
+
+set_regs:
+ hantro_reg_write(vpu, &av1_superres_pic_width, frame->upscaled_width);
+
+ if (frame->flags & V4L2_AV1_FRAME_FLAG_USE_SUPERRES)
+ hantro_reg_write(vpu, &av1_scale_denom_minus9,
+ frame->superres_denom - SUPERRES_SCALE_DENOMINATOR_MIN);
+ else
+ hantro_reg_write(vpu, &av1_scale_denom_minus9, frame->superres_denom);
+
+ hantro_reg_write(vpu, &av1_superres_luma_step, superres_luma_step);
+ hantro_reg_write(vpu, &av1_superres_chroma_step, superres_chroma_step);
+ hantro_reg_write(vpu, &av1_superres_luma_step_invra,
+ superres_luma_step_invra);
+ hantro_reg_write(vpu, &av1_superres_chroma_step_invra,
+ superres_chroma_step_invra);
+ hantro_reg_write(vpu, &av1_superres_init_luma_subpel_x,
+ superres_init_luma_subpel_x);
+ hantro_reg_write(vpu, &av1_superres_init_chroma_subpel_x,
+ superres_init_chroma_subpel_x);
+ hantro_reg_write(vpu, &av1_superres_is_scaled, superres_is_scaled);
+
+ hantro_write_addr(vpu, AV1_SR_COL, av1_dec->sr_col.dma);
+}
+
+static void rockchip_vpu981_av1_dec_set_picture_dimensions(struct hantro_ctx *ctx)
+{
+ struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
+ struct hantro_av1_dec_ctrls *ctrls = &av1_dec->ctrls;
+ const struct v4l2_ctrl_av1_frame *frame = ctrls->frame;
+ struct hantro_dev *vpu = ctx->dev;
+ int pic_width_in_cbs = DIV_ROUND_UP(frame->frame_width_minus_1 + 1, 8);
+ int pic_height_in_cbs = DIV_ROUND_UP(frame->frame_height_minus_1 + 1, 8);
+ int pic_width_pad = ALIGN(frame->frame_width_minus_1 + 1, 8)
+ - (frame->frame_width_minus_1 + 1);
+ int pic_height_pad = ALIGN(frame->frame_height_minus_1 + 1, 8)
+ - (frame->frame_height_minus_1 + 1);
+
+ hantro_reg_write(vpu, &av1_pic_width_in_cbs, pic_width_in_cbs);
+ hantro_reg_write(vpu, &av1_pic_height_in_cbs, pic_height_in_cbs);
+ hantro_reg_write(vpu, &av1_pic_width_pad, pic_width_pad);
+ hantro_reg_write(vpu, &av1_pic_height_pad, pic_height_pad);
+
+ rockchip_vpu981_av1_dec_set_superres_params(ctx);
+}
+
+static void rockchip_vpu981_av1_dec_set_other_frames(struct hantro_ctx *ctx)
+{
+ struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
+ struct hantro_av1_dec_ctrls *ctrls = &av1_dec->ctrls;
+ const struct v4l2_ctrl_av1_frame *frame = ctrls->frame;
+ struct hantro_dev *vpu = ctx->dev;
+ bool use_ref_frame_mvs =
+ !!(ctrls->frame->flags & V4L2_AV1_FRAME_FLAG_USE_REF_FRAME_MVS);
+ int cur_frame_offset = frame->order_hint;
+ int alt_frame_offset = 0;
+ int gld_frame_offset = 0;
+ int bwd_frame_offset = 0;
+ int alt2_frame_offset = 0;
+ int refs_selected[3] = { 0, 0, 0 };
+ int cur_mi_cols = DIV_ROUND_UP(frame->frame_width_minus_1 + 1, 8);
+ int cur_mi_rows = DIV_ROUND_UP(frame->frame_height_minus_1 + 1, 8);
+ int cur_offset[V4L2_AV1_TOTAL_REFS_PER_FRAME - 1];
+ int cur_roffset[V4L2_AV1_TOTAL_REFS_PER_FRAME - 1];
+ int mf_types[3] = { 0, 0, 0 };
+ int ref_stamp = 2;
+ int ref_ind = 0;
+ int rf, idx;
+
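+	/*
+	 * Pick up to three motion-field sources in LAST, BWD, ALTREF2,
+	 * ALTREF, LAST2 priority order, mirroring the candidate order of the
+	 * spec's motion field estimation process. Only non-intra references
+	 * matching the current frame's mi dimensions are usable.
+	 */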
+ alt_frame_offset = rockchip_vpu981_get_order_hint(ctx, ALT_BUF_IDX);
+ gld_frame_offset = rockchip_vpu981_get_order_hint(ctx, GLD_BUF_IDX);
+ bwd_frame_offset = rockchip_vpu981_get_order_hint(ctx, BWD_BUF_IDX);
+ alt2_frame_offset = rockchip_vpu981_get_order_hint(ctx, ALT2_BUF_IDX);
+
+ idx = rockchip_vpu981_get_frame_index(ctx, LST_BUF_IDX);
+ if (idx >= 0) {
+ int alt_frame_offset_in_lst =
+ av1_dec->frame_refs[idx].order_hints[V4L2_AV1_REF_ALTREF_FRAME];
+ bool is_lst_overlay =
+ (alt_frame_offset_in_lst == gld_frame_offset);
+
+ if (!is_lst_overlay) {
+ int lst_mi_cols = av1_dec->frame_refs[idx].mi_cols;
+ int lst_mi_rows = av1_dec->frame_refs[idx].mi_rows;
+ bool lst_intra_only =
+ IS_INTRA(av1_dec->frame_refs[idx].frame_type);
+
+ if (lst_mi_cols == cur_mi_cols &&
+ lst_mi_rows == cur_mi_rows && !lst_intra_only) {
+ mf_types[ref_ind] = V4L2_AV1_REF_LAST_FRAME;
+ refs_selected[ref_ind++] = LST_BUF_IDX;
+ }
+ }
+ ref_stamp--;
+ }
+
+ idx = rockchip_vpu981_get_frame_index(ctx, BWD_BUF_IDX);
+	if (idx >= 0 &&
+	    rockchip_vpu981_av1_dec_get_dist(ctx, bwd_frame_offset, cur_frame_offset) > 0) {
+ int bwd_mi_cols = av1_dec->frame_refs[idx].mi_cols;
+ int bwd_mi_rows = av1_dec->frame_refs[idx].mi_rows;
+ bool bwd_intra_only =
+ IS_INTRA(av1_dec->frame_refs[idx].frame_type);
+
+ if (bwd_mi_cols == cur_mi_cols && bwd_mi_rows == cur_mi_rows &&
+ !bwd_intra_only) {
+ mf_types[ref_ind] = V4L2_AV1_REF_BWDREF_FRAME;
+ refs_selected[ref_ind++] = BWD_BUF_IDX;
+ ref_stamp--;
+ }
+ }
+
+ idx = rockchip_vpu981_get_frame_index(ctx, ALT2_BUF_IDX);
+	if (idx >= 0 &&
+	    rockchip_vpu981_av1_dec_get_dist(ctx, alt2_frame_offset, cur_frame_offset) > 0) {
+ int alt2_mi_cols = av1_dec->frame_refs[idx].mi_cols;
+ int alt2_mi_rows = av1_dec->frame_refs[idx].mi_rows;
+ bool alt2_intra_only =
+ IS_INTRA(av1_dec->frame_refs[idx].frame_type);
+
+ if (alt2_mi_cols == cur_mi_cols && alt2_mi_rows == cur_mi_rows &&
+ !alt2_intra_only) {
+ mf_types[ref_ind] = V4L2_AV1_REF_ALTREF2_FRAME;
+ refs_selected[ref_ind++] = ALT2_BUF_IDX;
+ ref_stamp--;
+ }
+ }
+
+ idx = rockchip_vpu981_get_frame_index(ctx, ALT_BUF_IDX);
+	if (idx >= 0 && ref_stamp >= 0 &&
+	    rockchip_vpu981_av1_dec_get_dist(ctx, alt_frame_offset, cur_frame_offset) > 0) {
+ int alt_mi_cols = av1_dec->frame_refs[idx].mi_cols;
+ int alt_mi_rows = av1_dec->frame_refs[idx].mi_rows;
+ bool alt_intra_only =
+ IS_INTRA(av1_dec->frame_refs[idx].frame_type);
+
+ if (alt_mi_cols == cur_mi_cols && alt_mi_rows == cur_mi_rows &&
+ !alt_intra_only) {
+ mf_types[ref_ind] = V4L2_AV1_REF_ALTREF_FRAME;
+ refs_selected[ref_ind++] = ALT_BUF_IDX;
+ ref_stamp--;
+ }
+ }
+
+ idx = rockchip_vpu981_get_frame_index(ctx, LST2_BUF_IDX);
+ if (idx >= 0 && ref_stamp >= 0) {
+ int lst2_mi_cols = av1_dec->frame_refs[idx].mi_cols;
+ int lst2_mi_rows = av1_dec->frame_refs[idx].mi_rows;
+ bool lst2_intra_only =
+ IS_INTRA(av1_dec->frame_refs[idx].frame_type);
+
+ if (lst2_mi_cols == cur_mi_cols && lst2_mi_rows == cur_mi_rows &&
+ !lst2_intra_only) {
+ mf_types[ref_ind] = V4L2_AV1_REF_LAST2_FRAME;
+ refs_selected[ref_ind++] = LST2_BUF_IDX;
+ ref_stamp--;
+ }
+ }
+
+ for (rf = 0; rf < V4L2_AV1_TOTAL_REFS_PER_FRAME - 1; ++rf) {
+ idx = rockchip_vpu981_get_frame_index(ctx, rf);
+ if (idx >= 0) {
+ int rf_order_hint = rockchip_vpu981_get_order_hint(ctx, rf);
+
+ cur_offset[rf] =
+ rockchip_vpu981_av1_dec_get_dist(ctx, cur_frame_offset, rf_order_hint);
+ cur_roffset[rf] =
+ rockchip_vpu981_av1_dec_get_dist(ctx, rf_order_hint, cur_frame_offset);
+ } else {
+ cur_offset[rf] = 0;
+ cur_roffset[rf] = 0;
+ }
+ }
+
+ hantro_reg_write(vpu, &av1_use_temporal0_mvs, 0);
+ hantro_reg_write(vpu, &av1_use_temporal1_mvs, 0);
+ hantro_reg_write(vpu, &av1_use_temporal2_mvs, 0);
+ hantro_reg_write(vpu, &av1_use_temporal3_mvs, 0);
+
+ hantro_reg_write(vpu, &av1_mf1_last_offset, 0);
+ hantro_reg_write(vpu, &av1_mf1_last2_offset, 0);
+ hantro_reg_write(vpu, &av1_mf1_last3_offset, 0);
+ hantro_reg_write(vpu, &av1_mf1_golden_offset, 0);
+ hantro_reg_write(vpu, &av1_mf1_bwdref_offset, 0);
+ hantro_reg_write(vpu, &av1_mf1_altref2_offset, 0);
+ hantro_reg_write(vpu, &av1_mf1_altref_offset, 0);
+
+ if (use_ref_frame_mvs && ref_ind > 0 &&
+ cur_offset[mf_types[0] - V4L2_AV1_REF_LAST_FRAME] <= MAX_FRAME_DISTANCE &&
+ cur_offset[mf_types[0] - V4L2_AV1_REF_LAST_FRAME] >= -MAX_FRAME_DISTANCE) {
+ int rf = rockchip_vpu981_get_order_hint(ctx, refs_selected[0]);
+ int idx = rockchip_vpu981_get_frame_index(ctx, refs_selected[0]);
+ u32 *oh = av1_dec->frame_refs[idx].order_hints;
+ int val;
+
+ hantro_reg_write(vpu, &av1_use_temporal0_mvs, 1);
+
+ val = rockchip_vpu981_av1_dec_get_dist(ctx, rf, oh[V4L2_AV1_REF_LAST_FRAME]);
+ hantro_reg_write(vpu, &av1_mf1_last_offset, val);
+
+ val = rockchip_vpu981_av1_dec_get_dist(ctx, rf, oh[V4L2_AV1_REF_LAST2_FRAME]);
+ hantro_reg_write(vpu, &av1_mf1_last2_offset, val);
+
+ val = rockchip_vpu981_av1_dec_get_dist(ctx, rf, oh[V4L2_AV1_REF_LAST3_FRAME]);
+ hantro_reg_write(vpu, &av1_mf1_last3_offset, val);
+
+ val = rockchip_vpu981_av1_dec_get_dist(ctx, rf, oh[V4L2_AV1_REF_GOLDEN_FRAME]);
+ hantro_reg_write(vpu, &av1_mf1_golden_offset, val);
+
+ val = rockchip_vpu981_av1_dec_get_dist(ctx, rf, oh[V4L2_AV1_REF_BWDREF_FRAME]);
+ hantro_reg_write(vpu, &av1_mf1_bwdref_offset, val);
+
+ val = rockchip_vpu981_av1_dec_get_dist(ctx, rf, oh[V4L2_AV1_REF_ALTREF2_FRAME]);
+ hantro_reg_write(vpu, &av1_mf1_altref2_offset, val);
+
+ val = rockchip_vpu981_av1_dec_get_dist(ctx, rf, oh[V4L2_AV1_REF_ALTREF_FRAME]);
+ hantro_reg_write(vpu, &av1_mf1_altref_offset, val);
+ }
+
+ hantro_reg_write(vpu, &av1_mf2_last_offset, 0);
+ hantro_reg_write(vpu, &av1_mf2_last2_offset, 0);
+ hantro_reg_write(vpu, &av1_mf2_last3_offset, 0);
+ hantro_reg_write(vpu, &av1_mf2_golden_offset, 0);
+ hantro_reg_write(vpu, &av1_mf2_bwdref_offset, 0);
+ hantro_reg_write(vpu, &av1_mf2_altref2_offset, 0);
+ hantro_reg_write(vpu, &av1_mf2_altref_offset, 0);
+
+ if (use_ref_frame_mvs && ref_ind > 1 &&
+ cur_offset[mf_types[1] - V4L2_AV1_REF_LAST_FRAME] <= MAX_FRAME_DISTANCE &&
+ cur_offset[mf_types[1] - V4L2_AV1_REF_LAST_FRAME] >= -MAX_FRAME_DISTANCE) {
+ int rf = rockchip_vpu981_get_order_hint(ctx, refs_selected[1]);
+ int idx = rockchip_vpu981_get_frame_index(ctx, refs_selected[1]);
+ u32 *oh = av1_dec->frame_refs[idx].order_hints;
+ int val;
+
+ hantro_reg_write(vpu, &av1_use_temporal1_mvs, 1);
+
+ val = rockchip_vpu981_av1_dec_get_dist(ctx, rf, oh[V4L2_AV1_REF_LAST_FRAME]);
+ hantro_reg_write(vpu, &av1_mf2_last_offset, val);
+
+ val = rockchip_vpu981_av1_dec_get_dist(ctx, rf, oh[V4L2_AV1_REF_LAST2_FRAME]);
+ hantro_reg_write(vpu, &av1_mf2_last2_offset, val);
+
+ val = rockchip_vpu981_av1_dec_get_dist(ctx, rf, oh[V4L2_AV1_REF_LAST3_FRAME]);
+ hantro_reg_write(vpu, &av1_mf2_last3_offset, val);
+
+ val = rockchip_vpu981_av1_dec_get_dist(ctx, rf, oh[V4L2_AV1_REF_GOLDEN_FRAME]);
+ hantro_reg_write(vpu, &av1_mf2_golden_offset, val);
+
+ val = rockchip_vpu981_av1_dec_get_dist(ctx, rf, oh[V4L2_AV1_REF_BWDREF_FRAME]);
+ hantro_reg_write(vpu, &av1_mf2_bwdref_offset, val);
+
+ val = rockchip_vpu981_av1_dec_get_dist(ctx, rf, oh[V4L2_AV1_REF_ALTREF2_FRAME]);
+ hantro_reg_write(vpu, &av1_mf2_altref2_offset, val);
+
+ val = rockchip_vpu981_av1_dec_get_dist(ctx, rf, oh[V4L2_AV1_REF_ALTREF_FRAME]);
+ hantro_reg_write(vpu, &av1_mf2_altref_offset, val);
+ }
+
+ hantro_reg_write(vpu, &av1_mf3_last_offset, 0);
+ hantro_reg_write(vpu, &av1_mf3_last2_offset, 0);
+ hantro_reg_write(vpu, &av1_mf3_last3_offset, 0);
+ hantro_reg_write(vpu, &av1_mf3_golden_offset, 0);
+ hantro_reg_write(vpu, &av1_mf3_bwdref_offset, 0);
+ hantro_reg_write(vpu, &av1_mf3_altref2_offset, 0);
+ hantro_reg_write(vpu, &av1_mf3_altref_offset, 0);
+
+ if (use_ref_frame_mvs && ref_ind > 2 &&
+ cur_offset[mf_types[2] - V4L2_AV1_REF_LAST_FRAME] <= MAX_FRAME_DISTANCE &&
+ cur_offset[mf_types[2] - V4L2_AV1_REF_LAST_FRAME] >= -MAX_FRAME_DISTANCE) {
+ int rf = rockchip_vpu981_get_order_hint(ctx, refs_selected[2]);
+ int idx = rockchip_vpu981_get_frame_index(ctx, refs_selected[2]);
+ u32 *oh = av1_dec->frame_refs[idx].order_hints;
+ int val;
+
+ hantro_reg_write(vpu, &av1_use_temporal2_mvs, 1);
+
+ val = rockchip_vpu981_av1_dec_get_dist(ctx, rf, oh[V4L2_AV1_REF_LAST_FRAME]);
+ hantro_reg_write(vpu, &av1_mf3_last_offset, val);
+
+ val = rockchip_vpu981_av1_dec_get_dist(ctx, rf, oh[V4L2_AV1_REF_LAST2_FRAME]);
+ hantro_reg_write(vpu, &av1_mf3_last2_offset, val);
+
+ val = rockchip_vpu981_av1_dec_get_dist(ctx, rf, oh[V4L2_AV1_REF_LAST3_FRAME]);
+ hantro_reg_write(vpu, &av1_mf3_last3_offset, val);
+
+ val = rockchip_vpu981_av1_dec_get_dist(ctx, rf, oh[V4L2_AV1_REF_GOLDEN_FRAME]);
+ hantro_reg_write(vpu, &av1_mf3_golden_offset, val);
+
+ val = rockchip_vpu981_av1_dec_get_dist(ctx, rf, oh[V4L2_AV1_REF_BWDREF_FRAME]);
+ hantro_reg_write(vpu, &av1_mf3_bwdref_offset, val);
+
+ val = rockchip_vpu981_av1_dec_get_dist(ctx, rf, oh[V4L2_AV1_REF_ALTREF2_FRAME]);
+ hantro_reg_write(vpu, &av1_mf3_altref2_offset, val);
+
+ val = rockchip_vpu981_av1_dec_get_dist(ctx, rf, oh[V4L2_AV1_REF_ALTREF_FRAME]);
+ hantro_reg_write(vpu, &av1_mf3_altref_offset, val);
+ }
+
+ hantro_reg_write(vpu, &av1_cur_last_offset, cur_offset[0]);
+ hantro_reg_write(vpu, &av1_cur_last2_offset, cur_offset[1]);
+ hantro_reg_write(vpu, &av1_cur_last3_offset, cur_offset[2]);
+ hantro_reg_write(vpu, &av1_cur_golden_offset, cur_offset[3]);
+ hantro_reg_write(vpu, &av1_cur_bwdref_offset, cur_offset[4]);
+ hantro_reg_write(vpu, &av1_cur_altref2_offset, cur_offset[5]);
+ hantro_reg_write(vpu, &av1_cur_altref_offset, cur_offset[6]);
+
+ hantro_reg_write(vpu, &av1_cur_last_roffset, cur_roffset[0]);
+ hantro_reg_write(vpu, &av1_cur_last2_roffset, cur_roffset[1]);
+ hantro_reg_write(vpu, &av1_cur_last3_roffset, cur_roffset[2]);
+ hantro_reg_write(vpu, &av1_cur_golden_roffset, cur_roffset[3]);
+ hantro_reg_write(vpu, &av1_cur_bwdref_roffset, cur_roffset[4]);
+ hantro_reg_write(vpu, &av1_cur_altref2_roffset, cur_roffset[5]);
+ hantro_reg_write(vpu, &av1_cur_altref_roffset, cur_roffset[6]);
+
+ hantro_reg_write(vpu, &av1_mf1_type, mf_types[0] - V4L2_AV1_REF_LAST_FRAME);
+ hantro_reg_write(vpu, &av1_mf2_type, mf_types[1] - V4L2_AV1_REF_LAST_FRAME);
+ hantro_reg_write(vpu, &av1_mf3_type, mf_types[2] - V4L2_AV1_REF_LAST_FRAME);
+}
+
+static void rockchip_vpu981_av1_dec_set_reference_frames(struct hantro_ctx *ctx)
+{
+ struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
+ struct hantro_av1_dec_ctrls *ctrls = &av1_dec->ctrls;
+ const struct v4l2_ctrl_av1_frame *frame = ctrls->frame;
+ int frame_type = frame->frame_type;
+ bool allow_intrabc = !!(ctrls->frame->flags & V4L2_AV1_FRAME_FLAG_ALLOW_INTRABC);
+ int ref_count[AV1DEC_MAX_PIC_BUFFERS] = { 0 };
+ struct hantro_dev *vpu = ctx->dev;
+ int i, ref_frames = 0;
+ bool scale_enable = false;
+
+ if (IS_INTRA(frame_type) && !allow_intrabc)
+ return;
+
+ if (!allow_intrabc) {
+ for (i = 0; i < V4L2_AV1_REFS_PER_FRAME; i++) {
+ int idx = rockchip_vpu981_get_frame_index(ctx, i);
+
+ if (idx >= 0)
+ ref_count[idx]++;
+ }
+
+ for (i = 0; i < AV1DEC_MAX_PIC_BUFFERS; i++) {
+ if (ref_count[i])
+ ref_frames++;
+ }
+ } else {
+ ref_frames = 1;
+ }
+ hantro_reg_write(vpu, &av1_ref_frames, ref_frames);
+
+ rockchip_vpu981_av1_dec_set_frame_sign_bias(ctx);
+
+ for (i = V4L2_AV1_REF_LAST_FRAME; i < V4L2_AV1_TOTAL_REFS_PER_FRAME; i++) {
+ u32 ref = i - 1;
+ int idx = 0;
+ int width, height;
+
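+		/*
+		 * With intra block copy the current frame serves as its own
+		 * reference; otherwise look the reference up in the DPB.
+		 */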
+ if (allow_intrabc) {
+ idx = av1_dec->current_frame_index;
+ width = frame->frame_width_minus_1 + 1;
+ height = frame->frame_height_minus_1 + 1;
+ } else {
+			int ref_idx = rockchip_vpu981_get_frame_index(ctx, ref);
+
+			if (ref_idx > 0)
+				idx = ref_idx;
+			width = av1_dec->frame_refs[idx].width;
+			height = av1_dec->frame_refs[idx].height;
+ }
+
+ scale_enable |=
+ rockchip_vpu981_av1_dec_set_ref(ctx, ref, idx, width,
+ height);
+
+ rockchip_vpu981_av1_dec_set_sign_bias(ctx, ref,
+ av1_dec->ref_frame_sign_bias[i]);
+ }
+ hantro_reg_write(vpu, &av1_ref_scaling_enable, scale_enable);
+
+ hantro_reg_write(vpu, &av1_ref0_gm_mode,
+ frame->global_motion.type[V4L2_AV1_REF_LAST_FRAME]);
+ hantro_reg_write(vpu, &av1_ref1_gm_mode,
+ frame->global_motion.type[V4L2_AV1_REF_LAST2_FRAME]);
+ hantro_reg_write(vpu, &av1_ref2_gm_mode,
+ frame->global_motion.type[V4L2_AV1_REF_LAST3_FRAME]);
+ hantro_reg_write(vpu, &av1_ref3_gm_mode,
+ frame->global_motion.type[V4L2_AV1_REF_GOLDEN_FRAME]);
+ hantro_reg_write(vpu, &av1_ref4_gm_mode,
+ frame->global_motion.type[V4L2_AV1_REF_BWDREF_FRAME]);
+ hantro_reg_write(vpu, &av1_ref5_gm_mode,
+ frame->global_motion.type[V4L2_AV1_REF_ALTREF2_FRAME]);
+ hantro_reg_write(vpu, &av1_ref6_gm_mode,
+ frame->global_motion.type[V4L2_AV1_REF_ALTREF_FRAME]);
+
+ rockchip_vpu981_av1_dec_set_other_frames(ctx);
+}
+
+static void rockchip_vpu981_av1_dec_set_parameters(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
+ struct hantro_av1_dec_ctrls *ctrls = &av1_dec->ctrls;
+
+ hantro_reg_write(vpu, &av1_skip_mode,
+ !!(ctrls->frame->flags & V4L2_AV1_FRAME_FLAG_SKIP_MODE_PRESENT));
+ hantro_reg_write(vpu, &av1_tempor_mvp_e,
+ !!(ctrls->frame->flags & V4L2_AV1_FRAME_FLAG_USE_REF_FRAME_MVS));
+ hantro_reg_write(vpu, &av1_delta_lf_res_log,
+ ctrls->frame->loop_filter.delta_lf_res);
+ hantro_reg_write(vpu, &av1_delta_lf_multi,
+ !!(ctrls->frame->loop_filter.flags
+ & V4L2_AV1_LOOP_FILTER_FLAG_DELTA_LF_MULTI));
+ hantro_reg_write(vpu, &av1_delta_lf_present,
+ !!(ctrls->frame->loop_filter.flags
+ & V4L2_AV1_LOOP_FILTER_FLAG_DELTA_LF_PRESENT));
+ hantro_reg_write(vpu, &av1_disable_cdf_update,
+ !!(ctrls->frame->flags & V4L2_AV1_FRAME_FLAG_DISABLE_CDF_UPDATE));
+ hantro_reg_write(vpu, &av1_allow_warp,
+ !!(ctrls->frame->flags & V4L2_AV1_FRAME_FLAG_ALLOW_WARPED_MOTION));
+ hantro_reg_write(vpu, &av1_show_frame,
+ !!(ctrls->frame->flags & V4L2_AV1_FRAME_FLAG_SHOW_FRAME));
+ hantro_reg_write(vpu, &av1_switchable_motion_mode,
+ !!(ctrls->frame->flags & V4L2_AV1_FRAME_FLAG_IS_MOTION_MODE_SWITCHABLE));
+ hantro_reg_write(vpu, &av1_enable_cdef,
+ !!(ctrls->sequence->flags & V4L2_AV1_SEQUENCE_FLAG_ENABLE_CDEF));
+ hantro_reg_write(vpu, &av1_allow_masked_compound,
+ !!(ctrls->sequence->flags
+ & V4L2_AV1_SEQUENCE_FLAG_ENABLE_MASKED_COMPOUND));
+ hantro_reg_write(vpu, &av1_allow_interintra,
+ !!(ctrls->sequence->flags
+ & V4L2_AV1_SEQUENCE_FLAG_ENABLE_INTERINTRA_COMPOUND));
+ hantro_reg_write(vpu, &av1_enable_intra_edge_filter,
+ !!(ctrls->sequence->flags
+ & V4L2_AV1_SEQUENCE_FLAG_ENABLE_INTRA_EDGE_FILTER));
+ hantro_reg_write(vpu, &av1_allow_filter_intra,
+ !!(ctrls->sequence->flags & V4L2_AV1_SEQUENCE_FLAG_ENABLE_FILTER_INTRA));
+ hantro_reg_write(vpu, &av1_enable_jnt_comp,
+ !!(ctrls->sequence->flags & V4L2_AV1_SEQUENCE_FLAG_ENABLE_JNT_COMP));
+ hantro_reg_write(vpu, &av1_enable_dual_filter,
+ !!(ctrls->sequence->flags & V4L2_AV1_SEQUENCE_FLAG_ENABLE_DUAL_FILTER));
+ hantro_reg_write(vpu, &av1_reduced_tx_set_used,
+ !!(ctrls->frame->flags & V4L2_AV1_FRAME_FLAG_REDUCED_TX_SET));
+ hantro_reg_write(vpu, &av1_allow_screen_content_tools,
+ !!(ctrls->frame->flags & V4L2_AV1_FRAME_FLAG_ALLOW_SCREEN_CONTENT_TOOLS));
+ hantro_reg_write(vpu, &av1_allow_intrabc,
+ !!(ctrls->frame->flags & V4L2_AV1_FRAME_FLAG_ALLOW_INTRABC));
+
+ if (!(ctrls->frame->flags & V4L2_AV1_FRAME_FLAG_ALLOW_SCREEN_CONTENT_TOOLS))
+		hantro_reg_write(vpu, &av1_force_integer_mv, 0);
+ else
+		hantro_reg_write(vpu, &av1_force_integer_mv,
+				 !!(ctrls->frame->flags & V4L2_AV1_FRAME_FLAG_FORCE_INTEGER_MV));
+
+ hantro_reg_write(vpu, &av1_blackwhite_e, 0);
+ hantro_reg_write(vpu, &av1_delta_q_res_log, ctrls->frame->quantization.delta_q_res);
+ hantro_reg_write(vpu, &av1_delta_q_present,
+ !!(ctrls->frame->quantization.flags
+ & V4L2_AV1_QUANTIZATION_FLAG_DELTA_Q_PRESENT));
+
+ hantro_reg_write(vpu, &av1_idr_pic_e, !ctrls->frame->frame_type);
+ hantro_reg_write(vpu, &av1_quant_base_qindex, ctrls->frame->quantization.base_q_idx);
+ hantro_reg_write(vpu, &av1_bit_depth_y_minus8, ctx->bit_depth - 8);
+ hantro_reg_write(vpu, &av1_bit_depth_c_minus8, ctx->bit_depth - 8);
+
+ hantro_reg_write(vpu, &av1_mcomp_filt_type, ctrls->frame->interpolation_filter);
+ hantro_reg_write(vpu, &av1_high_prec_mv_e,
+ !!(ctrls->frame->flags & V4L2_AV1_FRAME_FLAG_ALLOW_HIGH_PRECISION_MV));
+ hantro_reg_write(vpu, &av1_comp_pred_mode,
+ (ctrls->frame->flags & V4L2_AV1_FRAME_FLAG_REFERENCE_SELECT) ? 2 : 0);
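+	/* Map tx_mode: TX_MODE_LARGEST (1) becomes 3, otherwise 4 (TX_MODE_SELECT). */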
+ hantro_reg_write(vpu, &av1_transform_mode, (ctrls->frame->tx_mode == 1) ? 3 : 4);
+ hantro_reg_write(vpu, &av1_max_cb_size,
+ (ctrls->sequence->flags
+ & V4L2_AV1_SEQUENCE_FLAG_USE_128X128_SUPERBLOCK) ? 7 : 6);
+ hantro_reg_write(vpu, &av1_min_cb_size, 3);
+
+ hantro_reg_write(vpu, &av1_comp_pred_fixed_ref, 0);
+ hantro_reg_write(vpu, &av1_comp_pred_var_ref0_av1, 0);
+ hantro_reg_write(vpu, &av1_comp_pred_var_ref1_av1, 0);
+ hantro_reg_write(vpu, &av1_filt_level_seg0, 0);
+ hantro_reg_write(vpu, &av1_filt_level_seg1, 0);
+ hantro_reg_write(vpu, &av1_filt_level_seg2, 0);
+ hantro_reg_write(vpu, &av1_filt_level_seg3, 0);
+ hantro_reg_write(vpu, &av1_filt_level_seg4, 0);
+ hantro_reg_write(vpu, &av1_filt_level_seg5, 0);
+ hantro_reg_write(vpu, &av1_filt_level_seg6, 0);
+ hantro_reg_write(vpu, &av1_filt_level_seg7, 0);
+
+ hantro_reg_write(vpu, &av1_qp_delta_y_dc_av1, ctrls->frame->quantization.delta_q_y_dc);
+ hantro_reg_write(vpu, &av1_qp_delta_ch_dc_av1, ctrls->frame->quantization.delta_q_u_dc);
+ hantro_reg_write(vpu, &av1_qp_delta_ch_ac_av1, ctrls->frame->quantization.delta_q_u_ac);
+ if (ctrls->frame->quantization.flags & V4L2_AV1_QUANTIZATION_FLAG_USING_QMATRIX) {
+ hantro_reg_write(vpu, &av1_qmlevel_y, ctrls->frame->quantization.qm_y);
+ hantro_reg_write(vpu, &av1_qmlevel_u, ctrls->frame->quantization.qm_u);
+ hantro_reg_write(vpu, &av1_qmlevel_v, ctrls->frame->quantization.qm_v);
+ } else {
+ hantro_reg_write(vpu, &av1_qmlevel_y, 0xff);
+ hantro_reg_write(vpu, &av1_qmlevel_u, 0xff);
+ hantro_reg_write(vpu, &av1_qmlevel_v, 0xff);
+ }
+
+ hantro_reg_write(vpu, &av1_lossless_e, rockchip_vpu981_av1_dec_is_lossless(ctx));
+ hantro_reg_write(vpu, &av1_quant_delta_v_dc, ctrls->frame->quantization.delta_q_v_dc);
+ hantro_reg_write(vpu, &av1_quant_delta_v_ac, ctrls->frame->quantization.delta_q_v_ac);
+
+ hantro_reg_write(vpu, &av1_skip_ref0,
+ (ctrls->frame->skip_mode_frame[0]) ? ctrls->frame->skip_mode_frame[0] : 1);
+ hantro_reg_write(vpu, &av1_skip_ref1,
+ (ctrls->frame->skip_mode_frame[1]) ? ctrls->frame->skip_mode_frame[1] : 1);
+
+ hantro_write_addr(vpu, AV1_MC_SYNC_CURR, av1_dec->tile_buf.dma);
+ hantro_write_addr(vpu, AV1_MC_SYNC_LEFT, av1_dec->tile_buf.dma);
+}
+
+static void
+rockchip_vpu981_av1_dec_set_input_buffer(struct hantro_ctx *ctx,
+ struct vb2_v4l2_buffer *vb2_src)
+{
+ struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
+ struct hantro_av1_dec_ctrls *ctrls = &av1_dec->ctrls;
+ const struct v4l2_ctrl_av1_tile_group_entry *group_entry =
+ ctrls->tile_group_entry;
+ struct hantro_dev *vpu = ctx->dev;
+ dma_addr_t src_dma;
+ u32 src_len, src_buf_len;
+ int start_bit, offset;
+
+ src_dma = vb2_dma_contig_plane_dma_addr(&vb2_src->vb2_buf, 0);
+ src_len = vb2_get_plane_payload(&vb2_src->vb2_buf, 0);
+ src_buf_len = vb2_plane_size(&vb2_src->vb2_buf, 0);
+
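+	/*
+	 * The hardware wants a 16-byte-aligned stream base: round the first
+	 * tile offset down and program the residual bits as the start bit.
+	 */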
+ start_bit = (group_entry[0].tile_offset & 0xf) * 8;
+ offset = group_entry[0].tile_offset & ~0xf;
+
+ hantro_reg_write(vpu, &av1_strm_buffer_len, src_buf_len);
+ hantro_reg_write(vpu, &av1_strm_start_bit, start_bit);
+ hantro_reg_write(vpu, &av1_stream_len, src_len);
+ hantro_reg_write(vpu, &av1_strm_start_offset, 0);
+ hantro_write_addr(vpu, AV1_INPUT_STREAM, src_dma + offset);
+}
+
+static void
+rockchip_vpu981_av1_dec_set_output_buffer(struct hantro_ctx *ctx)
+{
+ struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
+ struct hantro_dev *vpu = ctx->dev;
+ struct hantro_decoded_buffer *dst;
+ struct vb2_v4l2_buffer *vb2_dst;
+ dma_addr_t luma_addr, chroma_addr, mv_addr = 0;
+ size_t cr_offset = rockchip_vpu981_av1_dec_luma_size(ctx);
+ size_t mv_offset = rockchip_vpu981_av1_dec_chroma_size(ctx);
+
+ vb2_dst = av1_dec->frame_refs[av1_dec->current_frame_index].vb2_ref;
+ dst = vb2_to_hantro_decoded_buf(&vb2_dst->vb2_buf);
+ luma_addr = hantro_get_dec_buf_addr(ctx, &dst->base.vb.vb2_buf);
+ chroma_addr = luma_addr + cr_offset;
+ mv_addr = luma_addr + mv_offset;
+
+ hantro_write_addr(vpu, AV1_TILE_OUT_LU, luma_addr);
+ hantro_write_addr(vpu, AV1_TILE_OUT_CH, chroma_addr);
+ hantro_write_addr(vpu, AV1_TILE_OUT_MV, mv_addr);
+}
+
+int rockchip_vpu981_av1_dec_run(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ struct vb2_v4l2_buffer *vb2_src;
+ int ret;
+
+ hantro_start_prepare_run(ctx);
+
+ ret = rockchip_vpu981_av1_dec_prepare_run(ctx);
+ if (ret)
+ goto prepare_error;
+
+ vb2_src = hantro_get_src_buf(ctx);
+ if (!vb2_src) {
+ ret = -EINVAL;
+ goto prepare_error;
+ }
+
+ rockchip_vpu981_av1_dec_clean_refs(ctx);
+ rockchip_vpu981_av1_dec_frame_ref(ctx, vb2_src->vb2_buf.timestamp);
+
+ rockchip_vpu981_av1_dec_set_parameters(ctx);
+ rockchip_vpu981_av1_dec_set_global_model(ctx);
+ rockchip_vpu981_av1_dec_set_tile_info(ctx);
+ rockchip_vpu981_av1_dec_set_reference_frames(ctx);
+ rockchip_vpu981_av1_dec_set_segmentation(ctx);
+ rockchip_vpu981_av1_dec_set_loopfilter(ctx);
+ rockchip_vpu981_av1_dec_set_picture_dimensions(ctx);
+ rockchip_vpu981_av1_dec_set_cdef(ctx);
+ rockchip_vpu981_av1_dec_set_lr(ctx);
+ rockchip_vpu981_av1_dec_set_fgs(ctx);
+ rockchip_vpu981_av1_dec_set_prob(ctx);
+
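+	/* Static decode mode, bus, and timeout configuration. */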
+ hantro_reg_write(vpu, &av1_dec_mode, AV1_DEC_MODE);
+ hantro_reg_write(vpu, &av1_dec_out_ec_byte_word, 0);
+ hantro_reg_write(vpu, &av1_write_mvs_e, 1);
+ hantro_reg_write(vpu, &av1_dec_out_ec_bypass, 1);
+ hantro_reg_write(vpu, &av1_dec_clk_gate_e, 1);
+
+ hantro_reg_write(vpu, &av1_dec_abort_e, 0);
+ hantro_reg_write(vpu, &av1_dec_tile_int_e, 0);
+
+ hantro_reg_write(vpu, &av1_dec_alignment, 64);
+ hantro_reg_write(vpu, &av1_apf_disable, 0);
+ hantro_reg_write(vpu, &av1_apf_threshold, 8);
+ hantro_reg_write(vpu, &av1_dec_buswidth, 2);
+ hantro_reg_write(vpu, &av1_dec_max_burst, 16);
+ hantro_reg_write(vpu, &av1_error_conceal_e, 0);
+ hantro_reg_write(vpu, &av1_axi_rd_ostd_threshold, 64);
+ hantro_reg_write(vpu, &av1_axi_wr_ostd_threshold, 64);
+
+ hantro_reg_write(vpu, &av1_ext_timeout_cycles, 0xfffffff);
+ hantro_reg_write(vpu, &av1_ext_timeout_override_e, 1);
+ hantro_reg_write(vpu, &av1_timeout_cycles, 0xfffffff);
+ hantro_reg_write(vpu, &av1_timeout_override_e, 1);
+
+ rockchip_vpu981_av1_dec_set_output_buffer(ctx);
+ rockchip_vpu981_av1_dec_set_input_buffer(ctx, vb2_src);
+
+ hantro_end_prepare_run(ctx);
+
+ hantro_reg_write(vpu, &av1_dec_e, 1);
+
+ return 0;
+
+prepare_error:
+ hantro_end_prepare_run(ctx);
+ hantro_irq_done(vpu, VB2_BUF_STATE_ERROR);
+ return ret;
+}
+
+static void rockchip_vpu981_postproc_enable(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ int width = ctx->dst_fmt.width;
+ int height = ctx->dst_fmt.height;
+ struct vb2_v4l2_buffer *vb2_dst;
+ size_t chroma_offset;
+ dma_addr_t dst_dma;
+
+ vb2_dst = hantro_get_dst_buf(ctx);
+
+ dst_dma = vb2_dma_contig_plane_dma_addr(&vb2_dst->vb2_buf, 0);
+ chroma_offset = ctx->dst_fmt.plane_fmt[0].bytesperline *
+ ctx->dst_fmt.height;
+
+ /* enable post processor */
+ hantro_reg_write(vpu, &av1_pp_out_e, 1);
+ hantro_reg_write(vpu, &av1_pp_in_format, 0);
+ hantro_reg_write(vpu, &av1_pp0_dup_hor, 1);
+ hantro_reg_write(vpu, &av1_pp0_dup_ver, 1);
+
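+	/*
+	 * The source is programmed at half the destination size with pixel
+	 * duplication enabled in both directions, i.e. a 2x upscale through
+	 * the post-processor.
+	 */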
+ hantro_reg_write(vpu, &av1_pp_in_height, height / 2);
+ hantro_reg_write(vpu, &av1_pp_in_width, width / 2);
+ hantro_reg_write(vpu, &av1_pp_out_height, height);
+ hantro_reg_write(vpu, &av1_pp_out_width, width);
+ hantro_reg_write(vpu, &av1_pp_out_y_stride,
+ ctx->dst_fmt.plane_fmt[0].bytesperline);
+ hantro_reg_write(vpu, &av1_pp_out_c_stride,
+ ctx->dst_fmt.plane_fmt[0].bytesperline);
+ switch (ctx->dst_fmt.pixelformat) {
+ case V4L2_PIX_FMT_P010:
+ hantro_reg_write(vpu, &av1_pp_out_format, 1);
+ break;
+ case V4L2_PIX_FMT_NV12:
+ hantro_reg_write(vpu, &av1_pp_out_format, 3);
+ break;
+ default:
+ hantro_reg_write(vpu, &av1_pp_out_format, 0);
+ }
+
+ hantro_reg_write(vpu, &av1_ppd_blend_exist, 0);
+ hantro_reg_write(vpu, &av1_ppd_dith_exist, 0);
+ hantro_reg_write(vpu, &av1_ablend_crop_e, 0);
+ hantro_reg_write(vpu, &av1_pp_format_customer1_e, 0);
+ hantro_reg_write(vpu, &av1_pp_crop_exist, 0);
+ hantro_reg_write(vpu, &av1_pp_up_level, 0);
+ hantro_reg_write(vpu, &av1_pp_down_level, 0);
+ hantro_reg_write(vpu, &av1_pp_exist, 0);
+
+ hantro_write_addr(vpu, AV1_PP_OUT_LU, dst_dma);
+ hantro_write_addr(vpu, AV1_PP_OUT_CH, dst_dma + chroma_offset);
+}
+
+static void rockchip_vpu981_postproc_disable(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+
+ /* disable post processor */
+ hantro_reg_write(vpu, &av1_pp_out_e, 0);
+}
+
+const struct hantro_postproc_ops rockchip_vpu981_postproc_ops = {
+ .enable = rockchip_vpu981_postproc_enable,
+ .disable = rockchip_vpu981_postproc_disable,
+};
diff --git a/drivers/media/platform/verisilicon/rockchip_vpu981_regs.h b/drivers/media/platform/verisilicon/rockchip_vpu981_regs.h
new file mode 100644
index 0000000000..182e6c830f
--- /dev/null
+++ b/drivers/media/platform/verisilicon/rockchip_vpu981_regs.h
@@ -0,0 +1,477 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022, Collabora
+ *
+ * Author: Benjamin Gaignard <benjamin.gaignard@collabora.com>
+ */
+
+#ifndef _ROCKCHIP_VPU981_REGS_H_
+#define _ROCKCHIP_VPU981_REGS_H_
+
+#include "hantro.h"
+
+#define AV1_SWREG(nr) ((nr) * 4)
+
+#define AV1_DEC_REG(b, s, m) \
+ ((const struct hantro_reg) { \
+ .base = AV1_SWREG(b), \
+ .shift = s, \
+ .mask = m, \
+ })
+
+#define AV1_REG_INTERRUPT AV1_SWREG(1)
+#define AV1_REG_INTERRUPT_DEC_RDY_INT BIT(12)
+
+#define AV1_REG_CONFIG AV1_SWREG(2)
+#define AV1_REG_CONFIG_DEC_CLK_GATE_E BIT(10)
+
+#define av1_dec_e AV1_DEC_REG(1, 0, 0x1)
+#define av1_dec_abort_e AV1_DEC_REG(1, 5, 0x1)
+#define av1_dec_tile_int_e AV1_DEC_REG(1, 7, 0x1)
+
+#define av1_dec_clk_gate_e AV1_DEC_REG(2, 10, 0x1)
+
+#define av1_dec_out_ec_bypass AV1_DEC_REG(3, 8, 0x1)
+#define av1_write_mvs_e AV1_DEC_REG(3, 12, 0x1)
+#define av1_filtering_dis AV1_DEC_REG(3, 14, 0x1)
+#define av1_dec_out_dis AV1_DEC_REG(3, 15, 0x1)
+#define av1_dec_out_ec_byte_word AV1_DEC_REG(3, 16, 0x1)
+#define av1_skip_mode AV1_DEC_REG(3, 26, 0x1)
+#define av1_dec_mode AV1_DEC_REG(3, 27, 0x1f)
+
+#define av1_ref_frames AV1_DEC_REG(4, 0, 0xf)
+#define av1_pic_height_in_cbs AV1_DEC_REG(4, 6, 0x1fff)
+#define av1_pic_width_in_cbs AV1_DEC_REG(4, 19, 0x1fff)
+
+#define av1_ref_scaling_enable AV1_DEC_REG(5, 0, 0x1)
+#define av1_filt_level_base_gt32 AV1_DEC_REG(5, 1, 0x1)
+#define av1_error_resilient AV1_DEC_REG(5, 2, 0x1)
+#define av1_force_integer_mv		AV1_DEC_REG(5, 3, 0x1)
+#define av1_allow_intrabc AV1_DEC_REG(5, 4, 0x1)
+#define av1_allow_screen_content_tools AV1_DEC_REG(5, 5, 0x1)
+#define av1_reduced_tx_set_used AV1_DEC_REG(5, 6, 0x1)
+#define av1_enable_dual_filter AV1_DEC_REG(5, 7, 0x1)
+#define av1_enable_jnt_comp AV1_DEC_REG(5, 8, 0x1)
+#define av1_allow_filter_intra AV1_DEC_REG(5, 9, 0x1)
+#define av1_enable_intra_edge_filter AV1_DEC_REG(5, 10, 0x1)
+#define av1_tempor_mvp_e AV1_DEC_REG(5, 11, 0x1)
+#define av1_allow_interintra AV1_DEC_REG(5, 12, 0x1)
+#define av1_allow_masked_compound AV1_DEC_REG(5, 13, 0x1)
+#define av1_enable_cdef AV1_DEC_REG(5, 14, 0x1)
+#define av1_switchable_motion_mode AV1_DEC_REG(5, 15, 0x1)
+#define av1_show_frame AV1_DEC_REG(5, 16, 0x1)
+#define av1_superres_is_scaled AV1_DEC_REG(5, 17, 0x1)
+#define av1_allow_warp AV1_DEC_REG(5, 18, 0x1)
+#define av1_disable_cdf_update AV1_DEC_REG(5, 19, 0x1)
+#define av1_preskip_segid AV1_DEC_REG(5, 20, 0x1)
+#define av1_delta_lf_present AV1_DEC_REG(5, 21, 0x1)
+#define av1_delta_lf_multi AV1_DEC_REG(5, 22, 0x1)
+#define av1_delta_lf_res_log AV1_DEC_REG(5, 23, 0x3)
+#define av1_strm_start_bit AV1_DEC_REG(5, 25, 0x7f)
+
+#define av1_stream_len AV1_DEC_REG(6, 0, 0xffffffff)
+
+#define av1_delta_q_present AV1_DEC_REG(7, 0, 0x1)
+#define av1_delta_q_res_log AV1_DEC_REG(7, 1, 0x3)
+#define av1_cdef_damping AV1_DEC_REG(7, 3, 0x3)
+#define av1_cdef_bits AV1_DEC_REG(7, 5, 0x3)
+#define av1_apply_grain AV1_DEC_REG(7, 7, 0x1)
+#define av1_num_y_points_b AV1_DEC_REG(7, 8, 0x1)
+#define av1_num_cb_points_b AV1_DEC_REG(7, 9, 0x1)
+#define av1_num_cr_points_b AV1_DEC_REG(7, 10, 0x1)
+#define av1_overlap_flag AV1_DEC_REG(7, 11, 0x1)
+#define av1_clip_to_restricted_range AV1_DEC_REG(7, 12, 0x1)
+#define av1_chroma_scaling_from_luma AV1_DEC_REG(7, 13, 0x1)
+#define av1_random_seed AV1_DEC_REG(7, 14, 0xffff)
+#define av1_blackwhite_e AV1_DEC_REG(7, 30, 0x1)
+
+#define av1_scaling_shift AV1_DEC_REG(8, 0, 0xf)
+#define av1_bit_depth_c_minus8 AV1_DEC_REG(8, 4, 0x3)
+#define av1_bit_depth_y_minus8 AV1_DEC_REG(8, 6, 0x3)
+#define av1_quant_base_qindex AV1_DEC_REG(8, 8, 0xff)
+#define av1_idr_pic_e AV1_DEC_REG(8, 16, 0x1)
+#define av1_superres_pic_width AV1_DEC_REG(8, 17, 0x7fff)
+
+#define av1_ref4_sign_bias AV1_DEC_REG(9, 2, 0x1)
+#define av1_ref5_sign_bias AV1_DEC_REG(9, 3, 0x1)
+#define av1_ref6_sign_bias AV1_DEC_REG(9, 4, 0x1)
+#define av1_mf1_type AV1_DEC_REG(9, 5, 0x7)
+#define av1_mf2_type AV1_DEC_REG(9, 8, 0x7)
+#define av1_mf3_type AV1_DEC_REG(9, 11, 0x7)
+#define av1_scale_denom_minus9 AV1_DEC_REG(9, 14, 0x7)
+#define av1_last_active_seg AV1_DEC_REG(9, 17, 0x7)
+#define av1_context_update_tile_id AV1_DEC_REG(9, 20, 0xfff)
+
+#define av1_tile_transpose AV1_DEC_REG(10, 0, 0x1)
+#define av1_tile_enable AV1_DEC_REG(10, 1, 0x1)
+#define av1_multicore_full_width AV1_DEC_REG(10, 2, 0xff)
+#define av1_num_tile_rows_8k AV1_DEC_REG(10, 10, 0x7f)
+#define av1_num_tile_cols_8k AV1_DEC_REG(10, 17, 0x7f)
+#define av1_multicore_tile_start_x AV1_DEC_REG(10, 24, 0xff)
+
+#define av1_use_temporal3_mvs AV1_DEC_REG(11, 0, 0x1)
+#define av1_use_temporal2_mvs AV1_DEC_REG(11, 1, 0x1)
+#define av1_use_temporal1_mvs AV1_DEC_REG(11, 2, 0x1)
+#define av1_use_temporal0_mvs AV1_DEC_REG(11, 3, 0x1)
+#define av1_comp_pred_mode AV1_DEC_REG(11, 4, 0x3)
+#define av1_high_prec_mv_e AV1_DEC_REG(11, 7, 0x1)
+#define av1_mcomp_filt_type AV1_DEC_REG(11, 8, 0x7)
+#define av1_multicore_expect_context_update AV1_DEC_REG(11, 11, 0x1)
+#define av1_multicore_sbx_offset AV1_DEC_REG(11, 12, 0x7f)
+#define av1_ulticore_tile_col AV1_DEC_REG(11, 19, 0x7f)
+#define av1_transform_mode AV1_DEC_REG(11, 27, 0x7)
+#define av1_dec_tile_size_mag AV1_DEC_REG(11, 30, 0x3)
+
+#define av1_seg_quant_sign AV1_DEC_REG(12, 2, 0xff)
+#define av1_max_cb_size AV1_DEC_REG(12, 10, 0x7)
+#define av1_min_cb_size AV1_DEC_REG(12, 13, 0x7)
+#define av1_comp_pred_fixed_ref AV1_DEC_REG(12, 16, 0x7)
+#define av1_multicore_tile_width AV1_DEC_REG(12, 19, 0x7f)
+#define av1_pic_height_pad AV1_DEC_REG(12, 26, 0x7)
+#define av1_pic_width_pad AV1_DEC_REG(12, 29, 0x7)
+
+#define av1_segment_e AV1_DEC_REG(13, 0, 0x1)
+#define av1_segment_upd_e AV1_DEC_REG(13, 1, 0x1)
+#define av1_segment_temp_upd_e AV1_DEC_REG(13, 2, 0x1)
+#define av1_comp_pred_var_ref0_av1 AV1_DEC_REG(13, 3, 0x7)
+#define av1_comp_pred_var_ref1_av1 AV1_DEC_REG(13, 6, 0x7)
+#define av1_lossless_e AV1_DEC_REG(13, 9, 0x1)
+#define av1_qp_delta_ch_ac_av1 AV1_DEC_REG(13, 11, 0x7f)
+#define av1_qp_delta_ch_dc_av1 AV1_DEC_REG(13, 18, 0x7f)
+#define av1_qp_delta_y_dc_av1 AV1_DEC_REG(13, 25, 0x7f)
+
+#define av1_quant_seg0 AV1_DEC_REG(14, 0, 0xff)
+#define av1_filt_level_seg0 AV1_DEC_REG(14, 8, 0x3f)
+#define av1_skip_seg0 AV1_DEC_REG(14, 14, 0x1)
+#define av1_refpic_seg0 AV1_DEC_REG(14, 15, 0xf)
+#define av1_filt_level_delta0_seg0 AV1_DEC_REG(14, 19, 0x7f)
+#define av1_filt_level0 AV1_DEC_REG(14, 26, 0x3f)
+
+#define av1_quant_seg1 AV1_DEC_REG(15, 0, 0xff)
+#define av1_filt_level_seg1 AV1_DEC_REG(15, 8, 0x3f)
+#define av1_skip_seg1 AV1_DEC_REG(15, 14, 0x1)
+#define av1_refpic_seg1 AV1_DEC_REG(15, 15, 0xf)
+#define av1_filt_level_delta0_seg1 AV1_DEC_REG(15, 19, 0x7f)
+#define av1_filt_level1 AV1_DEC_REG(15, 26, 0x3f)
+
+#define av1_quant_seg2 AV1_DEC_REG(16, 0, 0xff)
+#define av1_filt_level_seg2 AV1_DEC_REG(16, 8, 0x3f)
+#define av1_skip_seg2 AV1_DEC_REG(16, 14, 0x1)
+#define av1_refpic_seg2 AV1_DEC_REG(16, 15, 0xf)
+#define av1_filt_level_delta0_seg2 AV1_DEC_REG(16, 19, 0x7f)
+#define av1_filt_level2 AV1_DEC_REG(16, 26, 0x3f)
+
+#define av1_quant_seg3 AV1_DEC_REG(17, 0, 0xff)
+#define av1_filt_level_seg3 AV1_DEC_REG(17, 8, 0x3f)
+#define av1_skip_seg3 AV1_DEC_REG(17, 14, 0x1)
+#define av1_refpic_seg3 AV1_DEC_REG(17, 15, 0xf)
+#define av1_filt_level_delta0_seg3 AV1_DEC_REG(17, 19, 0x7f)
+#define av1_filt_level3 AV1_DEC_REG(17, 26, 0x3f)
+
+#define av1_quant_seg4 AV1_DEC_REG(18, 0, 0xff)
+#define av1_filt_level_seg4 AV1_DEC_REG(18, 8, 0x3f)
+#define av1_skip_seg4 AV1_DEC_REG(18, 14, 0x1)
+#define av1_refpic_seg4 AV1_DEC_REG(18, 15, 0xf)
+#define av1_filt_level_delta0_seg4 AV1_DEC_REG(18, 19, 0x7f)
+#define av1_lr_type AV1_DEC_REG(18, 26, 0x3f)
+
+#define av1_quant_seg5 AV1_DEC_REG(19, 0, 0xff)
+#define av1_filt_level_seg5 AV1_DEC_REG(19, 8, 0x3f)
+#define av1_skip_seg5 AV1_DEC_REG(19, 14, 0x1)
+#define av1_refpic_seg5 AV1_DEC_REG(19, 15, 0xf)
+#define av1_filt_level_delta0_seg5 AV1_DEC_REG(19, 19, 0x7f)
+#define av1_lr_unit_size AV1_DEC_REG(19, 26, 0x3f)
+
+#define av1_filt_level_delta1_seg0 AV1_DEC_REG(20, 0, 0x7f)
+#define av1_filt_level_delta2_seg0 AV1_DEC_REG(20, 7, 0x7f)
+#define av1_filt_level_delta3_seg0 AV1_DEC_REG(20, 14, 0x7f)
+#define av1_global_mv_seg0 AV1_DEC_REG(20, 21, 0x1)
+#define av1_mf1_last_offset AV1_DEC_REG(20, 22, 0x1ff)
+
+#define av1_filt_level_delta1_seg1 AV1_DEC_REG(21, 0, 0x7f)
+#define av1_filt_level_delta2_seg1 AV1_DEC_REG(21, 7, 0x7f)
+#define av1_filt_level_delta3_seg1 AV1_DEC_REG(21, 14, 0x7f)
+#define av1_global_mv_seg1 AV1_DEC_REG(21, 21, 0x1)
+#define av1_mf1_last2_offset AV1_DEC_REG(21, 22, 0x1ff)
+
+#define av1_filt_level_delta1_seg2 AV1_DEC_REG(22, 0, 0x7f)
+#define av1_filt_level_delta2_seg2 AV1_DEC_REG(22, 7, 0x7f)
+#define av1_filt_level_delta3_seg2 AV1_DEC_REG(22, 14, 0x7f)
+#define av1_global_mv_seg2 AV1_DEC_REG(22, 21, 0x1)
+#define av1_mf1_last3_offset AV1_DEC_REG(22, 22, 0x1ff)
+
+#define av1_filt_level_delta1_seg3 AV1_DEC_REG(23, 0, 0x7f)
+#define av1_filt_level_delta2_seg3 AV1_DEC_REG(23, 7, 0x7f)
+#define av1_filt_level_delta3_seg3 AV1_DEC_REG(23, 14, 0x7f)
+#define av1_global_mv_seg3 AV1_DEC_REG(23, 21, 0x1)
+#define av1_mf1_golden_offset AV1_DEC_REG(23, 22, 0x1ff)
+
+#define av1_filt_level_delta1_seg4 AV1_DEC_REG(24, 0, 0x7f)
+#define av1_filt_level_delta2_seg4 AV1_DEC_REG(24, 7, 0x7f)
+#define av1_filt_level_delta3_seg4 AV1_DEC_REG(24, 14, 0x7f)
+#define av1_global_mv_seg4 AV1_DEC_REG(24, 21, 0x1)
+#define av1_mf1_bwdref_offset AV1_DEC_REG(24, 22, 0x1ff)
+
+#define av1_filt_level_delta1_seg5 AV1_DEC_REG(25, 0, 0x7f)
+#define av1_filt_level_delta2_seg5 AV1_DEC_REG(25, 7, 0x7f)
+#define av1_filt_level_delta3_seg5 AV1_DEC_REG(25, 14, 0x7f)
+#define av1_global_mv_seg5 AV1_DEC_REG(25, 21, 0x1)
+#define av1_mf1_altref2_offset AV1_DEC_REG(25, 22, 0x1ff)
+
+#define av1_filt_level_delta1_seg6 AV1_DEC_REG(26, 0, 0x7f)
+#define av1_filt_level_delta2_seg6 AV1_DEC_REG(26, 7, 0x7f)
+#define av1_filt_level_delta3_seg6 AV1_DEC_REG(26, 14, 0x7f)
+#define av1_global_mv_seg6 AV1_DEC_REG(26, 21, 0x1)
+#define av1_mf1_altref_offset AV1_DEC_REG(26, 22, 0x1ff)
+
+#define av1_filt_level_delta1_seg7 AV1_DEC_REG(27, 0, 0x7f)
+#define av1_filt_level_delta2_seg7 AV1_DEC_REG(27, 7, 0x7f)
+#define av1_filt_level_delta3_seg7 AV1_DEC_REG(27, 14, 0x7f)
+#define av1_global_mv_seg7 AV1_DEC_REG(27, 21, 0x1)
+#define av1_mf2_last_offset AV1_DEC_REG(27, 22, 0x1ff)
+
+#define av1_cb_offset AV1_DEC_REG(28, 0, 0x1ff)
+#define av1_cb_luma_mult AV1_DEC_REG(28, 9, 0xff)
+#define av1_cb_mult AV1_DEC_REG(28, 17, 0xff)
+#define av1_quant_delta_v_dc AV1_DEC_REG(28, 25, 0x7f)
+
+#define av1_cr_offset AV1_DEC_REG(29, 0, 0x1ff)
+#define av1_cr_luma_mult AV1_DEC_REG(29, 9, 0xff)
+#define av1_cr_mult AV1_DEC_REG(29, 17, 0xff)
+#define av1_quant_delta_v_ac AV1_DEC_REG(29, 25, 0x7f)
+
+#define av1_filt_ref_adj_5 AV1_DEC_REG(30, 0, 0x7f)
+#define av1_filt_ref_adj_4 AV1_DEC_REG(30, 7, 0x7f)
+#define av1_filt_mb_adj_1 AV1_DEC_REG(30, 14, 0x7f)
+#define av1_filt_mb_adj_0 AV1_DEC_REG(30, 21, 0x7f)
+#define av1_filt_sharpness AV1_DEC_REG(30, 28, 0x7)
+
+#define av1_quant_seg6 AV1_DEC_REG(31, 0, 0xff)
+#define av1_filt_level_seg6 AV1_DEC_REG(31, 8, 0x3f)
+#define av1_skip_seg6 AV1_DEC_REG(31, 14, 0x1)
+#define av1_refpic_seg6 AV1_DEC_REG(31, 15, 0xf)
+#define av1_filt_level_delta0_seg6 AV1_DEC_REG(31, 19, 0x7f)
+#define av1_skip_ref0 AV1_DEC_REG(31, 26, 0xf)
+
+#define av1_quant_seg7 AV1_DEC_REG(32, 0, 0xff)
+#define av1_filt_level_seg7 AV1_DEC_REG(32, 8, 0x3f)
+#define av1_skip_seg7 AV1_DEC_REG(32, 14, 0x1)
+#define av1_refpic_seg7 AV1_DEC_REG(32, 15, 0xf)
+#define av1_filt_level_delta0_seg7 AV1_DEC_REG(32, 19, 0x7f)
+#define av1_skip_ref1 AV1_DEC_REG(32, 26, 0xf)
+
+#define av1_ref0_height AV1_DEC_REG(33, 0, 0xffff)
+#define av1_ref0_width AV1_DEC_REG(33, 16, 0xffff)
+
+#define av1_ref1_height AV1_DEC_REG(34, 0, 0xffff)
+#define av1_ref1_width AV1_DEC_REG(34, 16, 0xffff)
+
+#define av1_ref2_height AV1_DEC_REG(35, 0, 0xffff)
+#define av1_ref2_width AV1_DEC_REG(35, 16, 0xffff)
+
+#define av1_ref0_ver_scale AV1_DEC_REG(36, 0, 0xffff)
+#define av1_ref0_hor_scale AV1_DEC_REG(36, 16, 0xffff)
+
+#define av1_ref1_ver_scale AV1_DEC_REG(37, 0, 0xffff)
+#define av1_ref1_hor_scale AV1_DEC_REG(37, 16, 0xffff)
+
+#define av1_ref2_ver_scale AV1_DEC_REG(38, 0, 0xffff)
+#define av1_ref2_hor_scale AV1_DEC_REG(38, 16, 0xffff)
+
+#define av1_ref3_ver_scale AV1_DEC_REG(39, 0, 0xffff)
+#define av1_ref3_hor_scale AV1_DEC_REG(39, 16, 0xffff)
+
+#define av1_ref4_ver_scale AV1_DEC_REG(40, 0, 0xffff)
+#define av1_ref4_hor_scale AV1_DEC_REG(40, 16, 0xffff)
+
+#define av1_ref5_ver_scale AV1_DEC_REG(41, 0, 0xffff)
+#define av1_ref5_hor_scale AV1_DEC_REG(41, 16, 0xffff)
+
+#define av1_ref6_ver_scale AV1_DEC_REG(42, 0, 0xffff)
+#define av1_ref6_hor_scale AV1_DEC_REG(42, 16, 0xffff)
+
+#define av1_ref3_height AV1_DEC_REG(43, 0, 0xffff)
+#define av1_ref3_width AV1_DEC_REG(43, 16, 0xffff)
+
+#define av1_ref4_height AV1_DEC_REG(44, 0, 0xffff)
+#define av1_ref4_width AV1_DEC_REG(44, 16, 0xffff)
+
+#define av1_ref5_height AV1_DEC_REG(45, 0, 0xffff)
+#define av1_ref5_width AV1_DEC_REG(45, 16, 0xffff)
+
+#define av1_ref6_height AV1_DEC_REG(46, 0, 0xffff)
+#define av1_ref6_width AV1_DEC_REG(46, 16, 0xffff)
+
+#define av1_mf2_last2_offset AV1_DEC_REG(47, 0, 0x1ff)
+#define av1_mf2_last3_offset AV1_DEC_REG(47, 9, 0x1ff)
+#define av1_mf2_golden_offset AV1_DEC_REG(47, 18, 0x1ff)
+#define av1_qmlevel_y AV1_DEC_REG(47, 27, 0xf)
+
+#define av1_mf2_bwdref_offset AV1_DEC_REG(48, 0, 0x1ff)
+#define av1_mf2_altref2_offset AV1_DEC_REG(48, 9, 0x1ff)
+#define av1_mf2_altref_offset AV1_DEC_REG(48, 18, 0x1ff)
+#define av1_qmlevel_u AV1_DEC_REG(48, 27, 0xf)
+
+#define av1_filt_ref_adj_6 AV1_DEC_REG(49, 0, 0x7f)
+#define av1_filt_ref_adj_7 AV1_DEC_REG(49, 7, 0x7f)
+#define av1_qmlevel_v AV1_DEC_REG(49, 14, 0xf)
+
+#define av1_superres_chroma_step AV1_DEC_REG(51, 0, 0x3fff)
+#define av1_superres_luma_step AV1_DEC_REG(51, 14, 0x3fff)
+
+#define av1_superres_init_chroma_subpel_x AV1_DEC_REG(52, 0, 0x3fff)
+#define av1_superres_init_luma_subpel_x AV1_DEC_REG(52, 14, 0x3fff)
+
+#define av1_cdef_chroma_secondary_strength AV1_DEC_REG(53, 0, 0xffff)
+#define av1_cdef_luma_secondary_strength AV1_DEC_REG(53, 16, 0xffff)
+
+#define av1_apf_threshold AV1_DEC_REG(55, 0, 0xffff)
+#define av1_apf_single_pu_mode AV1_DEC_REG(55, 30, 0x1)
+#define av1_apf_disable AV1_DEC_REG(55, 30, 0x1)
+
+#define av1_dec_max_burst AV1_DEC_REG(58, 0, 0xff)
+#define av1_dec_buswidth AV1_DEC_REG(58, 8, 0x7)
+#define av1_dec_multicore_mode AV1_DEC_REG(58, 11, 0x3)
+#define av1_dec_axi_wd_id_e AV1_DEC_REG(58, 13, 0x1)
+#define av1_dec_axi_rd_id_e AV1_DEC_REG(58, 14, 0x1)
+#define av1_dec_mc_polltime AV1_DEC_REG(58, 17, 0x3ff)
+#define av1_dec_mc_pollmode AV1_DEC_REG(58, 27, 0x3)
+
+#define av1_filt_ref_adj_3 AV1_DEC_REG(59, 0, 0x3f)
+#define av1_filt_ref_adj_2 AV1_DEC_REG(59, 7, 0x3f)
+#define av1_filt_ref_adj_1 AV1_DEC_REG(59, 14, 0x3f)
+#define av1_filt_ref_adj_0 AV1_DEC_REG(59, 21, 0x3f)
+#define av1_ref0_sign_bias AV1_DEC_REG(59, 28, 0x1)
+#define av1_ref1_sign_bias AV1_DEC_REG(59, 29, 0x1)
+#define av1_ref2_sign_bias AV1_DEC_REG(59, 30, 0x1)
+#define av1_ref3_sign_bias AV1_DEC_REG(59, 31, 0x1)
+
+#define av1_cur_last_roffset AV1_DEC_REG(184, 0, 0x1ff)
+#define av1_cur_last_offset AV1_DEC_REG(184, 9, 0x1ff)
+#define av1_mf3_last_offset AV1_DEC_REG(184, 18, 0x1ff)
+#define av1_ref0_gm_mode AV1_DEC_REG(184, 27, 0x3)
+
+#define av1_cur_last2_roffset AV1_DEC_REG(185, 0, 0x1ff)
+#define av1_cur_last2_offset AV1_DEC_REG(185, 9, 0x1ff)
+#define av1_mf3_last2_offset AV1_DEC_REG(185, 18, 0x1ff)
+#define av1_ref1_gm_mode AV1_DEC_REG(185, 27, 0x3)
+
+#define av1_cur_last3_roffset AV1_DEC_REG(186, 0, 0x1ff)
+#define av1_cur_last3_offset AV1_DEC_REG(186, 9, 0x1ff)
+#define av1_mf3_last3_offset AV1_DEC_REG(186, 18, 0x1ff)
+#define av1_ref2_gm_mode AV1_DEC_REG(186, 27, 0x3)
+
+#define av1_cur_golden_roffset AV1_DEC_REG(187, 0, 0x1ff)
+#define av1_cur_golden_offset AV1_DEC_REG(187, 9, 0x1ff)
+#define av1_mf3_golden_offset AV1_DEC_REG(187, 18, 0x1ff)
+#define av1_ref3_gm_mode AV1_DEC_REG(187, 27, 0x3)
+
+#define av1_cur_bwdref_roffset AV1_DEC_REG(188, 0, 0x1ff)
+#define av1_cur_bwdref_offset AV1_DEC_REG(188, 9, 0x1ff)
+#define av1_mf3_bwdref_offset AV1_DEC_REG(188, 18, 0x1ff)
+#define av1_ref4_gm_mode AV1_DEC_REG(188, 27, 0x3)
+
+#define av1_cur_altref2_roffset AV1_DEC_REG(257, 0, 0x1ff)
+#define av1_cur_altref2_offset AV1_DEC_REG(257, 9, 0x1ff)
+#define av1_mf3_altref2_offset AV1_DEC_REG(257, 18, 0x1ff)
+#define av1_ref5_gm_mode AV1_DEC_REG(257, 27, 0x3)
+
+#define av1_strm_buffer_len AV1_DEC_REG(258, 0, 0xffffffff)
+
+#define av1_strm_start_offset AV1_DEC_REG(259, 0, 0xffffffff)
+
+#define av1_ppd_blend_exist AV1_DEC_REG(260, 21, 0x1)
+#define av1_ppd_dith_exist AV1_DEC_REG(260, 23, 0x1)
+#define av1_ablend_crop_e AV1_DEC_REG(260, 24, 0x1)
+#define av1_pp_format_p010_e AV1_DEC_REG(260, 25, 0x1)
+#define av1_pp_format_customer1_e AV1_DEC_REG(260, 26, 0x1)
+#define av1_pp_crop_exist AV1_DEC_REG(260, 27, 0x1)
+#define av1_pp_up_level AV1_DEC_REG(260, 28, 0x1)
+#define av1_pp_down_level AV1_DEC_REG(260, 29, 0x3)
+#define av1_pp_exist AV1_DEC_REG(260, 31, 0x1)
+
+#define av1_cur_altref_roffset AV1_DEC_REG(262, 0, 0x1ff)
+#define av1_cur_altref_offset AV1_DEC_REG(262, 9, 0x1ff)
+#define av1_mf3_altref_offset AV1_DEC_REG(262, 18, 0x1ff)
+#define av1_ref6_gm_mode AV1_DEC_REG(262, 27, 0x3)
+
+#define av1_cdef_luma_primary_strength AV1_DEC_REG(263, 0, 0xffffffff)
+
+#define av1_cdef_chroma_primary_strength AV1_DEC_REG(264, 0, 0xffffffff)
+
+#define av1_axi_arqos AV1_DEC_REG(265, 0, 0xf)
+#define av1_axi_awqos AV1_DEC_REG(265, 4, 0xf)
+#define av1_axi_wr_ostd_threshold AV1_DEC_REG(265, 8, 0x3ff)
+#define av1_axi_rd_ostd_threshold AV1_DEC_REG(265, 18, 0x3ff)
+#define av1_axi_wr_4k_dis AV1_DEC_REG(265, 31, 0x1)
+
+#define av1_128bit_mode AV1_DEC_REG(266, 5, 0x1)
+#define av1_wr_shaper_bypass AV1_DEC_REG(266, 10, 0x1)
+#define av1_error_conceal_e AV1_DEC_REG(266, 30, 0x1)
+
+#define av1_superres_chroma_step_invra AV1_DEC_REG(298, 0, 0xffff)
+#define av1_superres_luma_step_invra AV1_DEC_REG(298, 16, 0xffff)
+
+#define av1_dec_alignment AV1_DEC_REG(314, 0, 0xffff)
+
+#define av1_ext_timeout_cycles AV1_DEC_REG(318, 0, 0x7fffffff)
+#define av1_ext_timeout_override_e AV1_DEC_REG(318, 31, 0x1)
+
+#define av1_timeout_cycles AV1_DEC_REG(319, 0, 0x7fffffff)
+#define av1_timeout_override_e AV1_DEC_REG(319, 31, 0x1)
+
+#define av1_pp_out_e AV1_DEC_REG(320, 0, 0x1)
+#define av1_pp_cr_first AV1_DEC_REG(320, 1, 0x1)
+#define av1_pp_out_mode AV1_DEC_REG(320, 2, 0x1)
+#define av1_pp_out_tile_e AV1_DEC_REG(320, 3, 0x1)
+#define av1_pp_status AV1_DEC_REG(320, 4, 0xf)
+#define av1_pp_in_blk_size AV1_DEC_REG(320, 8, 0x7)
+#define av1_pp_out_p010_fmt AV1_DEC_REG(320, 11, 0x3)
+#define av1_pp_out_rgb_fmt AV1_DEC_REG(320, 13, 0x1f)
+#define av1_rgb_range_max AV1_DEC_REG(320, 18, 0xfff)
+#define av1_pp_rgb_planar AV1_DEC_REG(320, 30, 0x1)
+
+#define av1_scale_hratio AV1_DEC_REG(322, 0, 0x3ffff)
+#define av1_pp_out_format AV1_DEC_REG(322, 18, 0x1f)
+#define av1_ver_scale_mode AV1_DEC_REG(322, 23, 0x3)
+#define av1_hor_scale_mode AV1_DEC_REG(322, 25, 0x3)
+#define av1_pp_in_format AV1_DEC_REG(322, 27, 0x1f)
+
+#define av1_pp_out_c_stride AV1_DEC_REG(329, 0, 0xffff)
+#define av1_pp_out_y_stride AV1_DEC_REG(329, 16, 0xffff)
+
+#define av1_pp_in_height AV1_DEC_REG(331, 0, 0xffff)
+#define av1_pp_in_width AV1_DEC_REG(331, 16, 0xffff)
+
+#define av1_pp_out_height AV1_DEC_REG(332, 0, 0xffff)
+#define av1_pp_out_width AV1_DEC_REG(332, 16, 0xffff)
+
+#define av1_pp1_dup_ver AV1_DEC_REG(394, 0, 0xff)
+#define av1_pp1_dup_hor AV1_DEC_REG(394, 8, 0xff)
+#define av1_pp0_dup_ver AV1_DEC_REG(394, 16, 0xff)
+#define av1_pp0_dup_hor AV1_DEC_REG(394, 24, 0xff)
+
+#define AV1_TILE_OUT_LU (AV1_SWREG(65))
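+/*
+ * Each AV1_REFERENCE_* slot spans two SWREGs (0x8 bytes), hence the
+ * (i) * 0x8 stride below.
+ */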
+#define AV1_REFERENCE_Y(i) (AV1_SWREG(67) + ((i) * 0x8))
+#define AV1_SEGMENTATION (AV1_SWREG(81))
+#define AV1_GLOBAL_MODEL (AV1_SWREG(83))
+#define AV1_CDEF_COL (AV1_SWREG(85))
+#define AV1_SR_COL (AV1_SWREG(89))
+#define AV1_LR_COL (AV1_SWREG(91))
+#define AV1_FILM_GRAIN (AV1_SWREG(95))
+#define AV1_TILE_OUT_CH (AV1_SWREG(99))
+#define AV1_REFERENCE_CB(i) (AV1_SWREG(101) + ((i) * 0x8))
+#define AV1_TILE_OUT_MV (AV1_SWREG(133))
+#define AV1_REFERENCE_MV(i) (AV1_SWREG(135) + ((i) * 0x8))
+#define AV1_TILE_BASE (AV1_SWREG(167))
+#define AV1_INPUT_STREAM (AV1_SWREG(169))
+#define AV1_PROP_TABLE_OUT (AV1_SWREG(171))
+#define AV1_PROP_TABLE (AV1_SWREG(173))
+#define AV1_MC_SYNC_CURR (AV1_SWREG(175))
+#define AV1_MC_SYNC_LEFT (AV1_SWREG(177))
+#define AV1_DB_DATA_COL (AV1_SWREG(179))
+#define AV1_DB_CTRL_COL (AV1_SWREG(183))
+#define AV1_PP_OUT_LU (AV1_SWREG(326))
+#define AV1_PP_OUT_CH (AV1_SWREG(328))
+
+#endif /* _ROCKCHIP_VPU981_REGS_H_ */
diff --git a/drivers/media/platform/verisilicon/rockchip_vpu_hw.c b/drivers/media/platform/verisilicon/rockchip_vpu_hw.c
new file mode 100644
index 0000000000..f975276707
--- /dev/null
+++ b/drivers/media/platform/verisilicon/rockchip_vpu_hw.c
@@ -0,0 +1,814 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hantro VPU codec driver
+ *
+ * Copyright (C) 2018 Rockchip Electronics Co., Ltd.
+ * Jeffy Chen <jeffy.chen@rock-chips.com>
+ */
+
+#include <linux/clk.h>
+
+#include "hantro.h"
+#include "hantro_jpeg.h"
+#include "hantro_g1_regs.h"
+#include "hantro_h1_regs.h"
+#include "rockchip_vpu2_regs.h"
+#include "rockchip_vpu981_regs.h"
+
+#define RK3066_ACLK_MAX_FREQ (300 * 1000 * 1000)
+#define RK3288_ACLK_MAX_FREQ (400 * 1000 * 1000)
+#define RK3588_ACLK_MAX_FREQ (300 * 1000 * 1000)
+
+#define ROCKCHIP_VPU981_MIN_SIZE 64
+
+/*
+ * Supported formats.
+ */
+
+static const struct hantro_fmt rockchip_vpu_enc_fmts[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_YUV420M,
+ .codec_mode = HANTRO_MODE_NONE,
+ .enc_fmt = ROCKCHIP_VPU_ENC_FMT_YUV420P,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_NV12M,
+ .codec_mode = HANTRO_MODE_NONE,
+ .enc_fmt = ROCKCHIP_VPU_ENC_FMT_YUV420SP,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .codec_mode = HANTRO_MODE_NONE,
+ .enc_fmt = ROCKCHIP_VPU_ENC_FMT_YUYV422,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .codec_mode = HANTRO_MODE_NONE,
+ .enc_fmt = ROCKCHIP_VPU_ENC_FMT_UYVY422,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_JPEG,
+ .codec_mode = HANTRO_MODE_JPEG_ENC,
+ .max_depth = 2,
+ .header_size = JPEG_HEADER_SIZE,
+ .frmsize = {
+ .min_width = 96,
+ .max_width = 8192,
+ .step_width = MB_DIM,
+ .min_height = 32,
+ .max_height = 8192,
+ .step_height = MB_DIM,
+ },
+ },
+};
+
+static const struct hantro_fmt rockchip_vpu1_postproc_fmts[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .codec_mode = HANTRO_MODE_NONE,
+ .postprocessed = true,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_FHD_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_FHD_HEIGHT,
+ .step_height = MB_DIM,
+ },
+ },
+};
+
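+/*
+ * NV12 covers 8-bit output and P010 10-bit output; match_depth ties the
+ * post-processed format to the bit depth of the decoded stream.
+ */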
+static const struct hantro_fmt rockchip_vpu981_postproc_fmts[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .codec_mode = HANTRO_MODE_NONE,
+ .match_depth = true,
+ .postprocessed = true,
+ .frmsize = {
+ .min_width = ROCKCHIP_VPU981_MIN_SIZE,
+ .max_width = FMT_UHD_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = ROCKCHIP_VPU981_MIN_SIZE,
+ .max_height = FMT_UHD_HEIGHT,
+ .step_height = MB_DIM,
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_P010,
+ .codec_mode = HANTRO_MODE_NONE,
+ .match_depth = true,
+ .postprocessed = true,
+ .frmsize = {
+ .min_width = ROCKCHIP_VPU981_MIN_SIZE,
+ .max_width = FMT_UHD_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = ROCKCHIP_VPU981_MIN_SIZE,
+ .max_height = FMT_UHD_HEIGHT,
+ .step_height = MB_DIM,
+ },
+ },
+};
+
+static const struct hantro_fmt rk3066_vpu_dec_fmts[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .codec_mode = HANTRO_MODE_NONE,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_FHD_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_FHD_HEIGHT,
+ .step_height = MB_DIM,
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_H264_SLICE,
+ .codec_mode = HANTRO_MODE_H264_DEC,
+ .max_depth = 2,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_FHD_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_FHD_HEIGHT,
+ .step_height = MB_DIM,
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_MPEG2_SLICE,
+ .codec_mode = HANTRO_MODE_MPEG2_DEC,
+ .max_depth = 2,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_FHD_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_FHD_HEIGHT,
+ .step_height = MB_DIM,
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_VP8_FRAME,
+ .codec_mode = HANTRO_MODE_VP8_DEC,
+ .max_depth = 2,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_FHD_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_FHD_HEIGHT,
+ .step_height = MB_DIM,
+ },
+ },
+};
+
+static const struct hantro_fmt rk3288_vpu_dec_fmts[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .codec_mode = HANTRO_MODE_NONE,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_4K_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_4K_HEIGHT,
+ .step_height = MB_DIM,
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_H264_SLICE,
+ .codec_mode = HANTRO_MODE_H264_DEC,
+ .max_depth = 2,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_4K_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_4K_HEIGHT,
+ .step_height = MB_DIM,
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_MPEG2_SLICE,
+ .codec_mode = HANTRO_MODE_MPEG2_DEC,
+ .max_depth = 2,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_FHD_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_FHD_HEIGHT,
+ .step_height = MB_DIM,
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_VP8_FRAME,
+ .codec_mode = HANTRO_MODE_VP8_DEC,
+ .max_depth = 2,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_UHD_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_UHD_HEIGHT,
+ .step_height = MB_DIM,
+ },
+ },
+};
+
+static const struct hantro_fmt rockchip_vdpu2_dec_fmts[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .codec_mode = HANTRO_MODE_NONE,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_FHD_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_FHD_HEIGHT,
+ .step_height = MB_DIM,
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_H264_SLICE,
+ .codec_mode = HANTRO_MODE_H264_DEC,
+ .max_depth = 2,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_FHD_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_FHD_HEIGHT,
+ .step_height = MB_DIM,
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_MPEG2_SLICE,
+ .codec_mode = HANTRO_MODE_MPEG2_DEC,
+ .max_depth = 2,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_FHD_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_FHD_HEIGHT,
+ .step_height = MB_DIM,
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_VP8_FRAME,
+ .codec_mode = HANTRO_MODE_VP8_DEC,
+ .max_depth = 2,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_UHD_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_UHD_HEIGHT,
+ .step_height = MB_DIM,
+ },
+ },
+};
+
+static const struct hantro_fmt rk3399_vpu_dec_fmts[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .codec_mode = HANTRO_MODE_NONE,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_FHD_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_FHD_HEIGHT,
+ .step_height = MB_DIM,
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_MPEG2_SLICE,
+ .codec_mode = HANTRO_MODE_MPEG2_DEC,
+ .max_depth = 2,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_FHD_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_FHD_HEIGHT,
+ .step_height = MB_DIM,
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_VP8_FRAME,
+ .codec_mode = HANTRO_MODE_VP8_DEC,
+ .max_depth = 2,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_UHD_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_UHD_HEIGHT,
+ .step_height = MB_DIM,
+ },
+ },
+};
+
+static const struct hantro_fmt rockchip_vpu981_dec_fmts[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_NV12_4L4,
+ .codec_mode = HANTRO_MODE_NONE,
+ .match_depth = true,
+ .frmsize = {
+ .min_width = ROCKCHIP_VPU981_MIN_SIZE,
+ .max_width = FMT_UHD_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = ROCKCHIP_VPU981_MIN_SIZE,
+ .max_height = FMT_UHD_HEIGHT,
+ .step_height = MB_DIM,
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_NV15_4L4,
+ .codec_mode = HANTRO_MODE_NONE,
+ .match_depth = true,
+ .frmsize = {
+ .min_width = ROCKCHIP_VPU981_MIN_SIZE,
+ .max_width = FMT_UHD_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = ROCKCHIP_VPU981_MIN_SIZE,
+ .max_height = FMT_UHD_HEIGHT,
+ .step_height = MB_DIM,
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_AV1_FRAME,
+ .codec_mode = HANTRO_MODE_AV1_DEC,
+ .max_depth = 2,
+ .frmsize = {
+ .min_width = ROCKCHIP_VPU981_MIN_SIZE,
+ .max_width = FMT_UHD_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = ROCKCHIP_VPU981_MIN_SIZE,
+ .max_height = FMT_UHD_HEIGHT,
+ .step_height = MB_DIM,
+ },
+ },
+};
+
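+/*
+ * The IRQ handlers below share one pattern: read the status register,
+ * translate the "ready" bit into a vb2 buffer state, ack the interrupt
+ * (and quiesce the AXI interface where applicable), then hand the
+ * finished job back to the core via hantro_irq_done().
+ */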
+static irqreturn_t rockchip_vpu1_vepu_irq(int irq, void *dev_id)
+{
+ struct hantro_dev *vpu = dev_id;
+ enum vb2_buffer_state state;
+ u32 status;
+
+ status = vepu_read(vpu, H1_REG_INTERRUPT);
+ state = (status & H1_REG_INTERRUPT_FRAME_RDY) ?
+ VB2_BUF_STATE_DONE : VB2_BUF_STATE_ERROR;
+
+ vepu_write(vpu, 0, H1_REG_INTERRUPT);
+ vepu_write(vpu, 0, H1_REG_AXI_CTRL);
+
+ hantro_irq_done(vpu, state);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rockchip_vpu2_vdpu_irq(int irq, void *dev_id)
+{
+ struct hantro_dev *vpu = dev_id;
+ enum vb2_buffer_state state;
+ u32 status;
+
+ status = vdpu_read(vpu, VDPU_REG_INTERRUPT);
+ state = (status & VDPU_REG_INTERRUPT_DEC_IRQ) ?
+ VB2_BUF_STATE_DONE : VB2_BUF_STATE_ERROR;
+
+ vdpu_write(vpu, 0, VDPU_REG_INTERRUPT);
+ vdpu_write(vpu, 0, VDPU_REG_AXI_CTRL);
+
+ hantro_irq_done(vpu, state);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rockchip_vpu2_vepu_irq(int irq, void *dev_id)
+{
+ struct hantro_dev *vpu = dev_id;
+ enum vb2_buffer_state state;
+ u32 status;
+
+ status = vepu_read(vpu, VEPU_REG_INTERRUPT);
+ state = (status & VEPU_REG_INTERRUPT_FRAME_READY) ?
+ VB2_BUF_STATE_DONE : VB2_BUF_STATE_ERROR;
+
+ vepu_write(vpu, 0, VEPU_REG_INTERRUPT);
+ vepu_write(vpu, 0, VEPU_REG_AXI_CTRL);
+
+ hantro_irq_done(vpu, state);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rk3588_vpu981_irq(int irq, void *dev_id)
+{
+ struct hantro_dev *vpu = dev_id;
+ enum vb2_buffer_state state;
+ u32 status;
+
+ status = vdpu_read(vpu, AV1_REG_INTERRUPT);
+ state = (status & AV1_REG_INTERRUPT_DEC_RDY_INT) ?
+ VB2_BUF_STATE_DONE : VB2_BUF_STATE_ERROR;
+
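+	/* Ack the interrupt and leave decoder clock gating enabled. */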
+ vdpu_write(vpu, 0, AV1_REG_INTERRUPT);
+ vdpu_write(vpu, AV1_REG_CONFIG_DEC_CLK_GATE_E, AV1_REG_CONFIG);
+
+ hantro_irq_done(vpu, state);
+
+ return IRQ_HANDLED;
+}
+
+static int rk3036_vpu_hw_init(struct hantro_dev *vpu)
+{
+ /* Bump ACLK to max. possible freq. to improve performance. */
+ clk_set_rate(vpu->clocks[0].clk, RK3066_ACLK_MAX_FREQ);
+ return 0;
+}
+
+static int rk3066_vpu_hw_init(struct hantro_dev *vpu)
+{
+ /* Bump ACLKs to max. possible freq. to improve performance. */
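+	/* Indexes follow rk3066_vpu_clk_names: [0] is aclk_vdpu, [2] is aclk_vepu. */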
+ clk_set_rate(vpu->clocks[0].clk, RK3066_ACLK_MAX_FREQ);
+ clk_set_rate(vpu->clocks[2].clk, RK3066_ACLK_MAX_FREQ);
+ return 0;
+}
+
+static int rk3588_vpu981_hw_init(struct hantro_dev *vpu)
+{
+ /* Bump ACLKs to max. possible freq. to improve performance. */
+ clk_set_rate(vpu->clocks[0].clk, RK3588_ACLK_MAX_FREQ);
+ return 0;
+}
+
+static int rockchip_vpu_hw_init(struct hantro_dev *vpu)
+{
+ /* Bump ACLK to max. possible freq. to improve performance. */
+ clk_set_rate(vpu->clocks[0].clk, RK3288_ACLK_MAX_FREQ);
+ return 0;
+}
+
+static void rk3066_vpu_dec_reset(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+
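+	/* Mask the decoder interrupt and enable clock gating. */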
+ vdpu_write(vpu, G1_REG_INTERRUPT_DEC_IRQ_DIS, G1_REG_INTERRUPT);
+ vdpu_write(vpu, G1_REG_CONFIG_DEC_CLK_GATE_E, G1_REG_CONFIG);
+}
+
+static void rockchip_vpu1_enc_reset(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+
+ vepu_write(vpu, H1_REG_INTERRUPT_DIS_BIT, H1_REG_INTERRUPT);
+ vepu_write(vpu, 0, H1_REG_ENC_CTRL);
+ vepu_write(vpu, 0, H1_REG_AXI_CTRL);
+}
+
+static void rockchip_vpu2_dec_reset(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+
+ vdpu_write(vpu, VDPU_REG_INTERRUPT_DEC_IRQ_DIS, VDPU_REG_INTERRUPT);
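+	/* Clear the enable flags and soft-reset the decoder core. */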
+ vdpu_write(vpu, 0, VDPU_REG_EN_FLAGS);
+ vdpu_write(vpu, 1, VDPU_REG_SOFT_RESET);
+}
+
+static void rockchip_vpu2_enc_reset(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+
+ vepu_write(vpu, VEPU_REG_INTERRUPT_DIS_BIT, VEPU_REG_INTERRUPT);
+ vepu_write(vpu, 0, VEPU_REG_ENCODE_START);
+ vepu_write(vpu, 0, VEPU_REG_AXI_CTRL);
+}
+
+/*
+ * Supported codec ops.
+ */
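+/* Tables are indexed by enum hantro_codec_mode; unlisted modes stay zeroed. */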
+static const struct hantro_codec_ops rk3036_vpu_codec_ops[] = {
+ [HANTRO_MODE_H264_DEC] = {
+ .run = hantro_g1_h264_dec_run,
+ .reset = hantro_g1_reset,
+ .init = hantro_h264_dec_init,
+ .exit = hantro_h264_dec_exit,
+ },
+ [HANTRO_MODE_MPEG2_DEC] = {
+ .run = hantro_g1_mpeg2_dec_run,
+ .reset = hantro_g1_reset,
+ .init = hantro_mpeg2_dec_init,
+ .exit = hantro_mpeg2_dec_exit,
+ },
+ [HANTRO_MODE_VP8_DEC] = {
+ .run = hantro_g1_vp8_dec_run,
+ .reset = hantro_g1_reset,
+ .init = hantro_vp8_dec_init,
+ .exit = hantro_vp8_dec_exit,
+ },
+};
+
+static const struct hantro_codec_ops rk3066_vpu_codec_ops[] = {
+ [HANTRO_MODE_JPEG_ENC] = {
+ .run = hantro_h1_jpeg_enc_run,
+ .reset = rockchip_vpu1_enc_reset,
+ .done = hantro_h1_jpeg_enc_done,
+ },
+ [HANTRO_MODE_H264_DEC] = {
+ .run = hantro_g1_h264_dec_run,
+ .reset = rk3066_vpu_dec_reset,
+ .init = hantro_h264_dec_init,
+ .exit = hantro_h264_dec_exit,
+ },
+ [HANTRO_MODE_MPEG2_DEC] = {
+ .run = hantro_g1_mpeg2_dec_run,
+ .reset = rk3066_vpu_dec_reset,
+ .init = hantro_mpeg2_dec_init,
+ .exit = hantro_mpeg2_dec_exit,
+ },
+ [HANTRO_MODE_VP8_DEC] = {
+ .run = hantro_g1_vp8_dec_run,
+ .reset = rk3066_vpu_dec_reset,
+ .init = hantro_vp8_dec_init,
+ .exit = hantro_vp8_dec_exit,
+ },
+};
+
+static const struct hantro_codec_ops rk3288_vpu_codec_ops[] = {
+ [HANTRO_MODE_JPEG_ENC] = {
+ .run = hantro_h1_jpeg_enc_run,
+ .reset = rockchip_vpu1_enc_reset,
+ .done = hantro_h1_jpeg_enc_done,
+ },
+ [HANTRO_MODE_H264_DEC] = {
+ .run = hantro_g1_h264_dec_run,
+ .reset = hantro_g1_reset,
+ .init = hantro_h264_dec_init,
+ .exit = hantro_h264_dec_exit,
+ },
+ [HANTRO_MODE_MPEG2_DEC] = {
+ .run = hantro_g1_mpeg2_dec_run,
+ .reset = hantro_g1_reset,
+ .init = hantro_mpeg2_dec_init,
+ .exit = hantro_mpeg2_dec_exit,
+ },
+ [HANTRO_MODE_VP8_DEC] = {
+ .run = hantro_g1_vp8_dec_run,
+ .reset = hantro_g1_reset,
+ .init = hantro_vp8_dec_init,
+ .exit = hantro_vp8_dec_exit,
+ },
+};
+
+static const struct hantro_codec_ops rk3399_vpu_codec_ops[] = {
+ [HANTRO_MODE_JPEG_ENC] = {
+ .run = rockchip_vpu2_jpeg_enc_run,
+ .reset = rockchip_vpu2_enc_reset,
+ .done = rockchip_vpu2_jpeg_enc_done,
+ },
+ [HANTRO_MODE_H264_DEC] = {
+ .run = rockchip_vpu2_h264_dec_run,
+ .reset = rockchip_vpu2_dec_reset,
+ .init = hantro_h264_dec_init,
+ .exit = hantro_h264_dec_exit,
+ },
+ [HANTRO_MODE_MPEG2_DEC] = {
+ .run = rockchip_vpu2_mpeg2_dec_run,
+ .reset = rockchip_vpu2_dec_reset,
+ .init = hantro_mpeg2_dec_init,
+ .exit = hantro_mpeg2_dec_exit,
+ },
+ [HANTRO_MODE_VP8_DEC] = {
+ .run = rockchip_vpu2_vp8_dec_run,
+ .reset = rockchip_vpu2_dec_reset,
+ .init = hantro_vp8_dec_init,
+ .exit = hantro_vp8_dec_exit,
+ },
+};
+
+static const struct hantro_codec_ops rk3568_vepu_codec_ops[] = {
+ [HANTRO_MODE_JPEG_ENC] = {
+ .run = rockchip_vpu2_jpeg_enc_run,
+ .reset = rockchip_vpu2_enc_reset,
+ .done = rockchip_vpu2_jpeg_enc_done,
+ },
+};
+
+static const struct hantro_codec_ops rk3588_vpu981_codec_ops[] = {
+ [HANTRO_MODE_AV1_DEC] = {
+ .run = rockchip_vpu981_av1_dec_run,
+ .init = rockchip_vpu981_av1_dec_init,
+ .exit = rockchip_vpu981_av1_dec_exit,
+ .done = rockchip_vpu981_av1_dec_done,
+ },
+};
+
+/*
+ * VPU variant.
+ */
+
+static const struct hantro_irq rockchip_vdpu1_irqs[] = {
+ { "vdpu", hantro_g1_irq },
+};
+
+static const struct hantro_irq rockchip_vpu1_irqs[] = {
+ { "vepu", rockchip_vpu1_vepu_irq },
+ { "vdpu", hantro_g1_irq },
+};
+
+static const struct hantro_irq rockchip_vdpu2_irqs[] = {
+ { "vdpu", rockchip_vpu2_vdpu_irq },
+};
+
+static const struct hantro_irq rockchip_vpu2_irqs[] = {
+ { "vepu", rockchip_vpu2_vepu_irq },
+ { "vdpu", rockchip_vpu2_vdpu_irq },
+};
+
+static const struct hantro_irq rk3568_vepu_irqs[] = {
+ { "vepu", rockchip_vpu2_vepu_irq },
+};
+
+static const char * const rk3066_vpu_clk_names[] = {
+ "aclk_vdpu", "hclk_vdpu",
+ "aclk_vepu", "hclk_vepu"
+};
+
+static const struct hantro_irq rk3588_vpu981_irqs[] = {
+ { "vdpu", rk3588_vpu981_irq },
+};
+
+static const char * const rockchip_vpu_clk_names[] = {
+ "aclk", "hclk"
+};
+
+static const char * const rk3588_vpu981_vpu_clk_names[] = {
+ "aclk", "hclk",
+};
+
+/* VDPU1/VEPU1 */
+
+const struct hantro_variant rk3036_vpu_variant = {
+ .dec_offset = 0x400,
+ .dec_fmts = rk3066_vpu_dec_fmts,
+ .num_dec_fmts = ARRAY_SIZE(rk3066_vpu_dec_fmts),
+ .postproc_fmts = rockchip_vpu1_postproc_fmts,
+ .num_postproc_fmts = ARRAY_SIZE(rockchip_vpu1_postproc_fmts),
+ .postproc_ops = &hantro_g1_postproc_ops,
+ .codec = HANTRO_MPEG2_DECODER | HANTRO_VP8_DECODER |
+ HANTRO_H264_DECODER,
+ .codec_ops = rk3036_vpu_codec_ops,
+ .irqs = rockchip_vdpu1_irqs,
+ .num_irqs = ARRAY_SIZE(rockchip_vdpu1_irqs),
+ .init = rk3036_vpu_hw_init,
+ .clk_names = rockchip_vpu_clk_names,
+ .num_clocks = ARRAY_SIZE(rockchip_vpu_clk_names)
+};
+
+/*
+ * Although this variant has separate clocks for the decoder and encoder,
+ * all four of them must be enabled for either decoding or encoding, so
+ * it can't be split into separate G1/H1 variants.
+ */
+const struct hantro_variant rk3066_vpu_variant = {
+ .enc_offset = 0x0,
+ .enc_fmts = rockchip_vpu_enc_fmts,
+ .num_enc_fmts = ARRAY_SIZE(rockchip_vpu_enc_fmts),
+ .dec_offset = 0x400,
+ .dec_fmts = rk3066_vpu_dec_fmts,
+ .num_dec_fmts = ARRAY_SIZE(rk3066_vpu_dec_fmts),
+ .postproc_fmts = rockchip_vpu1_postproc_fmts,
+ .num_postproc_fmts = ARRAY_SIZE(rockchip_vpu1_postproc_fmts),
+ .postproc_ops = &hantro_g1_postproc_ops,
+ .codec = HANTRO_JPEG_ENCODER | HANTRO_MPEG2_DECODER |
+ HANTRO_VP8_DECODER | HANTRO_H264_DECODER,
+ .codec_ops = rk3066_vpu_codec_ops,
+ .irqs = rockchip_vpu1_irqs,
+ .num_irqs = ARRAY_SIZE(rockchip_vpu1_irqs),
+ .init = rk3066_vpu_hw_init,
+ .clk_names = rk3066_vpu_clk_names,
+ .num_clocks = ARRAY_SIZE(rk3066_vpu_clk_names)
+};
+
+const struct hantro_variant rk3288_vpu_variant = {
+ .enc_offset = 0x0,
+ .enc_fmts = rockchip_vpu_enc_fmts,
+ .num_enc_fmts = ARRAY_SIZE(rockchip_vpu_enc_fmts),
+ .dec_offset = 0x400,
+ .dec_fmts = rk3288_vpu_dec_fmts,
+ .num_dec_fmts = ARRAY_SIZE(rk3288_vpu_dec_fmts),
+ .postproc_fmts = rockchip_vpu1_postproc_fmts,
+ .num_postproc_fmts = ARRAY_SIZE(rockchip_vpu1_postproc_fmts),
+ .postproc_ops = &hantro_g1_postproc_ops,
+ .codec = HANTRO_JPEG_ENCODER | HANTRO_MPEG2_DECODER |
+ HANTRO_VP8_DECODER | HANTRO_H264_DECODER,
+ .codec_ops = rk3288_vpu_codec_ops,
+ .irqs = rockchip_vpu1_irqs,
+ .num_irqs = ARRAY_SIZE(rockchip_vpu1_irqs),
+ .init = rockchip_vpu_hw_init,
+ .clk_names = rockchip_vpu_clk_names,
+ .num_clocks = ARRAY_SIZE(rockchip_vpu_clk_names)
+};
+
+/* VDPU2/VEPU2 */
+
+const struct hantro_variant rk3328_vpu_variant = {
+ .dec_offset = 0x400,
+ .dec_fmts = rockchip_vdpu2_dec_fmts,
+ .num_dec_fmts = ARRAY_SIZE(rockchip_vdpu2_dec_fmts),
+ .codec = HANTRO_MPEG2_DECODER | HANTRO_VP8_DECODER |
+ HANTRO_H264_DECODER,
+ .codec_ops = rk3399_vpu_codec_ops,
+ .irqs = rockchip_vdpu2_irqs,
+ .num_irqs = ARRAY_SIZE(rockchip_vdpu2_irqs),
+ .init = rockchip_vpu_hw_init,
+ .clk_names = rockchip_vpu_clk_names,
+ .num_clocks = ARRAY_SIZE(rockchip_vpu_clk_names),
+};
+
+/*
+ * H.264 decoding is explicitly disabled on the RK3399.
+ * This ensures userspace applications use the Rockchip VDEC core,
+ * which has better performance.
+ */
+const struct hantro_variant rk3399_vpu_variant = {
+ .enc_offset = 0x0,
+ .enc_fmts = rockchip_vpu_enc_fmts,
+ .num_enc_fmts = ARRAY_SIZE(rockchip_vpu_enc_fmts),
+ .dec_offset = 0x400,
+ .dec_fmts = rk3399_vpu_dec_fmts,
+ .num_dec_fmts = ARRAY_SIZE(rk3399_vpu_dec_fmts),
+ .codec = HANTRO_JPEG_ENCODER | HANTRO_MPEG2_DECODER |
+ HANTRO_VP8_DECODER,
+ .codec_ops = rk3399_vpu_codec_ops,
+ .irqs = rockchip_vpu2_irqs,
+ .num_irqs = ARRAY_SIZE(rockchip_vpu2_irqs),
+ .init = rockchip_vpu_hw_init,
+ .clk_names = rockchip_vpu_clk_names,
+ .num_clocks = ARRAY_SIZE(rockchip_vpu_clk_names)
+};
+
+const struct hantro_variant rk3568_vepu_variant = {
+ .enc_offset = 0x0,
+ .enc_fmts = rockchip_vpu_enc_fmts,
+ .num_enc_fmts = ARRAY_SIZE(rockchip_vpu_enc_fmts),
+ .codec = HANTRO_JPEG_ENCODER,
+ .codec_ops = rk3568_vepu_codec_ops,
+ .irqs = rk3568_vepu_irqs,
+ .num_irqs = ARRAY_SIZE(rk3568_vepu_irqs),
+ .init = rockchip_vpu_hw_init,
+ .clk_names = rockchip_vpu_clk_names,
+ .num_clocks = ARRAY_SIZE(rockchip_vpu_clk_names)
+};
+
+const struct hantro_variant rk3568_vpu_variant = {
+ .dec_offset = 0x400,
+ .dec_fmts = rockchip_vdpu2_dec_fmts,
+ .num_dec_fmts = ARRAY_SIZE(rockchip_vdpu2_dec_fmts),
+ .codec = HANTRO_MPEG2_DECODER |
+ HANTRO_VP8_DECODER | HANTRO_H264_DECODER,
+ .codec_ops = rk3399_vpu_codec_ops,
+ .irqs = rockchip_vdpu2_irqs,
+ .num_irqs = ARRAY_SIZE(rockchip_vdpu2_irqs),
+ .init = rockchip_vpu_hw_init,
+ .clk_names = rockchip_vpu_clk_names,
+ .num_clocks = ARRAY_SIZE(rockchip_vpu_clk_names)
+};
+
+const struct hantro_variant px30_vpu_variant = {
+ .enc_offset = 0x0,
+ .enc_fmts = rockchip_vpu_enc_fmts,
+ .num_enc_fmts = ARRAY_SIZE(rockchip_vpu_enc_fmts),
+ .dec_offset = 0x400,
+ .dec_fmts = rockchip_vdpu2_dec_fmts,
+ .num_dec_fmts = ARRAY_SIZE(rockchip_vdpu2_dec_fmts),
+ .codec = HANTRO_JPEG_ENCODER | HANTRO_MPEG2_DECODER |
+ HANTRO_VP8_DECODER | HANTRO_H264_DECODER,
+ .codec_ops = rk3399_vpu_codec_ops,
+ .irqs = rockchip_vpu2_irqs,
+ .num_irqs = ARRAY_SIZE(rockchip_vpu2_irqs),
+ .init = rk3036_vpu_hw_init,
+ .clk_names = rockchip_vpu_clk_names,
+ .num_clocks = ARRAY_SIZE(rockchip_vpu_clk_names)
+};
+
+const struct hantro_variant rk3588_vpu981_variant = {
+ .dec_offset = 0x0,
+ .dec_fmts = rockchip_vpu981_dec_fmts,
+ .num_dec_fmts = ARRAY_SIZE(rockchip_vpu981_dec_fmts),
+ .postproc_fmts = rockchip_vpu981_postproc_fmts,
+ .num_postproc_fmts = ARRAY_SIZE(rockchip_vpu981_postproc_fmts),
+ .postproc_ops = &rockchip_vpu981_postproc_ops,
+ .codec = HANTRO_AV1_DECODER,
+ .codec_ops = rk3588_vpu981_codec_ops,
+ .irqs = rk3588_vpu981_irqs,
+ .num_irqs = ARRAY_SIZE(rk3588_vpu981_irqs),
+ .init = rk3588_vpu981_hw_init,
+ .clk_names = rk3588_vpu981_vpu_clk_names,
+ .num_clocks = ARRAY_SIZE(rk3588_vpu981_vpu_clk_names)
+};
diff --git a/drivers/media/platform/verisilicon/sama5d4_vdec_hw.c b/drivers/media/platform/verisilicon/sama5d4_vdec_hw.c
new file mode 100644
index 0000000000..b205e2db5b
--- /dev/null
+++ b/drivers/media/platform/verisilicon/sama5d4_vdec_hw.c
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hantro VDEC driver
+ *
+ * Copyright (C) 2021 Collabora Ltd, Emil Velikov <emil.velikov@collabora.com>
+ */
+
+#include "hantro.h"
+
+/*
+ * Supported formats.
+ */
+
+static const struct hantro_fmt sama5d4_vdec_postproc_fmts[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .codec_mode = HANTRO_MODE_NONE,
+ .postprocessed = true,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_HD_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_HD_HEIGHT,
+ .step_height = MB_DIM,
+ },
+ },
+};
+
+static const struct hantro_fmt sama5d4_vdec_fmts[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .codec_mode = HANTRO_MODE_NONE,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_HD_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_HD_HEIGHT,
+ .step_height = MB_DIM,
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_MPEG2_SLICE,
+ .codec_mode = HANTRO_MODE_MPEG2_DEC,
+ .max_depth = 2,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_HD_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_HD_HEIGHT,
+ .step_height = MB_DIM,
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_VP8_FRAME,
+ .codec_mode = HANTRO_MODE_VP8_DEC,
+ .max_depth = 2,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_HD_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_HD_HEIGHT,
+ .step_height = MB_DIM,
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_H264_SLICE,
+ .codec_mode = HANTRO_MODE_H264_DEC,
+ .max_depth = 2,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_HD_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_HD_HEIGHT,
+ .step_height = MB_DIM,
+ },
+ },
+};
+
+/*
+ * Supported codec ops.
+ */
+
+static const struct hantro_codec_ops sama5d4_vdec_codec_ops[] = {
+ [HANTRO_MODE_MPEG2_DEC] = {
+ .run = hantro_g1_mpeg2_dec_run,
+ .reset = hantro_g1_reset,
+ .init = hantro_mpeg2_dec_init,
+ .exit = hantro_mpeg2_dec_exit,
+ },
+ [HANTRO_MODE_VP8_DEC] = {
+ .run = hantro_g1_vp8_dec_run,
+ .reset = hantro_g1_reset,
+ .init = hantro_vp8_dec_init,
+ .exit = hantro_vp8_dec_exit,
+ },
+ [HANTRO_MODE_H264_DEC] = {
+ .run = hantro_g1_h264_dec_run,
+ .reset = hantro_g1_reset,
+ .init = hantro_h264_dec_init,
+ .exit = hantro_h264_dec_exit,
+ },
+};
+
+static const struct hantro_irq sama5d4_irqs[] = {
+ { "vdec", hantro_g1_irq },
+};
+
+static const char * const sama5d4_clk_names[] = { "vdec_clk" };
+
+const struct hantro_variant sama5d4_vdec_variant = {
+ .dec_fmts = sama5d4_vdec_fmts,
+ .num_dec_fmts = ARRAY_SIZE(sama5d4_vdec_fmts),
+ .postproc_fmts = sama5d4_vdec_postproc_fmts,
+ .num_postproc_fmts = ARRAY_SIZE(sama5d4_vdec_postproc_fmts),
+ .postproc_ops = &hantro_g1_postproc_ops,
+ .codec = HANTRO_MPEG2_DECODER | HANTRO_VP8_DECODER |
+ HANTRO_H264_DECODER,
+ .codec_ops = sama5d4_vdec_codec_ops,
+ .irqs = sama5d4_irqs,
+ .num_irqs = ARRAY_SIZE(sama5d4_irqs),
+ .clk_names = sama5d4_clk_names,
+ .num_clocks = ARRAY_SIZE(sama5d4_clk_names),
+};
diff --git a/drivers/media/platform/verisilicon/sunxi_vpu_hw.c b/drivers/media/platform/verisilicon/sunxi_vpu_hw.c
new file mode 100644
index 0000000000..02ce8b064a
--- /dev/null
+++ b/drivers/media/platform/verisilicon/sunxi_vpu_hw.c
@@ -0,0 +1,129 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Allwinner Hantro G2 VPU codec driver
+ *
+ * Copyright (C) 2021 Jernej Skrabec <jernej.skrabec@gmail.com>
+ */
+
+#include <linux/clk.h>
+
+#include "hantro.h"
+
+static const struct hantro_fmt sunxi_vpu_postproc_fmts[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .codec_mode = HANTRO_MODE_NONE,
+ .postprocessed = true,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_UHD_WIDTH,
+ .step_width = 32,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_UHD_HEIGHT,
+ .step_height = 32,
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_P010,
+ .codec_mode = HANTRO_MODE_NONE,
+ .postprocessed = true,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_UHD_WIDTH,
+ .step_width = 32,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_UHD_HEIGHT,
+ .step_height = 32,
+ },
+ },
+};
+
+static const struct hantro_fmt sunxi_vpu_dec_fmts[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_NV12_4L4,
+ .codec_mode = HANTRO_MODE_NONE,
+ .match_depth = true,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_UHD_WIDTH,
+ .step_width = 32,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_UHD_HEIGHT,
+ .step_height = 32,
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_P010_4L4,
+ .codec_mode = HANTRO_MODE_NONE,
+ .match_depth = true,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_UHD_WIDTH,
+ .step_width = 32,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_UHD_HEIGHT,
+ .step_height = 32,
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_VP9_FRAME,
+ .codec_mode = HANTRO_MODE_VP9_DEC,
+ .max_depth = 2,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_UHD_WIDTH,
+ .step_width = 32,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_UHD_HEIGHT,
+ .step_height = 32,
+ },
+ },
+};
+
+static int sunxi_vpu_hw_init(struct hantro_dev *vpu)
+{
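+	/* Bump the module clock to 300 MHz. */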
+ clk_set_rate(vpu->clocks[0].clk, 300000000);
+
+ return 0;
+}
+
+static void sunxi_vpu_reset(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+
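+	/* Pulse the VPU reset line via the SoC reset controller. */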
+ reset_control_reset(vpu->resets);
+}
+
+static const struct hantro_codec_ops sunxi_vpu_codec_ops[] = {
+ [HANTRO_MODE_VP9_DEC] = {
+ .run = hantro_g2_vp9_dec_run,
+ .done = hantro_g2_vp9_dec_done,
+ .reset = sunxi_vpu_reset,
+ .init = hantro_vp9_dec_init,
+ .exit = hantro_vp9_dec_exit,
+ },
+};
+
+static const struct hantro_irq sunxi_irqs[] = {
+ { NULL, hantro_g2_irq },
+};
+
+static const char * const sunxi_clk_names[] = { "mod", "bus" };
+
+const struct hantro_variant sunxi_vpu_variant = {
+ .dec_fmts = sunxi_vpu_dec_fmts,
+ .num_dec_fmts = ARRAY_SIZE(sunxi_vpu_dec_fmts),
+ .postproc_fmts = sunxi_vpu_postproc_fmts,
+ .num_postproc_fmts = ARRAY_SIZE(sunxi_vpu_postproc_fmts),
+ .postproc_ops = &hantro_g2_postproc_ops,
+ .codec = HANTRO_VP9_DECODER,
+ .codec_ops = sunxi_vpu_codec_ops,
+ .init = sunxi_vpu_hw_init,
+ .irqs = sunxi_irqs,
+ .num_irqs = ARRAY_SIZE(sunxi_irqs),
+ .clk_names = sunxi_clk_names,
+ .num_clocks = ARRAY_SIZE(sunxi_clk_names),
+ .double_buffer = 1,
+ .legacy_regs = 1,
+ .late_postproc = 1,
+};