path: root/drivers/gpu/drm/tegra
author    Daniel Baumann <daniel.baumann@progress-linux.org>    2024-04-07 18:49:45 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>    2024-04-07 18:49:45 +0000
commit    2c3c1048746a4622d8c89a29670120dc8fab93c4 (patch)
tree      848558de17fb3008cdf4d861b01ac7781903ce39 /drivers/gpu/drm/tegra
parent    Initial commit. (diff)
download  linux-2c3c1048746a4622d8c89a29670120dc8fab93c4.tar.xz
          linux-2c3c1048746a4622d8c89a29670120dc8fab93c4.zip
Adding upstream version 6.1.76. (upstream/6.1.76)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat
-rw-r--r-- drivers/gpu/drm/tegra/Kconfig | 43
-rw-r--r-- drivers/gpu/drm/tegra/Makefile | 31
-rw-r--r-- drivers/gpu/drm/tegra/dc.c | 3296
-rw-r--r-- drivers/gpu/drm/tegra/dc.h | 840
-rw-r--r-- drivers/gpu/drm/tegra/dp.c | 885
-rw-r--r-- drivers/gpu/drm/tegra/dp.h | 177
-rw-r--r-- drivers/gpu/drm/tegra/dpaux.c | 827
-rw-r--r-- drivers/gpu/drm/tegra/dpaux.h | 73
-rw-r--r-- drivers/gpu/drm/tegra/drm.c | 1447
-rw-r--r-- drivers/gpu/drm/tegra/drm.h | 218
-rw-r--r-- drivers/gpu/drm/tegra/dsi.c | 1700
-rw-r--r-- drivers/gpu/drm/tegra/dsi.h | 143
-rw-r--r-- drivers/gpu/drm/tegra/falcon.c | 231
-rw-r--r-- drivers/gpu/drm/tegra/falcon.h | 115
-rw-r--r-- drivers/gpu/drm/tegra/fb.c | 414
-rw-r--r-- drivers/gpu/drm/tegra/firewall.c | 257
-rw-r--r-- drivers/gpu/drm/tegra/gem.c | 784
-rw-r--r-- drivers/gpu/drm/tegra/gem.h | 85
-rw-r--r-- drivers/gpu/drm/tegra/gr2d.c | 407
-rw-r--r-- drivers/gpu/drm/tegra/gr2d.h | 26
-rw-r--r-- drivers/gpu/drm/tegra/gr3d.c | 642
-rw-r--r-- drivers/gpu/drm/tegra/gr3d.h | 24
-rw-r--r-- drivers/gpu/drm/tegra/hda.c | 63
-rw-r--r-- drivers/gpu/drm/tegra/hda.h | 20
-rw-r--r-- drivers/gpu/drm/tegra/hdmi.c | 1908
-rw-r--r-- drivers/gpu/drm/tegra/hdmi.h | 557
-rw-r--r-- drivers/gpu/drm/tegra/hub.c | 1230
-rw-r--r-- drivers/gpu/drm/tegra/hub.h | 97
-rw-r--r-- drivers/gpu/drm/tegra/mipi-phy.c | 134
-rw-r--r-- drivers/gpu/drm/tegra/mipi-phy.h | 48
-rw-r--r-- drivers/gpu/drm/tegra/nvdec.c | 478
-rw-r--r-- drivers/gpu/drm/tegra/output.c | 265
-rw-r--r-- drivers/gpu/drm/tegra/plane.c | 798
-rw-r--r-- drivers/gpu/drm/tegra/plane.h | 98
-rw-r--r-- drivers/gpu/drm/tegra/rgb.c | 355
-rw-r--r-- drivers/gpu/drm/tegra/sor.c | 4063
-rw-r--r-- drivers/gpu/drm/tegra/sor.h | 457
-rw-r--r-- drivers/gpu/drm/tegra/submit.c | 698
-rw-r--r-- drivers/gpu/drm/tegra/submit.h | 21
-rw-r--r-- drivers/gpu/drm/tegra/trace.c | 2
-rw-r--r-- drivers/gpu/drm/tegra/trace.h | 68
-rw-r--r-- drivers/gpu/drm/tegra/uapi.c | 359
-rw-r--r-- drivers/gpu/drm/tegra/uapi.h | 57
-rw-r--r-- drivers/gpu/drm/tegra/vic.c | 597
-rw-r--r-- drivers/gpu/drm/tegra/vic.h | 36
45 files changed, 25074 insertions, 0 deletions
diff --git a/drivers/gpu/drm/tegra/Kconfig b/drivers/gpu/drm/tegra/Kconfig
new file mode 100644
index 000000000..c36323f1c
--- /dev/null
+++ b/drivers/gpu/drm/tegra/Kconfig
@@ -0,0 +1,43 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config DRM_TEGRA
+ tristate "NVIDIA Tegra DRM"
+ depends on ARCH_TEGRA || (ARM && COMPILE_TEST)
+ depends on COMMON_CLK
+ depends on DRM
+ depends on OF
+ select DRM_DISPLAY_DP_HELPER
+ select DRM_DISPLAY_HDMI_HELPER
+ select DRM_DISPLAY_HELPER
+ select DRM_DP_AUX_BUS
+ select DRM_KMS_HELPER
+ select DRM_MIPI_DSI
+ select DRM_PANEL
+ select TEGRA_HOST1X
+ select INTERCONNECT
+ select IOMMU_IOVA
+ select CEC_CORE if CEC_NOTIFIER
+ select SND_SIMPLE_CARD if SND_SOC_TEGRA20_SPDIF
+ select SND_SOC_HDMI_CODEC if SND_SOC_TEGRA20_SPDIF
+ select SND_AUDIO_GRAPH_CARD if SND_SOC_TEGRA20_SPDIF
+ help
+ Choose this option if you have an NVIDIA Tegra SoC.
+
+ To compile this driver as a module, choose M here: the module
+ will be called tegra-drm.
+
+if DRM_TEGRA
+
+config DRM_TEGRA_DEBUG
+ bool "NVIDIA Tegra DRM debug support"
+ help
+ Say yes here to enable debugging support.
+
+config DRM_TEGRA_STAGING
+ bool "Enable HOST1X interface"
+ depends on STAGING
+ help
+ Say yes if HOST1X should be available for userspace DRM users.
+
+ If unsure, choose N.
+
+endif
diff --git a/drivers/gpu/drm/tegra/Makefile b/drivers/gpu/drm/tegra/Makefile
new file mode 100644
index 000000000..df6cc986a
--- /dev/null
+++ b/drivers/gpu/drm/tegra/Makefile
@@ -0,0 +1,31 @@
+# SPDX-License-Identifier: GPL-2.0
+ccflags-$(CONFIG_DRM_TEGRA_DEBUG) += -DDEBUG
+
+tegra-drm-y := \
+ drm.o \
+ uapi.o \
+ submit.o \
+ firewall.o \
+ gem.o \
+ fb.o \
+ dp.o \
+ hub.o \
+ plane.o \
+ dc.o \
+ output.o \
+ rgb.o \
+ hda.o \
+ hdmi.o \
+ mipi-phy.o \
+ dsi.o \
+ sor.o \
+ dpaux.o \
+ gr2d.o \
+ gr3d.o \
+ falcon.o \
+ vic.o \
+ nvdec.o
+
+tegra-drm-y += trace.o
+
+obj-$(CONFIG_DRM_TEGRA) += tegra-drm.o
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
new file mode 100644
index 000000000..a67453cee
--- /dev/null
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -0,0 +1,3296 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/iommu.h>
+#include <linux/interconnect.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_opp.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+
+#include <soc/tegra/common.h>
+#include <soc/tegra/pmc.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_blend.h>
+#include <drm/drm_debugfs.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_vblank.h>
+
+#include "dc.h"
+#include "drm.h"
+#include "gem.h"
+#include "hub.h"
+#include "plane.h"
+
+static void tegra_crtc_atomic_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state);
+
+static void tegra_dc_stats_reset(struct tegra_dc_stats *stats)
+{
+ stats->frames = 0;
+ stats->vblank = 0;
+ stats->underflow = 0;
+ stats->overflow = 0;
+}
+
+/* Reads the active copy of a register. */
+static u32 tegra_dc_readl_active(struct tegra_dc *dc, unsigned long offset)
+{
+ u32 value;
+
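+ /* select the ACTIVE copy for reads, then restore access to the ASSEMBLY copy */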
+ tegra_dc_writel(dc, READ_MUX, DC_CMD_STATE_ACCESS);
+ value = tegra_dc_readl(dc, offset);
+ tegra_dc_writel(dc, 0, DC_CMD_STATE_ACCESS);
+
+ return value;
+}
+
+static inline unsigned int tegra_plane_offset(struct tegra_plane *plane,
+ unsigned int offset)
+{
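+ /* rebase the global window register offsets onto this plane's register aperture */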
+ if (offset >= 0x500 && offset <= 0x638) {
+ offset = 0x000 + (offset - 0x500);
+ return plane->offset + offset;
+ }
+
+ if (offset >= 0x700 && offset <= 0x719) {
+ offset = 0x180 + (offset - 0x700);
+ return plane->offset + offset;
+ }
+
+ if (offset >= 0x800 && offset <= 0x839) {
+ offset = 0x1c0 + (offset - 0x800);
+ return plane->offset + offset;
+ }
+
+ dev_WARN(plane->dc->dev, "invalid offset: %x\n", offset);
+
+ return plane->offset + offset;
+}
+
+static inline u32 tegra_plane_readl(struct tegra_plane *plane,
+ unsigned int offset)
+{
+ return tegra_dc_readl(plane->dc, tegra_plane_offset(plane, offset));
+}
+
+static inline void tegra_plane_writel(struct tegra_plane *plane, u32 value,
+ unsigned int offset)
+{
+ tegra_dc_writel(plane->dc, value, tegra_plane_offset(plane, offset));
+}
+
+bool tegra_dc_has_output(struct tegra_dc *dc, struct device *dev)
+{
+ struct device_node *np = dc->dev->of_node;
+ struct of_phandle_iterator it;
+ int err;
+
+ of_for_each_phandle(&it, err, np, "nvidia,outputs", NULL, 0)
+ if (it.node == dev->of_node)
+ return true;
+
+ return false;
+}
+
+/*
+ * Double-buffered registers have two copies: ASSEMBLY and ACTIVE. When the
+ * *_ACT_REQ bits are set the ASSEMBLY copy is latched into the ACTIVE copy.
+ * Latching happens immediately if the display controller is in STOP mode, or
+ * at the next frame boundary otherwise.
+ *
+ * Triple-buffered registers have three copies: ASSEMBLY, ARM and ACTIVE. The
+ * ASSEMBLY copy is latched into the ARM copy immediately after *_UPDATE bits
+ * are written. When the *_ACT_REQ bits are written, the ARM copy is latched
+ * into the ACTIVE copy, either immediately if the display controller is in
+ * STOP mode, or at the next frame boundary otherwise.
+ */
+void tegra_dc_commit(struct tegra_dc *dc)
+{
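+ /*
+ * GENERAL_ACT_REQ << 8 is the GENERAL_UPDATE bit: latch the ASSEMBLY
+ * copy into ARM, then request activation of the ARM copy.
+ */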
+ tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
+ tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
+}
+
+static inline u32 compute_dda_inc(unsigned int in, unsigned int out, bool v,
+ unsigned int bpp)
+{
+ fixed20_12 outf = dfixed_init(out);
+ fixed20_12 inf = dfixed_init(in);
+ u32 dda_inc;
+ int max;
+
+ if (v)
+ max = 15;
+ else {
+ switch (bpp) {
+ case 2:
+ max = 8;
+ break;
+
+ default:
+ WARN_ON_ONCE(1);
+ fallthrough;
+ case 4:
+ max = 4;
+ break;
+ }
+ }
+
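+ /*
+ * The DDA steps between pixel centres, so the increment is computed
+ * as (in - 1) / (out - 1) and clamped to the hardware maximum.
+ */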
+ outf.full = max_t(u32, outf.full - dfixed_const(1), dfixed_const(1));
+ inf.full -= dfixed_const(1);
+
+ dda_inc = dfixed_div(inf, outf);
+ dda_inc = min_t(u32, dda_inc, dfixed_const(max));
+
+ return dda_inc;
+}
+
+static inline u32 compute_initial_dda(unsigned int in)
+{
+ fixed20_12 inf = dfixed_init(in);
+ return dfixed_frac(inf);
+}
+
+static void tegra_plane_setup_blending_legacy(struct tegra_plane *plane)
+{
+ u32 background[3] = {
+ BLEND_WEIGHT1(0) | BLEND_WEIGHT0(0) | BLEND_COLOR_KEY_NONE,
+ BLEND_WEIGHT1(0) | BLEND_WEIGHT0(0) | BLEND_COLOR_KEY_NONE,
+ BLEND_WEIGHT1(0) | BLEND_WEIGHT0(0) | BLEND_COLOR_KEY_NONE,
+ };
+ u32 foreground = BLEND_WEIGHT1(255) | BLEND_WEIGHT0(255) |
+ BLEND_COLOR_KEY_NONE;
+ u32 blendnokey = BLEND_WEIGHT1(255) | BLEND_WEIGHT0(255);
+ struct tegra_plane_state *state;
+ u32 blending[2];
+ unsigned int i;
+
+ /* disable blending for non-overlapping case */
+ tegra_plane_writel(plane, blendnokey, DC_WIN_BLEND_NOKEY);
+ tegra_plane_writel(plane, foreground, DC_WIN_BLEND_1WIN);
+
+ state = to_tegra_plane_state(plane->base.state);
+
+ if (state->opaque) {
+ /*
+ * Since custom fixed-weight blending isn't used and the weight
+ * of the top window is set to the maximum, we can enforce
+ * dependent blending. In that case the bottom window becomes
+ * transparent where the top window is opaque, and where the top
+ * window uses alpha blending, the bottom window gets an alpha
+ * value of 1 minus the sum of the alpha components of the
+ * overlapping planes.
+ */
+ background[0] |= BLEND_CONTROL_DEPENDENT;
+ background[1] |= BLEND_CONTROL_DEPENDENT;
+
+ /*
+ * The region where three windows overlap is the intersection
+ * of the two regions where two windows overlap. It contributes
+ * to the area if all of the windows on top of it have an alpha
+ * component.
+ */
+ switch (state->base.normalized_zpos) {
+ case 0:
+ if (state->blending[0].alpha &&
+ state->blending[1].alpha)
+ background[2] |= BLEND_CONTROL_DEPENDENT;
+ break;
+
+ case 1:
+ background[2] |= BLEND_CONTROL_DEPENDENT;
+ break;
+ }
+ } else {
+ /*
+ * Enable alpha blending if pixel format has an alpha
+ * component.
+ */
+ foreground |= BLEND_CONTROL_ALPHA;
+
+ /*
+ * If any of the windows on top of this window is opaque, it
+ * will completely conceal this window within that area. If
+ * top window has an alpha component, it is blended over the
+ * bottom window.
+ */
+ for (i = 0; i < 2; i++) {
+ if (state->blending[i].alpha &&
+ state->blending[i].top)
+ background[i] |= BLEND_CONTROL_DEPENDENT;
+ }
+
+ switch (state->base.normalized_zpos) {
+ case 0:
+ if (state->blending[0].alpha &&
+ state->blending[1].alpha)
+ background[2] |= BLEND_CONTROL_DEPENDENT;
+ break;
+
+ case 1:
+ /*
+ * When both the middle and the topmost window have an
+ * alpha component, these windows are mixed together and
+ * the result is then blended over the bottom window.
+ */
+ if (state->blending[0].alpha &&
+ state->blending[0].top)
+ background[2] |= BLEND_CONTROL_ALPHA;
+
+ if (state->blending[1].alpha &&
+ state->blending[1].top)
+ background[2] |= BLEND_CONTROL_ALPHA;
+ break;
+ }
+ }
+
+ switch (state->base.normalized_zpos) {
+ case 0:
+ tegra_plane_writel(plane, background[0], DC_WIN_BLEND_2WIN_X);
+ tegra_plane_writel(plane, background[1], DC_WIN_BLEND_2WIN_Y);
+ tegra_plane_writel(plane, background[2], DC_WIN_BLEND_3WIN_XY);
+ break;
+
+ case 1:
+ /*
+ * If window B / C is topmost, the X / Y registers match
+ * the order of the blending[...] state indices; otherwise
+ * a swap is required.
+ */
+ if (!state->blending[0].top && state->blending[1].top) {
+ blending[0] = foreground;
+ blending[1] = background[1];
+ } else {
+ blending[0] = background[0];
+ blending[1] = foreground;
+ }
+
+ tegra_plane_writel(plane, blending[0], DC_WIN_BLEND_2WIN_X);
+ tegra_plane_writel(plane, blending[1], DC_WIN_BLEND_2WIN_Y);
+ tegra_plane_writel(plane, background[2], DC_WIN_BLEND_3WIN_XY);
+ break;
+
+ case 2:
+ tegra_plane_writel(plane, foreground, DC_WIN_BLEND_2WIN_X);
+ tegra_plane_writel(plane, foreground, DC_WIN_BLEND_2WIN_Y);
+ tegra_plane_writel(plane, foreground, DC_WIN_BLEND_3WIN_XY);
+ break;
+ }
+}
+
+static void tegra_plane_setup_blending(struct tegra_plane *plane,
+ const struct tegra_dc_window *window)
+{
+ u32 value;
+
+ value = BLEND_FACTOR_DST_ALPHA_ZERO | BLEND_FACTOR_SRC_ALPHA_K2 |
+ BLEND_FACTOR_DST_COLOR_NEG_K1_TIMES_SRC |
+ BLEND_FACTOR_SRC_COLOR_K1_TIMES_SRC;
+ tegra_plane_writel(plane, value, DC_WIN_BLEND_MATCH_SELECT);
+
+ value = BLEND_FACTOR_DST_ALPHA_ZERO | BLEND_FACTOR_SRC_ALPHA_K2 |
+ BLEND_FACTOR_DST_COLOR_NEG_K1_TIMES_SRC |
+ BLEND_FACTOR_SRC_COLOR_K1_TIMES_SRC;
+ tegra_plane_writel(plane, value, DC_WIN_BLEND_NOMATCH_SELECT);
+
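+ /*
+ * Use the maximum blend weights; the layer depth is inverted so
+ * that a higher zpos ends up closer to the top.
+ */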
+ value = K2(255) | K1(255) | WINDOW_LAYER_DEPTH(255 - window->zpos);
+ tegra_plane_writel(plane, value, DC_WIN_BLEND_LAYER_CONTROL);
+}
+
+static bool
+tegra_plane_use_horizontal_filtering(struct tegra_plane *plane,
+ const struct tegra_dc_window *window)
+{
+ struct tegra_dc *dc = plane->dc;
+
+ if (window->src.w == window->dst.w)
+ return false;
+
+ if (plane->index == 0 && dc->soc->has_win_a_without_filters)
+ return false;
+
+ return true;
+}
+
+static bool
+tegra_plane_use_vertical_filtering(struct tegra_plane *plane,
+ const struct tegra_dc_window *window)
+{
+ struct tegra_dc *dc = plane->dc;
+
+ if (window->src.h == window->dst.h)
+ return false;
+
+ if (plane->index == 0 && dc->soc->has_win_a_without_filters)
+ return false;
+
+ if (plane->index == 2 && dc->soc->has_win_c_without_vert_filter)
+ return false;
+
+ return true;
+}
+
+static void tegra_dc_setup_window(struct tegra_plane *plane,
+ const struct tegra_dc_window *window)
+{
+ unsigned h_offset, v_offset, h_size, v_size, h_dda, v_dda, bpp;
+ struct tegra_dc *dc = plane->dc;
+ unsigned int planes;
+ u32 value;
+ bool yuv;
+
+ /*
+ * For YUV planar modes, the number of bytes per pixel takes into
+ * account only the luma component and therefore is 1. Packed YUV
+ * modes use 2 bytes per pixel.
+ */
+ yuv = tegra_plane_format_is_yuv(window->format, &planes, NULL);
+ if (!yuv)
+ bpp = window->bits_per_pixel / 8;
+ else
+ bpp = (planes > 1) ? 1 : 2;
+
+ tegra_plane_writel(plane, window->format, DC_WIN_COLOR_DEPTH);
+ tegra_plane_writel(plane, window->swap, DC_WIN_BYTE_SWAP);
+
+ value = V_POSITION(window->dst.y) | H_POSITION(window->dst.x);
+ tegra_plane_writel(plane, value, DC_WIN_POSITION);
+
+ value = V_SIZE(window->dst.h) | H_SIZE(window->dst.w);
+ tegra_plane_writel(plane, value, DC_WIN_SIZE);
+
+ h_offset = window->src.x * bpp;
+ v_offset = window->src.y;
+ h_size = window->src.w * bpp;
+ v_size = window->src.h;
+
+ if (window->reflect_x)
+ h_offset += (window->src.w - 1) * bpp;
+
+ if (window->reflect_y)
+ v_offset += window->src.h - 1;
+
+ value = V_PRESCALED_SIZE(v_size) | H_PRESCALED_SIZE(h_size);
+ tegra_plane_writel(plane, value, DC_WIN_PRESCALED_SIZE);
+
+ /*
+ * For DDA computations the number of bytes per pixel for YUV planar
+ * modes needs to take into account all Y, U and V components.
+ */
+ if (yuv && planes > 1)
+ bpp = 2;
+
+ h_dda = compute_dda_inc(window->src.w, window->dst.w, false, bpp);
+ v_dda = compute_dda_inc(window->src.h, window->dst.h, true, bpp);
+
+ value = V_DDA_INC(v_dda) | H_DDA_INC(h_dda);
+ tegra_plane_writel(plane, value, DC_WIN_DDA_INC);
+
+ h_dda = compute_initial_dda(window->src.x);
+ v_dda = compute_initial_dda(window->src.y);
+
+ tegra_plane_writel(plane, h_dda, DC_WIN_H_INITIAL_DDA);
+ tegra_plane_writel(plane, v_dda, DC_WIN_V_INITIAL_DDA);
+
+ tegra_plane_writel(plane, 0, DC_WIN_UV_BUF_STRIDE);
+ tegra_plane_writel(plane, 0, DC_WIN_BUF_STRIDE);
+
+ tegra_plane_writel(plane, window->base[0], DC_WINBUF_START_ADDR);
+
+ if (yuv && planes > 1) {
+ tegra_plane_writel(plane, window->base[1], DC_WINBUF_START_ADDR_U);
+
+ if (planes > 2)
+ tegra_plane_writel(plane, window->base[2], DC_WINBUF_START_ADDR_V);
+
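+ /* chroma stride in the upper half-word, luma stride in the lower */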
+ value = window->stride[1] << 16 | window->stride[0];
+ tegra_plane_writel(plane, value, DC_WIN_LINE_STRIDE);
+ } else {
+ tegra_plane_writel(plane, window->stride[0], DC_WIN_LINE_STRIDE);
+ }
+
+ tegra_plane_writel(plane, h_offset, DC_WINBUF_ADDR_H_OFFSET);
+ tegra_plane_writel(plane, v_offset, DC_WINBUF_ADDR_V_OFFSET);
+
+ if (dc->soc->supports_block_linear) {
+ unsigned long height = window->tiling.value;
+
+ switch (window->tiling.mode) {
+ case TEGRA_BO_TILING_MODE_PITCH:
+ value = DC_WINBUF_SURFACE_KIND_PITCH;
+ break;
+
+ case TEGRA_BO_TILING_MODE_TILED:
+ value = DC_WINBUF_SURFACE_KIND_TILED;
+ break;
+
+ case TEGRA_BO_TILING_MODE_BLOCK:
+ value = DC_WINBUF_SURFACE_KIND_BLOCK_HEIGHT(height) |
+ DC_WINBUF_SURFACE_KIND_BLOCK;
+ break;
+ }
+
+ tegra_plane_writel(plane, value, DC_WINBUF_SURFACE_KIND);
+ } else {
+ switch (window->tiling.mode) {
+ case TEGRA_BO_TILING_MODE_PITCH:
+ value = DC_WIN_BUFFER_ADDR_MODE_LINEAR_UV |
+ DC_WIN_BUFFER_ADDR_MODE_LINEAR;
+ break;
+
+ case TEGRA_BO_TILING_MODE_TILED:
+ value = DC_WIN_BUFFER_ADDR_MODE_TILE_UV |
+ DC_WIN_BUFFER_ADDR_MODE_TILE;
+ break;
+
+ case TEGRA_BO_TILING_MODE_BLOCK:
+ /*
+ * No need to handle this here because ->atomic_check
+ * will already have filtered it out.
+ */
+ break;
+ }
+
+ tegra_plane_writel(plane, value, DC_WIN_BUFFER_ADDR_MODE);
+ }
+
+ value = WIN_ENABLE;
+
+ if (yuv) {
+ /* setup default colorspace conversion coefficients */
+ tegra_plane_writel(plane, 0x00f0, DC_WIN_CSC_YOF);
+ tegra_plane_writel(plane, 0x012a, DC_WIN_CSC_KYRGB);
+ tegra_plane_writel(plane, 0x0000, DC_WIN_CSC_KUR);
+ tegra_plane_writel(plane, 0x0198, DC_WIN_CSC_KVR);
+ tegra_plane_writel(plane, 0x039b, DC_WIN_CSC_KUG);
+ tegra_plane_writel(plane, 0x032f, DC_WIN_CSC_KVG);
+ tegra_plane_writel(plane, 0x0204, DC_WIN_CSC_KUB);
+ tegra_plane_writel(plane, 0x0000, DC_WIN_CSC_KVB);
+
+ value |= CSC_ENABLE;
+ } else if (window->bits_per_pixel < 24) {
+ value |= COLOR_EXPAND;
+ }
+
+ if (window->reflect_x)
+ value |= H_DIRECTION;
+
+ if (window->reflect_y)
+ value |= V_DIRECTION;
+
+ if (tegra_plane_use_horizontal_filtering(plane, window)) {
+ /*
+ * Enable horizontal 6-tap filter and set filtering
+ * coefficients to the default values defined in TRM.
+ */
+ tegra_plane_writel(plane, 0x00008000, DC_WIN_H_FILTER_P(0));
+ tegra_plane_writel(plane, 0x3e087ce1, DC_WIN_H_FILTER_P(1));
+ tegra_plane_writel(plane, 0x3b117ac1, DC_WIN_H_FILTER_P(2));
+ tegra_plane_writel(plane, 0x591b73aa, DC_WIN_H_FILTER_P(3));
+ tegra_plane_writel(plane, 0x57256d9a, DC_WIN_H_FILTER_P(4));
+ tegra_plane_writel(plane, 0x552f668b, DC_WIN_H_FILTER_P(5));
+ tegra_plane_writel(plane, 0x73385e8b, DC_WIN_H_FILTER_P(6));
+ tegra_plane_writel(plane, 0x72435583, DC_WIN_H_FILTER_P(7));
+ tegra_plane_writel(plane, 0x714c4c8b, DC_WIN_H_FILTER_P(8));
+ tegra_plane_writel(plane, 0x70554393, DC_WIN_H_FILTER_P(9));
+ tegra_plane_writel(plane, 0x715e389b, DC_WIN_H_FILTER_P(10));
+ tegra_plane_writel(plane, 0x71662faa, DC_WIN_H_FILTER_P(11));
+ tegra_plane_writel(plane, 0x536d25ba, DC_WIN_H_FILTER_P(12));
+ tegra_plane_writel(plane, 0x55731bca, DC_WIN_H_FILTER_P(13));
+ tegra_plane_writel(plane, 0x387a11d9, DC_WIN_H_FILTER_P(14));
+ tegra_plane_writel(plane, 0x3c7c08f1, DC_WIN_H_FILTER_P(15));
+
+ value |= H_FILTER;
+ }
+
+ if (tegra_plane_use_vertical_filtering(plane, window)) {
+ unsigned int i, k;
+
+ /*
+ * Enable vertical 2-tap filter and set filtering
+ * coefficients to the default values defined in TRM.
+ */
+ for (i = 0, k = 128; i < 16; i++, k -= 8)
+ tegra_plane_writel(plane, k, DC_WIN_V_FILTER_P(i));
+
+ value |= V_FILTER;
+ }
+
+ tegra_plane_writel(plane, value, DC_WIN_WIN_OPTIONS);
+
+ if (dc->soc->has_legacy_blending)
+ tegra_plane_setup_blending_legacy(plane);
+ else
+ tegra_plane_setup_blending(plane, window);
+}
+
+static const u32 tegra20_primary_formats[] = {
+ DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_RGBA5551,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_ARGB8888,
+ /* non-native formats */
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_RGBX5551,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_XRGB8888,
+};
+
+static const u64 tegra20_modifiers[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_NVIDIA_TEGRA_TILED,
+ DRM_FORMAT_MOD_INVALID
+};
+
+static const u32 tegra114_primary_formats[] = {
+ DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_RGBA5551,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_ARGB8888,
+ /* new on Tegra114 */
+ DRM_FORMAT_ABGR4444,
+ DRM_FORMAT_ABGR1555,
+ DRM_FORMAT_BGRA5551,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_RGBX5551,
+ DRM_FORMAT_XBGR1555,
+ DRM_FORMAT_BGRX5551,
+ DRM_FORMAT_BGR565,
+ DRM_FORMAT_BGRA8888,
+ DRM_FORMAT_RGBA8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+};
+
+static const u32 tegra124_primary_formats[] = {
+ DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_RGBA5551,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_ARGB8888,
+ /* new on Tegra114 */
+ DRM_FORMAT_ABGR4444,
+ DRM_FORMAT_ABGR1555,
+ DRM_FORMAT_BGRA5551,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_RGBX5551,
+ DRM_FORMAT_XBGR1555,
+ DRM_FORMAT_BGRX5551,
+ DRM_FORMAT_BGR565,
+ DRM_FORMAT_BGRA8888,
+ DRM_FORMAT_RGBA8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ /* new on Tegra124 */
+ DRM_FORMAT_RGBX8888,
+ DRM_FORMAT_BGRX8888,
+};
+
+static const u64 tegra124_modifiers[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0),
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(1),
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(2),
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3),
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4),
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5),
+ DRM_FORMAT_MOD_INVALID
+};
+
+static int tegra_plane_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
+ plane);
+ struct tegra_plane_state *plane_state = to_tegra_plane_state(new_plane_state);
+ unsigned int supported_rotation = DRM_MODE_ROTATE_0 |
+ DRM_MODE_REFLECT_X |
+ DRM_MODE_REFLECT_Y;
+ unsigned int rotation = new_plane_state->rotation;
+ struct tegra_bo_tiling *tiling = &plane_state->tiling;
+ struct tegra_plane *tegra = to_tegra_plane(plane);
+ struct tegra_dc *dc = to_tegra_dc(new_plane_state->crtc);
+ int err;
+
+ plane_state->peak_memory_bandwidth = 0;
+ plane_state->avg_memory_bandwidth = 0;
+
+ /* no need for further checks if the plane is being disabled */
+ if (!new_plane_state->crtc) {
+ plane_state->total_peak_memory_bandwidth = 0;
+ return 0;
+ }
+
+ err = tegra_plane_format(new_plane_state->fb->format->format,
+ &plane_state->format,
+ &plane_state->swap);
+ if (err < 0)
+ return err;
+
+ /*
+ * Tegra20 and Tegra30 are special cases here because they support
+ * only variants of specific formats with an alpha component, but not
+ * the corresponding opaque formats. However, the opaque formats can
+ * be emulated by disabling alpha blending for the plane.
+ */
+ if (dc->soc->has_legacy_blending) {
+ err = tegra_plane_setup_legacy_state(tegra, plane_state);
+ if (err < 0)
+ return err;
+ }
+
+ err = tegra_fb_get_tiling(new_plane_state->fb, tiling);
+ if (err < 0)
+ return err;
+
+ if (tiling->mode == TEGRA_BO_TILING_MODE_BLOCK &&
+ !dc->soc->supports_block_linear) {
+ DRM_ERROR("hardware doesn't support block linear mode\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Older userspace used a custom BO flag in order to specify the Y
+ * reflection, while modern userspace uses the generic DRM rotation
+ * property in order to achieve the same result. The legacy BO flag
+ * duplicates the DRM rotation property when both are set.
+ */
+ if (tegra_fb_is_bottom_up(new_plane_state->fb))
+ rotation |= DRM_MODE_REFLECT_Y;
+
+ rotation = drm_rotation_simplify(rotation, supported_rotation);
+
+ if (rotation & DRM_MODE_REFLECT_X)
+ plane_state->reflect_x = true;
+ else
+ plane_state->reflect_x = false;
+
+ if (rotation & DRM_MODE_REFLECT_Y)
+ plane_state->reflect_y = true;
+ else
+ plane_state->reflect_y = false;
+
+ /*
+ * Tegra doesn't support different strides for U and V planes so we
+ * error out if the user tries to display a framebuffer with such a
+ * configuration.
+ */
+ if (new_plane_state->fb->format->num_planes > 2) {
+ if (new_plane_state->fb->pitches[2] != new_plane_state->fb->pitches[1]) {
+ DRM_ERROR("unsupported UV-plane configuration\n");
+ return -EINVAL;
+ }
+ }
+
+ err = tegra_plane_state_add(tegra, new_plane_state);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static void tegra_plane_atomic_disable(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
+ plane);
+ struct tegra_plane *p = to_tegra_plane(plane);
+ u32 value;
+
+ /* nothing to do if the plane isn't currently enabled */
+ if (!old_state || !old_state->crtc)
+ return;
+
+ value = tegra_plane_readl(p, DC_WIN_WIN_OPTIONS);
+ value &= ~WIN_ENABLE;
+ tegra_plane_writel(p, value, DC_WIN_WIN_OPTIONS);
+}
+
+static void tegra_plane_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
+ plane);
+ struct tegra_plane_state *tegra_plane_state = to_tegra_plane_state(new_state);
+ struct drm_framebuffer *fb = new_state->fb;
+ struct tegra_plane *p = to_tegra_plane(plane);
+ struct tegra_dc_window window;
+ unsigned int i;
+
+ /* nothing to do without a CRTC and framebuffer attached */
+ if (!new_state->crtc || !new_state->fb)
+ return;
+
+ if (!new_state->visible)
+ return tegra_plane_atomic_disable(plane, state);
+
+ memset(&window, 0, sizeof(window));
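+ /* the DRM source rectangle is in 16.16 fixed point */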
+ window.src.x = new_state->src.x1 >> 16;
+ window.src.y = new_state->src.y1 >> 16;
+ window.src.w = drm_rect_width(&new_state->src) >> 16;
+ window.src.h = drm_rect_height(&new_state->src) >> 16;
+ window.dst.x = new_state->dst.x1;
+ window.dst.y = new_state->dst.y1;
+ window.dst.w = drm_rect_width(&new_state->dst);
+ window.dst.h = drm_rect_height(&new_state->dst);
+ window.bits_per_pixel = fb->format->cpp[0] * 8;
+ window.reflect_x = tegra_plane_state->reflect_x;
+ window.reflect_y = tegra_plane_state->reflect_y;
+
+ /* copy from state */
+ window.zpos = new_state->normalized_zpos;
+ window.tiling = tegra_plane_state->tiling;
+ window.format = tegra_plane_state->format;
+ window.swap = tegra_plane_state->swap;
+
+ for (i = 0; i < fb->format->num_planes; i++) {
+ window.base[i] = tegra_plane_state->iova[i] + fb->offsets[i];
+
+ /*
+ * Tegra uses a shared stride for UV planes. Framebuffers are
+ * already checked for this in the tegra_plane_atomic_check()
+ * function, so it's safe to ignore the V-plane pitch here.
+ */
+ if (i < 2)
+ window.stride[i] = fb->pitches[i];
+ }
+
+ tegra_dc_setup_window(p, &window);
+}
+
+static const struct drm_plane_helper_funcs tegra_plane_helper_funcs = {
+ .prepare_fb = tegra_plane_prepare_fb,
+ .cleanup_fb = tegra_plane_cleanup_fb,
+ .atomic_check = tegra_plane_atomic_check,
+ .atomic_disable = tegra_plane_atomic_disable,
+ .atomic_update = tegra_plane_atomic_update,
+};
+
+static unsigned long tegra_plane_get_possible_crtcs(struct drm_device *drm)
+{
+ /*
+ * Ideally this would use drm_crtc_mask(), but that would require the
+ * CRTC to already be in the mode_config's list of CRTCs. However, it
+ * will only be added to that list in the drm_crtc_init_with_planes()
+ * (in tegra_dc_init()), which in turn requires registration of these
+ * planes. So we have ourselves a nice little chicken and egg problem
+ * here.
+ *
+ * We work around this by manually creating the mask from the number
+ * of CRTCs that have been registered, and should therefore always be
+ * the same as drm_crtc_index() after registration.
+ */
+ return 1 << drm->mode_config.num_crtc;
+}
+
+static struct drm_plane *tegra_primary_plane_create(struct drm_device *drm,
+ struct tegra_dc *dc)
+{
+ unsigned long possible_crtcs = tegra_plane_get_possible_crtcs(drm);
+ enum drm_plane_type type = DRM_PLANE_TYPE_PRIMARY;
+ struct tegra_plane *plane;
+ unsigned int num_formats;
+ const u64 *modifiers;
+ const u32 *formats;
+ int err;
+
+ plane = kzalloc(sizeof(*plane), GFP_KERNEL);
+ if (!plane)
+ return ERR_PTR(-ENOMEM);
+
+ /* Always use window A as primary window */
+ plane->offset = 0xa00;
+ plane->index = 0;
+ plane->dc = dc;
+
+ num_formats = dc->soc->num_primary_formats;
+ formats = dc->soc->primary_formats;
+ modifiers = dc->soc->modifiers;
+
+ err = tegra_plane_interconnect_init(plane);
+ if (err) {
+ kfree(plane);
+ return ERR_PTR(err);
+ }
+
+ err = drm_universal_plane_init(drm, &plane->base, possible_crtcs,
+ &tegra_plane_funcs, formats,
+ num_formats, modifiers, type, NULL);
+ if (err < 0) {
+ kfree(plane);
+ return ERR_PTR(err);
+ }
+
+ drm_plane_helper_add(&plane->base, &tegra_plane_helper_funcs);
+ drm_plane_create_zpos_property(&plane->base, plane->index, 0, 255);
+
+ err = drm_plane_create_rotation_property(&plane->base,
+ DRM_MODE_ROTATE_0,
+ DRM_MODE_ROTATE_0 |
+ DRM_MODE_ROTATE_180 |
+ DRM_MODE_REFLECT_X |
+ DRM_MODE_REFLECT_Y);
+ if (err < 0)
+ dev_err(dc->dev, "failed to create rotation property: %d\n",
+ err);
+
+ return &plane->base;
+}
+
+static const u32 tegra_legacy_cursor_plane_formats[] = {
+ DRM_FORMAT_RGBA8888,
+};
+
+static const u32 tegra_cursor_plane_formats[] = {
+ DRM_FORMAT_ARGB8888,
+};
+
+static int tegra_cursor_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
+ plane);
+ struct tegra_plane_state *plane_state = to_tegra_plane_state(new_plane_state);
+ struct tegra_plane *tegra = to_tegra_plane(plane);
+ int err;
+
+ plane_state->peak_memory_bandwidth = 0;
+ plane_state->avg_memory_bandwidth = 0;
+
+ /* no need for further checks if the plane is being disabled */
+ if (!new_plane_state->crtc) {
+ plane_state->total_peak_memory_bandwidth = 0;
+ return 0;
+ }
+
+ /* scaling not supported for cursor */
+ if ((new_plane_state->src_w >> 16 != new_plane_state->crtc_w) ||
+ (new_plane_state->src_h >> 16 != new_plane_state->crtc_h))
+ return -EINVAL;
+
+ /* only square cursors supported */
+ if (new_plane_state->src_w != new_plane_state->src_h)
+ return -EINVAL;
+
+ if (new_plane_state->crtc_w != 32 && new_plane_state->crtc_w != 64 &&
+ new_plane_state->crtc_w != 128 && new_plane_state->crtc_w != 256)
+ return -EINVAL;
+
+ err = tegra_plane_state_add(tegra, new_plane_state);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static void __tegra_cursor_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *new_state)
+{
+ struct tegra_plane_state *tegra_plane_state = to_tegra_plane_state(new_state);
+ struct tegra_dc *dc = to_tegra_dc(new_state->crtc);
+ struct tegra_drm *tegra = plane->dev->dev_private;
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ u64 dma_mask = *dc->dev->dma_mask;
+#endif
+ unsigned int x, y;
+ u32 value = 0;
+
+ /* nothing to do without a CRTC and framebuffer attached */
+ if (!new_state->crtc || !new_state->fb)
+ return;
+
+ /*
+ * Legacy display supports hardware clipping of the cursor, but
+ * nvdisplay relies on software to clip the cursor to the screen.
+ */
+ if (!dc->soc->has_nvdisplay)
+ value |= CURSOR_CLIP_DISPLAY;
+
+ switch (new_state->crtc_w) {
+ case 32:
+ value |= CURSOR_SIZE_32x32;
+ break;
+
+ case 64:
+ value |= CURSOR_SIZE_64x64;
+ break;
+
+ case 128:
+ value |= CURSOR_SIZE_128x128;
+ break;
+
+ case 256:
+ value |= CURSOR_SIZE_256x256;
+ break;
+
+ default:
+ WARN(1, "cursor size %ux%u not supported\n",
+ new_state->crtc_w, new_state->crtc_h);
+ return;
+ }
+
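+ /* bits [31:10] of the cursor IOVA, which implies 1 KiB alignment */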
+ value |= (tegra_plane_state->iova[0] >> 10) & 0x3fffff;
+ tegra_dc_writel(dc, value, DC_DISP_CURSOR_START_ADDR);
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ value = (tegra_plane_state->iova[0] >> 32) & (dma_mask >> 32);
+ tegra_dc_writel(dc, value, DC_DISP_CURSOR_START_ADDR_HI);
+#endif
+
+ /* enable cursor and set blend mode */
+ value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
+ value |= CURSOR_ENABLE;
+ tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
+
+ value = tegra_dc_readl(dc, DC_DISP_BLEND_CURSOR_CONTROL);
+ value &= ~CURSOR_DST_BLEND_MASK;
+ value &= ~CURSOR_SRC_BLEND_MASK;
+
+ if (dc->soc->has_nvdisplay)
+ value &= ~CURSOR_COMPOSITION_MODE_XOR;
+ else
+ value |= CURSOR_MODE_NORMAL;
+
+ value |= CURSOR_DST_BLEND_NEG_K1_TIMES_SRC;
+ value |= CURSOR_SRC_BLEND_K1_TIMES_SRC;
+ value |= CURSOR_ALPHA;
+ tegra_dc_writel(dc, value, DC_DISP_BLEND_CURSOR_CONTROL);
+
+ /* nvdisplay relies on software for clipping */
+ if (dc->soc->has_nvdisplay) {
+ struct drm_rect src;
+
+ x = new_state->dst.x1;
+ y = new_state->dst.y1;
+
+ drm_rect_fp_to_int(&src, &new_state->src);
+
+ value = (src.y1 & tegra->vmask) << 16 | (src.x1 & tegra->hmask);
+ tegra_dc_writel(dc, value, DC_DISP_PCALC_HEAD_SET_CROPPED_POINT_IN_CURSOR);
+
+ value = (drm_rect_height(&src) & tegra->vmask) << 16 |
+ (drm_rect_width(&src) & tegra->hmask);
+ tegra_dc_writel(dc, value, DC_DISP_PCALC_HEAD_SET_CROPPED_SIZE_IN_CURSOR);
+ } else {
+ x = new_state->crtc_x;
+ y = new_state->crtc_y;
+ }
+
+ /* position the cursor */
+ value = ((y & tegra->vmask) << 16) | (x & tegra->hmask);
+ tegra_dc_writel(dc, value, DC_DISP_CURSOR_POSITION);
+}
+
+static void tegra_cursor_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane);
+
+ __tegra_cursor_atomic_update(plane, new_state);
+}
+
+static void tegra_cursor_atomic_disable(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
+ plane);
+ struct tegra_dc *dc;
+ u32 value;
+
+ /* nothing to do if the cursor isn't currently enabled */
+ if (!old_state || !old_state->crtc)
+ return;
+
+ dc = to_tegra_dc(old_state->crtc);
+
+ value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
+ value &= ~CURSOR_ENABLE;
+ tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
+}
+
+static int tegra_cursor_atomic_async_check(struct drm_plane *plane, struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane);
+ struct drm_crtc_state *crtc_state;
+ int min_scale, max_scale;
+ int err;
+
+ crtc_state = drm_atomic_get_existing_crtc_state(state, new_state->crtc);
+ if (WARN_ON(!crtc_state))
+ return -EINVAL;
+
+ if (!crtc_state->active)
+ return -EINVAL;
+
+ if (plane->state->crtc != new_state->crtc ||
+ plane->state->src_w != new_state->src_w ||
+ plane->state->src_h != new_state->src_h ||
+ plane->state->crtc_w != new_state->crtc_w ||
+ plane->state->crtc_h != new_state->crtc_h ||
+ plane->state->fb != new_state->fb ||
+ plane->state->fb == NULL)
+ return -EINVAL;
+
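+ /* scale limits are 16.16 fixed point: allow between 1/8x and 8x */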
+ min_scale = (1 << 16) / 8;
+ max_scale = (8 << 16) / 1;
+
+ err = drm_atomic_helper_check_plane_state(new_state, crtc_state, min_scale, max_scale,
+ true, true);
+ if (err < 0)
+ return err;
+
+ if (new_state->visible != plane->state->visible)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void tegra_cursor_atomic_async_update(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane);
+ struct tegra_dc *dc = to_tegra_dc(new_state->crtc);
+
+ plane->state->src_x = new_state->src_x;
+ plane->state->src_y = new_state->src_y;
+ plane->state->crtc_x = new_state->crtc_x;
+ plane->state->crtc_y = new_state->crtc_y;
+
+ if (new_state->visible) {
+ struct tegra_plane *p = to_tegra_plane(plane);
+ u32 value;
+
+ __tegra_cursor_atomic_update(plane, new_state);
+
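+ /*
+ * Latch ASSEMBLY into ARM, then activate; the dummy reads flush
+ * the posted register writes.
+ */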
+ value = (WIN_A_ACT_REQ << p->index) << 8 | GENERAL_UPDATE;
+ tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
+ (void)tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
+
+ value = (WIN_A_ACT_REQ << p->index) | GENERAL_ACT_REQ;
+ tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
+ (void)tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
+ }
+}
+
+static const struct drm_plane_helper_funcs tegra_cursor_plane_helper_funcs = {
+ .prepare_fb = tegra_plane_prepare_fb,
+ .cleanup_fb = tegra_plane_cleanup_fb,
+ .atomic_check = tegra_cursor_atomic_check,
+ .atomic_update = tegra_cursor_atomic_update,
+ .atomic_disable = tegra_cursor_atomic_disable,
+ .atomic_async_check = tegra_cursor_atomic_async_check,
+ .atomic_async_update = tegra_cursor_atomic_async_update,
+};
+
+static const uint64_t linear_modifiers[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
+static struct drm_plane *tegra_dc_cursor_plane_create(struct drm_device *drm,
+ struct tegra_dc *dc)
+{
+ unsigned long possible_crtcs = tegra_plane_get_possible_crtcs(drm);
+ struct tegra_plane *plane;
+ unsigned int num_formats;
+ const u32 *formats;
+ int err;
+
+ plane = kzalloc(sizeof(*plane), GFP_KERNEL);
+ if (!plane)
+ return ERR_PTR(-ENOMEM);
+
+ /*
+ * This index is kind of fake. The cursor isn't a regular plane, but
+ * its update and activation request bits in DC_CMD_STATE_CONTROL do
+ * use the same programming. Setting this fake index here allows the
+ * code in tegra_add_plane_state() to do the right thing without
+ * needing to special-case the cursor plane.
+ */
+ plane->index = 6;
+ plane->dc = dc;
+
+ if (!dc->soc->has_nvdisplay) {
+ num_formats = ARRAY_SIZE(tegra_legacy_cursor_plane_formats);
+ formats = tegra_legacy_cursor_plane_formats;
+
+ err = tegra_plane_interconnect_init(plane);
+ if (err) {
+ kfree(plane);
+ return ERR_PTR(err);
+ }
+ } else {
+ num_formats = ARRAY_SIZE(tegra_cursor_plane_formats);
+ formats = tegra_cursor_plane_formats;
+ }
+
+ err = drm_universal_plane_init(drm, &plane->base, possible_crtcs,
+ &tegra_plane_funcs, formats,
+ num_formats, linear_modifiers,
+ DRM_PLANE_TYPE_CURSOR, NULL);
+ if (err < 0) {
+ kfree(plane);
+ return ERR_PTR(err);
+ }
+
+ drm_plane_helper_add(&plane->base, &tegra_cursor_plane_helper_funcs);
+ drm_plane_create_zpos_immutable_property(&plane->base, 255);
+
+ return &plane->base;
+}
+
+static const u32 tegra20_overlay_formats[] = {
+ DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_RGBA5551,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_ARGB8888,
+ /* non-native formats */
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_RGBX5551,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_XRGB8888,
+ /* packed and planar YUV formats */
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YUV420,
+ DRM_FORMAT_YUV422,
+};
+
+static const u32 tegra114_overlay_formats[] = {
+ DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_RGBA5551,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_ARGB8888,
+ /* new on Tegra114 */
+ DRM_FORMAT_ABGR4444,
+ DRM_FORMAT_ABGR1555,
+ DRM_FORMAT_BGRA5551,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_RGBX5551,
+ DRM_FORMAT_XBGR1555,
+ DRM_FORMAT_BGRX5551,
+ DRM_FORMAT_BGR565,
+ DRM_FORMAT_BGRA8888,
+ DRM_FORMAT_RGBA8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ /* packed and planar YUV formats */
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YUV420,
+ DRM_FORMAT_YUV422,
+ /* semi-planar formats */
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_NV21,
+ DRM_FORMAT_NV16,
+ DRM_FORMAT_NV61,
+ DRM_FORMAT_NV24,
+ DRM_FORMAT_NV42,
+};
+
+static const u32 tegra124_overlay_formats[] = {
+ DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_RGBA5551,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_ARGB8888,
+ /* new on Tegra114 */
+ DRM_FORMAT_ABGR4444,
+ DRM_FORMAT_ABGR1555,
+ DRM_FORMAT_BGRA5551,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_RGBX5551,
+ DRM_FORMAT_XBGR1555,
+ DRM_FORMAT_BGRX5551,
+ DRM_FORMAT_BGR565,
+ DRM_FORMAT_BGRA8888,
+ DRM_FORMAT_RGBA8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ /* new on Tegra124 */
+ DRM_FORMAT_RGBX8888,
+ DRM_FORMAT_BGRX8888,
+ /* packed and planar YUV formats */
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_VYUY,
+ DRM_FORMAT_YUV420, /* YU12 */
+ DRM_FORMAT_YUV422, /* YU16 */
+ DRM_FORMAT_YUV444, /* YU24 */
+ /* semi-planar formats */
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_NV21,
+ DRM_FORMAT_NV16,
+ DRM_FORMAT_NV61,
+ DRM_FORMAT_NV24,
+ DRM_FORMAT_NV42,
+};
+
+static struct drm_plane *tegra_dc_overlay_plane_create(struct drm_device *drm,
+ struct tegra_dc *dc,
+ unsigned int index,
+ bool cursor)
+{
+ unsigned long possible_crtcs = tegra_plane_get_possible_crtcs(drm);
+ struct tegra_plane *plane;
+ unsigned int num_formats;
+ enum drm_plane_type type;
+ const u32 *formats;
+ int err;
+
+ plane = kzalloc(sizeof(*plane), GFP_KERNEL);
+ if (!plane)
+ return ERR_PTR(-ENOMEM);
+
+ plane->offset = 0xa00 + 0x200 * index;
+ plane->index = index;
+ plane->dc = dc;
+
+ num_formats = dc->soc->num_overlay_formats;
+ formats = dc->soc->overlay_formats;
+
+ err = tegra_plane_interconnect_init(plane);
+ if (err) {
+ kfree(plane);
+ return ERR_PTR(err);
+ }
+
+ if (!cursor)
+ type = DRM_PLANE_TYPE_OVERLAY;
+ else
+ type = DRM_PLANE_TYPE_CURSOR;
+
+ err = drm_universal_plane_init(drm, &plane->base, possible_crtcs,
+ &tegra_plane_funcs, formats,
+ num_formats, linear_modifiers,
+ type, NULL);
+ if (err < 0) {
+ kfree(plane);
+ return ERR_PTR(err);
+ }
+
+ drm_plane_helper_add(&plane->base, &tegra_plane_helper_funcs);
+ drm_plane_create_zpos_property(&plane->base, plane->index, 0, 255);
+
+ err = drm_plane_create_rotation_property(&plane->base,
+ DRM_MODE_ROTATE_0,
+ DRM_MODE_ROTATE_0 |
+ DRM_MODE_ROTATE_180 |
+ DRM_MODE_REFLECT_X |
+ DRM_MODE_REFLECT_Y);
+ if (err < 0)
+ dev_err(dc->dev, "failed to create rotation property: %d\n",
+ err);
+
+ return &plane->base;
+}
+
+static struct drm_plane *tegra_dc_add_shared_planes(struct drm_device *drm,
+ struct tegra_dc *dc)
+{
+ struct drm_plane *plane, *primary = NULL;
+ unsigned int i, j;
+
+ for (i = 0; i < dc->soc->num_wgrps; i++) {
+ const struct tegra_windowgroup_soc *wgrp = &dc->soc->wgrps[i];
+
+ if (wgrp->dc == dc->pipe) {
+ for (j = 0; j < wgrp->num_windows; j++) {
+ unsigned int index = wgrp->windows[j];
+
+ plane = tegra_shared_plane_create(drm, dc,
+ wgrp->index,
+ index);
+ if (IS_ERR(plane))
+ return plane;
+
+ /*
+ * Choose the first shared plane owned by this
+ * head as the primary plane.
+ */
+ if (!primary) {
+ plane->type = DRM_PLANE_TYPE_PRIMARY;
+ primary = plane;
+ }
+ }
+ }
+ }
+
+ return primary;
+}
+
+static struct drm_plane *tegra_dc_add_planes(struct drm_device *drm,
+ struct tegra_dc *dc)
+{
+ struct drm_plane *planes[2], *primary;
+ unsigned int planes_num;
+ unsigned int i;
+ int err;
+
+ primary = tegra_primary_plane_create(drm, dc);
+ if (IS_ERR(primary))
+ return primary;
+
+ if (dc->soc->supports_cursor)
+ planes_num = 2;
+ else
+ planes_num = 1;
+
+ for (i = 0; i < planes_num; i++) {
+ planes[i] = tegra_dc_overlay_plane_create(drm, dc, 1 + i,
+ false);
+ if (IS_ERR(planes[i])) {
+ err = PTR_ERR(planes[i]);
+
+ while (i--)
+ planes[i]->funcs->destroy(planes[i]);
+
+ primary->funcs->destroy(primary);
+ return ERR_PTR(err);
+ }
+ }
+
+ return primary;
+}
+
+static void tegra_dc_destroy(struct drm_crtc *crtc)
+{
+ drm_crtc_cleanup(crtc);
+}
+
+static void tegra_crtc_reset(struct drm_crtc *crtc)
+{
+ struct tegra_dc_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
+
+ if (crtc->state)
+ tegra_crtc_atomic_destroy_state(crtc, crtc->state);
+
+ __drm_atomic_helper_crtc_reset(crtc, &state->base);
+}
+
+static struct drm_crtc_state *
+tegra_crtc_atomic_duplicate_state(struct drm_crtc *crtc)
+{
+ struct tegra_dc_state *state = to_dc_state(crtc->state);
+ struct tegra_dc_state *copy;
+
+ copy = kmalloc(sizeof(*copy), GFP_KERNEL);
+ if (!copy)
+ return NULL;
+
+ __drm_atomic_helper_crtc_duplicate_state(crtc, &copy->base);
+ copy->clk = state->clk;
+ copy->pclk = state->pclk;
+ copy->div = state->div;
+ copy->planes = state->planes;
+
+ return &copy->base;
+}
+
+static void tegra_crtc_atomic_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ __drm_atomic_helper_crtc_destroy_state(state);
+ kfree(state);
+}
+
+#define DEBUGFS_REG32(_name) { .name = #_name, .offset = _name }
+
+static const struct debugfs_reg32 tegra_dc_regs[] = {
+ DEBUGFS_REG32(DC_CMD_GENERAL_INCR_SYNCPT),
+ DEBUGFS_REG32(DC_CMD_GENERAL_INCR_SYNCPT_CNTRL),
+ DEBUGFS_REG32(DC_CMD_GENERAL_INCR_SYNCPT_ERROR),
+ DEBUGFS_REG32(DC_CMD_WIN_A_INCR_SYNCPT),
+ DEBUGFS_REG32(DC_CMD_WIN_A_INCR_SYNCPT_CNTRL),
+ DEBUGFS_REG32(DC_CMD_WIN_A_INCR_SYNCPT_ERROR),
+ DEBUGFS_REG32(DC_CMD_WIN_B_INCR_SYNCPT),
+ DEBUGFS_REG32(DC_CMD_WIN_B_INCR_SYNCPT_CNTRL),
+ DEBUGFS_REG32(DC_CMD_WIN_B_INCR_SYNCPT_ERROR),
+ DEBUGFS_REG32(DC_CMD_WIN_C_INCR_SYNCPT),
+ DEBUGFS_REG32(DC_CMD_WIN_C_INCR_SYNCPT_CNTRL),
+ DEBUGFS_REG32(DC_CMD_WIN_C_INCR_SYNCPT_ERROR),
+ DEBUGFS_REG32(DC_CMD_CONT_SYNCPT_VSYNC),
+ DEBUGFS_REG32(DC_CMD_DISPLAY_COMMAND_OPTION0),
+ DEBUGFS_REG32(DC_CMD_DISPLAY_COMMAND),
+ DEBUGFS_REG32(DC_CMD_SIGNAL_RAISE),
+ DEBUGFS_REG32(DC_CMD_DISPLAY_POWER_CONTROL),
+ DEBUGFS_REG32(DC_CMD_INT_STATUS),
+ DEBUGFS_REG32(DC_CMD_INT_MASK),
+ DEBUGFS_REG32(DC_CMD_INT_ENABLE),
+ DEBUGFS_REG32(DC_CMD_INT_TYPE),
+ DEBUGFS_REG32(DC_CMD_INT_POLARITY),
+ DEBUGFS_REG32(DC_CMD_SIGNAL_RAISE1),
+ DEBUGFS_REG32(DC_CMD_SIGNAL_RAISE2),
+ DEBUGFS_REG32(DC_CMD_SIGNAL_RAISE3),
+ DEBUGFS_REG32(DC_CMD_STATE_ACCESS),
+ DEBUGFS_REG32(DC_CMD_STATE_CONTROL),
+ DEBUGFS_REG32(DC_CMD_DISPLAY_WINDOW_HEADER),
+ DEBUGFS_REG32(DC_CMD_REG_ACT_CONTROL),
+ DEBUGFS_REG32(DC_COM_CRC_CONTROL),
+ DEBUGFS_REG32(DC_COM_CRC_CHECKSUM),
+ DEBUGFS_REG32(DC_COM_PIN_OUTPUT_ENABLE(0)),
+ DEBUGFS_REG32(DC_COM_PIN_OUTPUT_ENABLE(1)),
+ DEBUGFS_REG32(DC_COM_PIN_OUTPUT_ENABLE(2)),
+ DEBUGFS_REG32(DC_COM_PIN_OUTPUT_ENABLE(3)),
+ DEBUGFS_REG32(DC_COM_PIN_OUTPUT_POLARITY(0)),
+ DEBUGFS_REG32(DC_COM_PIN_OUTPUT_POLARITY(1)),
+ DEBUGFS_REG32(DC_COM_PIN_OUTPUT_POLARITY(2)),
+ DEBUGFS_REG32(DC_COM_PIN_OUTPUT_POLARITY(3)),
+ DEBUGFS_REG32(DC_COM_PIN_OUTPUT_DATA(0)),
+ DEBUGFS_REG32(DC_COM_PIN_OUTPUT_DATA(1)),
+ DEBUGFS_REG32(DC_COM_PIN_OUTPUT_DATA(2)),
+ DEBUGFS_REG32(DC_COM_PIN_OUTPUT_DATA(3)),
+ DEBUGFS_REG32(DC_COM_PIN_INPUT_ENABLE(0)),
+ DEBUGFS_REG32(DC_COM_PIN_INPUT_ENABLE(1)),
+ DEBUGFS_REG32(DC_COM_PIN_INPUT_ENABLE(2)),
+ DEBUGFS_REG32(DC_COM_PIN_INPUT_ENABLE(3)),
+ DEBUGFS_REG32(DC_COM_PIN_INPUT_DATA(0)),
+ DEBUGFS_REG32(DC_COM_PIN_INPUT_DATA(1)),
+ DEBUGFS_REG32(DC_COM_PIN_OUTPUT_SELECT(0)),
+ DEBUGFS_REG32(DC_COM_PIN_OUTPUT_SELECT(1)),
+ DEBUGFS_REG32(DC_COM_PIN_OUTPUT_SELECT(2)),
+ DEBUGFS_REG32(DC_COM_PIN_OUTPUT_SELECT(3)),
+ DEBUGFS_REG32(DC_COM_PIN_OUTPUT_SELECT(4)),
+ DEBUGFS_REG32(DC_COM_PIN_OUTPUT_SELECT(5)),
+ DEBUGFS_REG32(DC_COM_PIN_OUTPUT_SELECT(6)),
+ DEBUGFS_REG32(DC_COM_PIN_MISC_CONTROL),
+ DEBUGFS_REG32(DC_COM_PIN_PM0_CONTROL),
+ DEBUGFS_REG32(DC_COM_PIN_PM0_DUTY_CYCLE),
+ DEBUGFS_REG32(DC_COM_PIN_PM1_CONTROL),
+ DEBUGFS_REG32(DC_COM_PIN_PM1_DUTY_CYCLE),
+ DEBUGFS_REG32(DC_COM_SPI_CONTROL),
+ DEBUGFS_REG32(DC_COM_SPI_START_BYTE),
+ DEBUGFS_REG32(DC_COM_HSPI_WRITE_DATA_AB),
+ DEBUGFS_REG32(DC_COM_HSPI_WRITE_DATA_CD),
+ DEBUGFS_REG32(DC_COM_HSPI_CS_DC),
+ DEBUGFS_REG32(DC_COM_SCRATCH_REGISTER_A),
+ DEBUGFS_REG32(DC_COM_SCRATCH_REGISTER_B),
+ DEBUGFS_REG32(DC_COM_GPIO_CTRL),
+ DEBUGFS_REG32(DC_COM_GPIO_DEBOUNCE_COUNTER),
+ DEBUGFS_REG32(DC_COM_CRC_CHECKSUM_LATCHED),
+ DEBUGFS_REG32(DC_DISP_DISP_SIGNAL_OPTIONS0),
+ DEBUGFS_REG32(DC_DISP_DISP_SIGNAL_OPTIONS1),
+ DEBUGFS_REG32(DC_DISP_DISP_WIN_OPTIONS),
+ DEBUGFS_REG32(DC_DISP_DISP_MEM_HIGH_PRIORITY),
+ DEBUGFS_REG32(DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER),
+ DEBUGFS_REG32(DC_DISP_DISP_TIMING_OPTIONS),
+ DEBUGFS_REG32(DC_DISP_REF_TO_SYNC),
+ DEBUGFS_REG32(DC_DISP_SYNC_WIDTH),
+ DEBUGFS_REG32(DC_DISP_BACK_PORCH),
+ DEBUGFS_REG32(DC_DISP_ACTIVE),
+ DEBUGFS_REG32(DC_DISP_FRONT_PORCH),
+ DEBUGFS_REG32(DC_DISP_H_PULSE0_CONTROL),
+ DEBUGFS_REG32(DC_DISP_H_PULSE0_POSITION_A),
+ DEBUGFS_REG32(DC_DISP_H_PULSE0_POSITION_B),
+ DEBUGFS_REG32(DC_DISP_H_PULSE0_POSITION_C),
+ DEBUGFS_REG32(DC_DISP_H_PULSE0_POSITION_D),
+ DEBUGFS_REG32(DC_DISP_H_PULSE1_CONTROL),
+ DEBUGFS_REG32(DC_DISP_H_PULSE1_POSITION_A),
+ DEBUGFS_REG32(DC_DISP_H_PULSE1_POSITION_B),
+ DEBUGFS_REG32(DC_DISP_H_PULSE1_POSITION_C),
+ DEBUGFS_REG32(DC_DISP_H_PULSE1_POSITION_D),
+ DEBUGFS_REG32(DC_DISP_H_PULSE2_CONTROL),
+ DEBUGFS_REG32(DC_DISP_H_PULSE2_POSITION_A),
+ DEBUGFS_REG32(DC_DISP_H_PULSE2_POSITION_B),
+ DEBUGFS_REG32(DC_DISP_H_PULSE2_POSITION_C),
+ DEBUGFS_REG32(DC_DISP_H_PULSE2_POSITION_D),
+ DEBUGFS_REG32(DC_DISP_V_PULSE0_CONTROL),
+ DEBUGFS_REG32(DC_DISP_V_PULSE0_POSITION_A),
+ DEBUGFS_REG32(DC_DISP_V_PULSE0_POSITION_B),
+ DEBUGFS_REG32(DC_DISP_V_PULSE0_POSITION_C),
+ DEBUGFS_REG32(DC_DISP_V_PULSE1_CONTROL),
+ DEBUGFS_REG32(DC_DISP_V_PULSE1_POSITION_A),
+ DEBUGFS_REG32(DC_DISP_V_PULSE1_POSITION_B),
+ DEBUGFS_REG32(DC_DISP_V_PULSE1_POSITION_C),
+ DEBUGFS_REG32(DC_DISP_V_PULSE2_CONTROL),
+ DEBUGFS_REG32(DC_DISP_V_PULSE2_POSITION_A),
+ DEBUGFS_REG32(DC_DISP_V_PULSE3_CONTROL),
+ DEBUGFS_REG32(DC_DISP_V_PULSE3_POSITION_A),
+ DEBUGFS_REG32(DC_DISP_M0_CONTROL),
+ DEBUGFS_REG32(DC_DISP_M1_CONTROL),
+ DEBUGFS_REG32(DC_DISP_DI_CONTROL),
+ DEBUGFS_REG32(DC_DISP_PP_CONTROL),
+ DEBUGFS_REG32(DC_DISP_PP_SELECT_A),
+ DEBUGFS_REG32(DC_DISP_PP_SELECT_B),
+ DEBUGFS_REG32(DC_DISP_PP_SELECT_C),
+ DEBUGFS_REG32(DC_DISP_PP_SELECT_D),
+ DEBUGFS_REG32(DC_DISP_DISP_CLOCK_CONTROL),
+ DEBUGFS_REG32(DC_DISP_DISP_INTERFACE_CONTROL),
+ DEBUGFS_REG32(DC_DISP_DISP_COLOR_CONTROL),
+ DEBUGFS_REG32(DC_DISP_SHIFT_CLOCK_OPTIONS),
+ DEBUGFS_REG32(DC_DISP_DATA_ENABLE_OPTIONS),
+ DEBUGFS_REG32(DC_DISP_SERIAL_INTERFACE_OPTIONS),
+ DEBUGFS_REG32(DC_DISP_LCD_SPI_OPTIONS),
+ DEBUGFS_REG32(DC_DISP_BORDER_COLOR),
+ DEBUGFS_REG32(DC_DISP_COLOR_KEY0_LOWER),
+ DEBUGFS_REG32(DC_DISP_COLOR_KEY0_UPPER),
+ DEBUGFS_REG32(DC_DISP_COLOR_KEY1_LOWER),
+ DEBUGFS_REG32(DC_DISP_COLOR_KEY1_UPPER),
+ DEBUGFS_REG32(DC_DISP_CURSOR_FOREGROUND),
+ DEBUGFS_REG32(DC_DISP_CURSOR_BACKGROUND),
+ DEBUGFS_REG32(DC_DISP_CURSOR_START_ADDR),
+ DEBUGFS_REG32(DC_DISP_CURSOR_START_ADDR_NS),
+ DEBUGFS_REG32(DC_DISP_CURSOR_POSITION),
+ DEBUGFS_REG32(DC_DISP_CURSOR_POSITION_NS),
+ DEBUGFS_REG32(DC_DISP_INIT_SEQ_CONTROL),
+ DEBUGFS_REG32(DC_DISP_SPI_INIT_SEQ_DATA_A),
+ DEBUGFS_REG32(DC_DISP_SPI_INIT_SEQ_DATA_B),
+ DEBUGFS_REG32(DC_DISP_SPI_INIT_SEQ_DATA_C),
+ DEBUGFS_REG32(DC_DISP_SPI_INIT_SEQ_DATA_D),
+ DEBUGFS_REG32(DC_DISP_DC_MCCIF_FIFOCTRL),
+ DEBUGFS_REG32(DC_DISP_MCCIF_DISPLAY0A_HYST),
+ DEBUGFS_REG32(DC_DISP_MCCIF_DISPLAY0B_HYST),
+ DEBUGFS_REG32(DC_DISP_MCCIF_DISPLAY1A_HYST),
+ DEBUGFS_REG32(DC_DISP_MCCIF_DISPLAY1B_HYST),
+ DEBUGFS_REG32(DC_DISP_DAC_CRT_CTRL),
+ DEBUGFS_REG32(DC_DISP_DISP_MISC_CONTROL),
+ DEBUGFS_REG32(DC_DISP_SD_CONTROL),
+ DEBUGFS_REG32(DC_DISP_SD_CSC_COEFF),
+ DEBUGFS_REG32(DC_DISP_SD_LUT(0)),
+ DEBUGFS_REG32(DC_DISP_SD_LUT(1)),
+ DEBUGFS_REG32(DC_DISP_SD_LUT(2)),
+ DEBUGFS_REG32(DC_DISP_SD_LUT(3)),
+ DEBUGFS_REG32(DC_DISP_SD_LUT(4)),
+ DEBUGFS_REG32(DC_DISP_SD_LUT(5)),
+ DEBUGFS_REG32(DC_DISP_SD_LUT(6)),
+ DEBUGFS_REG32(DC_DISP_SD_LUT(7)),
+ DEBUGFS_REG32(DC_DISP_SD_LUT(8)),
+ DEBUGFS_REG32(DC_DISP_SD_FLICKER_CONTROL),
+ DEBUGFS_REG32(DC_DISP_DC_PIXEL_COUNT),
+ DEBUGFS_REG32(DC_DISP_SD_HISTOGRAM(0)),
+ DEBUGFS_REG32(DC_DISP_SD_HISTOGRAM(1)),
+ DEBUGFS_REG32(DC_DISP_SD_HISTOGRAM(2)),
+ DEBUGFS_REG32(DC_DISP_SD_HISTOGRAM(3)),
+ DEBUGFS_REG32(DC_DISP_SD_HISTOGRAM(4)),
+ DEBUGFS_REG32(DC_DISP_SD_HISTOGRAM(5)),
+ DEBUGFS_REG32(DC_DISP_SD_HISTOGRAM(6)),
+ DEBUGFS_REG32(DC_DISP_SD_HISTOGRAM(7)),
+ DEBUGFS_REG32(DC_DISP_SD_BL_TF(0)),
+ DEBUGFS_REG32(DC_DISP_SD_BL_TF(1)),
+ DEBUGFS_REG32(DC_DISP_SD_BL_TF(2)),
+ DEBUGFS_REG32(DC_DISP_SD_BL_TF(3)),
+ DEBUGFS_REG32(DC_DISP_SD_BL_CONTROL),
+ DEBUGFS_REG32(DC_DISP_SD_HW_K_VALUES),
+ DEBUGFS_REG32(DC_DISP_SD_MAN_K_VALUES),
+ DEBUGFS_REG32(DC_DISP_CURSOR_START_ADDR_HI),
+ DEBUGFS_REG32(DC_DISP_BLEND_CURSOR_CONTROL),
+ DEBUGFS_REG32(DC_WIN_WIN_OPTIONS),
+ DEBUGFS_REG32(DC_WIN_BYTE_SWAP),
+ DEBUGFS_REG32(DC_WIN_BUFFER_CONTROL),
+ DEBUGFS_REG32(DC_WIN_COLOR_DEPTH),
+ DEBUGFS_REG32(DC_WIN_POSITION),
+ DEBUGFS_REG32(DC_WIN_SIZE),
+ DEBUGFS_REG32(DC_WIN_PRESCALED_SIZE),
+ DEBUGFS_REG32(DC_WIN_H_INITIAL_DDA),
+ DEBUGFS_REG32(DC_WIN_V_INITIAL_DDA),
+ DEBUGFS_REG32(DC_WIN_DDA_INC),
+ DEBUGFS_REG32(DC_WIN_LINE_STRIDE),
+ DEBUGFS_REG32(DC_WIN_BUF_STRIDE),
+ DEBUGFS_REG32(DC_WIN_UV_BUF_STRIDE),
+ DEBUGFS_REG32(DC_WIN_BUFFER_ADDR_MODE),
+ DEBUGFS_REG32(DC_WIN_DV_CONTROL),
+ DEBUGFS_REG32(DC_WIN_BLEND_NOKEY),
+ DEBUGFS_REG32(DC_WIN_BLEND_1WIN),
+ DEBUGFS_REG32(DC_WIN_BLEND_2WIN_X),
+ DEBUGFS_REG32(DC_WIN_BLEND_2WIN_Y),
+ DEBUGFS_REG32(DC_WIN_BLEND_3WIN_XY),
+ DEBUGFS_REG32(DC_WIN_HP_FETCH_CONTROL),
+ DEBUGFS_REG32(DC_WINBUF_START_ADDR),
+ DEBUGFS_REG32(DC_WINBUF_START_ADDR_NS),
+ DEBUGFS_REG32(DC_WINBUF_START_ADDR_U),
+ DEBUGFS_REG32(DC_WINBUF_START_ADDR_U_NS),
+ DEBUGFS_REG32(DC_WINBUF_START_ADDR_V),
+ DEBUGFS_REG32(DC_WINBUF_START_ADDR_V_NS),
+ DEBUGFS_REG32(DC_WINBUF_ADDR_H_OFFSET),
+ DEBUGFS_REG32(DC_WINBUF_ADDR_H_OFFSET_NS),
+ DEBUGFS_REG32(DC_WINBUF_ADDR_V_OFFSET),
+ DEBUGFS_REG32(DC_WINBUF_ADDR_V_OFFSET_NS),
+ DEBUGFS_REG32(DC_WINBUF_UFLOW_STATUS),
+ DEBUGFS_REG32(DC_WINBUF_AD_UFLOW_STATUS),
+ DEBUGFS_REG32(DC_WINBUF_BD_UFLOW_STATUS),
+ DEBUGFS_REG32(DC_WINBUF_CD_UFLOW_STATUS),
+};
+
+static int tegra_dc_show_regs(struct seq_file *s, void *data)
+{
+ struct drm_info_node *node = s->private;
+ struct tegra_dc *dc = node->info_ent->data;
+ unsigned int i;
+ int err = 0;
+
+ drm_modeset_lock(&dc->base.mutex, NULL);
+
+ if (!dc->base.state->active) {
+ err = -EBUSY;
+ goto unlock;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(tegra_dc_regs); i++) {
+ unsigned int offset = tegra_dc_regs[i].offset;
+
+ seq_printf(s, "%-40s %#05x %08x\n", tegra_dc_regs[i].name,
+ offset, tegra_dc_readl(dc, offset));
+ }
+
+unlock:
+ drm_modeset_unlock(&dc->base.mutex);
+ return err;
+}
+
+static int tegra_dc_show_crc(struct seq_file *s, void *data)
+{
+ struct drm_info_node *node = s->private;
+ struct tegra_dc *dc = node->info_ent->data;
+ int err = 0;
+ u32 value;
+
+ drm_modeset_lock(&dc->base.mutex, NULL);
+
+ if (!dc->base.state->active) {
+ err = -EBUSY;
+ goto unlock;
+ }
+
+ value = DC_COM_CRC_CONTROL_ACTIVE_DATA | DC_COM_CRC_CONTROL_ENABLE;
+ tegra_dc_writel(dc, value, DC_COM_CRC_CONTROL);
+ tegra_dc_commit(dc);
+
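+ /* wait for two frames so that the CRC of a complete frame is latched */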
+ drm_crtc_wait_one_vblank(&dc->base);
+ drm_crtc_wait_one_vblank(&dc->base);
+
+ value = tegra_dc_readl(dc, DC_COM_CRC_CHECKSUM);
+ seq_printf(s, "%08x\n", value);
+
+ tegra_dc_writel(dc, 0, DC_COM_CRC_CONTROL);
+
+unlock:
+ drm_modeset_unlock(&dc->base.mutex);
+ return err;
+}
+
+static int tegra_dc_show_stats(struct seq_file *s, void *data)
+{
+ struct drm_info_node *node = s->private;
+ struct tegra_dc *dc = node->info_ent->data;
+
+ seq_printf(s, "frames: %lu\n", dc->stats.frames);
+ seq_printf(s, "vblank: %lu\n", dc->stats.vblank);
+ seq_printf(s, "underflow: %lu\n", dc->stats.underflow);
+ seq_printf(s, "overflow: %lu\n", dc->stats.overflow);
+
+ seq_printf(s, "frames total: %lu\n", dc->stats.frames_total);
+ seq_printf(s, "vblank total: %lu\n", dc->stats.vblank_total);
+ seq_printf(s, "underflow total: %lu\n", dc->stats.underflow_total);
+ seq_printf(s, "overflow total: %lu\n", dc->stats.overflow_total);
+
+ return 0;
+}
+
+static struct drm_info_list debugfs_files[] = {
+ { "regs", tegra_dc_show_regs, 0, NULL },
+ { "crc", tegra_dc_show_crc, 0, NULL },
+ { "stats", tegra_dc_show_stats, 0, NULL },
+};
+
+static int tegra_dc_late_register(struct drm_crtc *crtc)
+{
+ unsigned int i, count = ARRAY_SIZE(debugfs_files);
+ struct drm_minor *minor = crtc->dev->primary;
+ struct dentry *root;
+ struct tegra_dc *dc = to_tegra_dc(crtc);
+
+#ifdef CONFIG_DEBUG_FS
+ root = crtc->debugfs_entry;
+#else
+ root = NULL;
+#endif
+
+ dc->debugfs_files = kmemdup(debugfs_files, sizeof(debugfs_files),
+ GFP_KERNEL);
+ if (!dc->debugfs_files)
+ return -ENOMEM;
+
+ for (i = 0; i < count; i++)
+ dc->debugfs_files[i].data = dc;
+
+ drm_debugfs_create_files(dc->debugfs_files, count, root, minor);
+
+ return 0;
+}
+
+static void tegra_dc_early_unregister(struct drm_crtc *crtc)
+{
+ unsigned int count = ARRAY_SIZE(debugfs_files);
+ struct drm_minor *minor = crtc->dev->primary;
+ struct tegra_dc *dc = to_tegra_dc(crtc);
+
+ drm_debugfs_remove_files(dc->debugfs_files, count, minor);
+ kfree(dc->debugfs_files);
+ dc->debugfs_files = NULL;
+}
+
+static u32 tegra_dc_get_vblank_counter(struct drm_crtc *crtc)
+{
+ struct tegra_dc *dc = to_tegra_dc(crtc);
+
+ /* XXX vblank syncpoints don't work with nvdisplay yet */
+ if (dc->syncpt && !dc->soc->has_nvdisplay)
+ return host1x_syncpt_read(dc->syncpt);
+
+ /* fall back to the software-emulated VBLANK counter */
+ return (u32)drm_crtc_vblank_count(&dc->base);
+}
+
+static int tegra_dc_enable_vblank(struct drm_crtc *crtc)
+{
+ struct tegra_dc *dc = to_tegra_dc(crtc);
+ u32 value;
+
+ value = tegra_dc_readl(dc, DC_CMD_INT_MASK);
+ value |= VBLANK_INT;
+ tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
+
+ return 0;
+}
+
+static void tegra_dc_disable_vblank(struct drm_crtc *crtc)
+{
+ struct tegra_dc *dc = to_tegra_dc(crtc);
+ u32 value;
+
+ value = tegra_dc_readl(dc, DC_CMD_INT_MASK);
+ value &= ~VBLANK_INT;
+ tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
+}
+
+static const struct drm_crtc_funcs tegra_crtc_funcs = {
+ .page_flip = drm_atomic_helper_page_flip,
+ .set_config = drm_atomic_helper_set_config,
+ .destroy = tegra_dc_destroy,
+ .reset = tegra_crtc_reset,
+ .atomic_duplicate_state = tegra_crtc_atomic_duplicate_state,
+ .atomic_destroy_state = tegra_crtc_atomic_destroy_state,
+ .late_register = tegra_dc_late_register,
+ .early_unregister = tegra_dc_early_unregister,
+ .get_vblank_counter = tegra_dc_get_vblank_counter,
+ .enable_vblank = tegra_dc_enable_vblank,
+ .disable_vblank = tegra_dc_disable_vblank,
+};
+
+static int tegra_dc_set_timings(struct tegra_dc *dc,
+ struct drm_display_mode *mode)
+{
+ unsigned int h_ref_to_sync = 1;
+ unsigned int v_ref_to_sync = 1;
+ unsigned long value;
+
+ if (!dc->soc->has_nvdisplay) {
+ tegra_dc_writel(dc, 0x0, DC_DISP_DISP_TIMING_OPTIONS);
+
+ value = (v_ref_to_sync << 16) | h_ref_to_sync;
+ tegra_dc_writel(dc, value, DC_DISP_REF_TO_SYNC);
+ }
+
+ value = ((mode->vsync_end - mode->vsync_start) << 16) |
+ ((mode->hsync_end - mode->hsync_start) << 0);
+ tegra_dc_writel(dc, value, DC_DISP_SYNC_WIDTH);
+
+ value = ((mode->vtotal - mode->vsync_end) << 16) |
+ ((mode->htotal - mode->hsync_end) << 0);
+ tegra_dc_writel(dc, value, DC_DISP_BACK_PORCH);
+
+ value = ((mode->vsync_start - mode->vdisplay) << 16) |
+ ((mode->hsync_start - mode->hdisplay) << 0);
+ tegra_dc_writel(dc, value, DC_DISP_FRONT_PORCH);
+
+ value = (mode->vdisplay << 16) | mode->hdisplay;
+ tegra_dc_writel(dc, value, DC_DISP_ACTIVE);
+
+ return 0;
+}
+
+/**
+ * tegra_dc_state_setup_clock - check clock settings and store them in atomic
+ * state
+ * @dc: display controller
+ * @crtc_state: CRTC atomic state
+ * @clk: parent clock for display controller
+ * @pclk: pixel clock
+ * @div: shift clock divider
+ *
+ * Returns:
+ * 0 on success or a negative error-code on failure.
+ */
+int tegra_dc_state_setup_clock(struct tegra_dc *dc,
+ struct drm_crtc_state *crtc_state,
+ struct clk *clk, unsigned long pclk,
+ unsigned int div)
+{
+ struct tegra_dc_state *state = to_dc_state(crtc_state);
+
+ if (!clk_has_parent(dc->clk, clk))
+ return -EINVAL;
+
+ state->clk = clk;
+ state->pclk = pclk;
+ state->div = div;
+
+ return 0;
+}
+
+static void tegra_dc_update_voltage_state(struct tegra_dc *dc,
+ struct tegra_dc_state *state)
+{
+ unsigned long rate, pstate;
+ struct dev_pm_opp *opp;
+ int err;
+
+ if (!dc->has_opp_table)
+ return;
+
+	/* calculate the actual pixel clock rate, which depends on the internal divider */
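+	/* the shift clock divider encodes (div + 2) / 2, hence pclk = rate * 2 / (div + 2) */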
+ rate = DIV_ROUND_UP(clk_get_rate(dc->clk) * 2, state->div + 2);
+
+ /* find suitable OPP for the rate */
+ opp = dev_pm_opp_find_freq_ceil(dc->dev, &rate);
+
+ /*
+	 * Very high resolution modes may result in a clock rate that is
+ * above the characterized maximum. In this case it's okay to fall
+ * back to the characterized maximum.
+ */
+ if (opp == ERR_PTR(-ERANGE))
+ opp = dev_pm_opp_find_freq_floor(dc->dev, &rate);
+
+ if (IS_ERR(opp)) {
+ dev_err(dc->dev, "failed to find OPP for %luHz: %pe\n",
+ rate, opp);
+ return;
+ }
+
+ pstate = dev_pm_opp_get_required_pstate(opp, 0);
+ dev_pm_opp_put(opp);
+
+ /*
+	 * The minimum core voltage depends on the pixel clock rate (which
+	 * depends on the internal clock divider of the CRTC) and not on the
+	 * rate of the display controller clock. This is why we're not using
+	 * the dev_pm_opp_set_rate() API and are instead controlling the power
+	 * domain directly.
+ */
+ err = dev_pm_genpd_set_performance_state(dc->dev, pstate);
+ if (err)
+ dev_err(dc->dev, "failed to set power domain state to %lu: %d\n",
+ pstate, err);
+}
+
+static void tegra_dc_set_clock_rate(struct tegra_dc *dc,
+ struct tegra_dc_state *state)
+{
+ int err;
+
+ err = clk_set_parent(dc->clk, state->clk);
+ if (err < 0)
+ dev_err(dc->dev, "failed to set parent clock: %d\n", err);
+
+ /*
+ * Outputs may not want to change the parent clock rate. This is only
+ * relevant to Tegra20 where only a single display PLL is available.
+ * Since that PLL would typically be used for HDMI, an internal LVDS
+ * panel would need to be driven by some other clock such as PLL_P
+ * which is shared with other peripherals. Changing the clock rate
+ * should therefore be avoided.
+ */
+ if (state->pclk > 0) {
+ err = clk_set_rate(state->clk, state->pclk);
+ if (err < 0)
+ dev_err(dc->dev,
+ "failed to set clock rate to %lu Hz\n",
+ state->pclk);
+
+ err = clk_set_rate(dc->clk, state->pclk);
+ if (err < 0)
+ dev_err(dc->dev, "failed to set clock %pC to %lu Hz: %d\n",
+ dc->clk, state->pclk, err);
+ }
+
+ DRM_DEBUG_KMS("rate: %lu, div: %u\n", clk_get_rate(dc->clk),
+ state->div);
+ DRM_DEBUG_KMS("pclk: %lu\n", state->pclk);
+
+ tegra_dc_update_voltage_state(dc, state);
+}
+
+static void tegra_dc_stop(struct tegra_dc *dc)
+{
+ u32 value;
+
+ /* stop the display controller */
+ value = tegra_dc_readl(dc, DC_CMD_DISPLAY_COMMAND);
+ value &= ~DISP_CTRL_MODE_MASK;
+ tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND);
+
+ tegra_dc_commit(dc);
+}
+
+static bool tegra_dc_idle(struct tegra_dc *dc)
+{
+ u32 value;
+
+ value = tegra_dc_readl_active(dc, DC_CMD_DISPLAY_COMMAND);
+
+ return (value & DISP_CTRL_MODE_MASK) == 0;
+}
+
+static int tegra_dc_wait_idle(struct tegra_dc *dc, unsigned long timeout)
+{
+ timeout = jiffies + msecs_to_jiffies(timeout);
+
+ while (time_before(jiffies, timeout)) {
+ if (tegra_dc_idle(dc))
+ return 0;
+
+ usleep_range(1000, 2000);
+ }
+
+ dev_dbg(dc->dev, "timeout waiting for DC to become idle\n");
+ return -ETIMEDOUT;
+}
+
+static void
+tegra_crtc_update_memory_bandwidth(struct drm_crtc *crtc,
+ struct drm_atomic_state *state,
+ bool prepare_bandwidth_transition)
+{
+ const struct tegra_plane_state *old_tegra_state, *new_tegra_state;
+ u32 i, new_avg_bw, old_avg_bw, new_peak_bw, old_peak_bw;
+ const struct drm_plane_state *old_plane_state;
+ const struct drm_crtc_state *old_crtc_state;
+ struct tegra_dc_window window, old_window;
+ struct tegra_dc *dc = to_tegra_dc(crtc);
+ struct tegra_plane *tegra;
+ struct drm_plane *plane;
+
+ if (dc->soc->has_nvdisplay)
+ return;
+
+ old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc);
+
+ if (!crtc->state->active) {
+ if (!old_crtc_state->active)
+ return;
+
+ /*
+		 * When the CRTC is disabled via DPMS, the state of the
+		 * attached planes is kept unchanged. Hence we need to
+		 * explicitly remove the bandwidths from the ICC paths.
+ */
+ drm_atomic_crtc_for_each_plane(plane, crtc) {
+ tegra = to_tegra_plane(plane);
+
+ icc_set_bw(tegra->icc_mem, 0, 0);
+ icc_set_bw(tegra->icc_mem_vfilter, 0, 0);
+ }
+
+ return;
+ }
+
+ for_each_old_plane_in_state(old_crtc_state->state, plane,
+ old_plane_state, i) {
+ old_tegra_state = to_const_tegra_plane_state(old_plane_state);
+ new_tegra_state = to_const_tegra_plane_state(plane->state);
+ tegra = to_tegra_plane(plane);
+
+ /*
+		 * We're iterating over the global atomic state, which contains
+		 * planes from other CRTCs, hence we need to filter out the
+		 * planes unrelated to this CRTC.
+ */
+ if (tegra->dc != dc)
+ continue;
+
+ new_avg_bw = new_tegra_state->avg_memory_bandwidth;
+ old_avg_bw = old_tegra_state->avg_memory_bandwidth;
+
+ new_peak_bw = new_tegra_state->total_peak_memory_bandwidth;
+ old_peak_bw = old_tegra_state->total_peak_memory_bandwidth;
+
+ /*
+ * See the comment related to !crtc->state->active above,
+ * which explains why bandwidths need to be updated when
+ * CRTC is turning ON.
+ */
+ if (new_avg_bw == old_avg_bw && new_peak_bw == old_peak_bw &&
+ old_crtc_state->active)
+ continue;
+
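+		/* plane source rectangles are in 16.16 fixed point, hence the shift */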
+ window.src.h = drm_rect_height(&plane->state->src) >> 16;
+ window.dst.h = drm_rect_height(&plane->state->dst);
+
+ old_window.src.h = drm_rect_height(&old_plane_state->src) >> 16;
+ old_window.dst.h = drm_rect_height(&old_plane_state->dst);
+
+ /*
+		 * During the preparation phase (atomic_begin), the memory
+		 * frequency should go high before the DC changes are committed
+		 * if the bandwidth requirement goes up; otherwise the memory
+		 * frequency should stay high if the BW requirement goes down.
+		 * The opposite applies to the completion phase (post_commit).
+ */
+ if (prepare_bandwidth_transition) {
+ new_avg_bw = max(old_avg_bw, new_avg_bw);
+ new_peak_bw = max(old_peak_bw, new_peak_bw);
+
+ if (tegra_plane_use_vertical_filtering(tegra, &old_window))
+ window = old_window;
+ }
+
+ icc_set_bw(tegra->icc_mem, new_avg_bw, new_peak_bw);
+
+ if (tegra_plane_use_vertical_filtering(tegra, &window))
+ icc_set_bw(tegra->icc_mem_vfilter, new_avg_bw, new_peak_bw);
+ else
+ icc_set_bw(tegra->icc_mem_vfilter, 0, 0);
+ }
+}
+
+static void tegra_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct tegra_dc *dc = to_tegra_dc(crtc);
+ u32 value;
+ int err;
+
+ if (!tegra_dc_idle(dc)) {
+ tegra_dc_stop(dc);
+
+ /*
+		 * Ignore the return value; there isn't anything useful to do
+		 * if this fails.
+ */
+ tegra_dc_wait_idle(dc, 100);
+ }
+
+ /*
+ * This should really be part of the RGB encoder driver, but clearing
+ * these bits has the side-effect of stopping the display controller.
+ * When that happens no VBLANK interrupts will be raised. At the same
+ * time the encoder is disabled before the display controller, so the
+	 * above code is always going to time out waiting for the controller
+ * to go idle.
+ *
+ * Given the close coupling between the RGB encoder and the display
+ * controller doing it here is still kind of okay. None of the other
+ * encoder drivers require these bits to be cleared.
+ *
+ * XXX: Perhaps given that the display controller is switched off at
+ * this point anyway maybe clearing these bits isn't even useful for
+ * the RGB encoder?
+ */
+ if (dc->rgb) {
+ value = tegra_dc_readl(dc, DC_CMD_DISPLAY_POWER_CONTROL);
+ value &= ~(PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
+ PW4_ENABLE | PM0_ENABLE | PM1_ENABLE);
+ tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
+ }
+
+ tegra_dc_stats_reset(&dc->stats);
+ drm_crtc_vblank_off(crtc);
+
+ spin_lock_irq(&crtc->dev->event_lock);
+
+ if (crtc->state->event) {
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ crtc->state->event = NULL;
+ }
+
+ spin_unlock_irq(&crtc->dev->event_lock);
+
+ err = host1x_client_suspend(&dc->client);
+ if (err < 0)
+ dev_err(dc->dev, "failed to suspend: %d\n", err);
+
+ if (dc->has_opp_table) {
+ err = dev_pm_genpd_set_performance_state(dc->dev, 0);
+ if (err)
+ dev_err(dc->dev,
+ "failed to clear power domain state: %d\n", err);
+ }
+}
+
+static void tegra_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct drm_display_mode *mode = &crtc->state->adjusted_mode;
+ struct tegra_dc_state *crtc_state = to_dc_state(crtc->state);
+ struct tegra_dc *dc = to_tegra_dc(crtc);
+ u32 value;
+ int err;
+
+ /* apply PLL changes */
+ tegra_dc_set_clock_rate(dc, crtc_state);
+
+ err = host1x_client_resume(&dc->client);
+ if (err < 0) {
+ dev_err(dc->dev, "failed to resume: %d\n", err);
+ return;
+ }
+
+ /* initialize display controller */
+ if (dc->syncpt) {
+ u32 syncpt = host1x_syncpt_id(dc->syncpt), enable;
+
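+		/* the continuous syncpoint enable bit sits at a different position on nvdisplay */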
+ if (dc->soc->has_nvdisplay)
+ enable = 1 << 31;
+ else
+ enable = 1 << 8;
+
+ value = SYNCPT_CNTRL_NO_STALL;
+ tegra_dc_writel(dc, value, DC_CMD_GENERAL_INCR_SYNCPT_CNTRL);
+
+ value = enable | syncpt;
+ tegra_dc_writel(dc, value, DC_CMD_CONT_SYNCPT_VSYNC);
+ }
+
+ if (dc->soc->has_nvdisplay) {
+ value = DSC_TO_UF_INT | DSC_BBUF_UF_INT | DSC_RBUF_UF_INT |
+ DSC_OBUF_UF_INT;
+ tegra_dc_writel(dc, value, DC_CMD_INT_TYPE);
+
+ value = DSC_TO_UF_INT | DSC_BBUF_UF_INT | DSC_RBUF_UF_INT |
+ DSC_OBUF_UF_INT | SD3_BUCKET_WALK_DONE_INT |
+ HEAD_UF_INT | MSF_INT | REG_TMOUT_INT |
+ REGION_CRC_INT | V_PULSE2_INT | V_PULSE3_INT |
+ VBLANK_INT | FRAME_END_INT;
+ tegra_dc_writel(dc, value, DC_CMD_INT_POLARITY);
+
+ value = SD3_BUCKET_WALK_DONE_INT | HEAD_UF_INT | VBLANK_INT |
+ FRAME_END_INT;
+ tegra_dc_writel(dc, value, DC_CMD_INT_ENABLE);
+
+ value = HEAD_UF_INT | REG_TMOUT_INT | FRAME_END_INT;
+ tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
+
+ tegra_dc_writel(dc, READ_MUX, DC_CMD_STATE_ACCESS);
+ } else {
+ value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
+ WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
+ tegra_dc_writel(dc, value, DC_CMD_INT_TYPE);
+
+ value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
+ WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
+ tegra_dc_writel(dc, value, DC_CMD_INT_POLARITY);
+
+ /* initialize timer */
+ value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(0x20) |
+ WINDOW_B_THRESHOLD(0x20) | WINDOW_C_THRESHOLD(0x20);
+ tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY);
+
+ value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(1) |
+ WINDOW_B_THRESHOLD(1) | WINDOW_C_THRESHOLD(1);
+ tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER);
+
+ value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
+ WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
+ tegra_dc_writel(dc, value, DC_CMD_INT_ENABLE);
+
+ value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
+ WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
+ tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
+ }
+
+ if (dc->soc->supports_background_color)
+ tegra_dc_writel(dc, 0, DC_DISP_BLEND_BACKGROUND_COLOR);
+ else
+ tegra_dc_writel(dc, 0, DC_DISP_BORDER_COLOR);
+
+ /* apply pixel clock changes */
+ if (!dc->soc->has_nvdisplay) {
+ value = SHIFT_CLK_DIVIDER(crtc_state->div) | PIXEL_CLK_DIVIDER_PCD1;
+ tegra_dc_writel(dc, value, DC_DISP_DISP_CLOCK_CONTROL);
+ }
+
+ /* program display mode */
+ tegra_dc_set_timings(dc, mode);
+
+ /* interlacing isn't supported yet, so disable it */
+ if (dc->soc->supports_interlacing) {
+ value = tegra_dc_readl(dc, DC_DISP_INTERLACE_CONTROL);
+ value &= ~INTERLACE_ENABLE;
+ tegra_dc_writel(dc, value, DC_DISP_INTERLACE_CONTROL);
+ }
+
+ value = tegra_dc_readl(dc, DC_CMD_DISPLAY_COMMAND);
+ value &= ~DISP_CTRL_MODE_MASK;
+ value |= DISP_CTRL_MODE_C_DISPLAY;
+ tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND);
+
+ if (!dc->soc->has_nvdisplay) {
+ value = tegra_dc_readl(dc, DC_CMD_DISPLAY_POWER_CONTROL);
+ value |= PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
+ PW4_ENABLE | PM0_ENABLE | PM1_ENABLE;
+ tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
+ }
+
+ /* enable underflow reporting and display red for missing pixels */
+ if (dc->soc->has_nvdisplay) {
+ value = UNDERFLOW_MODE_RED | UNDERFLOW_REPORT_ENABLE;
+ tegra_dc_writel(dc, value, DC_COM_RG_UNDERFLOW);
+ }
+
+ if (dc->rgb) {
+ /* XXX: parameterize? */
+ value = SC0_H_QUALIFIER_NONE | SC1_H_QUALIFIER_NONE;
+ tegra_dc_writel(dc, value, DC_DISP_SHIFT_CLOCK_OPTIONS);
+ }
+
+ tegra_dc_commit(dc);
+
+ drm_crtc_vblank_on(crtc);
+}
+
+static void tegra_crtc_atomic_begin(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ unsigned long flags;
+
+ tegra_crtc_update_memory_bandwidth(crtc, state, true);
+
+ if (crtc->state->event) {
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+
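+		/* without a VBLANK reference the event cannot be armed, so complete it now */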
+ if (drm_crtc_vblank_get(crtc) != 0)
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ else
+ drm_crtc_arm_vblank_event(crtc, crtc->state->event);
+
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+
+ crtc->state->event = NULL;
+ }
+}
+
+static void tegra_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
+ crtc);
+ struct tegra_dc_state *dc_state = to_dc_state(crtc_state);
+ struct tegra_dc *dc = to_tegra_dc(crtc);
+ u32 value;
+
+ value = dc_state->planes << 8 | GENERAL_UPDATE;
+ tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
+ value = tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
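+	/* the read-back ensures the update request has landed before requesting activation */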
+
+ value = dc_state->planes | GENERAL_ACT_REQ;
+ tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
+ value = tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
+}
+
+static bool tegra_plane_is_cursor(const struct drm_plane_state *state)
+{
+ const struct tegra_dc_soc_info *soc = to_tegra_dc(state->crtc)->soc;
+ const struct drm_format_info *fmt = state->fb->format;
+ unsigned int src_w = drm_rect_width(&state->src) >> 16;
+ unsigned int dst_w = drm_rect_width(&state->dst);
+
+ if (state->plane->type != DRM_PLANE_TYPE_CURSOR)
+ return false;
+
+ if (soc->supports_cursor)
+ return true;
+
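+	/* legacy window-based cursors must be unscaled, single-planar and at most 256 bytes per line */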
+ if (src_w != dst_w || fmt->num_planes != 1 || src_w * fmt->cpp[0] > 256)
+ return false;
+
+ return true;
+}
+
+static unsigned long
+tegra_plane_overlap_mask(struct drm_crtc_state *state,
+ const struct drm_plane_state *plane_state)
+{
+ const struct drm_plane_state *other_state;
+ const struct tegra_plane *tegra;
+ unsigned long overlap_mask = 0;
+ struct drm_plane *plane;
+ struct drm_rect rect;
+
+ if (!plane_state->visible || !plane_state->fb)
+ return 0;
+
+ /*
+	 * The data-prefetch FIFO will easily help to overcome temporary memory
+	 * pressure if another plane overlaps with the cursor plane.
+ */
+ if (tegra_plane_is_cursor(plane_state))
+ return 0;
+
+ drm_atomic_crtc_state_for_each_plane_state(plane, other_state, state) {
+ rect = plane_state->dst;
+
+ tegra = to_tegra_plane(other_state->plane);
+
+ if (!other_state->visible || !other_state->fb)
+ continue;
+
+ /*
+		 * Ignore cursor plane overlaps because it's not practical to
+		 * assume that the cursor contributes to the bandwidth in the
+		 * overlapping area, given its small window width.
+ */
+ if (tegra_plane_is_cursor(other_state))
+ continue;
+
+ if (drm_rect_intersect(&rect, &other_state->dst))
+ overlap_mask |= BIT(tegra->index);
+ }
+
+ return overlap_mask;
+}
+
+static int tegra_crtc_calculate_memory_bandwidth(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ ulong overlap_mask[TEGRA_DC_LEGACY_PLANES_NUM] = {}, mask;
+ u32 plane_peak_bw[TEGRA_DC_LEGACY_PLANES_NUM] = {};
+ bool all_planes_overlap_simultaneously = true;
+ const struct tegra_plane_state *tegra_state;
+ const struct drm_plane_state *plane_state;
+ struct tegra_dc *dc = to_tegra_dc(crtc);
+ const struct drm_crtc_state *old_state;
+ struct drm_crtc_state *new_state;
+ struct tegra_plane *tegra;
+ struct drm_plane *plane;
+
+ /*
+	 * The nv-display uses shared planes. The algorithm below assumes a
+	 * maximum of 3 planes per CRTC, and this assumption doesn't apply to
+	 * the nv-display. Note that T124 has additional windows, but they
+	 * currently aren't supported by the driver.
+ */
+ if (dc->soc->has_nvdisplay)
+ return 0;
+
+ new_state = drm_atomic_get_new_crtc_state(state, crtc);
+ old_state = drm_atomic_get_old_crtc_state(state, crtc);
+
+ /*
+	 * For overlapping planes, pixel data is fetched for each plane at
+	 * the same time, hence the bandwidths accumulate in this case. This
+	 * needs to be taken into account when calculating the total bandwidth
+	 * consumed by all planes.
+ *
+ * Here we get the overlapping state of each plane, which is a
+ * bitmask of plane indices telling with what planes there is an
+ * overlap. Note that bitmask[plane] includes BIT(plane) in order
+ * to make further code nicer and simpler.
+ */
+ drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, new_state) {
+ tegra_state = to_const_tegra_plane_state(plane_state);
+ tegra = to_tegra_plane(plane);
+
+ if (WARN_ON_ONCE(tegra->index >= TEGRA_DC_LEGACY_PLANES_NUM))
+ return -EINVAL;
+
+ plane_peak_bw[tegra->index] = tegra_state->peak_memory_bandwidth;
+ mask = tegra_plane_overlap_mask(new_state, plane_state);
+ overlap_mask[tegra->index] = mask;
+
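+		/* the mask includes the plane itself, so three set bits mean all planes overlap */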
+ if (hweight_long(mask) != 3)
+ all_planes_overlap_simultaneously = false;
+ }
+
+ /*
+	 * Then we calculate the maximum bandwidth of each plane state. The
+	 * bandwidth includes the plane BW plus the BW of the "simultaneously"
+	 * overlapping planes, where "simultaneously" means areas where the DC
+	 * fetches from the planes simultaneously during the scan-out process.
+ *
+ * For example, if plane A overlaps with planes B and C, but B and C
+ * don't overlap, then the peak bandwidth will be either in area where
+ * A-and-B or A-and-C planes overlap.
+ *
+ * The plane_peak_bw[] contains peak memory bandwidth values of
+ * each plane, this information is needed by interconnect provider
+ * in order to set up latency allowance based on the peak BW, see
+ * tegra_crtc_update_memory_bandwidth().
+ */
+ drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, new_state) {
+ u32 i, old_peak_bw, new_peak_bw, overlap_bw = 0;
+
+ /*
+		 * Note that the plane's atomic check doesn't touch the
+		 * total_peak_memory_bandwidth of an enabled plane, hence the
+		 * current state contains the old bandwidth state from the
+		 * previous CRTC commit.
+ */
+ tegra_state = to_const_tegra_plane_state(plane_state);
+ tegra = to_tegra_plane(plane);
+
+ for_each_set_bit(i, &overlap_mask[tegra->index], 3) {
+ if (i == tegra->index)
+ continue;
+
+ if (all_planes_overlap_simultaneously)
+ overlap_bw += plane_peak_bw[i];
+ else
+ overlap_bw = max(overlap_bw, plane_peak_bw[i]);
+ }
+
+ new_peak_bw = plane_peak_bw[tegra->index] + overlap_bw;
+ old_peak_bw = tegra_state->total_peak_memory_bandwidth;
+
+ /*
+		 * If the plane's peak bandwidth changed (for example, the
+		 * plane isn't overlapped anymore) and the plane isn't in the
+		 * atomic state, then add the plane to the state in order to
+		 * have its bandwidth updated.
+ */
+ if (old_peak_bw != new_peak_bw) {
+ struct tegra_plane_state *new_tegra_state;
+ struct drm_plane_state *new_plane_state;
+
+ new_plane_state = drm_atomic_get_plane_state(state, plane);
+ if (IS_ERR(new_plane_state))
+ return PTR_ERR(new_plane_state);
+
+ new_tegra_state = to_tegra_plane_state(new_plane_state);
+ new_tegra_state->total_peak_memory_bandwidth = new_peak_bw;
+ }
+ }
+
+ return 0;
+}
+
+static int tegra_crtc_atomic_check(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ int err;
+
+ err = tegra_crtc_calculate_memory_bandwidth(crtc, state);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+void tegra_crtc_atomic_post_commit(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ /*
+ * Display bandwidth is allowed to go down only once hardware state
+ * is known to be armed, i.e. state was committed and VBLANK event
+ * received.
+ */
+ tegra_crtc_update_memory_bandwidth(crtc, state, false);
+}
+
+static const struct drm_crtc_helper_funcs tegra_crtc_helper_funcs = {
+ .atomic_check = tegra_crtc_atomic_check,
+ .atomic_begin = tegra_crtc_atomic_begin,
+ .atomic_flush = tegra_crtc_atomic_flush,
+ .atomic_enable = tegra_crtc_atomic_enable,
+ .atomic_disable = tegra_crtc_atomic_disable,
+};
+
+static irqreturn_t tegra_dc_irq(int irq, void *data)
+{
+ struct tegra_dc *dc = data;
+ unsigned long status;
+
+ status = tegra_dc_readl(dc, DC_CMD_INT_STATUS);
+ tegra_dc_writel(dc, status, DC_CMD_INT_STATUS);
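+	/* writing the status back acknowledges the pending interrupts */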
+
+ if (status & FRAME_END_INT) {
+ /*
+ dev_dbg(dc->dev, "%s(): frame end\n", __func__);
+ */
+ dc->stats.frames_total++;
+ dc->stats.frames++;
+ }
+
+ if (status & VBLANK_INT) {
+ /*
+ dev_dbg(dc->dev, "%s(): vertical blank\n", __func__);
+ */
+ drm_crtc_handle_vblank(&dc->base);
+ dc->stats.vblank_total++;
+ dc->stats.vblank++;
+ }
+
+ if (status & (WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT)) {
+ /*
+ dev_dbg(dc->dev, "%s(): underflow\n", __func__);
+ */
+ dc->stats.underflow_total++;
+ dc->stats.underflow++;
+ }
+
+ if (status & (WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT)) {
+ /*
+ dev_dbg(dc->dev, "%s(): overflow\n", __func__);
+ */
+ dc->stats.overflow_total++;
+ dc->stats.overflow++;
+ }
+
+ if (status & HEAD_UF_INT) {
+ dev_dbg_ratelimited(dc->dev, "%s(): head underflow\n", __func__);
+ dc->stats.underflow_total++;
+ dc->stats.underflow++;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static bool tegra_dc_has_window_groups(struct tegra_dc *dc)
+{
+ unsigned int i;
+
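+	/* SoCs without window groups (pre-nvdisplay) always have windows */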
+ if (!dc->soc->wgrps)
+ return true;
+
+ for (i = 0; i < dc->soc->num_wgrps; i++) {
+ const struct tegra_windowgroup_soc *wgrp = &dc->soc->wgrps[i];
+
+ if (wgrp->dc == dc->pipe && wgrp->num_windows > 0)
+ return true;
+ }
+
+ return false;
+}
+
+static int tegra_dc_early_init(struct host1x_client *client)
+{
+ struct drm_device *drm = dev_get_drvdata(client->host);
+ struct tegra_drm *tegra = drm->dev_private;
+
+ tegra->num_crtcs++;
+
+ return 0;
+}
+
+static int tegra_dc_init(struct host1x_client *client)
+{
+ struct drm_device *drm = dev_get_drvdata(client->host);
+ unsigned long flags = HOST1X_SYNCPT_CLIENT_MANAGED;
+ struct tegra_dc *dc = host1x_client_to_dc(client);
+ struct tegra_drm *tegra = drm->dev_private;
+ struct drm_plane *primary = NULL;
+ struct drm_plane *cursor = NULL;
+ int err;
+
+ /*
+	 * The DC has been reset by now, so the VBLANK syncpoint (26 for head
+	 * 0, 27 for head 1) can be released for general use.
+ */
+ host1x_syncpt_release_vblank_reservation(client, 26 + dc->pipe);
+
+ /*
+ * XXX do not register DCs with no window groups because we cannot
+ * assign a primary plane to them, which in turn will cause KMS to
+ * crash.
+ */
+ if (!tegra_dc_has_window_groups(dc))
+ return 0;
+
+ /*
+ * Set the display hub as the host1x client parent for the display
+ * controller. This is needed for the runtime reference counting that
+ * ensures the display hub is always powered when any of the display
+ * controllers are.
+ */
+ if (dc->soc->has_nvdisplay)
+ client->parent = &tegra->hub->client;
+
+ dc->syncpt = host1x_syncpt_request(client, flags);
+ if (!dc->syncpt)
+ dev_warn(dc->dev, "failed to allocate syncpoint\n");
+
+ err = host1x_client_iommu_attach(client);
+ if (err < 0 && err != -ENODEV) {
+ dev_err(client->dev, "failed to attach to domain: %d\n", err);
+ return err;
+ }
+
+ if (dc->soc->wgrps)
+ primary = tegra_dc_add_shared_planes(drm, dc);
+ else
+ primary = tegra_dc_add_planes(drm, dc);
+
+ if (IS_ERR(primary)) {
+ err = PTR_ERR(primary);
+ goto cleanup;
+ }
+
+ if (dc->soc->supports_cursor) {
+ cursor = tegra_dc_cursor_plane_create(drm, dc);
+ if (IS_ERR(cursor)) {
+ err = PTR_ERR(cursor);
+ goto cleanup;
+ }
+ } else {
+ /* dedicate one overlay to mouse cursor */
+ cursor = tegra_dc_overlay_plane_create(drm, dc, 2, true);
+ if (IS_ERR(cursor)) {
+ err = PTR_ERR(cursor);
+ goto cleanup;
+ }
+ }
+
+ err = drm_crtc_init_with_planes(drm, &dc->base, primary, cursor,
+ &tegra_crtc_funcs, NULL);
+ if (err < 0)
+ goto cleanup;
+
+ drm_crtc_helper_add(&dc->base, &tegra_crtc_helper_funcs);
+
+ /*
+ * Keep track of the minimum pitch alignment across all display
+ * controllers.
+ */
+ if (dc->soc->pitch_align > tegra->pitch_align)
+ tegra->pitch_align = dc->soc->pitch_align;
+
+ /* track maximum resolution */
+ if (dc->soc->has_nvdisplay)
+ drm->mode_config.max_width = drm->mode_config.max_height = 16384;
+ else
+ drm->mode_config.max_width = drm->mode_config.max_height = 4096;
+
+ err = tegra_dc_rgb_init(drm, dc);
+ if (err < 0 && err != -ENODEV) {
+ dev_err(dc->dev, "failed to initialize RGB output: %d\n", err);
+ goto cleanup;
+ }
+
+ err = devm_request_irq(dc->dev, dc->irq, tegra_dc_irq, 0,
+ dev_name(dc->dev), dc);
+ if (err < 0) {
+ dev_err(dc->dev, "failed to request IRQ#%u: %d\n", dc->irq,
+ err);
+ goto cleanup;
+ }
+
+ /*
+ * Inherit the DMA parameters (such as maximum segment size) from the
+ * parent host1x device.
+ */
+ client->dev->dma_parms = client->host->dma_parms;
+
+ return 0;
+
+cleanup:
+ if (!IS_ERR_OR_NULL(cursor))
+ drm_plane_cleanup(cursor);
+
+ if (!IS_ERR(primary))
+ drm_plane_cleanup(primary);
+
+ host1x_client_iommu_detach(client);
+ host1x_syncpt_put(dc->syncpt);
+
+ return err;
+}
+
+static int tegra_dc_exit(struct host1x_client *client)
+{
+ struct tegra_dc *dc = host1x_client_to_dc(client);
+ int err;
+
+ if (!tegra_dc_has_window_groups(dc))
+ return 0;
+
+	/* avoid a dangling pointer in case the parent host1x device goes away */
+ client->dev->dma_parms = NULL;
+
+ devm_free_irq(dc->dev, dc->irq, dc);
+
+ err = tegra_dc_rgb_exit(dc);
+ if (err) {
+ dev_err(dc->dev, "failed to shutdown RGB output: %d\n", err);
+ return err;
+ }
+
+ host1x_client_iommu_detach(client);
+ host1x_syncpt_put(dc->syncpt);
+
+ return 0;
+}
+
+static int tegra_dc_late_exit(struct host1x_client *client)
+{
+ struct drm_device *drm = dev_get_drvdata(client->host);
+ struct tegra_drm *tegra = drm->dev_private;
+
+ tegra->num_crtcs--;
+
+ return 0;
+}
+
+static int tegra_dc_runtime_suspend(struct host1x_client *client)
+{
+ struct tegra_dc *dc = host1x_client_to_dc(client);
+ struct device *dev = client->dev;
+ int err;
+
+ err = reset_control_assert(dc->rst);
+ if (err < 0) {
+ dev_err(dev, "failed to assert reset: %d\n", err);
+ return err;
+ }
+
+ if (dc->soc->has_powergate)
+ tegra_powergate_power_off(dc->powergate);
+
+ clk_disable_unprepare(dc->clk);
+ pm_runtime_put_sync(dev);
+
+ return 0;
+}
+
+static int tegra_dc_runtime_resume(struct host1x_client *client)
+{
+ struct tegra_dc *dc = host1x_client_to_dc(client);
+ struct device *dev = client->dev;
+ int err;
+
+ err = pm_runtime_resume_and_get(dev);
+ if (err < 0) {
+ dev_err(dev, "failed to get runtime PM: %d\n", err);
+ return err;
+ }
+
+ if (dc->soc->has_powergate) {
+ err = tegra_powergate_sequence_power_up(dc->powergate, dc->clk,
+ dc->rst);
+ if (err < 0) {
+ dev_err(dev, "failed to power partition: %d\n", err);
+ goto put_rpm;
+ }
+ } else {
+ err = clk_prepare_enable(dc->clk);
+ if (err < 0) {
+ dev_err(dev, "failed to enable clock: %d\n", err);
+ goto put_rpm;
+ }
+
+ err = reset_control_deassert(dc->rst);
+ if (err < 0) {
+ dev_err(dev, "failed to deassert reset: %d\n", err);
+ goto disable_clk;
+ }
+ }
+
+ return 0;
+
+disable_clk:
+ clk_disable_unprepare(dc->clk);
+put_rpm:
+ pm_runtime_put_sync(dev);
+ return err;
+}
+
+static const struct host1x_client_ops dc_client_ops = {
+ .early_init = tegra_dc_early_init,
+ .init = tegra_dc_init,
+ .exit = tegra_dc_exit,
+ .late_exit = tegra_dc_late_exit,
+ .suspend = tegra_dc_runtime_suspend,
+ .resume = tegra_dc_runtime_resume,
+};
+
+static const struct tegra_dc_soc_info tegra20_dc_soc_info = {
+ .supports_background_color = false,
+ .supports_interlacing = false,
+ .supports_cursor = false,
+ .supports_block_linear = false,
+ .supports_sector_layout = false,
+ .has_legacy_blending = true,
+ .pitch_align = 8,
+ .has_powergate = false,
+ .coupled_pm = true,
+ .has_nvdisplay = false,
+ .num_primary_formats = ARRAY_SIZE(tegra20_primary_formats),
+ .primary_formats = tegra20_primary_formats,
+ .num_overlay_formats = ARRAY_SIZE(tegra20_overlay_formats),
+ .overlay_formats = tegra20_overlay_formats,
+ .modifiers = tegra20_modifiers,
+ .has_win_a_without_filters = true,
+ .has_win_b_vfilter_mem_client = true,
+ .has_win_c_without_vert_filter = true,
+ .plane_tiled_memory_bandwidth_x2 = false,
+ .has_pll_d2_out0 = false,
+};
+
+static const struct tegra_dc_soc_info tegra30_dc_soc_info = {
+ .supports_background_color = false,
+ .supports_interlacing = false,
+ .supports_cursor = false,
+ .supports_block_linear = false,
+ .supports_sector_layout = false,
+ .has_legacy_blending = true,
+ .pitch_align = 8,
+ .has_powergate = false,
+ .coupled_pm = false,
+ .has_nvdisplay = false,
+ .num_primary_formats = ARRAY_SIZE(tegra20_primary_formats),
+ .primary_formats = tegra20_primary_formats,
+ .num_overlay_formats = ARRAY_SIZE(tegra20_overlay_formats),
+ .overlay_formats = tegra20_overlay_formats,
+ .modifiers = tegra20_modifiers,
+ .has_win_a_without_filters = false,
+ .has_win_b_vfilter_mem_client = true,
+ .has_win_c_without_vert_filter = false,
+ .plane_tiled_memory_bandwidth_x2 = true,
+ .has_pll_d2_out0 = true,
+};
+
+static const struct tegra_dc_soc_info tegra114_dc_soc_info = {
+ .supports_background_color = false,
+ .supports_interlacing = false,
+ .supports_cursor = false,
+ .supports_block_linear = false,
+ .supports_sector_layout = false,
+ .has_legacy_blending = true,
+ .pitch_align = 64,
+ .has_powergate = true,
+ .coupled_pm = false,
+ .has_nvdisplay = false,
+ .num_primary_formats = ARRAY_SIZE(tegra114_primary_formats),
+ .primary_formats = tegra114_primary_formats,
+ .num_overlay_formats = ARRAY_SIZE(tegra114_overlay_formats),
+ .overlay_formats = tegra114_overlay_formats,
+ .modifiers = tegra20_modifiers,
+ .has_win_a_without_filters = false,
+ .has_win_b_vfilter_mem_client = false,
+ .has_win_c_without_vert_filter = false,
+ .plane_tiled_memory_bandwidth_x2 = true,
+ .has_pll_d2_out0 = true,
+};
+
+static const struct tegra_dc_soc_info tegra124_dc_soc_info = {
+ .supports_background_color = true,
+ .supports_interlacing = true,
+ .supports_cursor = true,
+ .supports_block_linear = true,
+ .supports_sector_layout = false,
+ .has_legacy_blending = false,
+ .pitch_align = 64,
+ .has_powergate = true,
+ .coupled_pm = false,
+ .has_nvdisplay = false,
+ .num_primary_formats = ARRAY_SIZE(tegra124_primary_formats),
+ .primary_formats = tegra124_primary_formats,
+ .num_overlay_formats = ARRAY_SIZE(tegra124_overlay_formats),
+ .overlay_formats = tegra124_overlay_formats,
+ .modifiers = tegra124_modifiers,
+ .has_win_a_without_filters = false,
+ .has_win_b_vfilter_mem_client = false,
+ .has_win_c_without_vert_filter = false,
+ .plane_tiled_memory_bandwidth_x2 = false,
+ .has_pll_d2_out0 = true,
+};
+
+static const struct tegra_dc_soc_info tegra210_dc_soc_info = {
+ .supports_background_color = true,
+ .supports_interlacing = true,
+ .supports_cursor = true,
+ .supports_block_linear = true,
+ .supports_sector_layout = false,
+ .has_legacy_blending = false,
+ .pitch_align = 64,
+ .has_powergate = true,
+ .coupled_pm = false,
+ .has_nvdisplay = false,
+ .num_primary_formats = ARRAY_SIZE(tegra114_primary_formats),
+ .primary_formats = tegra114_primary_formats,
+ .num_overlay_formats = ARRAY_SIZE(tegra114_overlay_formats),
+ .overlay_formats = tegra114_overlay_formats,
+ .modifiers = tegra124_modifiers,
+ .has_win_a_without_filters = false,
+ .has_win_b_vfilter_mem_client = false,
+ .has_win_c_without_vert_filter = false,
+ .plane_tiled_memory_bandwidth_x2 = false,
+ .has_pll_d2_out0 = true,
+};
+
+static const struct tegra_windowgroup_soc tegra186_dc_wgrps[] = {
+ {
+ .index = 0,
+ .dc = 0,
+ .windows = (const unsigned int[]) { 0 },
+ .num_windows = 1,
+ }, {
+ .index = 1,
+ .dc = 1,
+ .windows = (const unsigned int[]) { 1 },
+ .num_windows = 1,
+ }, {
+ .index = 2,
+ .dc = 1,
+ .windows = (const unsigned int[]) { 2 },
+ .num_windows = 1,
+ }, {
+ .index = 3,
+ .dc = 2,
+ .windows = (const unsigned int[]) { 3 },
+ .num_windows = 1,
+ }, {
+ .index = 4,
+ .dc = 2,
+ .windows = (const unsigned int[]) { 4 },
+ .num_windows = 1,
+ }, {
+ .index = 5,
+ .dc = 2,
+ .windows = (const unsigned int[]) { 5 },
+ .num_windows = 1,
+ },
+};
+
+static const struct tegra_dc_soc_info tegra186_dc_soc_info = {
+ .supports_background_color = true,
+ .supports_interlacing = true,
+ .supports_cursor = true,
+ .supports_block_linear = true,
+ .supports_sector_layout = false,
+ .has_legacy_blending = false,
+ .pitch_align = 64,
+ .has_powergate = false,
+ .coupled_pm = false,
+ .has_nvdisplay = true,
+ .wgrps = tegra186_dc_wgrps,
+ .num_wgrps = ARRAY_SIZE(tegra186_dc_wgrps),
+ .plane_tiled_memory_bandwidth_x2 = false,
+ .has_pll_d2_out0 = false,
+};
+
+static const struct tegra_windowgroup_soc tegra194_dc_wgrps[] = {
+ {
+ .index = 0,
+ .dc = 0,
+ .windows = (const unsigned int[]) { 0 },
+ .num_windows = 1,
+ }, {
+ .index = 1,
+ .dc = 1,
+ .windows = (const unsigned int[]) { 1 },
+ .num_windows = 1,
+ }, {
+ .index = 2,
+ .dc = 1,
+ .windows = (const unsigned int[]) { 2 },
+ .num_windows = 1,
+ }, {
+ .index = 3,
+ .dc = 2,
+ .windows = (const unsigned int[]) { 3 },
+ .num_windows = 1,
+ }, {
+ .index = 4,
+ .dc = 2,
+ .windows = (const unsigned int[]) { 4 },
+ .num_windows = 1,
+ }, {
+ .index = 5,
+ .dc = 2,
+ .windows = (const unsigned int[]) { 5 },
+ .num_windows = 1,
+ },
+};
+
+static const struct tegra_dc_soc_info tegra194_dc_soc_info = {
+ .supports_background_color = true,
+ .supports_interlacing = true,
+ .supports_cursor = true,
+ .supports_block_linear = true,
+ .supports_sector_layout = true,
+ .has_legacy_blending = false,
+ .pitch_align = 64,
+ .has_powergate = false,
+ .coupled_pm = false,
+ .has_nvdisplay = true,
+ .wgrps = tegra194_dc_wgrps,
+ .num_wgrps = ARRAY_SIZE(tegra194_dc_wgrps),
+ .plane_tiled_memory_bandwidth_x2 = false,
+ .has_pll_d2_out0 = false,
+};
+
+static const struct of_device_id tegra_dc_of_match[] = {
+ {
+ .compatible = "nvidia,tegra194-dc",
+ .data = &tegra194_dc_soc_info,
+ }, {
+ .compatible = "nvidia,tegra186-dc",
+ .data = &tegra186_dc_soc_info,
+ }, {
+ .compatible = "nvidia,tegra210-dc",
+ .data = &tegra210_dc_soc_info,
+ }, {
+ .compatible = "nvidia,tegra124-dc",
+ .data = &tegra124_dc_soc_info,
+ }, {
+ .compatible = "nvidia,tegra114-dc",
+ .data = &tegra114_dc_soc_info,
+ }, {
+ .compatible = "nvidia,tegra30-dc",
+ .data = &tegra30_dc_soc_info,
+ }, {
+ .compatible = "nvidia,tegra20-dc",
+ .data = &tegra20_dc_soc_info,
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(of, tegra_dc_of_match);
+
+static int tegra_dc_parse_dt(struct tegra_dc *dc)
+{
+ struct device_node *np;
+ u32 value = 0;
+ int err;
+
+ err = of_property_read_u32(dc->dev->of_node, "nvidia,head", &value);
+ if (err < 0) {
+ dev_err(dc->dev, "missing \"nvidia,head\" property\n");
+
+ /*
+ * If the nvidia,head property isn't present, try to find the
+ * correct head number by looking up the position of this
+ * display controller's node within the device tree. Assuming
+ * that the nodes are ordered properly in the DTS file and
+ * that the translation into a flattened device tree blob
+ * preserves that ordering this will actually yield the right
+		 * preserves that ordering, this will actually yield the right
+ *
+ * If those assumptions don't hold, this will still work for
+ * cases where only a single display controller is used.
+ */
+ for_each_matching_node(np, tegra_dc_of_match) {
+ if (np == dc->dev->of_node) {
+ of_node_put(np);
+ break;
+ }
+
+ value++;
+ }
+ }
+
+ dc->pipe = value;
+
+ return 0;
+}
+
+static int tegra_dc_match_by_pipe(struct device *dev, const void *data)
+{
+ struct tegra_dc *dc = dev_get_drvdata(dev);
+ unsigned int pipe = (unsigned long)(void *)data;
+
+ return dc->pipe == pipe;
+}
+
+static int tegra_dc_couple(struct tegra_dc *dc)
+{
+ /*
+ * On Tegra20, DC1 requires DC0 to be taken out of reset in order to
+ * be enabled, otherwise CPU hangs on writing to CMD_DISPLAY_COMMAND /
+ * POWER_CONTROL registers during CRTC enabling.
+ */
+ if (dc->soc->coupled_pm && dc->pipe == 1) {
+ struct device *companion;
+ struct tegra_dc *parent;
+
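+		/* the (const void *)0 argument selects the DC with pipe 0, i.e. DC0 */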
+ companion = driver_find_device(dc->dev->driver, NULL, (const void *)0,
+ tegra_dc_match_by_pipe);
+ if (!companion)
+ return -EPROBE_DEFER;
+
+ parent = dev_get_drvdata(companion);
+ dc->client.parent = &parent->client;
+
+ dev_dbg(dc->dev, "coupled to %s\n", dev_name(companion));
+ }
+
+ return 0;
+}
+
+static int tegra_dc_init_opp_table(struct tegra_dc *dc)
+{
+ struct tegra_core_opp_params opp_params = {};
+ int err;
+
+ err = devm_tegra_core_dev_init_opp_table(dc->dev, &opp_params);
+ if (err && err != -ENODEV)
+ return err;
+
+ if (err)
+ dc->has_opp_table = false;
+ else
+ dc->has_opp_table = true;
+
+ return 0;
+}
+
+static int tegra_dc_probe(struct platform_device *pdev)
+{
+ u64 dma_mask = dma_get_mask(pdev->dev.parent);
+ struct tegra_dc *dc;
+ int err;
+
+ err = dma_coerce_mask_and_coherent(&pdev->dev, dma_mask);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err);
+ return err;
+ }
+
+ dc = devm_kzalloc(&pdev->dev, sizeof(*dc), GFP_KERNEL);
+ if (!dc)
+ return -ENOMEM;
+
+ dc->soc = of_device_get_match_data(&pdev->dev);
+
+ INIT_LIST_HEAD(&dc->list);
+ dc->dev = &pdev->dev;
+
+ err = tegra_dc_parse_dt(dc);
+ if (err < 0)
+ return err;
+
+ err = tegra_dc_couple(dc);
+ if (err < 0)
+ return err;
+
+ dc->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(dc->clk)) {
+ dev_err(&pdev->dev, "failed to get clock\n");
+ return PTR_ERR(dc->clk);
+ }
+
+ dc->rst = devm_reset_control_get(&pdev->dev, "dc");
+ if (IS_ERR(dc->rst)) {
+ dev_err(&pdev->dev, "failed to get reset\n");
+ return PTR_ERR(dc->rst);
+ }
+
+	/* put the DC into reset: briefly enable the clock, assert reset, then disable the clock */
+ err = clk_prepare_enable(dc->clk);
+ if (err < 0)
+ return err;
+
+ usleep_range(2000, 4000);
+
+ err = reset_control_assert(dc->rst);
+ if (err < 0) {
+ clk_disable_unprepare(dc->clk);
+ return err;
+ }
+
+ usleep_range(2000, 4000);
+
+ clk_disable_unprepare(dc->clk);
+
+ if (dc->soc->has_powergate) {
+ if (dc->pipe == 0)
+ dc->powergate = TEGRA_POWERGATE_DIS;
+ else
+ dc->powergate = TEGRA_POWERGATE_DISB;
+
+ tegra_powergate_power_off(dc->powergate);
+ }
+
+ err = tegra_dc_init_opp_table(dc);
+ if (err < 0)
+ return err;
+
+ dc->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(dc->regs))
+ return PTR_ERR(dc->regs);
+
+ dc->irq = platform_get_irq(pdev, 0);
+ if (dc->irq < 0)
+ return -ENXIO;
+
+ err = tegra_dc_rgb_probe(dc);
+ if (err < 0 && err != -ENODEV)
+ return dev_err_probe(&pdev->dev, err,
+ "failed to probe RGB output\n");
+
+ platform_set_drvdata(pdev, dc);
+ pm_runtime_enable(&pdev->dev);
+
+ INIT_LIST_HEAD(&dc->client.list);
+ dc->client.ops = &dc_client_ops;
+ dc->client.dev = &pdev->dev;
+
+ err = host1x_client_register(&dc->client);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to register host1x client: %d\n",
+ err);
+ goto disable_pm;
+ }
+
+ return 0;
+
+disable_pm:
+ pm_runtime_disable(&pdev->dev);
+ tegra_dc_rgb_remove(dc);
+
+ return err;
+}
+
+static int tegra_dc_remove(struct platform_device *pdev)
+{
+ struct tegra_dc *dc = platform_get_drvdata(pdev);
+ int err;
+
+ err = host1x_client_unregister(&dc->client);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
+ err);
+ return err;
+ }
+
+ err = tegra_dc_rgb_remove(dc);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to remove RGB output: %d\n", err);
+ return err;
+ }
+
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+struct platform_driver tegra_dc_driver = {
+ .driver = {
+ .name = "tegra-dc",
+ .of_match_table = tegra_dc_of_match,
+ },
+ .probe = tegra_dc_probe,
+ .remove = tegra_dc_remove,
+};
diff --git a/drivers/gpu/drm/tegra/dc.h b/drivers/gpu/drm/tegra/dc.h
new file mode 100644
index 000000000..f902794d4
--- /dev/null
+++ b/drivers/gpu/drm/tegra/dc.h
@@ -0,0 +1,840 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
+ */
+
+#ifndef TEGRA_DC_H
+#define TEGRA_DC_H 1
+
+#include <linux/host1x.h>
+
+#include <drm/drm_crtc.h>
+
+#include "drm.h"
+
+struct tegra_output;
+
+#define TEGRA_DC_LEGACY_PLANES_NUM 7
+
+struct tegra_dc_state {
+ struct drm_crtc_state base;
+
+ struct clk *clk;
+ unsigned long pclk;
+ unsigned int div;
+
+ u32 planes;
+};
+
+static inline struct tegra_dc_state *to_dc_state(struct drm_crtc_state *state)
+{
+ if (state)
+ return container_of(state, struct tegra_dc_state, base);
+
+ return NULL;
+}
+
+struct tegra_dc_stats {
+ unsigned long frames;
+ unsigned long vblank;
+ unsigned long underflow;
+ unsigned long overflow;
+
+ unsigned long frames_total;
+ unsigned long vblank_total;
+ unsigned long underflow_total;
+ unsigned long overflow_total;
+};
+
+struct tegra_windowgroup_soc {
+ unsigned int index;
+ unsigned int dc;
+ const unsigned int *windows;
+ unsigned int num_windows;
+};
+
+struct tegra_dc_soc_info {
+ bool supports_background_color;
+ bool supports_interlacing;
+ bool supports_cursor;
+ bool supports_block_linear;
+ bool supports_sector_layout;
+ bool has_legacy_blending;
+ unsigned int pitch_align;
+ bool has_powergate;
+ bool coupled_pm;
+ bool has_nvdisplay;
+ const struct tegra_windowgroup_soc *wgrps;
+ unsigned int num_wgrps;
+ const u32 *primary_formats;
+ unsigned int num_primary_formats;
+ const u32 *overlay_formats;
+ unsigned int num_overlay_formats;
+ const u64 *modifiers;
+ bool has_win_a_without_filters;
+ bool has_win_b_vfilter_mem_client;
+ bool has_win_c_without_vert_filter;
+ bool plane_tiled_memory_bandwidth_x2;
+ bool has_pll_d2_out0;
+};
+
+struct tegra_dc {
+ struct host1x_client client;
+ struct host1x_syncpt *syncpt;
+ struct device *dev;
+
+ struct drm_crtc base;
+ unsigned int powergate;
+ int pipe;
+
+ struct clk *clk;
+ struct reset_control *rst;
+ void __iomem *regs;
+ int irq;
+
+ struct tegra_output *rgb;
+
+ struct tegra_dc_stats stats;
+ struct list_head list;
+
+ struct drm_info_list *debugfs_files;
+
+ const struct tegra_dc_soc_info *soc;
+
+ bool has_opp_table;
+};
+
+static inline struct tegra_dc *
+host1x_client_to_dc(struct host1x_client *client)
+{
+ return container_of(client, struct tegra_dc, client);
+}
+
+static inline struct tegra_dc *to_tegra_dc(struct drm_crtc *crtc)
+{
+ return crtc ? container_of(crtc, struct tegra_dc, base) : NULL;
+}
+
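+/* DC register offsets are in units of 32-bit words; shift by 2 to get the byte offset */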
+static inline void tegra_dc_writel(struct tegra_dc *dc, u32 value,
+ unsigned int offset)
+{
+ trace_dc_writel(dc->dev, offset, value);
+ writel(value, dc->regs + (offset << 2));
+}
+
+static inline u32 tegra_dc_readl(struct tegra_dc *dc, unsigned int offset)
+{
+ u32 value = readl(dc->regs + (offset << 2));
+
+ trace_dc_readl(dc->dev, offset, value);
+
+ return value;
+}
+
+struct tegra_dc_window {
+ struct {
+ unsigned int x;
+ unsigned int y;
+ unsigned int w;
+ unsigned int h;
+ } src;
+ struct {
+ unsigned int x;
+ unsigned int y;
+ unsigned int w;
+ unsigned int h;
+ } dst;
+ unsigned int bits_per_pixel;
+ unsigned int stride[2];
+ unsigned long base[3];
+ unsigned int zpos;
+ bool reflect_x;
+ bool reflect_y;
+
+ struct tegra_bo_tiling tiling;
+ u32 format;
+ u32 swap;
+};
+
+/* from dc.c */
+bool tegra_dc_has_output(struct tegra_dc *dc, struct device *dev);
+void tegra_dc_commit(struct tegra_dc *dc);
+int tegra_dc_state_setup_clock(struct tegra_dc *dc,
+ struct drm_crtc_state *crtc_state,
+ struct clk *clk, unsigned long pclk,
+ unsigned int div);
+void tegra_crtc_atomic_post_commit(struct drm_crtc *crtc,
+ struct drm_atomic_state *state);
+
+/* from rgb.c */
+int tegra_dc_rgb_probe(struct tegra_dc *dc);
+int tegra_dc_rgb_remove(struct tegra_dc *dc);
+int tegra_dc_rgb_init(struct drm_device *drm, struct tegra_dc *dc);
+int tegra_dc_rgb_exit(struct tegra_dc *dc);
+
+#define DC_CMD_GENERAL_INCR_SYNCPT 0x000
+#define DC_CMD_GENERAL_INCR_SYNCPT_CNTRL 0x001
+#define SYNCPT_CNTRL_NO_STALL (1 << 8)
+#define SYNCPT_CNTRL_SOFT_RESET (1 << 0)
+#define DC_CMD_GENERAL_INCR_SYNCPT_ERROR 0x002
+#define DC_CMD_WIN_A_INCR_SYNCPT 0x008
+#define DC_CMD_WIN_A_INCR_SYNCPT_CNTRL 0x009
+#define DC_CMD_WIN_A_INCR_SYNCPT_ERROR 0x00a
+#define DC_CMD_WIN_B_INCR_SYNCPT 0x010
+#define DC_CMD_WIN_B_INCR_SYNCPT_CNTRL 0x011
+#define DC_CMD_WIN_B_INCR_SYNCPT_ERROR 0x012
+#define DC_CMD_WIN_C_INCR_SYNCPT 0x018
+#define DC_CMD_WIN_C_INCR_SYNCPT_CNTRL 0x019
+#define DC_CMD_WIN_C_INCR_SYNCPT_ERROR 0x01a
+#define DC_CMD_CONT_SYNCPT_VSYNC 0x028
+#define SYNCPT_VSYNC_ENABLE (1 << 8)
+#define DC_CMD_DISPLAY_COMMAND_OPTION0 0x031
+#define DC_CMD_DISPLAY_COMMAND 0x032
+#define DISP_CTRL_MODE_STOP (0 << 5)
+#define DISP_CTRL_MODE_C_DISPLAY (1 << 5)
+#define DISP_CTRL_MODE_NC_DISPLAY (2 << 5)
+#define DISP_CTRL_MODE_MASK (3 << 5)
+#define DC_CMD_SIGNAL_RAISE 0x033
+#define DC_CMD_DISPLAY_POWER_CONTROL 0x036
+#define PW0_ENABLE (1 << 0)
+#define PW1_ENABLE (1 << 2)
+#define PW2_ENABLE (1 << 4)
+#define PW3_ENABLE (1 << 6)
+#define PW4_ENABLE (1 << 8)
+#define PM0_ENABLE (1 << 16)
+#define PM1_ENABLE (1 << 18)
+
+#define DC_CMD_INT_STATUS 0x037
+#define DC_CMD_INT_MASK 0x038
+#define DC_CMD_INT_ENABLE 0x039
+#define DC_CMD_INT_TYPE 0x03a
+#define DC_CMD_INT_POLARITY 0x03b
+#define CTXSW_INT (1 << 0)
+#define FRAME_END_INT (1 << 1)
+#define VBLANK_INT (1 << 2)
+#define V_PULSE3_INT (1 << 4)
+#define V_PULSE2_INT (1 << 5)
+#define REGION_CRC_INT (1 << 6)
+#define REG_TMOUT_INT (1 << 7)
+#define WIN_A_UF_INT (1 << 8)
+#define WIN_B_UF_INT (1 << 9)
+#define WIN_C_UF_INT (1 << 10)
+#define MSF_INT (1 << 12)
+#define WIN_A_OF_INT (1 << 14)
+#define WIN_B_OF_INT (1 << 15)
+#define WIN_C_OF_INT (1 << 16)
+#define HEAD_UF_INT (1 << 23)
+#define SD3_BUCKET_WALK_DONE_INT (1 << 24)
+#define DSC_OBUF_UF_INT (1 << 26)
+#define DSC_RBUF_UF_INT (1 << 27)
+#define DSC_BBUF_UF_INT (1 << 28)
+#define DSC_TO_UF_INT (1 << 29)
+
+#define DC_CMD_SIGNAL_RAISE1 0x03c
+#define DC_CMD_SIGNAL_RAISE2 0x03d
+#define DC_CMD_SIGNAL_RAISE3 0x03e
+
+#define DC_CMD_STATE_ACCESS 0x040
+#define READ_MUX (1 << 0)
+#define WRITE_MUX (1 << 2)
+
+#define DC_CMD_STATE_CONTROL 0x041
+#define GENERAL_ACT_REQ (1 << 0)
+#define WIN_A_ACT_REQ (1 << 1)
+#define WIN_B_ACT_REQ (1 << 2)
+#define WIN_C_ACT_REQ (1 << 3)
+#define CURSOR_ACT_REQ (1 << 7)
+#define GENERAL_UPDATE (1 << 8)
+#define WIN_A_UPDATE (1 << 9)
+#define WIN_B_UPDATE (1 << 10)
+#define WIN_C_UPDATE (1 << 11)
+#define CURSOR_UPDATE (1 << 15)
+#define COMMON_ACTREQ (1 << 16)
+#define COMMON_UPDATE (1 << 17)
+#define NC_HOST_TRIG (1 << 24)
+
+#define DC_CMD_DISPLAY_WINDOW_HEADER 0x042
+#define WINDOW_A_SELECT (1 << 4)
+#define WINDOW_B_SELECT (1 << 5)
+#define WINDOW_C_SELECT (1 << 6)
+
+#define DC_CMD_REG_ACT_CONTROL 0x043
+
+#define DC_COM_CRC_CONTROL 0x300
+#define DC_COM_CRC_CONTROL_ALWAYS (1 << 3)
+#define DC_COM_CRC_CONTROL_FULL_FRAME (0 << 2)
+#define DC_COM_CRC_CONTROL_ACTIVE_DATA (1 << 2)
+#define DC_COM_CRC_CONTROL_WAIT (1 << 1)
+#define DC_COM_CRC_CONTROL_ENABLE (1 << 0)
+#define DC_COM_CRC_CHECKSUM 0x301
+#define DC_COM_PIN_OUTPUT_ENABLE(x) (0x302 + (x))
+#define DC_COM_PIN_OUTPUT_POLARITY(x) (0x306 + (x))
+#define LVS_OUTPUT_POLARITY_LOW (1 << 28)
+#define LHS_OUTPUT_POLARITY_LOW (1 << 30)
+#define DC_COM_PIN_OUTPUT_DATA(x) (0x30a + (x))
+#define DC_COM_PIN_INPUT_ENABLE(x) (0x30e + (x))
+#define DC_COM_PIN_INPUT_DATA(x) (0x312 + (x))
+#define DC_COM_PIN_OUTPUT_SELECT(x) (0x314 + (x))
+
+#define DC_COM_PIN_MISC_CONTROL 0x31b
+#define DC_COM_PIN_PM0_CONTROL 0x31c
+#define DC_COM_PIN_PM0_DUTY_CYCLE 0x31d
+#define DC_COM_PIN_PM1_CONTROL 0x31e
+#define DC_COM_PIN_PM1_DUTY_CYCLE 0x31f
+
+#define DC_COM_SPI_CONTROL 0x320
+#define DC_COM_SPI_START_BYTE 0x321
+#define DC_COM_HSPI_WRITE_DATA_AB 0x322
+#define DC_COM_HSPI_WRITE_DATA_CD 0x323
+#define DC_COM_HSPI_CS_DC 0x324
+#define DC_COM_SCRATCH_REGISTER_A 0x325
+#define DC_COM_SCRATCH_REGISTER_B 0x326
+#define DC_COM_GPIO_CTRL 0x327
+#define DC_COM_GPIO_DEBOUNCE_COUNTER 0x328
+#define DC_COM_CRC_CHECKSUM_LATCHED 0x329
+
+#define DC_COM_RG_UNDERFLOW 0x365
+#define UNDERFLOW_MODE_RED (1 << 8)
+#define UNDERFLOW_REPORT_ENABLE (1 << 0)
+
+#define DC_DISP_DISP_SIGNAL_OPTIONS0 0x400
+#define H_PULSE0_ENABLE (1 << 8)
+#define H_PULSE1_ENABLE (1 << 10)
+#define H_PULSE2_ENABLE (1 << 12)
+
+#define DC_DISP_DISP_SIGNAL_OPTIONS1 0x401
+
+#define DC_DISP_DISP_WIN_OPTIONS 0x402
+#define HDMI_ENABLE (1 << 30)
+#define DSI_ENABLE (1 << 29)
+#define SOR1_TIMING_CYA (1 << 27)
+#define CURSOR_ENABLE (1 << 16)
+
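+/* SOR0 and SOR1 map to bits 25 and 26; SOR2 and later skip bit 27 (bits 28, 29, ...) */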
+#define SOR_ENABLE(x) (1 << (25 + (((x) > 1) ? ((x) + 1) : (x))))
+
+#define DC_DISP_DISP_MEM_HIGH_PRIORITY 0x403
+#define CURSOR_THRESHOLD(x) (((x) & 0x03) << 24)
+#define WINDOW_A_THRESHOLD(x) (((x) & 0x7f) << 16)
+#define WINDOW_B_THRESHOLD(x) (((x) & 0x7f) << 8)
+#define WINDOW_C_THRESHOLD(x) (((x) & 0xff) << 0)
+
+#define DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER 0x404
+#define CURSOR_DELAY(x) (((x) & 0x3f) << 24)
+#define WINDOW_A_DELAY(x) (((x) & 0x3f) << 16)
+#define WINDOW_B_DELAY(x) (((x) & 0x3f) << 8)
+#define WINDOW_C_DELAY(x) (((x) & 0x3f) << 0)
+
+#define DC_DISP_DISP_TIMING_OPTIONS 0x405
+#define VSYNC_H_POSITION(x) ((x) & 0xfff)
+
+#define DC_DISP_REF_TO_SYNC 0x406
+#define DC_DISP_SYNC_WIDTH 0x407
+#define DC_DISP_BACK_PORCH 0x408
+#define DC_DISP_ACTIVE 0x409
+#define DC_DISP_FRONT_PORCH 0x40a
+#define DC_DISP_H_PULSE0_CONTROL 0x40b
+#define DC_DISP_H_PULSE0_POSITION_A 0x40c
+#define DC_DISP_H_PULSE0_POSITION_B 0x40d
+#define DC_DISP_H_PULSE0_POSITION_C 0x40e
+#define DC_DISP_H_PULSE0_POSITION_D 0x40f
+#define DC_DISP_H_PULSE1_CONTROL 0x410
+#define DC_DISP_H_PULSE1_POSITION_A 0x411
+#define DC_DISP_H_PULSE1_POSITION_B 0x412
+#define DC_DISP_H_PULSE1_POSITION_C 0x413
+#define DC_DISP_H_PULSE1_POSITION_D 0x414
+#define DC_DISP_H_PULSE2_CONTROL 0x415
+#define DC_DISP_H_PULSE2_POSITION_A 0x416
+#define DC_DISP_H_PULSE2_POSITION_B 0x417
+#define DC_DISP_H_PULSE2_POSITION_C 0x418
+#define DC_DISP_H_PULSE2_POSITION_D 0x419
+#define DC_DISP_V_PULSE0_CONTROL 0x41a
+#define DC_DISP_V_PULSE0_POSITION_A 0x41b
+#define DC_DISP_V_PULSE0_POSITION_B 0x41c
+#define DC_DISP_V_PULSE0_POSITION_C 0x41d
+#define DC_DISP_V_PULSE1_CONTROL 0x41e
+#define DC_DISP_V_PULSE1_POSITION_A 0x41f
+#define DC_DISP_V_PULSE1_POSITION_B 0x420
+#define DC_DISP_V_PULSE1_POSITION_C 0x421
+#define DC_DISP_V_PULSE2_CONTROL 0x422
+#define DC_DISP_V_PULSE2_POSITION_A 0x423
+#define DC_DISP_V_PULSE3_CONTROL 0x424
+#define DC_DISP_V_PULSE3_POSITION_A 0x425
+#define DC_DISP_M0_CONTROL 0x426
+#define DC_DISP_M1_CONTROL 0x427
+#define DC_DISP_DI_CONTROL 0x428
+#define DC_DISP_PP_CONTROL 0x429
+#define DC_DISP_PP_SELECT_A 0x42a
+#define DC_DISP_PP_SELECT_B 0x42b
+#define DC_DISP_PP_SELECT_C 0x42c
+#define DC_DISP_PP_SELECT_D 0x42d
+
+#define PULSE_MODE_NORMAL (0 << 3)
+#define PULSE_MODE_ONE_CLOCK (1 << 3)
+#define PULSE_POLARITY_HIGH (0 << 4)
+#define PULSE_POLARITY_LOW (1 << 4)
+#define PULSE_QUAL_ALWAYS (0 << 6)
+#define PULSE_QUAL_VACTIVE (2 << 6)
+#define PULSE_QUAL_VACTIVE1 (3 << 6)
+#define PULSE_LAST_START_A (0 << 8)
+#define PULSE_LAST_END_A (1 << 8)
+#define PULSE_LAST_START_B (2 << 8)
+#define PULSE_LAST_END_B (3 << 8)
+#define PULSE_LAST_START_C (4 << 8)
+#define PULSE_LAST_END_C (5 << 8)
+#define PULSE_LAST_START_D (6 << 8)
+#define PULSE_LAST_END_D (7 << 8)
+
+#define PULSE_START(x) (((x) & 0xfff) << 0)
+#define PULSE_END(x) (((x) & 0xfff) << 16)
+
+#define DC_DISP_DISP_CLOCK_CONTROL 0x42e
+#define PIXEL_CLK_DIVIDER_PCD1 (0 << 8)
+#define PIXEL_CLK_DIVIDER_PCD1H (1 << 8)
+#define PIXEL_CLK_DIVIDER_PCD2 (2 << 8)
+#define PIXEL_CLK_DIVIDER_PCD3 (3 << 8)
+#define PIXEL_CLK_DIVIDER_PCD4 (4 << 8)
+#define PIXEL_CLK_DIVIDER_PCD6 (5 << 8)
+#define PIXEL_CLK_DIVIDER_PCD8 (6 << 8)
+#define PIXEL_CLK_DIVIDER_PCD9 (7 << 8)
+#define PIXEL_CLK_DIVIDER_PCD12 (8 << 8)
+#define PIXEL_CLK_DIVIDER_PCD16 (9 << 8)
+#define PIXEL_CLK_DIVIDER_PCD18 (10 << 8)
+#define PIXEL_CLK_DIVIDER_PCD24 (11 << 8)
+#define PIXEL_CLK_DIVIDER_PCD13 (12 << 8)
+#define SHIFT_CLK_DIVIDER(x) ((x) & 0xff)
+
+#define DC_DISP_DISP_INTERFACE_CONTROL 0x42f
+#define DISP_DATA_FORMAT_DF1P1C (0 << 0)
+#define DISP_DATA_FORMAT_DF1P2C24B (1 << 0)
+#define DISP_DATA_FORMAT_DF1P2C18B (2 << 0)
+#define DISP_DATA_FORMAT_DF1P2C16B (3 << 0)
+#define DISP_DATA_FORMAT_DF2S (4 << 0)
+#define DISP_DATA_FORMAT_DF3S (5 << 0)
+#define DISP_DATA_FORMAT_DFSPI (6 << 0)
+#define DISP_DATA_FORMAT_DF1P3C24B (7 << 0)
+#define DISP_DATA_FORMAT_DF1P3C18B (8 << 0)
+#define DISP_ALIGNMENT_MSB (0 << 8)
+#define DISP_ALIGNMENT_LSB (1 << 8)
+#define DISP_ORDER_RED_BLUE (0 << 9)
+#define DISP_ORDER_BLUE_RED (1 << 9)
+
+#define DC_DISP_DISP_COLOR_CONTROL 0x430
+#define BASE_COLOR_SIZE666 ( 0 << 0)
+#define BASE_COLOR_SIZE111 ( 1 << 0)
+#define BASE_COLOR_SIZE222 ( 2 << 0)
+#define BASE_COLOR_SIZE333 ( 3 << 0)
+#define BASE_COLOR_SIZE444 ( 4 << 0)
+#define BASE_COLOR_SIZE555 ( 5 << 0)
+#define BASE_COLOR_SIZE565 ( 6 << 0)
+#define BASE_COLOR_SIZE332 ( 7 << 0)
+#define BASE_COLOR_SIZE888 ( 8 << 0)
+#define BASE_COLOR_SIZE101010 (10 << 0)
+#define BASE_COLOR_SIZE121212 (12 << 0)
+#define DITHER_CONTROL_MASK (3 << 8)
+#define DITHER_CONTROL_DISABLE (0 << 8)
+#define DITHER_CONTROL_ORDERED (2 << 8)
+#define DITHER_CONTROL_ERRDIFF (3 << 8)
+#define BASE_COLOR_SIZE_MASK (0xf << 0)
+#define BASE_COLOR_SIZE_666 ( 0 << 0)
+#define BASE_COLOR_SIZE_111 ( 1 << 0)
+#define BASE_COLOR_SIZE_222 ( 2 << 0)
+#define BASE_COLOR_SIZE_333 ( 3 << 0)
+#define BASE_COLOR_SIZE_444 ( 4 << 0)
+#define BASE_COLOR_SIZE_555 ( 5 << 0)
+#define BASE_COLOR_SIZE_565 ( 6 << 0)
+#define BASE_COLOR_SIZE_332 ( 7 << 0)
+#define BASE_COLOR_SIZE_888 ( 8 << 0)
+#define BASE_COLOR_SIZE_101010 ( 10 << 0)
+#define BASE_COLOR_SIZE_121212 ( 12 << 0)
+
+#define DC_DISP_SHIFT_CLOCK_OPTIONS 0x431
+#define SC1_H_QUALIFIER_NONE (1 << 16)
+#define SC0_H_QUALIFIER_NONE (1 << 0)
+
+#define DC_DISP_DATA_ENABLE_OPTIONS 0x432
+#define DE_SELECT_ACTIVE_BLANK (0 << 0)
+#define DE_SELECT_ACTIVE (1 << 0)
+#define DE_SELECT_ACTIVE_IS (2 << 0)
+#define DE_CONTROL_ONECLK (0 << 2)
+#define DE_CONTROL_NORMAL (1 << 2)
+#define DE_CONTROL_EARLY_EXT (2 << 2)
+#define DE_CONTROL_EARLY (3 << 2)
+#define DE_CONTROL_ACTIVE_BLANK (4 << 2)
+
+#define DC_DISP_SERIAL_INTERFACE_OPTIONS 0x433
+#define DC_DISP_LCD_SPI_OPTIONS 0x434
+#define DC_DISP_BORDER_COLOR 0x435
+#define DC_DISP_COLOR_KEY0_LOWER 0x436
+#define DC_DISP_COLOR_KEY0_UPPER 0x437
+#define DC_DISP_COLOR_KEY1_LOWER 0x438
+#define DC_DISP_COLOR_KEY1_UPPER 0x439
+
+#define DC_DISP_CURSOR_FOREGROUND 0x43c
+#define DC_DISP_CURSOR_BACKGROUND 0x43d
+
+#define DC_DISP_CURSOR_START_ADDR 0x43e
+#define CURSOR_CLIP_DISPLAY (0 << 28)
+#define CURSOR_CLIP_WIN_A (1 << 28)
+#define CURSOR_CLIP_WIN_B (2 << 28)
+#define CURSOR_CLIP_WIN_C (3 << 28)
+#define CURSOR_SIZE_32x32 (0 << 24)
+#define CURSOR_SIZE_64x64 (1 << 24)
+#define CURSOR_SIZE_128x128 (2 << 24)
+#define CURSOR_SIZE_256x256 (3 << 24)
+#define DC_DISP_CURSOR_START_ADDR_NS 0x43f
+
+#define DC_DISP_CURSOR_POSITION 0x440
+#define DC_DISP_CURSOR_POSITION_NS 0x441
+
+#define DC_DISP_INIT_SEQ_CONTROL 0x442
+#define DC_DISP_SPI_INIT_SEQ_DATA_A 0x443
+#define DC_DISP_SPI_INIT_SEQ_DATA_B 0x444
+#define DC_DISP_SPI_INIT_SEQ_DATA_C 0x445
+#define DC_DISP_SPI_INIT_SEQ_DATA_D 0x446
+
+#define DC_DISP_DC_MCCIF_FIFOCTRL 0x480
+#define DC_DISP_MCCIF_DISPLAY0A_HYST 0x481
+#define DC_DISP_MCCIF_DISPLAY0B_HYST 0x482
+#define DC_DISP_MCCIF_DISPLAY1A_HYST 0x483
+#define DC_DISP_MCCIF_DISPLAY1B_HYST 0x484
+
+#define DC_DISP_DAC_CRT_CTRL 0x4c0
+#define DC_DISP_DISP_MISC_CONTROL 0x4c1
+#define DC_DISP_SD_CONTROL 0x4c2
+#define DC_DISP_SD_CSC_COEFF 0x4c3
+#define DC_DISP_SD_LUT(x) (0x4c4 + (x))
+#define DC_DISP_SD_FLICKER_CONTROL 0x4cd
+#define DC_DISP_DC_PIXEL_COUNT 0x4ce
+#define DC_DISP_SD_HISTOGRAM(x) (0x4cf + (x))
+#define DC_DISP_SD_BL_PARAMETERS 0x4d7
+#define DC_DISP_SD_BL_TF(x) (0x4d8 + (x))
+#define DC_DISP_SD_BL_CONTROL 0x4dc
+#define DC_DISP_SD_HW_K_VALUES 0x4dd
+#define DC_DISP_SD_MAN_K_VALUES 0x4de
+
+#define DC_DISP_BLEND_BACKGROUND_COLOR 0x4e4
+#define BACKGROUND_COLOR_ALPHA(x) (((x) & 0xff) << 24)
+#define BACKGROUND_COLOR_BLUE(x) (((x) & 0xff) << 16)
+#define BACKGROUND_COLOR_GREEN(x) (((x) & 0xff) << 8)
+#define BACKGROUND_COLOR_RED(x) (((x) & 0xff) << 0)
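+
+/*
+ * Illustration (not part of the original header): an opaque mid-grey
+ * background would be programmed as
+ *
+ *	BACKGROUND_COLOR_ALPHA(0xff) | BACKGROUND_COLOR_BLUE(0x80) |
+ *	BACKGROUND_COLOR_GREEN(0x80) | BACKGROUND_COLOR_RED(0x80)
+ *
+ * which evaluates to 0xff808080.
+ */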
+
+#define DC_DISP_INTERLACE_CONTROL 0x4e5
+#define INTERLACE_STATUS (1 << 2)
+#define INTERLACE_START (1 << 1)
+#define INTERLACE_ENABLE (1 << 0)
+
+#define DC_DISP_CURSOR_START_ADDR_HI 0x4ec
+#define DC_DISP_BLEND_CURSOR_CONTROL 0x4f1
+#define CURSOR_COMPOSITION_MODE_BLEND (0 << 25)
+#define CURSOR_COMPOSITION_MODE_XOR (1 << 25)
+#define CURSOR_MODE_LEGACY (0 << 24)
+#define CURSOR_MODE_NORMAL (1 << 24)
+#define CURSOR_DST_BLEND_ZERO (0 << 16)
+#define CURSOR_DST_BLEND_K1 (1 << 16)
+#define CURSOR_DST_BLEND_NEG_K1_TIMES_SRC (2 << 16)
+#define CURSOR_DST_BLEND_MASK (3 << 16)
+#define CURSOR_SRC_BLEND_K1 (0 << 8)
+#define CURSOR_SRC_BLEND_K1_TIMES_SRC (1 << 8)
+#define CURSOR_SRC_BLEND_MASK (3 << 8)
+#define CURSOR_ALPHA 0xff
+
+#define DC_WIN_CORE_ACT_CONTROL 0x50e
+#define VCOUNTER (0 << 0)
+#define HCOUNTER (1 << 0)
+
+#define DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLA 0x543
+#define LATENCY_CTL_MODE_ENABLE (1 << 2)
+
+#define DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLB 0x544
+#define WATERMARK_MASK 0x1fffffff
+
+#define DC_WIN_CORE_PRECOMP_WGRP_PIPE_METER 0x560
+#define PIPE_METER_INT(x) (((x) & 0xff) << 8)
+#define PIPE_METER_FRAC(x) (((x) & 0xff) << 0)
+
+#define DC_WIN_CORE_IHUB_WGRP_POOL_CONFIG 0x561
+#define MEMPOOL_ENTRIES(x) (((x) & 0xffff) << 0)
+
+#define DC_WIN_CORE_IHUB_WGRP_FETCH_METER 0x562
+#define SLOTS(x) (((x) & 0xff) << 0)
+
+#define DC_WIN_CORE_IHUB_LINEBUF_CONFIG 0x563
+#define MODE_TWO_LINES (0 << 14)
+#define MODE_FOUR_LINES (1 << 14)
+
+#define DC_WIN_CORE_IHUB_THREAD_GROUP 0x568
+#define THREAD_NUM_MASK (0x1f << 1)
+#define THREAD_NUM(x) (((x) & 0x1f) << 1)
+#define THREAD_GROUP_ENABLE (1 << 0)
+
+#define DC_WIN_H_FILTER_P(p) (0x601 + (p))
+#define DC_WIN_V_FILTER_P(p) (0x619 + (p))
+
+#define DC_WIN_CSC_YOF 0x611
+#define DC_WIN_CSC_KYRGB 0x612
+#define DC_WIN_CSC_KUR 0x613
+#define DC_WIN_CSC_KVR 0x614
+#define DC_WIN_CSC_KUG 0x615
+#define DC_WIN_CSC_KVG 0x616
+#define DC_WIN_CSC_KUB 0x617
+#define DC_WIN_CSC_KVB 0x618
+
+#define DC_WIN_WIN_OPTIONS 0x700
+#define H_DIRECTION (1 << 0)
+#define V_DIRECTION (1 << 2)
+#define COLOR_EXPAND (1 << 6)
+#define H_FILTER (1 << 8)
+#define V_FILTER (1 << 10)
+#define CSC_ENABLE (1 << 18)
+#define WIN_ENABLE (1 << 30)
+
+#define DC_WIN_BYTE_SWAP 0x701
+#define BYTE_SWAP_NOSWAP (0 << 0)
+#define BYTE_SWAP_SWAP2 (1 << 0)
+#define BYTE_SWAP_SWAP4 (2 << 0)
+#define BYTE_SWAP_SWAP4HW (3 << 0)
+
+#define DC_WIN_BUFFER_CONTROL 0x702
+#define BUFFER_CONTROL_HOST (0 << 0)
+#define BUFFER_CONTROL_VI (1 << 0)
+#define BUFFER_CONTROL_EPP (2 << 0)
+#define BUFFER_CONTROL_MPEGE (3 << 0)
+#define BUFFER_CONTROL_SB2D (4 << 0)
+
+#define DC_WIN_COLOR_DEPTH 0x703
+#define WIN_COLOR_DEPTH_P1 0
+#define WIN_COLOR_DEPTH_P2 1
+#define WIN_COLOR_DEPTH_P4 2
+#define WIN_COLOR_DEPTH_P8 3
+#define WIN_COLOR_DEPTH_B4G4R4A4 4
+#define WIN_COLOR_DEPTH_B5G5R5A1 5
+#define WIN_COLOR_DEPTH_B5G6R5 6
+#define WIN_COLOR_DEPTH_A1B5G5R5 7
+#define WIN_COLOR_DEPTH_B8G8R8A8 12
+#define WIN_COLOR_DEPTH_R8G8B8A8 13
+#define WIN_COLOR_DEPTH_B6x2G6x2R6x2A8 14
+#define WIN_COLOR_DEPTH_R6x2G6x2B6x2A8 15
+#define WIN_COLOR_DEPTH_YCbCr422 16
+#define WIN_COLOR_DEPTH_YUV422 17
+#define WIN_COLOR_DEPTH_YCbCr420P 18
+#define WIN_COLOR_DEPTH_YUV420P 19
+#define WIN_COLOR_DEPTH_YCbCr422P 20
+#define WIN_COLOR_DEPTH_YUV422P 21
+#define WIN_COLOR_DEPTH_YCbCr422R 22
+#define WIN_COLOR_DEPTH_YUV422R 23
+#define WIN_COLOR_DEPTH_YCbCr422RA 24
+#define WIN_COLOR_DEPTH_YUV422RA 25
+#define WIN_COLOR_DEPTH_R4G4B4A4 27
+#define WIN_COLOR_DEPTH_R5G5B5A 28
+#define WIN_COLOR_DEPTH_AR5G5B5 29
+#define WIN_COLOR_DEPTH_B5G5R5X1 30
+#define WIN_COLOR_DEPTH_X1B5G5R5 31
+#define WIN_COLOR_DEPTH_R5G5B5X1 32
+#define WIN_COLOR_DEPTH_X1R5G5B5 33
+#define WIN_COLOR_DEPTH_R5G6B5 34
+#define WIN_COLOR_DEPTH_A8R8G8B8 35
+#define WIN_COLOR_DEPTH_A8B8G8R8 36
+#define WIN_COLOR_DEPTH_B8G8R8X8 37
+#define WIN_COLOR_DEPTH_R8G8B8X8 38
+#define WIN_COLOR_DEPTH_YCbCr444P 41
+#define WIN_COLOR_DEPTH_YCrCb420SP 42
+#define WIN_COLOR_DEPTH_YCbCr420SP 43
+#define WIN_COLOR_DEPTH_YCrCb422SP 44
+#define WIN_COLOR_DEPTH_YCbCr422SP 45
+#define WIN_COLOR_DEPTH_YCrCb444SP 48
+#define WIN_COLOR_DEPTH_YCbCr444SP 49
+#define WIN_COLOR_DEPTH_X8B8G8R8 65
+#define WIN_COLOR_DEPTH_X8R8G8B8 66
+
+#define DC_WIN_POSITION 0x704
+#define H_POSITION(x) (((x) & 0x1fff) << 0) /* XXX 0x7fff on Tegra186 */
+#define V_POSITION(x) (((x) & 0x1fff) << 16) /* XXX 0x7fff on Tegra186 */
+
+#define DC_WIN_SIZE 0x705
+#define H_SIZE(x) (((x) & 0x1fff) << 0) /* XXX 0x7fff on Tegra186 */
+#define V_SIZE(x) (((x) & 0x1fff) << 16) /* XXX 0x7fff on Tegra186 */
+
+#define DC_WIN_PRESCALED_SIZE 0x706
+#define H_PRESCALED_SIZE(x) (((x) & 0x7fff) << 0)
+#define V_PRESCALED_SIZE(x) (((x) & 0x1fff) << 16) /* XXX 0x7fff on Tegra186 */
+
+#define DC_WIN_H_INITIAL_DDA 0x707
+#define DC_WIN_V_INITIAL_DDA 0x708
+#define DC_WIN_DDA_INC 0x709
+#define H_DDA_INC(x) (((x) & 0xffff) << 0)
+#define V_DDA_INC(x) (((x) & 0xffff) << 16)
+
+#define DC_WIN_LINE_STRIDE 0x70a
+#define DC_WIN_BUF_STRIDE 0x70b
+#define DC_WIN_UV_BUF_STRIDE 0x70c
+#define DC_WIN_BUFFER_ADDR_MODE 0x70d
+#define DC_WIN_BUFFER_ADDR_MODE_LINEAR (0 << 0)
+#define DC_WIN_BUFFER_ADDR_MODE_TILE (1 << 0)
+#define DC_WIN_BUFFER_ADDR_MODE_LINEAR_UV (0 << 16)
+#define DC_WIN_BUFFER_ADDR_MODE_TILE_UV (1 << 16)
+
+#define DC_WIN_DV_CONTROL 0x70e
+
+#define DC_WIN_BLEND_NOKEY 0x70f
+#define BLEND_WEIGHT1(x) (((x) & 0xff) << 16)
+#define BLEND_WEIGHT0(x) (((x) & 0xff) << 8)
+
+#define DC_WIN_BLEND_1WIN 0x710
+#define BLEND_CONTROL_FIX (0 << 2)
+#define BLEND_CONTROL_ALPHA (1 << 2)
+#define BLEND_COLOR_KEY_NONE (0 << 0)
+#define BLEND_COLOR_KEY_0 (1 << 0)
+#define BLEND_COLOR_KEY_1 (2 << 0)
+#define BLEND_COLOR_KEY_BOTH (3 << 0)
+
+#define DC_WIN_BLEND_2WIN_X 0x711
+#define BLEND_CONTROL_DEPENDENT (2 << 2)
+
+#define DC_WIN_BLEND_2WIN_Y 0x712
+#define DC_WIN_BLEND_3WIN_XY 0x713
+
+#define DC_WIN_HP_FETCH_CONTROL 0x714
+
+#define DC_WINBUF_START_ADDR 0x800
+#define DC_WINBUF_START_ADDR_NS 0x801
+#define DC_WINBUF_START_ADDR_U 0x802
+#define DC_WINBUF_START_ADDR_U_NS 0x803
+#define DC_WINBUF_START_ADDR_V 0x804
+#define DC_WINBUF_START_ADDR_V_NS 0x805
+
+#define DC_WINBUF_ADDR_H_OFFSET 0x806
+#define DC_WINBUF_ADDR_H_OFFSET_NS 0x807
+#define DC_WINBUF_ADDR_V_OFFSET 0x808
+#define DC_WINBUF_ADDR_V_OFFSET_NS 0x809
+
+#define DC_WINBUF_UFLOW_STATUS 0x80a
+#define DC_WINBUF_SURFACE_KIND 0x80b
+#define DC_WINBUF_SURFACE_KIND_PITCH (0 << 0)
+#define DC_WINBUF_SURFACE_KIND_TILED (1 << 0)
+#define DC_WINBUF_SURFACE_KIND_BLOCK (2 << 0)
+#define DC_WINBUF_SURFACE_KIND_BLOCK_HEIGHT(x) (((x) & 0x7) << 4)
+
+#define DC_WINBUF_START_ADDR_HI 0x80d
+
+#define DC_WINBUF_START_ADDR_HI_U 0x80f
+#define DC_WINBUF_START_ADDR_HI_V 0x811
+
+#define DC_WINBUF_CDE_CONTROL 0x82f
+#define ENABLE_SURFACE (1 << 0)
+
+#define DC_WINBUF_AD_UFLOW_STATUS 0xbca
+#define DC_WINBUF_BD_UFLOW_STATUS 0xdca
+#define DC_WINBUF_CD_UFLOW_STATUS 0xfca
+
+/* Tegra186 and later */
+#define DC_DISP_CORE_SOR_SET_CONTROL(x) (0x403 + (x))
+#define PROTOCOL_MASK (0xf << 8)
+#define PROTOCOL_SINGLE_TMDS_A (0x1 << 8)
+
+#define DC_DISP_PCALC_HEAD_SET_CROPPED_POINT_IN_CURSOR 0x442
+#define DC_DISP_PCALC_HEAD_SET_CROPPED_SIZE_IN_CURSOR 0x446
+
+#define DC_WINC_PRECOMP_WGRP_PIPE_CAPA 0x500
+#define DC_WINC_PRECOMP_WGRP_PIPE_CAPB 0x501
+#define DC_WINC_PRECOMP_WGRP_PIPE_CAPC 0x502
+#define MAX_PIXELS_5TAP444(x) ((x) & 0xffff)
+#define DC_WINC_PRECOMP_WGRP_PIPE_CAPD 0x503
+#define DC_WINC_PRECOMP_WGRP_PIPE_CAPE 0x504
+#define MAX_PIXELS_2TAP444(x) ((x) & 0xffff)
+#define DC_WINC_PRECOMP_WGRP_PIPE_CAPF 0x505
+
+#define DC_WIN_CORE_WINDOWGROUP_SET_CONTROL 0x702
+#define OWNER_MASK (0xf << 0)
+#define OWNER(x) (((x) & 0xf) << 0)
+
+#define DC_WIN_CROPPED_SIZE 0x706
+
+#define DC_WIN_SET_INPUT_SCALER_H_START_PHASE 0x707
+#define DC_WIN_SET_INPUT_SCALER_V_START_PHASE 0x708
+
+#define DC_WIN_PLANAR_STORAGE 0x709
+#define PITCH(x) (((x) >> 6) & 0x1fff)
+
+#define DC_WIN_PLANAR_STORAGE_UV 0x70a
+#define PITCH_U(x) ((((x) >> 6) & 0x1fff) << 0)
+#define PITCH_V(x) ((((x) >> 6) & 0x1fff) << 16)
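+
+/*
+ * Illustration (not part of the original header): the PITCH helpers
+ * convert byte strides into the 64-byte units the hardware expects,
+ * e.g. PITCH(4096) == 64, and PITCH_V(8192) places 128 in bits 28:16.
+ */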
+
+#define DC_WIN_SET_INPUT_SCALER_HPHASE_INCR 0x70b
+#define DC_WIN_SET_INPUT_SCALER_VPHASE_INCR 0x70c
+
+#define DC_WIN_SET_PARAMS 0x70d
+#define CLAMP_BEFORE_BLEND (1 << 15)
+#define DEGAMMA_NONE (0 << 13)
+#define DEGAMMA_SRGB (1 << 13)
+#define DEGAMMA_YUV8_10 (2 << 13)
+#define DEGAMMA_YUV12 (3 << 13)
+#define INPUT_RANGE_BYPASS (0 << 10)
+#define INPUT_RANGE_LIMITED (1 << 10)
+#define INPUT_RANGE_FULL (2 << 10)
+#define COLOR_SPACE_RGB (0 << 8)
+#define COLOR_SPACE_YUV_601 (1 << 8)
+#define COLOR_SPACE_YUV_709 (2 << 8)
+#define COLOR_SPACE_YUV_2020 (3 << 8)
+
+#define DC_WIN_WINDOWGROUP_SET_CONTROL_INPUT_SCALER 0x70e
+#define HORIZONTAL_TAPS_2 (1 << 3)
+#define HORIZONTAL_TAPS_5 (4 << 3)
+#define VERTICAL_TAPS_2 (1 << 0)
+#define VERTICAL_TAPS_5 (4 << 0)
+
+#define DC_WIN_WINDOWGROUP_SET_INPUT_SCALER_COEFF 0x70f
+#define COEFF_INDEX(x) (((x) & 0xff) << 15)
+#define COEFF_DATA(x) (((x) & 0x3ff) << 0)
+
+#define DC_WIN_WINDOWGROUP_SET_INPUT_SCALER_USAGE 0x711
+#define INPUT_SCALER_USE422 (1 << 2)
+#define INPUT_SCALER_VBYPASS (1 << 1)
+#define INPUT_SCALER_HBYPASS (1 << 0)
+
+#define DC_WIN_BLEND_LAYER_CONTROL 0x716
+#define COLOR_KEY_NONE (0 << 25)
+#define COLOR_KEY_SRC (1 << 25)
+#define COLOR_KEY_DST (2 << 25)
+#define BLEND_BYPASS (1 << 24)
+#define K2(x) (((x) & 0xff) << 16)
+#define K1(x) (((x) & 0xff) << 8)
+#define WINDOW_LAYER_DEPTH(x) (((x) & 0xff) << 0)
+
+#define DC_WIN_BLEND_MATCH_SELECT 0x717
+#define BLEND_FACTOR_DST_ALPHA_ZERO (0 << 12)
+#define BLEND_FACTOR_DST_ALPHA_ONE (1 << 12)
+#define BLEND_FACTOR_DST_ALPHA_NEG_K1_TIMES_SRC (2 << 12)
+#define BLEND_FACTOR_DST_ALPHA_K2 (3 << 12)
+#define BLEND_FACTOR_SRC_ALPHA_ZERO (0 << 8)
+#define BLEND_FACTOR_SRC_ALPHA_K1 (1 << 8)
+#define BLEND_FACTOR_SRC_ALPHA_K2 (2 << 8)
+#define BLEND_FACTOR_SRC_ALPHA_NEG_K1_TIMES_DST (3 << 8)
+#define BLEND_FACTOR_DST_COLOR_ZERO (0 << 4)
+#define BLEND_FACTOR_DST_COLOR_ONE (1 << 4)
+#define BLEND_FACTOR_DST_COLOR_K1 (2 << 4)
+#define BLEND_FACTOR_DST_COLOR_K2 (3 << 4)
+#define BLEND_FACTOR_DST_COLOR_K1_TIMES_DST (4 << 4)
+#define BLEND_FACTOR_DST_COLOR_NEG_K1_TIMES_DST (5 << 4)
+#define BLEND_FACTOR_DST_COLOR_NEG_K1_TIMES_SRC (6 << 4)
+#define BLEND_FACTOR_DST_COLOR_NEG_K1 (7 << 4)
+#define BLEND_FACTOR_SRC_COLOR_ZERO (0 << 0)
+#define BLEND_FACTOR_SRC_COLOR_ONE (1 << 0)
+#define BLEND_FACTOR_SRC_COLOR_K1 (2 << 0)
+#define BLEND_FACTOR_SRC_COLOR_K1_TIMES_DST (3 << 0)
+#define BLEND_FACTOR_SRC_COLOR_NEG_K1_TIMES_DST (4 << 0)
+#define BLEND_FACTOR_SRC_COLOR_K1_TIMES_SRC (5 << 0)
+
+#define DC_WIN_BLEND_NOMATCH_SELECT 0x718
+
+#define DC_WIN_PRECOMP_WGRP_PARAMS 0x724
+#define SWAP_UV (1 << 0)
+
+#define DC_WIN_WINDOW_SET_CONTROL 0x730
+#define CONTROL_CSC_ENABLE (1 << 5)
+
+#define DC_WINBUF_CROPPED_POINT 0x806
+#define OFFSET_Y(x) (((x) & 0xffff) << 16)
+#define OFFSET_X(x) (((x) & 0xffff) << 0)
+
+#endif /* TEGRA_DC_H */
diff --git a/drivers/gpu/drm/tegra/dp.c b/drivers/gpu/drm/tegra/dp.c
new file mode 100644
index 000000000..08fbd8f15
--- /dev/null
+++ b/drivers/gpu/drm/tegra/dp.c
@@ -0,0 +1,885 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright (C) 2013-2019 NVIDIA Corporation
+ * Copyright (C) 2015 Rob Clark
+ */
+
+#include <drm/display/drm_dp_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_print.h>
+
+#include "dp.h"
+
+static const u8 drm_dp_edp_revisions[] = { 0x11, 0x12, 0x13, 0x14 };
+
+static void drm_dp_link_caps_reset(struct drm_dp_link_caps *caps)
+{
+ caps->enhanced_framing = false;
+ caps->tps3_supported = false;
+ caps->fast_training = false;
+ caps->channel_coding = false;
+ caps->alternate_scrambler_reset = false;
+}
+
+void drm_dp_link_caps_copy(struct drm_dp_link_caps *dest,
+ const struct drm_dp_link_caps *src)
+{
+ dest->enhanced_framing = src->enhanced_framing;
+ dest->tps3_supported = src->tps3_supported;
+ dest->fast_training = src->fast_training;
+ dest->channel_coding = src->channel_coding;
+ dest->alternate_scrambler_reset = src->alternate_scrambler_reset;
+}
+
+static void drm_dp_link_reset(struct drm_dp_link *link)
+{
+ unsigned int i;
+
+ if (!link)
+ return;
+
+ link->revision = 0;
+ link->max_rate = 0;
+ link->max_lanes = 0;
+
+ drm_dp_link_caps_reset(&link->caps);
+ link->aux_rd_interval.cr = 0;
+ link->aux_rd_interval.ce = 0;
+ link->edp = 0;
+
+ link->rate = 0;
+ link->lanes = 0;
+
+ for (i = 0; i < DP_MAX_SUPPORTED_RATES; i++)
+ link->rates[i] = 0;
+
+ link->num_rates = 0;
+}
+
+/**
+ * drm_dp_link_add_rate() - add a rate to the list of supported rates
+ * @link: the link to add the rate to
+ * @rate: the rate to add
+ *
+ * Add a link rate to the list of supported link rates.
+ *
+ * Returns:
+ * 0 on success or one of the following negative error codes on failure:
+ * - ENOSPC if the maximum number of supported rates has been reached
+ * - EEXIST if the link already supports this rate
+ *
+ * See also:
+ * drm_dp_link_remove_rate()
+ */
+int drm_dp_link_add_rate(struct drm_dp_link *link, unsigned long rate)
+{
+ unsigned int i, pivot;
+
+ if (link->num_rates == DP_MAX_SUPPORTED_RATES)
+ return -ENOSPC;
+
+ for (pivot = 0; pivot < link->num_rates; pivot++)
+ if (rate <= link->rates[pivot])
+ break;
+
+ if (pivot != link->num_rates && rate == link->rates[pivot])
+ return -EEXIST;
+
+ for (i = link->num_rates; i > pivot; i--)
+ link->rates[i] = link->rates[i - 1];
+
+ link->rates[pivot] = rate;
+ link->num_rates++;
+
+ return 0;
+}
+
+/**
+ * drm_dp_link_remove_rate() - remove a rate from the list of supported rates
+ * @link: the link from which to remove the rate
+ * @rate: the rate to remove
+ *
+ * Removes a link rate from the list of supported link rates.
+ *
+ * Returns:
+ * 0 on success or one of the following negative error codes on failure:
+ * - EINVAL if the specified rate is not among the supported rates
+ *
+ * See also:
+ * drm_dp_link_add_rate()
+ */
+int drm_dp_link_remove_rate(struct drm_dp_link *link, unsigned long rate)
+{
+ unsigned int i;
+
+ for (i = 0; i < link->num_rates; i++)
+ if (rate == link->rates[i])
+ break;
+
+ if (i == link->num_rates)
+ return -EINVAL;
+
+ link->num_rates--;
+
+ while (i < link->num_rates) {
+ link->rates[i] = link->rates[i + 1];
+ i++;
+ }
+
+ return 0;
+}
+
+/**
+ * drm_dp_link_update_rates() - normalize the supported link rates array
+ * @link: the link for which to normalize the supported link rates
+ *
+ * Users should call this function after they've manually modified the array
+ * of supported link rates. This function removes any stale entries, compacts
+ * the array and updates the supported link rate count. Note that calling the
+ * drm_dp_link_remove_rate() function already does this janitorial work.
+ *
+ * See also:
+ * drm_dp_link_add_rate(), drm_dp_link_remove_rate()
+ */
+void drm_dp_link_update_rates(struct drm_dp_link *link)
+{
+ unsigned int i, count = 0;
+
+ for (i = 0; i < link->num_rates; i++) {
+ if (link->rates[i] != 0)
+ link->rates[count++] = link->rates[i];
+ }
+
+ for (i = count; i < link->num_rates; i++)
+ link->rates[i] = 0;
+
+ link->num_rates = count;
+}
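+
+/*
+ * Illustration (hypothetical caller, not part of the driver): to drop
+ * all link rates below some cut-off, zero out the entries and then
+ * normalize the array; the 270000 kHz threshold here is made up:
+ *
+ *	unsigned int i;
+ *
+ *	for (i = 0; i < link->num_rates; i++)
+ *		if (link->rates[i] < 270000)
+ *			link->rates[i] = 0;
+ *
+ *	drm_dp_link_update_rates(link);
+ */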
+
+/**
+ * drm_dp_link_probe() - probe a DisplayPort link for capabilities
+ * @aux: DisplayPort AUX channel
+ * @link: pointer to structure in which to return link capabilities
+ *
+ * The structure filled in by this function can usually be passed directly
+ * into drm_dp_link_power_up() and drm_dp_link_configure() to power up and
+ * configure the link based on the link's capabilities.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_dp_link_probe(struct drm_dp_aux *aux, struct drm_dp_link *link)
+{
+ u8 dpcd[DP_RECEIVER_CAP_SIZE], value;
+ unsigned int rd_interval;
+ int err;
+
+ drm_dp_link_reset(link);
+
+ err = drm_dp_dpcd_read(aux, DP_DPCD_REV, dpcd, sizeof(dpcd));
+ if (err < 0)
+ return err;
+
+ link->revision = dpcd[DP_DPCD_REV];
+ link->max_rate = drm_dp_max_link_rate(dpcd);
+ link->max_lanes = drm_dp_max_lane_count(dpcd);
+
+ link->caps.enhanced_framing = drm_dp_enhanced_frame_cap(dpcd);
+ link->caps.tps3_supported = drm_dp_tps3_supported(dpcd);
+ link->caps.fast_training = drm_dp_fast_training_cap(dpcd);
+ link->caps.channel_coding = drm_dp_channel_coding_supported(dpcd);
+
+ if (drm_dp_alternate_scrambler_reset_cap(dpcd)) {
+ link->caps.alternate_scrambler_reset = true;
+
+ err = drm_dp_dpcd_readb(aux, DP_EDP_DPCD_REV, &value);
+ if (err < 0)
+ return err;
+
+ if (value >= ARRAY_SIZE(drm_dp_edp_revisions))
+ DRM_ERROR("unsupported eDP version: %02x\n", value);
+ else
+ link->edp = drm_dp_edp_revisions[value];
+ }
+
+ /*
+ * The DPCD stores the AUX read interval in units of 4 ms. There are
+ * two special cases:
+ *
+ * 1) if the TRAINING_AUX_RD_INTERVAL field is 0, the clock recovery
+ * and channel equalization should use 100 us or 400 us AUX read
+ * intervals, respectively
+ *
+ * 2) for DP v1.4 and above, clock recovery should always use 100 us
+ * AUX read intervals
+ */
+ rd_interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
+ DP_TRAINING_AUX_RD_MASK;
+
+ if (rd_interval > 4) {
+ DRM_DEBUG_KMS("AUX interval %u out of range (max. 4)\n",
+ rd_interval);
+ rd_interval = 4;
+ }
+
+ rd_interval *= 4 * USEC_PER_MSEC;
+
+	if (rd_interval == 0 || link->revision >= DP_DPCD_REV_14)
+		link->aux_rd_interval.cr = 100;
+	else
+		link->aux_rd_interval.cr = rd_interval;
+
+	if (rd_interval == 0)
+		link->aux_rd_interval.ce = 400;
+	else
+		link->aux_rd_interval.ce = rd_interval;
+
+ link->rate = link->max_rate;
+ link->lanes = link->max_lanes;
+
+ /* Parse SUPPORTED_LINK_RATES from eDP 1.4 */
+ if (link->edp >= 0x14) {
+ u8 supported_rates[DP_MAX_SUPPORTED_RATES * 2];
+ unsigned int i;
+ u16 rate;
+
+ err = drm_dp_dpcd_read(aux, DP_SUPPORTED_LINK_RATES,
+ supported_rates,
+ sizeof(supported_rates));
+ if (err < 0)
+ return err;
+
+ for (i = 0; i < DP_MAX_SUPPORTED_RATES; i++) {
+ rate = supported_rates[i * 2 + 1] << 8 |
+ supported_rates[i * 2 + 0];
+
+ drm_dp_link_add_rate(link, rate * 200);
+ }
+ }
+
+ return 0;
+}
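+
+/*
+ * Illustration (hypothetical caller, not part of the driver): the
+ * usual bring-up sequence chains the helpers in this file:
+ *
+ *	struct drm_dp_link link;
+ *	int err;
+ *
+ *	err = drm_dp_link_probe(aux, &link);
+ *	if (err < 0)
+ *		return err;
+ *
+ *	err = drm_dp_link_power_up(aux, &link);
+ *	if (err < 0)
+ *		return err;
+ *
+ *	err = drm_dp_link_configure(aux, &link);
+ *	if (err < 0)
+ *		return err;
+ */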
+
+/**
+ * drm_dp_link_power_up() - power up a DisplayPort link
+ * @aux: DisplayPort AUX channel
+ * @link: pointer to a structure containing the link configuration
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link)
+{
+ u8 value;
+ int err;
+
+ /* DP_SET_POWER register is only available on DPCD v1.1 and later */
+ if (link->revision < 0x11)
+ return 0;
+
+ err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
+ if (err < 0)
+ return err;
+
+ value &= ~DP_SET_POWER_MASK;
+ value |= DP_SET_POWER_D0;
+
+ err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
+ if (err < 0)
+ return err;
+
+ /*
+ * According to the DP 1.1 specification, a "Sink Device must exit the
+ * power saving state within 1 ms" (Section 2.5.3.1, Table 5-52, "Sink
+ * Control Field" (register 0x600).
+	 * Control Field" (register 0x600)).
+ usleep_range(1000, 2000);
+
+ return 0;
+}
+
+/**
+ * drm_dp_link_power_down() - power down a DisplayPort link
+ * @aux: DisplayPort AUX channel
+ * @link: pointer to a structure containing the link configuration
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_dp_link_power_down(struct drm_dp_aux *aux, struct drm_dp_link *link)
+{
+ u8 value;
+ int err;
+
+ /* DP_SET_POWER register is only available on DPCD v1.1 and later */
+ if (link->revision < 0x11)
+ return 0;
+
+ err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
+ if (err < 0)
+ return err;
+
+ value &= ~DP_SET_POWER_MASK;
+ value |= DP_SET_POWER_D3;
+
+ err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+/**
+ * drm_dp_link_configure() - configure a DisplayPort link
+ * @aux: DisplayPort AUX channel
+ * @link: pointer to a structure containing the link configuration
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_dp_link_configure(struct drm_dp_aux *aux, struct drm_dp_link *link)
+{
+ u8 values[2], value;
+ int err;
+
+ if (link->ops && link->ops->configure) {
+ err = link->ops->configure(link);
+ if (err < 0) {
+ DRM_ERROR("failed to configure DP link: %d\n", err);
+ return err;
+ }
+ }
+
+ values[0] = drm_dp_link_rate_to_bw_code(link->rate);
+ values[1] = link->lanes;
+
+ if (link->caps.enhanced_framing)
+ values[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+
+ err = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, values, sizeof(values));
+ if (err < 0)
+ return err;
+
+ if (link->caps.channel_coding)
+ value = DP_SET_ANSI_8B10B;
+ else
+ value = 0;
+
+ err = drm_dp_dpcd_writeb(aux, DP_MAIN_LINK_CHANNEL_CODING_SET, value);
+ if (err < 0)
+ return err;
+
+ if (link->caps.alternate_scrambler_reset) {
+ err = drm_dp_dpcd_writeb(aux, DP_EDP_CONFIGURATION_SET,
+ DP_ALTERNATE_SCRAMBLER_RESET_ENABLE);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * drm_dp_link_choose() - choose the lowest possible configuration for a mode
+ * @link: DRM DP link object
+ * @mode: DRM display mode
+ * @info: DRM display information
+ *
+ * According to the eDP specification, a source should select a configuration
+ * with the lowest number of lanes and the lowest possible link rate that can
+ * match the bitrate requirements of a video mode, without exceeding the
+ * capabilities of the sink.
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int drm_dp_link_choose(struct drm_dp_link *link,
+ const struct drm_display_mode *mode,
+ const struct drm_display_info *info)
+{
+ /* available link symbol clock rates */
+ static const unsigned int rates[3] = { 162000, 270000, 540000 };
+ /* available number of lanes */
+ static const unsigned int lanes[3] = { 1, 2, 4 };
+ unsigned long requirement, capacity;
+ unsigned int rate = link->max_rate;
+ unsigned int i, j;
+
+ /* bandwidth requirement */
+ requirement = mode->clock * info->bpc * 3;
+
+ for (i = 0; i < ARRAY_SIZE(lanes) && lanes[i] <= link->max_lanes; i++) {
+ for (j = 0; j < ARRAY_SIZE(rates) && rates[j] <= rate; j++) {
+ /*
+ * Capacity for this combination of lanes and rate,
+ * factoring in the ANSI 8B/10B encoding.
+ *
+ * Link rates in the DRM DP helpers are really link
+ * symbol frequencies, so a tenth of the actual rate
+ * of the link.
+ */
+ capacity = lanes[i] * (rates[j] * 10) * 8 / 10;
+
+ if (capacity >= requirement) {
+ DRM_DEBUG_KMS("using %u lanes at %u kHz (%lu/%lu kbps)\n",
+ lanes[i], rates[j], requirement,
+ capacity);
+ link->lanes = lanes[i];
+ link->rate = rates[j];
+ return 0;
+ }
+ }
+ }
+
+ return -ERANGE;
+}
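+
+/*
+ * Worked example (illustrative, not part of the driver): a 1920x1080@60
+ * mode with a 148500 kHz pixel clock at 8 bpc requires 148500 * 8 * 3 =
+ * 3564000 kbps. One lane at 162000 carries 162000 * 10 * 8 / 10 =
+ * 1296000 kbps and at 270000 carries 2160000 kbps, so the loops settle
+ * on one lane at 540000 (4320000 kbps), the first combination whose
+ * capacity covers the requirement.
+ */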
+
+/**
+ * DOC: Link training
+ *
+ * These functions contain common logic and helpers to implement DisplayPort
+ * link training.
+ */
+
+/**
+ * drm_dp_link_train_init() - initialize DisplayPort link training state
+ * @train: DisplayPort link training state
+ */
+void drm_dp_link_train_init(struct drm_dp_link_train *train)
+{
+ struct drm_dp_link_train_set *request = &train->request;
+ struct drm_dp_link_train_set *adjust = &train->adjust;
+ unsigned int i;
+
+ for (i = 0; i < 4; i++) {
+ request->voltage_swing[i] = 0;
+ adjust->voltage_swing[i] = 0;
+
+ request->pre_emphasis[i] = 0;
+ adjust->pre_emphasis[i] = 0;
+
+ request->post_cursor[i] = 0;
+ adjust->post_cursor[i] = 0;
+ }
+
+ train->pattern = DP_TRAINING_PATTERN_DISABLE;
+ train->clock_recovered = false;
+ train->channel_equalized = false;
+}
+
+static bool drm_dp_link_train_valid(const struct drm_dp_link_train *train)
+{
+ return train->clock_recovered && train->channel_equalized;
+}
+
+static int drm_dp_link_apply_training(struct drm_dp_link *link)
+{
+ struct drm_dp_link_train_set *request = &link->train.request;
+ unsigned int lanes = link->lanes, *vs, *pe, *pc, i;
+ struct drm_dp_aux *aux = link->aux;
+ u8 values[4], pattern = 0;
+ int err;
+
+ err = link->ops->apply_training(link);
+ if (err < 0) {
+ DRM_ERROR("failed to apply link training: %d\n", err);
+ return err;
+ }
+
+ vs = request->voltage_swing;
+ pe = request->pre_emphasis;
+ pc = request->post_cursor;
+
+ /* write currently selected voltage-swing and pre-emphasis levels */
+ for (i = 0; i < lanes; i++)
+ values[i] = DP_TRAIN_VOLTAGE_SWING_LEVEL(vs[i]) |
+ DP_TRAIN_PRE_EMPHASIS_LEVEL(pe[i]);
+
+ err = drm_dp_dpcd_write(aux, DP_TRAINING_LANE0_SET, values, lanes);
+ if (err < 0) {
+ DRM_ERROR("failed to set training parameters: %d\n", err);
+ return err;
+ }
+
+ /* write currently selected post-cursor level (if supported) */
+ if (link->revision >= 0x12 && link->rate == 540000) {
+ values[0] = values[1] = 0;
+
+ for (i = 0; i < lanes; i++)
+ values[i / 2] |= DP_LANE_POST_CURSOR(i, pc[i]);
+
+ err = drm_dp_dpcd_write(aux, DP_TRAINING_LANE0_1_SET2, values,
+ DIV_ROUND_UP(lanes, 2));
+ if (err < 0) {
+ DRM_ERROR("failed to set post-cursor: %d\n", err);
+ return err;
+ }
+ }
+
+ /* write link pattern */
+ if (link->train.pattern != DP_TRAINING_PATTERN_DISABLE)
+ pattern |= DP_LINK_SCRAMBLING_DISABLE;
+
+ pattern |= link->train.pattern;
+
+ err = drm_dp_dpcd_writeb(aux, DP_TRAINING_PATTERN_SET, pattern);
+ if (err < 0) {
+ DRM_ERROR("failed to set training pattern: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static void drm_dp_link_train_wait(struct drm_dp_link *link)
+{
+ unsigned long min = 0;
+
+ switch (link->train.pattern) {
+ case DP_TRAINING_PATTERN_1:
+ min = link->aux_rd_interval.cr;
+ break;
+
+ case DP_TRAINING_PATTERN_2:
+ case DP_TRAINING_PATTERN_3:
+ min = link->aux_rd_interval.ce;
+ break;
+
+ default:
+ break;
+ }
+
+ if (min > 0)
+ usleep_range(min, 2 * min);
+}
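+
+/*
+ * Illustration (not part of the driver): with the DP v1.4 default set
+ * up by drm_dp_link_probe() (aux_rd_interval.cr == 100), this helper
+ * sleeps between 100 and 200 us after training pattern 1 has been
+ * applied, before the link status is read back.
+ */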
+
+static void drm_dp_link_get_adjustments(struct drm_dp_link *link,
+ u8 status[DP_LINK_STATUS_SIZE])
+{
+ struct drm_dp_link_train_set *adjust = &link->train.adjust;
+ unsigned int i;
+ u8 post_cursor;
+ int err;
+
+ err = drm_dp_dpcd_read(link->aux, DP_ADJUST_REQUEST_POST_CURSOR2,
+ &post_cursor, sizeof(post_cursor));
+ if (err < 0) {
+ DRM_ERROR("failed to read post_cursor2: %d\n", err);
+ post_cursor = 0;
+ }
+
+ for (i = 0; i < link->lanes; i++) {
+ adjust->voltage_swing[i] =
+ drm_dp_get_adjust_request_voltage(status, i) >>
+ DP_TRAIN_VOLTAGE_SWING_SHIFT;
+
+ adjust->pre_emphasis[i] =
+ drm_dp_get_adjust_request_pre_emphasis(status, i) >>
+ DP_TRAIN_PRE_EMPHASIS_SHIFT;
+
+ adjust->post_cursor[i] =
+ (post_cursor >> (i << 1)) & 0x3;
+ }
+}
+
+static void drm_dp_link_train_adjust(struct drm_dp_link_train *train)
+{
+ struct drm_dp_link_train_set *request = &train->request;
+ struct drm_dp_link_train_set *adjust = &train->adjust;
+ unsigned int i;
+
+ for (i = 0; i < 4; i++)
+ if (request->voltage_swing[i] != adjust->voltage_swing[i])
+ request->voltage_swing[i] = adjust->voltage_swing[i];
+
+ for (i = 0; i < 4; i++)
+ if (request->pre_emphasis[i] != adjust->pre_emphasis[i])
+ request->pre_emphasis[i] = adjust->pre_emphasis[i];
+
+ for (i = 0; i < 4; i++)
+ if (request->post_cursor[i] != adjust->post_cursor[i])
+ request->post_cursor[i] = adjust->post_cursor[i];
+}
+
+static int drm_dp_link_recover_clock(struct drm_dp_link *link)
+{
+ u8 status[DP_LINK_STATUS_SIZE];
+ int err;
+
+ err = drm_dp_link_apply_training(link);
+ if (err < 0)
+ return err;
+
+ drm_dp_link_train_wait(link);
+
+ err = drm_dp_dpcd_read_link_status(link->aux, status);
+ if (err < 0) {
+ DRM_ERROR("failed to read link status: %d\n", err);
+ return err;
+ }
+
+ if (!drm_dp_clock_recovery_ok(status, link->lanes))
+ drm_dp_link_get_adjustments(link, status);
+ else
+ link->train.clock_recovered = true;
+
+ return 0;
+}
+
+static int drm_dp_link_clock_recovery(struct drm_dp_link *link)
+{
+ unsigned int repeat;
+ int err;
+
+ /* start clock recovery using training pattern 1 */
+ link->train.pattern = DP_TRAINING_PATTERN_1;
+
+ for (repeat = 1; repeat < 5; repeat++) {
+ err = drm_dp_link_recover_clock(link);
+ if (err < 0) {
+ DRM_ERROR("failed to recover clock: %d\n", err);
+ return err;
+ }
+
+ if (link->train.clock_recovered)
+ break;
+
+ drm_dp_link_train_adjust(&link->train);
+ }
+
+ return 0;
+}
+
+static int drm_dp_link_equalize_channel(struct drm_dp_link *link)
+{
+ struct drm_dp_aux *aux = link->aux;
+ u8 status[DP_LINK_STATUS_SIZE];
+ int err;
+
+ err = drm_dp_link_apply_training(link);
+ if (err < 0)
+ return err;
+
+ drm_dp_link_train_wait(link);
+
+ err = drm_dp_dpcd_read_link_status(aux, status);
+ if (err < 0) {
+ DRM_ERROR("failed to read link status: %d\n", err);
+ return err;
+ }
+
+ if (!drm_dp_clock_recovery_ok(status, link->lanes)) {
+ DRM_ERROR("clock recovery lost while equalizing channel\n");
+ link->train.clock_recovered = false;
+ return 0;
+ }
+
+ if (!drm_dp_channel_eq_ok(status, link->lanes))
+ drm_dp_link_get_adjustments(link, status);
+ else
+ link->train.channel_equalized = true;
+
+ return 0;
+}
+
+static int drm_dp_link_channel_equalization(struct drm_dp_link *link)
+{
+ unsigned int repeat;
+ int err;
+
+ /* start channel equalization using pattern 2 or 3 */
+ if (link->caps.tps3_supported)
+ link->train.pattern = DP_TRAINING_PATTERN_3;
+ else
+ link->train.pattern = DP_TRAINING_PATTERN_2;
+
+ for (repeat = 1; repeat < 5; repeat++) {
+ err = drm_dp_link_equalize_channel(link);
+ if (err < 0) {
+ DRM_ERROR("failed to equalize channel: %d\n", err);
+ return err;
+ }
+
+ if (link->train.channel_equalized)
+ break;
+
+ drm_dp_link_train_adjust(&link->train);
+ }
+
+ return 0;
+}
+
+static int drm_dp_link_downgrade(struct drm_dp_link *link)
+{
+ switch (link->rate) {
+ case 162000:
+ return -EINVAL;
+
+ case 270000:
+ link->rate = 162000;
+ break;
+
+ case 540000:
+ link->rate = 270000;
+		break;
+ }
+
+ return 0;
+}
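+
+/*
+ * Illustration (not part of the driver): repeated downgrades walk the
+ * link rate 540000 -> 270000 -> 162000; a further call at 162000
+ * returns -EINVAL, which terminates the retry loop in
+ * drm_dp_link_train_full().
+ */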
+
+static void drm_dp_link_train_disable(struct drm_dp_link *link)
+{
+ int err;
+
+ link->train.pattern = DP_TRAINING_PATTERN_DISABLE;
+
+ err = drm_dp_link_apply_training(link);
+ if (err < 0)
+ DRM_ERROR("failed to disable link training: %d\n", err);
+}
+
+static int drm_dp_link_train_full(struct drm_dp_link *link)
+{
+ int err;
+
+retry:
+ DRM_DEBUG_KMS("full-training link: %u lane%s at %u MHz\n",
+ link->lanes, (link->lanes > 1) ? "s" : "",
+ link->rate / 100);
+
+ err = drm_dp_link_configure(link->aux, link);
+ if (err < 0) {
+ DRM_ERROR("failed to configure DP link: %d\n", err);
+ return err;
+ }
+
+ err = drm_dp_link_clock_recovery(link);
+ if (err < 0) {
+ DRM_ERROR("clock recovery failed: %d\n", err);
+ goto out;
+ }
+
+ if (!link->train.clock_recovered) {
+ DRM_ERROR("clock recovery failed, downgrading link\n");
+
+ err = drm_dp_link_downgrade(link);
+ if (err < 0)
+ goto out;
+
+ goto retry;
+ }
+
+ DRM_DEBUG_KMS("clock recovery succeeded\n");
+
+ err = drm_dp_link_channel_equalization(link);
+ if (err < 0) {
+ DRM_ERROR("channel equalization failed: %d\n", err);
+ goto out;
+ }
+
+ if (!link->train.channel_equalized) {
+ DRM_ERROR("channel equalization failed, downgrading link\n");
+
+ err = drm_dp_link_downgrade(link);
+ if (err < 0)
+ goto out;
+
+ goto retry;
+ }
+
+ DRM_DEBUG_KMS("channel equalization succeeded\n");
+
+out:
+ drm_dp_link_train_disable(link);
+ return err;
+}
+
+static int drm_dp_link_train_fast(struct drm_dp_link *link)
+{
+ u8 status[DP_LINK_STATUS_SIZE];
+ int err;
+
+ DRM_DEBUG_KMS("fast-training link: %u lane%s at %u MHz\n",
+ link->lanes, (link->lanes > 1) ? "s" : "",
+ link->rate / 100);
+
+ err = drm_dp_link_configure(link->aux, link);
+ if (err < 0) {
+ DRM_ERROR("failed to configure DP link: %d\n", err);
+ return err;
+ }
+
+ /* transmit training pattern 1 for 500 microseconds */
+ link->train.pattern = DP_TRAINING_PATTERN_1;
+
+ err = drm_dp_link_apply_training(link);
+ if (err < 0)
+ goto out;
+
+ usleep_range(500, 1000);
+
+ /* transmit training pattern 2 or 3 for 500 microseconds */
+ if (link->caps.tps3_supported)
+ link->train.pattern = DP_TRAINING_PATTERN_3;
+ else
+ link->train.pattern = DP_TRAINING_PATTERN_2;
+
+ err = drm_dp_link_apply_training(link);
+ if (err < 0)
+ goto out;
+
+ usleep_range(500, 1000);
+
+ err = drm_dp_dpcd_read_link_status(link->aux, status);
+ if (err < 0) {
+ DRM_ERROR("failed to read link status: %d\n", err);
+ goto out;
+ }
+
+ if (!drm_dp_clock_recovery_ok(status, link->lanes)) {
+ DRM_ERROR("clock recovery failed\n");
+ err = -EIO;
+ }
+
+ if (!drm_dp_channel_eq_ok(status, link->lanes)) {
+ DRM_ERROR("channel equalization failed\n");
+ err = -EIO;
+ }
+
+out:
+ drm_dp_link_train_disable(link);
+ return err;
+}
+
+/**
+ * drm_dp_link_train() - perform DisplayPort link training
+ * @link: a DP link object
+ *
+ * Uses the context stored in the DP link object to perform link training. It
+ * is expected that drivers will call drm_dp_link_probe() to obtain the link
+ * capabilities before performing link training.
+ *
+ * If the sink supports fast link training (no AUX CH handshake) and valid
+ * training settings are available, this function will try to perform fast
+ * link training and fall back to full link training on failure.
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int drm_dp_link_train(struct drm_dp_link *link)
+{
+ int err;
+
+ drm_dp_link_train_init(&link->train);
+
+ if (link->caps.fast_training) {
+ if (drm_dp_link_train_valid(&link->train)) {
+ err = drm_dp_link_train_fast(link);
+ if (err < 0)
+ DRM_ERROR("fast link training failed: %d\n",
+ err);
+ else
+ return 0;
+ } else {
+ DRM_DEBUG_KMS("training parameters not available\n");
+ }
+ } else {
+ DRM_DEBUG_KMS("fast link training not supported\n");
+ }
+
+ err = drm_dp_link_train_full(link);
+ if (err < 0)
+ DRM_ERROR("full link training failed: %d\n", err);
+
+ return err;
+}
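+
+/*
+ * Illustration (hypothetical caller, not part of the driver): before
+ * training, a driver points link->aux at its AUX channel and supplies
+ * link->ops; .apply_training is mandatory because
+ * drm_dp_link_apply_training() invokes it unconditionally, while
+ * .configure is optional. The "example_link_ops" name is made up:
+ *
+ *	link->aux = aux;
+ *	link->ops = &example_link_ops;
+ *
+ *	err = drm_dp_link_probe(aux, link);
+ *	if (err < 0)
+ *		return err;
+ *
+ *	err = drm_dp_link_train(link);
+ *	if (err < 0)
+ *		return err;
+ */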
diff --git a/drivers/gpu/drm/tegra/dp.h b/drivers/gpu/drm/tegra/dp.h
new file mode 100644
index 000000000..cb12ed0c5
--- /dev/null
+++ b/drivers/gpu/drm/tegra/dp.h
@@ -0,0 +1,177 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (C) 2013-2019 NVIDIA Corporation.
+ * Copyright (C) 2015 Rob Clark
+ */
+
+#ifndef DRM_TEGRA_DP_H
+#define DRM_TEGRA_DP_H 1
+
+#include <linux/types.h>
+
+struct drm_display_info;
+struct drm_display_mode;
+struct drm_dp_aux;
+struct drm_dp_link;
+
+/**
+ * struct drm_dp_link_caps - DP link capabilities
+ */
+struct drm_dp_link_caps {
+ /**
+ * @enhanced_framing:
+ *
+ * enhanced framing capability (mandatory as of DP 1.2)
+ */
+ bool enhanced_framing;
+
+ /**
+	 * @tps3_supported:
+ *
+ * training pattern sequence 3 supported for equalization
+ */
+ bool tps3_supported;
+
+ /**
+ * @fast_training:
+ *
+ * AUX CH handshake not required for link training
+ */
+ bool fast_training;
+
+ /**
+ * @channel_coding:
+ *
+ * ANSI 8B/10B channel coding capability
+ */
+ bool channel_coding;
+
+ /**
+ * @alternate_scrambler_reset:
+ *
+ * eDP alternate scrambler reset capability
+ */
+ bool alternate_scrambler_reset;
+};
+
+void drm_dp_link_caps_copy(struct drm_dp_link_caps *dest,
+ const struct drm_dp_link_caps *src);
+
+/**
+ * struct drm_dp_link_ops - DP link operations
+ */
+struct drm_dp_link_ops {
+ /**
+ * @apply_training:
+ */
+ int (*apply_training)(struct drm_dp_link *link);
+
+ /**
+ * @configure:
+ */
+ int (*configure)(struct drm_dp_link *link);
+};
+
+#define DP_TRAIN_VOLTAGE_SWING_LEVEL(x) ((x) << 0)
+#define DP_TRAIN_PRE_EMPHASIS_LEVEL(x) ((x) << 3)
+#define DP_LANE_POST_CURSOR(i, x) (((x) & 0x3) << (((i) & 1) << 2))
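+
+/*
+ * Illustrative expansion (not part of the original header):
+ * DP_LANE_POST_CURSOR() places the 2-bit post-cursor level of even
+ * lanes in bits 1:0 and of odd lanes in bits 5:4 of the shared byte,
+ * so DP_LANE_POST_CURSOR(1, 3) evaluates to 0x30.
+ */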
+
+/**
+ * struct drm_dp_link_train_set - link training settings
+ * @voltage_swing: per-lane voltage swing
+ * @pre_emphasis: per-lane pre-emphasis
+ * @post_cursor: per-lane post-cursor
+ */
+struct drm_dp_link_train_set {
+ unsigned int voltage_swing[4];
+ unsigned int pre_emphasis[4];
+ unsigned int post_cursor[4];
+};
+
+/**
+ * struct drm_dp_link_train - link training state information
+ * @request: currently requested settings
+ * @adjust: adjustments requested by sink
+ * @pattern: currently requested training pattern
+ * @clock_recovered: flag to track if clock recovery has completed
+ * @channel_equalized: flag to track if channel equalization has completed
+ */
+struct drm_dp_link_train {
+ struct drm_dp_link_train_set request;
+ struct drm_dp_link_train_set adjust;
+
+ unsigned int pattern;
+
+ bool clock_recovered;
+ bool channel_equalized;
+};
+
+/**
+ * struct drm_dp_link - DP link capabilities and configuration
+ * @revision: DP specification revision supported on the link
+ * @max_rate: maximum clock rate supported on the link
+ * @max_lanes: maximum number of lanes supported on the link
+ * @caps: capabilities supported on the link (see &drm_dp_link_caps)
+ * @aux_rd_interval: AUX read interval to use for training (in microseconds)
+ * @edp: eDP revision (0x11: eDP 1.1, 0x12: eDP 1.2, ...)
+ * @rate: currently configured link rate
+ * @lanes: currently configured number of lanes
+ * @rates: additional supported link rates in kHz (eDP 1.4)
+ * @num_rates: number of additional supported link rates (eDP 1.4)
+ */
+struct drm_dp_link {
+ unsigned char revision;
+ unsigned int max_rate;
+ unsigned int max_lanes;
+
+ struct drm_dp_link_caps caps;
+
+ /**
+ * @cr: clock recovery read interval
+ * @ce: channel equalization read interval
+ */
+ struct {
+ unsigned int cr;
+ unsigned int ce;
+ } aux_rd_interval;
+
+ unsigned char edp;
+
+ unsigned int rate;
+ unsigned int lanes;
+
+ unsigned long rates[DP_MAX_SUPPORTED_RATES];
+ unsigned int num_rates;
+
+ /**
+ * @ops: DP link operations
+ */
+ const struct drm_dp_link_ops *ops;
+
+ /**
+ * @aux: DP AUX channel
+ */
+ struct drm_dp_aux *aux;
+
+ /**
+ * @train: DP link training state
+ */
+ struct drm_dp_link_train train;
+};
+
+int drm_dp_link_add_rate(struct drm_dp_link *link, unsigned long rate);
+int drm_dp_link_remove_rate(struct drm_dp_link *link, unsigned long rate);
+void drm_dp_link_update_rates(struct drm_dp_link *link);
+
+int drm_dp_link_probe(struct drm_dp_aux *aux, struct drm_dp_link *link);
+int drm_dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link);
+int drm_dp_link_power_down(struct drm_dp_aux *aux, struct drm_dp_link *link);
+int drm_dp_link_configure(struct drm_dp_aux *aux, struct drm_dp_link *link);
+int drm_dp_link_choose(struct drm_dp_link *link,
+ const struct drm_display_mode *mode,
+ const struct drm_display_info *info);
+
+void drm_dp_link_train_init(struct drm_dp_link_train *train);
+int drm_dp_link_train(struct drm_dp_link *link);
+
+#endif
diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c
new file mode 100644
index 000000000..d773ef485
--- /dev/null
+++ b/drivers/gpu/drm/tegra/dpaux.c
@@ -0,0 +1,827 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 NVIDIA Corporation
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+#include <linux/workqueue.h>
+
+#include <drm/display/drm_dp_helper.h>
+#include <drm/display/drm_dp_aux_bus.h>
+#include <drm/drm_panel.h>
+
+#include "dp.h"
+#include "dpaux.h"
+#include "drm.h"
+#include "trace.h"
+
+static DEFINE_MUTEX(dpaux_lock);
+static LIST_HEAD(dpaux_list);
+
+struct tegra_dpaux_soc {
+ unsigned int cmh;
+ unsigned int drvz;
+ unsigned int drvi;
+};
+
+struct tegra_dpaux {
+ struct drm_dp_aux aux;
+ struct device *dev;
+
+ const struct tegra_dpaux_soc *soc;
+
+ void __iomem *regs;
+ int irq;
+
+ struct tegra_output *output;
+
+ struct reset_control *rst;
+ struct clk *clk_parent;
+ struct clk *clk;
+
+ struct regulator *vdd;
+
+ struct completion complete;
+ struct work_struct work;
+ struct list_head list;
+
+#ifdef CONFIG_GENERIC_PINCONF
+ struct pinctrl_dev *pinctrl;
+ struct pinctrl_desc desc;
+#endif
+};
+
+static inline struct tegra_dpaux *to_dpaux(struct drm_dp_aux *aux)
+{
+ return container_of(aux, struct tegra_dpaux, aux);
+}
+
+static inline struct tegra_dpaux *work_to_dpaux(struct work_struct *work)
+{
+ return container_of(work, struct tegra_dpaux, work);
+}
+
+static inline u32 tegra_dpaux_readl(struct tegra_dpaux *dpaux,
+ unsigned int offset)
+{
+ u32 value = readl(dpaux->regs + (offset << 2));
+
+ trace_dpaux_readl(dpaux->dev, offset, value);
+
+ return value;
+}
+
+static inline void tegra_dpaux_writel(struct tegra_dpaux *dpaux,
+ u32 value, unsigned int offset)
+{
+ trace_dpaux_writel(dpaux->dev, offset, value);
+ writel(value, dpaux->regs + (offset << 2));
+}
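+
+/*
+ * Illustration (not part of the driver): register offsets in this
+ * driver are 32-bit word indices, so tegra_dpaux_readl(dpaux,
+ * DPAUX_DP_AUXSTAT) with DPAUX_DP_AUXSTAT == 0x31 accesses byte offset
+ * 0x31 << 2 == 0xc4.
+ */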
+
+static void tegra_dpaux_write_fifo(struct tegra_dpaux *dpaux, const u8 *buffer,
+ size_t size)
+{
+ size_t i, j;
+
+ for (i = 0; i < DIV_ROUND_UP(size, 4); i++) {
+ size_t num = min_t(size_t, size - i * 4, 4);
+ u32 value = 0;
+
+ for (j = 0; j < num; j++)
+ value |= buffer[i * 4 + j] << (j * 8);
+
+ tegra_dpaux_writel(dpaux, value, DPAUX_DP_AUXDATA_WRITE(i));
+ }
+}
+
+static void tegra_dpaux_read_fifo(struct tegra_dpaux *dpaux, u8 *buffer,
+ size_t size)
+{
+ size_t i, j;
+
+ for (i = 0; i < DIV_ROUND_UP(size, 4); i++) {
+ size_t num = min_t(size_t, size - i * 4, 4);
+ u32 value;
+
+ value = tegra_dpaux_readl(dpaux, DPAUX_DP_AUXDATA_READ(i));
+
+ for (j = 0; j < num; j++)
+ buffer[i * 4 + j] = value >> (j * 8);
+ }
+}
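+
+/*
+ * Illustrative layout (not part of the driver): the FIFO helpers pack
+ * bytes little-endian into 32-bit words, so a six-byte buffer
+ * { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66 } is written as 0x44332211 to
+ * DPAUX_DP_AUXDATA_WRITE(0) and 0x00006655 to DPAUX_DP_AUXDATA_WRITE(1).
+ */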
+
+static ssize_t tegra_dpaux_transfer(struct drm_dp_aux *aux,
+ struct drm_dp_aux_msg *msg)
+{
+ unsigned long timeout = msecs_to_jiffies(250);
+ struct tegra_dpaux *dpaux = to_dpaux(aux);
+ unsigned long status;
+ ssize_t ret = 0;
+ u8 reply = 0;
+ u32 value;
+
+	/* Tegra has 4 x 4-byte DP AUX transmit and receive FIFOs. */
+ if (msg->size > 16)
+ return -EINVAL;
+
+ /*
+ * Allow zero-sized messages only for I2C, in which case they specify
+ * address-only transactions.
+ */
+ if (msg->size < 1) {
+ switch (msg->request & ~DP_AUX_I2C_MOT) {
+ case DP_AUX_I2C_WRITE_STATUS_UPDATE:
+ case DP_AUX_I2C_WRITE:
+ case DP_AUX_I2C_READ:
+ value = DPAUX_DP_AUXCTL_CMD_ADDRESS_ONLY;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ } else {
+ /* For non-zero-sized messages, set the CMDLEN field. */
+ value = DPAUX_DP_AUXCTL_CMDLEN(msg->size - 1);
+ }
+
+ switch (msg->request & ~DP_AUX_I2C_MOT) {
+ case DP_AUX_I2C_WRITE:
+ if (msg->request & DP_AUX_I2C_MOT)
+ value |= DPAUX_DP_AUXCTL_CMD_MOT_WR;
+ else
+ value |= DPAUX_DP_AUXCTL_CMD_I2C_WR;
+
+ break;
+
+ case DP_AUX_I2C_READ:
+ if (msg->request & DP_AUX_I2C_MOT)
+ value |= DPAUX_DP_AUXCTL_CMD_MOT_RD;
+ else
+ value |= DPAUX_DP_AUXCTL_CMD_I2C_RD;
+
+ break;
+
+ case DP_AUX_I2C_WRITE_STATUS_UPDATE:
+ if (msg->request & DP_AUX_I2C_MOT)
+ value |= DPAUX_DP_AUXCTL_CMD_MOT_RQ;
+ else
+ value |= DPAUX_DP_AUXCTL_CMD_I2C_RQ;
+
+ break;
+
+ case DP_AUX_NATIVE_WRITE:
+ value |= DPAUX_DP_AUXCTL_CMD_AUX_WR;
+ break;
+
+ case DP_AUX_NATIVE_READ:
+ value |= DPAUX_DP_AUXCTL_CMD_AUX_RD;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ tegra_dpaux_writel(dpaux, msg->address, DPAUX_DP_AUXADDR);
+ tegra_dpaux_writel(dpaux, value, DPAUX_DP_AUXCTL);
+
+ if ((msg->request & DP_AUX_I2C_READ) == 0) {
+ tegra_dpaux_write_fifo(dpaux, msg->buffer, msg->size);
+ ret = msg->size;
+ }
+
+ /* start transaction */
+ value = tegra_dpaux_readl(dpaux, DPAUX_DP_AUXCTL);
+ value |= DPAUX_DP_AUXCTL_TRANSACTREQ;
+ tegra_dpaux_writel(dpaux, value, DPAUX_DP_AUXCTL);
+
+ status = wait_for_completion_timeout(&dpaux->complete, timeout);
+ if (!status)
+ return -ETIMEDOUT;
+
+ /* read status and clear errors */
+ value = tegra_dpaux_readl(dpaux, DPAUX_DP_AUXSTAT);
+ tegra_dpaux_writel(dpaux, 0xf00, DPAUX_DP_AUXSTAT);
+
+ if (value & DPAUX_DP_AUXSTAT_TIMEOUT_ERROR)
+ return -ETIMEDOUT;
+
+ if ((value & DPAUX_DP_AUXSTAT_RX_ERROR) ||
+ (value & DPAUX_DP_AUXSTAT_SINKSTAT_ERROR) ||
+ (value & DPAUX_DP_AUXSTAT_NO_STOP_ERROR))
+ return -EIO;
+
+ switch ((value & DPAUX_DP_AUXSTAT_REPLY_TYPE_MASK) >> 16) {
+ case 0x00:
+ reply = DP_AUX_NATIVE_REPLY_ACK;
+ break;
+
+ case 0x01:
+ reply = DP_AUX_NATIVE_REPLY_NACK;
+ break;
+
+ case 0x02:
+ reply = DP_AUX_NATIVE_REPLY_DEFER;
+ break;
+
+ case 0x04:
+ reply = DP_AUX_I2C_REPLY_NACK;
+ break;
+
+ case 0x08:
+ reply = DP_AUX_I2C_REPLY_DEFER;
+ break;
+ }
+
+	if ((msg->size > 0) && (reply == DP_AUX_NATIVE_REPLY_ACK)) {
+ if (msg->request & DP_AUX_I2C_READ) {
+ size_t count = value & DPAUX_DP_AUXSTAT_REPLY_MASK;
+
+ /*
+ * There might be a smarter way to do this, but since
+ * the DP helpers will already retry transactions for
+ * an -EBUSY return value, simply reuse that instead.
+ */
+ if (count != msg->size) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ tegra_dpaux_read_fifo(dpaux, msg->buffer, count);
+ ret = count;
+ }
+ }
+
+ msg->reply = reply;
+
+out:
+ return ret;
+}
+
+static void tegra_dpaux_hotplug(struct work_struct *work)
+{
+ struct tegra_dpaux *dpaux = work_to_dpaux(work);
+
+ if (dpaux->output)
+ drm_helper_hpd_irq_event(dpaux->output->connector.dev);
+}
+
+static irqreturn_t tegra_dpaux_irq(int irq, void *data)
+{
+ struct tegra_dpaux *dpaux = data;
+ u32 value;
+
+ /* clear interrupts */
+ value = tegra_dpaux_readl(dpaux, DPAUX_INTR_AUX);
+ tegra_dpaux_writel(dpaux, value, DPAUX_INTR_AUX);
+
+ if (value & (DPAUX_INTR_PLUG_EVENT | DPAUX_INTR_UNPLUG_EVENT))
+ schedule_work(&dpaux->work);
+
+ if (value & DPAUX_INTR_IRQ_EVENT) {
+ /* TODO: handle this */
+ }
+
+ if (value & DPAUX_INTR_AUX_DONE)
+ complete(&dpaux->complete);
+
+ return IRQ_HANDLED;
+}
+
+enum tegra_dpaux_functions {
+ DPAUX_PADCTL_FUNC_AUX,
+ DPAUX_PADCTL_FUNC_I2C,
+ DPAUX_PADCTL_FUNC_OFF,
+};
+
+static void tegra_dpaux_pad_power_down(struct tegra_dpaux *dpaux)
+{
+ u32 value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_SPARE);
+
+ value |= DPAUX_HYBRID_SPARE_PAD_POWER_DOWN;
+
+ tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_SPARE);
+}
+
+static void tegra_dpaux_pad_power_up(struct tegra_dpaux *dpaux)
+{
+ u32 value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_SPARE);
+
+ value &= ~DPAUX_HYBRID_SPARE_PAD_POWER_DOWN;
+
+ tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_SPARE);
+}
+
+static int tegra_dpaux_pad_config(struct tegra_dpaux *dpaux, unsigned function)
+{
+ u32 value;
+
+ switch (function) {
+ case DPAUX_PADCTL_FUNC_AUX:
+ value = DPAUX_HYBRID_PADCTL_AUX_CMH(dpaux->soc->cmh) |
+ DPAUX_HYBRID_PADCTL_AUX_DRVZ(dpaux->soc->drvz) |
+ DPAUX_HYBRID_PADCTL_AUX_DRVI(dpaux->soc->drvi) |
+ DPAUX_HYBRID_PADCTL_AUX_INPUT_RCV |
+ DPAUX_HYBRID_PADCTL_MODE_AUX;
+ break;
+
+ case DPAUX_PADCTL_FUNC_I2C:
+ value = DPAUX_HYBRID_PADCTL_I2C_SDA_INPUT_RCV |
+ DPAUX_HYBRID_PADCTL_I2C_SCL_INPUT_RCV |
+ DPAUX_HYBRID_PADCTL_AUX_CMH(dpaux->soc->cmh) |
+ DPAUX_HYBRID_PADCTL_AUX_DRVZ(dpaux->soc->drvz) |
+ DPAUX_HYBRID_PADCTL_AUX_DRVI(dpaux->soc->drvi) |
+ DPAUX_HYBRID_PADCTL_MODE_I2C;
+ break;
+
+ case DPAUX_PADCTL_FUNC_OFF:
+ tegra_dpaux_pad_power_down(dpaux);
+ return 0;
+
+ default:
+ return -ENOTSUPP;
+ }
+
+ tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_PADCTL);
+ tegra_dpaux_pad_power_up(dpaux);
+
+ return 0;
+}
+
+#ifdef CONFIG_GENERIC_PINCONF
+static const struct pinctrl_pin_desc tegra_dpaux_pins[] = {
+ PINCTRL_PIN(0, "DP_AUX_CHx_P"),
+ PINCTRL_PIN(1, "DP_AUX_CHx_N"),
+};
+
+static const unsigned tegra_dpaux_pin_numbers[] = { 0, 1 };
+
+static const char * const tegra_dpaux_groups[] = {
+ "dpaux-io",
+};
+
+static const char * const tegra_dpaux_functions[] = {
+ "aux",
+ "i2c",
+ "off",
+};
+
+static int tegra_dpaux_get_groups_count(struct pinctrl_dev *pinctrl)
+{
+ return ARRAY_SIZE(tegra_dpaux_groups);
+}
+
+static const char *tegra_dpaux_get_group_name(struct pinctrl_dev *pinctrl,
+ unsigned int group)
+{
+ return tegra_dpaux_groups[group];
+}
+
+static int tegra_dpaux_get_group_pins(struct pinctrl_dev *pinctrl,
+ unsigned group, const unsigned **pins,
+ unsigned *num_pins)
+{
+ *pins = tegra_dpaux_pin_numbers;
+ *num_pins = ARRAY_SIZE(tegra_dpaux_pin_numbers);
+
+ return 0;
+}
+
+static const struct pinctrl_ops tegra_dpaux_pinctrl_ops = {
+ .get_groups_count = tegra_dpaux_get_groups_count,
+ .get_group_name = tegra_dpaux_get_group_name,
+ .get_group_pins = tegra_dpaux_get_group_pins,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_group,
+ .dt_free_map = pinconf_generic_dt_free_map,
+};
+
+static int tegra_dpaux_get_functions_count(struct pinctrl_dev *pinctrl)
+{
+ return ARRAY_SIZE(tegra_dpaux_functions);
+}
+
+static const char *tegra_dpaux_get_function_name(struct pinctrl_dev *pinctrl,
+ unsigned int function)
+{
+ return tegra_dpaux_functions[function];
+}
+
+static int tegra_dpaux_get_function_groups(struct pinctrl_dev *pinctrl,
+ unsigned int function,
+ const char * const **groups,
+ unsigned * const num_groups)
+{
+ *num_groups = ARRAY_SIZE(tegra_dpaux_groups);
+ *groups = tegra_dpaux_groups;
+
+ return 0;
+}
+
+static int tegra_dpaux_set_mux(struct pinctrl_dev *pinctrl,
+ unsigned int function, unsigned int group)
+{
+ struct tegra_dpaux *dpaux = pinctrl_dev_get_drvdata(pinctrl);
+
+ return tegra_dpaux_pad_config(dpaux, function);
+}
+
+static const struct pinmux_ops tegra_dpaux_pinmux_ops = {
+ .get_functions_count = tegra_dpaux_get_functions_count,
+ .get_function_name = tegra_dpaux_get_function_name,
+ .get_function_groups = tegra_dpaux_get_function_groups,
+ .set_mux = tegra_dpaux_set_mux,
+};
+#endif
+
+static int tegra_dpaux_probe(struct platform_device *pdev)
+{
+ struct tegra_dpaux *dpaux;
+ struct resource *regs;
+ u32 value;
+ int err;
+
+ dpaux = devm_kzalloc(&pdev->dev, sizeof(*dpaux), GFP_KERNEL);
+ if (!dpaux)
+ return -ENOMEM;
+
+ dpaux->soc = of_device_get_match_data(&pdev->dev);
+ INIT_WORK(&dpaux->work, tegra_dpaux_hotplug);
+ init_completion(&dpaux->complete);
+ INIT_LIST_HEAD(&dpaux->list);
+ dpaux->dev = &pdev->dev;
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dpaux->regs = devm_ioremap_resource(&pdev->dev, regs);
+ if (IS_ERR(dpaux->regs))
+ return PTR_ERR(dpaux->regs);
+
+ dpaux->irq = platform_get_irq(pdev, 0);
+ if (dpaux->irq < 0)
+ return dpaux->irq;
+
+ if (!pdev->dev.pm_domain) {
+ dpaux->rst = devm_reset_control_get(&pdev->dev, "dpaux");
+ if (IS_ERR(dpaux->rst)) {
+ dev_err(&pdev->dev,
+ "failed to get reset control: %ld\n",
+ PTR_ERR(dpaux->rst));
+ return PTR_ERR(dpaux->rst);
+ }
+ }
+
+ dpaux->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(dpaux->clk)) {
+ dev_err(&pdev->dev, "failed to get module clock: %ld\n",
+ PTR_ERR(dpaux->clk));
+ return PTR_ERR(dpaux->clk);
+ }
+
+ dpaux->clk_parent = devm_clk_get(&pdev->dev, "parent");
+ if (IS_ERR(dpaux->clk_parent)) {
+ dev_err(&pdev->dev, "failed to get parent clock: %ld\n",
+ PTR_ERR(dpaux->clk_parent));
+ return PTR_ERR(dpaux->clk_parent);
+ }
+
+ err = clk_set_rate(dpaux->clk_parent, 270000000);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to set clock to 270 MHz: %d\n",
+ err);
+ return err;
+ }
+
+ dpaux->vdd = devm_regulator_get_optional(&pdev->dev, "vdd");
+ if (IS_ERR(dpaux->vdd)) {
+ if (PTR_ERR(dpaux->vdd) != -ENODEV) {
+ if (PTR_ERR(dpaux->vdd) != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "failed to get VDD supply: %ld\n",
+ PTR_ERR(dpaux->vdd));
+
+ return PTR_ERR(dpaux->vdd);
+ }
+
+ dpaux->vdd = NULL;
+ }
+
+ platform_set_drvdata(pdev, dpaux);
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
+
+ err = devm_request_irq(dpaux->dev, dpaux->irq, tegra_dpaux_irq, 0,
+ dev_name(dpaux->dev), dpaux);
+ if (err < 0) {
+ dev_err(dpaux->dev, "failed to request IRQ#%u: %d\n",
+ dpaux->irq, err);
+ return err;
+ }
+
+ disable_irq(dpaux->irq);
+
+ dpaux->aux.transfer = tegra_dpaux_transfer;
+ dpaux->aux.dev = &pdev->dev;
+
+ drm_dp_aux_init(&dpaux->aux);
+
+ /*
+ * Assume that by default the DPAUX/I2C pads will be used for HDMI,
+ * so power them up and configure them in I2C mode.
+ *
+ * The DPAUX code paths reconfigure the pads in AUX mode, but there
+ * is no possibility to perform the I2C mode configuration in the
+ * HDMI path.
+ */
+ err = tegra_dpaux_pad_config(dpaux, DPAUX_PADCTL_FUNC_I2C);
+ if (err < 0)
+ return err;
+
+#ifdef CONFIG_GENERIC_PINCONF
+ dpaux->desc.name = dev_name(&pdev->dev);
+ dpaux->desc.pins = tegra_dpaux_pins;
+ dpaux->desc.npins = ARRAY_SIZE(tegra_dpaux_pins);
+ dpaux->desc.pctlops = &tegra_dpaux_pinctrl_ops;
+ dpaux->desc.pmxops = &tegra_dpaux_pinmux_ops;
+ dpaux->desc.owner = THIS_MODULE;
+
+ dpaux->pinctrl = devm_pinctrl_register(&pdev->dev, &dpaux->desc, dpaux);
+ if (IS_ERR(dpaux->pinctrl)) {
+		dev_err(&pdev->dev, "failed to register pinctrl\n");
+ return PTR_ERR(dpaux->pinctrl);
+ }
+#endif
+ /* enable and clear all interrupts */
+ value = DPAUX_INTR_AUX_DONE | DPAUX_INTR_IRQ_EVENT |
+ DPAUX_INTR_UNPLUG_EVENT | DPAUX_INTR_PLUG_EVENT;
+ tegra_dpaux_writel(dpaux, value, DPAUX_INTR_EN_AUX);
+ tegra_dpaux_writel(dpaux, value, DPAUX_INTR_AUX);
+
+ mutex_lock(&dpaux_lock);
+ list_add_tail(&dpaux->list, &dpaux_list);
+ mutex_unlock(&dpaux_lock);
+
+ err = devm_of_dp_aux_populate_ep_devices(&dpaux->aux);
+ if (err < 0) {
+ dev_err(dpaux->dev, "failed to populate AUX bus: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int tegra_dpaux_remove(struct platform_device *pdev)
+{
+ struct tegra_dpaux *dpaux = platform_get_drvdata(pdev);
+
+ cancel_work_sync(&dpaux->work);
+
+ /* make sure pads are powered down when not in use */
+ tegra_dpaux_pad_power_down(dpaux);
+
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ mutex_lock(&dpaux_lock);
+ list_del(&dpaux->list);
+ mutex_unlock(&dpaux_lock);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int tegra_dpaux_suspend(struct device *dev)
+{
+ struct tegra_dpaux *dpaux = dev_get_drvdata(dev);
+ int err = 0;
+
+ if (dpaux->rst) {
+ err = reset_control_assert(dpaux->rst);
+ if (err < 0) {
+ dev_err(dev, "failed to assert reset: %d\n", err);
+ return err;
+ }
+ }
+
+ usleep_range(1000, 2000);
+
+ clk_disable_unprepare(dpaux->clk_parent);
+ clk_disable_unprepare(dpaux->clk);
+
+ return err;
+}
+
+static int tegra_dpaux_resume(struct device *dev)
+{
+ struct tegra_dpaux *dpaux = dev_get_drvdata(dev);
+ int err;
+
+ err = clk_prepare_enable(dpaux->clk);
+ if (err < 0) {
+ dev_err(dev, "failed to enable clock: %d\n", err);
+ return err;
+ }
+
+ err = clk_prepare_enable(dpaux->clk_parent);
+ if (err < 0) {
+ dev_err(dev, "failed to enable parent clock: %d\n", err);
+ goto disable_clk;
+ }
+
+ usleep_range(1000, 2000);
+
+ if (dpaux->rst) {
+ err = reset_control_deassert(dpaux->rst);
+ if (err < 0) {
+ dev_err(dev, "failed to deassert reset: %d\n", err);
+ goto disable_parent;
+ }
+
+ usleep_range(1000, 2000);
+ }
+
+ return 0;
+
+disable_parent:
+ clk_disable_unprepare(dpaux->clk_parent);
+disable_clk:
+ clk_disable_unprepare(dpaux->clk);
+ return err;
+}
+#endif
+
+static const struct dev_pm_ops tegra_dpaux_pm_ops = {
+ SET_RUNTIME_PM_OPS(tegra_dpaux_suspend, tegra_dpaux_resume, NULL)
+};
+
+static const struct tegra_dpaux_soc tegra124_dpaux_soc = {
+ .cmh = 0x02,
+ .drvz = 0x04,
+ .drvi = 0x18,
+};
+
+static const struct tegra_dpaux_soc tegra210_dpaux_soc = {
+ .cmh = 0x02,
+ .drvz = 0x04,
+ .drvi = 0x30,
+};
+
+static const struct tegra_dpaux_soc tegra194_dpaux_soc = {
+ .cmh = 0x02,
+ .drvz = 0x04,
+ .drvi = 0x2c,
+};
+
+static const struct of_device_id tegra_dpaux_of_match[] = {
+ { .compatible = "nvidia,tegra194-dpaux", .data = &tegra194_dpaux_soc },
+ { .compatible = "nvidia,tegra186-dpaux", .data = &tegra210_dpaux_soc },
+ { .compatible = "nvidia,tegra210-dpaux", .data = &tegra210_dpaux_soc },
+ { .compatible = "nvidia,tegra124-dpaux", .data = &tegra124_dpaux_soc },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tegra_dpaux_of_match);
+
+struct platform_driver tegra_dpaux_driver = {
+ .driver = {
+ .name = "tegra-dpaux",
+ .of_match_table = tegra_dpaux_of_match,
+ .pm = &tegra_dpaux_pm_ops,
+ },
+ .probe = tegra_dpaux_probe,
+ .remove = tegra_dpaux_remove,
+};
+
+struct drm_dp_aux *drm_dp_aux_find_by_of_node(struct device_node *np)
+{
+ struct tegra_dpaux *dpaux;
+
+ mutex_lock(&dpaux_lock);
+
+ list_for_each_entry(dpaux, &dpaux_list, list)
+ if (np == dpaux->dev->of_node) {
+ mutex_unlock(&dpaux_lock);
+ return &dpaux->aux;
+ }
+
+ mutex_unlock(&dpaux_lock);
+
+ return NULL;
+}
+
+int drm_dp_aux_attach(struct drm_dp_aux *aux, struct tegra_output *output)
+{
+ struct tegra_dpaux *dpaux = to_dpaux(aux);
+ unsigned long timeout;
+ int err;
+
+ aux->drm_dev = output->connector.dev;
+ err = drm_dp_aux_register(aux);
+ if (err < 0)
+ return err;
+
+ output->connector.polled = DRM_CONNECTOR_POLL_HPD;
+ dpaux->output = output;
+
+ if (output->panel) {
+ enum drm_connector_status status;
+
+ if (dpaux->vdd) {
+ err = regulator_enable(dpaux->vdd);
+ if (err < 0)
+ return err;
+ }
+
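+		/*
+		 * Give the panel up to 250 ms to report as connected after
+		 * its supply has been enabled, polling in 1-2 ms steps.
+		 */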
+ timeout = jiffies + msecs_to_jiffies(250);
+
+ while (time_before(jiffies, timeout)) {
+ status = drm_dp_aux_detect(aux);
+
+ if (status == connector_status_connected)
+ break;
+
+ usleep_range(1000, 2000);
+ }
+
+ if (status != connector_status_connected)
+ return -ETIMEDOUT;
+ }
+
+ enable_irq(dpaux->irq);
+ return 0;
+}
+
+int drm_dp_aux_detach(struct drm_dp_aux *aux)
+{
+ struct tegra_dpaux *dpaux = to_dpaux(aux);
+ unsigned long timeout;
+ int err;
+
+ drm_dp_aux_unregister(aux);
+ disable_irq(dpaux->irq);
+
+ if (dpaux->output->panel) {
+ enum drm_connector_status status;
+
+ if (dpaux->vdd) {
+ err = regulator_disable(dpaux->vdd);
+ if (err < 0)
+ return err;
+ }
+
+ timeout = jiffies + msecs_to_jiffies(250);
+
+ while (time_before(jiffies, timeout)) {
+ status = drm_dp_aux_detect(aux);
+
+ if (status == connector_status_disconnected)
+ break;
+
+ usleep_range(1000, 2000);
+ }
+
+ if (status != connector_status_disconnected)
+ return -ETIMEDOUT;
+
+ dpaux->output = NULL;
+ }
+
+ return 0;
+}
+
+enum drm_connector_status drm_dp_aux_detect(struct drm_dp_aux *aux)
+{
+ struct tegra_dpaux *dpaux = to_dpaux(aux);
+ u32 value;
+
+ value = tegra_dpaux_readl(dpaux, DPAUX_DP_AUXSTAT);
+
+ if (value & DPAUX_DP_AUXSTAT_HPD_STATUS)
+ return connector_status_connected;
+
+ return connector_status_disconnected;
+}
+
+int drm_dp_aux_enable(struct drm_dp_aux *aux)
+{
+ struct tegra_dpaux *dpaux = to_dpaux(aux);
+
+ return tegra_dpaux_pad_config(dpaux, DPAUX_PADCTL_FUNC_AUX);
+}
+
+int drm_dp_aux_disable(struct drm_dp_aux *aux)
+{
+ struct tegra_dpaux *dpaux = to_dpaux(aux);
+
+ tegra_dpaux_pad_power_down(dpaux);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/tegra/dpaux.h b/drivers/gpu/drm/tegra/dpaux.h
new file mode 100644
index 000000000..5eced10fa
--- /dev/null
+++ b/drivers/gpu/drm/tegra/dpaux.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2013 NVIDIA Corporation
+ */
+
+#ifndef DRM_TEGRA_DPAUX_H
+#define DRM_TEGRA_DPAUX_H
+
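+/*
+ * The register offsets below look like 32-bit word indices (0x01 would not
+ * be a valid byte offset for a 32-bit register); the dpaux register
+ * accessors shift them left by two to form byte offsets, like the DSI
+ * accessors elsewhere in this series.
+ */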
+#define DPAUX_CTXSW 0x00
+
+#define DPAUX_INTR_EN_AUX 0x01
+#define DPAUX_INTR_AUX 0x05
+#define DPAUX_INTR_AUX_DONE (1 << 3)
+#define DPAUX_INTR_IRQ_EVENT (1 << 2)
+#define DPAUX_INTR_UNPLUG_EVENT (1 << 1)
+#define DPAUX_INTR_PLUG_EVENT (1 << 0)
+
+#define DPAUX_DP_AUXDATA_WRITE(x) (0x09 + ((x) << 2))
+#define DPAUX_DP_AUXDATA_READ(x) (0x19 + ((x) << 2))
+#define DPAUX_DP_AUXADDR 0x29
+
+#define DPAUX_DP_AUXCTL 0x2d
+#define DPAUX_DP_AUXCTL_TRANSACTREQ (1 << 16)
+#define DPAUX_DP_AUXCTL_CMD_AUX_RD (9 << 12)
+#define DPAUX_DP_AUXCTL_CMD_AUX_WR (8 << 12)
+#define DPAUX_DP_AUXCTL_CMD_MOT_RQ (6 << 12)
+#define DPAUX_DP_AUXCTL_CMD_MOT_RD (5 << 12)
+#define DPAUX_DP_AUXCTL_CMD_MOT_WR (4 << 12)
+#define DPAUX_DP_AUXCTL_CMD_I2C_RQ (2 << 12)
+#define DPAUX_DP_AUXCTL_CMD_I2C_RD (1 << 12)
+#define DPAUX_DP_AUXCTL_CMD_I2C_WR (0 << 12)
+#define DPAUX_DP_AUXCTL_CMD_ADDRESS_ONLY (1 << 8)
+#define DPAUX_DP_AUXCTL_CMDLEN(x) ((x) & 0xff)
+
+#define DPAUX_DP_AUXSTAT 0x31
+#define DPAUX_DP_AUXSTAT_HPD_STATUS (1 << 28)
+#define DPAUX_DP_AUXSTAT_REPLY_TYPE_MASK (0xf0000)
+#define DPAUX_DP_AUXSTAT_NO_STOP_ERROR (1 << 11)
+#define DPAUX_DP_AUXSTAT_SINKSTAT_ERROR (1 << 10)
+#define DPAUX_DP_AUXSTAT_RX_ERROR (1 << 9)
+#define DPAUX_DP_AUXSTAT_TIMEOUT_ERROR (1 << 8)
+#define DPAUX_DP_AUXSTAT_REPLY_MASK (0xff)
+
+#define DPAUX_DP_AUX_SINKSTAT_LO 0x35
+#define DPAUX_DP_AUX_SINKSTAT_HI 0x39
+
+#define DPAUX_HPD_CONFIG 0x3d
+#define DPAUX_HPD_CONFIG_UNPLUG_MIN_TIME(x) (((x) & 0xffff) << 16)
+#define DPAUX_HPD_CONFIG_PLUG_MIN_TIME(x) ((x) & 0xffff)
+
+#define DPAUX_HPD_IRQ_CONFIG 0x41
+#define DPAUX_HPD_IRQ_CONFIG_MIN_LOW_TIME(x) ((x) & 0xffff)
+
+#define DPAUX_DP_AUX_CONFIG 0x45
+
+#define DPAUX_HYBRID_PADCTL 0x49
+#define DPAUX_HYBRID_PADCTL_I2C_SDA_INPUT_RCV (1 << 15)
+#define DPAUX_HYBRID_PADCTL_I2C_SCL_INPUT_RCV (1 << 14)
+#define DPAUX_HYBRID_PADCTL_AUX_CMH(x) (((x) & 0x3) << 12)
+#define DPAUX_HYBRID_PADCTL_AUX_DRVZ(x) (((x) & 0x7) << 8)
+#define DPAUX_HYBRID_PADCTL_AUX_DRVI(x) (((x) & 0x3f) << 2)
+#define DPAUX_HYBRID_PADCTL_AUX_INPUT_RCV (1 << 1)
+#define DPAUX_HYBRID_PADCTL_MODE_I2C (1 << 0)
+#define DPAUX_HYBRID_PADCTL_MODE_AUX (0 << 0)
+
+#define DPAUX_HYBRID_SPARE 0x4d
+#define DPAUX_HYBRID_SPARE_PAD_POWER_DOWN (1 << 0)
+
+#define DPAUX_SCRATCH_REG0 0x51
+#define DPAUX_SCRATCH_REG1 0x55
+#define DPAUX_SCRATCH_REG2 0x59
+
+#endif
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
new file mode 100644
index 000000000..5fc55b977
--- /dev/null
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -0,0 +1,1447 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012-2016 NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <linux/bitops.h>
+#include <linux/host1x.h>
+#include <linux/idr.h>
+#include <linux/iommu.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include <drm/drm_aperture.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_debugfs.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_ioctl.h>
+#include <drm/drm_prime.h>
+#include <drm/drm_vblank.h>
+
+#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
+#include <asm/dma-iommu.h>
+#endif
+
+#include "dc.h"
+#include "drm.h"
+#include "gem.h"
+#include "uapi.h"
+
+#define DRIVER_NAME "tegra"
+#define DRIVER_DESC "NVIDIA Tegra graphics"
+#define DRIVER_DATE "20120330"
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 0
+#define DRIVER_PATCHLEVEL 0
+
+#define CARVEOUT_SZ SZ_64M
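+/* 16383 = 2^14 - 1; the GATHER opcode's word count appears to be a 14-bit field */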
+#define CDMA_GATHER_FETCHES_MAX_NB 16383
+
+static int tegra_atomic_check(struct drm_device *drm,
+ struct drm_atomic_state *state)
+{
+ int err;
+
+ err = drm_atomic_helper_check(drm, state);
+ if (err < 0)
+ return err;
+
+ return tegra_display_hub_atomic_check(drm, state);
+}
+
+static const struct drm_mode_config_funcs tegra_drm_mode_config_funcs = {
+ .fb_create = tegra_fb_create,
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+ .output_poll_changed = drm_fb_helper_output_poll_changed,
+#endif
+ .atomic_check = tegra_atomic_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+static void tegra_atomic_post_commit(struct drm_device *drm,
+ struct drm_atomic_state *old_state)
+{
+ struct drm_crtc_state *old_crtc_state __maybe_unused;
+ struct drm_crtc *crtc;
+ unsigned int i;
+
+ for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i)
+ tegra_crtc_atomic_post_commit(crtc, old_state);
+}
+
+static void tegra_atomic_commit_tail(struct drm_atomic_state *old_state)
+{
+ struct drm_device *drm = old_state->dev;
+ struct tegra_drm *tegra = drm->dev_private;
+
+ if (tegra->hub) {
+ bool fence_cookie = dma_fence_begin_signalling();
+
+ drm_atomic_helper_commit_modeset_disables(drm, old_state);
+ tegra_display_hub_atomic_commit(drm, old_state);
+ drm_atomic_helper_commit_planes(drm, old_state, 0);
+ drm_atomic_helper_commit_modeset_enables(drm, old_state);
+ drm_atomic_helper_commit_hw_done(old_state);
+ dma_fence_end_signalling(fence_cookie);
+ drm_atomic_helper_wait_for_vblanks(drm, old_state);
+ drm_atomic_helper_cleanup_planes(drm, old_state);
+ } else {
+ drm_atomic_helper_commit_tail_rpm(old_state);
+ }
+
+ tegra_atomic_post_commit(drm, old_state);
+}
+
+static const struct drm_mode_config_helper_funcs
+tegra_drm_mode_config_helpers = {
+ .atomic_commit_tail = tegra_atomic_commit_tail,
+};
+
+static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp)
+{
+ struct tegra_drm_file *fpriv;
+
+ fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
+ if (!fpriv)
+ return -ENOMEM;
+
+ idr_init_base(&fpriv->legacy_contexts, 1);
+ xa_init_flags(&fpriv->contexts, XA_FLAGS_ALLOC1);
+ xa_init(&fpriv->syncpoints);
+ mutex_init(&fpriv->lock);
+ filp->driver_priv = fpriv;
+
+ return 0;
+}
+
+static void tegra_drm_context_free(struct tegra_drm_context *context)
+{
+ context->client->ops->close_channel(context);
+ pm_runtime_put(context->client->base.dev);
+ kfree(context);
+}
+
+static int host1x_reloc_copy_from_user(struct host1x_reloc *dest,
+ struct drm_tegra_reloc __user *src,
+ struct drm_device *drm,
+ struct drm_file *file)
+{
+ u32 cmdbuf, target;
+ int err;
+
+ err = get_user(cmdbuf, &src->cmdbuf.handle);
+ if (err < 0)
+ return err;
+
+ err = get_user(dest->cmdbuf.offset, &src->cmdbuf.offset);
+ if (err < 0)
+ return err;
+
+ err = get_user(target, &src->target.handle);
+ if (err < 0)
+ return err;
+
+ err = get_user(dest->target.offset, &src->target.offset);
+ if (err < 0)
+ return err;
+
+ err = get_user(dest->shift, &src->shift);
+ if (err < 0)
+ return err;
+
+ dest->flags = HOST1X_RELOC_READ | HOST1X_RELOC_WRITE;
+
+ dest->cmdbuf.bo = tegra_gem_lookup(file, cmdbuf);
+ if (!dest->cmdbuf.bo)
+ return -ENOENT;
+
+ dest->target.bo = tegra_gem_lookup(file, target);
+ if (!dest->target.bo)
+ return -ENOENT;
+
+ return 0;
+}
+
+int tegra_drm_submit(struct tegra_drm_context *context,
+ struct drm_tegra_submit *args, struct drm_device *drm,
+ struct drm_file *file)
+{
+ struct host1x_client *client = &context->client->base;
+ unsigned int num_cmdbufs = args->num_cmdbufs;
+ unsigned int num_relocs = args->num_relocs;
+ struct drm_tegra_cmdbuf __user *user_cmdbufs;
+ struct drm_tegra_reloc __user *user_relocs;
+ struct drm_tegra_syncpt __user *user_syncpt;
+ struct drm_tegra_syncpt syncpt;
+ struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
+ struct drm_gem_object **refs;
+ struct host1x_syncpt *sp = NULL;
+ struct host1x_job *job;
+ unsigned int num_refs;
+ int err;
+
+ user_cmdbufs = u64_to_user_ptr(args->cmdbufs);
+ user_relocs = u64_to_user_ptr(args->relocs);
+ user_syncpt = u64_to_user_ptr(args->syncpts);
+
+	/* We don't yet support more than one syncpt_incr struct per submit */
+ if (args->num_syncpts != 1)
+ return -EINVAL;
+
+ /* We don't yet support waitchks */
+ if (args->num_waitchks != 0)
+ return -EINVAL;
+
+ job = host1x_job_alloc(context->channel, args->num_cmdbufs,
+ args->num_relocs, false);
+ if (!job)
+ return -ENOMEM;
+
+ job->num_relocs = args->num_relocs;
+ job->client = client;
+ job->class = client->class;
+ job->serialize = true;
+ job->syncpt_recovery = true;
+
+ /*
+ * Track referenced BOs so that they can be unreferenced after the
+ * submission is complete.
+ */
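+	/* Each relocation pins two BOs: the patched cmdbuf and its target. */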
+ num_refs = num_cmdbufs + num_relocs * 2;
+
+ refs = kmalloc_array(num_refs, sizeof(*refs), GFP_KERNEL);
+ if (!refs) {
+ err = -ENOMEM;
+ goto put;
+ }
+
+ /* reuse as an iterator later */
+ num_refs = 0;
+
+ while (num_cmdbufs) {
+ struct drm_tegra_cmdbuf cmdbuf;
+ struct host1x_bo *bo;
+ struct tegra_bo *obj;
+ u64 offset;
+
+ if (copy_from_user(&cmdbuf, user_cmdbufs, sizeof(cmdbuf))) {
+ err = -EFAULT;
+ goto fail;
+ }
+
+		/*
+		 * The maximum number of CDMA gather fetches is 16383; a higher
+		 * value means the word count is malformed.
+		 */
+ if (cmdbuf.words > CDMA_GATHER_FETCHES_MAX_NB) {
+ err = -EINVAL;
+ goto fail;
+ }
+
+ bo = tegra_gem_lookup(file, cmdbuf.handle);
+ if (!bo) {
+ err = -ENOENT;
+ goto fail;
+ }
+
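+		/*
+		 * Compute the end of the gather in 64 bits so that the range
+		 * check below cannot be defeated by a 32-bit overflow of
+		 * offset + words * 4.
+		 */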
+ offset = (u64)cmdbuf.offset + (u64)cmdbuf.words * sizeof(u32);
+ obj = host1x_to_tegra_bo(bo);
+ refs[num_refs++] = &obj->gem;
+
+		/*
+		 * The gather buffer base address must be 4-byte aligned;
+		 * an unaligned offset is malformed and causes command
+		 * stream corruption during buffer address relocation.
+		 */
+ if (offset & 3 || offset > obj->gem.size) {
+ err = -EINVAL;
+ goto fail;
+ }
+
+ host1x_job_add_gather(job, bo, cmdbuf.words, cmdbuf.offset);
+ num_cmdbufs--;
+ user_cmdbufs++;
+ }
+
+ /* copy and resolve relocations from submit */
+ while (num_relocs--) {
+ struct host1x_reloc *reloc;
+ struct tegra_bo *obj;
+
+ err = host1x_reloc_copy_from_user(&job->relocs[num_relocs],
+ &user_relocs[num_relocs], drm,
+ file);
+ if (err < 0)
+ goto fail;
+
+ reloc = &job->relocs[num_relocs];
+ obj = host1x_to_tegra_bo(reloc->cmdbuf.bo);
+ refs[num_refs++] = &obj->gem;
+
+		/*
+		 * An unaligned cmdbuf offset would cause an unaligned write
+		 * during relocation patching, corrupting the command stream.
+		 */
+ if (reloc->cmdbuf.offset & 3 ||
+ reloc->cmdbuf.offset >= obj->gem.size) {
+ err = -EINVAL;
+ goto fail;
+ }
+
+ obj = host1x_to_tegra_bo(reloc->target.bo);
+ refs[num_refs++] = &obj->gem;
+
+ if (reloc->target.offset >= obj->gem.size) {
+ err = -EINVAL;
+ goto fail;
+ }
+ }
+
+ if (copy_from_user(&syncpt, user_syncpt, sizeof(syncpt))) {
+ err = -EFAULT;
+ goto fail;
+ }
+
+ /* Syncpoint ref will be dropped on job release. */
+ sp = host1x_syncpt_get_by_id(host1x, syncpt.id);
+ if (!sp) {
+ err = -ENOENT;
+ goto fail;
+ }
+
+ job->is_addr_reg = context->client->ops->is_addr_reg;
+ job->is_valid_class = context->client->ops->is_valid_class;
+ job->syncpt_incrs = syncpt.incrs;
+ job->syncpt = sp;
+ job->timeout = 10000;
+
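+	/* Honor a shorter user-supplied timeout, capped at the 10 second default. */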
+ if (args->timeout && args->timeout < 10000)
+ job->timeout = args->timeout;
+
+ err = host1x_job_pin(job, context->client->base.dev);
+ if (err)
+ goto fail;
+
+ err = host1x_job_submit(job);
+ if (err) {
+ host1x_job_unpin(job);
+ goto fail;
+ }
+
+ args->fence = job->syncpt_end;
+
+fail:
+ while (num_refs--)
+ drm_gem_object_put(refs[num_refs]);
+
+ kfree(refs);
+
+put:
+ host1x_job_put(job);
+ return err;
+}
+
+#ifdef CONFIG_DRM_TEGRA_STAGING
+static int tegra_gem_create(struct drm_device *drm, void *data,
+ struct drm_file *file)
+{
+ struct drm_tegra_gem_create *args = data;
+ struct tegra_bo *bo;
+
+ bo = tegra_bo_create_with_handle(file, drm, args->size, args->flags,
+ &args->handle);
+ if (IS_ERR(bo))
+ return PTR_ERR(bo);
+
+ return 0;
+}
+
+static int tegra_gem_mmap(struct drm_device *drm, void *data,
+ struct drm_file *file)
+{
+ struct drm_tegra_gem_mmap *args = data;
+ struct drm_gem_object *gem;
+ struct tegra_bo *bo;
+
+ gem = drm_gem_object_lookup(file, args->handle);
+ if (!gem)
+ return -EINVAL;
+
+ bo = to_tegra_bo(gem);
+
+ args->offset = drm_vma_node_offset_addr(&bo->gem.vma_node);
+
+ drm_gem_object_put(gem);
+
+ return 0;
+}
+
+static int tegra_syncpt_read(struct drm_device *drm, void *data,
+ struct drm_file *file)
+{
+ struct host1x *host = dev_get_drvdata(drm->dev->parent);
+ struct drm_tegra_syncpt_read *args = data;
+ struct host1x_syncpt *sp;
+
+ sp = host1x_syncpt_get_by_id_noref(host, args->id);
+ if (!sp)
+ return -EINVAL;
+
+ args->value = host1x_syncpt_read_min(sp);
+ return 0;
+}
+
+static int tegra_syncpt_incr(struct drm_device *drm, void *data,
+ struct drm_file *file)
+{
+ struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
+ struct drm_tegra_syncpt_incr *args = data;
+ struct host1x_syncpt *sp;
+
+ sp = host1x_syncpt_get_by_id_noref(host1x, args->id);
+ if (!sp)
+ return -EINVAL;
+
+ return host1x_syncpt_incr(sp);
+}
+
+static int tegra_syncpt_wait(struct drm_device *drm, void *data,
+ struct drm_file *file)
+{
+ struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
+ struct drm_tegra_syncpt_wait *args = data;
+ struct host1x_syncpt *sp;
+
+ sp = host1x_syncpt_get_by_id_noref(host1x, args->id);
+ if (!sp)
+ return -EINVAL;
+
+ return host1x_syncpt_wait(sp, args->thresh,
+ msecs_to_jiffies(args->timeout),
+ &args->value);
+}
+
+static int tegra_client_open(struct tegra_drm_file *fpriv,
+ struct tegra_drm_client *client,
+ struct tegra_drm_context *context)
+{
+ int err;
+
+ err = pm_runtime_resume_and_get(client->base.dev);
+ if (err)
+ return err;
+
+ err = client->ops->open_channel(client, context);
+ if (err < 0) {
+ pm_runtime_put(client->base.dev);
+ return err;
+ }
+
+ err = idr_alloc(&fpriv->legacy_contexts, context, 1, 0, GFP_KERNEL);
+ if (err < 0) {
+ client->ops->close_channel(context);
+ pm_runtime_put(client->base.dev);
+ return err;
+ }
+
+ context->client = client;
+ context->id = err;
+
+ return 0;
+}
+
+static int tegra_open_channel(struct drm_device *drm, void *data,
+ struct drm_file *file)
+{
+ struct tegra_drm_file *fpriv = file->driver_priv;
+ struct tegra_drm *tegra = drm->dev_private;
+ struct drm_tegra_open_channel *args = data;
+ struct tegra_drm_context *context;
+ struct tegra_drm_client *client;
+ int err = -ENODEV;
+
+ context = kzalloc(sizeof(*context), GFP_KERNEL);
+ if (!context)
+ return -ENOMEM;
+
+ mutex_lock(&fpriv->lock);
+
+ list_for_each_entry(client, &tegra->clients, list)
+ if (client->base.class == args->client) {
+ err = tegra_client_open(fpriv, client, context);
+ if (err < 0)
+ break;
+
+ args->context = context->id;
+ break;
+ }
+
+ if (err < 0)
+ kfree(context);
+
+ mutex_unlock(&fpriv->lock);
+ return err;
+}
+
+static int tegra_close_channel(struct drm_device *drm, void *data,
+ struct drm_file *file)
+{
+ struct tegra_drm_file *fpriv = file->driver_priv;
+ struct drm_tegra_close_channel *args = data;
+ struct tegra_drm_context *context;
+ int err = 0;
+
+ mutex_lock(&fpriv->lock);
+
+ context = idr_find(&fpriv->legacy_contexts, args->context);
+ if (!context) {
+ err = -EINVAL;
+ goto unlock;
+ }
+
+ idr_remove(&fpriv->legacy_contexts, context->id);
+ tegra_drm_context_free(context);
+
+unlock:
+ mutex_unlock(&fpriv->lock);
+ return err;
+}
+
+static int tegra_get_syncpt(struct drm_device *drm, void *data,
+ struct drm_file *file)
+{
+ struct tegra_drm_file *fpriv = file->driver_priv;
+ struct drm_tegra_get_syncpt *args = data;
+ struct tegra_drm_context *context;
+ struct host1x_syncpt *syncpt;
+ int err = 0;
+
+ mutex_lock(&fpriv->lock);
+
+ context = idr_find(&fpriv->legacy_contexts, args->context);
+ if (!context) {
+ err = -ENODEV;
+ goto unlock;
+ }
+
+ if (args->index >= context->client->base.num_syncpts) {
+ err = -EINVAL;
+ goto unlock;
+ }
+
+ syncpt = context->client->base.syncpts[args->index];
+ args->id = host1x_syncpt_id(syncpt);
+
+unlock:
+ mutex_unlock(&fpriv->lock);
+ return err;
+}
+
+static int tegra_submit(struct drm_device *drm, void *data,
+ struct drm_file *file)
+{
+ struct tegra_drm_file *fpriv = file->driver_priv;
+ struct drm_tegra_submit *args = data;
+ struct tegra_drm_context *context;
+ int err;
+
+ mutex_lock(&fpriv->lock);
+
+ context = idr_find(&fpriv->legacy_contexts, args->context);
+ if (!context) {
+ err = -ENODEV;
+ goto unlock;
+ }
+
+ err = context->client->ops->submit(context, args, drm, file);
+
+unlock:
+ mutex_unlock(&fpriv->lock);
+ return err;
+}
+
+static int tegra_get_syncpt_base(struct drm_device *drm, void *data,
+ struct drm_file *file)
+{
+ struct tegra_drm_file *fpriv = file->driver_priv;
+ struct drm_tegra_get_syncpt_base *args = data;
+ struct tegra_drm_context *context;
+ struct host1x_syncpt_base *base;
+ struct host1x_syncpt *syncpt;
+ int err = 0;
+
+ mutex_lock(&fpriv->lock);
+
+ context = idr_find(&fpriv->legacy_contexts, args->context);
+ if (!context) {
+ err = -ENODEV;
+ goto unlock;
+ }
+
+ if (args->syncpt >= context->client->base.num_syncpts) {
+ err = -EINVAL;
+ goto unlock;
+ }
+
+ syncpt = context->client->base.syncpts[args->syncpt];
+
+ base = host1x_syncpt_get_base(syncpt);
+ if (!base) {
+ err = -ENXIO;
+ goto unlock;
+ }
+
+ args->id = host1x_syncpt_base_id(base);
+
+unlock:
+ mutex_unlock(&fpriv->lock);
+ return err;
+}
+
+static int tegra_gem_set_tiling(struct drm_device *drm, void *data,
+ struct drm_file *file)
+{
+ struct drm_tegra_gem_set_tiling *args = data;
+ enum tegra_bo_tiling_mode mode;
+ struct drm_gem_object *gem;
+ unsigned long value = 0;
+ struct tegra_bo *bo;
+
+ switch (args->mode) {
+ case DRM_TEGRA_GEM_TILING_MODE_PITCH:
+ mode = TEGRA_BO_TILING_MODE_PITCH;
+
+ if (args->value != 0)
+ return -EINVAL;
+
+ break;
+
+ case DRM_TEGRA_GEM_TILING_MODE_TILED:
+ mode = TEGRA_BO_TILING_MODE_TILED;
+
+ if (args->value != 0)
+ return -EINVAL;
+
+ break;
+
+ case DRM_TEGRA_GEM_TILING_MODE_BLOCK:
+ mode = TEGRA_BO_TILING_MODE_BLOCK;
+
+ if (args->value > 5)
+ return -EINVAL;
+
+ value = args->value;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ gem = drm_gem_object_lookup(file, args->handle);
+ if (!gem)
+ return -ENOENT;
+
+ bo = to_tegra_bo(gem);
+
+ bo->tiling.mode = mode;
+ bo->tiling.value = value;
+
+ drm_gem_object_put(gem);
+
+ return 0;
+}
+
+static int tegra_gem_get_tiling(struct drm_device *drm, void *data,
+ struct drm_file *file)
+{
+ struct drm_tegra_gem_get_tiling *args = data;
+ struct drm_gem_object *gem;
+ struct tegra_bo *bo;
+ int err = 0;
+
+ gem = drm_gem_object_lookup(file, args->handle);
+ if (!gem)
+ return -ENOENT;
+
+ bo = to_tegra_bo(gem);
+
+ switch (bo->tiling.mode) {
+ case TEGRA_BO_TILING_MODE_PITCH:
+ args->mode = DRM_TEGRA_GEM_TILING_MODE_PITCH;
+ args->value = 0;
+ break;
+
+ case TEGRA_BO_TILING_MODE_TILED:
+ args->mode = DRM_TEGRA_GEM_TILING_MODE_TILED;
+ args->value = 0;
+ break;
+
+ case TEGRA_BO_TILING_MODE_BLOCK:
+ args->mode = DRM_TEGRA_GEM_TILING_MODE_BLOCK;
+ args->value = bo->tiling.value;
+ break;
+
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ drm_gem_object_put(gem);
+
+ return err;
+}
+
+static int tegra_gem_set_flags(struct drm_device *drm, void *data,
+ struct drm_file *file)
+{
+ struct drm_tegra_gem_set_flags *args = data;
+ struct drm_gem_object *gem;
+ struct tegra_bo *bo;
+
+ if (args->flags & ~DRM_TEGRA_GEM_FLAGS)
+ return -EINVAL;
+
+ gem = drm_gem_object_lookup(file, args->handle);
+ if (!gem)
+ return -ENOENT;
+
+ bo = to_tegra_bo(gem);
+ bo->flags = 0;
+
+ if (args->flags & DRM_TEGRA_GEM_BOTTOM_UP)
+ bo->flags |= TEGRA_BO_BOTTOM_UP;
+
+ drm_gem_object_put(gem);
+
+ return 0;
+}
+
+static int tegra_gem_get_flags(struct drm_device *drm, void *data,
+ struct drm_file *file)
+{
+ struct drm_tegra_gem_get_flags *args = data;
+ struct drm_gem_object *gem;
+ struct tegra_bo *bo;
+
+ gem = drm_gem_object_lookup(file, args->handle);
+ if (!gem)
+ return -ENOENT;
+
+ bo = to_tegra_bo(gem);
+ args->flags = 0;
+
+ if (bo->flags & TEGRA_BO_BOTTOM_UP)
+ args->flags |= DRM_TEGRA_GEM_BOTTOM_UP;
+
+ drm_gem_object_put(gem);
+
+ return 0;
+}
+#endif
+
+static const struct drm_ioctl_desc tegra_drm_ioctls[] = {
+#ifdef CONFIG_DRM_TEGRA_STAGING
+ DRM_IOCTL_DEF_DRV(TEGRA_CHANNEL_OPEN, tegra_drm_ioctl_channel_open,
+ DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(TEGRA_CHANNEL_CLOSE, tegra_drm_ioctl_channel_close,
+ DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(TEGRA_CHANNEL_MAP, tegra_drm_ioctl_channel_map,
+ DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(TEGRA_CHANNEL_UNMAP, tegra_drm_ioctl_channel_unmap,
+ DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(TEGRA_CHANNEL_SUBMIT, tegra_drm_ioctl_channel_submit,
+ DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(TEGRA_SYNCPOINT_ALLOCATE, tegra_drm_ioctl_syncpoint_allocate,
+ DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(TEGRA_SYNCPOINT_FREE, tegra_drm_ioctl_syncpoint_free,
+ DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(TEGRA_SYNCPOINT_WAIT, tegra_drm_ioctl_syncpoint_wait,
+ DRM_RENDER_ALLOW),
+
+ DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_READ, tegra_syncpt_read,
+ DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_INCR, tegra_syncpt_incr,
+ DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_WAIT, tegra_syncpt_wait,
+ DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(TEGRA_OPEN_CHANNEL, tegra_open_channel,
+ DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(TEGRA_CLOSE_CHANNEL, tegra_close_channel,
+ DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT, tegra_get_syncpt,
+ DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(TEGRA_SUBMIT, tegra_submit,
+ DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT_BASE, tegra_get_syncpt_base,
+ DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_TILING, tegra_gem_set_tiling,
+ DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_TILING, tegra_gem_get_tiling,
+ DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_FLAGS, tegra_gem_set_flags,
+ DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_FLAGS, tegra_gem_get_flags,
+ DRM_RENDER_ALLOW),
+#endif
+};
+
+static const struct file_operations tegra_drm_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = tegra_drm_mmap,
+ .poll = drm_poll,
+ .read = drm_read,
+ .compat_ioctl = drm_compat_ioctl,
+ .llseek = noop_llseek,
+};
+
+static int tegra_drm_context_cleanup(int id, void *p, void *data)
+{
+ struct tegra_drm_context *context = p;
+
+ tegra_drm_context_free(context);
+
+ return 0;
+}
+
+static void tegra_drm_postclose(struct drm_device *drm, struct drm_file *file)
+{
+ struct tegra_drm_file *fpriv = file->driver_priv;
+
+ mutex_lock(&fpriv->lock);
+ idr_for_each(&fpriv->legacy_contexts, tegra_drm_context_cleanup, NULL);
+ tegra_drm_uapi_close_file(fpriv);
+ mutex_unlock(&fpriv->lock);
+
+ idr_destroy(&fpriv->legacy_contexts);
+ mutex_destroy(&fpriv->lock);
+ kfree(fpriv);
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int tegra_debugfs_framebuffers(struct seq_file *s, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *)s->private;
+ struct drm_device *drm = node->minor->dev;
+ struct drm_framebuffer *fb;
+
+ mutex_lock(&drm->mode_config.fb_lock);
+
+ list_for_each_entry(fb, &drm->mode_config.fb_list, head) {
+ seq_printf(s, "%3d: user size: %d x %d, depth %d, %d bpp, refcount %d\n",
+ fb->base.id, fb->width, fb->height,
+ fb->format->depth,
+ fb->format->cpp[0] * 8,
+ drm_framebuffer_read_refcount(fb));
+ }
+
+ mutex_unlock(&drm->mode_config.fb_lock);
+
+ return 0;
+}
+
+static int tegra_debugfs_iova(struct seq_file *s, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *)s->private;
+ struct drm_device *drm = node->minor->dev;
+ struct tegra_drm *tegra = drm->dev_private;
+ struct drm_printer p = drm_seq_file_printer(s);
+
+ if (tegra->domain) {
+ mutex_lock(&tegra->mm_lock);
+ drm_mm_print(&tegra->mm, &p);
+ mutex_unlock(&tegra->mm_lock);
+ }
+
+ return 0;
+}
+
+static struct drm_info_list tegra_debugfs_list[] = {
+ { "framebuffers", tegra_debugfs_framebuffers, 0 },
+ { "iova", tegra_debugfs_iova, 0 },
+};
+
+static void tegra_debugfs_init(struct drm_minor *minor)
+{
+ drm_debugfs_create_files(tegra_debugfs_list,
+ ARRAY_SIZE(tegra_debugfs_list),
+ minor->debugfs_root, minor);
+}
+#endif
+
+static const struct drm_driver tegra_drm_driver = {
+ .driver_features = DRIVER_MODESET | DRIVER_GEM |
+ DRIVER_ATOMIC | DRIVER_RENDER | DRIVER_SYNCOBJ,
+ .open = tegra_drm_open,
+ .postclose = tegra_drm_postclose,
+ .lastclose = drm_fb_helper_lastclose,
+
+#if defined(CONFIG_DEBUG_FS)
+ .debugfs_init = tegra_debugfs_init,
+#endif
+
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_import = tegra_gem_prime_import,
+
+ .dumb_create = tegra_bo_dumb_create,
+
+ .ioctls = tegra_drm_ioctls,
+ .num_ioctls = ARRAY_SIZE(tegra_drm_ioctls),
+ .fops = &tegra_drm_fops,
+
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .patchlevel = DRIVER_PATCHLEVEL,
+};
+
+int tegra_drm_register_client(struct tegra_drm *tegra,
+ struct tegra_drm_client *client)
+{
+	/*
+	 * Once MLOCKs are implemented, change this to allocate a shared
+	 * channel only when MLOCKs are disabled.
+	 */
+ client->shared_channel = host1x_channel_request(&client->base);
+ if (!client->shared_channel)
+ return -EBUSY;
+
+ mutex_lock(&tegra->clients_lock);
+ list_add_tail(&client->list, &tegra->clients);
+ client->drm = tegra;
+ mutex_unlock(&tegra->clients_lock);
+
+ return 0;
+}
+
+int tegra_drm_unregister_client(struct tegra_drm *tegra,
+ struct tegra_drm_client *client)
+{
+ mutex_lock(&tegra->clients_lock);
+ list_del_init(&client->list);
+ client->drm = NULL;
+ mutex_unlock(&tegra->clients_lock);
+
+ if (client->shared_channel)
+ host1x_channel_put(client->shared_channel);
+
+ return 0;
+}
+
+int host1x_client_iommu_attach(struct host1x_client *client)
+{
+ struct iommu_domain *domain = iommu_get_domain_for_dev(client->dev);
+ struct drm_device *drm = dev_get_drvdata(client->host);
+ struct tegra_drm *tegra = drm->dev_private;
+ struct iommu_group *group = NULL;
+ int err;
+
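+	/*
+	 * On 32-bit ARM the DMA API may have attached the device to a
+	 * per-device IOMMU mapping; detach and release it first so that the
+	 * device can be attached to the shared domain below.
+	 */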
+#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
+ if (client->dev->archdata.mapping) {
+ struct dma_iommu_mapping *mapping =
+ to_dma_iommu_mapping(client->dev);
+ arm_iommu_detach_device(client->dev);
+ arm_iommu_release_mapping(mapping);
+
+ domain = iommu_get_domain_for_dev(client->dev);
+ }
+#endif
+
+ /*
+ * If the host1x client is already attached to an IOMMU domain that is
+ * not the shared IOMMU domain, don't try to attach it to a different
+ * domain. This allows using the IOMMU-backed DMA API.
+ */
+ if (domain && domain != tegra->domain)
+ return 0;
+
+ if (tegra->domain) {
+ group = iommu_group_get(client->dev);
+ if (!group)
+ return -ENODEV;
+
+ if (domain != tegra->domain) {
+ err = iommu_attach_group(tegra->domain, group);
+ if (err < 0) {
+ iommu_group_put(group);
+ return err;
+ }
+ }
+
+ tegra->use_explicit_iommu = true;
+ }
+
+ client->group = group;
+
+ return 0;
+}
+
+void host1x_client_iommu_detach(struct host1x_client *client)
+{
+ struct drm_device *drm = dev_get_drvdata(client->host);
+ struct tegra_drm *tegra = drm->dev_private;
+ struct iommu_domain *domain;
+
+ if (client->group) {
+ /*
+ * Devices that are part of the same group may no longer be
+ * attached to a domain at this point because their group may
+ * have been detached by an earlier client.
+ */
+ domain = iommu_get_domain_for_dev(client->dev);
+ if (domain)
+ iommu_detach_group(tegra->domain, client->group);
+
+ iommu_group_put(client->group);
+ client->group = NULL;
+ }
+}
+
+void *tegra_drm_alloc(struct tegra_drm *tegra, size_t size, dma_addr_t *dma)
+{
+ struct iova *alloc;
+ void *virt;
+ gfp_t gfp;
+ int err;
+
+ if (tegra->domain)
+ size = iova_align(&tegra->carveout.domain, size);
+ else
+ size = PAGE_ALIGN(size);
+
+ gfp = GFP_KERNEL | __GFP_ZERO;
+ if (!tegra->domain) {
+ /*
+ * Many units only support 32-bit addresses, even on 64-bit
+ * SoCs. If there is no IOMMU to translate into a 32-bit IO
+ * virtual address space, force allocations to be in the
+ * lower 32-bit range.
+ */
+ gfp |= GFP_DMA;
+ }
+
+ virt = (void *)__get_free_pages(gfp, get_order(size));
+ if (!virt)
+ return ERR_PTR(-ENOMEM);
+
+ if (!tegra->domain) {
+ /*
+ * If IOMMU is disabled, devices address physical memory
+ * directly.
+ */
+ *dma = virt_to_phys(virt);
+ return virt;
+ }
+
+ alloc = alloc_iova(&tegra->carveout.domain,
+ size >> tegra->carveout.shift,
+ tegra->carveout.limit, true);
+ if (!alloc) {
+ err = -EBUSY;
+ goto free_pages;
+ }
+
+ *dma = iova_dma_addr(&tegra->carveout.domain, alloc);
+ err = iommu_map(tegra->domain, *dma, virt_to_phys(virt),
+ size, IOMMU_READ | IOMMU_WRITE);
+ if (err < 0)
+ goto free_iova;
+
+ return virt;
+
+free_iova:
+ __free_iova(&tegra->carveout.domain, alloc);
+free_pages:
+ free_pages((unsigned long)virt, get_order(size));
+
+ return ERR_PTR(err);
+}
+
+void tegra_drm_free(struct tegra_drm *tegra, size_t size, void *virt,
+ dma_addr_t dma)
+{
+ if (tegra->domain)
+ size = iova_align(&tegra->carveout.domain, size);
+ else
+ size = PAGE_ALIGN(size);
+
+ if (tegra->domain) {
+ iommu_unmap(tegra->domain, dma, size);
+ free_iova(&tegra->carveout.domain,
+ iova_pfn(&tegra->carveout.domain, dma));
+ }
+
+ free_pages((unsigned long)virt, get_order(size));
+}
+
+static bool host1x_drm_wants_iommu(struct host1x_device *dev)
+{
+ struct host1x *host1x = dev_get_drvdata(dev->dev.parent);
+ struct iommu_domain *domain;
+
+ /* Our IOMMU usage policy doesn't currently play well with GART */
+ if (of_machine_is_compatible("nvidia,tegra20"))
+ return false;
+
+ /*
+ * If the Tegra DRM clients are backed by an IOMMU, push buffers are
+ * likely to be allocated beyond the 32-bit boundary if sufficient
+ * system memory is available. This is problematic on earlier Tegra
+ * generations where host1x supports a maximum of 32 address bits in
+	 * the GATHER opcode. In this case, unless host1x is behind an IOMMU
+	 * as well, it won't be able to process buffers allocated beyond the
+	 * 32-bit boundary.
+ *
+ * The DMA API will use bounce buffers in this case, so that could
+ * perhaps still be made to work, even if less efficient, but there
+ * is another catch: in order to perform cache maintenance on pages
+ * allocated for discontiguous buffers we need to map and unmap the
+ * SG table representing these buffers. This is fine for something
+ * small like a push buffer, but it exhausts the bounce buffer pool
+ * (typically on the order of a few MiB) for framebuffers (many MiB
+ * for any modern resolution).
+ *
+ * Work around this by making sure that Tegra DRM clients only use
+ * an IOMMU if the parent host1x also uses an IOMMU.
+ *
+	 * Note that there's still a small gap here that we don't cover: if
+	 * the DMA API is backed by an IOMMU there's no way to control which
+	 * device is attached to an IOMMU and which isn't, except by wiring
+	 * up the device tree appropriately. This is considered an integration
+	 * problem, so care must be taken to keep the device tree consistent.
+ */
+ domain = iommu_get_domain_for_dev(dev->dev.parent);
+
+ /*
+ * Tegra20 and Tegra30 don't support addressing memory beyond the
+ * 32-bit boundary, so the regular GATHER opcodes will always be
+ * sufficient and whether or not the host1x is attached to an IOMMU
+ * doesn't matter.
+ */
+ if (!domain && host1x_get_dma_mask(host1x) <= DMA_BIT_MASK(32))
+ return true;
+
+ return domain != NULL;
+}
+
+static int host1x_drm_probe(struct host1x_device *dev)
+{
+ struct tegra_drm *tegra;
+ struct drm_device *drm;
+ int err;
+
+ drm = drm_dev_alloc(&tegra_drm_driver, &dev->dev);
+ if (IS_ERR(drm))
+ return PTR_ERR(drm);
+
+ tegra = kzalloc(sizeof(*tegra), GFP_KERNEL);
+ if (!tegra) {
+ err = -ENOMEM;
+ goto put;
+ }
+
+ if (host1x_drm_wants_iommu(dev) && iommu_present(&platform_bus_type)) {
+ tegra->domain = iommu_domain_alloc(&platform_bus_type);
+ if (!tegra->domain) {
+ err = -ENOMEM;
+ goto free;
+ }
+
+ err = iova_cache_get();
+ if (err < 0)
+ goto domain;
+ }
+
+ mutex_init(&tegra->clients_lock);
+ INIT_LIST_HEAD(&tegra->clients);
+
+ dev_set_drvdata(&dev->dev, drm);
+ drm->dev_private = tegra;
+ tegra->drm = drm;
+
+ drm_mode_config_init(drm);
+
+ drm->mode_config.min_width = 0;
+ drm->mode_config.min_height = 0;
+ drm->mode_config.max_width = 0;
+ drm->mode_config.max_height = 0;
+
+ drm->mode_config.normalize_zpos = true;
+
+ drm->mode_config.funcs = &tegra_drm_mode_config_funcs;
+ drm->mode_config.helper_private = &tegra_drm_mode_config_helpers;
+
+ err = tegra_drm_fb_prepare(drm);
+ if (err < 0)
+ goto config;
+
+ drm_kms_helper_poll_init(drm);
+
+ err = host1x_device_init(dev);
+ if (err < 0)
+ goto fbdev;
+
+	/*
+	 * Now that all display controllers have been initialized, the maximum
+	 * supported resolution is known and the bitmasks for the horizontal
+	 * and vertical bitfields can be computed.
+	 */
+ tegra->hmask = drm->mode_config.max_width - 1;
+ tegra->vmask = drm->mode_config.max_height - 1;
+
+ if (tegra->use_explicit_iommu) {
+ u64 carveout_start, carveout_end, gem_start, gem_end;
+ u64 dma_mask = dma_get_mask(&dev->dev);
+ dma_addr_t start, end;
+ unsigned long order;
+
+ start = tegra->domain->geometry.aperture_start & dma_mask;
+ end = tegra->domain->geometry.aperture_end & dma_mask;
+
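+		/*
+		 * Split the usable aperture: the bottom of the range backs
+		 * GEM allocations, while the top CARVEOUT_SZ bytes form the
+		 * carveout that tegra_drm_alloc() allocates from.
+		 */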
+ gem_start = start;
+ gem_end = end - CARVEOUT_SZ;
+ carveout_start = gem_end + 1;
+ carveout_end = end;
+
+ order = __ffs(tegra->domain->pgsize_bitmap);
+ init_iova_domain(&tegra->carveout.domain, 1UL << order,
+ carveout_start >> order);
+
+ tegra->carveout.shift = iova_shift(&tegra->carveout.domain);
+ tegra->carveout.limit = carveout_end >> tegra->carveout.shift;
+
+ drm_mm_init(&tegra->mm, gem_start, gem_end - gem_start + 1);
+ mutex_init(&tegra->mm_lock);
+
+ DRM_DEBUG_DRIVER("IOMMU apertures:\n");
+ DRM_DEBUG_DRIVER(" GEM: %#llx-%#llx\n", gem_start, gem_end);
+ DRM_DEBUG_DRIVER(" Carveout: %#llx-%#llx\n", carveout_start,
+ carveout_end);
+ } else if (tegra->domain) {
+ iommu_domain_free(tegra->domain);
+ tegra->domain = NULL;
+ iova_cache_put();
+ }
+
+ if (tegra->hub) {
+ err = tegra_display_hub_prepare(tegra->hub);
+ if (err < 0)
+ goto device;
+ }
+
+ /* syncpoints are used for full 32-bit hardware VBLANK counters */
+ drm->max_vblank_count = 0xffffffff;
+
+ err = drm_vblank_init(drm, drm->mode_config.num_crtc);
+ if (err < 0)
+ goto hub;
+
+ drm_mode_config_reset(drm);
+
+ err = drm_aperture_remove_framebuffers(&tegra_drm_driver);
+ if (err < 0)
+ goto hub;
+
+ err = tegra_drm_fb_init(drm);
+ if (err < 0)
+ goto hub;
+
+ err = drm_dev_register(drm, 0);
+ if (err < 0)
+ goto fb;
+
+ return 0;
+
+fb:
+ tegra_drm_fb_exit(drm);
+hub:
+ if (tegra->hub)
+ tegra_display_hub_cleanup(tegra->hub);
+device:
+ if (tegra->domain) {
+ mutex_destroy(&tegra->mm_lock);
+ drm_mm_takedown(&tegra->mm);
+ put_iova_domain(&tegra->carveout.domain);
+ iova_cache_put();
+ }
+
+ host1x_device_exit(dev);
+fbdev:
+ drm_kms_helper_poll_fini(drm);
+ tegra_drm_fb_free(drm);
+config:
+ drm_mode_config_cleanup(drm);
+domain:
+ if (tegra->domain)
+ iommu_domain_free(tegra->domain);
+free:
+ kfree(tegra);
+put:
+ drm_dev_put(drm);
+ return err;
+}
+
+static int host1x_drm_remove(struct host1x_device *dev)
+{
+ struct drm_device *drm = dev_get_drvdata(&dev->dev);
+ struct tegra_drm *tegra = drm->dev_private;
+ int err;
+
+ drm_dev_unregister(drm);
+
+ drm_kms_helper_poll_fini(drm);
+ tegra_drm_fb_exit(drm);
+ drm_atomic_helper_shutdown(drm);
+ drm_mode_config_cleanup(drm);
+
+ if (tegra->hub)
+ tegra_display_hub_cleanup(tegra->hub);
+
+ err = host1x_device_exit(dev);
+ if (err < 0)
+ dev_err(&dev->dev, "host1x device cleanup failed: %d\n", err);
+
+ if (tegra->domain) {
+ mutex_destroy(&tegra->mm_lock);
+ drm_mm_takedown(&tegra->mm);
+ put_iova_domain(&tegra->carveout.domain);
+ iova_cache_put();
+ iommu_domain_free(tegra->domain);
+ }
+
+ kfree(tegra);
+ drm_dev_put(drm);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int host1x_drm_suspend(struct device *dev)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+
+ return drm_mode_config_helper_suspend(drm);
+}
+
+static int host1x_drm_resume(struct device *dev)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+
+ return drm_mode_config_helper_resume(drm);
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(host1x_drm_pm_ops, host1x_drm_suspend,
+ host1x_drm_resume);
+
+static const struct of_device_id host1x_drm_subdevs[] = {
+ { .compatible = "nvidia,tegra20-dc", },
+ { .compatible = "nvidia,tegra20-hdmi", },
+ { .compatible = "nvidia,tegra20-gr2d", },
+ { .compatible = "nvidia,tegra20-gr3d", },
+ { .compatible = "nvidia,tegra30-dc", },
+ { .compatible = "nvidia,tegra30-hdmi", },
+ { .compatible = "nvidia,tegra30-gr2d", },
+ { .compatible = "nvidia,tegra30-gr3d", },
+ { .compatible = "nvidia,tegra114-dc", },
+ { .compatible = "nvidia,tegra114-dsi", },
+ { .compatible = "nvidia,tegra114-hdmi", },
+ { .compatible = "nvidia,tegra114-gr2d", },
+ { .compatible = "nvidia,tegra114-gr3d", },
+ { .compatible = "nvidia,tegra124-dc", },
+ { .compatible = "nvidia,tegra124-sor", },
+ { .compatible = "nvidia,tegra124-hdmi", },
+ { .compatible = "nvidia,tegra124-dsi", },
+ { .compatible = "nvidia,tegra124-vic", },
+ { .compatible = "nvidia,tegra132-dsi", },
+ { .compatible = "nvidia,tegra210-dc", },
+ { .compatible = "nvidia,tegra210-dsi", },
+ { .compatible = "nvidia,tegra210-sor", },
+ { .compatible = "nvidia,tegra210-sor1", },
+ { .compatible = "nvidia,tegra210-vic", },
+ { .compatible = "nvidia,tegra210-nvdec", },
+ { .compatible = "nvidia,tegra186-display", },
+ { .compatible = "nvidia,tegra186-dc", },
+ { .compatible = "nvidia,tegra186-sor", },
+ { .compatible = "nvidia,tegra186-sor1", },
+ { .compatible = "nvidia,tegra186-vic", },
+ { .compatible = "nvidia,tegra186-nvdec", },
+ { .compatible = "nvidia,tegra194-display", },
+ { .compatible = "nvidia,tegra194-dc", },
+ { .compatible = "nvidia,tegra194-sor", },
+ { .compatible = "nvidia,tegra194-vic", },
+ { .compatible = "nvidia,tegra194-nvdec", },
+ { .compatible = "nvidia,tegra234-vic", },
+ { /* sentinel */ }
+};
+
+static struct host1x_driver host1x_drm_driver = {
+ .driver = {
+ .name = "drm",
+ .pm = &host1x_drm_pm_ops,
+ },
+ .probe = host1x_drm_probe,
+ .remove = host1x_drm_remove,
+ .subdevs = host1x_drm_subdevs,
+};
+
+static struct platform_driver * const drivers[] = {
+ &tegra_display_hub_driver,
+ &tegra_dc_driver,
+ &tegra_hdmi_driver,
+ &tegra_dsi_driver,
+ &tegra_dpaux_driver,
+ &tegra_sor_driver,
+ &tegra_gr2d_driver,
+ &tegra_gr3d_driver,
+ &tegra_vic_driver,
+ &tegra_nvdec_driver,
+};
+
+static int __init host1x_drm_init(void)
+{
+ int err;
+
+ if (drm_firmware_drivers_only())
+ return -ENODEV;
+
+ err = host1x_driver_register(&host1x_drm_driver);
+ if (err < 0)
+ return err;
+
+ err = platform_register_drivers(drivers, ARRAY_SIZE(drivers));
+ if (err < 0)
+ goto unregister_host1x;
+
+ return 0;
+
+unregister_host1x:
+ host1x_driver_unregister(&host1x_drm_driver);
+ return err;
+}
+module_init(host1x_drm_init);
+
+static void __exit host1x_drm_exit(void)
+{
+ platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
+ host1x_driver_unregister(&host1x_drm_driver);
+}
+module_exit(host1x_drm_exit);
+
+MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
+MODULE_DESCRIPTION("NVIDIA Tegra DRM driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
new file mode 100644
index 000000000..845e60f14
--- /dev/null
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -0,0 +1,218 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012-2013 NVIDIA CORPORATION. All rights reserved.
+ */
+
+#ifndef HOST1X_DRM_H
+#define HOST1X_DRM_H 1
+
+#include <linux/host1x.h>
+#include <linux/iova.h>
+#include <linux/gpio/consumer.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_encoder.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_fixed.h>
+#include <drm/drm_probe_helper.h>
+#include <uapi/drm/tegra_drm.h>
+
+#include "gem.h"
+#include "hub.h"
+#include "trace.h"
+
+/* XXX move to include/uapi/drm/drm_fourcc.h? */
+#define DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT BIT_ULL(22)
+
+struct reset_control;
+
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+struct tegra_fbdev {
+ struct drm_fb_helper base;
+ struct drm_framebuffer *fb;
+};
+#endif
+
+struct tegra_drm {
+ struct drm_device *drm;
+
+ struct iommu_domain *domain;
+ bool use_explicit_iommu;
+ struct mutex mm_lock;
+ struct drm_mm mm;
+
+ struct {
+ struct iova_domain domain;
+ unsigned long shift;
+ unsigned long limit;
+ } carveout;
+
+ struct mutex clients_lock;
+ struct list_head clients;
+
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+ struct tegra_fbdev *fbdev;
+#endif
+
+ unsigned int hmask, vmask;
+ unsigned int pitch_align;
+ unsigned int num_crtcs;
+
+ struct tegra_display_hub *hub;
+};
+
+static inline struct host1x *tegra_drm_to_host1x(struct tegra_drm *tegra)
+{
+ return dev_get_drvdata(tegra->drm->dev->parent);
+}
+
+struct tegra_drm_client;
+
+struct tegra_drm_context {
+ struct tegra_drm_client *client;
+ struct host1x_channel *channel;
+
+ /* Only used by legacy UAPI. */
+ unsigned int id;
+
+ /* Only used by new UAPI. */
+ struct xarray mappings;
+ struct host1x_memory_context *memory_context;
+};
+
+struct tegra_drm_client_ops {
+ int (*open_channel)(struct tegra_drm_client *client,
+ struct tegra_drm_context *context);
+ void (*close_channel)(struct tegra_drm_context *context);
+ int (*is_addr_reg)(struct device *dev, u32 class, u32 offset);
+ int (*is_valid_class)(u32 class);
+ int (*submit)(struct tegra_drm_context *context,
+ struct drm_tegra_submit *args, struct drm_device *drm,
+ struct drm_file *file);
+ int (*get_streamid_offset)(struct tegra_drm_client *client, u32 *offset);
+ int (*can_use_memory_ctx)(struct tegra_drm_client *client, bool *supported);
+};
+
+int tegra_drm_submit(struct tegra_drm_context *context,
+ struct drm_tegra_submit *args, struct drm_device *drm,
+ struct drm_file *file);
+
+static inline int
+tegra_drm_get_streamid_offset_thi(struct tegra_drm_client *client, u32 *offset)
+{
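+	/*
+	 * 0x30 matches the THI_STREAMID0 register offset used by THI-based
+	 * clients such as VIC and NVDEC.
+	 */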
+ *offset = 0x30;
+
+ return 0;
+}
+
+struct tegra_drm_client {
+ struct host1x_client base;
+ struct list_head list;
+ struct tegra_drm *drm;
+ struct host1x_channel *shared_channel;
+
+ /* Set by driver */
+ unsigned int version;
+ const struct tegra_drm_client_ops *ops;
+};
+
+static inline struct tegra_drm_client *
+host1x_to_drm_client(struct host1x_client *client)
+{
+ return container_of(client, struct tegra_drm_client, base);
+}
+
+int tegra_drm_register_client(struct tegra_drm *tegra,
+ struct tegra_drm_client *client);
+int tegra_drm_unregister_client(struct tegra_drm *tegra,
+ struct tegra_drm_client *client);
+int host1x_client_iommu_attach(struct host1x_client *client);
+void host1x_client_iommu_detach(struct host1x_client *client);
+
+int tegra_drm_init(struct tegra_drm *tegra, struct drm_device *drm);
+int tegra_drm_exit(struct tegra_drm *tegra);
+
+void *tegra_drm_alloc(struct tegra_drm *tegra, size_t size, dma_addr_t *iova);
+void tegra_drm_free(struct tegra_drm *tegra, size_t size, void *virt,
+ dma_addr_t iova);
+
+struct cec_notifier;
+
+struct tegra_output {
+ struct device_node *of_node;
+ struct device *dev;
+
+ struct drm_bridge *bridge;
+ struct drm_panel *panel;
+ struct i2c_adapter *ddc;
+ const struct edid *edid;
+ struct cec_notifier *cec;
+ unsigned int hpd_irq;
+ struct gpio_desc *hpd_gpio;
+
+ struct drm_encoder encoder;
+ struct drm_connector connector;
+};
+
+static inline struct tegra_output *encoder_to_output(struct drm_encoder *e)
+{
+ return container_of(e, struct tegra_output, encoder);
+}
+
+static inline struct tegra_output *connector_to_output(struct drm_connector *c)
+{
+ return container_of(c, struct tegra_output, connector);
+}
+
+/* from output.c */
+int tegra_output_probe(struct tegra_output *output);
+void tegra_output_remove(struct tegra_output *output);
+int tegra_output_init(struct drm_device *drm, struct tegra_output *output);
+void tegra_output_exit(struct tegra_output *output);
+void tegra_output_find_possible_crtcs(struct tegra_output *output,
+ struct drm_device *drm);
+int tegra_output_suspend(struct tegra_output *output);
+int tegra_output_resume(struct tegra_output *output);
+
+int tegra_output_connector_get_modes(struct drm_connector *connector);
+enum drm_connector_status
+tegra_output_connector_detect(struct drm_connector *connector, bool force);
+void tegra_output_connector_destroy(struct drm_connector *connector);
+
+/* from dpaux.c */
+struct drm_dp_aux *drm_dp_aux_find_by_of_node(struct device_node *np);
+enum drm_connector_status drm_dp_aux_detect(struct drm_dp_aux *aux);
+int drm_dp_aux_attach(struct drm_dp_aux *aux, struct tegra_output *output);
+int drm_dp_aux_detach(struct drm_dp_aux *aux);
+int drm_dp_aux_enable(struct drm_dp_aux *aux);
+int drm_dp_aux_disable(struct drm_dp_aux *aux);
+
+/* from fb.c */
+struct tegra_bo *tegra_fb_get_plane(struct drm_framebuffer *framebuffer,
+ unsigned int index);
+bool tegra_fb_is_bottom_up(struct drm_framebuffer *framebuffer);
+int tegra_fb_get_tiling(struct drm_framebuffer *framebuffer,
+ struct tegra_bo_tiling *tiling);
+struct drm_framebuffer *tegra_fb_create(struct drm_device *drm,
+ struct drm_file *file,
+ const struct drm_mode_fb_cmd2 *cmd);
+int tegra_drm_fb_prepare(struct drm_device *drm);
+void tegra_drm_fb_free(struct drm_device *drm);
+int tegra_drm_fb_init(struct drm_device *drm);
+void tegra_drm_fb_exit(struct drm_device *drm);
+
+extern struct platform_driver tegra_display_hub_driver;
+extern struct platform_driver tegra_dc_driver;
+extern struct platform_driver tegra_hdmi_driver;
+extern struct platform_driver tegra_dsi_driver;
+extern struct platform_driver tegra_dpaux_driver;
+extern struct platform_driver tegra_sor_driver;
+extern struct platform_driver tegra_gr2d_driver;
+extern struct platform_driver tegra_gr3d_driver;
+extern struct platform_driver tegra_vic_driver;
+extern struct platform_driver tegra_nvdec_driver;
+
+#endif /* HOST1X_DRM_H */
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
new file mode 100644
index 000000000..de1333dc0
--- /dev/null
+++ b/drivers/gpu/drm/tegra/dsi.c
@@ -0,0 +1,1700 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 NVIDIA Corporation
+ */
+
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/host1x.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_debugfs.h>
+#include <drm/drm_file.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_simple_kms_helper.h>
+
+#include "dc.h"
+#include "drm.h"
+#include "dsi.h"
+#include "mipi-phy.h"
+#include "trace.h"
+
+struct tegra_dsi_state {
+ struct drm_connector_state base;
+
+ struct mipi_dphy_timing timing;
+ unsigned long period;
+
+ unsigned int vrefresh;
+ unsigned int lanes;
+ unsigned long pclk;
+ unsigned long bclk;
+
+ enum tegra_dsi_format format;
+ unsigned int mul;
+ unsigned int div;
+};
+
+static inline struct tegra_dsi_state *
+to_dsi_state(struct drm_connector_state *state)
+{
+ return container_of(state, struct tegra_dsi_state, base);
+}
+
+struct tegra_dsi {
+ struct host1x_client client;
+ struct tegra_output output;
+ struct device *dev;
+
+ void __iomem *regs;
+
+ struct reset_control *rst;
+ struct clk *clk_parent;
+ struct clk *clk_lp;
+ struct clk *clk;
+
+ struct drm_info_list *debugfs_files;
+
+ unsigned long flags;
+ enum mipi_dsi_pixel_format format;
+ unsigned int lanes;
+
+ struct tegra_mipi_device *mipi;
+ struct mipi_dsi_host host;
+
+ struct regulator *vdd;
+
+ unsigned int video_fifo_depth;
+ unsigned int host_fifo_depth;
+
+ /* for ganged-mode support */
+ struct tegra_dsi *master;
+ struct tegra_dsi *slave;
+};
+
+static inline struct tegra_dsi *
+host1x_client_to_dsi(struct host1x_client *client)
+{
+ return container_of(client, struct tegra_dsi, client);
+}
+
+static inline struct tegra_dsi *host_to_tegra(struct mipi_dsi_host *host)
+{
+ return container_of(host, struct tegra_dsi, host);
+}
+
+static inline struct tegra_dsi *to_dsi(struct tegra_output *output)
+{
+ return container_of(output, struct tegra_dsi, output);
+}
+
+static struct tegra_dsi_state *tegra_dsi_get_state(struct tegra_dsi *dsi)
+{
+ return to_dsi_state(dsi->output.connector.state);
+}
+
+static inline u32 tegra_dsi_readl(struct tegra_dsi *dsi, unsigned int offset)
+{
+ u32 value = readl(dsi->regs + (offset << 2));
+
+ trace_dsi_readl(dsi->dev, offset, value);
+
+ return value;
+}
+
+static inline void tegra_dsi_writel(struct tegra_dsi *dsi, u32 value,
+ unsigned int offset)
+{
+ trace_dsi_writel(dsi->dev, offset, value);
+ writel(value, dsi->regs + (offset << 2));
+}
+
+#define DEBUGFS_REG32(_name) { .name = #_name, .offset = _name }
+
+static const struct debugfs_reg32 tegra_dsi_regs[] = {
+ DEBUGFS_REG32(DSI_INCR_SYNCPT),
+ DEBUGFS_REG32(DSI_INCR_SYNCPT_CONTROL),
+ DEBUGFS_REG32(DSI_INCR_SYNCPT_ERROR),
+ DEBUGFS_REG32(DSI_CTXSW),
+ DEBUGFS_REG32(DSI_RD_DATA),
+ DEBUGFS_REG32(DSI_WR_DATA),
+ DEBUGFS_REG32(DSI_POWER_CONTROL),
+ DEBUGFS_REG32(DSI_INT_ENABLE),
+ DEBUGFS_REG32(DSI_INT_STATUS),
+ DEBUGFS_REG32(DSI_INT_MASK),
+ DEBUGFS_REG32(DSI_HOST_CONTROL),
+ DEBUGFS_REG32(DSI_CONTROL),
+ DEBUGFS_REG32(DSI_SOL_DELAY),
+ DEBUGFS_REG32(DSI_MAX_THRESHOLD),
+ DEBUGFS_REG32(DSI_TRIGGER),
+ DEBUGFS_REG32(DSI_TX_CRC),
+ DEBUGFS_REG32(DSI_STATUS),
+ DEBUGFS_REG32(DSI_INIT_SEQ_CONTROL),
+ DEBUGFS_REG32(DSI_INIT_SEQ_DATA_0),
+ DEBUGFS_REG32(DSI_INIT_SEQ_DATA_1),
+ DEBUGFS_REG32(DSI_INIT_SEQ_DATA_2),
+ DEBUGFS_REG32(DSI_INIT_SEQ_DATA_3),
+ DEBUGFS_REG32(DSI_INIT_SEQ_DATA_4),
+ DEBUGFS_REG32(DSI_INIT_SEQ_DATA_5),
+ DEBUGFS_REG32(DSI_INIT_SEQ_DATA_6),
+ DEBUGFS_REG32(DSI_INIT_SEQ_DATA_7),
+ DEBUGFS_REG32(DSI_PKT_SEQ_0_LO),
+ DEBUGFS_REG32(DSI_PKT_SEQ_0_HI),
+ DEBUGFS_REG32(DSI_PKT_SEQ_1_LO),
+ DEBUGFS_REG32(DSI_PKT_SEQ_1_HI),
+ DEBUGFS_REG32(DSI_PKT_SEQ_2_LO),
+ DEBUGFS_REG32(DSI_PKT_SEQ_2_HI),
+ DEBUGFS_REG32(DSI_PKT_SEQ_3_LO),
+ DEBUGFS_REG32(DSI_PKT_SEQ_3_HI),
+ DEBUGFS_REG32(DSI_PKT_SEQ_4_LO),
+ DEBUGFS_REG32(DSI_PKT_SEQ_4_HI),
+ DEBUGFS_REG32(DSI_PKT_SEQ_5_LO),
+ DEBUGFS_REG32(DSI_PKT_SEQ_5_HI),
+ DEBUGFS_REG32(DSI_DCS_CMDS),
+ DEBUGFS_REG32(DSI_PKT_LEN_0_1),
+ DEBUGFS_REG32(DSI_PKT_LEN_2_3),
+ DEBUGFS_REG32(DSI_PKT_LEN_4_5),
+ DEBUGFS_REG32(DSI_PKT_LEN_6_7),
+ DEBUGFS_REG32(DSI_PHY_TIMING_0),
+ DEBUGFS_REG32(DSI_PHY_TIMING_1),
+ DEBUGFS_REG32(DSI_PHY_TIMING_2),
+ DEBUGFS_REG32(DSI_BTA_TIMING),
+ DEBUGFS_REG32(DSI_TIMEOUT_0),
+ DEBUGFS_REG32(DSI_TIMEOUT_1),
+ DEBUGFS_REG32(DSI_TO_TALLY),
+ DEBUGFS_REG32(DSI_PAD_CONTROL_0),
+ DEBUGFS_REG32(DSI_PAD_CONTROL_CD),
+ DEBUGFS_REG32(DSI_PAD_CD_STATUS),
+ DEBUGFS_REG32(DSI_VIDEO_MODE_CONTROL),
+ DEBUGFS_REG32(DSI_PAD_CONTROL_1),
+ DEBUGFS_REG32(DSI_PAD_CONTROL_2),
+ DEBUGFS_REG32(DSI_PAD_CONTROL_3),
+ DEBUGFS_REG32(DSI_PAD_CONTROL_4),
+ DEBUGFS_REG32(DSI_GANGED_MODE_CONTROL),
+ DEBUGFS_REG32(DSI_GANGED_MODE_START),
+ DEBUGFS_REG32(DSI_GANGED_MODE_SIZE),
+ DEBUGFS_REG32(DSI_RAW_DATA_BYTE_COUNT),
+ DEBUGFS_REG32(DSI_ULTRA_LOW_POWER_CONTROL),
+ DEBUGFS_REG32(DSI_INIT_SEQ_DATA_8),
+ DEBUGFS_REG32(DSI_INIT_SEQ_DATA_9),
+ DEBUGFS_REG32(DSI_INIT_SEQ_DATA_10),
+ DEBUGFS_REG32(DSI_INIT_SEQ_DATA_11),
+ DEBUGFS_REG32(DSI_INIT_SEQ_DATA_12),
+ DEBUGFS_REG32(DSI_INIT_SEQ_DATA_13),
+ DEBUGFS_REG32(DSI_INIT_SEQ_DATA_14),
+ DEBUGFS_REG32(DSI_INIT_SEQ_DATA_15),
+};
+
+static int tegra_dsi_show_regs(struct seq_file *s, void *data)
+{
+ struct drm_info_node *node = s->private;
+ struct tegra_dsi *dsi = node->info_ent->data;
+ struct drm_crtc *crtc = dsi->output.encoder.crtc;
+ struct drm_device *drm = node->minor->dev;
+ unsigned int i;
+ int err = 0;
+
+ drm_modeset_lock_all(drm);
+
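+	/* the registers can only be read safely while the output is active */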
+ if (!crtc || !crtc->state->active) {
+ err = -EBUSY;
+ goto unlock;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(tegra_dsi_regs); i++) {
+ unsigned int offset = tegra_dsi_regs[i].offset;
+
+ seq_printf(s, "%-32s %#05x %08x\n", tegra_dsi_regs[i].name,
+ offset, tegra_dsi_readl(dsi, offset));
+ }
+
+unlock:
+ drm_modeset_unlock_all(drm);
+ return err;
+}
+
+static struct drm_info_list debugfs_files[] = {
+ { "regs", tegra_dsi_show_regs, 0, NULL },
+};
+
+static int tegra_dsi_late_register(struct drm_connector *connector)
+{
+ struct tegra_output *output = connector_to_output(connector);
+ unsigned int i, count = ARRAY_SIZE(debugfs_files);
+ struct drm_minor *minor = connector->dev->primary;
+ struct dentry *root = connector->debugfs_entry;
+ struct tegra_dsi *dsi = to_dsi(output);
+
+ dsi->debugfs_files = kmemdup(debugfs_files, sizeof(debugfs_files),
+ GFP_KERNEL);
+ if (!dsi->debugfs_files)
+ return -ENOMEM;
+
+ for (i = 0; i < count; i++)
+ dsi->debugfs_files[i].data = dsi;
+
+ drm_debugfs_create_files(dsi->debugfs_files, count, root, minor);
+
+ return 0;
+}
+
+static void tegra_dsi_early_unregister(struct drm_connector *connector)
+{
+ struct tegra_output *output = connector_to_output(connector);
+ unsigned int count = ARRAY_SIZE(debugfs_files);
+ struct tegra_dsi *dsi = to_dsi(output);
+
+ drm_debugfs_remove_files(dsi->debugfs_files, count,
+ connector->dev->primary);
+ kfree(dsi->debugfs_files);
+ dsi->debugfs_files = NULL;
+}
+
+#define PKT_ID0(id) ((((id) & 0x3f) << 3) | (1 << 9))
+#define PKT_LEN0(len) (((len) & 0x07) << 0)
+#define PKT_ID1(id) ((((id) & 0x3f) << 13) | (1 << 19))
+#define PKT_LEN1(len) (((len) & 0x07) << 10)
+#define PKT_ID2(id) ((((id) & 0x3f) << 23) | (1 << 29))
+#define PKT_LEN2(len) (((len) & 0x07) << 20)
+
+#define PKT_LP (1 << 30)
+#define NUM_PKT_SEQ 12
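+
+/*
+ * Each sequence word packs up to three packet slots: a 6-bit data type ID
+ * (with an enable bit above it) and a 3-bit length code per slot. The length
+ * codes appear to index the packet lengths programmed into the DSI_PKT_LEN_*
+ * registers rather than encode byte counts directly, and PKT_LP presumably
+ * requests a return to low-power mode after the line. Entry [0] below, for
+ * example, sends V_SYNC_START, a blanking packet (length slot 1) and
+ * H_SYNC_END, then drops to LP mode.
+ */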
+
+/*
+ * non-burst mode with sync pulses
+ */
+static const u32 pkt_seq_video_non_burst_sync_pulses[NUM_PKT_SEQ] = {
+ [ 0] = PKT_ID0(MIPI_DSI_V_SYNC_START) | PKT_LEN0(0) |
+ PKT_ID1(MIPI_DSI_BLANKING_PACKET) | PKT_LEN1(1) |
+ PKT_ID2(MIPI_DSI_H_SYNC_END) | PKT_LEN2(0) |
+ PKT_LP,
+ [ 1] = 0,
+ [ 2] = PKT_ID0(MIPI_DSI_V_SYNC_END) | PKT_LEN0(0) |
+ PKT_ID1(MIPI_DSI_BLANKING_PACKET) | PKT_LEN1(1) |
+ PKT_ID2(MIPI_DSI_H_SYNC_END) | PKT_LEN2(0) |
+ PKT_LP,
+ [ 3] = 0,
+ [ 4] = PKT_ID0(MIPI_DSI_H_SYNC_START) | PKT_LEN0(0) |
+ PKT_ID1(MIPI_DSI_BLANKING_PACKET) | PKT_LEN1(1) |
+ PKT_ID2(MIPI_DSI_H_SYNC_END) | PKT_LEN2(0) |
+ PKT_LP,
+ [ 5] = 0,
+ [ 6] = PKT_ID0(MIPI_DSI_H_SYNC_START) | PKT_LEN0(0) |
+ PKT_ID1(MIPI_DSI_BLANKING_PACKET) | PKT_LEN1(1) |
+ PKT_ID2(MIPI_DSI_H_SYNC_END) | PKT_LEN2(0),
+ [ 7] = PKT_ID0(MIPI_DSI_BLANKING_PACKET) | PKT_LEN0(2) |
+ PKT_ID1(MIPI_DSI_PACKED_PIXEL_STREAM_24) | PKT_LEN1(3) |
+ PKT_ID2(MIPI_DSI_BLANKING_PACKET) | PKT_LEN2(4),
+ [ 8] = PKT_ID0(MIPI_DSI_H_SYNC_START) | PKT_LEN0(0) |
+ PKT_ID1(MIPI_DSI_BLANKING_PACKET) | PKT_LEN1(1) |
+ PKT_ID2(MIPI_DSI_H_SYNC_END) | PKT_LEN2(0) |
+ PKT_LP,
+ [ 9] = 0,
+ [10] = PKT_ID0(MIPI_DSI_H_SYNC_START) | PKT_LEN0(0) |
+ PKT_ID1(MIPI_DSI_BLANKING_PACKET) | PKT_LEN1(1) |
+ PKT_ID2(MIPI_DSI_H_SYNC_END) | PKT_LEN2(0),
+ [11] = PKT_ID0(MIPI_DSI_BLANKING_PACKET) | PKT_LEN0(2) |
+ PKT_ID1(MIPI_DSI_PACKED_PIXEL_STREAM_24) | PKT_LEN1(3) |
+ PKT_ID2(MIPI_DSI_BLANKING_PACKET) | PKT_LEN2(4),
+};
+
+/*
+ * non-burst mode with sync events
+ */
+static const u32 pkt_seq_video_non_burst_sync_events[NUM_PKT_SEQ] = {
+ [ 0] = PKT_ID0(MIPI_DSI_V_SYNC_START) | PKT_LEN0(0) |
+ PKT_ID1(MIPI_DSI_END_OF_TRANSMISSION) | PKT_LEN1(7) |
+ PKT_LP,
+ [ 1] = 0,
+ [ 2] = PKT_ID0(MIPI_DSI_H_SYNC_START) | PKT_LEN0(0) |
+ PKT_ID1(MIPI_DSI_END_OF_TRANSMISSION) | PKT_LEN1(7) |
+ PKT_LP,
+ [ 3] = 0,
+ [ 4] = PKT_ID0(MIPI_DSI_H_SYNC_START) | PKT_LEN0(0) |
+ PKT_ID1(MIPI_DSI_END_OF_TRANSMISSION) | PKT_LEN1(7) |
+ PKT_LP,
+ [ 5] = 0,
+ [ 6] = PKT_ID0(MIPI_DSI_H_SYNC_START) | PKT_LEN0(0) |
+ PKT_ID1(MIPI_DSI_BLANKING_PACKET) | PKT_LEN1(2) |
+ PKT_ID2(MIPI_DSI_PACKED_PIXEL_STREAM_24) | PKT_LEN2(3),
+ [ 7] = PKT_ID0(MIPI_DSI_BLANKING_PACKET) | PKT_LEN0(4),
+ [ 8] = PKT_ID0(MIPI_DSI_H_SYNC_START) | PKT_LEN0(0) |
+ PKT_ID1(MIPI_DSI_END_OF_TRANSMISSION) | PKT_LEN1(7) |
+ PKT_LP,
+ [ 9] = 0,
+ [10] = PKT_ID0(MIPI_DSI_H_SYNC_START) | PKT_LEN0(0) |
+ PKT_ID1(MIPI_DSI_BLANKING_PACKET) | PKT_LEN1(2) |
+ PKT_ID2(MIPI_DSI_PACKED_PIXEL_STREAM_24) | PKT_LEN2(3),
+ [11] = PKT_ID0(MIPI_DSI_BLANKING_PACKET) | PKT_LEN0(4),
+};
+
+static const u32 pkt_seq_command_mode[NUM_PKT_SEQ] = {
+ [ 0] = 0,
+ [ 1] = 0,
+ [ 2] = 0,
+ [ 3] = 0,
+ [ 4] = 0,
+ [ 5] = 0,
+ [ 6] = PKT_ID0(MIPI_DSI_DCS_LONG_WRITE) | PKT_LEN0(3) | PKT_LP,
+ [ 7] = 0,
+ [ 8] = 0,
+ [ 9] = 0,
+ [10] = PKT_ID0(MIPI_DSI_DCS_LONG_WRITE) | PKT_LEN0(5) | PKT_LP,
+ [11] = 0,
+};
+
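+/*
+ * Program the D-PHY timing registers, using DSI_TIMING_FIELD() to convert
+ * each value to byte-clock cycles and to subtract the hardware-internal
+ * increment.
+ */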
+static void tegra_dsi_set_phy_timing(struct tegra_dsi *dsi,
+ unsigned long period,
+ const struct mipi_dphy_timing *timing)
+{
+ u32 value;
+
+ value = DSI_TIMING_FIELD(timing->hsexit, period, 1) << 24 |
+ DSI_TIMING_FIELD(timing->hstrail, period, 0) << 16 |
+ DSI_TIMING_FIELD(timing->hszero, period, 3) << 8 |
+ DSI_TIMING_FIELD(timing->hsprepare, period, 1);
+ tegra_dsi_writel(dsi, value, DSI_PHY_TIMING_0);
+
+ value = DSI_TIMING_FIELD(timing->clktrail, period, 1) << 24 |
+ DSI_TIMING_FIELD(timing->clkpost, period, 1) << 16 |
+ DSI_TIMING_FIELD(timing->clkzero, period, 1) << 8 |
+ DSI_TIMING_FIELD(timing->lpx, period, 1);
+ tegra_dsi_writel(dsi, value, DSI_PHY_TIMING_1);
+
+ value = DSI_TIMING_FIELD(timing->clkprepare, period, 1) << 16 |
+ DSI_TIMING_FIELD(timing->clkpre, period, 1) << 8 |
+ DSI_TIMING_FIELD(0xff * period, period, 0) << 0;
+ tegra_dsi_writel(dsi, value, DSI_PHY_TIMING_2);
+
+ value = DSI_TIMING_FIELD(timing->taget, period, 1) << 16 |
+ DSI_TIMING_FIELD(timing->tasure, period, 1) << 8 |
+ DSI_TIMING_FIELD(timing->tago, period, 1);
+ tegra_dsi_writel(dsi, value, DSI_BTA_TIMING);
+
+ if (dsi->slave)
+ tegra_dsi_set_phy_timing(dsi->slave, period, timing);
+}
+
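+/*
+ * Return the number of bytes per pixel on the DSI link as a mul/div
+ * ratio: 3 bytes for the 24 bpp formats, 9/4 bytes for packed 18 bpp
+ * and 2 bytes for 16 bpp.
+ */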
+static int tegra_dsi_get_muldiv(enum mipi_dsi_pixel_format format,
+ unsigned int *mulp, unsigned int *divp)
+{
+ switch (format) {
+ case MIPI_DSI_FMT_RGB666:
+ case MIPI_DSI_FMT_RGB888:
+ *mulp = 3;
+ *divp = 1;
+ break;
+
+ case MIPI_DSI_FMT_RGB565:
+ *mulp = 2;
+ *divp = 1;
+ break;
+
+ case MIPI_DSI_FMT_RGB666_PACKED:
+ *mulp = 9;
+ *divp = 4;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int tegra_dsi_get_format(enum mipi_dsi_pixel_format format,
+ enum tegra_dsi_format *fmt)
+{
+ switch (format) {
+ case MIPI_DSI_FMT_RGB888:
+ *fmt = TEGRA_DSI_FORMAT_24P;
+ break;
+
+ case MIPI_DSI_FMT_RGB666:
+ *fmt = TEGRA_DSI_FORMAT_18NP;
+ break;
+
+ case MIPI_DSI_FMT_RGB666_PACKED:
+ *fmt = TEGRA_DSI_FORMAT_18P;
+ break;
+
+ case MIPI_DSI_FMT_RGB565:
+ *fmt = TEGRA_DSI_FORMAT_16P;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
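+/*
+ * Program the horizontal start position and width of the region that
+ * this controller drives in ganged (dual-link) mode.
+ */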
+static void tegra_dsi_ganged_enable(struct tegra_dsi *dsi, unsigned int start,
+ unsigned int size)
+{
+ u32 value;
+
+ tegra_dsi_writel(dsi, start, DSI_GANGED_MODE_START);
+ tegra_dsi_writel(dsi, size << 16 | size, DSI_GANGED_MODE_SIZE);
+
+ value = DSI_GANGED_MODE_CONTROL_ENABLE;
+ tegra_dsi_writel(dsi, value, DSI_GANGED_MODE_CONTROL);
+}
+
+static void tegra_dsi_enable(struct tegra_dsi *dsi)
+{
+ u32 value;
+
+ value = tegra_dsi_readl(dsi, DSI_POWER_CONTROL);
+ value |= DSI_POWER_CONTROL_ENABLE;
+ tegra_dsi_writel(dsi, value, DSI_POWER_CONTROL);
+
+ if (dsi->slave)
+ tegra_dsi_enable(dsi->slave);
+}
+
+static unsigned int tegra_dsi_get_lanes(struct tegra_dsi *dsi)
+{
+ if (dsi->master)
+ return dsi->master->lanes + dsi->lanes;
+
+ if (dsi->slave)
+ return dsi->lanes + dsi->slave->lanes;
+
+ return dsi->lanes;
+}
+
+static void tegra_dsi_configure(struct tegra_dsi *dsi, unsigned int pipe,
+ const struct drm_display_mode *mode)
+{
+ unsigned int hact, hsw, hbp, hfp, i, mul, div;
+ struct tegra_dsi_state *state;
+ const u32 *pkt_seq;
+ u32 value;
+
+ /* XXX: pass in state into this function? */
+ if (dsi->master)
+ state = tegra_dsi_get_state(dsi->master);
+ else
+ state = tegra_dsi_get_state(dsi);
+
+ mul = state->mul;
+ div = state->div;
+
+ if (dsi->flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) {
+ DRM_DEBUG_KMS("Non-burst video mode with sync pulses\n");
+ pkt_seq = pkt_seq_video_non_burst_sync_pulses;
+ } else if (dsi->flags & MIPI_DSI_MODE_VIDEO) {
+ DRM_DEBUG_KMS("Non-burst video mode with sync events\n");
+ pkt_seq = pkt_seq_video_non_burst_sync_events;
+ } else {
+ DRM_DEBUG_KMS("Command mode\n");
+ pkt_seq = pkt_seq_command_mode;
+ }
+
+ value = DSI_CONTROL_CHANNEL(0) |
+ DSI_CONTROL_FORMAT(state->format) |
+ DSI_CONTROL_LANES(dsi->lanes - 1) |
+ DSI_CONTROL_SOURCE(pipe);
+ tegra_dsi_writel(dsi, value, DSI_CONTROL);
+
+ tegra_dsi_writel(dsi, dsi->video_fifo_depth, DSI_MAX_THRESHOLD);
+
+ value = DSI_HOST_CONTROL_HS;
+ tegra_dsi_writel(dsi, value, DSI_HOST_CONTROL);
+
+ value = tegra_dsi_readl(dsi, DSI_CONTROL);
+
+ if (dsi->flags & MIPI_DSI_CLOCK_NON_CONTINUOUS)
+ value |= DSI_CONTROL_HS_CLK_CTRL;
+
+ value &= ~DSI_CONTROL_TX_TRIG(3);
+
+ /* enable DCS commands for command mode */
+ if (dsi->flags & MIPI_DSI_MODE_VIDEO)
+ value &= ~DSI_CONTROL_DCS_ENABLE;
+ else
+ value |= DSI_CONTROL_DCS_ENABLE;
+
+ value |= DSI_CONTROL_VIDEO_ENABLE;
+ value &= ~DSI_CONTROL_HOST_ENABLE;
+ tegra_dsi_writel(dsi, value, DSI_CONTROL);
+
+ for (i = 0; i < NUM_PKT_SEQ; i++)
+ tegra_dsi_writel(dsi, pkt_seq[i], DSI_PKT_SEQ_0_LO + i);
+
+ if (dsi->flags & MIPI_DSI_MODE_VIDEO) {
+ /* horizontal active pixels */
+ hact = mode->hdisplay * mul / div;
+
+ /* horizontal sync width */
+ hsw = (mode->hsync_end - mode->hsync_start) * mul / div;
+
+ /* horizontal back porch */
+ hbp = (mode->htotal - mode->hsync_end) * mul / div;
+
+ if ((dsi->flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) == 0)
+ hbp += hsw;
+
+ /* horizontal front porch */
+ hfp = (mode->hsync_start - mode->hdisplay) * mul / div;
+
+ /*
+ * Subtract the packet overhead (the headers and checksums of the
+ * sync, blanking and pixel-stream packets sent in each region).
+ */
+ hsw -= 10;
+ hbp -= 14;
+ hfp -= 8;
+
+ tegra_dsi_writel(dsi, hsw << 16 | 0, DSI_PKT_LEN_0_1);
+ tegra_dsi_writel(dsi, hact << 16 | hbp, DSI_PKT_LEN_2_3);
+ tegra_dsi_writel(dsi, hfp, DSI_PKT_LEN_4_5);
+ tegra_dsi_writel(dsi, 0x0f0f << 16, DSI_PKT_LEN_6_7);
+
+ /* set SOL delay (for non-burst mode only) */
+ tegra_dsi_writel(dsi, 8 * mul / div, DSI_SOL_DELAY);
+
+ /* TODO: implement ganged mode */
+ } else {
+ u16 bytes;
+
+ if (dsi->master || dsi->slave) {
+ /*
+ * For ganged mode, assume a symmetric left-right split.
+ */
+ bytes = 1 + (mode->hdisplay / 2) * mul / div;
+ } else {
+ /* 1 byte (DCS command) + pixel data */
+ bytes = 1 + mode->hdisplay * mul / div;
+ }
+
+ tegra_dsi_writel(dsi, 0, DSI_PKT_LEN_0_1);
+ tegra_dsi_writel(dsi, bytes << 16, DSI_PKT_LEN_2_3);
+ tegra_dsi_writel(dsi, bytes << 16, DSI_PKT_LEN_4_5);
+ tegra_dsi_writel(dsi, 0, DSI_PKT_LEN_6_7);
+
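+ /*
+ * The first line of a frame is transferred using the DCS
+ * write_memory_start command, all subsequent lines use
+ * write_memory_continue.
+ */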
+ value = MIPI_DCS_WRITE_MEMORY_START << 8 |
+ MIPI_DCS_WRITE_MEMORY_CONTINUE;
+ tegra_dsi_writel(dsi, value, DSI_DCS_CMDS);
+
+ /* set SOL delay */
+ if (dsi->master || dsi->slave) {
+ unsigned long delay, bclk, bclk_ganged;
+ unsigned int lanes = state->lanes;
+
+ /* SOL to valid, valid to FIFO and FIFO write delay */
+ delay = 4 + 4 + 2;
+ delay = DIV_ROUND_UP(delay * mul, div * lanes);
+ /* FIFO read delay */
+ delay = delay + 6;
+
+ bclk = DIV_ROUND_UP(mode->htotal * mul, div * lanes);
+ bclk_ganged = DIV_ROUND_UP(bclk * lanes / 2, lanes);
+ value = bclk - bclk_ganged + delay + 20;
+ } else {
+ /* TODO: revisit for non-ganged mode */
+ value = 8 * mul / div;
+ }
+
+ tegra_dsi_writel(dsi, value, DSI_SOL_DELAY);
+ }
+
+ if (dsi->slave) {
+ tegra_dsi_configure(dsi->slave, pipe, mode);
+
+ /*
+ * TODO: Support modes other than symmetrical left-right
+ * split.
+ */
+ tegra_dsi_ganged_enable(dsi, 0, mode->hdisplay / 2);
+ tegra_dsi_ganged_enable(dsi->slave, mode->hdisplay / 2,
+ mode->hdisplay / 2);
+ }
+}
+
+static int tegra_dsi_wait_idle(struct tegra_dsi *dsi, unsigned long timeout)
+{
+ u32 value;
+
+ timeout = jiffies + msecs_to_jiffies(timeout);
+
+ while (time_before(jiffies, timeout)) {
+ value = tegra_dsi_readl(dsi, DSI_STATUS);
+ if (value & DSI_STATUS_IDLE)
+ return 0;
+
+ usleep_range(1000, 2000);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static void tegra_dsi_video_disable(struct tegra_dsi *dsi)
+{
+ u32 value;
+
+ value = tegra_dsi_readl(dsi, DSI_CONTROL);
+ value &= ~DSI_CONTROL_VIDEO_ENABLE;
+ tegra_dsi_writel(dsi, value, DSI_CONTROL);
+
+ if (dsi->slave)
+ tegra_dsi_video_disable(dsi->slave);
+}
+
+static void tegra_dsi_ganged_disable(struct tegra_dsi *dsi)
+{
+ tegra_dsi_writel(dsi, 0, DSI_GANGED_MODE_START);
+ tegra_dsi_writel(dsi, 0, DSI_GANGED_MODE_SIZE);
+ tegra_dsi_writel(dsi, 0, DSI_GANGED_MODE_CONTROL);
+}
+
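+/*
+ * Enable the DSI pads by clearing the power-down and pull-down controls
+ * for the data lanes.
+ */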
+static int tegra_dsi_pad_enable(struct tegra_dsi *dsi)
+{
+ u32 value;
+
+ value = DSI_PAD_CONTROL_VS1_PULLDN(0) | DSI_PAD_CONTROL_VS1_PDIO(0);
+ tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_0);
+
+ return 0;
+}
+
+static int tegra_dsi_pad_calibrate(struct tegra_dsi *dsi)
+{
+ u32 value;
+ int err;
+
+ /*
+ * XXX Is this still needed? The module reset is deasserted right
+ * before this function is called.
+ */
+ tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_0);
+ tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_1);
+ tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_2);
+ tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_3);
+ tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_4);
+
+ /* start calibration */
+ tegra_dsi_pad_enable(dsi);
+
+ value = DSI_PAD_SLEW_UP(0x7) | DSI_PAD_SLEW_DN(0x7) |
+ DSI_PAD_LP_UP(0x1) | DSI_PAD_LP_DN(0x1) |
+ DSI_PAD_OUT_CLK(0x0);
+ tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_2);
+
+ value = DSI_PAD_PREEMP_PD_CLK(0x3) | DSI_PAD_PREEMP_PU_CLK(0x3) |
+ DSI_PAD_PREEMP_PD(0x03) | DSI_PAD_PREEMP_PU(0x3);
+ tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_3);
+
+ err = tegra_mipi_start_calibration(dsi->mipi);
+ if (err < 0)
+ return err;
+
+ return tegra_mipi_finish_calibration(dsi->mipi);
+}
+
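+/*
+ * Program the bus timeouts. The high-speed transmission timeout is sized
+ * to one frame, expressed in units of 512 byte-clock cycles.
+ */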
+static void tegra_dsi_set_timeout(struct tegra_dsi *dsi, unsigned long bclk,
+ unsigned int vrefresh)
+{
+ unsigned int timeout;
+ u32 value;
+
+ /* one frame high-speed transmission timeout */
+ timeout = (bclk / vrefresh) / 512;
+ value = DSI_TIMEOUT_LRX(0x2000) | DSI_TIMEOUT_HTX(timeout);
+ tegra_dsi_writel(dsi, value, DSI_TIMEOUT_0);
+
+ /* 2 ms peripheral timeout for panel */
+ timeout = 2 * bclk / 512 * 1000;
+ value = DSI_TIMEOUT_PR(timeout) | DSI_TIMEOUT_TA(0x2000);
+ tegra_dsi_writel(dsi, value, DSI_TIMEOUT_1);
+
+ value = DSI_TALLY_TA(0) | DSI_TALLY_LRX(0) | DSI_TALLY_HTX(0);
+ tegra_dsi_writel(dsi, value, DSI_TO_TALLY);
+
+ if (dsi->slave)
+ tegra_dsi_set_timeout(dsi->slave, bclk, vrefresh);
+}
+
+static void tegra_dsi_disable(struct tegra_dsi *dsi)
+{
+ u32 value;
+
+ if (dsi->slave) {
+ tegra_dsi_ganged_disable(dsi->slave);
+ tegra_dsi_ganged_disable(dsi);
+ }
+
+ value = tegra_dsi_readl(dsi, DSI_POWER_CONTROL);
+ value &= ~DSI_POWER_CONTROL_ENABLE;
+ tegra_dsi_writel(dsi, value, DSI_POWER_CONTROL);
+
+ if (dsi->slave)
+ tegra_dsi_disable(dsi->slave);
+
+ usleep_range(5000, 10000);
+}
+
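+/*
+ * Perform a soft reset by toggling the power-control enable bit and
+ * clearing any host trigger that may still be pending.
+ */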
+static void tegra_dsi_soft_reset(struct tegra_dsi *dsi)
+{
+ u32 value;
+
+ value = tegra_dsi_readl(dsi, DSI_POWER_CONTROL);
+ value &= ~DSI_POWER_CONTROL_ENABLE;
+ tegra_dsi_writel(dsi, value, DSI_POWER_CONTROL);
+
+ usleep_range(300, 1000);
+
+ value = tegra_dsi_readl(dsi, DSI_POWER_CONTROL);
+ value |= DSI_POWER_CONTROL_ENABLE;
+ tegra_dsi_writel(dsi, value, DSI_POWER_CONTROL);
+
+ usleep_range(300, 1000);
+
+ value = tegra_dsi_readl(dsi, DSI_TRIGGER);
+ if (value)
+ tegra_dsi_writel(dsi, 0, DSI_TRIGGER);
+
+ if (dsi->slave)
+ tegra_dsi_soft_reset(dsi->slave);
+}
+
+static void tegra_dsi_connector_reset(struct drm_connector *connector)
+{
+ struct tegra_dsi_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
+
+ if (!state)
+ return;
+
+ if (connector->state) {
+ __drm_atomic_helper_connector_destroy_state(connector->state);
+ kfree(connector->state);
+ }
+
+ __drm_atomic_helper_connector_reset(connector, &state->base);
+}
+
+static struct drm_connector_state *
+tegra_dsi_connector_duplicate_state(struct drm_connector *connector)
+{
+ struct tegra_dsi_state *state = to_dsi_state(connector->state);
+ struct tegra_dsi_state *copy;
+
+ copy = kmemdup(state, sizeof(*state), GFP_KERNEL);
+ if (!copy)
+ return NULL;
+
+ __drm_atomic_helper_connector_duplicate_state(connector,
+ &copy->base);
+
+ return &copy->base;
+}
+
+static const struct drm_connector_funcs tegra_dsi_connector_funcs = {
+ .reset = tegra_dsi_connector_reset,
+ .detect = tegra_output_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = tegra_output_connector_destroy,
+ .atomic_duplicate_state = tegra_dsi_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .late_register = tegra_dsi_late_register,
+ .early_unregister = tegra_dsi_early_unregister,
+};
+
+static enum drm_mode_status
+tegra_dsi_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ return MODE_OK;
+}
+
+static const struct drm_connector_helper_funcs tegra_dsi_connector_helper_funcs = {
+ .get_modes = tegra_output_connector_get_modes,
+ .mode_valid = tegra_dsi_connector_mode_valid,
+};
+
+static void tegra_dsi_unprepare(struct tegra_dsi *dsi)
+{
+ int err;
+
+ if (dsi->slave)
+ tegra_dsi_unprepare(dsi->slave);
+
+ err = tegra_mipi_disable(dsi->mipi);
+ if (err < 0)
+ dev_err(dsi->dev, "failed to disable MIPI calibration: %d\n",
+ err);
+
+ err = host1x_client_suspend(&dsi->client);
+ if (err < 0)
+ dev_err(dsi->dev, "failed to suspend: %d\n", err);
+}
+
+static void tegra_dsi_encoder_disable(struct drm_encoder *encoder)
+{
+ struct tegra_output *output = encoder_to_output(encoder);
+ struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
+ struct tegra_dsi *dsi = to_dsi(output);
+ u32 value;
+ int err;
+
+ if (output->panel)
+ drm_panel_disable(output->panel);
+
+ tegra_dsi_video_disable(dsi);
+
+ /*
+ * The following accesses registers of the display controller, so make
+ * sure it's only executed when the output is attached to one.
+ */
+ if (dc) {
+ value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
+ value &= ~DSI_ENABLE;
+ tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
+
+ tegra_dc_commit(dc);
+ }
+
+ err = tegra_dsi_wait_idle(dsi, 100);
+ if (err < 0)
+ dev_dbg(dsi->dev, "failed to idle DSI: %d\n", err);
+
+ tegra_dsi_soft_reset(dsi);
+
+ if (output->panel)
+ drm_panel_unprepare(output->panel);
+
+ tegra_dsi_disable(dsi);
+
+ tegra_dsi_unprepare(dsi);
+}
+
+static int tegra_dsi_prepare(struct tegra_dsi *dsi)
+{
+ int err;
+
+ err = host1x_client_resume(&dsi->client);
+ if (err < 0) {
+ dev_err(dsi->dev, "failed to resume: %d\n", err);
+ return err;
+ }
+
+ err = tegra_mipi_enable(dsi->mipi);
+ if (err < 0)
+ dev_err(dsi->dev, "failed to enable MIPI calibration: %d\n",
+ err);
+
+ err = tegra_dsi_pad_calibrate(dsi);
+ if (err < 0)
+ dev_err(dsi->dev, "MIPI calibration failed: %d\n", err);
+
+ if (dsi->slave)
+ tegra_dsi_prepare(dsi->slave);
+
+ return 0;
+}
+
+static void tegra_dsi_encoder_enable(struct drm_encoder *encoder)
+{
+ struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
+ struct tegra_output *output = encoder_to_output(encoder);
+ struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
+ struct tegra_dsi *dsi = to_dsi(output);
+ struct tegra_dsi_state *state;
+ u32 value;
+ int err;
+
+ err = tegra_dsi_prepare(dsi);
+ if (err < 0) {
+ dev_err(dsi->dev, "failed to prepare: %d\n", err);
+ return;
+ }
+
+ state = tegra_dsi_get_state(dsi);
+
+ tegra_dsi_set_timeout(dsi, state->bclk, state->vrefresh);
+
+ /*
+ * The D-PHY timing fields are expressed in byte-clock cycles, so
+ * multiply the period by 8.
+ */
+ tegra_dsi_set_phy_timing(dsi, state->period * 8, &state->timing);
+
+ if (output->panel)
+ drm_panel_prepare(output->panel);
+
+ tegra_dsi_configure(dsi, dc->pipe, mode);
+
+ /* enable display controller */
+ value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
+ value |= DSI_ENABLE;
+ tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
+
+ tegra_dc_commit(dc);
+
+ /* enable DSI controller */
+ tegra_dsi_enable(dsi);
+
+ if (output->panel)
+ drm_panel_enable(output->panel);
+}
+
+static int
+tegra_dsi_encoder_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct tegra_output *output = encoder_to_output(encoder);
+ struct tegra_dsi_state *state = to_dsi_state(conn_state);
+ struct tegra_dc *dc = to_tegra_dc(conn_state->crtc);
+ struct tegra_dsi *dsi = to_dsi(output);
+ unsigned int scdiv;
+ unsigned long plld;
+ int err;
+
+ state->pclk = crtc_state->mode.clock * 1000;
+
+ err = tegra_dsi_get_muldiv(dsi->format, &state->mul, &state->div);
+ if (err < 0)
+ return err;
+
+ state->lanes = tegra_dsi_get_lanes(dsi);
+
+ err = tegra_dsi_get_format(dsi->format, &state->format);
+ if (err < 0)
+ return err;
+
+ state->vrefresh = drm_mode_vrefresh(&crtc_state->mode);
+
+ /* compute byte clock */
+ state->bclk = (state->pclk * state->mul) / (state->div * state->lanes);
+
+ DRM_DEBUG_KMS("mul: %u, div: %u, lanes: %u\n", state->mul, state->div,
+ state->lanes);
+ DRM_DEBUG_KMS("format: %u, vrefresh: %u\n", state->format,
+ state->vrefresh);
+ DRM_DEBUG_KMS("bclk: %lu\n", state->bclk);
+
+ /*
+ * Compute bit clock and round up to the next MHz.
+ */
+ plld = DIV_ROUND_UP(state->bclk * 8, USEC_PER_SEC) * USEC_PER_SEC;
+ state->period = DIV_ROUND_CLOSEST(NSEC_PER_SEC, plld);
+
+ err = mipi_dphy_timing_get_default(&state->timing, state->period);
+ if (err < 0)
+ return err;
+
+ err = mipi_dphy_timing_validate(&state->timing, state->period);
+ if (err < 0) {
+ dev_err(dsi->dev, "failed to validate D-PHY timing: %d\n", err);
+ return err;
+ }
+
+ /*
+ * We divide the frequency by two here, but we make up for that by
+ * setting the shift clock divider (further below) to half of the
+ * correct value.
+ */
+ plld /= 2;
+
+ /*
+ * Derive pixel clock from bit clock using the shift clock divider.
+ * Note that this is only half of what we would expect, but we need
+ * that to make up for the fact that we divided the bit clock by a
+ * factor of two above.
+ *
+ * It's not clear exactly why this is necessary, but the display is
+ * not working properly otherwise. Perhaps the PLLs cannot generate
+ * frequencies sufficiently high.
+ */
+ scdiv = ((8 * state->mul) / (state->div * state->lanes)) - 2;
+
+ err = tegra_dc_state_setup_clock(dc, crtc_state, dsi->clk_parent,
+ plld, scdiv);
+ if (err < 0) {
+ dev_err(output->dev, "failed to setup CRTC state: %d\n", err);
+ return err;
+ }
+
+ return err;
+}
+
+static const struct drm_encoder_helper_funcs tegra_dsi_encoder_helper_funcs = {
+ .disable = tegra_dsi_encoder_disable,
+ .enable = tegra_dsi_encoder_enable,
+ .atomic_check = tegra_dsi_encoder_atomic_check,
+};
+
+static int tegra_dsi_init(struct host1x_client *client)
+{
+ struct drm_device *drm = dev_get_drvdata(client->host);
+ struct tegra_dsi *dsi = host1x_client_to_dsi(client);
+ int err;
+
+ /* Gangsters must not register their own outputs. */
+ if (!dsi->master) {
+ dsi->output.dev = client->dev;
+
+ drm_connector_init(drm, &dsi->output.connector,
+ &tegra_dsi_connector_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ drm_connector_helper_add(&dsi->output.connector,
+ &tegra_dsi_connector_helper_funcs);
+ dsi->output.connector.dpms = DRM_MODE_DPMS_OFF;
+
+ drm_simple_encoder_init(drm, &dsi->output.encoder,
+ DRM_MODE_ENCODER_DSI);
+ drm_encoder_helper_add(&dsi->output.encoder,
+ &tegra_dsi_encoder_helper_funcs);
+
+ drm_connector_attach_encoder(&dsi->output.connector,
+ &dsi->output.encoder);
+ drm_connector_register(&dsi->output.connector);
+
+ err = tegra_output_init(drm, &dsi->output);
+ if (err < 0)
+ dev_err(dsi->dev, "failed to initialize output: %d\n",
+ err);
+
+ dsi->output.encoder.possible_crtcs = 0x3;
+ }
+
+ return 0;
+}
+
+static int tegra_dsi_exit(struct host1x_client *client)
+{
+ struct tegra_dsi *dsi = host1x_client_to_dsi(client);
+
+ tegra_output_exit(&dsi->output);
+
+ return 0;
+}
+
+static int tegra_dsi_runtime_suspend(struct host1x_client *client)
+{
+ struct tegra_dsi *dsi = host1x_client_to_dsi(client);
+ struct device *dev = client->dev;
+ int err;
+
+ if (dsi->rst) {
+ err = reset_control_assert(dsi->rst);
+ if (err < 0) {
+ dev_err(dev, "failed to assert reset: %d\n", err);
+ return err;
+ }
+ }
+
+ usleep_range(1000, 2000);
+
+ clk_disable_unprepare(dsi->clk_lp);
+ clk_disable_unprepare(dsi->clk);
+
+ regulator_disable(dsi->vdd);
+ pm_runtime_put_sync(dev);
+
+ return 0;
+}
+
+static int tegra_dsi_runtime_resume(struct host1x_client *client)
+{
+ struct tegra_dsi *dsi = host1x_client_to_dsi(client);
+ struct device *dev = client->dev;
+ int err;
+
+ err = pm_runtime_resume_and_get(dev);
+ if (err < 0) {
+ dev_err(dev, "failed to get runtime PM: %d\n", err);
+ return err;
+ }
+
+ err = regulator_enable(dsi->vdd);
+ if (err < 0) {
+ dev_err(dev, "failed to enable VDD supply: %d\n", err);
+ goto put_rpm;
+ }
+
+ err = clk_prepare_enable(dsi->clk);
+ if (err < 0) {
+ dev_err(dev, "cannot enable DSI clock: %d\n", err);
+ goto disable_vdd;
+ }
+
+ err = clk_prepare_enable(dsi->clk_lp);
+ if (err < 0) {
+ dev_err(dev, "cannot enable low-power clock: %d\n", err);
+ goto disable_clk;
+ }
+
+ usleep_range(1000, 2000);
+
+ if (dsi->rst) {
+ err = reset_control_deassert(dsi->rst);
+ if (err < 0) {
+ dev_err(dev, "cannot assert reset: %d\n", err);
+ goto disable_clk_lp;
+ }
+ }
+
+ return 0;
+
+disable_clk_lp:
+ clk_disable_unprepare(dsi->clk_lp);
+disable_clk:
+ clk_disable_unprepare(dsi->clk);
+disable_vdd:
+ regulator_disable(dsi->vdd);
+put_rpm:
+ pm_runtime_put_sync(dev);
+ return err;
+}
+
+static const struct host1x_client_ops dsi_client_ops = {
+ .init = tegra_dsi_init,
+ .exit = tegra_dsi_exit,
+ .suspend = tegra_dsi_runtime_suspend,
+ .resume = tegra_dsi_runtime_resume,
+};
+
+static int tegra_dsi_setup_clocks(struct tegra_dsi *dsi)
+{
+ struct clk *parent;
+ int err;
+
+ parent = clk_get_parent(dsi->clk);
+ if (!parent)
+ return -EINVAL;
+
+ err = clk_set_parent(parent, dsi->clk_parent);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static const char * const error_report[16] = {
+ "SoT Error",
+ "SoT Sync Error",
+ "EoT Sync Error",
+ "Escape Mode Entry Command Error",
+ "Low-Power Transmit Sync Error",
+ "Peripheral Timeout Error",
+ "False Control Error",
+ "Contention Detected",
+ "ECC Error, single-bit",
+ "ECC Error, multi-bit",
+ "Checksum Error",
+ "DSI Data Type Not Recognized",
+ "DSI VC ID Invalid",
+ "Invalid Transmission Length",
+ "Reserved",
+ "DSI Protocol Violation",
+};
+
+static ssize_t tegra_dsi_read_response(struct tegra_dsi *dsi,
+ const struct mipi_dsi_msg *msg,
+ size_t count)
+{
+ u8 *rx = msg->rx_buf;
+ unsigned int i, j, k;
+ size_t size = 0;
+ u16 errors;
+ u32 value;
+
+ /* read and parse packet header */
+ value = tegra_dsi_readl(dsi, DSI_RD_DATA);
+
+ switch (value & 0x3f) {
+ case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT:
+ errors = (value >> 8) & 0xffff;
+ dev_dbg(dsi->dev, "Acknowledge and error report: %04x\n",
+ errors);
+ for (i = 0; i < ARRAY_SIZE(error_report); i++)
+ if (errors & BIT(i))
+ dev_dbg(dsi->dev, " %2u: %s\n", i,
+ error_report[i]);
+ break;
+
+ case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE:
+ rx[0] = (value >> 8) & 0xff;
+ size = 1;
+ break;
+
+ case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE:
+ rx[0] = (value >> 8) & 0xff;
+ rx[1] = (value >> 16) & 0xff;
+ size = 2;
+ break;
+
+ case MIPI_DSI_RX_DCS_LONG_READ_RESPONSE:
+ size = ((value >> 8) & 0xff00) | ((value >> 8) & 0xff);
+ break;
+
+ case MIPI_DSI_RX_GENERIC_LONG_READ_RESPONSE:
+ size = ((value >> 8) & 0xff00) | ((value >> 8) & 0xff);
+ break;
+
+ default:
+ dev_err(dsi->dev, "unhandled response type: %02x\n",
+ value & 0x3f);
+ return -EPROTO;
+ }
+
+ size = min(size, msg->rx_len);
+
+ if (msg->rx_buf && size > 0) {
+ for (i = 0, j = 0; i < count - 1; i++, j += 4) {
+ value = tegra_dsi_readl(dsi, DSI_RD_DATA);
+
+ for (k = 0; k < 4 && (j + k) < msg->rx_len; k++)
+ rx[j + k] = (value >> (k << 3)) & 0xff;
+ }
+ }
+
+ return size;
+}
+
+static int tegra_dsi_transmit(struct tegra_dsi *dsi, unsigned long timeout)
+{
+ tegra_dsi_writel(dsi, DSI_TRIGGER_HOST, DSI_TRIGGER);
+
+ timeout = jiffies + msecs_to_jiffies(timeout);
+
+ while (time_before(jiffies, timeout)) {
+ u32 value = tegra_dsi_readl(dsi, DSI_TRIGGER);
+ if ((value & DSI_TRIGGER_HOST) == 0)
+ return 0;
+
+ usleep_range(1000, 2000);
+ }
+
+ DRM_DEBUG_KMS("timeout waiting for transmission to complete\n");
+ return -ETIMEDOUT;
+}
+
+static int tegra_dsi_wait_for_response(struct tegra_dsi *dsi,
+ unsigned long timeout)
+{
+ timeout = jiffies + msecs_to_jiffies(timeout);
+
+ while (time_before(jiffies, timeout)) {
+ u32 value = tegra_dsi_readl(dsi, DSI_STATUS);
+ u8 count = value & 0x1f;
+
+ if (count > 0)
+ return count;
+
+ usleep_range(1000, 2000);
+ }
+
+ DRM_DEBUG_KMS("peripheral returned no data\n");
+ return -ETIMEDOUT;
+}
+
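+/*
+ * Write a buffer to the FIFO register given by offset, packing the
+ * bytes little-endian into 32-bit words.
+ */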
+static void tegra_dsi_writesl(struct tegra_dsi *dsi, unsigned long offset,
+ const void *buffer, size_t size)
+{
+ const u8 *buf = buffer;
+ size_t i, j;
+ u32 value;
+
+ for (j = 0; j < size; j += 4) {
+ value = 0;
+
+ for (i = 0; i < 4 && j + i < size; i++)
+ value |= buf[j + i] << (i << 3);
+
+ tegra_dsi_writel(dsi, value, offset);
+ }
+}
+
+static ssize_t tegra_dsi_host_transfer(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
+{
+ struct tegra_dsi *dsi = host_to_tegra(host);
+ struct mipi_dsi_packet packet;
+ const u8 *header;
+ size_t count;
+ ssize_t err;
+ u32 value;
+
+ err = mipi_dsi_create_packet(&packet, msg);
+ if (err < 0)
+ return err;
+
+ header = packet.header;
+
+ /* maximum FIFO depth is 1920 words */
+ if (packet.size > dsi->video_fifo_depth * 4)
+ return -ENOSPC;
+
+ /* reset underflow/overflow flags */
+ value = tegra_dsi_readl(dsi, DSI_STATUS);
+ if (value & (DSI_STATUS_UNDERFLOW | DSI_STATUS_OVERFLOW)) {
+ value = DSI_HOST_CONTROL_FIFO_RESET;
+ tegra_dsi_writel(dsi, value, DSI_HOST_CONTROL);
+ usleep_range(10, 20);
+ }
+
+ value = tegra_dsi_readl(dsi, DSI_POWER_CONTROL);
+ value |= DSI_POWER_CONTROL_ENABLE;
+ tegra_dsi_writel(dsi, value, DSI_POWER_CONTROL);
+
+ usleep_range(5000, 10000);
+
+ value = DSI_HOST_CONTROL_CRC_RESET | DSI_HOST_CONTROL_TX_TRIG_HOST |
+ DSI_HOST_CONTROL_CS | DSI_HOST_CONTROL_ECC;
+
+ if ((msg->flags & MIPI_DSI_MSG_USE_LPM) == 0)
+ value |= DSI_HOST_CONTROL_HS;
+
+ /*
+ * The host FIFO has a maximum of 64 words, so larger transmissions
+ * need to use the video FIFO.
+ */
+ if (packet.size > dsi->host_fifo_depth * 4)
+ value |= DSI_HOST_CONTROL_FIFO_SEL;
+
+ tegra_dsi_writel(dsi, value, DSI_HOST_CONTROL);
+
+ /*
+ * For reads and messages with explicitly requested ACK, generate a
+ * BTA sequence after the transmission of the packet.
+ */
+ if ((msg->flags & MIPI_DSI_MSG_REQ_ACK) ||
+ (msg->rx_buf && msg->rx_len > 0)) {
+ value = tegra_dsi_readl(dsi, DSI_HOST_CONTROL);
+ value |= DSI_HOST_CONTROL_PKT_BTA;
+ tegra_dsi_writel(dsi, value, DSI_HOST_CONTROL);
+ }
+
+ value = DSI_CONTROL_LANES(0) | DSI_CONTROL_HOST_ENABLE;
+ tegra_dsi_writel(dsi, value, DSI_CONTROL);
+
+ /* write packet header, ECC is generated by hardware */
+ value = header[2] << 16 | header[1] << 8 | header[0];
+ tegra_dsi_writel(dsi, value, DSI_WR_DATA);
+
+ /* write payload (if any) */
+ if (packet.payload_length > 0)
+ tegra_dsi_writesl(dsi, DSI_WR_DATA, packet.payload,
+ packet.payload_length);
+
+ err = tegra_dsi_transmit(dsi, 250);
+ if (err < 0)
+ return err;
+
+ if ((msg->flags & MIPI_DSI_MSG_REQ_ACK) ||
+ (msg->rx_buf && msg->rx_len > 0)) {
+ err = tegra_dsi_wait_for_response(dsi, 250);
+ if (err < 0)
+ return err;
+
+ count = err;
+
+ value = tegra_dsi_readl(dsi, DSI_RD_DATA);
+ switch (value) {
+ case 0x84:
+ /*
+ dev_dbg(dsi->dev, "ACK\n");
+ */
+ break;
+
+ case 0x87:
+ /*
+ dev_dbg(dsi->dev, "ESCAPE\n");
+ */
+ break;
+
+ default:
+ dev_err(dsi->dev, "unknown status: %08x\n", value);
+ break;
+ }
+
+ if (count > 1) {
+ err = tegra_dsi_read_response(dsi, msg, count);
+ if (err < 0)
+ dev_err(dsi->dev,
+ "failed to parse response: %zd\n",
+ err);
+ else {
+ /*
+ * For read commands, return the number of
+ * bytes returned by the peripheral.
+ */
+ count = err;
+ }
+ }
+ } else {
+ /*
+ * For write commands, we have transmitted the 4-byte header
+ * plus the variable-length payload.
+ */
+ count = 4 + packet.payload_length;
+ }
+
+ return count;
+}
+
+static int tegra_dsi_ganged_setup(struct tegra_dsi *dsi)
+{
+ struct clk *parent;
+ int err;
+
+ /* make sure both DSI controllers share the same PLL */
+ parent = clk_get_parent(dsi->slave->clk);
+ if (!parent)
+ return -EINVAL;
+
+ err = clk_set_parent(parent, dsi->clk_parent);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int tegra_dsi_host_attach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *device)
+{
+ struct tegra_dsi *dsi = host_to_tegra(host);
+
+ dsi->flags = device->mode_flags;
+ dsi->format = device->format;
+ dsi->lanes = device->lanes;
+
+ if (dsi->slave) {
+ int err;
+
+ dev_dbg(dsi->dev, "attaching dual-channel device %s\n",
+ dev_name(&device->dev));
+
+ err = tegra_dsi_ganged_setup(dsi);
+ if (err < 0) {
+ dev_err(dsi->dev, "failed to set up ganged mode: %d\n",
+ err);
+ return err;
+ }
+ }
+
+ /*
+ * Slaves don't have a panel associated with them, so they merely
+ * provide the second channel.
+ */
+ if (!dsi->master) {
+ struct tegra_output *output = &dsi->output;
+
+ output->panel = of_drm_find_panel(device->dev.of_node);
+ if (IS_ERR(output->panel))
+ output->panel = NULL;
+
+ if (output->panel && output->connector.dev)
+ drm_helper_hpd_irq_event(output->connector.dev);
+ }
+
+ return 0;
+}
+
+static int tegra_dsi_host_detach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *device)
+{
+ struct tegra_dsi *dsi = host_to_tegra(host);
+ struct tegra_output *output = &dsi->output;
+
+ if (output->panel && &device->dev == output->panel->dev) {
+ output->panel = NULL;
+
+ if (output->connector.dev)
+ drm_helper_hpd_irq_event(output->connector.dev);
+ }
+
+ return 0;
+}
+
+static const struct mipi_dsi_host_ops tegra_dsi_host_ops = {
+ .attach = tegra_dsi_host_attach,
+ .detach = tegra_dsi_host_detach,
+ .transfer = tegra_dsi_host_transfer,
+};
+
+static int tegra_dsi_ganged_probe(struct tegra_dsi *dsi)
+{
+ struct device_node *np;
+
+ np = of_parse_phandle(dsi->dev->of_node, "nvidia,ganged-mode", 0);
+ if (np) {
+ struct platform_device *gangster = of_find_device_by_node(np);
+
+ dsi->slave = platform_get_drvdata(gangster);
+ of_node_put(np);
+
+ if (!dsi->slave) {
+ put_device(&gangster->dev);
+ return -EPROBE_DEFER;
+ }
+
+ dsi->slave->master = dsi;
+ }
+
+ return 0;
+}
+
+static int tegra_dsi_probe(struct platform_device *pdev)
+{
+ struct tegra_dsi *dsi;
+ struct resource *regs;
+ int err;
+
+ dsi = devm_kzalloc(&pdev->dev, sizeof(*dsi), GFP_KERNEL);
+ if (!dsi)
+ return -ENOMEM;
+
+ dsi->output.dev = dsi->dev = &pdev->dev;
+ dsi->video_fifo_depth = 1920;
+ dsi->host_fifo_depth = 64;
+
+ err = tegra_dsi_ganged_probe(dsi);
+ if (err < 0)
+ return err;
+
+ err = tegra_output_probe(&dsi->output);
+ if (err < 0)
+ return err;
+
+ dsi->output.connector.polled = DRM_CONNECTOR_POLL_HPD;
+
+ /*
+ * Assume these values by default. When a DSI peripheral driver
+ * attaches to the DSI host, the parameters will be taken from
+ * the attached device.
+ */
+ dsi->flags = MIPI_DSI_MODE_VIDEO;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->lanes = 4;
+
+ if (!pdev->dev.pm_domain) {
+ dsi->rst = devm_reset_control_get(&pdev->dev, "dsi");
+ if (IS_ERR(dsi->rst))
+ return PTR_ERR(dsi->rst);
+ }
+
+ dsi->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(dsi->clk)) {
+ dev_err(&pdev->dev, "cannot get DSI clock\n");
+ return PTR_ERR(dsi->clk);
+ }
+
+ dsi->clk_lp = devm_clk_get(&pdev->dev, "lp");
+ if (IS_ERR(dsi->clk_lp)) {
+ dev_err(&pdev->dev, "cannot get low-power clock\n");
+ return PTR_ERR(dsi->clk_lp);
+ }
+
+ dsi->clk_parent = devm_clk_get(&pdev->dev, "parent");
+ if (IS_ERR(dsi->clk_parent)) {
+ dev_err(&pdev->dev, "cannot get parent clock\n");
+ return PTR_ERR(dsi->clk_parent);
+ }
+
+ dsi->vdd = devm_regulator_get(&pdev->dev, "avdd-dsi-csi");
+ if (IS_ERR(dsi->vdd)) {
+ dev_err(&pdev->dev, "cannot get VDD supply\n");
+ return PTR_ERR(dsi->vdd);
+ }
+
+ err = tegra_dsi_setup_clocks(dsi);
+ if (err < 0) {
+ dev_err(&pdev->dev, "cannot setup clocks\n");
+ return err;
+ }
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dsi->regs = devm_ioremap_resource(&pdev->dev, regs);
+ if (IS_ERR(dsi->regs))
+ return PTR_ERR(dsi->regs);
+
+ dsi->mipi = tegra_mipi_request(&pdev->dev, pdev->dev.of_node);
+ if (IS_ERR(dsi->mipi))
+ return PTR_ERR(dsi->mipi);
+
+ dsi->host.ops = &tegra_dsi_host_ops;
+ dsi->host.dev = &pdev->dev;
+
+ err = mipi_dsi_host_register(&dsi->host);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to register DSI host: %d\n", err);
+ goto mipi_free;
+ }
+
+ platform_set_drvdata(pdev, dsi);
+ pm_runtime_enable(&pdev->dev);
+
+ INIT_LIST_HEAD(&dsi->client.list);
+ dsi->client.ops = &dsi_client_ops;
+ dsi->client.dev = &pdev->dev;
+
+ err = host1x_client_register(&dsi->client);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to register host1x client: %d\n",
+ err);
+ goto unregister;
+ }
+
+ return 0;
+
+unregister:
+ mipi_dsi_host_unregister(&dsi->host);
+mipi_free:
+ tegra_mipi_free(dsi->mipi);
+ return err;
+}
+
+static int tegra_dsi_remove(struct platform_device *pdev)
+{
+ struct tegra_dsi *dsi = platform_get_drvdata(pdev);
+ int err;
+
+ pm_runtime_disable(&pdev->dev);
+
+ err = host1x_client_unregister(&dsi->client);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
+ err);
+ return err;
+ }
+
+ tegra_output_remove(&dsi->output);
+
+ mipi_dsi_host_unregister(&dsi->host);
+ tegra_mipi_free(dsi->mipi);
+
+ return 0;
+}
+
+static const struct of_device_id tegra_dsi_of_match[] = {
+ { .compatible = "nvidia,tegra210-dsi", },
+ { .compatible = "nvidia,tegra132-dsi", },
+ { .compatible = "nvidia,tegra124-dsi", },
+ { .compatible = "nvidia,tegra114-dsi", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tegra_dsi_of_match);
+
+struct platform_driver tegra_dsi_driver = {
+ .driver = {
+ .name = "tegra-dsi",
+ .of_match_table = tegra_dsi_of_match,
+ },
+ .probe = tegra_dsi_probe,
+ .remove = tegra_dsi_remove,
+};
diff --git a/drivers/gpu/drm/tegra/dsi.h b/drivers/gpu/drm/tegra/dsi.h
new file mode 100644
index 000000000..f39594e65
--- /dev/null
+++ b/drivers/gpu/drm/tegra/dsi.h
@@ -0,0 +1,143 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2013 NVIDIA Corporation
+ */
+
+#ifndef DRM_TEGRA_DSI_H
+#define DRM_TEGRA_DSI_H
+
+#define DSI_INCR_SYNCPT 0x00
+#define DSI_INCR_SYNCPT_CONTROL 0x01
+#define DSI_INCR_SYNCPT_ERROR 0x02
+#define DSI_CTXSW 0x08
+#define DSI_RD_DATA 0x09
+#define DSI_WR_DATA 0x0a
+#define DSI_POWER_CONTROL 0x0b
+#define DSI_POWER_CONTROL_ENABLE (1 << 0)
+#define DSI_INT_ENABLE 0x0c
+#define DSI_INT_STATUS 0x0d
+#define DSI_INT_MASK 0x0e
+#define DSI_HOST_CONTROL 0x0f
+#define DSI_HOST_CONTROL_FIFO_RESET (1 << 21)
+#define DSI_HOST_CONTROL_CRC_RESET (1 << 20)
+#define DSI_HOST_CONTROL_TX_TRIG_SOL (0 << 12)
+#define DSI_HOST_CONTROL_TX_TRIG_FIFO (1 << 12)
+#define DSI_HOST_CONTROL_TX_TRIG_HOST (2 << 12)
+#define DSI_HOST_CONTROL_RAW (1 << 6)
+#define DSI_HOST_CONTROL_HS (1 << 5)
+#define DSI_HOST_CONTROL_FIFO_SEL (1 << 4)
+#define DSI_HOST_CONTROL_IMM_BTA (1 << 3)
+#define DSI_HOST_CONTROL_PKT_BTA (1 << 2)
+#define DSI_HOST_CONTROL_CS (1 << 1)
+#define DSI_HOST_CONTROL_ECC (1 << 0)
+#define DSI_CONTROL 0x10
+#define DSI_CONTROL_HS_CLK_CTRL (1 << 20)
+#define DSI_CONTROL_CHANNEL(c) (((c) & 0x3) << 16)
+#define DSI_CONTROL_FORMAT(f) (((f) & 0x3) << 12)
+#define DSI_CONTROL_TX_TRIG(x) (((x) & 0x3) << 8)
+#define DSI_CONTROL_LANES(n) (((n) & 0x3) << 4)
+#define DSI_CONTROL_DCS_ENABLE (1 << 3)
+#define DSI_CONTROL_SOURCE(s) (((s) & 0x1) << 2)
+#define DSI_CONTROL_VIDEO_ENABLE (1 << 1)
+#define DSI_CONTROL_HOST_ENABLE (1 << 0)
+#define DSI_SOL_DELAY 0x11
+#define DSI_MAX_THRESHOLD 0x12
+#define DSI_TRIGGER 0x13
+#define DSI_TRIGGER_HOST (1 << 1)
+#define DSI_TRIGGER_VIDEO (1 << 0)
+#define DSI_TX_CRC 0x14
+#define DSI_STATUS 0x15
+#define DSI_STATUS_IDLE (1 << 10)
+#define DSI_STATUS_UNDERFLOW (1 << 9)
+#define DSI_STATUS_OVERFLOW (1 << 8)
+#define DSI_INIT_SEQ_CONTROL 0x1a
+#define DSI_INIT_SEQ_DATA_0 0x1b
+#define DSI_INIT_SEQ_DATA_1 0x1c
+#define DSI_INIT_SEQ_DATA_2 0x1d
+#define DSI_INIT_SEQ_DATA_3 0x1e
+#define DSI_INIT_SEQ_DATA_4 0x1f
+#define DSI_INIT_SEQ_DATA_5 0x20
+#define DSI_INIT_SEQ_DATA_6 0x21
+#define DSI_INIT_SEQ_DATA_7 0x22
+#define DSI_PKT_SEQ_0_LO 0x23
+#define DSI_PKT_SEQ_0_HI 0x24
+#define DSI_PKT_SEQ_1_LO 0x25
+#define DSI_PKT_SEQ_1_HI 0x26
+#define DSI_PKT_SEQ_2_LO 0x27
+#define DSI_PKT_SEQ_2_HI 0x28
+#define DSI_PKT_SEQ_3_LO 0x29
+#define DSI_PKT_SEQ_3_HI 0x2a
+#define DSI_PKT_SEQ_4_LO 0x2b
+#define DSI_PKT_SEQ_4_HI 0x2c
+#define DSI_PKT_SEQ_5_LO 0x2d
+#define DSI_PKT_SEQ_5_HI 0x2e
+#define DSI_DCS_CMDS 0x33
+#define DSI_PKT_LEN_0_1 0x34
+#define DSI_PKT_LEN_2_3 0x35
+#define DSI_PKT_LEN_4_5 0x36
+#define DSI_PKT_LEN_6_7 0x37
+#define DSI_PHY_TIMING_0 0x3c
+#define DSI_PHY_TIMING_1 0x3d
+#define DSI_PHY_TIMING_2 0x3e
+#define DSI_BTA_TIMING 0x3f
+
+#define DSI_TIMING_FIELD(value, period, hwinc) \
+ ((DIV_ROUND_CLOSEST(value, period) - (hwinc)) & 0xff)
+
+#define DSI_TIMEOUT_0 0x44
+#define DSI_TIMEOUT_LRX(x) (((x) & 0xffff) << 16)
+#define DSI_TIMEOUT_HTX(x) (((x) & 0xffff) << 0)
+#define DSI_TIMEOUT_1 0x45
+#define DSI_TIMEOUT_PR(x) (((x) & 0xffff) << 16)
+#define DSI_TIMEOUT_TA(x) (((x) & 0xffff) << 0)
+#define DSI_TO_TALLY 0x46
+#define DSI_TALLY_TA(x) (((x) & 0xff) << 16)
+#define DSI_TALLY_LRX(x) (((x) & 0xff) << 8)
+#define DSI_TALLY_HTX(x) (((x) & 0xff) << 0)
+#define DSI_PAD_CONTROL_0 0x4b
+#define DSI_PAD_CONTROL_VS1_PDIO(x) (((x) & 0xf) << 0)
+#define DSI_PAD_CONTROL_VS1_PDIO_CLK (1 << 8)
+#define DSI_PAD_CONTROL_VS1_PULLDN(x) (((x) & 0xf) << 16)
+#define DSI_PAD_CONTROL_VS1_PULLDN_CLK (1 << 24)
+#define DSI_PAD_CONTROL_CD 0x4c
+#define DSI_PAD_CD_STATUS 0x4d
+#define DSI_VIDEO_MODE_CONTROL 0x4e
+#define DSI_PAD_CONTROL_1 0x4f
+#define DSI_PAD_CONTROL_2 0x50
+#define DSI_PAD_OUT_CLK(x) (((x) & 0x7) << 0)
+#define DSI_PAD_LP_DN(x) (((x) & 0x7) << 4)
+#define DSI_PAD_LP_UP(x) (((x) & 0x7) << 8)
+#define DSI_PAD_SLEW_DN(x) (((x) & 0x7) << 12)
+#define DSI_PAD_SLEW_UP(x) (((x) & 0x7) << 16)
+#define DSI_PAD_CONTROL_3 0x51
+#define DSI_PAD_PREEMP_PD_CLK(x) (((x) & 0x3) << 12)
+#define DSI_PAD_PREEMP_PU_CLK(x) (((x) & 0x3) << 8)
+#define DSI_PAD_PREEMP_PD(x) (((x) & 0x3) << 4)
+#define DSI_PAD_PREEMP_PU(x) (((x) & 0x3) << 0)
+#define DSI_PAD_CONTROL_4 0x52
+#define DSI_GANGED_MODE_CONTROL 0x53
+#define DSI_GANGED_MODE_CONTROL_ENABLE (1 << 0)
+#define DSI_GANGED_MODE_START 0x54
+#define DSI_GANGED_MODE_SIZE 0x55
+#define DSI_RAW_DATA_BYTE_COUNT 0x56
+#define DSI_ULTRA_LOW_POWER_CONTROL 0x57
+#define DSI_INIT_SEQ_DATA_8 0x58
+#define DSI_INIT_SEQ_DATA_9 0x59
+#define DSI_INIT_SEQ_DATA_10 0x5a
+#define DSI_INIT_SEQ_DATA_11 0x5b
+#define DSI_INIT_SEQ_DATA_12 0x5c
+#define DSI_INIT_SEQ_DATA_13 0x5d
+#define DSI_INIT_SEQ_DATA_14 0x5e
+#define DSI_INIT_SEQ_DATA_15 0x5f
+
+/*
+ * pixel format as used in the DSI_CONTROL_FORMAT field
+ */
+enum tegra_dsi_format {
+ TEGRA_DSI_FORMAT_16P,
+ TEGRA_DSI_FORMAT_18NP,
+ TEGRA_DSI_FORMAT_18P,
+ TEGRA_DSI_FORMAT_24P,
+};
+
+#endif
diff --git a/drivers/gpu/drm/tegra/falcon.c b/drivers/gpu/drm/tegra/falcon.c
new file mode 100644
index 000000000..c0d85463e
--- /dev/null
+++ b/drivers/gpu/drm/tegra/falcon.c
@@ -0,0 +1,231 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015, NVIDIA Corporation.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/firmware.h>
+#include <linux/pci_ids.h>
+#include <linux/iopoll.h>
+
+#include "falcon.h"
+#include "drm.h"
+
+enum falcon_memory {
+ FALCON_MEMORY_IMEM,
+ FALCON_MEMORY_DATA,
+};
+
+static void falcon_writel(struct falcon *falcon, u32 value, u32 offset)
+{
+ writel(value, falcon->regs + offset);
+}
+
+int falcon_wait_idle(struct falcon *falcon)
+{
+ u32 value;
+
+ return readl_poll_timeout(falcon->regs + FALCON_IDLESTATE, value,
+ (value == 0), 10, 100000);
+}
+
+static int falcon_dma_wait_idle(struct falcon *falcon)
+{
+ u32 value;
+
+ return readl_poll_timeout(falcon->regs + FALCON_DMATRFCMD, value,
+ (value & FALCON_DMATRFCMD_IDLE), 10, 100000);
+}
+
+static int falcon_copy_chunk(struct falcon *falcon,
+ phys_addr_t base,
+ unsigned long offset,
+ enum falcon_memory target)
+{
+ u32 cmd = FALCON_DMATRFCMD_SIZE_256B;
+
+ if (target == FALCON_MEMORY_IMEM)
+ cmd |= FALCON_DMATRFCMD_IMEM;
+
+ /*
+ * Use second DMA context (i.e. the one for firmware). Strictly
+ * speaking, at this point both DMA contexts point to the firmware
+ * stream ID, but this register's value will be reused by the firmware
+ * for later DMA transactions, so we need to use the correct value.
+ */
+ cmd |= FALCON_DMATRFCMD_DMACTX(1);
+
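+ /*
+ * Program the destination offset within the target memory and the
+ * source offset relative to FALCON_DMATRFBASE, then kick off a
+ * 256-byte DMA transfer.
+ */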
+ falcon_writel(falcon, offset, FALCON_DMATRFMOFFS);
+ falcon_writel(falcon, base, FALCON_DMATRFFBOFFS);
+ falcon_writel(falcon, cmd, FALCON_DMATRFCMD);
+
+ return falcon_dma_wait_idle(falcon);
+}
+
+static void falcon_copy_firmware_image(struct falcon *falcon,
+ const struct firmware *firmware)
+{
+ u32 *virt = falcon->firmware.virt;
+ size_t i;
+
+ /* copy the whole thing taking into account endianness */
+ for (i = 0; i < firmware->size / sizeof(u32); i++)
+ virt[i] = le32_to_cpu(((__le32 *)firmware->data)[i]);
+}
+
+static int falcon_parse_firmware_image(struct falcon *falcon)
+{
+ struct falcon_fw_bin_header_v1 *bin = (void *)falcon->firmware.virt;
+ struct falcon_fw_os_header_v1 *os;
+
+ /* endian problems would show up right here */
+ if (bin->magic != PCI_VENDOR_ID_NVIDIA && bin->magic != 0x10fe) {
+ dev_err(falcon->dev, "incorrect firmware magic\n");
+ return -EINVAL;
+ }
+
+ /* currently only version 1 is supported */
+ if (bin->version != 1) {
+ dev_err(falcon->dev, "unsupported firmware version\n");
+ return -EINVAL;
+ }
+
+ /* check that the firmware size is consistent */
+ if (bin->size > falcon->firmware.size) {
+ dev_err(falcon->dev, "firmware image size inconsistency\n");
+ return -EINVAL;
+ }
+
+ os = falcon->firmware.virt + bin->os_header_offset;
+
+ falcon->firmware.bin_data.size = bin->os_size;
+ falcon->firmware.bin_data.offset = bin->os_data_offset;
+ falcon->firmware.code.offset = os->code_offset;
+ falcon->firmware.code.size = os->code_size;
+ falcon->firmware.data.offset = os->data_offset;
+ falcon->firmware.data.size = os->data_size;
+
+ return 0;
+}
+
+int falcon_read_firmware(struct falcon *falcon, const char *name)
+{
+ int err;
+
+ /* request_firmware prints error if it fails */
+ err = request_firmware(&falcon->firmware.firmware, name, falcon->dev);
+ if (err < 0)
+ return err;
+
+ falcon->firmware.size = falcon->firmware.firmware->size;
+
+ return 0;
+}
+
+int falcon_load_firmware(struct falcon *falcon)
+{
+ const struct firmware *firmware = falcon->firmware.firmware;
+ int err;
+
+ /* copy the firmware image into the local area, fixing up endianness */
+ falcon_copy_firmware_image(falcon, firmware);
+
+ /* parse the image data */
+ err = falcon_parse_firmware_image(falcon);
+ if (err < 0) {
+ dev_err(falcon->dev, "failed to parse firmware image\n");
+ return err;
+ }
+
+ release_firmware(firmware);
+ falcon->firmware.firmware = NULL;
+
+ return 0;
+}
+
+int falcon_init(struct falcon *falcon)
+{
+ falcon->firmware.virt = NULL;
+
+ return 0;
+}
+
+void falcon_exit(struct falcon *falcon)
+{
+ if (falcon->firmware.firmware)
+ release_firmware(falcon->firmware.firmware);
+}
+
+int falcon_boot(struct falcon *falcon)
+{
+ unsigned long offset;
+ u32 value;
+ int err;
+
+ if (!falcon->firmware.virt)
+ return -EINVAL;
+
+ err = readl_poll_timeout(falcon->regs + FALCON_DMACTL, value,
+ (value & (FALCON_DMACTL_IMEM_SCRUBBING |
+ FALCON_DMACTL_DMEM_SCRUBBING)) == 0,
+ 10, 10000);
+ if (err < 0)
+ return err;
+
+ falcon_writel(falcon, 0, FALCON_DMACTL);
+
+ /* setup the address of the binary data so Falcon can access it later */
+ falcon_writel(falcon, (falcon->firmware.iova +
+ falcon->firmware.bin_data.offset) >> 8,
+ FALCON_DMATRFBASE);
+
+ /* copy the data segment into Falcon internal memory */
+ for (offset = 0; offset < falcon->firmware.data.size; offset += 256)
+ falcon_copy_chunk(falcon,
+ falcon->firmware.data.offset + offset,
+ offset, FALCON_MEMORY_DATA);
+
+ /* copy the code segment into Falcon internal memory */
+ for (offset = 0; offset < falcon->firmware.code.size; offset += 256)
+ falcon_copy_chunk(falcon, falcon->firmware.code.offset + offset,
+ offset, FALCON_MEMORY_IMEM);
+
+ /* setup falcon interrupts */
+ falcon_writel(falcon, FALCON_IRQMSET_EXT(0xff) |
+ FALCON_IRQMSET_SWGEN1 |
+ FALCON_IRQMSET_SWGEN0 |
+ FALCON_IRQMSET_EXTERR |
+ FALCON_IRQMSET_HALT |
+ FALCON_IRQMSET_WDTMR,
+ FALCON_IRQMSET);
+ falcon_writel(falcon, FALCON_IRQDEST_EXT(0xff) |
+ FALCON_IRQDEST_SWGEN1 |
+ FALCON_IRQDEST_SWGEN0 |
+ FALCON_IRQDEST_EXTERR |
+ FALCON_IRQDEST_HALT,
+ FALCON_IRQDEST);
+
+ /* enable interface */
+ falcon_writel(falcon, FALCON_ITFEN_MTHDEN |
+ FALCON_ITFEN_CTXEN,
+ FALCON_ITFEN);
+
+ /* boot falcon */
+ falcon_writel(falcon, 0x00000000, FALCON_BOOTVEC);
+ falcon_writel(falcon, FALCON_CPUCTL_STARTCPU, FALCON_CPUCTL);
+
+ err = falcon_wait_idle(falcon);
+ if (err < 0) {
+ dev_err(falcon->dev, "Falcon boot failed due to timeout\n");
+ return err;
+ }
+
+ return 0;
+}
+
+void falcon_execute_method(struct falcon *falcon, u32 method, u32 data)
+{
+ falcon_writel(falcon, method >> 2, FALCON_UCLASS_METHOD_OFFSET);
+ falcon_writel(falcon, data, FALCON_UCLASS_METHOD_DATA);
+}
diff --git a/drivers/gpu/drm/tegra/falcon.h b/drivers/gpu/drm/tegra/falcon.h
new file mode 100644
index 000000000..1955cf11a
--- /dev/null
+++ b/drivers/gpu/drm/tegra/falcon.h
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2015, NVIDIA Corporation.
+ */
+
+#ifndef _FALCON_H_
+#define _FALCON_H_
+
+#include <linux/types.h>
+
+#define FALCON_UCLASS_METHOD_OFFSET 0x00000040
+
+#define FALCON_UCLASS_METHOD_DATA 0x00000044
+
+#define FALCON_IRQMSET 0x00001010
+#define FALCON_IRQMSET_WDTMR (1 << 1)
+#define FALCON_IRQMSET_HALT (1 << 4)
+#define FALCON_IRQMSET_EXTERR (1 << 5)
+#define FALCON_IRQMSET_SWGEN0 (1 << 6)
+#define FALCON_IRQMSET_SWGEN1 (1 << 7)
+#define FALCON_IRQMSET_EXT(v) (((v) & 0xff) << 8)
+
+#define FALCON_IRQDEST 0x0000101c
+#define FALCON_IRQDEST_HALT (1 << 4)
+#define FALCON_IRQDEST_EXTERR (1 << 5)
+#define FALCON_IRQDEST_SWGEN0 (1 << 6)
+#define FALCON_IRQDEST_SWGEN1 (1 << 7)
+#define FALCON_IRQDEST_EXT(v) (((v) & 0xff) << 8)
+
+#define FALCON_ITFEN 0x00001048
+#define FALCON_ITFEN_CTXEN (1 << 0)
+#define FALCON_ITFEN_MTHDEN (1 << 1)
+
+#define FALCON_IDLESTATE 0x0000104c
+
+#define FALCON_CPUCTL 0x00001100
+#define FALCON_CPUCTL_STARTCPU (1 << 1)
+
+#define FALCON_BOOTVEC 0x00001104
+
+#define FALCON_DMACTL 0x0000110c
+#define FALCON_DMACTL_DMEM_SCRUBBING (1 << 1)
+#define FALCON_DMACTL_IMEM_SCRUBBING (1 << 2)
+
+#define FALCON_DMATRFBASE 0x00001110
+
+#define FALCON_DMATRFMOFFS 0x00001114
+
+#define FALCON_DMATRFCMD 0x00001118
+#define FALCON_DMATRFCMD_IDLE (1 << 1)
+#define FALCON_DMATRFCMD_IMEM (1 << 4)
+#define FALCON_DMATRFCMD_SIZE_256B (6 << 8)
+#define FALCON_DMATRFCMD_DMACTX(v) (((v) & 0x7) << 12)
+
+#define FALCON_DMATRFFBOFFS 0x0000111c
+
+struct falcon_fw_bin_header_v1 {
+ u32 magic; /* 0x10de */
+ u32 version; /* version of bin format (1) */
+ u32 size; /* entire image size including this header */
+ u32 os_header_offset;
+ u32 os_data_offset;
+ u32 os_size;
+};
+
+struct falcon_fw_os_app_v1 {
+ u32 offset;
+ u32 size;
+};
+
+struct falcon_fw_os_header_v1 {
+ u32 code_offset;
+ u32 code_size;
+ u32 data_offset;
+ u32 data_size;
+};
+
+struct falcon_firmware_section {
+ unsigned long offset;
+ size_t size;
+};
+
+struct falcon_firmware {
+ /* Firmware after it is read but not loaded */
+ const struct firmware *firmware;
+
+ /* Raw firmware data */
+ dma_addr_t iova;
+ dma_addr_t phys;
+ void *virt;
+ size_t size;
+
+ /* Parsed firmware information */
+ struct falcon_firmware_section bin_data;
+ struct falcon_firmware_section data;
+ struct falcon_firmware_section code;
+};
+
+struct falcon {
+ /* Set by falcon client */
+ struct device *dev;
+ void __iomem *regs;
+
+ struct falcon_firmware firmware;
+};
+
+int falcon_init(struct falcon *falcon);
+void falcon_exit(struct falcon *falcon);
+int falcon_read_firmware(struct falcon *falcon, const char *firmware_name);
+int falcon_load_firmware(struct falcon *falcon);
+int falcon_boot(struct falcon *falcon);
+void falcon_execute_method(struct falcon *falcon, u32 method, u32 data);
+int falcon_wait_idle(struct falcon *falcon);
+
+#endif /* _FALCON_H_ */
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
new file mode 100644
index 000000000..929120915
--- /dev/null
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -0,0 +1,414 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2012-2013 Avionic Design GmbH
+ * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
+ *
+ * Based on the KMS/FB DMA helpers
+ * Copyright (C) 2012 Analog Devices Inc.
+ */
+
+#include <linux/console.h>
+
+#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_modeset_helper.h>
+
+#include "drm.h"
+#include "gem.h"
+
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+static inline struct tegra_fbdev *to_tegra_fbdev(struct drm_fb_helper *helper)
+{
+ return container_of(helper, struct tegra_fbdev, base);
+}
+#endif
+
+struct tegra_bo *tegra_fb_get_plane(struct drm_framebuffer *framebuffer,
+ unsigned int index)
+{
+ return to_tegra_bo(drm_gem_fb_get_obj(framebuffer, index));
+}
+
+bool tegra_fb_is_bottom_up(struct drm_framebuffer *framebuffer)
+{
+ struct tegra_bo *bo = tegra_fb_get_plane(framebuffer, 0);
+
+ if (bo->flags & TEGRA_BO_BOTTOM_UP)
+ return true;
+
+ return false;
+}
+
+int tegra_fb_get_tiling(struct drm_framebuffer *framebuffer,
+ struct tegra_bo_tiling *tiling)
+{
+ uint64_t modifier = framebuffer->modifier;
+
+ if (fourcc_mod_is_vendor(modifier, NVIDIA)) {
+ if ((modifier & DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT) == 0)
+ tiling->sector_layout = TEGRA_BO_SECTOR_LAYOUT_TEGRA;
+ else
+ tiling->sector_layout = TEGRA_BO_SECTOR_LAYOUT_GPU;
+
+ modifier &= ~DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT;
+ }
+
+ switch (modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ tiling->mode = TEGRA_BO_TILING_MODE_PITCH;
+ tiling->value = 0;
+ break;
+
+ case DRM_FORMAT_MOD_NVIDIA_TEGRA_TILED:
+ tiling->mode = TEGRA_BO_TILING_MODE_TILED;
+ tiling->value = 0;
+ break;
+
+ case DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0):
+ tiling->mode = TEGRA_BO_TILING_MODE_BLOCK;
+ tiling->value = 0;
+ break;
+
+ case DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(1):
+ tiling->mode = TEGRA_BO_TILING_MODE_BLOCK;
+ tiling->value = 1;
+ break;
+
+ case DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(2):
+ tiling->mode = TEGRA_BO_TILING_MODE_BLOCK;
+ tiling->value = 2;
+ break;
+
+ case DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3):
+ tiling->mode = TEGRA_BO_TILING_MODE_BLOCK;
+ tiling->value = 3;
+ break;
+
+ case DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4):
+ tiling->mode = TEGRA_BO_TILING_MODE_BLOCK;
+ tiling->value = 4;
+ break;
+
+ case DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5):
+ tiling->mode = TEGRA_BO_TILING_MODE_BLOCK;
+ tiling->value = 5;
+ break;
+
+ default:
+ DRM_DEBUG_KMS("unknown format modifier: %llx\n", modifier);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct drm_framebuffer_funcs tegra_fb_funcs = {
+ .destroy = drm_gem_fb_destroy,
+ .create_handle = drm_gem_fb_create_handle,
+};
+
+static struct drm_framebuffer *tegra_fb_alloc(struct drm_device *drm,
+ const struct drm_mode_fb_cmd2 *mode_cmd,
+ struct tegra_bo **planes,
+ unsigned int num_planes)
+{
+ struct drm_framebuffer *fb;
+ unsigned int i;
+ int err;
+
+ fb = kzalloc(sizeof(*fb), GFP_KERNEL);
+ if (!fb)
+ return ERR_PTR(-ENOMEM);
+
+ drm_helper_mode_fill_fb_struct(drm, fb, mode_cmd);
+
+ for (i = 0; i < fb->format->num_planes; i++)
+ fb->obj[i] = &planes[i]->gem;
+
+ err = drm_framebuffer_init(drm, fb, &tegra_fb_funcs);
+ if (err < 0) {
+ dev_err(drm->dev, "failed to initialize framebuffer: %d\n",
+ err);
+ kfree(fb);
+ return ERR_PTR(err);
+ }
+
+ return fb;
+}
+
+struct drm_framebuffer *tegra_fb_create(struct drm_device *drm,
+ struct drm_file *file,
+ const struct drm_mode_fb_cmd2 *cmd)
+{
+ const struct drm_format_info *info = drm_get_format_info(drm, cmd);
+ struct tegra_bo *planes[4];
+ struct drm_gem_object *gem;
+ struct drm_framebuffer *fb;
+ unsigned int i;
+ int err;
+
+ for (i = 0; i < info->num_planes; i++) {
+ unsigned int width = cmd->width / (i ? info->hsub : 1);
+ unsigned int height = cmd->height / (i ? info->vsub : 1);
+ unsigned int size, bpp;
+
+ gem = drm_gem_object_lookup(file, cmd->handles[i]);
+ if (!gem) {
+ err = -ENXIO;
+ goto unreference;
+ }
+
+ bpp = info->cpp[i];
+
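+ /*
+ * Compute the smallest buffer size that can hold this plane:
+ * a full pitch for all but the last line, plus the last line
+ * itself and the plane offset.
+ */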
+ size = (height - 1) * cmd->pitches[i] +
+ width * bpp + cmd->offsets[i];
+
+ if (gem->size < size) {
+ err = -EINVAL;
+ goto unreference;
+ }
+
+ planes[i] = to_tegra_bo(gem);
+ }
+
+ fb = tegra_fb_alloc(drm, cmd, planes, i);
+ if (IS_ERR(fb)) {
+ err = PTR_ERR(fb);
+ goto unreference;
+ }
+
+ return fb;
+
+unreference:
+ while (i--)
+ drm_gem_object_put(&planes[i]->gem);
+
+ return ERR_PTR(err);
+}
+
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+static int tegra_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
+{
+ struct drm_fb_helper *helper = info->par;
+ struct tegra_bo *bo;
+ int err;
+
+ bo = tegra_fb_get_plane(helper->fb, 0);
+
+ err = drm_gem_mmap_obj(&bo->gem, bo->gem.size, vma);
+ if (err < 0)
+ return err;
+
+ return __tegra_gem_mmap(&bo->gem, vma);
+}
+
+static const struct fb_ops tegra_fb_ops = {
+ .owner = THIS_MODULE,
+ DRM_FB_HELPER_DEFAULT_OPS,
+ .fb_fillrect = drm_fb_helper_sys_fillrect,
+ .fb_copyarea = drm_fb_helper_sys_copyarea,
+ .fb_imageblit = drm_fb_helper_sys_imageblit,
+ .fb_mmap = tegra_fb_mmap,
+};
+
+static int tegra_fbdev_probe(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct tegra_fbdev *fbdev = to_tegra_fbdev(helper);
+ struct tegra_drm *tegra = helper->dev->dev_private;
+ struct drm_device *drm = helper->dev;
+ struct drm_mode_fb_cmd2 cmd = { 0 };
+ unsigned int bytes_per_pixel;
+ struct drm_framebuffer *fb;
+ unsigned long offset;
+ struct fb_info *info;
+ struct tegra_bo *bo;
+ size_t size;
+ int err;
+
+ bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8);
+
+ cmd.width = sizes->surface_width;
+ cmd.height = sizes->surface_height;
+ cmd.pitches[0] = round_up(sizes->surface_width * bytes_per_pixel,
+ tegra->pitch_align);
+
+ cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+ sizes->surface_depth);
+
+ size = cmd.pitches[0] * cmd.height;
+
+ bo = tegra_bo_create(drm, size, 0);
+ if (IS_ERR(bo))
+ return PTR_ERR(bo);
+
+ info = drm_fb_helper_alloc_fbi(helper);
+ if (IS_ERR(info)) {
+ dev_err(drm->dev, "failed to allocate framebuffer info\n");
+ drm_gem_object_put(&bo->gem);
+ return PTR_ERR(info);
+ }
+
+ fbdev->fb = tegra_fb_alloc(drm, &cmd, &bo, 1);
+ if (IS_ERR(fbdev->fb)) {
+ err = PTR_ERR(fbdev->fb);
+ dev_err(drm->dev, "failed to allocate DRM framebuffer: %d\n",
+ err);
+ drm_gem_object_put(&bo->gem);
+ return err;
+ }
+
+ fb = fbdev->fb;
+ helper->fb = fb;
+ helper->fbdev = info;
+
+ info->fbops = &tegra_fb_ops;
+
+ drm_fb_helper_fill_info(info, helper, sizes);
+
+ offset = info->var.xoffset * bytes_per_pixel +
+ info->var.yoffset * fb->pitches[0];
+
+ if (bo->pages) {
+ bo->vaddr = vmap(bo->pages, bo->num_pages, VM_MAP,
+ pgprot_writecombine(PAGE_KERNEL));
+ if (!bo->vaddr) {
+ dev_err(drm->dev, "failed to vmap() framebuffer\n");
+ err = -ENOMEM;
+ goto destroy;
+ }
+ }
+
+ drm->mode_config.fb_base = (resource_size_t)bo->iova;
+ info->screen_base = (void __iomem *)bo->vaddr + offset;
+ info->screen_size = size;
+ info->fix.smem_start = (unsigned long)(bo->iova + offset);
+ info->fix.smem_len = size;
+
+ return 0;
+
+destroy:
+ drm_framebuffer_remove(fb);
+ return err;
+}
+
+static const struct drm_fb_helper_funcs tegra_fb_helper_funcs = {
+ .fb_probe = tegra_fbdev_probe,
+};
+
+static struct tegra_fbdev *tegra_fbdev_create(struct drm_device *drm)
+{
+ struct tegra_fbdev *fbdev;
+
+ fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
+ if (!fbdev) {
+ dev_err(drm->dev, "failed to allocate DRM fbdev\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ drm_fb_helper_prepare(drm, &fbdev->base, &tegra_fb_helper_funcs);
+
+ return fbdev;
+}
+
+static void tegra_fbdev_free(struct tegra_fbdev *fbdev)
+{
+ kfree(fbdev);
+}
+
+static int tegra_fbdev_init(struct tegra_fbdev *fbdev,
+ unsigned int preferred_bpp,
+ unsigned int num_crtc,
+ unsigned int max_connectors)
+{
+ struct drm_device *drm = fbdev->base.dev;
+ int err;
+
+ err = drm_fb_helper_init(drm, &fbdev->base);
+ if (err < 0) {
+ dev_err(drm->dev, "failed to initialize DRM FB helper: %d\n",
+ err);
+ return err;
+ }
+
+ err = drm_fb_helper_initial_config(&fbdev->base, preferred_bpp);
+ if (err < 0) {
+ dev_err(drm->dev, "failed to set initial configuration: %d\n",
+ err);
+ goto fini;
+ }
+
+ return 0;
+
+fini:
+ drm_fb_helper_fini(&fbdev->base);
+ return err;
+}
+
+static void tegra_fbdev_exit(struct tegra_fbdev *fbdev)
+{
+ drm_fb_helper_unregister_fbi(&fbdev->base);
+
+ if (fbdev->fb) {
+ struct tegra_bo *bo = tegra_fb_get_plane(fbdev->fb, 0);
+
+ /* Undo the special mapping we made in fbdev probe. */
+ if (bo && bo->pages) {
+ vunmap(bo->vaddr);
+ bo->vaddr = NULL;
+ }
+
+ drm_framebuffer_remove(fbdev->fb);
+ }
+
+ drm_fb_helper_fini(&fbdev->base);
+ tegra_fbdev_free(fbdev);
+}
+#endif
+
+int tegra_drm_fb_prepare(struct drm_device *drm)
+{
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+ struct tegra_drm *tegra = drm->dev_private;
+
+ tegra->fbdev = tegra_fbdev_create(drm);
+ if (IS_ERR(tegra->fbdev))
+ return PTR_ERR(tegra->fbdev);
+#endif
+
+ return 0;
+}
+
+void tegra_drm_fb_free(struct drm_device *drm)
+{
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+ struct tegra_drm *tegra = drm->dev_private;
+
+ tegra_fbdev_free(tegra->fbdev);
+#endif
+}
+
+int tegra_drm_fb_init(struct drm_device *drm)
+{
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+ struct tegra_drm *tegra = drm->dev_private;
+ int err;
+
+ err = tegra_fbdev_init(tegra->fbdev, 32, drm->mode_config.num_crtc,
+ drm->mode_config.num_connector);
+ if (err < 0)
+ return err;
+#endif
+
+ return 0;
+}
+
+void tegra_drm_fb_exit(struct drm_device *drm)
+{
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+ struct tegra_drm *tegra = drm->dev_private;
+
+ tegra_fbdev_exit(tegra->fbdev);
+#endif
+}
diff --git a/drivers/gpu/drm/tegra/firewall.c b/drivers/gpu/drm/tegra/firewall.c
new file mode 100644
index 000000000..d53f890fa
--- /dev/null
+++ b/drivers/gpu/drm/tegra/firewall.c
@@ -0,0 +1,257 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2010-2020 NVIDIA Corporation */
+
+#include "drm.h"
+#include "submit.h"
+#include "uapi.h"
+
+struct tegra_drm_firewall {
+ struct tegra_drm_submit_data *submit;
+ struct tegra_drm_client *client;
+ u32 *data;
+ u32 pos;
+ u32 end;
+ u32 class;
+};
+
+static int fw_next(struct tegra_drm_firewall *fw, u32 *word)
+{
+ if (fw->pos == fw->end)
+ return -EINVAL;
+
+ *word = fw->data[fw->pos++];
+
+ return 0;
+}
+
+static bool fw_check_addr_valid(struct tegra_drm_firewall *fw, u32 offset)
+{
+ u32 i;
+
+ for (i = 0; i < fw->submit->num_used_mappings; i++) {
+ struct tegra_drm_mapping *m = fw->submit->used_mappings[i].mapping;
+
+ if (offset >= m->iova && offset <= m->iova_end)
+ return true;
+ }
+
+ return false;
+}
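+
+/*
+ * For example (hypothetical mapping): with a single used mapping where
+ * iova = 0x10000 and iova_end = 0x10fff, a patched address of 0x10800
+ * passes the check above, while 0x11000 does not.
+ */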
+
+static int fw_check_reg(struct tegra_drm_firewall *fw, u32 offset)
+{
+ bool is_addr;
+ u32 word;
+ int err;
+
+ err = fw_next(fw, &word);
+ if (err)
+ return err;
+
+ if (!fw->client->ops->is_addr_reg)
+ return 0;
+
+ is_addr = fw->client->ops->is_addr_reg(fw->client->base.dev, fw->class,
+ offset);
+
+ if (!is_addr)
+ return 0;
+
+ if (!fw_check_addr_valid(fw, word))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int fw_check_regs_seq(struct tegra_drm_firewall *fw, u32 offset,
+ u32 count, bool incr)
+{
+ u32 i;
+
+ for (i = 0; i < count; i++) {
+ if (fw_check_reg(fw, offset))
+ return -EINVAL;
+
+ if (incr)
+ offset++;
+ }
+
+ return 0;
+}
+
+static int fw_check_regs_mask(struct tegra_drm_firewall *fw, u32 offset,
+ u16 mask)
+{
+ unsigned long bmask = mask;
+ unsigned int bit;
+
+ for_each_set_bit(bit, &bmask, 16) {
+ if (fw_check_reg(fw, offset+bit))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int fw_check_regs_imm(struct tegra_drm_firewall *fw, u32 offset)
+{
+ bool is_addr;
+
+ if (!fw->client->ops->is_addr_reg)
+ return 0;
+
+ is_addr = fw->client->ops->is_addr_reg(fw->client->base.dev, fw->class,
+ offset);
+ if (is_addr)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int fw_check_class(struct tegra_drm_firewall *fw, u32 class)
+{
+ if (!fw->client->ops->is_valid_class) {
+ if (class == fw->client->base.class)
+ return 0;
+ else
+ return -EINVAL;
+ }
+
+ if (!fw->client->ops->is_valid_class(class))
+ return -EINVAL;
+
+ return 0;
+}
+
+enum {
+ HOST1X_OPCODE_SETCLASS = 0x00,
+ HOST1X_OPCODE_INCR = 0x01,
+ HOST1X_OPCODE_NONINCR = 0x02,
+ HOST1X_OPCODE_MASK = 0x03,
+ HOST1X_OPCODE_IMM = 0x04,
+ HOST1X_OPCODE_RESTART = 0x05,
+ HOST1X_OPCODE_GATHER = 0x06,
+ HOST1X_OPCODE_SETSTRMID = 0x07,
+ HOST1X_OPCODE_SETAPPID = 0x08,
+ HOST1X_OPCODE_SETPYLD = 0x09,
+ HOST1X_OPCODE_INCR_W = 0x0a,
+ HOST1X_OPCODE_NONINCR_W = 0x0b,
+ HOST1X_OPCODE_GATHER_W = 0x0c,
+ HOST1X_OPCODE_RESTART_W = 0x0d,
+ HOST1X_OPCODE_EXTEND = 0x0e,
+};
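+
+/*
+ * Worked example (hypothetical command word): 0x104c0001 decodes in the
+ * switch below to opcode (0x104c0001 >> 28) = 0x1 (INCR), offset
+ * (0x104c0001 >> 16) & 0xfff = 0x04c and count 0x104c0001 & 0xffff = 1,
+ * i.e. "write one word to register 0x04c".
+ */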
+
+int tegra_drm_fw_validate(struct tegra_drm_client *client, u32 *data, u32 start,
+ u32 words, struct tegra_drm_submit_data *submit,
+ u32 *job_class)
+{
+ struct tegra_drm_firewall fw = {
+ .submit = submit,
+ .client = client,
+ .data = data,
+ .pos = start,
+ .end = start+words,
+ .class = *job_class,
+ };
+ bool payload_valid = false;
+ u32 payload;
+ int err;
+
+ while (fw.pos != fw.end) {
+ u32 word, opcode, offset, count, mask, class;
+
+ err = fw_next(&fw, &word);
+ if (err)
+ return err;
+
+ opcode = (word & 0xf0000000) >> 28;
+
+ switch (opcode) {
+ case HOST1X_OPCODE_SETCLASS:
+ offset = (word >> 16) & 0xfff;
+ mask = word & 0x3f;
+ class = (word >> 6) & 0x3ff;
+ err = fw_check_class(&fw, class);
+ fw.class = class;
+ *job_class = class;
+ if (!err)
+ err = fw_check_regs_mask(&fw, offset, mask);
+ if (err)
+ dev_warn(client->base.dev,
+ "illegal SETCLASS(offset=0x%x, mask=0x%x, class=0x%x) at word %u",
+ offset, mask, class, fw.pos-1);
+ break;
+ case HOST1X_OPCODE_INCR:
+ offset = (word >> 16) & 0xfff;
+ count = word & 0xffff;
+ err = fw_check_regs_seq(&fw, offset, count, true);
+ if (err)
+ dev_warn(client->base.dev,
+ "illegal INCR(offset=0x%x, count=%u) in class 0x%x at word %u",
+ offset, count, fw.class, fw.pos-1);
+ break;
+ case HOST1X_OPCODE_NONINCR:
+ offset = (word >> 16) & 0xfff;
+ count = word & 0xffff;
+ err = fw_check_regs_seq(&fw, offset, count, false);
+ if (err)
+ dev_warn(client->base.dev,
+ "illegal NONINCR(offset=0x%x, count=%u) in class 0x%x at word %u",
+ offset, count, fw.class, fw.pos-1);
+ break;
+ case HOST1X_OPCODE_MASK:
+ offset = (word >> 16) & 0xfff;
+ mask = word & 0xffff;
+ err = fw_check_regs_mask(&fw, offset, mask);
+ if (err)
+ dev_warn(client->base.dev,
+ "illegal MASK(offset=0x%x, mask=0x%x) in class 0x%x at word %u",
+ offset, mask, fw.class, fw.pos-1);
+ break;
+ case HOST1X_OPCODE_IMM:
+ /* IMM cannot reasonably be used to write a pointer */
+ offset = (word >> 16) & 0xfff;
+ err = fw_check_regs_imm(&fw, offset);
+ if (err)
+ dev_warn(client->base.dev,
+ "illegal IMM(offset=0x%x) in class 0x%x at word %u",
+ offset, fw.class, fw.pos-1);
+ break;
+ case HOST1X_OPCODE_SETPYLD:
+ payload = word & 0xffff;
+ payload_valid = true;
+ break;
+ case HOST1X_OPCODE_INCR_W:
+ if (!payload_valid)
+ return -EINVAL;
+
+ offset = word & 0x3fffff;
+ err = fw_check_regs_seq(&fw, offset, payload, true);
+ if (err)
+ dev_warn(client->base.dev,
+ "illegal INCR_W(offset=0x%x) in class 0x%x at word %u",
+ offset, fw.class, fw.pos-1);
+ break;
+ case HOST1X_OPCODE_NONINCR_W:
+ if (!payload_valid)
+ return -EINVAL;
+
+ offset = word & 0x3fffff;
+ err = fw_check_regs_seq(&fw, offset, payload, false);
+ if (err)
+ dev_warn(client->base.dev,
+ "illegal NONINCR(offset=0x%x) in class 0x%x at word %u",
+ offset, fw.class, fw.pos-1);
+ break;
+ default:
+ dev_warn(client->base.dev, "illegal opcode at word %u",
+ fw.pos-1);
+ return -EINVAL;
+ }
+
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
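+
+/*
+ * Usage sketch (hypothetical caller; cmdbuf, num_words and submit are
+ * illustrative names): validate one gather of a job before handing it
+ * to hardware, carrying the current class across gathers:
+ *
+ *	u32 job_class = client->base.class;
+ *	int err;
+ *
+ *	err = tegra_drm_fw_validate(client, cmdbuf, 0, num_words,
+ *				    submit, &job_class);
+ *	if (err < 0)
+ *		return err;
+ */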
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
new file mode 100644
index 000000000..81991090a
--- /dev/null
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -0,0 +1,784 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * NVIDIA Tegra DRM GEM helper functions
+ *
+ * Copyright (C) 2012 Sascha Hauer, Pengutronix
+ * Copyright (C) 2013-2015 NVIDIA CORPORATION, All rights reserved.
+ *
+ * Based on the GEM/CMA helpers
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/iommu.h>
+#include <linux/module.h>
+
+#include <drm/drm_drv.h>
+#include <drm/drm_prime.h>
+#include <drm/tegra_drm.h>
+
+#include "drm.h"
+#include "gem.h"
+
+MODULE_IMPORT_NS(DMA_BUF);
+
+static unsigned int sg_dma_count_chunks(struct scatterlist *sgl, unsigned int nents)
+{
+ dma_addr_t next = ~(dma_addr_t)0;
+ unsigned int count = 0, i;
+ struct scatterlist *s;
+
+ for_each_sg(sgl, s, nents, i) {
+ /* sg_dma_address(s) is only valid for entries that have sg_dma_len(s) != 0. */
+ if (!sg_dma_len(s))
+ continue;
+
+ /* a new chunk starts wherever the mapping is not contiguous */
+ if (sg_dma_address(s) != next)
+ count++;
+
+ next = sg_dma_address(s) + sg_dma_len(s);
+ }
+
+ return count;
+}
+
+static inline unsigned int sgt_dma_count_chunks(struct sg_table *sgt)
+{
+ return sg_dma_count_chunks(sgt->sgl, sgt->nents);
+}
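+
+/*
+ * For example (hypothetical mapping): entries at DMA addresses 0x1000,
+ * 0x2000 and 0x8000, each 0x1000 bytes long, count as two chunks,
+ * since the first two entries are contiguous.
+ */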
+
+static void tegra_bo_put(struct host1x_bo *bo)
+{
+ struct tegra_bo *obj = host1x_to_tegra_bo(bo);
+
+ drm_gem_object_put(&obj->gem);
+}
+
+static struct host1x_bo_mapping *tegra_bo_pin(struct device *dev, struct host1x_bo *bo,
+ enum dma_data_direction direction)
+{
+ struct tegra_bo *obj = host1x_to_tegra_bo(bo);
+ struct drm_gem_object *gem = &obj->gem;
+ struct host1x_bo_mapping *map;
+ int err;
+
+ map = kzalloc(sizeof(*map), GFP_KERNEL);
+ if (!map)
+ return ERR_PTR(-ENOMEM);
+
+ kref_init(&map->ref);
+ map->bo = host1x_bo_get(bo);
+ map->direction = direction;
+ map->dev = dev;
+
+ /*
+ * Imported buffers need special treatment to satisfy the semantics of DMA-BUF.
+ */
+ if (gem->import_attach) {
+ struct dma_buf *buf = gem->import_attach->dmabuf;
+
+ map->attach = dma_buf_attach(buf, dev);
+ if (IS_ERR(map->attach)) {
+ err = PTR_ERR(map->attach);
+ goto free;
+ }
+
+ map->sgt = dma_buf_map_attachment(map->attach, direction);
+ if (IS_ERR(map->sgt)) {
+ dma_buf_detach(buf, map->attach);
+ err = PTR_ERR(map->sgt);
+ map->sgt = NULL;
+ goto free;
+ }
+
+ err = sgt_dma_count_chunks(map->sgt);
+
+ goto out;
+ }
+
+ /*
+ * If we don't have a mapping for this buffer yet, return an SG table
+ * so that host1x can do the mapping for us via the DMA API.
+ */
+ map->sgt = kzalloc(sizeof(*map->sgt), GFP_KERNEL);
+ if (!map->sgt) {
+ err = -ENOMEM;
+ goto free;
+ }
+
+ if (obj->pages) {
+ /*
+ * If the buffer object was allocated from the explicit IOMMU
+ * API code paths, construct an SG table from the pages.
+ */
+ err = sg_alloc_table_from_pages(map->sgt, obj->pages, obj->num_pages, 0, gem->size,
+ GFP_KERNEL);
+ if (err < 0)
+ goto free;
+ } else {
+ /*
+ * If the buffer object had no pages allocated and if it was
+ * not imported, it had to be allocated with the DMA API, so
+ * the DMA API helper can be used.
+ */
+ err = dma_get_sgtable(dev, map->sgt, obj->vaddr, obj->iova, gem->size);
+ if (err < 0)
+ goto free;
+ }
+
+ err = dma_map_sgtable(dev, map->sgt, direction, 0);
+ if (err)
+ goto free_sgt;
+
+out:
+ /*
+ * If we've manually mapped the buffer object through the IOMMU, make sure to return the
+ * existing IOVA address of our mapping.
+ */
+ if (!obj->mm) {
+ map->phys = sg_dma_address(map->sgt->sgl);
+ map->chunks = err;
+ } else {
+ map->phys = obj->iova;
+ map->chunks = 1;
+ }
+
+ map->size = gem->size;
+
+ return map;
+
+free_sgt:
+ sg_free_table(map->sgt);
+free:
+ kfree(map->sgt);
+ kfree(map);
+ return ERR_PTR(err);
+}
+
+static void tegra_bo_unpin(struct host1x_bo_mapping *map)
+{
+ if (map->attach) {
+ dma_buf_unmap_attachment(map->attach, map->sgt, map->direction);
+ dma_buf_detach(map->attach->dmabuf, map->attach);
+ } else {
+ dma_unmap_sgtable(map->dev, map->sgt, map->direction, 0);
+ sg_free_table(map->sgt);
+ kfree(map->sgt);
+ }
+
+ host1x_bo_put(map->bo);
+ kfree(map);
+}
+
+static void *tegra_bo_mmap(struct host1x_bo *bo)
+{
+ struct tegra_bo *obj = host1x_to_tegra_bo(bo);
+ struct iosys_map map;
+ int ret;
+
+ if (obj->vaddr) {
+ return obj->vaddr;
+ } else if (obj->gem.import_attach) {
+ ret = dma_buf_vmap(obj->gem.import_attach->dmabuf, &map);
+ return ret ? NULL : map.vaddr;
+ } else {
+ return vmap(obj->pages, obj->num_pages, VM_MAP,
+ pgprot_writecombine(PAGE_KERNEL));
+ }
+}
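+
+/*
+ * Note that the three branches above mirror the three allocation paths:
+ * a kernel virtual address from dma_alloc_wc(), an imported dma-buf, or
+ * pages allocated through the explicit IOMMU path.
+ */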
+
+static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
+{
+ struct tegra_bo *obj = host1x_to_tegra_bo(bo);
+ struct iosys_map map = IOSYS_MAP_INIT_VADDR(addr);
+
+ if (obj->vaddr)
+ return;
+ else if (obj->gem.import_attach)
+ dma_buf_vunmap(obj->gem.import_attach->dmabuf, &map);
+ else
+ vunmap(addr);
+}
+
+static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
+{
+ struct tegra_bo *obj = host1x_to_tegra_bo(bo);
+
+ drm_gem_object_get(&obj->gem);
+
+ return bo;
+}
+
+static const struct host1x_bo_ops tegra_bo_ops = {
+ .get = tegra_bo_get,
+ .put = tegra_bo_put,
+ .pin = tegra_bo_pin,
+ .unpin = tegra_bo_unpin,
+ .mmap = tegra_bo_mmap,
+ .munmap = tegra_bo_munmap,
+};
+
+static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
+{
+ int prot = IOMMU_READ | IOMMU_WRITE;
+ int err;
+
+ if (bo->mm)
+ return -EBUSY;
+
+ bo->mm = kzalloc(sizeof(*bo->mm), GFP_KERNEL);
+ if (!bo->mm)
+ return -ENOMEM;
+
+ mutex_lock(&tegra->mm_lock);
+
+ err = drm_mm_insert_node_generic(&tegra->mm,
+ bo->mm, bo->gem.size, PAGE_SIZE, 0, 0);
+ if (err < 0) {
+ dev_err(tegra->drm->dev, "out of I/O virtual memory: %d\n",
+ err);
+ goto unlock;
+ }
+
+ bo->iova = bo->mm->start;
+
+ bo->size = iommu_map_sgtable(tegra->domain, bo->iova, bo->sgt, prot);
+ if (!bo->size) {
+ dev_err(tegra->drm->dev, "failed to map buffer\n");
+ err = -ENOMEM;
+ goto remove;
+ }
+
+ mutex_unlock(&tegra->mm_lock);
+
+ return 0;
+
+remove:
+ drm_mm_remove_node(bo->mm);
+unlock:
+ mutex_unlock(&tegra->mm_lock);
+ kfree(bo->mm);
+ return err;
+}
+
+static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo)
+{
+ if (!bo->mm)
+ return 0;
+
+ mutex_lock(&tegra->mm_lock);
+ iommu_unmap(tegra->domain, bo->iova, bo->size);
+ drm_mm_remove_node(bo->mm);
+ mutex_unlock(&tegra->mm_lock);
+
+ kfree(bo->mm);
+
+ return 0;
+}
+
+static const struct drm_gem_object_funcs tegra_gem_object_funcs = {
+ .free = tegra_bo_free_object,
+ .export = tegra_gem_prime_export,
+ .vm_ops = &tegra_bo_vm_ops,
+};
+
+static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm,
+ size_t size)
+{
+ struct tegra_bo *bo;
+ int err;
+
+ bo = kzalloc(sizeof(*bo), GFP_KERNEL);
+ if (!bo)
+ return ERR_PTR(-ENOMEM);
+
+ bo->gem.funcs = &tegra_gem_object_funcs;
+
+ host1x_bo_init(&bo->base, &tegra_bo_ops);
+ size = round_up(size, PAGE_SIZE);
+
+ err = drm_gem_object_init(drm, &bo->gem, size);
+ if (err < 0)
+ goto free;
+
+ err = drm_gem_create_mmap_offset(&bo->gem);
+ if (err < 0)
+ goto release;
+
+ return bo;
+
+release:
+ drm_gem_object_release(&bo->gem);
+free:
+ kfree(bo);
+ return ERR_PTR(err);
+}
+
+static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
+{
+ if (bo->pages) {
+ dma_unmap_sgtable(drm->dev, bo->sgt, DMA_FROM_DEVICE, 0);
+ drm_gem_put_pages(&bo->gem, bo->pages, true, true);
+ sg_free_table(bo->sgt);
+ kfree(bo->sgt);
+ } else if (bo->vaddr) {
+ dma_free_wc(drm->dev, bo->gem.size, bo->vaddr, bo->iova);
+ }
+}
+
+static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
+{
+ int err;
+
+ bo->pages = drm_gem_get_pages(&bo->gem);
+ if (IS_ERR(bo->pages))
+ return PTR_ERR(bo->pages);
+
+ bo->num_pages = bo->gem.size >> PAGE_SHIFT;
+
+ bo->sgt = drm_prime_pages_to_sg(bo->gem.dev, bo->pages, bo->num_pages);
+ if (IS_ERR(bo->sgt)) {
+ err = PTR_ERR(bo->sgt);
+ goto put_pages;
+ }
+
+ err = dma_map_sgtable(drm->dev, bo->sgt, DMA_FROM_DEVICE, 0);
+ if (err)
+ goto free_sgt;
+
+ return 0;
+
+free_sgt:
+ sg_free_table(bo->sgt);
+ kfree(bo->sgt);
+put_pages:
+ drm_gem_put_pages(&bo->gem, bo->pages, false, false);
+ return err;
+}
+
+static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
+{
+ struct tegra_drm *tegra = drm->dev_private;
+ int err;
+
+ if (tegra->domain) {
+ err = tegra_bo_get_pages(drm, bo);
+ if (err < 0)
+ return err;
+
+ err = tegra_bo_iommu_map(tegra, bo);
+ if (err < 0) {
+ tegra_bo_free(drm, bo);
+ return err;
+ }
+ } else {
+ size_t size = bo->gem.size;
+
+ bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->iova,
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!bo->vaddr) {
+ dev_err(drm->dev,
+ "failed to allocate buffer of size %zu\n",
+ size);
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size,
+ unsigned long flags)
+{
+ struct tegra_bo *bo;
+ int err;
+
+ bo = tegra_bo_alloc_object(drm, size);
+ if (IS_ERR(bo))
+ return bo;
+
+ err = tegra_bo_alloc(drm, bo);
+ if (err < 0)
+ goto release;
+
+ if (flags & DRM_TEGRA_GEM_CREATE_TILED)
+ bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED;
+
+ if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP)
+ bo->flags |= TEGRA_BO_BOTTOM_UP;
+
+ return bo;
+
+release:
+ drm_gem_object_release(&bo->gem);
+ kfree(bo);
+ return ERR_PTR(err);
+}
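+
+/*
+ * Usage sketch (hypothetical caller): allocate a tiled, bottom-up
+ * buffer object; the flags map directly to the UAPI create flags:
+ *
+ *	bo = tegra_bo_create(drm, size, DRM_TEGRA_GEM_CREATE_TILED |
+ *			     DRM_TEGRA_GEM_CREATE_BOTTOM_UP);
+ *	if (IS_ERR(bo))
+ *		return PTR_ERR(bo);
+ */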
+
+struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
+ struct drm_device *drm,
+ size_t size,
+ unsigned long flags,
+ u32 *handle)
+{
+ struct tegra_bo *bo;
+ int err;
+
+ bo = tegra_bo_create(drm, size, flags);
+ if (IS_ERR(bo))
+ return bo;
+
+ err = drm_gem_handle_create(file, &bo->gem, handle);
+ if (err) {
+ tegra_bo_free_object(&bo->gem);
+ return ERR_PTR(err);
+ }
+
+ drm_gem_object_put(&bo->gem);
+
+ return bo;
+}
+
+static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
+ struct dma_buf *buf)
+{
+ struct tegra_drm *tegra = drm->dev_private;
+ struct dma_buf_attachment *attach;
+ struct tegra_bo *bo;
+ int err;
+
+ bo = tegra_bo_alloc_object(drm, buf->size);
+ if (IS_ERR(bo))
+ return bo;
+
+ attach = dma_buf_attach(buf, drm->dev);
+ if (IS_ERR(attach)) {
+ err = PTR_ERR(attach);
+ goto free;
+ }
+
+ get_dma_buf(buf);
+
+ bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
+ if (IS_ERR(bo->sgt)) {
+ err = PTR_ERR(bo->sgt);
+ goto detach;
+ }
+
+ if (tegra->domain) {
+ err = tegra_bo_iommu_map(tegra, bo);
+ if (err < 0)
+ goto detach;
+ }
+
+ bo->gem.import_attach = attach;
+
+ return bo;
+
+detach:
+ if (!IS_ERR_OR_NULL(bo->sgt))
+ dma_buf_unmap_attachment(attach, bo->sgt, DMA_TO_DEVICE);
+
+ dma_buf_detach(buf, attach);
+ dma_buf_put(buf);
+free:
+ drm_gem_object_release(&bo->gem);
+ kfree(bo);
+ return ERR_PTR(err);
+}
+
+void tegra_bo_free_object(struct drm_gem_object *gem)
+{
+ struct tegra_drm *tegra = gem->dev->dev_private;
+ struct host1x_bo_mapping *mapping, *tmp;
+ struct tegra_bo *bo = to_tegra_bo(gem);
+
+ /* remove all mappings of this buffer object from any caches */
+ list_for_each_entry_safe(mapping, tmp, &bo->base.mappings, list) {
+ if (mapping->cache)
+ host1x_bo_unpin(mapping);
+ else
+ dev_err(gem->dev->dev, "mapping %p stale for device %s\n", mapping,
+ dev_name(mapping->dev));
+ }
+
+ if (tegra->domain)
+ tegra_bo_iommu_unmap(tegra, bo);
+
+ if (gem->import_attach) {
+ dma_buf_unmap_attachment(gem->import_attach, bo->sgt,
+ DMA_TO_DEVICE);
+ drm_prime_gem_destroy(gem, NULL);
+ } else {
+ tegra_bo_free(gem->dev, bo);
+ }
+
+ drm_gem_object_release(gem);
+ kfree(bo);
+}
+
+int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
+ struct drm_mode_create_dumb *args)
+{
+ unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
+ struct tegra_drm *tegra = drm->dev_private;
+ struct tegra_bo *bo;
+
+ args->pitch = round_up(min_pitch, tegra->pitch_align);
+ args->size = args->pitch * args->height;
+
+ bo = tegra_bo_create_with_handle(file, drm, args->size, 0,
+ &args->handle);
+ if (IS_ERR(bo))
+ return PTR_ERR(bo);
+
+ return 0;
+}
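+
+/*
+ * Worked example (hypothetical values): for width = 1025, bpp = 32 and
+ * a pitch_align of 64, min_pitch = DIV_ROUND_UP(1025 * 32, 8) = 4100,
+ * which rounds up to args->pitch = 4160, so args->size = 4160 * height.
+ */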
+
+static vm_fault_t tegra_bo_fault(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct drm_gem_object *gem = vma->vm_private_data;
+ struct tegra_bo *bo = to_tegra_bo(gem);
+ struct page *page;
+ pgoff_t offset;
+
+ if (!bo->pages)
+ return VM_FAULT_SIGBUS;
+
+ offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
+ page = bo->pages[offset];
+
+ return vmf_insert_page(vma, vmf->address, page);
+}
+
+const struct vm_operations_struct tegra_bo_vm_ops = {
+ .fault = tegra_bo_fault,
+ .open = drm_gem_vm_open,
+ .close = drm_gem_vm_close,
+};
+
+int __tegra_gem_mmap(struct drm_gem_object *gem, struct vm_area_struct *vma)
+{
+ struct tegra_bo *bo = to_tegra_bo(gem);
+
+ if (!bo->pages) {
+ unsigned long vm_pgoff = vma->vm_pgoff;
+ int err;
+
+ /*
+ * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(),
+ * and set the vm_pgoff (used as a fake buffer offset by DRM)
+ * to 0 as we want to map the whole buffer.
+ */
+ vma->vm_flags &= ~VM_PFNMAP;
+ vma->vm_pgoff = 0;
+
+ err = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->iova,
+ gem->size);
+ if (err < 0) {
+ drm_gem_vm_close(vma);
+ return err;
+ }
+
+ vma->vm_pgoff = vm_pgoff;
+ } else {
+ pgprot_t prot = vm_get_page_prot(vma->vm_flags);
+
+ vma->vm_flags |= VM_MIXEDMAP;
+ vma->vm_flags &= ~VM_PFNMAP;
+
+ vma->vm_page_prot = pgprot_writecombine(prot);
+ }
+
+ return 0;
+}
+
+int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct drm_gem_object *gem;
+ int err;
+
+ err = drm_gem_mmap(file, vma);
+ if (err < 0)
+ return err;
+
+ gem = vma->vm_private_data;
+
+ return __tegra_gem_mmap(gem, vma);
+}
+
+static struct sg_table *
+tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
+ enum dma_data_direction dir)
+{
+ struct drm_gem_object *gem = attach->dmabuf->priv;
+ struct tegra_bo *bo = to_tegra_bo(gem);
+ struct sg_table *sgt;
+
+ sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
+ if (!sgt)
+ return NULL;
+
+ if (bo->pages) {
+ if (sg_alloc_table_from_pages(sgt, bo->pages, bo->num_pages,
+ 0, gem->size, GFP_KERNEL) < 0)
+ goto free;
+ } else {
+ if (dma_get_sgtable(attach->dev, sgt, bo->vaddr, bo->iova,
+ gem->size) < 0)
+ goto free;
+ }
+
+ if (dma_map_sgtable(attach->dev, sgt, dir, 0))
+ goto free;
+
+ return sgt;
+
+free:
+ sg_free_table(sgt);
+ kfree(sgt);
+ return NULL;
+}
+
+static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
+ struct sg_table *sgt,
+ enum dma_data_direction dir)
+{
+ struct drm_gem_object *gem = attach->dmabuf->priv;
+ struct tegra_bo *bo = to_tegra_bo(gem);
+
+ if (bo->pages)
+ dma_unmap_sgtable(attach->dev, sgt, dir, 0);
+
+ sg_free_table(sgt);
+ kfree(sgt);
+}
+
+static void tegra_gem_prime_release(struct dma_buf *buf)
+{
+ drm_gem_dmabuf_release(buf);
+}
+
+static int tegra_gem_prime_begin_cpu_access(struct dma_buf *buf,
+ enum dma_data_direction direction)
+{
+ struct drm_gem_object *gem = buf->priv;
+ struct tegra_bo *bo = to_tegra_bo(gem);
+ struct drm_device *drm = gem->dev;
+
+ if (bo->pages)
+ dma_sync_sgtable_for_cpu(drm->dev, bo->sgt, DMA_FROM_DEVICE);
+
+ return 0;
+}
+
+static int tegra_gem_prime_end_cpu_access(struct dma_buf *buf,
+ enum dma_data_direction direction)
+{
+ struct drm_gem_object *gem = buf->priv;
+ struct tegra_bo *bo = to_tegra_bo(gem);
+ struct drm_device *drm = gem->dev;
+
+ if (bo->pages)
+ dma_sync_sgtable_for_device(drm->dev, bo->sgt, DMA_TO_DEVICE);
+
+ return 0;
+}
+
+static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
+{
+ struct drm_gem_object *gem = buf->priv;
+ int err;
+
+ err = drm_gem_mmap_obj(gem, gem->size, vma);
+ if (err < 0)
+ return err;
+
+ return __tegra_gem_mmap(gem, vma);
+}
+
+static int tegra_gem_prime_vmap(struct dma_buf *buf, struct iosys_map *map)
+{
+ struct drm_gem_object *gem = buf->priv;
+ struct tegra_bo *bo = to_tegra_bo(gem);
+ void *vaddr;
+
+ vaddr = tegra_bo_mmap(&bo->base);
+ if (IS_ERR(vaddr))
+ return PTR_ERR(vaddr);
+
+ iosys_map_set_vaddr(map, vaddr);
+
+ return 0;
+}
+
+static void tegra_gem_prime_vunmap(struct dma_buf *buf, struct iosys_map *map)
+{
+ struct drm_gem_object *gem = buf->priv;
+ struct tegra_bo *bo = to_tegra_bo(gem);
+
+ tegra_bo_munmap(&bo->base, map->vaddr);
+}
+
+static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
+ .map_dma_buf = tegra_gem_prime_map_dma_buf,
+ .unmap_dma_buf = tegra_gem_prime_unmap_dma_buf,
+ .release = tegra_gem_prime_release,
+ .begin_cpu_access = tegra_gem_prime_begin_cpu_access,
+ .end_cpu_access = tegra_gem_prime_end_cpu_access,
+ .mmap = tegra_gem_prime_mmap,
+ .vmap = tegra_gem_prime_vmap,
+ .vunmap = tegra_gem_prime_vunmap,
+};
+
+struct dma_buf *tegra_gem_prime_export(struct drm_gem_object *gem,
+ int flags)
+{
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+
+ exp_info.exp_name = KBUILD_MODNAME;
+ exp_info.owner = gem->dev->driver->fops->owner;
+ exp_info.ops = &tegra_gem_prime_dmabuf_ops;
+ exp_info.size = gem->size;
+ exp_info.flags = flags;
+ exp_info.priv = gem;
+
+ return drm_gem_dmabuf_export(gem->dev, &exp_info);
+}
+
+struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
+ struct dma_buf *buf)
+{
+ struct tegra_bo *bo;
+
+ if (buf->ops == &tegra_gem_prime_dmabuf_ops) {
+ struct drm_gem_object *gem = buf->priv;
+
+ if (gem->dev == drm) {
+ drm_gem_object_get(gem);
+ return gem;
+ }
+ }
+
+ bo = tegra_bo_import(drm, buf);
+ if (IS_ERR(bo))
+ return ERR_CAST(bo);
+
+ return &bo->gem;
+}
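+
+/*
+ * Note that importing a dma-buf that was exported from this same DRM
+ * device short-circuits to taking a reference on the underlying GEM
+ * object rather than creating a new attachment.
+ */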
+
+struct host1x_bo *tegra_gem_lookup(struct drm_file *file, u32 handle)
+{
+ struct drm_gem_object *gem;
+ struct tegra_bo *bo;
+
+ gem = drm_gem_object_lookup(file, handle);
+ if (!gem)
+ return NULL;
+
+ bo = to_tegra_bo(gem);
+ return &bo->base;
+}
diff --git a/drivers/gpu/drm/tegra/gem.h b/drivers/gpu/drm/tegra/gem.h
new file mode 100644
index 000000000..cb5146a67
--- /dev/null
+++ b/drivers/gpu/drm/tegra/gem.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Tegra host1x GEM implementation
+ *
+ * Copyright (c) 2012-2013, NVIDIA Corporation.
+ */
+
+#ifndef __HOST1X_GEM_H
+#define __HOST1X_GEM_H
+
+#include <linux/host1x.h>
+
+#include <drm/drm.h>
+#include <drm/drm_gem.h>
+
+#define TEGRA_BO_BOTTOM_UP (1 << 0)
+
+enum tegra_bo_tiling_mode {
+ TEGRA_BO_TILING_MODE_PITCH,
+ TEGRA_BO_TILING_MODE_TILED,
+ TEGRA_BO_TILING_MODE_BLOCK,
+};
+
+enum tegra_bo_sector_layout {
+ TEGRA_BO_SECTOR_LAYOUT_TEGRA,
+ TEGRA_BO_SECTOR_LAYOUT_GPU,
+};
+
+struct tegra_bo_tiling {
+ enum tegra_bo_tiling_mode mode;
+ unsigned long value;
+ enum tegra_bo_sector_layout sector_layout;
+};
+
+struct tegra_bo {
+ struct drm_gem_object gem;
+ struct host1x_bo base;
+ unsigned long flags;
+ struct sg_table *sgt;
+ dma_addr_t iova;
+ void *vaddr;
+
+ struct drm_mm_node *mm;
+ unsigned long num_pages;
+ struct page **pages;
+ /* size of IOMMU mapping */
+ size_t size;
+
+ struct tegra_bo_tiling tiling;
+};
+
+static inline struct tegra_bo *to_tegra_bo(struct drm_gem_object *gem)
+{
+ return container_of(gem, struct tegra_bo, gem);
+}
+
+static inline struct tegra_bo *host1x_to_tegra_bo(struct host1x_bo *bo)
+{
+ return container_of(bo, struct tegra_bo, base);
+}
+
+struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size,
+ unsigned long flags);
+struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
+ struct drm_device *drm,
+ size_t size,
+ unsigned long flags,
+ u32 *handle);
+void tegra_bo_free_object(struct drm_gem_object *gem);
+int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
+ struct drm_mode_create_dumb *args);
+
+extern const struct vm_operations_struct tegra_bo_vm_ops;
+
+int __tegra_gem_mmap(struct drm_gem_object *gem, struct vm_area_struct *vma);
+int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma);
+
+struct dma_buf *tegra_gem_prime_export(struct drm_gem_object *gem,
+ int flags);
+struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
+ struct dma_buf *buf);
+
+struct host1x_bo *tegra_gem_lookup(struct drm_file *file, u32 handle);
+
+#endif
diff --git a/drivers/gpu/drm/tegra/gr2d.c b/drivers/gpu/drm/tegra/gr2d.c
new file mode 100644
index 000000000..e3bb4c99e
--- /dev/null
+++ b/drivers/gpu/drm/tegra/gr2d.c
@@ -0,0 +1,407 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2012-2013, NVIDIA Corporation.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/iommu.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+
+#include <soc/tegra/common.h>
+
+#include "drm.h"
+#include "gem.h"
+#include "gr2d.h"
+
+enum {
+ RST_MC,
+ RST_GR2D,
+ RST_GR2D_MAX,
+};
+
+struct gr2d_soc {
+ unsigned int version;
+};
+
+struct gr2d {
+ struct tegra_drm_client client;
+ struct host1x_channel *channel;
+ struct clk *clk;
+
+ struct reset_control_bulk_data resets[RST_GR2D_MAX];
+ unsigned int nresets;
+
+ const struct gr2d_soc *soc;
+
+ DECLARE_BITMAP(addr_regs, GR2D_NUM_REGS);
+};
+
+static inline struct gr2d *to_gr2d(struct tegra_drm_client *client)
+{
+ return container_of(client, struct gr2d, client);
+}
+
+static int gr2d_init(struct host1x_client *client)
+{
+ struct tegra_drm_client *drm = host1x_to_drm_client(client);
+ struct drm_device *dev = dev_get_drvdata(client->host);
+ unsigned long flags = HOST1X_SYNCPT_HAS_BASE;
+ struct gr2d *gr2d = to_gr2d(drm);
+ int err;
+
+ gr2d->channel = host1x_channel_request(client);
+ if (!gr2d->channel)
+ return -ENOMEM;
+
+ client->syncpts[0] = host1x_syncpt_request(client, flags);
+ if (!client->syncpts[0]) {
+ err = -ENOMEM;
+ dev_err(client->dev, "failed to request syncpoint: %d\n", err);
+ goto put;
+ }
+
+ err = host1x_client_iommu_attach(client);
+ if (err < 0) {
+ dev_err(client->dev, "failed to attach to domain: %d\n", err);
+ goto free;
+ }
+
+ pm_runtime_enable(client->dev);
+ pm_runtime_use_autosuspend(client->dev);
+ pm_runtime_set_autosuspend_delay(client->dev, 200);
+
+ err = tegra_drm_register_client(dev->dev_private, drm);
+ if (err < 0) {
+ dev_err(client->dev, "failed to register client: %d\n", err);
+ goto disable_rpm;
+ }
+
+ return 0;
+
+disable_rpm:
+ pm_runtime_dont_use_autosuspend(client->dev);
+ pm_runtime_force_suspend(client->dev);
+
+ host1x_client_iommu_detach(client);
+free:
+ host1x_syncpt_put(client->syncpts[0]);
+put:
+ host1x_channel_put(gr2d->channel);
+ return err;
+}
+
+static int gr2d_exit(struct host1x_client *client)
+{
+ struct tegra_drm_client *drm = host1x_to_drm_client(client);
+ struct drm_device *dev = dev_get_drvdata(client->host);
+ struct tegra_drm *tegra = dev->dev_private;
+ struct gr2d *gr2d = to_gr2d(drm);
+ int err;
+
+ err = tegra_drm_unregister_client(tegra, drm);
+ if (err < 0)
+ return err;
+
+ pm_runtime_dont_use_autosuspend(client->dev);
+ pm_runtime_force_suspend(client->dev);
+
+ host1x_client_iommu_detach(client);
+ host1x_syncpt_put(client->syncpts[0]);
+ host1x_channel_put(gr2d->channel);
+
+ gr2d->channel = NULL;
+
+ return 0;
+}
+
+static const struct host1x_client_ops gr2d_client_ops = {
+ .init = gr2d_init,
+ .exit = gr2d_exit,
+};
+
+static int gr2d_open_channel(struct tegra_drm_client *client,
+ struct tegra_drm_context *context)
+{
+ struct gr2d *gr2d = to_gr2d(client);
+
+ context->channel = host1x_channel_get(gr2d->channel);
+ if (!context->channel)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void gr2d_close_channel(struct tegra_drm_context *context)
+{
+ host1x_channel_put(context->channel);
+}
+
+static int gr2d_is_addr_reg(struct device *dev, u32 class, u32 offset)
+{
+ struct gr2d *gr2d = dev_get_drvdata(dev);
+
+ switch (class) {
+ case HOST1X_CLASS_HOST1X:
+ if (offset == 0x2b)
+ return 1;
+
+ break;
+
+ case HOST1X_CLASS_GR2D:
+ case HOST1X_CLASS_GR2D_SB:
+ if (offset >= GR2D_NUM_REGS)
+ break;
+
+ if (test_bit(offset, gr2d->addr_regs))
+ return 1;
+
+ break;
+ }
+
+ return 0;
+}
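+
+/*
+ * For example, gr2d_is_addr_reg(dev, HOST1X_CLASS_GR2D,
+ * GR2D_DSTA_BASE_ADDR) returns 1, since GR2D_DSTA_BASE_ADDR is part of
+ * the gr2d_addr_regs table below; the firewall then verifies that any
+ * value written to that register is a valid mapping address.
+ */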
+
+static int gr2d_is_valid_class(u32 class)
+{
+ return (class == HOST1X_CLASS_GR2D ||
+ class == HOST1X_CLASS_GR2D_SB);
+}
+
+static const struct tegra_drm_client_ops gr2d_ops = {
+ .open_channel = gr2d_open_channel,
+ .close_channel = gr2d_close_channel,
+ .is_addr_reg = gr2d_is_addr_reg,
+ .is_valid_class = gr2d_is_valid_class,
+ .submit = tegra_drm_submit,
+};
+
+static const struct gr2d_soc tegra20_gr2d_soc = {
+ .version = 0x20,
+};
+
+static const struct gr2d_soc tegra30_gr2d_soc = {
+ .version = 0x30,
+};
+
+static const struct gr2d_soc tegra114_gr2d_soc = {
+ .version = 0x35,
+};
+
+static const struct of_device_id gr2d_match[] = {
+ { .compatible = "nvidia,tegra114-gr2d", .data = &tegra114_gr2d_soc },
+ { .compatible = "nvidia,tegra30-gr2d", .data = &tegra30_gr2d_soc },
+ { .compatible = "nvidia,tegra20-gr2d", .data = &tegra20_gr2d_soc },
+ { },
+};
+MODULE_DEVICE_TABLE(of, gr2d_match);
+
+static const u32 gr2d_addr_regs[] = {
+ GR2D_UA_BASE_ADDR,
+ GR2D_VA_BASE_ADDR,
+ GR2D_PAT_BASE_ADDR,
+ GR2D_DSTA_BASE_ADDR,
+ GR2D_DSTB_BASE_ADDR,
+ GR2D_DSTC_BASE_ADDR,
+ GR2D_SRCA_BASE_ADDR,
+ GR2D_SRCB_BASE_ADDR,
+ GR2D_PATBASE_ADDR,
+ GR2D_SRC_BASE_ADDR_SB,
+ GR2D_DSTA_BASE_ADDR_SB,
+ GR2D_DSTB_BASE_ADDR_SB,
+ GR2D_UA_BASE_ADDR_SB,
+ GR2D_VA_BASE_ADDR_SB,
+};
+
+static int gr2d_get_resets(struct device *dev, struct gr2d *gr2d)
+{
+ int err;
+
+ gr2d->resets[RST_MC].id = "mc";
+ gr2d->resets[RST_GR2D].id = "2d";
+ gr2d->nresets = RST_GR2D_MAX;
+
+ err = devm_reset_control_bulk_get_optional_exclusive_released(
+ dev, gr2d->nresets, gr2d->resets);
+ if (err) {
+ dev_err(dev, "failed to get reset: %d\n", err);
+ return err;
+ }
+
+ if (WARN_ON(!gr2d->resets[RST_GR2D].rstc))
+ return -ENOENT;
+
+ return 0;
+}
+
+static int gr2d_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct host1x_syncpt **syncpts;
+ struct gr2d *gr2d;
+ unsigned int i;
+ int err;
+
+ gr2d = devm_kzalloc(dev, sizeof(*gr2d), GFP_KERNEL);
+ if (!gr2d)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, gr2d);
+
+ gr2d->soc = of_device_get_match_data(dev);
+
+ syncpts = devm_kzalloc(dev, sizeof(*syncpts), GFP_KERNEL);
+ if (!syncpts)
+ return -ENOMEM;
+
+ gr2d->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(gr2d->clk)) {
+ dev_err(dev, "cannot get clock\n");
+ return PTR_ERR(gr2d->clk);
+ }
+
+ err = gr2d_get_resets(dev, gr2d);
+ if (err)
+ return err;
+
+ INIT_LIST_HEAD(&gr2d->client.base.list);
+ gr2d->client.base.ops = &gr2d_client_ops;
+ gr2d->client.base.dev = dev;
+ gr2d->client.base.class = HOST1X_CLASS_GR2D;
+ gr2d->client.base.syncpts = syncpts;
+ gr2d->client.base.num_syncpts = 1;
+
+ INIT_LIST_HEAD(&gr2d->client.list);
+ gr2d->client.version = gr2d->soc->version;
+ gr2d->client.ops = &gr2d_ops;
+
+ err = devm_tegra_core_dev_init_opp_table_common(dev);
+ if (err)
+ return err;
+
+ err = host1x_client_register(&gr2d->client.base);
+ if (err < 0) {
+ dev_err(dev, "failed to register host1x client: %d\n", err);
+ return err;
+ }
+
+ /* initialize address register map */
+ for (i = 0; i < ARRAY_SIZE(gr2d_addr_regs); i++)
+ set_bit(gr2d_addr_regs[i], gr2d->addr_regs);
+
+ return 0;
+}
+
+static int gr2d_remove(struct platform_device *pdev)
+{
+ struct gr2d *gr2d = platform_get_drvdata(pdev);
+ int err;
+
+ err = host1x_client_unregister(&gr2d->client.base);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
+ err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int __maybe_unused gr2d_runtime_suspend(struct device *dev)
+{
+ struct gr2d *gr2d = dev_get_drvdata(dev);
+ int err;
+
+ host1x_channel_stop(gr2d->channel);
+ reset_control_bulk_release(gr2d->nresets, gr2d->resets);
+
+ /*
+ * The GR2D module shouldn't be reset while the hardware is idling,
+ * otherwise host1x's command processor will get stuck trying to access
+ * any GR2D register after reset. The GR2D module can be either
+ * hot-reset or reset after power-gating of the HEG partition. Hence we
+ * only put the memory client part of the module into reset; the HEG
+ * GENPD takes care of resetting the GR2D module across power-gating.
+ *
+ * On Tegra20 there is no HEG partition, but it's okay to have an
+ * undetermined h/w state since userspace is expected to reprogram the
+ * state on each job submission anyway.
+ */
+ err = reset_control_acquire(gr2d->resets[RST_MC].rstc);
+ if (err) {
+ dev_err(dev, "failed to acquire MC reset: %d\n", err);
+ goto acquire_reset;
+ }
+
+ err = reset_control_assert(gr2d->resets[RST_MC].rstc);
+ reset_control_release(gr2d->resets[RST_MC].rstc);
+ if (err) {
+ dev_err(dev, "failed to assert MC reset: %d\n", err);
+ goto acquire_reset;
+ }
+
+ clk_disable_unprepare(gr2d->clk);
+
+ return 0;
+
+acquire_reset:
+ reset_control_bulk_acquire(gr2d->nresets, gr2d->resets);
+ reset_control_bulk_deassert(gr2d->nresets, gr2d->resets);
+
+ return err;
+}
+
+static int __maybe_unused gr2d_runtime_resume(struct device *dev)
+{
+ struct gr2d *gr2d = dev_get_drvdata(dev);
+ int err;
+
+ err = reset_control_bulk_acquire(gr2d->nresets, gr2d->resets);
+ if (err) {
+ dev_err(dev, "failed to acquire reset: %d\n", err);
+ return err;
+ }
+
+ err = clk_prepare_enable(gr2d->clk);
+ if (err) {
+ dev_err(dev, "failed to enable clock: %d\n", err);
+ goto release_reset;
+ }
+
+ usleep_range(2000, 4000);
+
+ /* this reset array deasserts both the 2D MC reset and 2D itself */
+ err = reset_control_bulk_deassert(gr2d->nresets, gr2d->resets);
+ if (err) {
+ dev_err(dev, "failed to deassert reset: %d\n", err);
+ goto disable_clk;
+ }
+
+ return 0;
+
+disable_clk:
+ clk_disable_unprepare(gr2d->clk);
+release_reset:
+ reset_control_bulk_release(gr2d->nresets, gr2d->resets);
+
+ return err;
+}
+
+static const struct dev_pm_ops tegra_gr2d_pm = {
+ SET_RUNTIME_PM_OPS(gr2d_runtime_suspend, gr2d_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+};
+
+struct platform_driver tegra_gr2d_driver = {
+ .driver = {
+ .name = "tegra-gr2d",
+ .of_match_table = gr2d_match,
+ .pm = &tegra_gr2d_pm,
+ },
+ .probe = gr2d_probe,
+ .remove = gr2d_remove,
+};
diff --git a/drivers/gpu/drm/tegra/gr2d.h b/drivers/gpu/drm/tegra/gr2d.h
new file mode 100644
index 000000000..9b7d66e15
--- /dev/null
+++ b/drivers/gpu/drm/tegra/gr2d.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2013 NVIDIA Corporation
+ */
+
+#ifndef TEGRA_GR2D_H
+#define TEGRA_GR2D_H
+
+#define GR2D_UA_BASE_ADDR 0x1a
+#define GR2D_VA_BASE_ADDR 0x1b
+#define GR2D_PAT_BASE_ADDR 0x26
+#define GR2D_DSTA_BASE_ADDR 0x2b
+#define GR2D_DSTB_BASE_ADDR 0x2c
+#define GR2D_DSTC_BASE_ADDR 0x2d
+#define GR2D_SRCA_BASE_ADDR 0x31
+#define GR2D_SRCB_BASE_ADDR 0x32
+#define GR2D_PATBASE_ADDR 0x47
+#define GR2D_SRC_BASE_ADDR_SB 0x48
+#define GR2D_DSTA_BASE_ADDR_SB 0x49
+#define GR2D_DSTB_BASE_ADDR_SB 0x4a
+#define GR2D_UA_BASE_ADDR_SB 0x4b
+#define GR2D_VA_BASE_ADDR_SB 0x4c
+
+#define GR2D_NUM_REGS 0x4d
+
+#endif
diff --git a/drivers/gpu/drm/tegra/gr3d.c b/drivers/gpu/drm/tegra/gr3d.c
new file mode 100644
index 000000000..a1fd3113e
--- /dev/null
+++ b/drivers/gpu/drm/tegra/gr3d.c
@@ -0,0 +1,642 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 Avionic Design GmbH
+ * Copyright (C) 2013 NVIDIA Corporation
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/host1x.h>
+#include <linux/iommu.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_opp.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+
+#include <soc/tegra/common.h>
+#include <soc/tegra/pmc.h>
+
+#include "drm.h"
+#include "gem.h"
+#include "gr3d.h"
+
+enum {
+ RST_MC,
+ RST_GR3D,
+ RST_MC2,
+ RST_GR3D2,
+ RST_GR3D_MAX,
+};
+
+struct gr3d_soc {
+ unsigned int version;
+ unsigned int num_clocks;
+ unsigned int num_resets;
+};
+
+struct gr3d {
+ struct tegra_drm_client client;
+ struct host1x_channel *channel;
+
+ const struct gr3d_soc *soc;
+ struct clk_bulk_data *clocks;
+ unsigned int nclocks;
+ struct reset_control_bulk_data resets[RST_GR3D_MAX];
+ unsigned int nresets;
+
+ DECLARE_BITMAP(addr_regs, GR3D_NUM_REGS);
+};
+
+static inline struct gr3d *to_gr3d(struct tegra_drm_client *client)
+{
+ return container_of(client, struct gr3d, client);
+}
+
+static int gr3d_init(struct host1x_client *client)
+{
+ struct tegra_drm_client *drm = host1x_to_drm_client(client);
+ struct drm_device *dev = dev_get_drvdata(client->host);
+ unsigned long flags = HOST1X_SYNCPT_HAS_BASE;
+ struct gr3d *gr3d = to_gr3d(drm);
+ int err;
+
+ gr3d->channel = host1x_channel_request(client);
+ if (!gr3d->channel)
+ return -ENOMEM;
+
+ client->syncpts[0] = host1x_syncpt_request(client, flags);
+ if (!client->syncpts[0]) {
+ err = -ENOMEM;
+ dev_err(client->dev, "failed to request syncpoint: %d\n", err);
+ goto put;
+ }
+
+ err = host1x_client_iommu_attach(client);
+ if (err < 0) {
+ dev_err(client->dev, "failed to attach to domain: %d\n", err);
+ goto free;
+ }
+
+ pm_runtime_enable(client->dev);
+ pm_runtime_use_autosuspend(client->dev);
+ pm_runtime_set_autosuspend_delay(client->dev, 200);
+
+ err = tegra_drm_register_client(dev->dev_private, drm);
+ if (err < 0) {
+ dev_err(client->dev, "failed to register client: %d\n", err);
+ goto disable_rpm;
+ }
+
+ return 0;
+
+disable_rpm:
+ pm_runtime_dont_use_autosuspend(client->dev);
+ pm_runtime_force_suspend(client->dev);
+
+ host1x_client_iommu_detach(client);
+free:
+ host1x_syncpt_put(client->syncpts[0]);
+put:
+ host1x_channel_put(gr3d->channel);
+ return err;
+}
+
+static int gr3d_exit(struct host1x_client *client)
+{
+ struct tegra_drm_client *drm = host1x_to_drm_client(client);
+ struct drm_device *dev = dev_get_drvdata(client->host);
+ struct gr3d *gr3d = to_gr3d(drm);
+ int err;
+
+ err = tegra_drm_unregister_client(dev->dev_private, drm);
+ if (err < 0)
+ return err;
+
+ pm_runtime_dont_use_autosuspend(client->dev);
+ pm_runtime_force_suspend(client->dev);
+
+ host1x_client_iommu_detach(client);
+ host1x_syncpt_put(client->syncpts[0]);
+ host1x_channel_put(gr3d->channel);
+
+ gr3d->channel = NULL;
+
+ return 0;
+}
+
+static const struct host1x_client_ops gr3d_client_ops = {
+ .init = gr3d_init,
+ .exit = gr3d_exit,
+};
+
+static int gr3d_open_channel(struct tegra_drm_client *client,
+ struct tegra_drm_context *context)
+{
+ struct gr3d *gr3d = to_gr3d(client);
+
+ context->channel = host1x_channel_get(gr3d->channel);
+ if (!context->channel)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void gr3d_close_channel(struct tegra_drm_context *context)
+{
+ host1x_channel_put(context->channel);
+}
+
+static int gr3d_is_addr_reg(struct device *dev, u32 class, u32 offset)
+{
+ struct gr3d *gr3d = dev_get_drvdata(dev);
+
+ switch (class) {
+ case HOST1X_CLASS_HOST1X:
+ if (offset == 0x2b)
+ return 1;
+
+ break;
+
+ case HOST1X_CLASS_GR3D:
+ if (offset >= GR3D_NUM_REGS)
+ break;
+
+ if (test_bit(offset, gr3d->addr_regs))
+ return 1;
+
+ break;
+ }
+
+ return 0;
+}
+
+static const struct tegra_drm_client_ops gr3d_ops = {
+ .open_channel = gr3d_open_channel,
+ .close_channel = gr3d_close_channel,
+ .is_addr_reg = gr3d_is_addr_reg,
+ .submit = tegra_drm_submit,
+};
+
+static const struct gr3d_soc tegra20_gr3d_soc = {
+ .version = 0x20,
+ .num_clocks = 1,
+ .num_resets = 2,
+};
+
+static const struct gr3d_soc tegra30_gr3d_soc = {
+ .version = 0x30,
+ .num_clocks = 2,
+ .num_resets = 4,
+};
+
+static const struct gr3d_soc tegra114_gr3d_soc = {
+ .version = 0x35,
+ .num_clocks = 1,
+ .num_resets = 2,
+};
+
+static const struct of_device_id tegra_gr3d_match[] = {
+ { .compatible = "nvidia,tegra114-gr3d", .data = &tegra114_gr3d_soc },
+ { .compatible = "nvidia,tegra30-gr3d", .data = &tegra30_gr3d_soc },
+ { .compatible = "nvidia,tegra20-gr3d", .data = &tegra20_gr3d_soc },
+ { }
+};
+MODULE_DEVICE_TABLE(of, tegra_gr3d_match);
+
+static const u32 gr3d_addr_regs[] = {
+ GR3D_IDX_ATTRIBUTE( 0),
+ GR3D_IDX_ATTRIBUTE( 1),
+ GR3D_IDX_ATTRIBUTE( 2),
+ GR3D_IDX_ATTRIBUTE( 3),
+ GR3D_IDX_ATTRIBUTE( 4),
+ GR3D_IDX_ATTRIBUTE( 5),
+ GR3D_IDX_ATTRIBUTE( 6),
+ GR3D_IDX_ATTRIBUTE( 7),
+ GR3D_IDX_ATTRIBUTE( 8),
+ GR3D_IDX_ATTRIBUTE( 9),
+ GR3D_IDX_ATTRIBUTE(10),
+ GR3D_IDX_ATTRIBUTE(11),
+ GR3D_IDX_ATTRIBUTE(12),
+ GR3D_IDX_ATTRIBUTE(13),
+ GR3D_IDX_ATTRIBUTE(14),
+ GR3D_IDX_ATTRIBUTE(15),
+ GR3D_IDX_INDEX_BASE,
+ GR3D_QR_ZTAG_ADDR,
+ GR3D_QR_CTAG_ADDR,
+ GR3D_QR_CZ_ADDR,
+ GR3D_TEX_TEX_ADDR( 0),
+ GR3D_TEX_TEX_ADDR( 1),
+ GR3D_TEX_TEX_ADDR( 2),
+ GR3D_TEX_TEX_ADDR( 3),
+ GR3D_TEX_TEX_ADDR( 4),
+ GR3D_TEX_TEX_ADDR( 5),
+ GR3D_TEX_TEX_ADDR( 6),
+ GR3D_TEX_TEX_ADDR( 7),
+ GR3D_TEX_TEX_ADDR( 8),
+ GR3D_TEX_TEX_ADDR( 9),
+ GR3D_TEX_TEX_ADDR(10),
+ GR3D_TEX_TEX_ADDR(11),
+ GR3D_TEX_TEX_ADDR(12),
+ GR3D_TEX_TEX_ADDR(13),
+ GR3D_TEX_TEX_ADDR(14),
+ GR3D_TEX_TEX_ADDR(15),
+ GR3D_DW_MEMORY_OUTPUT_ADDRESS,
+ GR3D_GLOBAL_SURFADDR( 0),
+ GR3D_GLOBAL_SURFADDR( 1),
+ GR3D_GLOBAL_SURFADDR( 2),
+ GR3D_GLOBAL_SURFADDR( 3),
+ GR3D_GLOBAL_SURFADDR( 4),
+ GR3D_GLOBAL_SURFADDR( 5),
+ GR3D_GLOBAL_SURFADDR( 6),
+ GR3D_GLOBAL_SURFADDR( 7),
+ GR3D_GLOBAL_SURFADDR( 8),
+ GR3D_GLOBAL_SURFADDR( 9),
+ GR3D_GLOBAL_SURFADDR(10),
+ GR3D_GLOBAL_SURFADDR(11),
+ GR3D_GLOBAL_SURFADDR(12),
+ GR3D_GLOBAL_SURFADDR(13),
+ GR3D_GLOBAL_SURFADDR(14),
+ GR3D_GLOBAL_SURFADDR(15),
+ GR3D_GLOBAL_SPILLSURFADDR,
+ GR3D_GLOBAL_SURFOVERADDR( 0),
+ GR3D_GLOBAL_SURFOVERADDR( 1),
+ GR3D_GLOBAL_SURFOVERADDR( 2),
+ GR3D_GLOBAL_SURFOVERADDR( 3),
+ GR3D_GLOBAL_SURFOVERADDR( 4),
+ GR3D_GLOBAL_SURFOVERADDR( 5),
+ GR3D_GLOBAL_SURFOVERADDR( 6),
+ GR3D_GLOBAL_SURFOVERADDR( 7),
+ GR3D_GLOBAL_SURFOVERADDR( 8),
+ GR3D_GLOBAL_SURFOVERADDR( 9),
+ GR3D_GLOBAL_SURFOVERADDR(10),
+ GR3D_GLOBAL_SURFOVERADDR(11),
+ GR3D_GLOBAL_SURFOVERADDR(12),
+ GR3D_GLOBAL_SURFOVERADDR(13),
+ GR3D_GLOBAL_SURFOVERADDR(14),
+ GR3D_GLOBAL_SURFOVERADDR(15),
+ GR3D_GLOBAL_SAMP01SURFADDR( 0),
+ GR3D_GLOBAL_SAMP01SURFADDR( 1),
+ GR3D_GLOBAL_SAMP01SURFADDR( 2),
+ GR3D_GLOBAL_SAMP01SURFADDR( 3),
+ GR3D_GLOBAL_SAMP01SURFADDR( 4),
+ GR3D_GLOBAL_SAMP01SURFADDR( 5),
+ GR3D_GLOBAL_SAMP01SURFADDR( 6),
+ GR3D_GLOBAL_SAMP01SURFADDR( 7),
+ GR3D_GLOBAL_SAMP01SURFADDR( 8),
+ GR3D_GLOBAL_SAMP01SURFADDR( 9),
+ GR3D_GLOBAL_SAMP01SURFADDR(10),
+ GR3D_GLOBAL_SAMP01SURFADDR(11),
+ GR3D_GLOBAL_SAMP01SURFADDR(12),
+ GR3D_GLOBAL_SAMP01SURFADDR(13),
+ GR3D_GLOBAL_SAMP01SURFADDR(14),
+ GR3D_GLOBAL_SAMP01SURFADDR(15),
+ GR3D_GLOBAL_SAMP23SURFADDR( 0),
+ GR3D_GLOBAL_SAMP23SURFADDR( 1),
+ GR3D_GLOBAL_SAMP23SURFADDR( 2),
+ GR3D_GLOBAL_SAMP23SURFADDR( 3),
+ GR3D_GLOBAL_SAMP23SURFADDR( 4),
+ GR3D_GLOBAL_SAMP23SURFADDR( 5),
+ GR3D_GLOBAL_SAMP23SURFADDR( 6),
+ GR3D_GLOBAL_SAMP23SURFADDR( 7),
+ GR3D_GLOBAL_SAMP23SURFADDR( 8),
+ GR3D_GLOBAL_SAMP23SURFADDR( 9),
+ GR3D_GLOBAL_SAMP23SURFADDR(10),
+ GR3D_GLOBAL_SAMP23SURFADDR(11),
+ GR3D_GLOBAL_SAMP23SURFADDR(12),
+ GR3D_GLOBAL_SAMP23SURFADDR(13),
+ GR3D_GLOBAL_SAMP23SURFADDR(14),
+ GR3D_GLOBAL_SAMP23SURFADDR(15),
+};
+
+static int gr3d_power_up_legacy_domain(struct device *dev, const char *name,
+ unsigned int id)
+{
+ struct gr3d *gr3d = dev_get_drvdata(dev);
+ struct reset_control *reset;
+ struct clk *clk;
+ unsigned int i;
+ int err;
+
+ /*
+ * The Tegra20 device tree doesn't specify a name for the 3D clock,
+ * and there is only one clock on Tegra20. Tegra30+ device trees
+ * always specify names for the clocks.
+ */
+ if (gr3d->nclocks == 1) {
+ if (id == TEGRA_POWERGATE_3D1)
+ return 0;
+
+ clk = gr3d->clocks[0].clk;
+ } else {
+ for (i = 0; i < gr3d->nclocks; i++) {
+ if (WARN_ON(!gr3d->clocks[i].id))
+ continue;
+
+ if (!strcmp(gr3d->clocks[i].id, name)) {
+ clk = gr3d->clocks[i].clk;
+ break;
+ }
+ }
+
+ if (WARN_ON(i == gr3d->nclocks))
+ return -EINVAL;
+ }
+
+ /*
+ * We use an array of resets, which includes the MC resets, and an MC
+ * reset shouldn't be asserted while the hardware is gated because MC
+ * flushing fails for gated hardware. Hence, for the legacy power
+ * domain, we request the individual reset separately.
+ */
+ reset = reset_control_get_exclusive_released(dev, name);
+ if (IS_ERR(reset))
+ return PTR_ERR(reset);
+
+ err = reset_control_acquire(reset);
+ if (err) {
+ dev_err(dev, "failed to acquire %s reset: %d\n", name, err);
+ } else {
+ err = tegra_powergate_sequence_power_up(id, clk, reset);
+ reset_control_release(reset);
+ }
+
+ reset_control_put(reset);
+ if (err)
+ return err;
+
+ /*
+ * tegra_powergate_sequence_power_up() leaves the clock enabled,
+ * whereas GENPD does not. Disable the clock here to keep the enable
+ * count balanced.
+ */
+ clk_disable_unprepare(clk);
+
+ return 0;
+}
+
+static void gr3d_del_link(void *link)
+{
+ device_link_del(link);
+}
+
+static int gr3d_init_power(struct device *dev, struct gr3d *gr3d)
+{
+ static const char * const opp_genpd_names[] = { "3d0", "3d1", NULL };
+ const u32 link_flags = DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME;
+ struct device **opp_virt_devs, *pd_dev;
+ struct device_link *link;
+ unsigned int i;
+ int err;
+
+ err = of_count_phandle_with_args(dev->of_node, "power-domains",
+ "#power-domain-cells");
+ if (err < 0) {
+ if (err != -ENOENT)
+ return err;
+
+ /*
+ * Older device trees don't use GENPD. In this case we need to
+ * power up the domains manually.
+ */
+ err = gr3d_power_up_legacy_domain(dev, "3d",
+ TEGRA_POWERGATE_3D);
+ if (err)
+ return err;
+
+ err = gr3d_power_up_legacy_domain(dev, "3d2",
+ TEGRA_POWERGATE_3D1);
+ if (err)
+ return err;
+
+ return 0;
+ }
+
+ /*
+ * The PM domain core automatically attaches a single power domain;
+ * if there is more than one it skips attaching completely. We have a
+ * single domain on Tegra20 and two domains on Tegra30+.
+ */
+ if (dev->pm_domain)
+ return 0;
+
+ err = devm_pm_opp_attach_genpd(dev, opp_genpd_names, &opp_virt_devs);
+ if (err)
+ return err;
+
+ for (i = 0; opp_genpd_names[i]; i++) {
+ pd_dev = opp_virt_devs[i];
+ if (!pd_dev) {
+ dev_err(dev, "failed to get %s power domain\n",
+ opp_genpd_names[i]);
+ return -EINVAL;
+ }
+
+ link = device_link_add(dev, pd_dev, link_flags);
+ if (!link) {
+ dev_err(dev, "failed to link to %s\n", dev_name(pd_dev));
+ return -EINVAL;
+ }
+
+ err = devm_add_action_or_reset(dev, gr3d_del_link, link);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int gr3d_get_clocks(struct device *dev, struct gr3d *gr3d)
+{
+ int err;
+
+ err = devm_clk_bulk_get_all(dev, &gr3d->clocks);
+ if (err < 0) {
+ dev_err(dev, "failed to get clock: %d\n", err);
+ return err;
+ }
+ gr3d->nclocks = err;
+
+ if (gr3d->nclocks != gr3d->soc->num_clocks) {
+ dev_err(dev, "invalid number of clocks: %u\n", gr3d->nclocks);
+ return -ENOENT;
+ }
+
+ return 0;
+}
+
+static int gr3d_get_resets(struct device *dev, struct gr3d *gr3d)
+{
+ int err;
+
+ gr3d->resets[RST_MC].id = "mc";
+ gr3d->resets[RST_MC2].id = "mc2";
+ gr3d->resets[RST_GR3D].id = "3d";
+ gr3d->resets[RST_GR3D2].id = "3d2";
+ gr3d->nresets = gr3d->soc->num_resets;
+
+ err = devm_reset_control_bulk_get_optional_exclusive_released(
+ dev, gr3d->nresets, gr3d->resets);
+ if (err) {
+ dev_err(dev, "failed to get reset: %d\n", err);
+ return err;
+ }
+
+ if (WARN_ON(!gr3d->resets[RST_GR3D].rstc) ||
+ WARN_ON(!gr3d->resets[RST_GR3D2].rstc && gr3d->nresets == 4))
+ return -ENOENT;
+
+ return 0;
+}
+
+static int gr3d_probe(struct platform_device *pdev)
+{
+ struct host1x_syncpt **syncpts;
+ struct gr3d *gr3d;
+ unsigned int i;
+ int err;
+
+ gr3d = devm_kzalloc(&pdev->dev, sizeof(*gr3d), GFP_KERNEL);
+ if (!gr3d)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, gr3d);
+
+ gr3d->soc = of_device_get_match_data(&pdev->dev);
+
+ syncpts = devm_kzalloc(&pdev->dev, sizeof(*syncpts), GFP_KERNEL);
+ if (!syncpts)
+ return -ENOMEM;
+
+ err = gr3d_get_clocks(&pdev->dev, gr3d);
+ if (err)
+ return err;
+
+ err = gr3d_get_resets(&pdev->dev, gr3d);
+ if (err)
+ return err;
+
+ err = gr3d_init_power(&pdev->dev, gr3d);
+ if (err)
+ return err;
+
+ INIT_LIST_HEAD(&gr3d->client.base.list);
+ gr3d->client.base.ops = &gr3d_client_ops;
+ gr3d->client.base.dev = &pdev->dev;
+ gr3d->client.base.class = HOST1X_CLASS_GR3D;
+ gr3d->client.base.syncpts = syncpts;
+ gr3d->client.base.num_syncpts = 1;
+
+ INIT_LIST_HEAD(&gr3d->client.list);
+ gr3d->client.version = gr3d->soc->version;
+ gr3d->client.ops = &gr3d_ops;
+
+ err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
+ if (err)
+ return err;
+
+ err = host1x_client_register(&gr3d->client.base);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to register host1x client: %d\n",
+ err);
+ return err;
+ }
+
+ /* initialize address register map */
+ for (i = 0; i < ARRAY_SIZE(gr3d_addr_regs); i++)
+ set_bit(gr3d_addr_regs[i], gr3d->addr_regs);
+
+ return 0;
+}
+
+static int gr3d_remove(struct platform_device *pdev)
+{
+ struct gr3d *gr3d = platform_get_drvdata(pdev);
+ int err;
+
+ err = host1x_client_unregister(&gr3d->client.base);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
+ err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int __maybe_unused gr3d_runtime_suspend(struct device *dev)
+{
+ struct gr3d *gr3d = dev_get_drvdata(dev);
+ int err;
+
+ host1x_channel_stop(gr3d->channel);
+
+ err = reset_control_bulk_assert(gr3d->nresets, gr3d->resets);
+ if (err) {
+ dev_err(dev, "failed to assert reset: %d\n", err);
+ return err;
+ }
+
+ usleep_range(10, 20);
+
+ /*
+ * Older device trees don't specify the MC resets, so power-gating
+ * can't be done safely in that case. Hence we keep the power ungated
+ * for older DTBs. For newer DTBs, GENPD performs the power-gating.
+ */
+
+ clk_bulk_disable_unprepare(gr3d->nclocks, gr3d->clocks);
+ reset_control_bulk_release(gr3d->nresets, gr3d->resets);
+
+ return 0;
+}
+
+static int __maybe_unused gr3d_runtime_resume(struct device *dev)
+{
+ struct gr3d *gr3d = dev_get_drvdata(dev);
+ int err;
+
+ err = reset_control_bulk_acquire(gr3d->nresets, gr3d->resets);
+ if (err) {
+ dev_err(dev, "failed to acquire reset: %d\n", err);
+ return err;
+ }
+
+ err = clk_bulk_prepare_enable(gr3d->nclocks, gr3d->clocks);
+ if (err) {
+ dev_err(dev, "failed to enable clock: %d\n", err);
+ goto release_reset;
+ }
+
+ err = reset_control_bulk_deassert(gr3d->nresets, gr3d->resets);
+ if (err) {
+ dev_err(dev, "failed to deassert reset: %d\n", err);
+ goto disable_clk;
+ }
+
+ return 0;
+
+disable_clk:
+ clk_bulk_disable_unprepare(gr3d->nclocks, gr3d->clocks);
+release_reset:
+ reset_control_bulk_release(gr3d->nresets, gr3d->resets);
+
+ return err;
+}
+
+static const struct dev_pm_ops tegra_gr3d_pm = {
+ SET_RUNTIME_PM_OPS(gr3d_runtime_suspend, gr3d_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+};
+
+struct platform_driver tegra_gr3d_driver = {
+ .driver = {
+ .name = "tegra-gr3d",
+ .of_match_table = tegra_gr3d_match,
+ .pm = &tegra_gr3d_pm,
+ },
+ .probe = gr3d_probe,
+ .remove = gr3d_remove,
+};
diff --git a/drivers/gpu/drm/tegra/gr3d.h b/drivers/gpu/drm/tegra/gr3d.h
new file mode 100644
index 000000000..ca2921b68
--- /dev/null
+++ b/drivers/gpu/drm/tegra/gr3d.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2013 NVIDIA Corporation
+ */
+
+#ifndef TEGRA_GR3D_H
+#define TEGRA_GR3D_H
+
+#define GR3D_IDX_ATTRIBUTE(x) (0x100 + (x) * 2)
+#define GR3D_IDX_INDEX_BASE 0x121
+#define GR3D_QR_ZTAG_ADDR 0x415
+#define GR3D_QR_CTAG_ADDR 0x417
+#define GR3D_QR_CZ_ADDR 0x419
+#define GR3D_TEX_TEX_ADDR(x) (0x710 + (x))
+#define GR3D_DW_MEMORY_OUTPUT_ADDRESS 0x904
+#define GR3D_GLOBAL_SURFADDR(x) (0xe00 + (x))
+#define GR3D_GLOBAL_SPILLSURFADDR 0xe2a
+#define GR3D_GLOBAL_SURFOVERADDR(x) (0xe30 + (x))
+#define GR3D_GLOBAL_SAMP01SURFADDR(x) (0xe50 + (x))
+#define GR3D_GLOBAL_SAMP23SURFADDR(x) (0xe60 + (x))
+
+#define GR3D_NUM_REGS 0xe88
+
+#endif
diff --git a/drivers/gpu/drm/tegra/hda.c b/drivers/gpu/drm/tegra/hda.c
new file mode 100644
index 000000000..94245a18a
--- /dev/null
+++ b/drivers/gpu/drm/tegra/hda.c
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright (C) 2019 NVIDIA Corporation
+ */
+
+#include <linux/bug.h>
+
+#include <sound/hda_verbs.h>
+
+#include "hda.h"
+
+void tegra_hda_parse_format(unsigned int format, struct tegra_hda_format *fmt)
+{
+ unsigned int mul, div, bits, channels;
+
+ if (format & AC_FMT_TYPE_NON_PCM)
+ fmt->pcm = false;
+ else
+ fmt->pcm = true;
+
+ if (format & AC_FMT_BASE_44K)
+ fmt->sample_rate = 44100;
+ else
+ fmt->sample_rate = 48000;
+
+ mul = (format & AC_FMT_MULT_MASK) >> AC_FMT_MULT_SHIFT;
+ div = (format & AC_FMT_DIV_MASK) >> AC_FMT_DIV_SHIFT;
+
+ /* multiply before dividing so fractional rates (e.g. x1/2) aren't truncated to zero */
+ fmt->sample_rate = fmt->sample_rate * (mul + 1) / (div + 1);
+
+ switch (format & AC_FMT_BITS_MASK) {
+ case AC_FMT_BITS_8:
+ fmt->bits = 8;
+ break;
+
+ case AC_FMT_BITS_16:
+ fmt->bits = 16;
+ break;
+
+ case AC_FMT_BITS_20:
+ fmt->bits = 20;
+ break;
+
+ case AC_FMT_BITS_24:
+ fmt->bits = 24;
+ break;
+
+ case AC_FMT_BITS_32:
+ fmt->bits = 32;
+ break;
+
+ default:
+ bits = (format & AC_FMT_BITS_MASK) >> AC_FMT_BITS_SHIFT;
+ WARN(1, "invalid number of bits: %#x\n", bits);
+ fmt->bits = 8;
+ break;
+ }
+
+ channels = (format & AC_FMT_CHAN_MASK) >> AC_FMT_CHAN_SHIFT;
+
+ /* channels are encoded as n - 1 */
+ fmt->channels = channels + 1;
+}
diff --git a/drivers/gpu/drm/tegra/hda.h b/drivers/gpu/drm/tegra/hda.h
new file mode 100644
index 000000000..77269955a
--- /dev/null
+++ b/drivers/gpu/drm/tegra/hda.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (C) 2019 NVIDIA Corporation
+ */
+
+#ifndef DRM_TEGRA_HDA_H
+#define DRM_TEGRA_HDA_H 1
+
+#include <linux/types.h>
+
+struct tegra_hda_format {
+ unsigned int sample_rate;
+ unsigned int channels;
+ unsigned int bits;
+ bool pcm;
+};
+
+void tegra_hda_parse_format(unsigned int format, struct tegra_hda_format *fmt);
+
+#endif
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
new file mode 100644
index 000000000..bf240767d
--- /dev/null
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -0,0 +1,1908 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/hdmi.h>
+#include <linux/math64.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/pm_opp.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+
+#include <soc/tegra/common.h>
+#include <sound/hdmi-codec.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_debugfs.h>
+#include <drm/drm_file.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
+
+#include "hda.h"
+#include "hdmi.h"
+#include "drm.h"
+#include "dc.h"
+#include "trace.h"
+
+#define HDMI_ELD_BUFFER_SIZE 96
+
+struct tmds_config {
+ unsigned int pclk;
+ u32 pll0;
+ u32 pll1;
+ u32 pe_current;
+ u32 drive_current;
+ u32 peak_current;
+};
+
+struct tegra_hdmi_config {
+ const struct tmds_config *tmds;
+ unsigned int num_tmds;
+
+ unsigned long fuse_override_offset;
+ u32 fuse_override_value;
+
+ bool has_sor_io_peak_current;
+ bool has_hda;
+ bool has_hbr;
+};
+
+struct tegra_hdmi {
+ struct host1x_client client;
+ struct tegra_output output;
+ struct device *dev;
+
+ struct regulator *hdmi;
+ struct regulator *pll;
+ struct regulator *vdd;
+
+ void __iomem *regs;
+ unsigned int irq;
+
+ struct clk *clk_parent;
+ struct clk *clk;
+ struct reset_control *rst;
+
+ const struct tegra_hdmi_config *config;
+
+ unsigned int audio_source;
+ struct tegra_hda_format format;
+
+ unsigned int pixel_clock;
+ bool stereo;
+ bool dvi;
+
+ struct drm_info_list *debugfs_files;
+
+ struct platform_device *audio_pdev;
+ struct mutex audio_lock;
+};
+
+static inline struct tegra_hdmi *
+host1x_client_to_hdmi(struct host1x_client *client)
+{
+ return container_of(client, struct tegra_hdmi, client);
+}
+
+static inline struct tegra_hdmi *to_hdmi(struct tegra_output *output)
+{
+ return container_of(output, struct tegra_hdmi, output);
+}
+
+#define HDMI_AUDIOCLK_FREQ 216000000
+#define HDMI_REKEY_DEFAULT 56
+
+enum {
+ AUTO = 0,
+ SPDIF,
+ HDA,
+};
+
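+/*
+ * HDMI registers are addressed by 32-bit word offset, hence the shift by
+ * two in the accessors below.
+ */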
+static inline u32 tegra_hdmi_readl(struct tegra_hdmi *hdmi,
+ unsigned int offset)
+{
+ u32 value = readl(hdmi->regs + (offset << 2));
+
+ trace_hdmi_readl(hdmi->dev, offset, value);
+
+ return value;
+}
+
+static inline void tegra_hdmi_writel(struct tegra_hdmi *hdmi, u32 value,
+ unsigned int offset)
+{
+ trace_hdmi_writel(hdmi->dev, offset, value);
+ writel(value, hdmi->regs + (offset << 2));
+}
+
+struct tegra_hdmi_audio_config {
+ unsigned int n;
+ unsigned int cts;
+ unsigned int aval;
+};
+
+static const struct tmds_config tegra20_tmds_config[] = {
+ { /* slow pixel clock modes */
+ .pclk = 27000000,
+ .pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) |
+ SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(0) |
+ SOR_PLL_TX_REG_LOAD(3),
+ .pll1 = SOR_PLL_TMDS_TERM_ENABLE,
+ .pe_current = PE_CURRENT0(PE_CURRENT_0_0_mA) |
+ PE_CURRENT1(PE_CURRENT_0_0_mA) |
+ PE_CURRENT2(PE_CURRENT_0_0_mA) |
+ PE_CURRENT3(PE_CURRENT_0_0_mA),
+ .drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_7_125_mA) |
+ DRIVE_CURRENT_LANE1(DRIVE_CURRENT_7_125_mA) |
+ DRIVE_CURRENT_LANE2(DRIVE_CURRENT_7_125_mA) |
+ DRIVE_CURRENT_LANE3(DRIVE_CURRENT_7_125_mA),
+ },
+ { /* high pixel clock modes */
+ .pclk = UINT_MAX,
+ .pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) |
+ SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(1) |
+ SOR_PLL_TX_REG_LOAD(3),
+ .pll1 = SOR_PLL_TMDS_TERM_ENABLE | SOR_PLL_PE_EN,
+ .pe_current = PE_CURRENT0(PE_CURRENT_6_0_mA) |
+ PE_CURRENT1(PE_CURRENT_6_0_mA) |
+ PE_CURRENT2(PE_CURRENT_6_0_mA) |
+ PE_CURRENT3(PE_CURRENT_6_0_mA),
+ .drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_7_125_mA) |
+ DRIVE_CURRENT_LANE1(DRIVE_CURRENT_7_125_mA) |
+ DRIVE_CURRENT_LANE2(DRIVE_CURRENT_7_125_mA) |
+ DRIVE_CURRENT_LANE3(DRIVE_CURRENT_7_125_mA),
+ },
+};
+
+static const struct tmds_config tegra30_tmds_config[] = {
+ { /* 480p modes */
+ .pclk = 27000000,
+ .pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) |
+ SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(0) |
+ SOR_PLL_TX_REG_LOAD(0),
+ .pll1 = SOR_PLL_TMDS_TERM_ENABLE,
+ .pe_current = PE_CURRENT0(PE_CURRENT_0_0_mA) |
+ PE_CURRENT1(PE_CURRENT_0_0_mA) |
+ PE_CURRENT2(PE_CURRENT_0_0_mA) |
+ PE_CURRENT3(PE_CURRENT_0_0_mA),
+ .drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_5_250_mA) |
+ DRIVE_CURRENT_LANE1(DRIVE_CURRENT_5_250_mA) |
+ DRIVE_CURRENT_LANE2(DRIVE_CURRENT_5_250_mA) |
+ DRIVE_CURRENT_LANE3(DRIVE_CURRENT_5_250_mA),
+ }, { /* 720p modes */
+ .pclk = 74250000,
+ .pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) |
+ SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(1) |
+ SOR_PLL_TX_REG_LOAD(0),
+ .pll1 = SOR_PLL_TMDS_TERM_ENABLE | SOR_PLL_PE_EN,
+ .pe_current = PE_CURRENT0(PE_CURRENT_5_0_mA) |
+ PE_CURRENT1(PE_CURRENT_5_0_mA) |
+ PE_CURRENT2(PE_CURRENT_5_0_mA) |
+ PE_CURRENT3(PE_CURRENT_5_0_mA),
+ .drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_5_250_mA) |
+ DRIVE_CURRENT_LANE1(DRIVE_CURRENT_5_250_mA) |
+ DRIVE_CURRENT_LANE2(DRIVE_CURRENT_5_250_mA) |
+ DRIVE_CURRENT_LANE3(DRIVE_CURRENT_5_250_mA),
+ }, { /* 1080p modes */
+ .pclk = UINT_MAX,
+ .pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) |
+ SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(3) |
+ SOR_PLL_TX_REG_LOAD(0),
+ .pll1 = SOR_PLL_TMDS_TERM_ENABLE | SOR_PLL_PE_EN,
+ .pe_current = PE_CURRENT0(PE_CURRENT_5_0_mA) |
+ PE_CURRENT1(PE_CURRENT_5_0_mA) |
+ PE_CURRENT2(PE_CURRENT_5_0_mA) |
+ PE_CURRENT3(PE_CURRENT_5_0_mA),
+ .drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_5_250_mA) |
+ DRIVE_CURRENT_LANE1(DRIVE_CURRENT_5_250_mA) |
+ DRIVE_CURRENT_LANE2(DRIVE_CURRENT_5_250_mA) |
+ DRIVE_CURRENT_LANE3(DRIVE_CURRENT_5_250_mA),
+ },
+};
+
+static const struct tmds_config tegra114_tmds_config[] = {
+ { /* 480p/576p / 25.2MHz/27MHz modes */
+ .pclk = 27000000,
+ .pll0 = SOR_PLL_ICHPMP(1) | SOR_PLL_BG_V17_S(3) |
+ SOR_PLL_VCOCAP(0) | SOR_PLL_RESISTORSEL,
+ .pll1 = SOR_PLL_LOADADJ(3) | SOR_PLL_TMDS_TERMADJ(0),
+ .pe_current = PE_CURRENT0(PE_CURRENT_0_mA_T114) |
+ PE_CURRENT1(PE_CURRENT_0_mA_T114) |
+ PE_CURRENT2(PE_CURRENT_0_mA_T114) |
+ PE_CURRENT3(PE_CURRENT_0_mA_T114),
+ .drive_current =
+ DRIVE_CURRENT_LANE0_T114(DRIVE_CURRENT_10_400_mA_T114) |
+ DRIVE_CURRENT_LANE1_T114(DRIVE_CURRENT_10_400_mA_T114) |
+ DRIVE_CURRENT_LANE2_T114(DRIVE_CURRENT_10_400_mA_T114) |
+ DRIVE_CURRENT_LANE3_T114(DRIVE_CURRENT_10_400_mA_T114),
+ .peak_current = PEAK_CURRENT_LANE0(PEAK_CURRENT_0_000_mA) |
+ PEAK_CURRENT_LANE1(PEAK_CURRENT_0_000_mA) |
+ PEAK_CURRENT_LANE2(PEAK_CURRENT_0_000_mA) |
+ PEAK_CURRENT_LANE3(PEAK_CURRENT_0_000_mA),
+ }, { /* 720p / 74.25MHz modes */
+ .pclk = 74250000,
+ .pll0 = SOR_PLL_ICHPMP(1) | SOR_PLL_BG_V17_S(3) |
+ SOR_PLL_VCOCAP(1) | SOR_PLL_RESISTORSEL,
+ .pll1 = SOR_PLL_PE_EN | SOR_PLL_LOADADJ(3) |
+ SOR_PLL_TMDS_TERMADJ(0),
+ .pe_current = PE_CURRENT0(PE_CURRENT_15_mA_T114) |
+ PE_CURRENT1(PE_CURRENT_15_mA_T114) |
+ PE_CURRENT2(PE_CURRENT_15_mA_T114) |
+ PE_CURRENT3(PE_CURRENT_15_mA_T114),
+ .drive_current =
+ DRIVE_CURRENT_LANE0_T114(DRIVE_CURRENT_10_400_mA_T114) |
+ DRIVE_CURRENT_LANE1_T114(DRIVE_CURRENT_10_400_mA_T114) |
+ DRIVE_CURRENT_LANE2_T114(DRIVE_CURRENT_10_400_mA_T114) |
+ DRIVE_CURRENT_LANE3_T114(DRIVE_CURRENT_10_400_mA_T114),
+ .peak_current = PEAK_CURRENT_LANE0(PEAK_CURRENT_0_000_mA) |
+ PEAK_CURRENT_LANE1(PEAK_CURRENT_0_000_mA) |
+ PEAK_CURRENT_LANE2(PEAK_CURRENT_0_000_mA) |
+ PEAK_CURRENT_LANE3(PEAK_CURRENT_0_000_mA),
+ }, { /* 1080p / 148.5MHz modes */
+ .pclk = 148500000,
+ .pll0 = SOR_PLL_ICHPMP(1) | SOR_PLL_BG_V17_S(3) |
+ SOR_PLL_VCOCAP(3) | SOR_PLL_RESISTORSEL,
+ .pll1 = SOR_PLL_PE_EN | SOR_PLL_LOADADJ(3) |
+ SOR_PLL_TMDS_TERMADJ(0),
+ .pe_current = PE_CURRENT0(PE_CURRENT_10_mA_T114) |
+ PE_CURRENT1(PE_CURRENT_10_mA_T114) |
+ PE_CURRENT2(PE_CURRENT_10_mA_T114) |
+ PE_CURRENT3(PE_CURRENT_10_mA_T114),
+ .drive_current =
+ DRIVE_CURRENT_LANE0_T114(DRIVE_CURRENT_12_400_mA_T114) |
+ DRIVE_CURRENT_LANE1_T114(DRIVE_CURRENT_12_400_mA_T114) |
+ DRIVE_CURRENT_LANE2_T114(DRIVE_CURRENT_12_400_mA_T114) |
+ DRIVE_CURRENT_LANE3_T114(DRIVE_CURRENT_12_400_mA_T114),
+ .peak_current = PEAK_CURRENT_LANE0(PEAK_CURRENT_0_000_mA) |
+ PEAK_CURRENT_LANE1(PEAK_CURRENT_0_000_mA) |
+ PEAK_CURRENT_LANE2(PEAK_CURRENT_0_000_mA) |
+ PEAK_CURRENT_LANE3(PEAK_CURRENT_0_000_mA),
+ }, { /* 225/297MHz modes */
+ .pclk = UINT_MAX,
+ .pll0 = SOR_PLL_ICHPMP(1) | SOR_PLL_BG_V17_S(3) |
+ SOR_PLL_VCOCAP(0xf) | SOR_PLL_RESISTORSEL,
+ .pll1 = SOR_PLL_LOADADJ(3) | SOR_PLL_TMDS_TERMADJ(7)
+ | SOR_PLL_TMDS_TERM_ENABLE,
+ .pe_current = PE_CURRENT0(PE_CURRENT_0_mA_T114) |
+ PE_CURRENT1(PE_CURRENT_0_mA_T114) |
+ PE_CURRENT2(PE_CURRENT_0_mA_T114) |
+ PE_CURRENT3(PE_CURRENT_0_mA_T114),
+ .drive_current =
+ DRIVE_CURRENT_LANE0_T114(DRIVE_CURRENT_25_200_mA_T114) |
+ DRIVE_CURRENT_LANE1_T114(DRIVE_CURRENT_25_200_mA_T114) |
+ DRIVE_CURRENT_LANE2_T114(DRIVE_CURRENT_25_200_mA_T114) |
+ DRIVE_CURRENT_LANE3_T114(DRIVE_CURRENT_19_200_mA_T114),
+ .peak_current = PEAK_CURRENT_LANE0(PEAK_CURRENT_3_000_mA) |
+ PEAK_CURRENT_LANE1(PEAK_CURRENT_3_000_mA) |
+ PEAK_CURRENT_LANE2(PEAK_CURRENT_3_000_mA) |
+ PEAK_CURRENT_LANE3(PEAK_CURRENT_0_800_mA),
+ },
+};
+
+static const struct tmds_config tegra124_tmds_config[] = {
+ { /* 480p/576p / 25.2MHz/27MHz modes */
+ .pclk = 27000000,
+ .pll0 = SOR_PLL_ICHPMP(1) | SOR_PLL_BG_V17_S(3) |
+ SOR_PLL_VCOCAP(0) | SOR_PLL_RESISTORSEL,
+ .pll1 = SOR_PLL_LOADADJ(3) | SOR_PLL_TMDS_TERMADJ(0),
+ .pe_current = PE_CURRENT0(PE_CURRENT_0_mA_T114) |
+ PE_CURRENT1(PE_CURRENT_0_mA_T114) |
+ PE_CURRENT2(PE_CURRENT_0_mA_T114) |
+ PE_CURRENT3(PE_CURRENT_0_mA_T114),
+ .drive_current =
+ DRIVE_CURRENT_LANE0_T114(DRIVE_CURRENT_10_400_mA_T114) |
+ DRIVE_CURRENT_LANE1_T114(DRIVE_CURRENT_10_400_mA_T114) |
+ DRIVE_CURRENT_LANE2_T114(DRIVE_CURRENT_10_400_mA_T114) |
+ DRIVE_CURRENT_LANE3_T114(DRIVE_CURRENT_10_400_mA_T114),
+ .peak_current = PEAK_CURRENT_LANE0(PEAK_CURRENT_0_000_mA) |
+ PEAK_CURRENT_LANE1(PEAK_CURRENT_0_000_mA) |
+ PEAK_CURRENT_LANE2(PEAK_CURRENT_0_000_mA) |
+ PEAK_CURRENT_LANE3(PEAK_CURRENT_0_000_mA),
+ }, { /* 720p / 74.25MHz modes */
+ .pclk = 74250000,
+ .pll0 = SOR_PLL_ICHPMP(1) | SOR_PLL_BG_V17_S(3) |
+ SOR_PLL_VCOCAP(1) | SOR_PLL_RESISTORSEL,
+ .pll1 = SOR_PLL_PE_EN | SOR_PLL_LOADADJ(3) |
+ SOR_PLL_TMDS_TERMADJ(0),
+ .pe_current = PE_CURRENT0(PE_CURRENT_15_mA_T114) |
+ PE_CURRENT1(PE_CURRENT_15_mA_T114) |
+ PE_CURRENT2(PE_CURRENT_15_mA_T114) |
+ PE_CURRENT3(PE_CURRENT_15_mA_T114),
+ .drive_current =
+ DRIVE_CURRENT_LANE0_T114(DRIVE_CURRENT_10_400_mA_T114) |
+ DRIVE_CURRENT_LANE1_T114(DRIVE_CURRENT_10_400_mA_T114) |
+ DRIVE_CURRENT_LANE2_T114(DRIVE_CURRENT_10_400_mA_T114) |
+ DRIVE_CURRENT_LANE3_T114(DRIVE_CURRENT_10_400_mA_T114),
+ .peak_current = PEAK_CURRENT_LANE0(PEAK_CURRENT_0_000_mA) |
+ PEAK_CURRENT_LANE1(PEAK_CURRENT_0_000_mA) |
+ PEAK_CURRENT_LANE2(PEAK_CURRENT_0_000_mA) |
+ PEAK_CURRENT_LANE3(PEAK_CURRENT_0_000_mA),
+ }, { /* 1080p / 148.5MHz modes */
+ .pclk = 148500000,
+ .pll0 = SOR_PLL_ICHPMP(1) | SOR_PLL_BG_V17_S(3) |
+ SOR_PLL_VCOCAP(3) | SOR_PLL_RESISTORSEL,
+ .pll1 = SOR_PLL_PE_EN | SOR_PLL_LOADADJ(3) |
+ SOR_PLL_TMDS_TERMADJ(0),
+ .pe_current = PE_CURRENT0(PE_CURRENT_10_mA_T114) |
+ PE_CURRENT1(PE_CURRENT_10_mA_T114) |
+ PE_CURRENT2(PE_CURRENT_10_mA_T114) |
+ PE_CURRENT3(PE_CURRENT_10_mA_T114),
+ .drive_current =
+ DRIVE_CURRENT_LANE0_T114(DRIVE_CURRENT_12_400_mA_T114) |
+ DRIVE_CURRENT_LANE1_T114(DRIVE_CURRENT_12_400_mA_T114) |
+ DRIVE_CURRENT_LANE2_T114(DRIVE_CURRENT_12_400_mA_T114) |
+ DRIVE_CURRENT_LANE3_T114(DRIVE_CURRENT_12_400_mA_T114),
+ .peak_current = PEAK_CURRENT_LANE0(PEAK_CURRENT_0_000_mA) |
+ PEAK_CURRENT_LANE1(PEAK_CURRENT_0_000_mA) |
+ PEAK_CURRENT_LANE2(PEAK_CURRENT_0_000_mA) |
+ PEAK_CURRENT_LANE3(PEAK_CURRENT_0_000_mA),
+ }, { /* 225/297MHz modes */
+ .pclk = UINT_MAX,
+ .pll0 = SOR_PLL_ICHPMP(1) | SOR_PLL_BG_V17_S(3) |
+ SOR_PLL_VCOCAP(0xf) | SOR_PLL_RESISTORSEL,
+ .pll1 = SOR_PLL_LOADADJ(3) | SOR_PLL_TMDS_TERMADJ(7)
+ | SOR_PLL_TMDS_TERM_ENABLE,
+ .pe_current = PE_CURRENT0(PE_CURRENT_0_mA_T114) |
+ PE_CURRENT1(PE_CURRENT_0_mA_T114) |
+ PE_CURRENT2(PE_CURRENT_0_mA_T114) |
+ PE_CURRENT3(PE_CURRENT_0_mA_T114),
+ .drive_current =
+ DRIVE_CURRENT_LANE0_T114(DRIVE_CURRENT_25_200_mA_T114) |
+ DRIVE_CURRENT_LANE1_T114(DRIVE_CURRENT_25_200_mA_T114) |
+ DRIVE_CURRENT_LANE2_T114(DRIVE_CURRENT_25_200_mA_T114) |
+ DRIVE_CURRENT_LANE3_T114(DRIVE_CURRENT_19_200_mA_T114),
+ .peak_current = PEAK_CURRENT_LANE0(PEAK_CURRENT_3_000_mA) |
+ PEAK_CURRENT_LANE1(PEAK_CURRENT_3_000_mA) |
+ PEAK_CURRENT_LANE2(PEAK_CURRENT_3_000_mA) |
+ PEAK_CURRENT_LANE3(PEAK_CURRENT_0_800_mA),
+ },
+};
+
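+/*
+ * Audio can be reconfigured concurrently by the HDA codec (via the
+ * SCRATCH0 interrupt) and by the hdmi-codec/KMS paths, so hold the audio
+ * lock and keep the interrupt disabled for the duration of the update.
+ */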
+static void tegra_hdmi_audio_lock(struct tegra_hdmi *hdmi)
+{
+ mutex_lock(&hdmi->audio_lock);
+ disable_irq(hdmi->irq);
+}
+
+static void tegra_hdmi_audio_unlock(struct tegra_hdmi *hdmi)
+{
+ enable_irq(hdmi->irq);
+ mutex_unlock(&hdmi->audio_lock);
+}
+
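+/*
+ * Find an (N, CTS) pair for the audio clock regeneration packets. The HDMI
+ * specification recommends N = 128 * fs / 1000 and requires that N stay
+ * within the range corresponding to regeneration rates between 300 Hz and
+ * 1500 Hz, which is where the bounds below come from.
+ */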
+static int
+tegra_hdmi_get_audio_config(unsigned int audio_freq, unsigned int pix_clock,
+ struct tegra_hdmi_audio_config *config)
+{
+ const unsigned int afreq = 128 * audio_freq;
+ const unsigned int min_n = afreq / 1500;
+ const unsigned int max_n = afreq / 300;
+ const unsigned int ideal_n = afreq / 1000;
+ int64_t min_err = (uint64_t)-1 >> 1;
+ unsigned int min_delta = -1;
+ int n;
+
+ memset(config, 0, sizeof(*config));
+ config->n = -1;
+
+ for (n = min_n; n <= max_n; n++) {
+ uint64_t cts_f, aval_f;
+ unsigned int delta;
+ int64_t cts, err;
+
+		/* Compute aval in 48.16 fixed point */
+		aval_f = ((int64_t)24000000 << 16) * n;
+		do_div(aval_f, afreq);
+		/* aval must divide evenly, without any remainder */
+ if (aval_f & 0xFFFF)
+ continue;
+
+ /* Compute cts in 48.16 fixed point */
+ cts_f = ((int64_t)pix_clock << 16) * n;
+ do_div(cts_f, afreq);
+ /* Round it to the nearest integer */
+ cts = (cts_f & ~0xFFFF) + ((cts_f & BIT(15)) << 1);
+
+ delta = abs(n - ideal_n);
+
+ /* Compute the absolute error */
+ err = abs((int64_t)cts_f - cts);
+ if (err < min_err || (err == min_err && delta < min_delta)) {
+ config->n = n;
+ config->cts = cts >> 16;
+ config->aval = aval_f >> 16;
+ min_delta = delta;
+ min_err = err;
+ }
+ }
+
+ return config->n != -1 ? 0 : -EINVAL;
+}
+
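+/*
+ * Program the per-rate AUDIO_FS windows: for each supported sample rate,
+ * bracket the expected ratio of the 216 MHz audio clock to the 128 * fs
+ * rate by a small tolerance (delta) so that the hardware can match the
+ * incoming audio clock against one of the supported rates.
+ */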
+static void tegra_hdmi_setup_audio_fs_tables(struct tegra_hdmi *hdmi)
+{
+ const unsigned int freqs[] = {
+ 32000, 44100, 48000, 88200, 96000, 176400, 192000
+ };
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(freqs); i++) {
+ unsigned int f = freqs[i];
+ unsigned int eight_half;
+ unsigned int delta;
+ u32 value;
+
+ if (f > 96000)
+ delta = 2;
+ else if (f > 48000)
+ delta = 6;
+ else
+ delta = 9;
+
+ eight_half = (8 * HDMI_AUDIOCLK_FREQ) / (f * 128);
+ value = AUDIO_FS_LOW(eight_half - delta) |
+ AUDIO_FS_HIGH(eight_half + delta);
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_FS(i));
+ }
+}
+
+static void tegra_hdmi_write_aval(struct tegra_hdmi *hdmi, u32 value)
+{
+ static const struct {
+ unsigned int sample_rate;
+ unsigned int offset;
+ } regs[] = {
+ { 32000, HDMI_NV_PDISP_SOR_AUDIO_AVAL_0320 },
+ { 44100, HDMI_NV_PDISP_SOR_AUDIO_AVAL_0441 },
+ { 48000, HDMI_NV_PDISP_SOR_AUDIO_AVAL_0480 },
+ { 88200, HDMI_NV_PDISP_SOR_AUDIO_AVAL_0882 },
+ { 96000, HDMI_NV_PDISP_SOR_AUDIO_AVAL_0960 },
+ { 176400, HDMI_NV_PDISP_SOR_AUDIO_AVAL_1764 },
+ { 192000, HDMI_NV_PDISP_SOR_AUDIO_AVAL_1920 },
+ };
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(regs); i++) {
+ if (regs[i].sample_rate == hdmi->format.sample_rate) {
+ tegra_hdmi_writel(hdmi, value, regs[i].offset);
+ break;
+ }
+ }
+}
+
+static int tegra_hdmi_setup_audio(struct tegra_hdmi *hdmi)
+{
+ struct tegra_hdmi_audio_config config;
+ u32 source, value;
+ int err;
+
+ switch (hdmi->audio_source) {
+ case HDA:
+ if (hdmi->config->has_hda)
+ source = SOR_AUDIO_CNTRL0_SOURCE_SELECT_HDAL;
+ else
+ return -EINVAL;
+
+ break;
+
+ case SPDIF:
+ if (hdmi->config->has_hda)
+ source = SOR_AUDIO_CNTRL0_SOURCE_SELECT_SPDIF;
+ else
+ source = AUDIO_CNTRL0_SOURCE_SELECT_SPDIF;
+ break;
+
+ default:
+ if (hdmi->config->has_hda)
+ source = SOR_AUDIO_CNTRL0_SOURCE_SELECT_AUTO;
+ else
+ source = AUDIO_CNTRL0_SOURCE_SELECT_AUTO;
+ break;
+ }
+
+ /*
+ * Tegra30 and later use a slightly modified version of the register
+	 * layout to accommodate changes related to supporting HDA as the
+ * audio input source for HDMI. The source select field has moved to
+ * the SOR_AUDIO_CNTRL0 register, but the error tolerance and frames
+ * per block fields remain in the AUDIO_CNTRL0 register.
+ */
+ if (hdmi->config->has_hda) {
+ /*
+ * Inject null samples into the audio FIFO for every frame in
+ * which the codec did not receive any samples. This applies
+ * to stereo LPCM only.
+ *
+ * XXX: This seems to be a remnant of MCP days when this was
+ * used to work around issues with monitors not being able to
+ * play back system startup sounds early. It is possibly not
+ * needed on Linux at all.
+ */
+ if (hdmi->format.channels == 2)
+ value = SOR_AUDIO_CNTRL0_INJECT_NULLSMPL;
+ else
+ value = 0;
+
+ value |= source;
+
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_AUDIO_CNTRL0);
+ }
+
+ /*
+ * On Tegra20, HDA is not a supported audio source and the source
+ * select field is part of the AUDIO_CNTRL0 register.
+ */
+ value = AUDIO_CNTRL0_FRAMES_PER_BLOCK(0xc0) |
+ AUDIO_CNTRL0_ERROR_TOLERANCE(6);
+
+ if (!hdmi->config->has_hda)
+ value |= source;
+
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_CNTRL0);
+
+ /*
+ * Advertise support for High Bit-Rate on Tegra114 and later.
+ */
+ if (hdmi->config->has_hbr) {
+ value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_SOR_AUDIO_SPARE0);
+ value |= SOR_AUDIO_SPARE0_HBR_ENABLE;
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_AUDIO_SPARE0);
+ }
+
+ err = tegra_hdmi_get_audio_config(hdmi->format.sample_rate,
+ hdmi->pixel_clock, &config);
+ if (err < 0) {
+ dev_err(hdmi->dev,
+ "cannot set audio to %u Hz at %u Hz pixel clock\n",
+ hdmi->format.sample_rate, hdmi->pixel_clock);
+ return err;
+ }
+
+ dev_dbg(hdmi->dev, "audio: pixclk=%u, n=%u, cts=%u, aval=%u\n",
+ hdmi->pixel_clock, config.n, config.cts, config.aval);
+
+ tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_HDMI_ACR_CTRL);
+
+ value = AUDIO_N_RESETF | AUDIO_N_GENERATE_ALTERNATE |
+ AUDIO_N_VALUE(config.n - 1);
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_N);
+
+ tegra_hdmi_writel(hdmi, ACR_SUBPACK_N(config.n) | ACR_ENABLE,
+ HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_HIGH);
+
+ tegra_hdmi_writel(hdmi, ACR_SUBPACK_CTS(config.cts),
+ HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_LOW);
+
+ value = SPARE_HW_CTS | SPARE_FORCE_SW_CTS | SPARE_CTS_RESET_VAL(1);
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_SPARE);
+
+ value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_AUDIO_N);
+ value &= ~AUDIO_N_RESETF;
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_N);
+
+ if (hdmi->config->has_hda)
+ tegra_hdmi_write_aval(hdmi, config.aval);
+
+ tegra_hdmi_setup_audio_fs_tables(hdmi);
+
+ return 0;
+}
+
+static void tegra_hdmi_disable_audio(struct tegra_hdmi *hdmi)
+{
+ u32 value;
+
+ value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+ value &= ~GENERIC_CTRL_AUDIO;
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+}
+
+static void tegra_hdmi_enable_audio(struct tegra_hdmi *hdmi)
+{
+ u32 value;
+
+ value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+ value |= GENERIC_CTRL_AUDIO;
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+}
+
+static void tegra_hdmi_write_eld(struct tegra_hdmi *hdmi)
+{
+ size_t length = drm_eld_size(hdmi->output.connector.eld), i;
+ u32 value;
+
+ for (i = 0; i < length; i++)
+ tegra_hdmi_writel(hdmi, i << 8 | hdmi->output.connector.eld[i],
+ HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR);
+
+ /*
+ * The HDA codec will always report an ELD buffer size of 96 bytes and
+ * the HDA codec driver will check that each byte read from the buffer
+ * is valid. Therefore every byte must be written, even if no 96 bytes
+	 * is valid. Therefore every byte must be written, even if fewer than
+	 * 96 bytes were parsed from the EDID.
+ for (i = length; i < HDMI_ELD_BUFFER_SIZE; i++)
+ tegra_hdmi_writel(hdmi, i << 8 | 0,
+ HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR);
+
+ value = SOR_AUDIO_HDA_PRESENSE_VALID | SOR_AUDIO_HDA_PRESENSE_PRESENT;
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE);
+}
+
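+/* pack up to four bytes into a 32-bit word, least significant byte first */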
+static inline u32 tegra_hdmi_subpack(const u8 *ptr, size_t size)
+{
+ u32 value = 0;
+ size_t i;
+
+ for (i = size; i > 0; i--)
+ value = (value << 8) | ptr[i - 1];
+
+ return value;
+}
+
+static void tegra_hdmi_write_infopack(struct tegra_hdmi *hdmi, const void *data,
+ size_t size)
+{
+ const u8 *ptr = data;
+ unsigned long offset;
+ size_t i, j;
+ u32 value;
+
+ switch (ptr[0]) {
+ case HDMI_INFOFRAME_TYPE_AVI:
+ offset = HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_HEADER;
+ break;
+
+ case HDMI_INFOFRAME_TYPE_AUDIO:
+ offset = HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER;
+ break;
+
+ case HDMI_INFOFRAME_TYPE_VENDOR:
+ offset = HDMI_NV_PDISP_HDMI_GENERIC_HEADER;
+ break;
+
+ default:
+ dev_err(hdmi->dev, "unsupported infoframe type: %02x\n",
+ ptr[0]);
+ return;
+ }
+
+ value = INFOFRAME_HEADER_TYPE(ptr[0]) |
+ INFOFRAME_HEADER_VERSION(ptr[1]) |
+ INFOFRAME_HEADER_LEN(ptr[2]);
+ tegra_hdmi_writel(hdmi, value, offset);
+ offset++;
+
+ /*
+ * Each subpack contains 7 bytes, divided into:
+ * - subpack_low: bytes 0 - 3
+ * - subpack_high: bytes 4 - 6 (with byte 7 padded to 0x00)
+ */
+ for (i = 3, j = 0; i < size; i += 7, j += 8) {
+ size_t rem = size - i, num = min_t(size_t, rem, 4);
+
+ value = tegra_hdmi_subpack(&ptr[i], num);
+ tegra_hdmi_writel(hdmi, value, offset++);
+
+ num = min_t(size_t, rem - num, 3);
+
+ value = tegra_hdmi_subpack(&ptr[i + 4], num);
+ tegra_hdmi_writel(hdmi, value, offset++);
+ }
+}
+
+static void tegra_hdmi_setup_avi_infoframe(struct tegra_hdmi *hdmi,
+ struct drm_display_mode *mode)
+{
+ struct hdmi_avi_infoframe frame;
+ u8 buffer[17];
+ ssize_t err;
+
+ err = drm_hdmi_avi_infoframe_from_display_mode(&frame,
+ &hdmi->output.connector, mode);
+ if (err < 0) {
+ dev_err(hdmi->dev, "failed to setup AVI infoframe: %zd\n", err);
+ return;
+ }
+
+ err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
+ if (err < 0) {
+ dev_err(hdmi->dev, "failed to pack AVI infoframe: %zd\n", err);
+ return;
+ }
+
+ tegra_hdmi_write_infopack(hdmi, buffer, err);
+}
+
+static void tegra_hdmi_disable_avi_infoframe(struct tegra_hdmi *hdmi)
+{
+ u32 value;
+
+ value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
+ value &= ~INFOFRAME_CTRL_ENABLE;
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
+}
+
+static void tegra_hdmi_enable_avi_infoframe(struct tegra_hdmi *hdmi)
+{
+ u32 value;
+
+ value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
+ value |= INFOFRAME_CTRL_ENABLE;
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
+}
+
+static void tegra_hdmi_setup_audio_infoframe(struct tegra_hdmi *hdmi)
+{
+ struct hdmi_audio_infoframe frame;
+ u8 buffer[14];
+ ssize_t err;
+
+ err = hdmi_audio_infoframe_init(&frame);
+ if (err < 0) {
+ dev_err(hdmi->dev, "failed to setup audio infoframe: %zd\n",
+ err);
+ return;
+ }
+
+ frame.channels = hdmi->format.channels;
+
+ err = hdmi_audio_infoframe_pack(&frame, buffer, sizeof(buffer));
+ if (err < 0) {
+ dev_err(hdmi->dev, "failed to pack audio infoframe: %zd\n",
+ err);
+ return;
+ }
+
+ /*
+ * The audio infoframe has only one set of subpack registers, so the
+ * infoframe needs to be truncated. One set of subpack registers can
+	 * contain 7 bytes. Including the 3-byte header, only the first 10
+ * bytes can be programmed.
+ */
+ tegra_hdmi_write_infopack(hdmi, buffer, min_t(size_t, 10, err));
+}
+
+static void tegra_hdmi_disable_audio_infoframe(struct tegra_hdmi *hdmi)
+{
+ u32 value;
+
+ value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
+ value &= ~INFOFRAME_CTRL_ENABLE;
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
+}
+
+static void tegra_hdmi_enable_audio_infoframe(struct tegra_hdmi *hdmi)
+{
+ u32 value;
+
+ value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
+ value |= INFOFRAME_CTRL_ENABLE;
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
+}
+
+static void tegra_hdmi_setup_stereo_infoframe(struct tegra_hdmi *hdmi)
+{
+ struct hdmi_vendor_infoframe frame;
+ u8 buffer[10];
+ ssize_t err;
+
+ hdmi_vendor_infoframe_init(&frame);
+ frame.s3d_struct = HDMI_3D_STRUCTURE_FRAME_PACKING;
+
+ err = hdmi_vendor_infoframe_pack(&frame, buffer, sizeof(buffer));
+ if (err < 0) {
+ dev_err(hdmi->dev, "failed to pack vendor infoframe: %zd\n",
+ err);
+ return;
+ }
+
+ tegra_hdmi_write_infopack(hdmi, buffer, err);
+}
+
+static void tegra_hdmi_disable_stereo_infoframe(struct tegra_hdmi *hdmi)
+{
+ u32 value;
+
+ value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+ value &= ~GENERIC_CTRL_ENABLE;
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+}
+
+static void tegra_hdmi_enable_stereo_infoframe(struct tegra_hdmi *hdmi)
+{
+ u32 value;
+
+ value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+ value |= GENERIC_CTRL_ENABLE;
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+}
+
+static void tegra_hdmi_setup_tmds(struct tegra_hdmi *hdmi,
+ const struct tmds_config *tmds)
+{
+ u32 value;
+
+ tegra_hdmi_writel(hdmi, tmds->pll0, HDMI_NV_PDISP_SOR_PLL0);
+ tegra_hdmi_writel(hdmi, tmds->pll1, HDMI_NV_PDISP_SOR_PLL1);
+ tegra_hdmi_writel(hdmi, tmds->pe_current, HDMI_NV_PDISP_PE_CURRENT);
+
+ tegra_hdmi_writel(hdmi, tmds->drive_current,
+ HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT);
+
+ value = tegra_hdmi_readl(hdmi, hdmi->config->fuse_override_offset);
+ value |= hdmi->config->fuse_override_value;
+ tegra_hdmi_writel(hdmi, value, hdmi->config->fuse_override_offset);
+
+ if (hdmi->config->has_sor_io_peak_current)
+ tegra_hdmi_writel(hdmi, tmds->peak_current,
+ HDMI_NV_PDISP_SOR_IO_PEAK_CURRENT);
+}
+
+static int tegra_hdmi_reconfigure_audio(struct tegra_hdmi *hdmi)
+{
+ int err;
+
+ err = tegra_hdmi_setup_audio(hdmi);
+ if (err < 0) {
+ tegra_hdmi_disable_audio_infoframe(hdmi);
+ tegra_hdmi_disable_audio(hdmi);
+ } else {
+ tegra_hdmi_setup_audio_infoframe(hdmi);
+ tegra_hdmi_enable_audio_infoframe(hdmi);
+ tegra_hdmi_enable_audio(hdmi);
+ }
+
+ return err;
+}
+
+static bool tegra_output_is_hdmi(struct tegra_output *output)
+{
+ struct edid *edid;
+
+ if (!output->connector.edid_blob_ptr)
+ return false;
+
+ edid = (struct edid *)output->connector.edid_blob_ptr->data;
+
+ return drm_detect_hdmi_monitor(edid);
+}
+
+static enum drm_connector_status
+tegra_hdmi_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct tegra_output *output = connector_to_output(connector);
+ struct tegra_hdmi *hdmi = to_hdmi(output);
+ enum drm_connector_status status;
+
+ status = tegra_output_connector_detect(connector, force);
+ if (status == connector_status_connected)
+ return status;
+
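+	/* the monitor is gone, so signal its absence to the HDA codec */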
+ tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE);
+ return status;
+}
+
+#define DEBUGFS_REG32(_name) { .name = #_name, .offset = _name }
+
+static const struct debugfs_reg32 tegra_hdmi_regs[] = {
+ DEBUGFS_REG32(HDMI_CTXSW),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_STATE0),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_STATE1),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_STATE2),
+ DEBUGFS_REG32(HDMI_NV_PDISP_RG_HDCP_AN_MSB),
+ DEBUGFS_REG32(HDMI_NV_PDISP_RG_HDCP_AN_LSB),
+ DEBUGFS_REG32(HDMI_NV_PDISP_RG_HDCP_CN_MSB),
+ DEBUGFS_REG32(HDMI_NV_PDISP_RG_HDCP_CN_LSB),
+ DEBUGFS_REG32(HDMI_NV_PDISP_RG_HDCP_AKSV_MSB),
+ DEBUGFS_REG32(HDMI_NV_PDISP_RG_HDCP_AKSV_LSB),
+ DEBUGFS_REG32(HDMI_NV_PDISP_RG_HDCP_BKSV_MSB),
+ DEBUGFS_REG32(HDMI_NV_PDISP_RG_HDCP_BKSV_LSB),
+ DEBUGFS_REG32(HDMI_NV_PDISP_RG_HDCP_CKSV_MSB),
+ DEBUGFS_REG32(HDMI_NV_PDISP_RG_HDCP_CKSV_LSB),
+ DEBUGFS_REG32(HDMI_NV_PDISP_RG_HDCP_DKSV_MSB),
+ DEBUGFS_REG32(HDMI_NV_PDISP_RG_HDCP_DKSV_LSB),
+ DEBUGFS_REG32(HDMI_NV_PDISP_RG_HDCP_CTRL),
+ DEBUGFS_REG32(HDMI_NV_PDISP_RG_HDCP_CMODE),
+ DEBUGFS_REG32(HDMI_NV_PDISP_RG_HDCP_MPRIME_MSB),
+ DEBUGFS_REG32(HDMI_NV_PDISP_RG_HDCP_MPRIME_LSB),
+ DEBUGFS_REG32(HDMI_NV_PDISP_RG_HDCP_SPRIME_MSB),
+ DEBUGFS_REG32(HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB2),
+ DEBUGFS_REG32(HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB1),
+ DEBUGFS_REG32(HDMI_NV_PDISP_RG_HDCP_RI),
+ DEBUGFS_REG32(HDMI_NV_PDISP_RG_HDCP_CS_MSB),
+ DEBUGFS_REG32(HDMI_NV_PDISP_RG_HDCP_CS_LSB),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_AUDIO_EMU0),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_AUDIO_EMU_RDATA0),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_AUDIO_EMU1),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_AUDIO_EMU2),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_STATUS),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER),
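+	/* the rate multiplier and divider are both encoded as n - 1 */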
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_LOW),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_HIGH),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_STATUS),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_HEADER),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_LOW),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_LOW),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_GENERIC_CTRL),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_GENERIC_STATUS),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_GENERIC_HEADER),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK0_LOW),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK0_HIGH),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK1_LOW),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK1_HIGH),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK2_LOW),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK2_HIGH),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK3_LOW),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK3_HIGH),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_ACR_CTRL),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_ACR_0320_SUBPACK_LOW),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_ACR_0320_SUBPACK_HIGH),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_LOW),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_HIGH),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_ACR_0882_SUBPACK_LOW),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_ACR_0882_SUBPACK_HIGH),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_ACR_1764_SUBPACK_LOW),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_ACR_1764_SUBPACK_HIGH),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_ACR_0480_SUBPACK_LOW),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_ACR_0480_SUBPACK_HIGH),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_ACR_0960_SUBPACK_LOW),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_ACR_0960_SUBPACK_HIGH),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_ACR_1920_SUBPACK_LOW),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_ACR_1920_SUBPACK_HIGH),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_CTRL),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_VSYNC_KEEPOUT),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_VSYNC_WINDOW),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_GCP_CTRL),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_GCP_STATUS),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_GCP_SUBPACK),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_CHANNEL_STATUS1),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_CHANNEL_STATUS2),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_EMU0),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_EMU1),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_EMU1_RDATA),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_SPARE),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_SPDIF_CHN_STATUS1),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_SPDIF_CHN_STATUS2),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDMI_HDCPRIF_ROM_CTRL),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_CAP),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_PWR),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_TEST),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_PLL0),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_PLL1),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_PLL2),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_CSTM),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_LVDS),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_CRCA),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_CRCB),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_BLANK),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_SEQ_CTL),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_SEQ_INST(0)),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_SEQ_INST(1)),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_SEQ_INST(2)),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_SEQ_INST(3)),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_SEQ_INST(4)),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_SEQ_INST(5)),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_SEQ_INST(6)),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_SEQ_INST(7)),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_SEQ_INST(8)),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_SEQ_INST(9)),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_SEQ_INST(10)),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_SEQ_INST(11)),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_SEQ_INST(12)),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_SEQ_INST(13)),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_SEQ_INST(14)),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_SEQ_INST(15)),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_VCRCA0),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_VCRCA1),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_CCRCA0),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_CCRCA1),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_EDATAA0),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_EDATAA1),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_COUNTA0),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_COUNTA1),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_DEBUGA0),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_DEBUGA1),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_TRIG),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_MSCHECK),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT),
+ DEBUGFS_REG32(HDMI_NV_PDISP_AUDIO_DEBUG0),
+ DEBUGFS_REG32(HDMI_NV_PDISP_AUDIO_DEBUG1),
+ DEBUGFS_REG32(HDMI_NV_PDISP_AUDIO_DEBUG2),
+ DEBUGFS_REG32(HDMI_NV_PDISP_AUDIO_FS(0)),
+ DEBUGFS_REG32(HDMI_NV_PDISP_AUDIO_FS(1)),
+ DEBUGFS_REG32(HDMI_NV_PDISP_AUDIO_FS(2)),
+ DEBUGFS_REG32(HDMI_NV_PDISP_AUDIO_FS(3)),
+ DEBUGFS_REG32(HDMI_NV_PDISP_AUDIO_FS(4)),
+ DEBUGFS_REG32(HDMI_NV_PDISP_AUDIO_FS(5)),
+ DEBUGFS_REG32(HDMI_NV_PDISP_AUDIO_FS(6)),
+ DEBUGFS_REG32(HDMI_NV_PDISP_AUDIO_PULSE_WIDTH),
+ DEBUGFS_REG32(HDMI_NV_PDISP_AUDIO_THRESHOLD),
+ DEBUGFS_REG32(HDMI_NV_PDISP_AUDIO_CNTRL0),
+ DEBUGFS_REG32(HDMI_NV_PDISP_AUDIO_N),
+ DEBUGFS_REG32(HDMI_NV_PDISP_HDCPRIF_ROM_TIMING),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_REFCLK),
+ DEBUGFS_REG32(HDMI_NV_PDISP_CRC_CONTROL),
+ DEBUGFS_REG32(HDMI_NV_PDISP_INPUT_CONTROL),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SCRATCH),
+ DEBUGFS_REG32(HDMI_NV_PDISP_PE_CURRENT),
+ DEBUGFS_REG32(HDMI_NV_PDISP_KEY_CTRL),
+ DEBUGFS_REG32(HDMI_NV_PDISP_KEY_DEBUG0),
+ DEBUGFS_REG32(HDMI_NV_PDISP_KEY_DEBUG1),
+ DEBUGFS_REG32(HDMI_NV_PDISP_KEY_DEBUG2),
+ DEBUGFS_REG32(HDMI_NV_PDISP_KEY_HDCP_KEY_0),
+ DEBUGFS_REG32(HDMI_NV_PDISP_KEY_HDCP_KEY_1),
+ DEBUGFS_REG32(HDMI_NV_PDISP_KEY_HDCP_KEY_2),
+ DEBUGFS_REG32(HDMI_NV_PDISP_KEY_HDCP_KEY_3),
+ DEBUGFS_REG32(HDMI_NV_PDISP_KEY_HDCP_KEY_TRIG),
+ DEBUGFS_REG32(HDMI_NV_PDISP_KEY_SKEY_INDEX),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_AUDIO_CNTRL0),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_AUDIO_SPARE0),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_AUDIO_HDA_CODEC_SCRATCH0),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_AUDIO_HDA_CODEC_SCRATCH1),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE),
+ DEBUGFS_REG32(HDMI_NV_PDISP_INT_STATUS),
+ DEBUGFS_REG32(HDMI_NV_PDISP_INT_MASK),
+ DEBUGFS_REG32(HDMI_NV_PDISP_INT_ENABLE),
+ DEBUGFS_REG32(HDMI_NV_PDISP_SOR_IO_PEAK_CURRENT),
+};
+
+static int tegra_hdmi_show_regs(struct seq_file *s, void *data)
+{
+ struct drm_info_node *node = s->private;
+ struct tegra_hdmi *hdmi = node->info_ent->data;
+ struct drm_crtc *crtc = hdmi->output.encoder.crtc;
+ struct drm_device *drm = node->minor->dev;
+ unsigned int i;
+ int err = 0;
+
+ drm_modeset_lock_all(drm);
+
+ if (!crtc || !crtc->state->active) {
+ err = -EBUSY;
+ goto unlock;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(tegra_hdmi_regs); i++) {
+ unsigned int offset = tegra_hdmi_regs[i].offset;
+
+ seq_printf(s, "%-56s %#05x %08x\n", tegra_hdmi_regs[i].name,
+ offset, tegra_hdmi_readl(hdmi, offset));
+ }
+
+unlock:
+ drm_modeset_unlock_all(drm);
+ return err;
+}
+
+static struct drm_info_list debugfs_files[] = {
+ { "regs", tegra_hdmi_show_regs, 0, NULL },
+};
+
+static int tegra_hdmi_late_register(struct drm_connector *connector)
+{
+ struct tegra_output *output = connector_to_output(connector);
+ unsigned int i, count = ARRAY_SIZE(debugfs_files);
+ struct drm_minor *minor = connector->dev->primary;
+ struct dentry *root = connector->debugfs_entry;
+ struct tegra_hdmi *hdmi = to_hdmi(output);
+
+ hdmi->debugfs_files = kmemdup(debugfs_files, sizeof(debugfs_files),
+ GFP_KERNEL);
+ if (!hdmi->debugfs_files)
+ return -ENOMEM;
+
+ for (i = 0; i < count; i++)
+ hdmi->debugfs_files[i].data = hdmi;
+
+ drm_debugfs_create_files(hdmi->debugfs_files, count, root, minor);
+
+ return 0;
+}
+
+static void tegra_hdmi_early_unregister(struct drm_connector *connector)
+{
+ struct tegra_output *output = connector_to_output(connector);
+ struct drm_minor *minor = connector->dev->primary;
+ unsigned int count = ARRAY_SIZE(debugfs_files);
+ struct tegra_hdmi *hdmi = to_hdmi(output);
+
+ drm_debugfs_remove_files(hdmi->debugfs_files, count, minor);
+ kfree(hdmi->debugfs_files);
+ hdmi->debugfs_files = NULL;
+}
+
+static const struct drm_connector_funcs tegra_hdmi_connector_funcs = {
+ .reset = drm_atomic_helper_connector_reset,
+ .detect = tegra_hdmi_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = tegra_output_connector_destroy,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .late_register = tegra_hdmi_late_register,
+ .early_unregister = tegra_hdmi_early_unregister,
+};
+
+static enum drm_mode_status
+tegra_hdmi_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct tegra_output *output = connector_to_output(connector);
+ struct tegra_hdmi *hdmi = to_hdmi(output);
+ unsigned long pclk = mode->clock * 1000;
+ enum drm_mode_status status = MODE_OK;
+ struct clk *parent;
+ long err;
+
+ parent = clk_get_parent(hdmi->clk_parent);
+
+ err = clk_round_rate(parent, pclk * 4);
+ if (err <= 0)
+ status = MODE_NOCLOCK;
+
+ return status;
+}
+
+static const struct drm_connector_helper_funcs
+tegra_hdmi_connector_helper_funcs = {
+ .get_modes = tegra_output_connector_get_modes,
+ .mode_valid = tegra_hdmi_connector_mode_valid,
+};
+
+static void tegra_hdmi_encoder_disable(struct drm_encoder *encoder)
+{
+ struct tegra_output *output = encoder_to_output(encoder);
+ struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
+ struct tegra_hdmi *hdmi = to_hdmi(output);
+ u32 value;
+ int err;
+
+ tegra_hdmi_audio_lock(hdmi);
+
+ /*
+	 * The following code accesses display controller registers, so make
+ * sure it's only executed when the output is attached to one.
+ */
+ if (dc) {
+ value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
+ value &= ~HDMI_ENABLE;
+ tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
+
+ tegra_dc_commit(dc);
+ }
+
+ if (!hdmi->dvi) {
+ if (hdmi->stereo)
+ tegra_hdmi_disable_stereo_infoframe(hdmi);
+
+ tegra_hdmi_disable_audio_infoframe(hdmi);
+ tegra_hdmi_disable_avi_infoframe(hdmi);
+ tegra_hdmi_disable_audio(hdmi);
+ }
+
+ tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_INT_ENABLE);
+ tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_INT_MASK);
+
+ hdmi->pixel_clock = 0;
+
+ tegra_hdmi_audio_unlock(hdmi);
+
+ err = host1x_client_suspend(&hdmi->client);
+ if (err < 0)
+ dev_err(hdmi->dev, "failed to suspend: %d\n", err);
+}
+
+static void tegra_hdmi_encoder_enable(struct drm_encoder *encoder)
+{
+ struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
+ unsigned int h_sync_width, h_front_porch, h_back_porch, i, rekey;
+ struct tegra_output *output = encoder_to_output(encoder);
+ struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
+ struct tegra_hdmi *hdmi = to_hdmi(output);
+ unsigned int pulse_start, div82;
+ int retries = 1000;
+ u32 value;
+ int err;
+
+ err = host1x_client_resume(&hdmi->client);
+ if (err < 0) {
+ dev_err(hdmi->dev, "failed to resume: %d\n", err);
+ return;
+ }
+
+ tegra_hdmi_audio_lock(hdmi);
+
+ /*
+ * Enable and unmask the HDA codec SCRATCH0 register interrupt. This
+ * is used for interoperability between the HDA codec driver and the
+ * HDMI driver.
+ */
+ tegra_hdmi_writel(hdmi, INT_CODEC_SCRATCH0, HDMI_NV_PDISP_INT_ENABLE);
+ tegra_hdmi_writel(hdmi, INT_CODEC_SCRATCH0, HDMI_NV_PDISP_INT_MASK);
+
+ hdmi->pixel_clock = mode->clock * 1000;
+ h_sync_width = mode->hsync_end - mode->hsync_start;
+ h_back_porch = mode->htotal - mode->hsync_end;
+ h_front_porch = mode->hsync_start - mode->hdisplay;
+
+ err = dev_pm_opp_set_rate(hdmi->dev, hdmi->pixel_clock);
+ if (err < 0) {
+ dev_err(hdmi->dev, "failed to set HDMI clock frequency: %d\n",
+ err);
+ }
+
+ DRM_DEBUG_KMS("HDMI clock rate: %lu Hz\n", clk_get_rate(hdmi->clk));
+
+ /* power up sequence */
+ value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_SOR_PLL0);
+ value &= ~SOR_PLL_PDBG;
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_PLL0);
+
+ usleep_range(10, 20);
+
+ value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_SOR_PLL0);
+ value &= ~SOR_PLL_PWR;
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_PLL0);
+
+ tegra_dc_writel(dc, VSYNC_H_POSITION(1),
+ DC_DISP_DISP_TIMING_OPTIONS);
+ tegra_dc_writel(dc, DITHER_CONTROL_DISABLE | BASE_COLOR_SIZE_888,
+ DC_DISP_DISP_COLOR_CONTROL);
+
+ /* video_preamble uses h_pulse2 */
+ pulse_start = 1 + h_sync_width + h_back_porch - 10;
+
+ tegra_dc_writel(dc, H_PULSE2_ENABLE, DC_DISP_DISP_SIGNAL_OPTIONS0);
+
+ value = PULSE_MODE_NORMAL | PULSE_POLARITY_HIGH | PULSE_QUAL_VACTIVE |
+ PULSE_LAST_END_A;
+ tegra_dc_writel(dc, value, DC_DISP_H_PULSE2_CONTROL);
+
+ value = PULSE_START(pulse_start) | PULSE_END(pulse_start + 8);
+ tegra_dc_writel(dc, value, DC_DISP_H_PULSE2_POSITION_A);
+
+ value = VSYNC_WINDOW_END(0x210) | VSYNC_WINDOW_START(0x200) |
+ VSYNC_WINDOW_ENABLE;
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_VSYNC_WINDOW);
+
+ if (dc->pipe)
+ value = HDMI_SRC_DISPLAYB;
+ else
+ value = HDMI_SRC_DISPLAYA;
+
+ if ((mode->hdisplay == 720) && ((mode->vdisplay == 480) ||
+ (mode->vdisplay == 576)))
+ tegra_hdmi_writel(hdmi,
+ value | ARM_VIDEO_RANGE_FULL,
+ HDMI_NV_PDISP_INPUT_CONTROL);
+ else
+ tegra_hdmi_writel(hdmi,
+ value | ARM_VIDEO_RANGE_LIMITED,
+ HDMI_NV_PDISP_INPUT_CONTROL);
+
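+	/*
+	 * The SOR reference clock divider is a 6.2 fixed-point value: the
+	 * clock rate in MHz with two fractional bits.
+	 */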
+ div82 = clk_get_rate(hdmi->clk) / 1000000 * 4;
+ value = SOR_REFCLK_DIV_INT(div82 >> 2) | SOR_REFCLK_DIV_FRAC(div82);
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_REFCLK);
+
+ hdmi->dvi = !tegra_output_is_hdmi(output);
+ if (!hdmi->dvi) {
+ /*
+ * Make sure that the audio format has been configured before
+ * enabling audio, otherwise we may try to divide by zero.
+ */
+ if (hdmi->format.sample_rate > 0) {
+ err = tegra_hdmi_setup_audio(hdmi);
+ if (err < 0)
+ hdmi->dvi = true;
+ }
+ }
+
+ if (hdmi->config->has_hda)
+ tegra_hdmi_write_eld(hdmi);
+
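+	/*
+	 * MAX_AC_PACKET is derived from the blanking width that remains
+	 * after subtracting the rekey window and an additional 18 pixels,
+	 * in units of 32 pixels.
+	 */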
+ rekey = HDMI_REKEY_DEFAULT;
+ value = HDMI_CTRL_REKEY(rekey);
+ value |= HDMI_CTRL_MAX_AC_PACKET((h_sync_width + h_back_porch +
+ h_front_porch - rekey - 18) / 32);
+
+ if (!hdmi->dvi)
+ value |= HDMI_CTRL_ENABLE;
+
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_CTRL);
+
+ if (!hdmi->dvi) {
+ tegra_hdmi_setup_avi_infoframe(hdmi, mode);
+ tegra_hdmi_setup_audio_infoframe(hdmi);
+
+ if (hdmi->stereo)
+ tegra_hdmi_setup_stereo_infoframe(hdmi);
+ }
+
+ /* TMDS CONFIG */
+ for (i = 0; i < hdmi->config->num_tmds; i++) {
+ if (hdmi->pixel_clock <= hdmi->config->tmds[i].pclk) {
+ tegra_hdmi_setup_tmds(hdmi, &hdmi->config->tmds[i]);
+ break;
+ }
+ }
+
+ tegra_hdmi_writel(hdmi,
+ SOR_SEQ_PU_PC(0) |
+ SOR_SEQ_PU_PC_ALT(0) |
+ SOR_SEQ_PD_PC(8) |
+ SOR_SEQ_PD_PC_ALT(8),
+ HDMI_NV_PDISP_SOR_SEQ_CTL);
+
+ value = SOR_SEQ_INST_WAIT_TIME(1) |
+ SOR_SEQ_INST_WAIT_UNITS_VSYNC |
+ SOR_SEQ_INST_HALT |
+ SOR_SEQ_INST_PIN_A_LOW |
+ SOR_SEQ_INST_PIN_B_LOW |
+ SOR_SEQ_INST_DRIVE_PWM_OUT_LO;
+
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_SEQ_INST(0));
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_SEQ_INST(8));
+
+ value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_SOR_CSTM);
+ value &= ~SOR_CSTM_ROTCLK(~0);
+ value |= SOR_CSTM_ROTCLK(2);
+ value |= SOR_CSTM_PLLDIV;
+ value &= ~SOR_CSTM_LVDS_ENABLE;
+ value &= ~SOR_CSTM_MODE_MASK;
+ value |= SOR_CSTM_MODE_TMDS;
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_CSTM);
+
+ /* start SOR */
+ tegra_hdmi_writel(hdmi,
+ SOR_PWR_NORMAL_STATE_PU |
+ SOR_PWR_NORMAL_START_NORMAL |
+ SOR_PWR_SAFE_STATE_PD |
+ SOR_PWR_SETTING_NEW_TRIGGER,
+ HDMI_NV_PDISP_SOR_PWR);
+ tegra_hdmi_writel(hdmi,
+ SOR_PWR_NORMAL_STATE_PU |
+ SOR_PWR_NORMAL_START_NORMAL |
+ SOR_PWR_SAFE_STATE_PD |
+ SOR_PWR_SETTING_NEW_DONE,
+ HDMI_NV_PDISP_SOR_PWR);
+
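+	/* wait for the new power state to be latched by the SOR */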
+ do {
+ BUG_ON(--retries < 0);
+ value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_SOR_PWR);
+ } while (value & SOR_PWR_SETTING_NEW_PENDING);
+
+ value = SOR_STATE_ASY_CRCMODE_COMPLETE |
+ SOR_STATE_ASY_OWNER_HEAD0 |
+ SOR_STATE_ASY_SUBOWNER_BOTH |
+ SOR_STATE_ASY_PROTOCOL_SINGLE_TMDS_A |
+ SOR_STATE_ASY_DEPOL_POS;
+
+ /* setup sync polarities */
+ if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+ value |= SOR_STATE_ASY_HSYNCPOL_POS;
+
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ value |= SOR_STATE_ASY_HSYNCPOL_NEG;
+
+ if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+ value |= SOR_STATE_ASY_VSYNCPOL_POS;
+
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ value |= SOR_STATE_ASY_VSYNCPOL_NEG;
+
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_STATE2);
+
+ value = SOR_STATE_ASY_HEAD_OPMODE_AWAKE | SOR_STATE_ASY_ORMODE_NORMAL;
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_STATE1);
+
+ tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_SOR_STATE0);
+ tegra_hdmi_writel(hdmi, SOR_STATE_UPDATE, HDMI_NV_PDISP_SOR_STATE0);
+ tegra_hdmi_writel(hdmi, value | SOR_STATE_ATTACHED,
+ HDMI_NV_PDISP_SOR_STATE1);
+ tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_SOR_STATE0);
+
+ value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
+ value |= HDMI_ENABLE;
+ tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
+
+ tegra_dc_commit(dc);
+
+ if (!hdmi->dvi) {
+ tegra_hdmi_enable_avi_infoframe(hdmi);
+ tegra_hdmi_enable_audio_infoframe(hdmi);
+ tegra_hdmi_enable_audio(hdmi);
+
+ if (hdmi->stereo)
+ tegra_hdmi_enable_stereo_infoframe(hdmi);
+ }
+
+ /* TODO: add HDCP support */
+
+ tegra_hdmi_audio_unlock(hdmi);
+}
+
+static int
+tegra_hdmi_encoder_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct tegra_output *output = encoder_to_output(encoder);
+ struct tegra_dc *dc = to_tegra_dc(conn_state->crtc);
+ unsigned long pclk = crtc_state->mode.clock * 1000;
+ struct tegra_hdmi *hdmi = to_hdmi(output);
+ int err;
+
+ err = tegra_dc_state_setup_clock(dc, crtc_state, hdmi->clk_parent,
+ pclk, 0);
+ if (err < 0) {
+ dev_err(output->dev, "failed to setup CRTC state: %d\n", err);
+ return err;
+ }
+
+ return err;
+}
+
+static const struct drm_encoder_helper_funcs tegra_hdmi_encoder_helper_funcs = {
+ .disable = tegra_hdmi_encoder_disable,
+ .enable = tegra_hdmi_encoder_enable,
+ .atomic_check = tegra_hdmi_encoder_atomic_check,
+};
+
+static int tegra_hdmi_hw_params(struct device *dev, void *data,
+ struct hdmi_codec_daifmt *fmt,
+ struct hdmi_codec_params *hparms)
+{
+ struct tegra_hdmi *hdmi = data;
+ int ret = 0;
+
+ tegra_hdmi_audio_lock(hdmi);
+
+ hdmi->format.sample_rate = hparms->sample_rate;
+ hdmi->format.channels = hparms->channels;
+
+ if (hdmi->pixel_clock && !hdmi->dvi)
+ ret = tegra_hdmi_reconfigure_audio(hdmi);
+
+ tegra_hdmi_audio_unlock(hdmi);
+
+ return ret;
+}
+
+static int tegra_hdmi_audio_startup(struct device *dev, void *data)
+{
+ struct tegra_hdmi *hdmi = data;
+ int ret;
+
+ ret = host1x_client_resume(&hdmi->client);
+ if (ret < 0)
+ dev_err(hdmi->dev, "failed to resume: %d\n", ret);
+
+ return ret;
+}
+
+static void tegra_hdmi_audio_shutdown(struct device *dev, void *data)
+{
+ struct tegra_hdmi *hdmi = data;
+ int ret;
+
+ tegra_hdmi_audio_lock(hdmi);
+
+ hdmi->format.sample_rate = 0;
+ hdmi->format.channels = 0;
+
+ tegra_hdmi_audio_unlock(hdmi);
+
+ ret = host1x_client_suspend(&hdmi->client);
+ if (ret < 0)
+ dev_err(hdmi->dev, "failed to suspend: %d\n", ret);
+}
+
+static const struct hdmi_codec_ops tegra_hdmi_codec_ops = {
+ .hw_params = tegra_hdmi_hw_params,
+ .audio_startup = tegra_hdmi_audio_startup,
+ .audio_shutdown = tegra_hdmi_audio_shutdown,
+};
+
+static int tegra_hdmi_codec_register(struct tegra_hdmi *hdmi)
+{
+ struct hdmi_codec_pdata codec_data = {};
+
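+	/* with HDA, audio is routed through the HDA codec driver instead */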
+ if (hdmi->config->has_hda)
+ return 0;
+
+ codec_data.ops = &tegra_hdmi_codec_ops;
+ codec_data.data = hdmi;
+ codec_data.spdif = 1;
+
+ hdmi->audio_pdev = platform_device_register_data(hdmi->dev,
+ HDMI_CODEC_DRV_NAME,
+ PLATFORM_DEVID_AUTO,
+ &codec_data,
+ sizeof(codec_data));
+ if (IS_ERR(hdmi->audio_pdev))
+ return PTR_ERR(hdmi->audio_pdev);
+
+ hdmi->format.channels = 2;
+
+ return 0;
+}
+
+static void tegra_hdmi_codec_unregister(struct tegra_hdmi *hdmi)
+{
+ if (hdmi->audio_pdev)
+ platform_device_unregister(hdmi->audio_pdev);
+}
+
+static int tegra_hdmi_init(struct host1x_client *client)
+{
+ struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client);
+ struct drm_device *drm = dev_get_drvdata(client->host);
+ int err;
+
+ hdmi->output.dev = client->dev;
+
+ drm_connector_init_with_ddc(drm, &hdmi->output.connector,
+ &tegra_hdmi_connector_funcs,
+ DRM_MODE_CONNECTOR_HDMIA,
+ hdmi->output.ddc);
+ drm_connector_helper_add(&hdmi->output.connector,
+ &tegra_hdmi_connector_helper_funcs);
+ hdmi->output.connector.dpms = DRM_MODE_DPMS_OFF;
+
+ drm_simple_encoder_init(drm, &hdmi->output.encoder,
+ DRM_MODE_ENCODER_TMDS);
+ drm_encoder_helper_add(&hdmi->output.encoder,
+ &tegra_hdmi_encoder_helper_funcs);
+
+ drm_connector_attach_encoder(&hdmi->output.connector,
+ &hdmi->output.encoder);
+ drm_connector_register(&hdmi->output.connector);
+
+ err = tegra_output_init(drm, &hdmi->output);
+ if (err < 0) {
+ dev_err(client->dev, "failed to initialize output: %d\n", err);
+ return err;
+ }
+
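+	/* the HDMI output can be attached to either display controller */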
+ hdmi->output.encoder.possible_crtcs = 0x3;
+
+ err = regulator_enable(hdmi->hdmi);
+ if (err < 0) {
+ dev_err(client->dev, "failed to enable HDMI regulator: %d\n",
+ err);
+ goto output_exit;
+ }
+
+ err = regulator_enable(hdmi->pll);
+ if (err < 0) {
+ dev_err(hdmi->dev, "failed to enable PLL regulator: %d\n", err);
+ goto disable_hdmi;
+ }
+
+ err = regulator_enable(hdmi->vdd);
+ if (err < 0) {
+ dev_err(hdmi->dev, "failed to enable VDD regulator: %d\n", err);
+ goto disable_pll;
+ }
+
+ err = tegra_hdmi_codec_register(hdmi);
+ if (err < 0) {
+ dev_err(hdmi->dev, "failed to register audio codec: %d\n", err);
+ goto disable_vdd;
+ }
+
+ return 0;
+
+disable_vdd:
+ regulator_disable(hdmi->vdd);
+disable_pll:
+ regulator_disable(hdmi->pll);
+disable_hdmi:
+ regulator_disable(hdmi->hdmi);
+output_exit:
+ tegra_output_exit(&hdmi->output);
+
+ return err;
+}
+
+static int tegra_hdmi_exit(struct host1x_client *client)
+{
+ struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client);
+
+ tegra_hdmi_codec_unregister(hdmi);
+
+ tegra_output_exit(&hdmi->output);
+
+ regulator_disable(hdmi->vdd);
+ regulator_disable(hdmi->pll);
+ regulator_disable(hdmi->hdmi);
+
+ return 0;
+}
+
+static int tegra_hdmi_runtime_suspend(struct host1x_client *client)
+{
+ struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client);
+ struct device *dev = client->dev;
+ int err;
+
+ err = reset_control_assert(hdmi->rst);
+ if (err < 0) {
+ dev_err(dev, "failed to assert reset: %d\n", err);
+ return err;
+ }
+
+ usleep_range(1000, 2000);
+
+ clk_disable_unprepare(hdmi->clk);
+ pm_runtime_put_sync(dev);
+
+ return 0;
+}
+
+static int tegra_hdmi_runtime_resume(struct host1x_client *client)
+{
+ struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client);
+ struct device *dev = client->dev;
+ int err;
+
+ err = pm_runtime_resume_and_get(dev);
+ if (err < 0) {
+ dev_err(dev, "failed to get runtime PM: %d\n", err);
+ return err;
+ }
+
+ err = clk_prepare_enable(hdmi->clk);
+ if (err < 0) {
+ dev_err(dev, "failed to enable clock: %d\n", err);
+ goto put_rpm;
+ }
+
+ usleep_range(1000, 2000);
+
+ err = reset_control_deassert(hdmi->rst);
+ if (err < 0) {
+ dev_err(dev, "failed to deassert reset: %d\n", err);
+ goto disable_clk;
+ }
+
+ return 0;
+
+disable_clk:
+ clk_disable_unprepare(hdmi->clk);
+put_rpm:
+ pm_runtime_put_sync(dev);
+ return err;
+}
+
+static const struct host1x_client_ops hdmi_client_ops = {
+ .init = tegra_hdmi_init,
+ .exit = tegra_hdmi_exit,
+ .suspend = tegra_hdmi_runtime_suspend,
+ .resume = tegra_hdmi_runtime_resume,
+};
+
+static const struct tegra_hdmi_config tegra20_hdmi_config = {
+ .tmds = tegra20_tmds_config,
+ .num_tmds = ARRAY_SIZE(tegra20_tmds_config),
+ .fuse_override_offset = HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT,
+ .fuse_override_value = 1 << 31,
+ .has_sor_io_peak_current = false,
+ .has_hda = false,
+ .has_hbr = false,
+};
+
+static const struct tegra_hdmi_config tegra30_hdmi_config = {
+ .tmds = tegra30_tmds_config,
+ .num_tmds = ARRAY_SIZE(tegra30_tmds_config),
+ .fuse_override_offset = HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT,
+ .fuse_override_value = 1 << 31,
+ .has_sor_io_peak_current = false,
+ .has_hda = true,
+ .has_hbr = false,
+};
+
+static const struct tegra_hdmi_config tegra114_hdmi_config = {
+ .tmds = tegra114_tmds_config,
+ .num_tmds = ARRAY_SIZE(tegra114_tmds_config),
+ .fuse_override_offset = HDMI_NV_PDISP_SOR_PAD_CTLS0,
+ .fuse_override_value = 1 << 31,
+ .has_sor_io_peak_current = true,
+ .has_hda = true,
+ .has_hbr = true,
+};
+
+static const struct tegra_hdmi_config tegra124_hdmi_config = {
+ .tmds = tegra124_tmds_config,
+ .num_tmds = ARRAY_SIZE(tegra124_tmds_config),
+ .fuse_override_offset = HDMI_NV_PDISP_SOR_PAD_CTLS0,
+ .fuse_override_value = 1 << 31,
+ .has_sor_io_peak_current = true,
+ .has_hda = true,
+ .has_hbr = true,
+};
+
+static const struct of_device_id tegra_hdmi_of_match[] = {
+ { .compatible = "nvidia,tegra124-hdmi", .data = &tegra124_hdmi_config },
+ { .compatible = "nvidia,tegra114-hdmi", .data = &tegra114_hdmi_config },
+ { .compatible = "nvidia,tegra30-hdmi", .data = &tegra30_hdmi_config },
+ { .compatible = "nvidia,tegra20-hdmi", .data = &tegra20_hdmi_config },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tegra_hdmi_of_match);
+
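+/*
+ * The HDA codec communicates audio format changes through the SCRATCH0
+ * register: parse the new format and reconfigure audio, or disable audio
+ * if the format is no longer valid.
+ */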
+static irqreturn_t tegra_hdmi_irq(int irq, void *data)
+{
+ struct tegra_hdmi *hdmi = data;
+ u32 value;
+
+ value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_INT_STATUS);
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_INT_STATUS);
+
+ if (value & INT_CODEC_SCRATCH0) {
+ unsigned int format;
+ u32 value;
+
+ value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_SOR_AUDIO_HDA_CODEC_SCRATCH0);
+
+ if (value & SOR_AUDIO_HDA_CODEC_SCRATCH0_VALID) {
+ format = value & SOR_AUDIO_HDA_CODEC_SCRATCH0_FMT_MASK;
+
+ tegra_hda_parse_format(format, &hdmi->format);
+ tegra_hdmi_reconfigure_audio(hdmi);
+ } else {
+ tegra_hdmi_disable_audio_infoframe(hdmi);
+ tegra_hdmi_disable_audio(hdmi);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int tegra_hdmi_probe(struct platform_device *pdev)
+{
+ struct tegra_hdmi *hdmi;
+ struct resource *regs;
+ int err;
+
+ hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL);
+ if (!hdmi)
+ return -ENOMEM;
+
+ hdmi->config = of_device_get_match_data(&pdev->dev);
+ hdmi->dev = &pdev->dev;
+
+ hdmi->audio_source = AUTO;
+ hdmi->stereo = false;
+ hdmi->dvi = false;
+
+ mutex_init(&hdmi->audio_lock);
+
+ hdmi->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(hdmi->clk)) {
+ dev_err(&pdev->dev, "failed to get clock\n");
+ return PTR_ERR(hdmi->clk);
+ }
+
+ hdmi->rst = devm_reset_control_get(&pdev->dev, "hdmi");
+ if (IS_ERR(hdmi->rst)) {
+ dev_err(&pdev->dev, "failed to get reset\n");
+ return PTR_ERR(hdmi->rst);
+ }
+
+ hdmi->clk_parent = devm_clk_get(&pdev->dev, "parent");
+ if (IS_ERR(hdmi->clk_parent))
+ return PTR_ERR(hdmi->clk_parent);
+
+ err = clk_set_parent(hdmi->clk, hdmi->clk_parent);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to setup clocks: %d\n", err);
+ return err;
+ }
+
+ hdmi->hdmi = devm_regulator_get(&pdev->dev, "hdmi");
+ err = PTR_ERR_OR_ZERO(hdmi->hdmi);
+ if (err)
+ return dev_err_probe(&pdev->dev, err,
+ "failed to get HDMI regulator\n");
+
+ hdmi->pll = devm_regulator_get(&pdev->dev, "pll");
+ err = PTR_ERR_OR_ZERO(hdmi->pll);
+ if (err)
+ return dev_err_probe(&pdev->dev, err,
+ "failed to get PLL regulator\n");
+
+ hdmi->vdd = devm_regulator_get(&pdev->dev, "vdd");
+ err = PTR_ERR_OR_ZERO(hdmi->vdd);
+ if (err)
+ return dev_err_probe(&pdev->dev, err,
+ "failed to get VDD regulator\n");
+
+ hdmi->output.dev = &pdev->dev;
+
+ err = tegra_output_probe(&hdmi->output);
+ if (err < 0)
+ return err;
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ hdmi->regs = devm_ioremap_resource(&pdev->dev, regs);
+ if (IS_ERR(hdmi->regs))
+ return PTR_ERR(hdmi->regs);
+
+ err = platform_get_irq(pdev, 0);
+ if (err < 0)
+ return err;
+
+ hdmi->irq = err;
+
+ err = devm_request_irq(hdmi->dev, hdmi->irq, tegra_hdmi_irq, 0,
+ dev_name(hdmi->dev), hdmi);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to request IRQ#%u: %d\n",
+ hdmi->irq, err);
+ return err;
+ }
+
+ platform_set_drvdata(pdev, hdmi);
+
+ err = devm_pm_runtime_enable(&pdev->dev);
+ if (err)
+ return err;
+
+ err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
+ if (err)
+ return err;
+
+ INIT_LIST_HEAD(&hdmi->client.list);
+ hdmi->client.ops = &hdmi_client_ops;
+ hdmi->client.dev = &pdev->dev;
+
+ err = host1x_client_register(&hdmi->client);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to register host1x client: %d\n",
+ err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int tegra_hdmi_remove(struct platform_device *pdev)
+{
+ struct tegra_hdmi *hdmi = platform_get_drvdata(pdev);
+ int err;
+
+ err = host1x_client_unregister(&hdmi->client);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
+ err);
+ return err;
+ }
+
+ tegra_output_remove(&hdmi->output);
+
+ return 0;
+}
+
+struct platform_driver tegra_hdmi_driver = {
+ .driver = {
+ .name = "tegra-hdmi",
+ .of_match_table = tegra_hdmi_of_match,
+ },
+ .probe = tegra_hdmi_probe,
+ .remove = tegra_hdmi_remove,
+};
diff --git a/drivers/gpu/drm/tegra/hdmi.h b/drivers/gpu/drm/tegra/hdmi.h
new file mode 100644
index 000000000..8deb04223
--- /dev/null
+++ b/drivers/gpu/drm/tegra/hdmi.h
@@ -0,0 +1,557 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
+ */
+
+#ifndef TEGRA_HDMI_H
+#define TEGRA_HDMI_H 1
+
+/* register definitions */
+#define HDMI_CTXSW 0x00
+
+#define HDMI_NV_PDISP_SOR_STATE0 0x01
+#define SOR_STATE_UPDATE (1 << 0)
+
+#define HDMI_NV_PDISP_SOR_STATE1 0x02
+#define SOR_STATE_ASY_HEAD_OPMODE_AWAKE (2 << 0)
+#define SOR_STATE_ASY_ORMODE_NORMAL (1 << 2)
+#define SOR_STATE_ATTACHED (1 << 3)
+
+#define HDMI_NV_PDISP_SOR_STATE2 0x03
+#define SOR_STATE_ASY_OWNER_NONE (0 << 0)
+#define SOR_STATE_ASY_OWNER_HEAD0 (1 << 0)
+#define SOR_STATE_ASY_SUBOWNER_NONE (0 << 4)
+#define SOR_STATE_ASY_SUBOWNER_SUBHEAD0 (1 << 4)
+#define SOR_STATE_ASY_SUBOWNER_SUBHEAD1 (2 << 4)
+#define SOR_STATE_ASY_SUBOWNER_BOTH (3 << 4)
+#define SOR_STATE_ASY_CRCMODE_ACTIVE (0 << 6)
+#define SOR_STATE_ASY_CRCMODE_COMPLETE (1 << 6)
+#define SOR_STATE_ASY_CRCMODE_NON_ACTIVE (2 << 6)
+#define SOR_STATE_ASY_PROTOCOL_SINGLE_TMDS_A (1 << 8)
+#define SOR_STATE_ASY_PROTOCOL_CUSTOM (15 << 8)
+#define SOR_STATE_ASY_HSYNCPOL_POS (0 << 12)
+#define SOR_STATE_ASY_HSYNCPOL_NEG (1 << 12)
+#define SOR_STATE_ASY_VSYNCPOL_POS (0 << 13)
+#define SOR_STATE_ASY_VSYNCPOL_NEG (1 << 13)
+#define SOR_STATE_ASY_DEPOL_POS (0 << 14)
+#define SOR_STATE_ASY_DEPOL_NEG (1 << 14)
+
+#define HDMI_NV_PDISP_RG_HDCP_AN_MSB 0x04
+#define HDMI_NV_PDISP_RG_HDCP_AN_LSB 0x05
+#define HDMI_NV_PDISP_RG_HDCP_CN_MSB 0x06
+#define HDMI_NV_PDISP_RG_HDCP_CN_LSB 0x07
+#define HDMI_NV_PDISP_RG_HDCP_AKSV_MSB 0x08
+#define HDMI_NV_PDISP_RG_HDCP_AKSV_LSB 0x09
+#define HDMI_NV_PDISP_RG_HDCP_BKSV_MSB 0x0a
+#define HDMI_NV_PDISP_RG_HDCP_BKSV_LSB 0x0b
+#define HDMI_NV_PDISP_RG_HDCP_CKSV_MSB 0x0c
+#define HDMI_NV_PDISP_RG_HDCP_CKSV_LSB 0x0d
+#define HDMI_NV_PDISP_RG_HDCP_DKSV_MSB 0x0e
+#define HDMI_NV_PDISP_RG_HDCP_DKSV_LSB 0x0f
+#define HDMI_NV_PDISP_RG_HDCP_CTRL 0x10
+#define HDMI_NV_PDISP_RG_HDCP_CMODE 0x11
+#define HDMI_NV_PDISP_RG_HDCP_MPRIME_MSB 0x12
+#define HDMI_NV_PDISP_RG_HDCP_MPRIME_LSB 0x13
+#define HDMI_NV_PDISP_RG_HDCP_SPRIME_MSB 0x14
+#define HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB2 0x15
+#define HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB1 0x16
+#define HDMI_NV_PDISP_RG_HDCP_RI 0x17
+#define HDMI_NV_PDISP_RG_HDCP_CS_MSB 0x18
+#define HDMI_NV_PDISP_RG_HDCP_CS_LSB 0x19
+#define HDMI_NV_PDISP_HDMI_AUDIO_EMU0 0x1a
+#define HDMI_NV_PDISP_HDMI_AUDIO_EMU_RDATA0 0x1b
+#define HDMI_NV_PDISP_HDMI_AUDIO_EMU1 0x1c
+#define HDMI_NV_PDISP_HDMI_AUDIO_EMU2 0x1d
+
+#define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL 0x1e
+#define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_STATUS 0x1f
+#define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER 0x20
+#define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_LOW 0x21
+#define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_HIGH 0x22
+#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL 0x23
+#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_STATUS 0x24
+#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_HEADER 0x25
+#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_LOW 0x26
+#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH 0x27
+#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_LOW 0x28
+#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH 0x29
+
+#define INFOFRAME_CTRL_ENABLE (1 << 0)
+
+#define INFOFRAME_HEADER_TYPE(x) (((x) & 0xff) << 0)
+#define INFOFRAME_HEADER_VERSION(x) (((x) & 0xff) << 8)
+#define INFOFRAME_HEADER_LEN(x) (((x) & 0x0f) << 16)
+
+#define HDMI_NV_PDISP_HDMI_GENERIC_CTRL 0x2a
+#define GENERIC_CTRL_ENABLE (1 << 0)
+#define GENERIC_CTRL_OTHER (1 << 4)
+#define GENERIC_CTRL_SINGLE (1 << 8)
+#define GENERIC_CTRL_HBLANK (1 << 12)
+#define GENERIC_CTRL_AUDIO (1 << 16)
+
+#define HDMI_NV_PDISP_HDMI_GENERIC_STATUS 0x2b
+#define HDMI_NV_PDISP_HDMI_GENERIC_HEADER 0x2c
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK0_LOW 0x2d
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK0_HIGH 0x2e
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK1_LOW 0x2f
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK1_HIGH 0x30
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK2_LOW 0x31
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK2_HIGH 0x32
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK3_LOW 0x33
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK3_HIGH 0x34
+
+#define HDMI_NV_PDISP_HDMI_ACR_CTRL 0x35
+#define HDMI_NV_PDISP_HDMI_ACR_0320_SUBPACK_LOW 0x36
+#define HDMI_NV_PDISP_HDMI_ACR_0320_SUBPACK_HIGH 0x37
+#define HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_LOW 0x38
+#define HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_HIGH 0x39
+#define HDMI_NV_PDISP_HDMI_ACR_0882_SUBPACK_LOW 0x3a
+#define HDMI_NV_PDISP_HDMI_ACR_0882_SUBPACK_HIGH 0x3b
+#define HDMI_NV_PDISP_HDMI_ACR_1764_SUBPACK_LOW 0x3c
+#define HDMI_NV_PDISP_HDMI_ACR_1764_SUBPACK_HIGH 0x3d
+#define HDMI_NV_PDISP_HDMI_ACR_0480_SUBPACK_LOW 0x3e
+#define HDMI_NV_PDISP_HDMI_ACR_0480_SUBPACK_HIGH 0x3f
+#define HDMI_NV_PDISP_HDMI_ACR_0960_SUBPACK_LOW 0x40
+#define HDMI_NV_PDISP_HDMI_ACR_0960_SUBPACK_HIGH 0x41
+#define HDMI_NV_PDISP_HDMI_ACR_1920_SUBPACK_LOW 0x42
+#define HDMI_NV_PDISP_HDMI_ACR_1920_SUBPACK_HIGH 0x43
+
+#define ACR_SUBPACK_CTS(x) (((x) & 0xffffff) << 8)
+#define ACR_SUBPACK_N(x) (((x) & 0xffffff) << 0)
+#define ACR_ENABLE (1 << 31)
+
+#define HDMI_NV_PDISP_HDMI_CTRL 0x44
+#define HDMI_CTRL_REKEY(x) (((x) & 0x7f) << 0)
+#define HDMI_CTRL_MAX_AC_PACKET(x) (((x) & 0x1f) << 16)
+#define HDMI_CTRL_ENABLE (1 << 30)
+
+#define HDMI_NV_PDISP_HDMI_VSYNC_KEEPOUT 0x45
+#define HDMI_NV_PDISP_HDMI_VSYNC_WINDOW 0x46
+#define VSYNC_WINDOW_END(x) (((x) & 0x3ff) << 0)
+#define VSYNC_WINDOW_START(x) (((x) & 0x3ff) << 16)
+#define VSYNC_WINDOW_ENABLE (1 << 31)
+
+#define HDMI_NV_PDISP_HDMI_GCP_CTRL 0x47
+#define HDMI_NV_PDISP_HDMI_GCP_STATUS 0x48
+#define HDMI_NV_PDISP_HDMI_GCP_SUBPACK 0x49
+#define HDMI_NV_PDISP_HDMI_CHANNEL_STATUS1 0x4a
+#define HDMI_NV_PDISP_HDMI_CHANNEL_STATUS2 0x4b
+#define HDMI_NV_PDISP_HDMI_EMU0 0x4c
+#define HDMI_NV_PDISP_HDMI_EMU1 0x4d
+#define HDMI_NV_PDISP_HDMI_EMU1_RDATA 0x4e
+
+#define HDMI_NV_PDISP_HDMI_SPARE 0x4f
+#define SPARE_HW_CTS (1 << 0)
+#define SPARE_FORCE_SW_CTS (1 << 1)
+#define SPARE_CTS_RESET_VAL(x) (((x) & 0x7) << 16)
+
+#define HDMI_NV_PDISP_HDMI_SPDIF_CHN_STATUS1 0x50
+#define HDMI_NV_PDISP_HDMI_SPDIF_CHN_STATUS2 0x51
+#define HDMI_NV_PDISP_HDMI_HDCPRIF_ROM_CTRL 0x53
+#define HDMI_NV_PDISP_SOR_CAP 0x54
+#define HDMI_NV_PDISP_SOR_PWR 0x55
+#define SOR_PWR_NORMAL_STATE_PD (0 << 0)
+#define SOR_PWR_NORMAL_STATE_PU (1 << 0)
+#define SOR_PWR_NORMAL_START_NORMAL (0 << 1)
+#define SOR_PWR_NORMAL_START_ALT (1 << 1)
+#define SOR_PWR_SAFE_STATE_PD (0 << 16)
+#define SOR_PWR_SAFE_STATE_PU (1 << 16)
+#define SOR_PWR_SETTING_NEW_DONE (0 << 31)
+#define SOR_PWR_SETTING_NEW_PENDING (1 << 31)
+#define SOR_PWR_SETTING_NEW_TRIGGER (1 << 31)
+
+#define HDMI_NV_PDISP_SOR_TEST 0x56
+#define HDMI_NV_PDISP_SOR_PLL0 0x57
+#define SOR_PLL_PWR (1 << 0)
+#define SOR_PLL_PDBG (1 << 1)
+#define SOR_PLL_VCAPD (1 << 2)
+#define SOR_PLL_PDPORT (1 << 3)
+#define SOR_PLL_RESISTORSEL (1 << 4)
+#define SOR_PLL_PULLDOWN (1 << 5)
+#define SOR_PLL_VCOCAP(x) (((x) & 0xf) << 8)
+#define SOR_PLL_BG_V17_S(x) (((x) & 0xf) << 12)
+#define SOR_PLL_FILTER(x) (((x) & 0xf) << 16)
+#define SOR_PLL_ICHPMP(x) (((x) & 0xf) << 24)
+#define SOR_PLL_TX_REG_LOAD(x) (((x) & 0xf) << 28)
+
+#define HDMI_NV_PDISP_SOR_PLL1 0x58
+#define SOR_PLL_TMDS_TERM_ENABLE (1 << 8)
+#define SOR_PLL_TMDS_TERMADJ(x) (((x) & 0xf) << 9)
+#define SOR_PLL_LOADADJ(x) (((x) & 0xf) << 20)
+#define SOR_PLL_PE_EN (1 << 28)
+#define SOR_PLL_HALF_FULL_PE (1 << 29)
+#define SOR_PLL_S_D_PIN_PE (1 << 30)
+
+#define HDMI_NV_PDISP_SOR_PLL2 0x59
+
+#define HDMI_NV_PDISP_SOR_CSTM 0x5a
+#define SOR_CSTM_ROTCLK(x) (((x) & 0xf) << 24)
+#define SOR_CSTM_PLLDIV (1 << 21)
+#define SOR_CSTM_LVDS_ENABLE (1 << 16)
+#define SOR_CSTM_MODE_LVDS (0 << 12)
+#define SOR_CSTM_MODE_TMDS (1 << 12)
+#define SOR_CSTM_MODE_MASK (3 << 12)
+
+#define HDMI_NV_PDISP_SOR_LVDS 0x5b
+#define HDMI_NV_PDISP_SOR_CRCA 0x5c
+#define HDMI_NV_PDISP_SOR_CRCB 0x5d
+#define HDMI_NV_PDISP_SOR_BLANK 0x5e
+#define HDMI_NV_PDISP_SOR_SEQ_CTL 0x5f
+#define SOR_SEQ_PU_PC(x) (((x) & 0xf) << 0)
+#define SOR_SEQ_PU_PC_ALT(x) (((x) & 0xf) << 4)
+#define SOR_SEQ_PD_PC(x) (((x) & 0xf) << 8)
+#define SOR_SEQ_PD_PC_ALT(x) (((x) & 0xf) << 12)
+#define SOR_SEQ_PC(x) (((x) & 0xf) << 16)
+#define SOR_SEQ_STATUS (1 << 28)
+#define SOR_SEQ_SWITCH (1 << 30)
+
+#define HDMI_NV_PDISP_SOR_SEQ_INST(x) (0x60 + (x))
+
+#define SOR_SEQ_INST_WAIT_TIME(x) (((x) & 0x3ff) << 0)
+#define SOR_SEQ_INST_WAIT_UNITS_VSYNC (2 << 12)
+#define SOR_SEQ_INST_HALT (1 << 15)
+#define SOR_SEQ_INST_PIN_A_LOW (0 << 21)
+#define SOR_SEQ_INST_PIN_A_HIGH (1 << 21)
+#define SOR_SEQ_INST_PIN_B_LOW (0 << 22)
+#define SOR_SEQ_INST_PIN_B_HIGH (1 << 22)
+#define SOR_SEQ_INST_DRIVE_PWM_OUT_LO (1 << 23)
+
+#define HDMI_NV_PDISP_SOR_VCRCA0 0x72
+#define HDMI_NV_PDISP_SOR_VCRCA1 0x73
+#define HDMI_NV_PDISP_SOR_CCRCA0 0x74
+#define HDMI_NV_PDISP_SOR_CCRCA1 0x75
+#define HDMI_NV_PDISP_SOR_EDATAA0 0x76
+#define HDMI_NV_PDISP_SOR_EDATAA1 0x77
+#define HDMI_NV_PDISP_SOR_COUNTA0 0x78
+#define HDMI_NV_PDISP_SOR_COUNTA1 0x79
+#define HDMI_NV_PDISP_SOR_DEBUGA0 0x7a
+#define HDMI_NV_PDISP_SOR_DEBUGA1 0x7b
+#define HDMI_NV_PDISP_SOR_TRIG 0x7c
+#define HDMI_NV_PDISP_SOR_MSCHECK 0x7d
+
+#define HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT 0x7e
+#define DRIVE_CURRENT_LANE0(x) (((x) & 0x3f) << 0)
+#define DRIVE_CURRENT_LANE1(x) (((x) & 0x3f) << 8)
+#define DRIVE_CURRENT_LANE2(x) (((x) & 0x3f) << 16)
+#define DRIVE_CURRENT_LANE3(x) (((x) & 0x3f) << 24)
+#define DRIVE_CURRENT_LANE0_T114(x) (((x) & 0x7f) << 0)
+#define DRIVE_CURRENT_LANE1_T114(x) (((x) & 0x7f) << 8)
+#define DRIVE_CURRENT_LANE2_T114(x) (((x) & 0x7f) << 16)
+#define DRIVE_CURRENT_LANE3_T114(x) (((x) & 0x7f) << 24)
+
+#define DRIVE_CURRENT_1_500_mA 0x00
+#define DRIVE_CURRENT_1_875_mA 0x01
+#define DRIVE_CURRENT_2_250_mA 0x02
+#define DRIVE_CURRENT_2_625_mA 0x03
+#define DRIVE_CURRENT_3_000_mA 0x04
+#define DRIVE_CURRENT_3_375_mA 0x05
+#define DRIVE_CURRENT_3_750_mA 0x06
+#define DRIVE_CURRENT_4_125_mA 0x07
+#define DRIVE_CURRENT_4_500_mA 0x08
+#define DRIVE_CURRENT_4_875_mA 0x09
+#define DRIVE_CURRENT_5_250_mA 0x0a
+#define DRIVE_CURRENT_5_625_mA 0x0b
+#define DRIVE_CURRENT_6_000_mA 0x0c
+#define DRIVE_CURRENT_6_375_mA 0x0d
+#define DRIVE_CURRENT_6_750_mA 0x0e
+#define DRIVE_CURRENT_7_125_mA 0x0f
+#define DRIVE_CURRENT_7_500_mA 0x10
+#define DRIVE_CURRENT_7_875_mA 0x11
+#define DRIVE_CURRENT_8_250_mA 0x12
+#define DRIVE_CURRENT_8_625_mA 0x13
+#define DRIVE_CURRENT_9_000_mA 0x14
+#define DRIVE_CURRENT_9_375_mA 0x15
+#define DRIVE_CURRENT_9_750_mA 0x16
+#define DRIVE_CURRENT_10_125_mA 0x17
+#define DRIVE_CURRENT_10_500_mA 0x18
+#define DRIVE_CURRENT_10_875_mA 0x19
+#define DRIVE_CURRENT_11_250_mA 0x1a
+#define DRIVE_CURRENT_11_625_mA 0x1b
+#define DRIVE_CURRENT_12_000_mA 0x1c
+#define DRIVE_CURRENT_12_375_mA 0x1d
+#define DRIVE_CURRENT_12_750_mA 0x1e
+#define DRIVE_CURRENT_13_125_mA 0x1f
+#define DRIVE_CURRENT_13_500_mA 0x20
+#define DRIVE_CURRENT_13_875_mA 0x21
+#define DRIVE_CURRENT_14_250_mA 0x22
+#define DRIVE_CURRENT_14_625_mA 0x23
+#define DRIVE_CURRENT_15_000_mA 0x24
+#define DRIVE_CURRENT_15_375_mA 0x25
+#define DRIVE_CURRENT_15_750_mA 0x26
+#define DRIVE_CURRENT_16_125_mA 0x27
+#define DRIVE_CURRENT_16_500_mA 0x28
+#define DRIVE_CURRENT_16_875_mA 0x29
+#define DRIVE_CURRENT_17_250_mA 0x2a
+#define DRIVE_CURRENT_17_625_mA 0x2b
+#define DRIVE_CURRENT_18_000_mA 0x2c
+#define DRIVE_CURRENT_18_375_mA 0x2d
+#define DRIVE_CURRENT_18_750_mA 0x2e
+#define DRIVE_CURRENT_19_125_mA 0x2f
+#define DRIVE_CURRENT_19_500_mA 0x30
+#define DRIVE_CURRENT_19_875_mA 0x31
+#define DRIVE_CURRENT_20_250_mA 0x32
+#define DRIVE_CURRENT_20_625_mA 0x33
+#define DRIVE_CURRENT_21_000_mA 0x34
+#define DRIVE_CURRENT_21_375_mA 0x35
+#define DRIVE_CURRENT_21_750_mA 0x36
+#define DRIVE_CURRENT_22_125_mA 0x37
+#define DRIVE_CURRENT_22_500_mA 0x38
+#define DRIVE_CURRENT_22_875_mA 0x39
+#define DRIVE_CURRENT_23_250_mA 0x3a
+#define DRIVE_CURRENT_23_625_mA 0x3b
+#define DRIVE_CURRENT_24_000_mA 0x3c
+#define DRIVE_CURRENT_24_375_mA 0x3d
+#define DRIVE_CURRENT_24_750_mA 0x3e
+
+#define DRIVE_CURRENT_0_000_mA_T114 0x00
+#define DRIVE_CURRENT_0_400_mA_T114 0x01
+#define DRIVE_CURRENT_0_800_mA_T114 0x02
+#define DRIVE_CURRENT_1_200_mA_T114 0x03
+#define DRIVE_CURRENT_1_600_mA_T114 0x04
+#define DRIVE_CURRENT_2_000_mA_T114 0x05
+#define DRIVE_CURRENT_2_400_mA_T114 0x06
+#define DRIVE_CURRENT_2_800_mA_T114 0x07
+#define DRIVE_CURRENT_3_200_mA_T114 0x08
+#define DRIVE_CURRENT_3_600_mA_T114 0x09
+#define DRIVE_CURRENT_4_000_mA_T114 0x0a
+#define DRIVE_CURRENT_4_400_mA_T114 0x0b
+#define DRIVE_CURRENT_4_800_mA_T114 0x0c
+#define DRIVE_CURRENT_5_200_mA_T114 0x0d
+#define DRIVE_CURRENT_5_600_mA_T114 0x0e
+#define DRIVE_CURRENT_6_000_mA_T114 0x0f
+#define DRIVE_CURRENT_6_400_mA_T114 0x10
+#define DRIVE_CURRENT_6_800_mA_T114 0x11
+#define DRIVE_CURRENT_7_200_mA_T114 0x12
+#define DRIVE_CURRENT_7_600_mA_T114 0x13
+#define DRIVE_CURRENT_8_000_mA_T114 0x14
+#define DRIVE_CURRENT_8_400_mA_T114 0x15
+#define DRIVE_CURRENT_8_800_mA_T114 0x16
+#define DRIVE_CURRENT_9_200_mA_T114 0x17
+#define DRIVE_CURRENT_9_600_mA_T114 0x18
+#define DRIVE_CURRENT_10_000_mA_T114 0x19
+#define DRIVE_CURRENT_10_400_mA_T114 0x1a
+#define DRIVE_CURRENT_10_800_mA_T114 0x1b
+#define DRIVE_CURRENT_11_200_mA_T114 0x1c
+#define DRIVE_CURRENT_11_600_mA_T114 0x1d
+#define DRIVE_CURRENT_12_000_mA_T114 0x1e
+#define DRIVE_CURRENT_12_400_mA_T114 0x1f
+#define DRIVE_CURRENT_12_800_mA_T114 0x20
+#define DRIVE_CURRENT_13_200_mA_T114 0x21
+#define DRIVE_CURRENT_13_600_mA_T114 0x22
+#define DRIVE_CURRENT_14_000_mA_T114 0x23
+#define DRIVE_CURRENT_14_400_mA_T114 0x24
+#define DRIVE_CURRENT_14_800_mA_T114 0x25
+#define DRIVE_CURRENT_15_200_mA_T114 0x26
+#define DRIVE_CURRENT_15_600_mA_T114 0x27
+#define DRIVE_CURRENT_16_000_mA_T114 0x28
+#define DRIVE_CURRENT_16_400_mA_T114 0x29
+#define DRIVE_CURRENT_16_800_mA_T114 0x2a
+#define DRIVE_CURRENT_17_200_mA_T114 0x2b
+#define DRIVE_CURRENT_17_600_mA_T114 0x2c
+#define DRIVE_CURRENT_18_000_mA_T114 0x2d
+#define DRIVE_CURRENT_18_400_mA_T114 0x2e
+#define DRIVE_CURRENT_18_800_mA_T114 0x2f
+#define DRIVE_CURRENT_19_200_mA_T114 0x30
+#define DRIVE_CURRENT_19_600_mA_T114 0x31
+#define DRIVE_CURRENT_20_000_mA_T114 0x32
+#define DRIVE_CURRENT_20_400_mA_T114 0x33
+#define DRIVE_CURRENT_20_800_mA_T114 0x34
+#define DRIVE_CURRENT_21_200_mA_T114 0x35
+#define DRIVE_CURRENT_21_600_mA_T114 0x36
+#define DRIVE_CURRENT_22_000_mA_T114 0x37
+#define DRIVE_CURRENT_22_400_mA_T114 0x38
+#define DRIVE_CURRENT_22_800_mA_T114 0x39
+#define DRIVE_CURRENT_23_200_mA_T114 0x3a
+#define DRIVE_CURRENT_23_600_mA_T114 0x3b
+#define DRIVE_CURRENT_24_000_mA_T114 0x3c
+#define DRIVE_CURRENT_24_400_mA_T114 0x3d
+#define DRIVE_CURRENT_24_800_mA_T114 0x3e
+#define DRIVE_CURRENT_25_200_mA_T114 0x3f
+#define DRIVE_CURRENT_25_400_mA_T114 0x40
+#define DRIVE_CURRENT_25_800_mA_T114 0x41
+#define DRIVE_CURRENT_26_200_mA_T114 0x42
+#define DRIVE_CURRENT_26_600_mA_T114 0x43
+#define DRIVE_CURRENT_27_000_mA_T114 0x44
+#define DRIVE_CURRENT_27_400_mA_T114 0x45
+#define DRIVE_CURRENT_27_800_mA_T114 0x46
+#define DRIVE_CURRENT_28_200_mA_T114 0x47
+
+#define HDMI_NV_PDISP_AUDIO_DEBUG0 0x7f
+#define HDMI_NV_PDISP_AUDIO_DEBUG1 0x80
+#define HDMI_NV_PDISP_AUDIO_DEBUG2 0x81
+
+#define HDMI_NV_PDISP_AUDIO_FS(x) (0x82 + (x))
+#define AUDIO_FS_LOW(x) (((x) & 0xfff) << 0)
+#define AUDIO_FS_HIGH(x) (((x) & 0xfff) << 16)
+
+#define HDMI_NV_PDISP_AUDIO_PULSE_WIDTH 0x89
+#define HDMI_NV_PDISP_AUDIO_THRESHOLD 0x8a
+#define HDMI_NV_PDISP_AUDIO_CNTRL0 0x8b
+#define AUDIO_CNTRL0_ERROR_TOLERANCE(x) (((x) & 0xff) << 0)
+#define AUDIO_CNTRL0_SOURCE_SELECT_AUTO (0 << 20)
+#define AUDIO_CNTRL0_SOURCE_SELECT_SPDIF (1 << 20)
+#define AUDIO_CNTRL0_SOURCE_SELECT_HDAL (2 << 20)
+#define AUDIO_CNTRL0_FRAMES_PER_BLOCK(x) (((x) & 0xff) << 24)
+
+#define HDMI_NV_PDISP_AUDIO_N 0x8c
+#define AUDIO_N_VALUE(x) (((x) & 0xfffff) << 0)
+#define AUDIO_N_RESETF (1 << 20)
+#define AUDIO_N_GENERATE_NORMAL (0 << 24)
+#define AUDIO_N_GENERATE_ALTERNATE (1 << 24)
+
+#define HDMI_NV_PDISP_HDCPRIF_ROM_TIMING 0x94
+#define HDMI_NV_PDISP_SOR_REFCLK 0x95
+#define SOR_REFCLK_DIV_INT(x) (((x) & 0xff) << 8)
+#define SOR_REFCLK_DIV_FRAC(x) (((x) & 0x03) << 6)
+
+#define HDMI_NV_PDISP_CRC_CONTROL 0x96
+#define HDMI_NV_PDISP_INPUT_CONTROL 0x97
+#define HDMI_SRC_DISPLAYA (0 << 0)
+#define HDMI_SRC_DISPLAYB (1 << 0)
+#define ARM_VIDEO_RANGE_FULL (0 << 1)
+#define ARM_VIDEO_RANGE_LIMITED (1 << 1)
+
+#define HDMI_NV_PDISP_SCRATCH 0x98
+#define HDMI_NV_PDISP_PE_CURRENT 0x99
+#define PE_CURRENT0(x) (((x) & 0xf) << 0)
+#define PE_CURRENT1(x) (((x) & 0xf) << 8)
+#define PE_CURRENT2(x) (((x) & 0xf) << 16)
+#define PE_CURRENT3(x) (((x) & 0xf) << 24)
+
+#define PE_CURRENT_0_0_mA 0x0
+#define PE_CURRENT_0_5_mA 0x1
+#define PE_CURRENT_1_0_mA 0x2
+#define PE_CURRENT_1_5_mA 0x3
+#define PE_CURRENT_2_0_mA 0x4
+#define PE_CURRENT_2_5_mA 0x5
+#define PE_CURRENT_3_0_mA 0x6
+#define PE_CURRENT_3_5_mA 0x7
+#define PE_CURRENT_4_0_mA 0x8
+#define PE_CURRENT_4_5_mA 0x9
+#define PE_CURRENT_5_0_mA 0xa
+#define PE_CURRENT_5_5_mA 0xb
+#define PE_CURRENT_6_0_mA 0xc
+#define PE_CURRENT_6_5_mA 0xd
+#define PE_CURRENT_7_0_mA 0xe
+#define PE_CURRENT_7_5_mA 0xf
+
+#define PE_CURRENT_0_mA_T114 0x0
+#define PE_CURRENT_1_mA_T114 0x1
+#define PE_CURRENT_2_mA_T114 0x2
+#define PE_CURRENT_3_mA_T114 0x3
+#define PE_CURRENT_4_mA_T114 0x4
+#define PE_CURRENT_5_mA_T114 0x5
+#define PE_CURRENT_6_mA_T114 0x6
+#define PE_CURRENT_7_mA_T114 0x7
+#define PE_CURRENT_8_mA_T114 0x8
+#define PE_CURRENT_9_mA_T114 0x9
+#define PE_CURRENT_10_mA_T114 0xa
+#define PE_CURRENT_11_mA_T114 0xb
+#define PE_CURRENT_12_mA_T114 0xc
+#define PE_CURRENT_13_mA_T114 0xd
+#define PE_CURRENT_14_mA_T114 0xe
+#define PE_CURRENT_15_mA_T114 0xf
+
+#define HDMI_NV_PDISP_KEY_CTRL 0x9a
+#define HDMI_NV_PDISP_KEY_DEBUG0 0x9b
+#define HDMI_NV_PDISP_KEY_DEBUG1 0x9c
+#define HDMI_NV_PDISP_KEY_DEBUG2 0x9d
+#define HDMI_NV_PDISP_KEY_HDCP_KEY_0 0x9e
+#define HDMI_NV_PDISP_KEY_HDCP_KEY_1 0x9f
+#define HDMI_NV_PDISP_KEY_HDCP_KEY_2 0xa0
+#define HDMI_NV_PDISP_KEY_HDCP_KEY_3 0xa1
+#define HDMI_NV_PDISP_KEY_HDCP_KEY_TRIG 0xa2
+#define HDMI_NV_PDISP_KEY_SKEY_INDEX 0xa3
+
+#define HDMI_NV_PDISP_SOR_AUDIO_CNTRL0 0xac
+#define SOR_AUDIO_CNTRL0_SOURCE_SELECT_AUTO (0 << 20)
+#define SOR_AUDIO_CNTRL0_SOURCE_SELECT_SPDIF (1 << 20)
+#define SOR_AUDIO_CNTRL0_SOURCE_SELECT_HDAL (2 << 20)
+#define SOR_AUDIO_CNTRL0_INJECT_NULLSMPL (1 << 29)
+#define HDMI_NV_PDISP_SOR_AUDIO_SPARE0 0xae
+#define SOR_AUDIO_SPARE0_HBR_ENABLE (1 << 27)
+#define HDMI_NV_PDISP_SOR_AUDIO_HDA_CODEC_SCRATCH0 0xba
+#define SOR_AUDIO_HDA_CODEC_SCRATCH0_VALID (1 << 30)
+#define SOR_AUDIO_HDA_CODEC_SCRATCH0_FMT_MASK 0xffff
+#define HDMI_NV_PDISP_SOR_AUDIO_HDA_CODEC_SCRATCH1 0xbb
+#define HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR 0xbc
+#define HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE 0xbd
+#define SOR_AUDIO_HDA_PRESENSE_VALID (1 << 1)
+#define SOR_AUDIO_HDA_PRESENSE_PRESENT (1 << 0)
+
+#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0320 0xbf
+#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0441 0xc0
+#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0882 0xc1
+#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_1764 0xc2
+#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0480 0xc3
+#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0960 0xc4
+#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_1920 0xc5
+#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_DEFAULT 0xc5
+
+#define HDMI_NV_PDISP_INT_STATUS 0xcc
+#define INT_SCRATCH (1 << 3)
+#define INT_CP_REQUEST (1 << 2)
+#define INT_CODEC_SCRATCH1 (1 << 1)
+#define INT_CODEC_SCRATCH0 (1 << 0)
+#define HDMI_NV_PDISP_INT_MASK 0xcd
+#define HDMI_NV_PDISP_INT_ENABLE 0xce
+
+#define HDMI_NV_PDISP_SOR_IO_PEAK_CURRENT 0xd1
+#define PEAK_CURRENT_LANE0(x) (((x) & 0x7f) << 0)
+#define PEAK_CURRENT_LANE1(x) (((x) & 0x7f) << 8)
+#define PEAK_CURRENT_LANE2(x) (((x) & 0x7f) << 16)
+#define PEAK_CURRENT_LANE3(x) (((x) & 0x7f) << 24)
+
+#define PEAK_CURRENT_0_000_mA 0x00
+#define PEAK_CURRENT_0_200_mA 0x01
+#define PEAK_CURRENT_0_400_mA 0x02
+#define PEAK_CURRENT_0_600_mA 0x03
+#define PEAK_CURRENT_0_800_mA 0x04
+#define PEAK_CURRENT_1_000_mA 0x05
+#define PEAK_CURRENT_1_200_mA 0x06
+#define PEAK_CURRENT_1_400_mA 0x07
+#define PEAK_CURRENT_1_600_mA 0x08
+#define PEAK_CURRENT_1_800_mA 0x09
+#define PEAK_CURRENT_2_000_mA 0x0a
+#define PEAK_CURRENT_2_200_mA 0x0b
+#define PEAK_CURRENT_2_400_mA 0x0c
+#define PEAK_CURRENT_2_600_mA 0x0d
+#define PEAK_CURRENT_2_800_mA 0x0e
+#define PEAK_CURRENT_3_000_mA 0x0f
+#define PEAK_CURRENT_3_200_mA 0x10
+#define PEAK_CURRENT_3_400_mA 0x11
+#define PEAK_CURRENT_3_600_mA 0x12
+#define PEAK_CURRENT_3_800_mA 0x13
+#define PEAK_CURRENT_4_000_mA 0x14
+#define PEAK_CURRENT_4_200_mA 0x15
+#define PEAK_CURRENT_4_400_mA 0x16
+#define PEAK_CURRENT_4_600_mA 0x17
+#define PEAK_CURRENT_4_800_mA 0x18
+#define PEAK_CURRENT_5_000_mA 0x19
+#define PEAK_CURRENT_5_200_mA 0x1a
+#define PEAK_CURRENT_5_400_mA 0x1b
+#define PEAK_CURRENT_5_600_mA 0x1c
+#define PEAK_CURRENT_5_800_mA 0x1d
+#define PEAK_CURRENT_6_000_mA 0x1e
+#define PEAK_CURRENT_6_200_mA 0x1f
+#define PEAK_CURRENT_6_400_mA 0x20
+#define PEAK_CURRENT_6_600_mA 0x21
+#define PEAK_CURRENT_6_800_mA 0x22
+#define PEAK_CURRENT_7_000_mA 0x23
+#define PEAK_CURRENT_7_200_mA 0x24
+#define PEAK_CURRENT_7_400_mA 0x25
+#define PEAK_CURRENT_7_600_mA 0x26
+#define PEAK_CURRENT_7_800_mA 0x27
+#define PEAK_CURRENT_8_000_mA 0x28
+#define PEAK_CURRENT_8_200_mA 0x29
+#define PEAK_CURRENT_8_400_mA 0x2a
+#define PEAK_CURRENT_8_600_mA 0x2b
+#define PEAK_CURRENT_8_800_mA 0x2c
+#define PEAK_CURRENT_9_000_mA 0x2d
+#define PEAK_CURRENT_9_200_mA 0x2e
+#define PEAK_CURRENT_9_400_mA 0x2f
+
+#define HDMI_NV_PDISP_SOR_PAD_CTLS0 0xd2
+
+#endif /* TEGRA_HDMI_H */
diff --git a/drivers/gpu/drm/tegra/hub.c b/drivers/gpu/drm/tegra/hub.c
new file mode 100644
index 000000000..b872527a1
--- /dev/null
+++ b/drivers/gpu/drm/tegra/hub.c
@@ -0,0 +1,1230 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2017 NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/host1x.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_blend.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_probe_helper.h>
+
+#include "drm.h"
+#include "dc.h"
+#include "plane.h"
+
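+/* number of fractional bits used in the scaler phase computations below */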
+#define NFB 24
+
+static const u32 tegra_shared_plane_formats[] = {
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_RGBA5551,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ /* new on Tegra114 */
+ DRM_FORMAT_ABGR4444,
+ DRM_FORMAT_ABGR1555,
+ DRM_FORMAT_BGRA5551,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_RGBX5551,
+ DRM_FORMAT_XBGR1555,
+ DRM_FORMAT_BGRX5551,
+ DRM_FORMAT_BGR565,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ /* planar formats */
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YUV420,
+ DRM_FORMAT_YUV422,
+};
+
+static const u64 tegra_shared_plane_modifiers[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0),
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(1),
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(2),
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3),
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4),
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5),
+ /*
+ * The GPU sector layout is only supported on Tegra194, but these will
+ * be filtered out later on by ->format_mod_supported() on SoCs where
+ * it isn't supported.
+ */
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(1) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(2) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
+ /* sentinel */
+ DRM_FORMAT_MOD_INVALID
+};
+
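+/*
+ * Translate per-window register offsets (the 0x500, 0x700 and 0x800 based
+ * ranges) into offsets relative to this plane's window within the display
+ * controller's register space.
+ */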
+static inline unsigned int tegra_plane_offset(struct tegra_plane *plane,
+ unsigned int offset)
+{
+ if (offset >= 0x500 && offset <= 0x581) {
+ offset = 0x000 + (offset - 0x500);
+ return plane->offset + offset;
+ }
+
+ if (offset >= 0x700 && offset <= 0x73c) {
+ offset = 0x180 + (offset - 0x700);
+ return plane->offset + offset;
+ }
+
+ if (offset >= 0x800 && offset <= 0x83e) {
+ offset = 0x1c0 + (offset - 0x800);
+ return plane->offset + offset;
+ }
+
+ dev_WARN(plane->dc->dev, "invalid offset: %x\n", offset);
+
+ return plane->offset + offset;
+}
+
+static inline u32 tegra_plane_readl(struct tegra_plane *plane,
+ unsigned int offset)
+{
+ return tegra_dc_readl(plane->dc, tegra_plane_offset(plane, offset));
+}
+
+static inline void tegra_plane_writel(struct tegra_plane *plane, u32 value,
+ unsigned int offset)
+{
+ tegra_dc_writel(plane->dc, value, tegra_plane_offset(plane, offset));
+}
+
+static int tegra_windowgroup_enable(struct tegra_windowgroup *wgrp)
+{
+ int err = 0;
+
+ mutex_lock(&wgrp->lock);
+
+ if (wgrp->usecount == 0) {
+ err = host1x_client_resume(wgrp->parent);
+ if (err < 0) {
+ dev_err(wgrp->parent->dev, "failed to resume: %d\n", err);
+ goto unlock;
+ }
+
+ reset_control_deassert(wgrp->rst);
+ }
+
+ wgrp->usecount++;
+
+unlock:
+ mutex_unlock(&wgrp->lock);
+ return err;
+}
+
+static void tegra_windowgroup_disable(struct tegra_windowgroup *wgrp)
+{
+ int err;
+
+ mutex_lock(&wgrp->lock);
+
+ if (wgrp->usecount == 1) {
+ err = reset_control_assert(wgrp->rst);
+ if (err < 0) {
+ pr_err("failed to assert reset for window group %u\n",
+ wgrp->index);
+ }
+
+ host1x_client_suspend(wgrp->parent);
+ }
+
+ wgrp->usecount--;
+ mutex_unlock(&wgrp->lock);
+}
+
+int tegra_display_hub_prepare(struct tegra_display_hub *hub)
+{
+ unsigned int i;
+
+ /*
+	 * XXX Enabling/disabling window groups needs to happen when the owner
+ * display controller is disabled. There's currently no good point at
+ * which this could be executed, so unconditionally enable all window
+ * groups for now.
+ */
+ for (i = 0; i < hub->soc->num_wgrps; i++) {
+ struct tegra_windowgroup *wgrp = &hub->wgrps[i];
+
+ /* Skip orphaned window group whose parent DC is disabled */
+ if (wgrp->parent)
+ tegra_windowgroup_enable(wgrp);
+ }
+
+ return 0;
+}
+
+void tegra_display_hub_cleanup(struct tegra_display_hub *hub)
+{
+ unsigned int i;
+
+ /*
+	 * XXX Remove this once window groups can be enabled and disabled
+	 * at a finer granularity.
+ */
+ for (i = 0; i < hub->soc->num_wgrps; i++) {
+ struct tegra_windowgroup *wgrp = &hub->wgrps[i];
+
+ /* Skip orphaned window group whose parent DC is disabled */
+ if (wgrp->parent)
+ tegra_windowgroup_disable(wgrp);
+ }
+}
+
+static void tegra_shared_plane_update(struct tegra_plane *plane)
+{
+ struct tegra_dc *dc = plane->dc;
+ unsigned long timeout;
+ u32 mask, value;
+
+ mask = COMMON_UPDATE | WIN_A_UPDATE << plane->base.index;
+ tegra_dc_writel(dc, mask, DC_CMD_STATE_CONTROL);
+
+ timeout = jiffies + msecs_to_jiffies(1000);
+
+ while (time_before(jiffies, timeout)) {
+ value = tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
+ if ((value & mask) == 0)
+ break;
+
+ usleep_range(100, 400);
+ }
+}
+
+static void tegra_shared_plane_activate(struct tegra_plane *plane)
+{
+ struct tegra_dc *dc = plane->dc;
+ unsigned long timeout;
+ u32 mask, value;
+
+ mask = COMMON_ACTREQ | WIN_A_ACT_REQ << plane->base.index;
+ tegra_dc_writel(dc, mask, DC_CMD_STATE_CONTROL);
+
+ timeout = jiffies + msecs_to_jiffies(1000);
+
+ while (time_before(jiffies, timeout)) {
+ value = tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
+ if ((value & mask) == 0)
+ break;
+
+ usleep_range(100, 400);
+ }
+}
+
+static unsigned int
+tegra_shared_plane_get_owner(struct tegra_plane *plane, struct tegra_dc *dc)
+{
+ unsigned int offset =
+ tegra_plane_offset(plane, DC_WIN_CORE_WINDOWGROUP_SET_CONTROL);
+
+ return tegra_dc_readl(dc, offset) & OWNER_MASK;
+}
+
+static bool tegra_dc_owns_shared_plane(struct tegra_dc *dc,
+ struct tegra_plane *plane)
+{
+ struct device *dev = dc->dev;
+
+ if (tegra_shared_plane_get_owner(plane, dc) == dc->pipe) {
+ if (plane->dc == dc)
+ return true;
+
+ dev_WARN(dev, "head %u owns window %u but is not attached\n",
+ dc->pipe, plane->index);
+ }
+
+ return false;
+}
+
+static int tegra_shared_plane_set_owner(struct tegra_plane *plane,
+ struct tegra_dc *new)
+{
+ unsigned int offset =
+ tegra_plane_offset(plane, DC_WIN_CORE_WINDOWGROUP_SET_CONTROL);
+ struct tegra_dc *old = plane->dc, *dc = new ? new : old;
+ struct device *dev = new ? new->dev : old->dev;
+ unsigned int owner, index = plane->index;
+ u32 value;
+
+ value = tegra_dc_readl(dc, offset);
+ owner = value & OWNER_MASK;
+
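+	/* OWNER_MASK (all ones) marks a window that is not assigned to any head */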
+ if (new && (owner != OWNER_MASK && owner != new->pipe)) {
+ dev_WARN(dev, "window %u owned by head %u\n", index, owner);
+ return -EBUSY;
+ }
+
+ /*
+ * This seems to happen whenever the head has been disabled with one
+ * or more windows being active. This is harmless because we'll just
+ * reassign the window to the new head anyway.
+ */
+ if (old && owner == OWNER_MASK)
+ dev_dbg(dev, "window %u not owned by head %u but %u\n", index,
+ old->pipe, owner);
+
+ value &= ~OWNER_MASK;
+
+ if (new)
+ value |= OWNER(new->pipe);
+ else
+ value |= OWNER_MASK;
+
+ tegra_dc_writel(dc, value, offset);
+
+ plane->dc = new;
+
+ return 0;
+}
+
+static void tegra_shared_plane_setup_scaler(struct tegra_plane *plane)
+{
+ static const unsigned int coeffs[192] = {
+ 0x00000000, 0x3c70e400, 0x3bb037e4, 0x0c51cc9c,
+ 0x00100001, 0x3bf0dbfa, 0x3d00f406, 0x3fe003ff,
+ 0x00300002, 0x3b80cbf5, 0x3da1040d, 0x3fb003fe,
+ 0x00400002, 0x3b20bff1, 0x3e511015, 0x3f9003fc,
+ 0x00500002, 0x3ad0b3ed, 0x3f21201d, 0x3f5003fb,
+ 0x00500003, 0x3aa0a3e9, 0x3ff13026, 0x3f2007f9,
+ 0x00500403, 0x3a7097e6, 0x00e1402f, 0x3ee007f7,
+ 0x00500403, 0x3a608be4, 0x01d14c38, 0x3ea00bf6,
+ 0x00500403, 0x3a507fe2, 0x02e15c42, 0x3e500ff4,
+ 0x00500402, 0x3a6073e1, 0x03f16c4d, 0x3e000ff2,
+ 0x00400402, 0x3a706be0, 0x05117858, 0x3db013f0,
+ 0x00300402, 0x3a905fe0, 0x06318863, 0x3d6017ee,
+ 0x00300402, 0x3ab057e0, 0x0771986e, 0x3d001beb,
+ 0x00200001, 0x3af04fe1, 0x08a1a47a, 0x3cb023e9,
+ 0x00100001, 0x3b2047e2, 0x09e1b485, 0x3c6027e7,
+ 0x00100000, 0x3b703fe2, 0x0b11c091, 0x3c002fe6,
+ 0x3f203800, 0x0391103f, 0x3ff0a014, 0x0811606c,
+ 0x3f2037ff, 0x0351083c, 0x03e11842, 0x3f203c00,
+ 0x3f302fff, 0x03010439, 0x04311c45, 0x3f104401,
+ 0x3f302fff, 0x02c0fc35, 0x04812448, 0x3f104802,
+ 0x3f4027ff, 0x0270f832, 0x04c1284b, 0x3f205003,
+ 0x3f4023ff, 0x0230f030, 0x0511304e, 0x3f205403,
+ 0x3f601fff, 0x01f0e82d, 0x05613451, 0x3f205c04,
+ 0x3f701bfe, 0x01b0e02a, 0x05a13c54, 0x3f306006,
+ 0x3f7017fe, 0x0170d827, 0x05f14057, 0x3f406807,
+ 0x3f8017ff, 0x0140d424, 0x0641445a, 0x3f406c08,
+ 0x3fa013ff, 0x0100cc22, 0x0681485d, 0x3f507409,
+ 0x3fa00fff, 0x00d0c41f, 0x06d14c60, 0x3f607c0b,
+ 0x3fc00fff, 0x0090bc1c, 0x07115063, 0x3f80840c,
+ 0x3fd00bff, 0x0070b41a, 0x07515465, 0x3f908c0e,
+ 0x3fe007ff, 0x0040b018, 0x07915868, 0x3fb0900f,
+ 0x3ff00400, 0x0010a816, 0x07d15c6a, 0x3fd09811,
+ 0x00a04c0e, 0x0460f442, 0x0240a827, 0x05c15859,
+ 0x0090440d, 0x0440f040, 0x0480fc43, 0x00b05010,
+ 0x0080400c, 0x0410ec3e, 0x04910044, 0x00d05411,
+ 0x0070380b, 0x03f0e83d, 0x04b10846, 0x00e05812,
+ 0x0060340a, 0x03d0e43b, 0x04d10c48, 0x00f06013,
+ 0x00503009, 0x03b0e039, 0x04e11449, 0x01106415,
+ 0x00402c08, 0x0390d838, 0x05011c4b, 0x01206c16,
+ 0x00302807, 0x0370d436, 0x0511204c, 0x01407018,
+ 0x00302406, 0x0340d034, 0x0531244e, 0x01507419,
+ 0x00202005, 0x0320cc32, 0x05412c50, 0x01707c1b,
+ 0x00101c04, 0x0300c431, 0x05613451, 0x0180801d,
+ 0x00101803, 0x02e0c02f, 0x05713853, 0x01a0881e,
+ 0x00101002, 0x02b0bc2d, 0x05814054, 0x01c08c20,
+ 0x00000c02, 0x02a0b82c, 0x05914455, 0x01e09421,
+ 0x00000801, 0x0280b02a, 0x05a14c57, 0x02009c23,
+ 0x00000400, 0x0260ac28, 0x05b15458, 0x0220a025,
+ };
+ unsigned int ratio, row, column;
+
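+	/*
+	 * The table above packs three 64-entry sets of filter coefficients,
+	 * one set per scaling ratio; the loops below compute the register
+	 * index as (ratio << 6) + (row << 2) + column, i.e. ratio * 64 +
+	 * row * 4 + column.
+	 */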
+ for (ratio = 0; ratio <= 2; ratio++) {
+ for (row = 0; row <= 15; row++) {
+ for (column = 0; column <= 3; column++) {
+ unsigned int index = (ratio << 6) + (row << 2) + column;
+ u32 value;
+
+ value = COEFF_INDEX(index) | COEFF_DATA(coeffs[index]);
+ tegra_plane_writel(plane, value,
+ DC_WIN_WINDOWGROUP_SET_INPUT_SCALER_COEFF);
+ }
+ }
+ }
+}
+
+static void tegra_dc_assign_shared_plane(struct tegra_dc *dc,
+ struct tegra_plane *plane)
+{
+ u32 value;
+ int err;
+
+ if (!tegra_dc_owns_shared_plane(dc, plane)) {
+ err = tegra_shared_plane_set_owner(plane, dc);
+ if (err < 0)
+ return;
+ }
+
+ value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_LINEBUF_CONFIG);
+ value |= MODE_FOUR_LINES;
+ tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_LINEBUF_CONFIG);
+
+ value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_FETCH_METER);
+ value = SLOTS(1);
+ tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_FETCH_METER);
+
+ /* disable watermark */
+ value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLA);
+ value &= ~LATENCY_CTL_MODE_ENABLE;
+ tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLA);
+
+ value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLB);
+ value |= WATERMARK_MASK;
+ tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLB);
+
+ /* pipe meter */
+ value = tegra_plane_readl(plane, DC_WIN_CORE_PRECOMP_WGRP_PIPE_METER);
+ value = PIPE_METER_INT(0) | PIPE_METER_FRAC(0);
+ tegra_plane_writel(plane, value, DC_WIN_CORE_PRECOMP_WGRP_PIPE_METER);
+
+ /* mempool entries */
+ value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_POOL_CONFIG);
+ value = MEMPOOL_ENTRIES(0x331);
+ tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_POOL_CONFIG);
+
+ value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_THREAD_GROUP);
+ value &= ~THREAD_NUM_MASK;
+ value |= THREAD_NUM(plane->base.index);
+ value |= THREAD_GROUP_ENABLE;
+ tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_THREAD_GROUP);
+
+ tegra_shared_plane_setup_scaler(plane);
+
+ tegra_shared_plane_update(plane);
+ tegra_shared_plane_activate(plane);
+}
+
+static void tegra_dc_remove_shared_plane(struct tegra_dc *dc,
+ struct tegra_plane *plane)
+{
+ tegra_shared_plane_set_owner(plane, NULL);
+}
+
+static int tegra_shared_plane_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
+ plane);
+ struct tegra_plane_state *plane_state = to_tegra_plane_state(new_plane_state);
+ struct tegra_shared_plane *tegra = to_tegra_shared_plane(plane);
+ struct tegra_bo_tiling *tiling = &plane_state->tiling;
+ struct tegra_dc *dc = to_tegra_dc(new_plane_state->crtc);
+ int err;
+
+ /* no need for further checks if the plane is being disabled */
+ if (!new_plane_state->crtc || !new_plane_state->fb)
+ return 0;
+
+ err = tegra_plane_format(new_plane_state->fb->format->format,
+ &plane_state->format,
+ &plane_state->swap);
+ if (err < 0)
+ return err;
+
+ err = tegra_fb_get_tiling(new_plane_state->fb, tiling);
+ if (err < 0)
+ return err;
+
+ if (tiling->mode == TEGRA_BO_TILING_MODE_BLOCK &&
+ !dc->soc->supports_block_linear) {
+ DRM_ERROR("hardware doesn't support block linear mode\n");
+ return -EINVAL;
+ }
+
+ if (tiling->sector_layout == TEGRA_BO_SECTOR_LAYOUT_GPU &&
+ !dc->soc->supports_sector_layout) {
+ DRM_ERROR("hardware doesn't support GPU sector layout\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Tegra doesn't support different strides for U and V planes so we
+ * error out if the user tries to display a framebuffer with such a
+ * configuration.
+ */
+ if (new_plane_state->fb->format->num_planes > 2) {
+ if (new_plane_state->fb->pitches[2] != new_plane_state->fb->pitches[1]) {
+ DRM_ERROR("unsupported UV-plane configuration\n");
+ return -EINVAL;
+ }
+ }
+
+ /* XXX scaling is not yet supported, add a check here */
+
+ err = tegra_plane_state_add(&tegra->base, new_plane_state);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static void tegra_shared_plane_atomic_disable(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
+ plane);
+ struct tegra_plane *p = to_tegra_plane(plane);
+ struct tegra_dc *dc;
+ u32 value;
+ int err;
+
+	/* nothing to do if the plane was never enabled on a CRTC */
+ if (!old_state || !old_state->crtc)
+ return;
+
+ dc = to_tegra_dc(old_state->crtc);
+
+ err = host1x_client_resume(&dc->client);
+ if (err < 0) {
+ dev_err(dc->dev, "failed to resume: %d\n", err);
+ return;
+ }
+
+ /*
+ * XXX Legacy helpers seem to sometimes call ->atomic_disable() even
+	 * on planes that are already disabled. Make sure we fall back to the
+ * head for this particular state instead of crashing.
+ */
+ if (WARN_ON(p->dc == NULL))
+ p->dc = dc;
+
+ value = tegra_plane_readl(p, DC_WIN_WIN_OPTIONS);
+ value &= ~WIN_ENABLE;
+ tegra_plane_writel(p, value, DC_WIN_WIN_OPTIONS);
+
+ tegra_dc_remove_shared_plane(dc, p);
+
+ host1x_client_suspend(&dc->client);
+}
+
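+/*
+ * Compute the source-to-destination phase increment in NFB-bit fixed
+ * point: incr = round(dfixed_trunc(in) * 2^NFB / out), with the
+ * (tmp2 >> 1) addend providing round-to-nearest before the division.
+ */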
+static inline u32 compute_phase_incr(fixed20_12 in, unsigned int out)
+{
+ u64 tmp, tmp1, tmp2;
+
+ tmp = (u64)dfixed_trunc(in);
+ tmp2 = (u64)out;
+ tmp1 = (tmp << NFB) + (tmp2 >> 1);
+ do_div(tmp1, tmp2);
+
+ return lower_32_bits(tmp1);
+}
+
+static void tegra_shared_plane_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
+ plane);
+ struct tegra_plane_state *tegra_plane_state = to_tegra_plane_state(new_state);
+ struct tegra_dc *dc = to_tegra_dc(new_state->crtc);
+ unsigned int zpos = new_state->normalized_zpos;
+ struct drm_framebuffer *fb = new_state->fb;
+ struct tegra_plane *p = to_tegra_plane(plane);
+ u32 value, min_width, bypass = 0;
+ dma_addr_t base, addr_flag = 0;
+ unsigned int bpc, planes;
+ bool yuv;
+ int err;
+
+	/* nothing to do unless the plane has a CRTC and a framebuffer */
+ if (!new_state->crtc || !new_state->fb)
+ return;
+
+ if (!new_state->visible) {
+ tegra_shared_plane_atomic_disable(plane, state);
+ return;
+ }
+
+ err = host1x_client_resume(&dc->client);
+ if (err < 0) {
+ dev_err(dc->dev, "failed to resume: %d\n", err);
+ return;
+ }
+
+ yuv = tegra_plane_format_is_yuv(tegra_plane_state->format, &planes, &bpc);
+
+ tegra_dc_assign_shared_plane(dc, p);
+
+ tegra_plane_writel(p, VCOUNTER, DC_WIN_CORE_ACT_CONTROL);
+
+ /* blending */
+ value = BLEND_FACTOR_DST_ALPHA_ZERO | BLEND_FACTOR_SRC_ALPHA_K2 |
+ BLEND_FACTOR_DST_COLOR_NEG_K1_TIMES_SRC |
+ BLEND_FACTOR_SRC_COLOR_K1_TIMES_SRC;
+ tegra_plane_writel(p, value, DC_WIN_BLEND_MATCH_SELECT);
+
+ value = BLEND_FACTOR_DST_ALPHA_ZERO | BLEND_FACTOR_SRC_ALPHA_K2 |
+ BLEND_FACTOR_DST_COLOR_NEG_K1_TIMES_SRC |
+ BLEND_FACTOR_SRC_COLOR_K1_TIMES_SRC;
+ tegra_plane_writel(p, value, DC_WIN_BLEND_NOMATCH_SELECT);
+
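+	/*
+	 * DRM zpos increases towards the top of the stack while the
+	 * hardware layer depth increases away from it, hence the 255 - zpos
+	 * inversion below.
+	 */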
+ value = K2(255) | K1(255) | WINDOW_LAYER_DEPTH(255 - zpos);
+ tegra_plane_writel(p, value, DC_WIN_BLEND_LAYER_CONTROL);
+
+ /* scaling */
+ min_width = min(new_state->src_w >> 16, new_state->crtc_w);
+
+ value = tegra_plane_readl(p, DC_WINC_PRECOMP_WGRP_PIPE_CAPC);
+
+ if (min_width < MAX_PIXELS_5TAP444(value)) {
+ value = HORIZONTAL_TAPS_5 | VERTICAL_TAPS_5;
+ } else {
+ value = tegra_plane_readl(p, DC_WINC_PRECOMP_WGRP_PIPE_CAPE);
+
+ if (min_width < MAX_PIXELS_2TAP444(value))
+ value = HORIZONTAL_TAPS_2 | VERTICAL_TAPS_2;
+ else
+ dev_err(dc->dev, "invalid minimum width: %u\n", min_width);
+ }
+
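+	/*
+	 * XXX Note that the tap selection computed above is currently
+	 * overridden and 5-tap filtering is always programmed.
+	 */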
+ value = HORIZONTAL_TAPS_5 | VERTICAL_TAPS_5;
+ tegra_plane_writel(p, value, DC_WIN_WINDOWGROUP_SET_CONTROL_INPUT_SCALER);
+
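+	/*
+	 * When scaling, program the phase increment and an initial phase of
+	 * 0.5 (in NFB-bit fixed point) plus half an increment, which
+	 * presumably centers sampling on the first output pixel.
+	 */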
+ if (new_state->src_w != new_state->crtc_w << 16) {
+ fixed20_12 width = dfixed_init(new_state->src_w >> 16);
+ u32 incr = compute_phase_incr(width, new_state->crtc_w) & ~0x1;
+ u32 init = (1 << (NFB - 1)) + (incr >> 1);
+
+ tegra_plane_writel(p, incr, DC_WIN_SET_INPUT_SCALER_HPHASE_INCR);
+ tegra_plane_writel(p, init, DC_WIN_SET_INPUT_SCALER_H_START_PHASE);
+ } else {
+ bypass |= INPUT_SCALER_HBYPASS;
+ }
+
+ if (new_state->src_h != new_state->crtc_h << 16) {
+ fixed20_12 height = dfixed_init(new_state->src_h >> 16);
+ u32 incr = compute_phase_incr(height, new_state->crtc_h) & ~0x1;
+ u32 init = (1 << (NFB - 1)) + (incr >> 1);
+
+ tegra_plane_writel(p, incr, DC_WIN_SET_INPUT_SCALER_VPHASE_INCR);
+ tegra_plane_writel(p, init, DC_WIN_SET_INPUT_SCALER_V_START_PHASE);
+ } else {
+ bypass |= INPUT_SCALER_VBYPASS;
+ }
+
+ tegra_plane_writel(p, bypass, DC_WIN_WINDOWGROUP_SET_INPUT_SCALER_USAGE);
+
+ /* disable compression */
+ tegra_plane_writel(p, 0, DC_WINBUF_CDE_CONTROL);
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ /*
+ * Physical address bit 39 in Tegra194 is used as a switch for special
+ * logic that swizzles the memory using either the legacy Tegra or the
+ * dGPU sector layout.
+ */
+ if (tegra_plane_state->tiling.sector_layout == TEGRA_BO_SECTOR_LAYOUT_GPU)
+ addr_flag = BIT_ULL(39);
+#endif
+
+ base = tegra_plane_state->iova[0] + fb->offsets[0];
+ base |= addr_flag;
+
+ tegra_plane_writel(p, tegra_plane_state->format, DC_WIN_COLOR_DEPTH);
+ tegra_plane_writel(p, 0, DC_WIN_PRECOMP_WGRP_PARAMS);
+
+ value = V_POSITION(new_state->crtc_y) |
+ H_POSITION(new_state->crtc_x);
+ tegra_plane_writel(p, value, DC_WIN_POSITION);
+
+ value = V_SIZE(new_state->crtc_h) | H_SIZE(new_state->crtc_w);
+ tegra_plane_writel(p, value, DC_WIN_SIZE);
+
+ value = WIN_ENABLE | COLOR_EXPAND;
+ tegra_plane_writel(p, value, DC_WIN_WIN_OPTIONS);
+
+ value = V_SIZE(new_state->src_h >> 16) | H_SIZE(new_state->src_w >> 16);
+ tegra_plane_writel(p, value, DC_WIN_CROPPED_SIZE);
+
+ tegra_plane_writel(p, upper_32_bits(base), DC_WINBUF_START_ADDR_HI);
+ tegra_plane_writel(p, lower_32_bits(base), DC_WINBUF_START_ADDR);
+
+ value = PITCH(fb->pitches[0]);
+ tegra_plane_writel(p, value, DC_WIN_PLANAR_STORAGE);
+
+ if (yuv && planes > 1) {
+ base = tegra_plane_state->iova[1] + fb->offsets[1];
+ base |= addr_flag;
+
+ tegra_plane_writel(p, upper_32_bits(base), DC_WINBUF_START_ADDR_HI_U);
+ tegra_plane_writel(p, lower_32_bits(base), DC_WINBUF_START_ADDR_U);
+
+ if (planes > 2) {
+ base = tegra_plane_state->iova[2] + fb->offsets[2];
+ base |= addr_flag;
+
+ tegra_plane_writel(p, upper_32_bits(base), DC_WINBUF_START_ADDR_HI_V);
+ tegra_plane_writel(p, lower_32_bits(base), DC_WINBUF_START_ADDR_V);
+ }
+
+ value = PITCH_U(fb->pitches[1]);
+
+ if (planes > 2)
+ value |= PITCH_V(fb->pitches[2]);
+
+ tegra_plane_writel(p, value, DC_WIN_PLANAR_STORAGE_UV);
+ } else {
+ tegra_plane_writel(p, 0, DC_WINBUF_START_ADDR_U);
+ tegra_plane_writel(p, 0, DC_WINBUF_START_ADDR_HI_U);
+ tegra_plane_writel(p, 0, DC_WINBUF_START_ADDR_V);
+ tegra_plane_writel(p, 0, DC_WINBUF_START_ADDR_HI_V);
+ tegra_plane_writel(p, 0, DC_WIN_PLANAR_STORAGE_UV);
+ }
+
+ value = CLAMP_BEFORE_BLEND | INPUT_RANGE_FULL;
+
+ if (yuv) {
+ if (bpc < 12)
+ value |= DEGAMMA_YUV8_10;
+ else
+ value |= DEGAMMA_YUV12;
+
+ /* XXX parameterize */
+ value |= COLOR_SPACE_YUV_2020;
+ } else {
+ if (!tegra_plane_format_is_indexed(tegra_plane_state->format))
+ value |= DEGAMMA_SRGB;
+ }
+
+ tegra_plane_writel(p, value, DC_WIN_SET_PARAMS);
+
+ value = OFFSET_X(new_state->src_y >> 16) |
+ OFFSET_Y(new_state->src_x >> 16);
+ tegra_plane_writel(p, value, DC_WINBUF_CROPPED_POINT);
+
+ if (dc->soc->supports_block_linear) {
+ unsigned long height = tegra_plane_state->tiling.value;
+
+ /* XXX */
+ switch (tegra_plane_state->tiling.mode) {
+ case TEGRA_BO_TILING_MODE_PITCH:
+ value = DC_WINBUF_SURFACE_KIND_BLOCK_HEIGHT(0) |
+ DC_WINBUF_SURFACE_KIND_PITCH;
+ break;
+
+ /* XXX not supported on Tegra186 and later */
+ case TEGRA_BO_TILING_MODE_TILED:
+ value = DC_WINBUF_SURFACE_KIND_TILED;
+ break;
+
+ case TEGRA_BO_TILING_MODE_BLOCK:
+ value = DC_WINBUF_SURFACE_KIND_BLOCK_HEIGHT(height) |
+ DC_WINBUF_SURFACE_KIND_BLOCK;
+ break;
+ }
+
+ tegra_plane_writel(p, value, DC_WINBUF_SURFACE_KIND);
+ }
+
+ /* disable gamut CSC */
+ value = tegra_plane_readl(p, DC_WIN_WINDOW_SET_CONTROL);
+ value &= ~CONTROL_CSC_ENABLE;
+ tegra_plane_writel(p, value, DC_WIN_WINDOW_SET_CONTROL);
+
+ host1x_client_suspend(&dc->client);
+}
+
+static const struct drm_plane_helper_funcs tegra_shared_plane_helper_funcs = {
+ .prepare_fb = tegra_plane_prepare_fb,
+ .cleanup_fb = tegra_plane_cleanup_fb,
+ .atomic_check = tegra_shared_plane_atomic_check,
+ .atomic_update = tegra_shared_plane_atomic_update,
+ .atomic_disable = tegra_shared_plane_atomic_disable,
+};
+
+struct drm_plane *tegra_shared_plane_create(struct drm_device *drm,
+ struct tegra_dc *dc,
+ unsigned int wgrp,
+ unsigned int index)
+{
+ enum drm_plane_type type = DRM_PLANE_TYPE_OVERLAY;
+ struct tegra_drm *tegra = drm->dev_private;
+ struct tegra_display_hub *hub = tegra->hub;
+ struct tegra_shared_plane *plane;
+ unsigned int possible_crtcs;
+ unsigned int num_formats;
+ const u64 *modifiers;
+ struct drm_plane *p;
+ const u32 *formats;
+ int err;
+
+ plane = kzalloc(sizeof(*plane), GFP_KERNEL);
+ if (!plane)
+ return ERR_PTR(-ENOMEM);
+
+ plane->base.offset = 0x0a00 + 0x0300 * index;
+ plane->base.index = index;
+
+ plane->wgrp = &hub->wgrps[wgrp];
+ plane->wgrp->parent = &dc->client;
+
+ p = &plane->base.base;
+
+ /* planes can be assigned to arbitrary CRTCs */
+ possible_crtcs = BIT(tegra->num_crtcs) - 1;
+
+ num_formats = ARRAY_SIZE(tegra_shared_plane_formats);
+ formats = tegra_shared_plane_formats;
+ modifiers = tegra_shared_plane_modifiers;
+
+ err = drm_universal_plane_init(drm, p, possible_crtcs,
+ &tegra_plane_funcs, formats,
+ num_formats, modifiers, type, NULL);
+ if (err < 0) {
+ kfree(plane);
+ return ERR_PTR(err);
+ }
+
+ drm_plane_helper_add(p, &tegra_shared_plane_helper_funcs);
+ drm_plane_create_zpos_property(p, 0, 0, 255);
+
+ return p;
+}
+
+static struct drm_private_state *
+tegra_display_hub_duplicate_state(struct drm_private_obj *obj)
+{
+ struct tegra_display_hub_state *state;
+
+ state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
+
+ __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
+
+ return &state->base;
+}
+
+static void tegra_display_hub_destroy_state(struct drm_private_obj *obj,
+ struct drm_private_state *state)
+{
+ struct tegra_display_hub_state *hub_state =
+ to_tegra_display_hub_state(state);
+
+ kfree(hub_state);
+}
+
+static const struct drm_private_state_funcs tegra_display_hub_state_funcs = {
+ .atomic_duplicate_state = tegra_display_hub_duplicate_state,
+ .atomic_destroy_state = tegra_display_hub_destroy_state,
+};
+
+static struct tegra_display_hub_state *
+tegra_display_hub_get_state(struct tegra_display_hub *hub,
+ struct drm_atomic_state *state)
+{
+ struct drm_private_state *priv;
+
+ priv = drm_atomic_get_private_obj_state(state, &hub->base);
+ if (IS_ERR(priv))
+ return ERR_CAST(priv);
+
+ return to_tegra_display_hub_state(priv);
+}
+
+int tegra_display_hub_atomic_check(struct drm_device *drm,
+ struct drm_atomic_state *state)
+{
+ struct tegra_drm *tegra = drm->dev_private;
+ struct tegra_display_hub_state *hub_state;
+ struct drm_crtc_state *old, *new;
+ struct drm_crtc *crtc;
+ unsigned int i;
+
+ if (!tegra->hub)
+ return 0;
+
+ hub_state = tegra_display_hub_get_state(tegra->hub, state);
+ if (IS_ERR(hub_state))
+ return PTR_ERR(hub_state);
+
+ /*
+ * The display hub display clock needs to be fed by the display clock
+ * with the highest frequency to ensure proper functioning of all the
+ * displays.
+ *
+	 * Note that this isn't used before Tegra186, but it doesn't hurt,
+	 * and making it conditional would only make the code less clean.
+ */
+ for_each_oldnew_crtc_in_state(state, crtc, old, new, i) {
+ struct tegra_dc_state *dc = to_dc_state(new);
+
+ if (new->active) {
+ if (!hub_state->clk || dc->pclk > hub_state->rate) {
+ hub_state->dc = to_tegra_dc(dc->base.crtc);
+ hub_state->clk = hub_state->dc->clk;
+ hub_state->rate = dc->pclk;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static void tegra_display_hub_update(struct tegra_dc *dc)
+{
+ u32 value;
+ int err;
+
+ err = host1x_client_resume(&dc->client);
+ if (err < 0) {
+ dev_err(dc->dev, "failed to resume: %d\n", err);
+ return;
+ }
+
+ value = tegra_dc_readl(dc, DC_CMD_IHUB_COMMON_MISC_CTL);
+ value &= ~LATENCY_EVENT;
+ tegra_dc_writel(dc, value, DC_CMD_IHUB_COMMON_MISC_CTL);
+
+ value = tegra_dc_readl(dc, DC_DISP_IHUB_COMMON_DISPLAY_FETCH_METER);
+ value = CURS_SLOTS(1) | WGRP_SLOTS(1);
+ tegra_dc_writel(dc, value, DC_DISP_IHUB_COMMON_DISPLAY_FETCH_METER);
+
+ tegra_dc_writel(dc, COMMON_UPDATE, DC_CMD_STATE_CONTROL);
+ tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
+ tegra_dc_writel(dc, COMMON_ACTREQ, DC_CMD_STATE_CONTROL);
+ tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
+
+ host1x_client_suspend(&dc->client);
+}
+
+void tegra_display_hub_atomic_commit(struct drm_device *drm,
+ struct drm_atomic_state *state)
+{
+ struct tegra_drm *tegra = drm->dev_private;
+ struct tegra_display_hub *hub = tegra->hub;
+ struct tegra_display_hub_state *hub_state;
+ struct device *dev = hub->client.dev;
+ int err;
+
+ hub_state = to_tegra_display_hub_state(hub->base.state);
+
+ if (hub_state->clk) {
+ err = clk_set_rate(hub_state->clk, hub_state->rate);
+ if (err < 0)
+ dev_err(dev, "failed to set rate of %pC to %lu Hz\n",
+ hub_state->clk, hub_state->rate);
+
+ err = clk_set_parent(hub->clk_disp, hub_state->clk);
+ if (err < 0)
+ dev_err(dev, "failed to set parent of %pC to %pC: %d\n",
+ hub->clk_disp, hub_state->clk, err);
+ }
+
+ if (hub_state->dc)
+ tegra_display_hub_update(hub_state->dc);
+}
+
+static int tegra_display_hub_init(struct host1x_client *client)
+{
+ struct tegra_display_hub *hub = to_tegra_display_hub(client);
+ struct drm_device *drm = dev_get_drvdata(client->host);
+ struct tegra_drm *tegra = drm->dev_private;
+ struct tegra_display_hub_state *state;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ drm_atomic_private_obj_init(drm, &hub->base, &state->base,
+ &tegra_display_hub_state_funcs);
+
+ tegra->hub = hub;
+
+ return 0;
+}
+
+static int tegra_display_hub_exit(struct host1x_client *client)
+{
+ struct drm_device *drm = dev_get_drvdata(client->host);
+ struct tegra_drm *tegra = drm->dev_private;
+
+ drm_atomic_private_obj_fini(&tegra->hub->base);
+ tegra->hub = NULL;
+
+ return 0;
+}
+
+static int tegra_display_hub_runtime_suspend(struct host1x_client *client)
+{
+ struct tegra_display_hub *hub = to_tegra_display_hub(client);
+ struct device *dev = client->dev;
+ unsigned int i = hub->num_heads;
+ int err;
+
+ err = reset_control_assert(hub->rst);
+ if (err < 0)
+ return err;
+
+ while (i--)
+ clk_disable_unprepare(hub->clk_heads[i]);
+
+ clk_disable_unprepare(hub->clk_hub);
+ clk_disable_unprepare(hub->clk_dsc);
+ clk_disable_unprepare(hub->clk_disp);
+
+ pm_runtime_put_sync(dev);
+
+ return 0;
+}
+
+static int tegra_display_hub_runtime_resume(struct host1x_client *client)
+{
+ struct tegra_display_hub *hub = to_tegra_display_hub(client);
+ struct device *dev = client->dev;
+ unsigned int i;
+ int err;
+
+ err = pm_runtime_resume_and_get(dev);
+ if (err < 0) {
+ dev_err(dev, "failed to get runtime PM: %d\n", err);
+ return err;
+ }
+
+ err = clk_prepare_enable(hub->clk_disp);
+ if (err < 0)
+ goto put_rpm;
+
+ err = clk_prepare_enable(hub->clk_dsc);
+ if (err < 0)
+ goto disable_disp;
+
+ err = clk_prepare_enable(hub->clk_hub);
+ if (err < 0)
+ goto disable_dsc;
+
+ for (i = 0; i < hub->num_heads; i++) {
+ err = clk_prepare_enable(hub->clk_heads[i]);
+ if (err < 0)
+ goto disable_heads;
+ }
+
+ err = reset_control_deassert(hub->rst);
+ if (err < 0)
+ goto disable_heads;
+
+ return 0;
+
+disable_heads:
+ while (i--)
+ clk_disable_unprepare(hub->clk_heads[i]);
+
+ clk_disable_unprepare(hub->clk_hub);
+disable_dsc:
+ clk_disable_unprepare(hub->clk_dsc);
+disable_disp:
+ clk_disable_unprepare(hub->clk_disp);
+put_rpm:
+ pm_runtime_put_sync(dev);
+ return err;
+}
+
+static const struct host1x_client_ops tegra_display_hub_ops = {
+ .init = tegra_display_hub_init,
+ .exit = tegra_display_hub_exit,
+ .suspend = tegra_display_hub_runtime_suspend,
+ .resume = tegra_display_hub_runtime_resume,
+};
+
+static int tegra_display_hub_probe(struct platform_device *pdev)
+{
+ u64 dma_mask = dma_get_mask(pdev->dev.parent);
+ struct device_node *child = NULL;
+ struct tegra_display_hub *hub;
+ struct clk *clk;
+ unsigned int i;
+ int err;
+
+ err = dma_coerce_mask_and_coherent(&pdev->dev, dma_mask);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err);
+ return err;
+ }
+
+ hub = devm_kzalloc(&pdev->dev, sizeof(*hub), GFP_KERNEL);
+ if (!hub)
+ return -ENOMEM;
+
+ hub->soc = of_device_get_match_data(&pdev->dev);
+
+ hub->clk_disp = devm_clk_get(&pdev->dev, "disp");
+ if (IS_ERR(hub->clk_disp)) {
+ err = PTR_ERR(hub->clk_disp);
+ return err;
+ }
+
+ if (hub->soc->supports_dsc) {
+ hub->clk_dsc = devm_clk_get(&pdev->dev, "dsc");
+ if (IS_ERR(hub->clk_dsc)) {
+ err = PTR_ERR(hub->clk_dsc);
+ return err;
+ }
+ }
+
+ hub->clk_hub = devm_clk_get(&pdev->dev, "hub");
+ if (IS_ERR(hub->clk_hub)) {
+ err = PTR_ERR(hub->clk_hub);
+ return err;
+ }
+
+ hub->rst = devm_reset_control_get(&pdev->dev, "misc");
+ if (IS_ERR(hub->rst)) {
+ err = PTR_ERR(hub->rst);
+ return err;
+ }
+
+ hub->wgrps = devm_kcalloc(&pdev->dev, hub->soc->num_wgrps,
+ sizeof(*hub->wgrps), GFP_KERNEL);
+ if (!hub->wgrps)
+ return -ENOMEM;
+
+ for (i = 0; i < hub->soc->num_wgrps; i++) {
+ struct tegra_windowgroup *wgrp = &hub->wgrps[i];
+ char id[8];
+
+ snprintf(id, sizeof(id), "wgrp%u", i);
+ mutex_init(&wgrp->lock);
+ wgrp->usecount = 0;
+ wgrp->index = i;
+
+ wgrp->rst = devm_reset_control_get(&pdev->dev, id);
+ if (IS_ERR(wgrp->rst))
+ return PTR_ERR(wgrp->rst);
+
+ err = reset_control_assert(wgrp->rst);
+ if (err < 0)
+ return err;
+ }
+
+ hub->num_heads = of_get_child_count(pdev->dev.of_node);
+
+ hub->clk_heads = devm_kcalloc(&pdev->dev, hub->num_heads, sizeof(clk),
+ GFP_KERNEL);
+ if (!hub->clk_heads)
+ return -ENOMEM;
+
+ for (i = 0; i < hub->num_heads; i++) {
+ child = of_get_next_child(pdev->dev.of_node, child);
+ if (!child) {
+ dev_err(&pdev->dev, "failed to find node for head %u\n",
+ i);
+ return -ENODEV;
+ }
+
+ clk = devm_get_clk_from_child(&pdev->dev, child, "dc");
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "failed to get clock for head %u\n",
+ i);
+ of_node_put(child);
+ return PTR_ERR(clk);
+ }
+
+ hub->clk_heads[i] = clk;
+ }
+
+ of_node_put(child);
+
+ /* XXX: enable clock across reset? */
+ err = reset_control_assert(hub->rst);
+ if (err < 0)
+ return err;
+
+ platform_set_drvdata(pdev, hub);
+ pm_runtime_enable(&pdev->dev);
+
+ INIT_LIST_HEAD(&hub->client.list);
+ hub->client.ops = &tegra_display_hub_ops;
+ hub->client.dev = &pdev->dev;
+
+ err = host1x_client_register(&hub->client);
+ if (err < 0)
+ dev_err(&pdev->dev, "failed to register host1x client: %d\n",
+ err);
+
+ err = devm_of_platform_populate(&pdev->dev);
+ if (err < 0)
+ goto unregister;
+
+ return err;
+
+unregister:
+ host1x_client_unregister(&hub->client);
+ pm_runtime_disable(&pdev->dev);
+ return err;
+}
+
+static int tegra_display_hub_remove(struct platform_device *pdev)
+{
+ struct tegra_display_hub *hub = platform_get_drvdata(pdev);
+ unsigned int i;
+ int err;
+
+ err = host1x_client_unregister(&hub->client);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
+ err);
+ }
+
+ for (i = 0; i < hub->soc->num_wgrps; i++) {
+ struct tegra_windowgroup *wgrp = &hub->wgrps[i];
+
+ mutex_destroy(&wgrp->lock);
+ }
+
+ pm_runtime_disable(&pdev->dev);
+
+ return err;
+}
+
+static const struct tegra_display_hub_soc tegra186_display_hub = {
+ .num_wgrps = 6,
+ .supports_dsc = true,
+};
+
+static const struct tegra_display_hub_soc tegra194_display_hub = {
+ .num_wgrps = 6,
+ .supports_dsc = false,
+};
+
+static const struct of_device_id tegra_display_hub_of_match[] = {
+ {
+ .compatible = "nvidia,tegra194-display",
+ .data = &tegra194_display_hub
+ }, {
+ .compatible = "nvidia,tegra186-display",
+ .data = &tegra186_display_hub
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(of, tegra_display_hub_of_match);
+
+struct platform_driver tegra_display_hub_driver = {
+ .driver = {
+ .name = "tegra-display-hub",
+ .of_match_table = tegra_display_hub_of_match,
+ },
+ .probe = tegra_display_hub_probe,
+ .remove = tegra_display_hub_remove,
+};
diff --git a/drivers/gpu/drm/tegra/hub.h b/drivers/gpu/drm/tegra/hub.h
new file mode 100644
index 000000000..23c4b2115
--- /dev/null
+++ b/drivers/gpu/drm/tegra/hub.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2017 NVIDIA CORPORATION. All rights reserved.
+ */
+
+#ifndef TEGRA_HUB_H
+#define TEGRA_HUB_H 1
+
+#include <drm/drm_plane.h>
+
+#include "plane.h"
+
+struct tegra_dc;
+
+struct tegra_windowgroup {
+ unsigned int usecount;
+ struct mutex lock;
+
+ unsigned int index;
+ struct host1x_client *parent;
+ struct reset_control *rst;
+};
+
+struct tegra_shared_plane {
+ struct tegra_plane base;
+ struct tegra_windowgroup *wgrp;
+};
+
+static inline struct tegra_shared_plane *
+to_tegra_shared_plane(struct drm_plane *plane)
+{
+ return container_of(plane, struct tegra_shared_plane, base.base);
+}
+
+struct tegra_display_hub_soc {
+ unsigned int num_wgrps;
+ bool supports_dsc;
+};
+
+struct tegra_display_hub {
+ struct drm_private_obj base;
+ struct host1x_client client;
+ struct clk *clk_disp;
+ struct clk *clk_dsc;
+ struct clk *clk_hub;
+ struct reset_control *rst;
+
+ unsigned int num_heads;
+ struct clk **clk_heads;
+
+ const struct tegra_display_hub_soc *soc;
+ struct tegra_windowgroup *wgrps;
+};
+
+static inline struct tegra_display_hub *
+to_tegra_display_hub(struct host1x_client *client)
+{
+ return container_of(client, struct tegra_display_hub, client);
+}
+
+struct tegra_display_hub_state {
+ struct drm_private_state base;
+
+ struct tegra_dc *dc;
+ unsigned long rate;
+ struct clk *clk;
+};
+
+static inline struct tegra_display_hub_state *
+to_tegra_display_hub_state(struct drm_private_state *priv)
+{
+ return container_of(priv, struct tegra_display_hub_state, base);
+}
+
+struct tegra_plane;
+
+int tegra_display_hub_prepare(struct tegra_display_hub *hub);
+void tegra_display_hub_cleanup(struct tegra_display_hub *hub);
+
+struct drm_plane *tegra_shared_plane_create(struct drm_device *drm,
+ struct tegra_dc *dc,
+ unsigned int wgrp,
+ unsigned int index);
+
+int tegra_display_hub_atomic_check(struct drm_device *drm,
+ struct drm_atomic_state *state);
+void tegra_display_hub_atomic_commit(struct drm_device *drm,
+ struct drm_atomic_state *state);
+
+#define DC_CMD_IHUB_COMMON_MISC_CTL 0x068
+#define LATENCY_EVENT (1 << 3)
+
+#define DC_DISP_IHUB_COMMON_DISPLAY_FETCH_METER 0x451
+#define CURS_SLOTS(x) (((x) & 0xff) << 8)
+#define WGRP_SLOTS(x) (((x) & 0xff) << 0)
+
+#endif /* TEGRA_HUB_H */
diff --git a/drivers/gpu/drm/tegra/mipi-phy.c b/drivers/gpu/drm/tegra/mipi-phy.c
new file mode 100644
index 000000000..4fe05f2df
--- /dev/null
+++ b/drivers/gpu/drm/tegra/mipi-phy.c
@@ -0,0 +1,134 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 NVIDIA Corporation
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+
+#include "mipi-phy.h"
+
+/*
+ * Default D-PHY timings based on MIPI D-PHY specification. Derived from the
+ * valid ranges specified in Section 6.9, Table 14, Page 40 of the D-PHY
+ * specification (v1.2) with minor adjustments.
+ */
+int mipi_dphy_timing_get_default(struct mipi_dphy_timing *timing,
+ unsigned long period)
+{
+ timing->clkmiss = 0;
+ timing->clkpost = 70 + 52 * period;
+ timing->clkpre = 8;
+ timing->clkprepare = 65;
+ timing->clksettle = 95;
+ timing->clktermen = 0;
+ timing->clktrail = 80;
+ timing->clkzero = 260;
+ timing->dtermen = 0;
+ timing->eot = 0;
+ timing->hsexit = 120;
+ timing->hsprepare = 65 + 5 * period;
+ timing->hszero = 145 + 5 * period;
+ timing->hssettle = 85 + 6 * period;
+ timing->hsskip = 40;
+
+ /*
+ * The MIPI D-PHY specification (Section 6.9, v1.2, Table 14, Page 40)
+ * contains this formula as:
+ *
+ * T_HS-TRAIL = max(n * 8 * period, 60 + n * 4 * period)
+ *
+ * where n = 1 for forward-direction HS mode and n = 4 for reverse-
+ * direction HS mode. There's only one setting and this function does
+ * not parameterize on anything other than period, so this code
+ * assumes that reverse-direction HS mode is supported and uses n = 4.
+ */
+ timing->hstrail = max(4 * 8 * period, 60 + 4 * 4 * period);
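+
+	/*
+	 * Worked example (assuming a 4 ns period): the formula above yields
+	 * max(4 * 8 * 4, 60 + 4 * 4 * 4) = max(128, 124) = 128 ns.
+	 */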
+
+ timing->init = 100000;
+ timing->lpx = 60;
+ timing->taget = 5 * timing->lpx;
+ timing->tago = 4 * timing->lpx;
+ timing->tasure = 2 * timing->lpx;
+ timing->wakeup = 1000000;
+
+ return 0;
+}
+
+/*
+ * Validate D-PHY timing according to the MIPI D-PHY specification (v1.2,
+ * Section 6.9 "Global Operation Timing Parameters").
+ */
+int mipi_dphy_timing_validate(struct mipi_dphy_timing *timing,
+ unsigned long period)
+{
+ if (timing->clkmiss > 60)
+ return -EINVAL;
+
+ if (timing->clkpost < (60 + 52 * period))
+ return -EINVAL;
+
+ if (timing->clkpre < 8)
+ return -EINVAL;
+
+ if (timing->clkprepare < 38 || timing->clkprepare > 95)
+ return -EINVAL;
+
+ if (timing->clksettle < 95 || timing->clksettle > 300)
+ return -EINVAL;
+
+ if (timing->clktermen > 38)
+ return -EINVAL;
+
+ if (timing->clktrail < 60)
+ return -EINVAL;
+
+ if (timing->clkprepare + timing->clkzero < 300)
+ return -EINVAL;
+
+ if (timing->dtermen > 35 + 4 * period)
+ return -EINVAL;
+
+ if (timing->eot > 105 + 12 * period)
+ return -EINVAL;
+
+ if (timing->hsexit < 100)
+ return -EINVAL;
+
+ if (timing->hsprepare < 40 + 4 * period ||
+ timing->hsprepare > 85 + 6 * period)
+ return -EINVAL;
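+
+	/*
+	 * Sanity example (assuming a 4 ns period): hsprepare must fall
+	 * within [40 + 4 * 4, 85 + 6 * 4] = [56, 109] ns, and the default
+	 * of 65 + 5 * 4 = 85 ns from mipi_dphy_timing_get_default() does.
+	 */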
+
+ if (timing->hsprepare + timing->hszero < 145 + 10 * period)
+ return -EINVAL;
+
+ if ((timing->hssettle < 85 + 6 * period) ||
+ (timing->hssettle > 145 + 10 * period))
+ return -EINVAL;
+
+ if (timing->hsskip < 40 || timing->hsskip > 55 + 4 * period)
+ return -EINVAL;
+
+ if (timing->hstrail < max(8 * period, 60 + 4 * period))
+ return -EINVAL;
+
+ if (timing->init < 100000)
+ return -EINVAL;
+
+ if (timing->lpx < 50)
+ return -EINVAL;
+
+ if (timing->taget != 5 * timing->lpx)
+ return -EINVAL;
+
+ if (timing->tago != 4 * timing->lpx)
+ return -EINVAL;
+
+ if (timing->tasure < timing->lpx || timing->tasure > 2 * timing->lpx)
+ return -EINVAL;
+
+ if (timing->wakeup < 1000000)
+ return -EINVAL;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/tegra/mipi-phy.h b/drivers/gpu/drm/tegra/mipi-phy.h
new file mode 100644
index 000000000..41889a750
--- /dev/null
+++ b/drivers/gpu/drm/tegra/mipi-phy.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2013 NVIDIA Corporation
+ */
+
+#ifndef DRM_TEGRA_MIPI_PHY_H
+#define DRM_TEGRA_MIPI_PHY_H
+
+/*
+ * D-PHY timing parameters
+ *
+ * A detailed description of these parameters can be found in the MIPI
+ * Alliance Specification for D-PHY, Section 5.9 "Global Operation Timing
+ * Parameters".
+ *
+ * All parameters are specified in nanoseconds.
+ */
+struct mipi_dphy_timing {
+ unsigned int clkmiss;
+ unsigned int clkpost;
+ unsigned int clkpre;
+ unsigned int clkprepare;
+ unsigned int clksettle;
+ unsigned int clktermen;
+ unsigned int clktrail;
+ unsigned int clkzero;
+ unsigned int dtermen;
+ unsigned int eot;
+ unsigned int hsexit;
+ unsigned int hsprepare;
+ unsigned int hszero;
+ unsigned int hssettle;
+ unsigned int hsskip;
+ unsigned int hstrail;
+ unsigned int init;
+ unsigned int lpx;
+ unsigned int taget;
+ unsigned int tago;
+ unsigned int tasure;
+ unsigned int wakeup;
+};
+
+int mipi_dphy_timing_get_default(struct mipi_dphy_timing *timing,
+ unsigned long period);
+int mipi_dphy_timing_validate(struct mipi_dphy_timing *timing,
+ unsigned long period);
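+
+/*
+ * Typical usage (a minimal sketch; "dev" and "period" are placeholders):
+ * fill in the default timings for a given period, then validate them:
+ *
+ *	struct mipi_dphy_timing timing;
+ *	int err;
+ *
+ *	mipi_dphy_timing_get_default(&timing, period);
+ *	err = mipi_dphy_timing_validate(&timing, period);
+ *	if (err < 0)
+ *		dev_err(dev, "invalid D-PHY timing: %d\n", err);
+ */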
+
+#endif
diff --git a/drivers/gpu/drm/tegra/nvdec.c b/drivers/gpu/drm/tegra/nvdec.c
new file mode 100644
index 000000000..276fe0472
--- /dev/null
+++ b/drivers/gpu/drm/tegra/nvdec.c
@@ -0,0 +1,478 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015-2021, NVIDIA Corporation.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/host1x.h>
+#include <linux/iommu.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+
+#include <soc/tegra/pmc.h>
+
+#include "drm.h"
+#include "falcon.h"
+#include "vic.h"
+
+#define NVDEC_TFBIF_TRANSCFG 0x2c44
+
+struct nvdec_config {
+ const char *firmware;
+ unsigned int version;
+ bool supports_sid;
+};
+
+struct nvdec {
+ struct falcon falcon;
+
+ void __iomem *regs;
+ struct tegra_drm_client client;
+ struct host1x_channel *channel;
+ struct device *dev;
+ struct clk *clk;
+
+ /* Platform configuration */
+ const struct nvdec_config *config;
+};
+
+static inline struct nvdec *to_nvdec(struct tegra_drm_client *client)
+{
+ return container_of(client, struct nvdec, client);
+}
+
+static inline void nvdec_writel(struct nvdec *nvdec, u32 value,
+ unsigned int offset)
+{
+ writel(value, nvdec->regs + offset);
+}
+
+static int nvdec_boot(struct nvdec *nvdec)
+{
+#ifdef CONFIG_IOMMU_API
+ struct iommu_fwspec *spec = dev_iommu_fwspec_get(nvdec->dev);
+#endif
+ int err;
+
+#ifdef CONFIG_IOMMU_API
+ if (nvdec->config->supports_sid && spec) {
+ u32 value;
+
+ value = TRANSCFG_ATT(1, TRANSCFG_SID_FALCON) | TRANSCFG_ATT(0, TRANSCFG_SID_HW);
+ nvdec_writel(nvdec, value, NVDEC_TFBIF_TRANSCFG);
+
+ if (spec->num_ids > 0) {
+ value = spec->ids[0] & 0xffff;
+
+ nvdec_writel(nvdec, value, VIC_THI_STREAMID0);
+ nvdec_writel(nvdec, value, VIC_THI_STREAMID1);
+ }
+ }
+#endif
+
+ err = falcon_boot(&nvdec->falcon);
+ if (err < 0)
+ return err;
+
+ err = falcon_wait_idle(&nvdec->falcon);
+ if (err < 0) {
+ dev_err(nvdec->dev, "falcon boot timed out\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int nvdec_init(struct host1x_client *client)
+{
+ struct tegra_drm_client *drm = host1x_to_drm_client(client);
+ struct drm_device *dev = dev_get_drvdata(client->host);
+ struct tegra_drm *tegra = dev->dev_private;
+ struct nvdec *nvdec = to_nvdec(drm);
+ int err;
+
+ err = host1x_client_iommu_attach(client);
+ if (err < 0 && err != -ENODEV) {
+ dev_err(nvdec->dev, "failed to attach to domain: %d\n", err);
+ return err;
+ }
+
+ nvdec->channel = host1x_channel_request(client);
+ if (!nvdec->channel) {
+ err = -ENOMEM;
+ goto detach;
+ }
+
+ client->syncpts[0] = host1x_syncpt_request(client, 0);
+ if (!client->syncpts[0]) {
+ err = -ENOMEM;
+ goto free_channel;
+ }
+
+ pm_runtime_enable(client->dev);
+ pm_runtime_use_autosuspend(client->dev);
+ pm_runtime_set_autosuspend_delay(client->dev, 500);
+
+ err = tegra_drm_register_client(tegra, drm);
+ if (err < 0)
+ goto disable_rpm;
+
+ /*
+ * Inherit the DMA parameters (such as maximum segment size) from the
+ * parent host1x device.
+ */
+ client->dev->dma_parms = client->host->dma_parms;
+
+ return 0;
+
+disable_rpm:
+ pm_runtime_dont_use_autosuspend(client->dev);
+ pm_runtime_force_suspend(client->dev);
+
+ host1x_syncpt_put(client->syncpts[0]);
+free_channel:
+ host1x_channel_put(nvdec->channel);
+detach:
+ host1x_client_iommu_detach(client);
+
+ return err;
+}
+
+static int nvdec_exit(struct host1x_client *client)
+{
+ struct tegra_drm_client *drm = host1x_to_drm_client(client);
+ struct drm_device *dev = dev_get_drvdata(client->host);
+ struct tegra_drm *tegra = dev->dev_private;
+ struct nvdec *nvdec = to_nvdec(drm);
+ int err;
+
+ /* avoid a dangling pointer just in case this disappears */
+ client->dev->dma_parms = NULL;
+
+ err = tegra_drm_unregister_client(tegra, drm);
+ if (err < 0)
+ return err;
+
+ pm_runtime_dont_use_autosuspend(client->dev);
+ pm_runtime_force_suspend(client->dev);
+
+ host1x_syncpt_put(client->syncpts[0]);
+ host1x_channel_put(nvdec->channel);
+ host1x_client_iommu_detach(client);
+
+ nvdec->channel = NULL;
+
+ if (client->group) {
+ dma_unmap_single(nvdec->dev, nvdec->falcon.firmware.phys,
+ nvdec->falcon.firmware.size, DMA_TO_DEVICE);
+ tegra_drm_free(tegra, nvdec->falcon.firmware.size,
+ nvdec->falcon.firmware.virt,
+ nvdec->falcon.firmware.iova);
+ } else {
+ dma_free_coherent(nvdec->dev, nvdec->falcon.firmware.size,
+ nvdec->falcon.firmware.virt,
+ nvdec->falcon.firmware.iova);
+ }
+
+ return 0;
+}
+
+static const struct host1x_client_ops nvdec_client_ops = {
+ .init = nvdec_init,
+ .exit = nvdec_exit,
+};
+
+static int nvdec_load_firmware(struct nvdec *nvdec)
+{
+ struct host1x_client *client = &nvdec->client.base;
+ struct tegra_drm *tegra = nvdec->client.drm;
+ dma_addr_t iova;
+ size_t size;
+ void *virt;
+ int err;
+
+ if (nvdec->falcon.firmware.virt)
+ return 0;
+
+ err = falcon_read_firmware(&nvdec->falcon, nvdec->config->firmware);
+ if (err < 0)
+ return err;
+
+ size = nvdec->falcon.firmware.size;
+
+	if (!client->group) {
+		virt = dma_alloc_coherent(nvdec->dev, size, &iova, GFP_KERNEL);
+		if (!virt)
+			return -ENOMEM;
+	} else {
+		virt = tegra_drm_alloc(tegra, size, &iova);
+		if (IS_ERR(virt))
+			return PTR_ERR(virt);
+	}
+
+ nvdec->falcon.firmware.virt = virt;
+ nvdec->falcon.firmware.iova = iova;
+
+ err = falcon_load_firmware(&nvdec->falcon);
+ if (err < 0)
+ goto cleanup;
+
+ /*
+ * In this case we have received an IOVA from the shared domain, so we
+ * need to make sure to get the physical address so that the DMA API
+ * knows what memory pages to flush the cache for.
+ */
+ if (client->group) {
+ dma_addr_t phys;
+
+ phys = dma_map_single(nvdec->dev, virt, size, DMA_TO_DEVICE);
+
+ err = dma_mapping_error(nvdec->dev, phys);
+ if (err < 0)
+ goto cleanup;
+
+ nvdec->falcon.firmware.phys = phys;
+ }
+
+ return 0;
+
+cleanup:
+ if (!client->group)
+ dma_free_coherent(nvdec->dev, size, virt, iova);
+ else
+ tegra_drm_free(tegra, size, virt, iova);
+
+ return err;
+}
+
+static __maybe_unused int nvdec_runtime_resume(struct device *dev)
+{
+ struct nvdec *nvdec = dev_get_drvdata(dev);
+ int err;
+
+ err = clk_prepare_enable(nvdec->clk);
+ if (err < 0)
+ return err;
+
+ usleep_range(10, 20);
+
+ err = nvdec_load_firmware(nvdec);
+ if (err < 0)
+ goto disable;
+
+ err = nvdec_boot(nvdec);
+ if (err < 0)
+ goto disable;
+
+ return 0;
+
+disable:
+ clk_disable_unprepare(nvdec->clk);
+ return err;
+}
+
+static __maybe_unused int nvdec_runtime_suspend(struct device *dev)
+{
+ struct nvdec *nvdec = dev_get_drvdata(dev);
+
+ host1x_channel_stop(nvdec->channel);
+
+ clk_disable_unprepare(nvdec->clk);
+
+ return 0;
+}
+
+static int nvdec_open_channel(struct tegra_drm_client *client,
+ struct tegra_drm_context *context)
+{
+ struct nvdec *nvdec = to_nvdec(client);
+
+ context->channel = host1x_channel_get(nvdec->channel);
+ if (!context->channel)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void nvdec_close_channel(struct tegra_drm_context *context)
+{
+ host1x_channel_put(context->channel);
+}
+
+static int nvdec_can_use_memory_ctx(struct tegra_drm_client *client, bool *supported)
+{
+ *supported = true;
+
+ return 0;
+}
+
+static const struct tegra_drm_client_ops nvdec_ops = {
+ .open_channel = nvdec_open_channel,
+ .close_channel = nvdec_close_channel,
+ .submit = tegra_drm_submit,
+ .get_streamid_offset = tegra_drm_get_streamid_offset_thi,
+ .can_use_memory_ctx = nvdec_can_use_memory_ctx,
+};
+
+#define NVIDIA_TEGRA_210_NVDEC_FIRMWARE "nvidia/tegra210/nvdec.bin"
+
+static const struct nvdec_config nvdec_t210_config = {
+ .firmware = NVIDIA_TEGRA_210_NVDEC_FIRMWARE,
+ .version = 0x21,
+ .supports_sid = false,
+};
+
+#define NVIDIA_TEGRA_186_NVDEC_FIRMWARE "nvidia/tegra186/nvdec.bin"
+
+static const struct nvdec_config nvdec_t186_config = {
+ .firmware = NVIDIA_TEGRA_186_NVDEC_FIRMWARE,
+ .version = 0x18,
+ .supports_sid = true,
+};
+
+#define NVIDIA_TEGRA_194_NVDEC_FIRMWARE "nvidia/tegra194/nvdec.bin"
+
+static const struct nvdec_config nvdec_t194_config = {
+ .firmware = NVIDIA_TEGRA_194_NVDEC_FIRMWARE,
+ .version = 0x19,
+ .supports_sid = true,
+};
+
+static const struct of_device_id tegra_nvdec_of_match[] = {
+ { .compatible = "nvidia,tegra210-nvdec", .data = &nvdec_t210_config },
+ { .compatible = "nvidia,tegra186-nvdec", .data = &nvdec_t186_config },
+ { .compatible = "nvidia,tegra194-nvdec", .data = &nvdec_t194_config },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tegra_nvdec_of_match);
+
+static int nvdec_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct host1x_syncpt **syncpts;
+ struct nvdec *nvdec;
+ u32 host_class;
+ int err;
+
+ /* inherit DMA mask from host1x parent */
+ err = dma_coerce_mask_and_coherent(dev, *dev->parent->dma_mask);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err);
+ return err;
+ }
+
+ nvdec = devm_kzalloc(dev, sizeof(*nvdec), GFP_KERNEL);
+ if (!nvdec)
+ return -ENOMEM;
+
+ nvdec->config = of_device_get_match_data(dev);
+
+ syncpts = devm_kzalloc(dev, sizeof(*syncpts), GFP_KERNEL);
+ if (!syncpts)
+ return -ENOMEM;
+
+ nvdec->regs = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
+ if (IS_ERR(nvdec->regs))
+ return PTR_ERR(nvdec->regs);
+
+ nvdec->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(nvdec->clk)) {
+ dev_err(&pdev->dev, "failed to get clock\n");
+ return PTR_ERR(nvdec->clk);
+ }
+
+ err = clk_set_rate(nvdec->clk, ULONG_MAX);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to set clock rate\n");
+ return err;
+ }
+
+ err = of_property_read_u32(dev->of_node, "nvidia,host1x-class", &host_class);
+ if (err < 0)
+ host_class = HOST1X_CLASS_NVDEC;
+
+ nvdec->falcon.dev = dev;
+ nvdec->falcon.regs = nvdec->regs;
+
+ err = falcon_init(&nvdec->falcon);
+ if (err < 0)
+ return err;
+
+ platform_set_drvdata(pdev, nvdec);
+
+ INIT_LIST_HEAD(&nvdec->client.base.list);
+ nvdec->client.base.ops = &nvdec_client_ops;
+ nvdec->client.base.dev = dev;
+ nvdec->client.base.class = host_class;
+ nvdec->client.base.syncpts = syncpts;
+ nvdec->client.base.num_syncpts = 1;
+ nvdec->dev = dev;
+
+ INIT_LIST_HEAD(&nvdec->client.list);
+ nvdec->client.version = nvdec->config->version;
+ nvdec->client.ops = &nvdec_ops;
+
+ err = host1x_client_register(&nvdec->client.base);
+ if (err < 0) {
+ dev_err(dev, "failed to register host1x client: %d\n", err);
+ goto exit_falcon;
+ }
+
+ return 0;
+
+exit_falcon:
+ falcon_exit(&nvdec->falcon);
+
+ return err;
+}
+
+static int nvdec_remove(struct platform_device *pdev)
+{
+ struct nvdec *nvdec = platform_get_drvdata(pdev);
+ int err;
+
+ err = host1x_client_unregister(&nvdec->client.base);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
+ err);
+ return err;
+ }
+
+ falcon_exit(&nvdec->falcon);
+
+ return 0;
+}
+
+static const struct dev_pm_ops nvdec_pm_ops = {
+ SET_RUNTIME_PM_OPS(nvdec_runtime_suspend, nvdec_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+};
+
+struct platform_driver tegra_nvdec_driver = {
+ .driver = {
+ .name = "tegra-nvdec",
+ .of_match_table = tegra_nvdec_of_match,
+ .pm = &nvdec_pm_ops
+ },
+ .probe = nvdec_probe,
+ .remove = nvdec_remove,
+};
+
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
+MODULE_FIRMWARE(NVIDIA_TEGRA_210_NVDEC_FIRMWARE);
+#endif
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC)
+MODULE_FIRMWARE(NVIDIA_TEGRA_186_NVDEC_FIRMWARE);
+#endif
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC)
+MODULE_FIRMWARE(NVIDIA_TEGRA_194_NVDEC_FIRMWARE);
+#endif
diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c
new file mode 100644
index 000000000..47d26b5d9
--- /dev/null
+++ b/drivers/gpu/drm/tegra/output.c
@@ -0,0 +1,265 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_simple_kms_helper.h>
+
+#include "drm.h"
+#include "dc.h"
+
+#include <media/cec-notifier.h>
+
+int tegra_output_connector_get_modes(struct drm_connector *connector)
+{
+ struct tegra_output *output = connector_to_output(connector);
+ struct edid *edid = NULL;
+ int err = 0;
+
+ /*
+ * If the panel provides one or more modes, use them exclusively and
+ * ignore any other means of obtaining a mode.
+ */
+ if (output->panel) {
+ err = drm_panel_get_modes(output->panel, connector);
+ if (err > 0)
+ return err;
+ }
+
+ if (output->edid)
+ edid = kmemdup(output->edid, sizeof(*edid), GFP_KERNEL);
+ else if (output->ddc)
+ edid = drm_get_edid(connector, output->ddc);
+
+ cec_notifier_set_phys_addr_from_edid(output->cec, edid);
+ drm_connector_update_edid_property(connector, edid);
+
+ if (edid) {
+ err = drm_add_edid_modes(connector, edid);
+ kfree(edid);
+ }
+
+ return err;
+}
+
+enum drm_connector_status
+tegra_output_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct tegra_output *output = connector_to_output(connector);
+ enum drm_connector_status status = connector_status_unknown;
+
+ if (output->hpd_gpio) {
+ if (gpiod_get_value(output->hpd_gpio) == 0)
+ status = connector_status_disconnected;
+ else
+ status = connector_status_connected;
+ } else {
+ if (!output->panel)
+ status = connector_status_disconnected;
+ else
+ status = connector_status_connected;
+ }
+
+ if (status != connector_status_connected)
+ cec_notifier_phys_addr_invalidate(output->cec);
+
+ return status;
+}
+
+void tegra_output_connector_destroy(struct drm_connector *connector)
+{
+ struct tegra_output *output = connector_to_output(connector);
+
+ if (output->cec)
+ cec_notifier_conn_unregister(output->cec);
+
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+}
+
+static irqreturn_t hpd_irq(int irq, void *data)
+{
+ struct tegra_output *output = data;
+
+ if (output->connector.dev)
+ drm_helper_hpd_irq_event(output->connector.dev);
+
+ return IRQ_HANDLED;
+}
+
+int tegra_output_probe(struct tegra_output *output)
+{
+ struct device_node *ddc, *panel;
+ unsigned long flags;
+ int err, size;
+
+ if (!output->of_node)
+ output->of_node = output->dev->of_node;
+
+ err = drm_of_find_panel_or_bridge(output->of_node, -1, -1,
+ &output->panel, &output->bridge);
+ if (err && err != -ENODEV)
+ return err;
+
+ panel = of_parse_phandle(output->of_node, "nvidia,panel", 0);
+ if (panel) {
+ /*
+		 * Don't mix the nvidia,panel phandle with the graph in a
+		 * device-tree.
+ */
+ WARN_ON(output->panel || output->bridge);
+
+ output->panel = of_drm_find_panel(panel);
+ of_node_put(panel);
+
+ if (IS_ERR(output->panel))
+ return PTR_ERR(output->panel);
+ }
+
+ output->edid = of_get_property(output->of_node, "nvidia,edid", &size);
+
+ ddc = of_parse_phandle(output->of_node, "nvidia,ddc-i2c-bus", 0);
+ if (ddc) {
+ output->ddc = of_get_i2c_adapter_by_node(ddc);
+ of_node_put(ddc);
+
+ if (!output->ddc) {
+ err = -EPROBE_DEFER;
+ return err;
+ }
+ }
+
+ output->hpd_gpio = devm_gpiod_get_from_of_node(output->dev,
+ output->of_node,
+ "nvidia,hpd-gpio", 0,
+ GPIOD_IN,
+ "HDMI hotplug detect");
+ if (IS_ERR(output->hpd_gpio)) {
+ if (PTR_ERR(output->hpd_gpio) != -ENOENT)
+ return PTR_ERR(output->hpd_gpio);
+
+ output->hpd_gpio = NULL;
+ }
+
+ if (output->hpd_gpio) {
+ err = gpiod_to_irq(output->hpd_gpio);
+ if (err < 0) {
+ dev_err(output->dev, "gpiod_to_irq(): %d\n", err);
+ return err;
+ }
+
+ output->hpd_irq = err;
+
+ flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT;
+
+ err = request_threaded_irq(output->hpd_irq, NULL, hpd_irq,
+ flags, "hpd", output);
+ if (err < 0) {
+ dev_err(output->dev, "failed to request IRQ#%u: %d\n",
+ output->hpd_irq, err);
+ return err;
+ }
+
+ output->connector.polled = DRM_CONNECTOR_POLL_HPD;
+
+ /*
+ * Disable the interrupt until the connector has been
+ * initialized to avoid a race in the hotplug interrupt
+ * handler.
+ */
+ disable_irq(output->hpd_irq);
+ }
+
+ return 0;
+}
+
+void tegra_output_remove(struct tegra_output *output)
+{
+ if (output->hpd_gpio)
+ free_irq(output->hpd_irq, output);
+
+ if (output->ddc)
+ i2c_put_adapter(output->ddc);
+}
+
+int tegra_output_init(struct drm_device *drm, struct tegra_output *output)
+{
+ int connector_type;
+
+ /*
+ * The connector is now registered and ready to receive hotplug events
+ * so the hotplug interrupt can be enabled.
+ */
+ if (output->hpd_gpio)
+ enable_irq(output->hpd_irq);
+
+ connector_type = output->connector.connector_type;
+ /*
+	 * Create a CEC notifier for the HDMI connector.
+ */
+ if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
+ connector_type == DRM_MODE_CONNECTOR_HDMIB) {
+ struct cec_connector_info conn_info;
+
+ cec_fill_conn_info_from_drm(&conn_info, &output->connector);
+ output->cec = cec_notifier_conn_register(output->dev, NULL,
+ &conn_info);
+ if (!output->cec)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void tegra_output_exit(struct tegra_output *output)
+{
+ /*
+ * The connector is going away, so the interrupt must be disabled to
+ * prevent the hotplug interrupt handler from potentially crashing.
+ */
+ if (output->hpd_gpio)
+ disable_irq(output->hpd_irq);
+}
+
+void tegra_output_find_possible_crtcs(struct tegra_output *output,
+ struct drm_device *drm)
+{
+ struct device *dev = output->dev;
+ struct drm_crtc *crtc;
+ unsigned int mask = 0;
+
+ drm_for_each_crtc(crtc, drm) {
+ struct tegra_dc *dc = to_tegra_dc(crtc);
+
+ if (tegra_dc_has_output(dc, dev))
+ mask |= drm_crtc_mask(crtc);
+ }
+
+ if (mask == 0) {
+ dev_warn(dev, "missing output definition for heads in DT\n");
+ mask = 0x3;
+ }
+
+ output->encoder.possible_crtcs = mask;
+}
+
+int tegra_output_suspend(struct tegra_output *output)
+{
+ if (output->hpd_irq)
+ disable_irq(output->hpd_irq);
+
+ return 0;
+}
+
+int tegra_output_resume(struct tegra_output *output)
+{
+ if (output->hpd_irq)
+ enable_irq(output->hpd_irq);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/tegra/plane.c b/drivers/gpu/drm/tegra/plane.c
new file mode 100644
index 000000000..100901168
--- /dev/null
+++ b/drivers/gpu/drm/tegra/plane.c
@@ -0,0 +1,798 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2017 NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/iommu.h>
+#include <linux/interconnect.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_atomic_helper.h>
+
+#include "dc.h"
+#include "plane.h"
+
+static void tegra_plane_destroy(struct drm_plane *plane)
+{
+ struct tegra_plane *p = to_tegra_plane(plane);
+
+ drm_plane_cleanup(plane);
+ kfree(p);
+}
+
+static void tegra_plane_reset(struct drm_plane *plane)
+{
+ struct tegra_plane *p = to_tegra_plane(plane);
+ struct tegra_plane_state *state;
+ unsigned int i;
+
+ if (plane->state)
+ __drm_atomic_helper_plane_destroy_state(plane->state);
+
+ kfree(plane->state);
+ plane->state = NULL;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (state) {
+ plane->state = &state->base;
+ plane->state->plane = plane;
+ plane->state->zpos = p->index;
+ plane->state->normalized_zpos = p->index;
+
+ for (i = 0; i < 3; i++)
+ state->iova[i] = DMA_MAPPING_ERROR;
+ }
+}
+
+static struct drm_plane_state *
+tegra_plane_atomic_duplicate_state(struct drm_plane *plane)
+{
+ struct tegra_plane_state *state = to_tegra_plane_state(plane->state);
+ struct tegra_plane_state *copy;
+ unsigned int i;
+
+ copy = kmalloc(sizeof(*copy), GFP_KERNEL);
+ if (!copy)
+ return NULL;
+
+ __drm_atomic_helper_plane_duplicate_state(plane, &copy->base);
+ copy->tiling = state->tiling;
+ copy->format = state->format;
+ copy->swap = state->swap;
+ copy->reflect_x = state->reflect_x;
+ copy->reflect_y = state->reflect_y;
+ copy->opaque = state->opaque;
+ copy->total_peak_memory_bandwidth = state->total_peak_memory_bandwidth;
+ copy->peak_memory_bandwidth = state->peak_memory_bandwidth;
+ copy->avg_memory_bandwidth = state->avg_memory_bandwidth;
+
+ for (i = 0; i < 2; i++)
+ copy->blending[i] = state->blending[i];
+
+ for (i = 0; i < 3; i++) {
+ copy->iova[i] = DMA_MAPPING_ERROR;
+ copy->map[i] = NULL;
+ }
+
+ return &copy->base;
+}
+
+static void tegra_plane_atomic_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ __drm_atomic_helper_plane_destroy_state(state);
+ kfree(state);
+}
+
+static bool tegra_plane_supports_sector_layout(struct drm_plane *plane)
+{
+ struct drm_crtc *crtc;
+
+ drm_for_each_crtc(crtc, plane->dev) {
+ if (plane->possible_crtcs & drm_crtc_mask(crtc)) {
+ struct tegra_dc *dc = to_tegra_dc(crtc);
+
+ if (!dc->soc->supports_sector_layout)
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static bool tegra_plane_format_mod_supported(struct drm_plane *plane,
+ uint32_t format,
+ uint64_t modifier)
+{
+ const struct drm_format_info *info = drm_format_info(format);
+
+ if (modifier == DRM_FORMAT_MOD_LINEAR)
+ return true;
+
+ /* check for the sector layout bit */
+ if (fourcc_mod_is_vendor(modifier, NVIDIA)) {
+ if (modifier & DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT) {
+ if (!tegra_plane_supports_sector_layout(plane))
+ return false;
+ }
+ }
+
+ if (info->num_planes == 1)
+ return true;
+
+ return false;
+}
+
+const struct drm_plane_funcs tegra_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = tegra_plane_destroy,
+ .reset = tegra_plane_reset,
+ .atomic_duplicate_state = tegra_plane_atomic_duplicate_state,
+ .atomic_destroy_state = tegra_plane_atomic_destroy_state,
+ .format_mod_supported = tegra_plane_format_mod_supported,
+};
+
+static int tegra_dc_pin(struct tegra_dc *dc, struct tegra_plane_state *state)
+{
+ unsigned int i;
+ int err;
+
+ for (i = 0; i < state->base.fb->format->num_planes; i++) {
+ struct tegra_bo *bo = tegra_fb_get_plane(state->base.fb, i);
+ struct host1x_bo_mapping *map;
+
+ map = host1x_bo_pin(dc->dev, &bo->base, DMA_TO_DEVICE, &dc->client.cache);
+ if (IS_ERR(map)) {
+ err = PTR_ERR(map);
+ goto unpin;
+ }
+
+ if (!dc->client.group) {
+ /*
+ * The display controller needs contiguous memory, so
+ * fail if the buffer is discontiguous and we fail to
+ * map its SG table to a single contiguous chunk of
+ * I/O virtual memory.
+ */
+ if (map->chunks > 1) {
+ err = -EINVAL;
+ goto unpin;
+ }
+
+ state->iova[i] = map->phys;
+ } else {
+ state->iova[i] = bo->iova;
+ }
+
+ state->map[i] = map;
+ }
+
+ return 0;
+
+unpin:
+ dev_err(dc->dev, "failed to map plane %u: %d\n", i, err);
+
+ while (i--) {
+ host1x_bo_unpin(state->map[i]);
+ state->iova[i] = DMA_MAPPING_ERROR;
+ state->map[i] = NULL;
+ }
+
+ return err;
+}
+
+static void tegra_dc_unpin(struct tegra_dc *dc, struct tegra_plane_state *state)
+{
+ unsigned int i;
+
+ for (i = 0; i < state->base.fb->format->num_planes; i++) {
+ host1x_bo_unpin(state->map[i]);
+ state->iova[i] = DMA_MAPPING_ERROR;
+ state->map[i] = NULL;
+ }
+}
+
+int tegra_plane_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct tegra_dc *dc = to_tegra_dc(state->crtc);
+ int err;
+
+ if (!state->fb)
+ return 0;
+
+ err = drm_gem_plane_helper_prepare_fb(plane, state);
+ if (err < 0)
+ return err;
+
+ return tegra_dc_pin(dc, to_tegra_plane_state(state));
+}
+
+void tegra_plane_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct tegra_dc *dc = to_tegra_dc(state->crtc);
+
+ if (dc)
+ tegra_dc_unpin(dc, to_tegra_plane_state(state));
+}
+
+static int tegra_plane_calculate_memory_bandwidth(struct drm_plane_state *state)
+{
+ struct tegra_plane_state *tegra_state = to_tegra_plane_state(state);
+ unsigned int i, bpp, dst_w, dst_h, src_w, src_h, mul;
+ const struct tegra_dc_soc_info *soc;
+ const struct drm_format_info *fmt;
+ struct drm_crtc_state *crtc_state;
+ u64 avg_bandwidth, peak_bandwidth;
+
+ if (!state->visible)
+ return 0;
+
+ crtc_state = drm_atomic_get_new_crtc_state(state->state, state->crtc);
+ if (!crtc_state)
+ return -EINVAL;
+
+ src_w = drm_rect_width(&state->src) >> 16;
+ src_h = drm_rect_height(&state->src) >> 16;
+ dst_w = drm_rect_width(&state->dst);
+ dst_h = drm_rect_height(&state->dst);
+
+ fmt = state->fb->format;
+ soc = to_tegra_dc(state->crtc)->soc;
+
+ /*
+	 * Note that the real memory bandwidth varies depending on the format
+	 * and memory layout; we don't take that into account because a small
+	 * estimation error isn't important, since the bandwidth is rounded
+	 * up anyway.
+ */
+ for (i = 0, bpp = 0; i < fmt->num_planes; i++) {
+ unsigned int bpp_plane = fmt->cpp[i] * 8;
+
+ /*
+ * Sub-sampling is relevant for chroma planes only and vertical
+ * readouts are not cached, hence only horizontal sub-sampling
+ * matters.
+ */
+ if (i > 0)
+ bpp_plane /= fmt->hsub;
+
+ bpp += bpp_plane;
+ }
+
+ /* average bandwidth in kbytes/sec */
+ avg_bandwidth = min(src_w, dst_w) * min(src_h, dst_h);
+ avg_bandwidth *= drm_mode_vrefresh(&crtc_state->adjusted_mode);
+ avg_bandwidth = DIV_ROUND_UP(avg_bandwidth * bpp, 8) + 999;
+ do_div(avg_bandwidth, 1000);
+
+ /* mode.clock in kHz, peak bandwidth in kbytes/sec */
+ peak_bandwidth = DIV_ROUND_UP(crtc_state->adjusted_mode.clock * bpp, 8);
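+
+	/*
+	 * Worked example (hypothetical mode): a full-screen 1920x1080
+	 * ARGB8888 plane (bpp = 32) at 60 Hz yields an average bandwidth of
+	 * 1920 * 1080 * 60 * 32 / 8 = 497664000 bytes/sec, i.e. roughly
+	 * 497664 kbytes/sec, while a 148.5 MHz pixel clock results in a
+	 * peak bandwidth of 148500 * 32 / 8 = 594000 kbytes/sec.
+	 */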
+
+ /*
+	 * The Tegra30/114 memory controller can't interleave DC memory
+	 * requests for tiled windows because the DC uses a 16-byte atom,
+	 * while DDR3 uses a 32-byte atom. Hence there is a 2x memory
+	 * overfetch for tiled framebuffers with DDR3 on these SoCs.
+ */
+ if (soc->plane_tiled_memory_bandwidth_x2 &&
+ tegra_state->tiling.mode == TEGRA_BO_TILING_MODE_TILED)
+ mul = 2;
+ else
+ mul = 1;
+
+ /* ICC bandwidth in kbytes/sec */
+ tegra_state->peak_memory_bandwidth = kBps_to_icc(peak_bandwidth) * mul;
+ tegra_state->avg_memory_bandwidth = kBps_to_icc(avg_bandwidth) * mul;
+
+ return 0;
+}
+
+int tegra_plane_state_add(struct tegra_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct drm_crtc_state *crtc_state;
+ struct tegra_dc_state *tegra;
+ int err;
+
+ /* Propagate errors from allocation or locking failures. */
+ crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ /* Check plane state for visibility and calculate clipping bounds */
+ err = drm_atomic_helper_check_plane_state(state, crtc_state,
+ 0, INT_MAX, true, true);
+ if (err < 0)
+ return err;
+
+ err = tegra_plane_calculate_memory_bandwidth(state);
+ if (err < 0)
+ return err;
+
+ tegra = to_dc_state(crtc_state);
+
+ tegra->planes |= WIN_A_ACT_REQ << plane->index;
+
+ return 0;
+}
+
+int tegra_plane_format(u32 fourcc, u32 *format, u32 *swap)
+{
+ /* assume no swapping of fetched data */
+ if (swap)
+ *swap = BYTE_SWAP_NOSWAP;
+
+ switch (fourcc) {
+ case DRM_FORMAT_ARGB4444:
+ *format = WIN_COLOR_DEPTH_B4G4R4A4;
+ break;
+
+ case DRM_FORMAT_ARGB1555:
+ *format = WIN_COLOR_DEPTH_B5G5R5A1;
+ break;
+
+ case DRM_FORMAT_RGB565:
+ *format = WIN_COLOR_DEPTH_B5G6R5;
+ break;
+
+ case DRM_FORMAT_RGBA5551:
+ *format = WIN_COLOR_DEPTH_A1B5G5R5;
+ break;
+
+ case DRM_FORMAT_ARGB8888:
+ *format = WIN_COLOR_DEPTH_B8G8R8A8;
+ break;
+
+ case DRM_FORMAT_ABGR8888:
+ *format = WIN_COLOR_DEPTH_R8G8B8A8;
+ break;
+
+ case DRM_FORMAT_ABGR4444:
+ *format = WIN_COLOR_DEPTH_R4G4B4A4;
+ break;
+
+ case DRM_FORMAT_ABGR1555:
+ *format = WIN_COLOR_DEPTH_R5G5B5A;
+ break;
+
+ case DRM_FORMAT_BGRA5551:
+ *format = WIN_COLOR_DEPTH_AR5G5B5;
+ break;
+
+ case DRM_FORMAT_XRGB1555:
+ *format = WIN_COLOR_DEPTH_B5G5R5X1;
+ break;
+
+ case DRM_FORMAT_RGBX5551:
+ *format = WIN_COLOR_DEPTH_X1B5G5R5;
+ break;
+
+ case DRM_FORMAT_XBGR1555:
+ *format = WIN_COLOR_DEPTH_R5G5B5X1;
+ break;
+
+ case DRM_FORMAT_BGRX5551:
+ *format = WIN_COLOR_DEPTH_X1R5G5B5;
+ break;
+
+ case DRM_FORMAT_BGR565:
+ *format = WIN_COLOR_DEPTH_R5G6B5;
+ break;
+
+ case DRM_FORMAT_BGRA8888:
+ *format = WIN_COLOR_DEPTH_A8R8G8B8;
+ break;
+
+ case DRM_FORMAT_RGBA8888:
+ *format = WIN_COLOR_DEPTH_A8B8G8R8;
+ break;
+
+ case DRM_FORMAT_XRGB8888:
+ *format = WIN_COLOR_DEPTH_B8G8R8X8;
+ break;
+
+ case DRM_FORMAT_XBGR8888:
+ *format = WIN_COLOR_DEPTH_R8G8B8X8;
+ break;
+
+ case DRM_FORMAT_UYVY:
+ *format = WIN_COLOR_DEPTH_YCbCr422;
+ break;
+
+ case DRM_FORMAT_YUYV:
+ if (!swap)
+ return -EINVAL;
+
+ *format = WIN_COLOR_DEPTH_YCbCr422;
+ *swap = BYTE_SWAP_SWAP2;
+ break;
+
+ case DRM_FORMAT_YVYU:
+ if (!swap)
+ return -EINVAL;
+
+ *format = WIN_COLOR_DEPTH_YCbCr422;
+ *swap = BYTE_SWAP_SWAP4;
+ break;
+
+ case DRM_FORMAT_VYUY:
+ if (!swap)
+ return -EINVAL;
+
+ *format = WIN_COLOR_DEPTH_YCbCr422;
+ *swap = BYTE_SWAP_SWAP4HW;
+ break;
+
+ case DRM_FORMAT_YUV420:
+ *format = WIN_COLOR_DEPTH_YCbCr420P;
+ break;
+
+ case DRM_FORMAT_YUV422:
+ *format = WIN_COLOR_DEPTH_YCbCr422P;
+ break;
+
+ case DRM_FORMAT_YUV444:
+ *format = WIN_COLOR_DEPTH_YCbCr444P;
+ break;
+
+ case DRM_FORMAT_NV12:
+ *format = WIN_COLOR_DEPTH_YCbCr420SP;
+ break;
+
+ case DRM_FORMAT_NV21:
+ *format = WIN_COLOR_DEPTH_YCrCb420SP;
+ break;
+
+ case DRM_FORMAT_NV16:
+ *format = WIN_COLOR_DEPTH_YCbCr422SP;
+ break;
+
+ case DRM_FORMAT_NV61:
+ *format = WIN_COLOR_DEPTH_YCrCb422SP;
+ break;
+
+ case DRM_FORMAT_NV24:
+ *format = WIN_COLOR_DEPTH_YCbCr444SP;
+ break;
+
+ case DRM_FORMAT_NV42:
+ *format = WIN_COLOR_DEPTH_YCrCb444SP;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+bool tegra_plane_format_is_indexed(unsigned int format)
+{
+ switch (format) {
+ case WIN_COLOR_DEPTH_P1:
+ case WIN_COLOR_DEPTH_P2:
+ case WIN_COLOR_DEPTH_P4:
+ case WIN_COLOR_DEPTH_P8:
+ return true;
+ }
+
+ return false;
+}
+
+bool tegra_plane_format_is_yuv(unsigned int format, unsigned int *planes, unsigned int *bpc)
+{
+ switch (format) {
+ case WIN_COLOR_DEPTH_YCbCr422:
+ case WIN_COLOR_DEPTH_YUV422:
+ if (planes)
+ *planes = 1;
+
+ if (bpc)
+ *bpc = 8;
+
+ return true;
+
+ case WIN_COLOR_DEPTH_YCbCr420P:
+ case WIN_COLOR_DEPTH_YUV420P:
+ case WIN_COLOR_DEPTH_YCbCr422P:
+ case WIN_COLOR_DEPTH_YUV422P:
+ case WIN_COLOR_DEPTH_YCbCr422R:
+ case WIN_COLOR_DEPTH_YUV422R:
+ case WIN_COLOR_DEPTH_YCbCr422RA:
+ case WIN_COLOR_DEPTH_YUV422RA:
+ case WIN_COLOR_DEPTH_YCbCr444P:
+ if (planes)
+ *planes = 3;
+
+ if (bpc)
+ *bpc = 8;
+
+ return true;
+
+ case WIN_COLOR_DEPTH_YCrCb420SP:
+ case WIN_COLOR_DEPTH_YCbCr420SP:
+ case WIN_COLOR_DEPTH_YCrCb422SP:
+ case WIN_COLOR_DEPTH_YCbCr422SP:
+ case WIN_COLOR_DEPTH_YCrCb444SP:
+ case WIN_COLOR_DEPTH_YCbCr444SP:
+ if (planes)
+ *planes = 2;
+
+ if (bpc)
+ *bpc = 8;
+
+ return true;
+ }
+
+ if (planes)
+ *planes = 1;
+
+ return false;
+}
+
+static bool __drm_format_has_alpha(u32 format)
+{
+ switch (format) {
+ case DRM_FORMAT_ARGB1555:
+ case DRM_FORMAT_RGBA5551:
+ case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_ARGB8888:
+ return true;
+ }
+
+ return false;
+}
+
+static int tegra_plane_format_get_alpha(unsigned int opaque,
+ unsigned int *alpha)
+{
+ if (tegra_plane_format_is_yuv(opaque, NULL, NULL)) {
+ *alpha = opaque;
+ return 0;
+ }
+
+ switch (opaque) {
+ case WIN_COLOR_DEPTH_B5G5R5X1:
+ *alpha = WIN_COLOR_DEPTH_B5G5R5A1;
+ return 0;
+
+ case WIN_COLOR_DEPTH_X1B5G5R5:
+ *alpha = WIN_COLOR_DEPTH_A1B5G5R5;
+ return 0;
+
+ case WIN_COLOR_DEPTH_R8G8B8X8:
+ *alpha = WIN_COLOR_DEPTH_R8G8B8A8;
+ return 0;
+
+ case WIN_COLOR_DEPTH_B8G8R8X8:
+ *alpha = WIN_COLOR_DEPTH_B8G8R8A8;
+ return 0;
+
+ case WIN_COLOR_DEPTH_B5G6R5:
+ *alpha = opaque;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+/*
+ * This is applicable to Tegra20 and Tegra30 only where the opaque formats can
+ * be emulated using the alpha formats and alpha blending disabled.
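+ * For example, an opaque B8G8R8X8 framebuffer is programmed using the
+ * B8G8R8A8 format with state->opaque set, so that blending of the (unused)
+ * alpha component can be disabled.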
+ */
+static int tegra_plane_setup_opacity(struct tegra_plane *tegra,
+ struct tegra_plane_state *state)
+{
+ unsigned int format;
+ int err;
+
+ switch (state->format) {
+ case WIN_COLOR_DEPTH_B5G5R5A1:
+ case WIN_COLOR_DEPTH_A1B5G5R5:
+ case WIN_COLOR_DEPTH_R8G8B8A8:
+ case WIN_COLOR_DEPTH_B8G8R8A8:
+ state->opaque = false;
+ break;
+
+ default:
+ err = tegra_plane_format_get_alpha(state->format, &format);
+ if (err < 0)
+ return err;
+
+ state->format = format;
+ state->opaque = true;
+ break;
+ }
+
+ return 0;
+}
+
+static int tegra_plane_check_transparency(struct tegra_plane *tegra,
+ struct tegra_plane_state *state)
+{
+ struct drm_plane_state *old, *plane_state;
+ struct drm_plane *plane;
+
+ old = drm_atomic_get_old_plane_state(state->base.state, &tegra->base);
+
+ /* check if zpos / transparency changed */
+ if (old->normalized_zpos == state->base.normalized_zpos &&
+ to_tegra_plane_state(old)->opaque == state->opaque)
+ return 0;
+
+ /* include all sibling planes into this commit */
+ drm_for_each_plane(plane, tegra->base.dev) {
+ struct tegra_plane *p = to_tegra_plane(plane);
+
+ /* skip this plane and planes on different CRTCs */
+ if (p == tegra || p->dc != tegra->dc)
+ continue;
+
+ plane_state = drm_atomic_get_plane_state(state->base.state,
+ plane);
+ if (IS_ERR(plane_state))
+ return PTR_ERR(plane_state);
+ }
+
+ return 1;
+}
+
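+/*
+ * Compute the position of @other among the (up to two) sibling windows of
+ * @plane, i.e. the index into the legacy blending state array that tracks
+ * the overlap with @other.
+ */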
+static unsigned int tegra_plane_get_overlap_index(struct tegra_plane *plane,
+ struct tegra_plane *other)
+{
+ unsigned int index = 0, i;
+
+ WARN_ON(plane == other);
+
+ for (i = 0; i < 3; i++) {
+ if (i == plane->index)
+ continue;
+
+ if (i == other->index)
+ break;
+
+ index++;
+ }
+
+ return index;
+}
+
+static void tegra_plane_update_transparency(struct tegra_plane *tegra,
+ struct tegra_plane_state *state)
+{
+ struct drm_plane_state *new;
+ struct drm_plane *plane;
+ unsigned int i;
+
+ for_each_new_plane_in_state(state->base.state, plane, new, i) {
+ struct tegra_plane *p = to_tegra_plane(plane);
+ unsigned index;
+
+ /* skip this plane and planes on different CRTCs */
+ if (p == tegra || p->dc != tegra->dc)
+ continue;
+
+ index = tegra_plane_get_overlap_index(tegra, p);
+
+ if (new->fb && __drm_format_has_alpha(new->fb->format->format))
+ state->blending[index].alpha = true;
+ else
+ state->blending[index].alpha = false;
+
+ if (new->normalized_zpos > state->base.normalized_zpos)
+ state->blending[index].top = true;
+ else
+ state->blending[index].top = false;
+
+ /*
+		 * A missing framebuffer means that the plane is disabled; in
+		 * this case mark the B / C window as top so that the window
+		 * index order with respect to zpos can be differentiated when
+		 * programming the X / Y registers of the middle window.
+ */
+ if (!new->fb)
+ state->blending[index].top = (index == 1);
+ }
+}
+
+static int tegra_plane_setup_transparency(struct tegra_plane *tegra,
+ struct tegra_plane_state *state)
+{
+ struct tegra_plane_state *tegra_state;
+ struct drm_plane_state *new;
+ struct drm_plane *plane;
+ int err;
+
+ /*
+	 * If a plane's zpos / transparency changed, the blending state of
+	 * the sibling planes may require adjustment, in which case they will
+	 * be included in this atomic commit. Otherwise the blending state is
+	 * unchanged.
+ */
+ err = tegra_plane_check_transparency(tegra, state);
+ if (err <= 0)
+ return err;
+
+ /*
+	 * All planes are now in the atomic state; walk through them and
+	 * update the transparency state for each plane.
+ */
+ drm_for_each_plane(plane, tegra->base.dev) {
+ struct tegra_plane *p = to_tegra_plane(plane);
+
+ /* skip planes on different CRTCs */
+ if (p->dc != tegra->dc)
+ continue;
+
+ new = drm_atomic_get_new_plane_state(state->base.state, plane);
+ tegra_state = to_tegra_plane_state(new);
+
+ /*
+ * There is no need to update blending state for the disabled
+ * plane.
+ */
+ if (new->fb)
+ tegra_plane_update_transparency(p, tegra_state);
+ }
+
+ return 0;
+}
+
+int tegra_plane_setup_legacy_state(struct tegra_plane *tegra,
+ struct tegra_plane_state *state)
+{
+ int err;
+
+ err = tegra_plane_setup_opacity(tegra, state);
+ if (err < 0)
+ return err;
+
+ err = tegra_plane_setup_transparency(tegra, state);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static const char * const tegra_plane_icc_names[TEGRA_DC_LEGACY_PLANES_NUM] = {
+ "wina", "winb", "winc", NULL, NULL, NULL, "cursor",
+};
+
+int tegra_plane_interconnect_init(struct tegra_plane *plane)
+{
+ const char *icc_name = tegra_plane_icc_names[plane->index];
+ struct device *dev = plane->dc->dev;
+ struct tegra_dc *dc = plane->dc;
+ int err;
+
+ if (WARN_ON(plane->index >= TEGRA_DC_LEGACY_PLANES_NUM) ||
+ WARN_ON(!tegra_plane_icc_names[plane->index]))
+ return -EINVAL;
+
+ plane->icc_mem = devm_of_icc_get(dev, icc_name);
+ err = PTR_ERR_OR_ZERO(plane->icc_mem);
+ if (err) {
+ dev_err_probe(dev, err, "failed to get %s interconnect\n",
+ icc_name);
+ return err;
+ }
+
+ /* plane B on T20/30 has a dedicated memory client for a 6-tap vertical filter */
+ if (plane->index == 1 && dc->soc->has_win_b_vfilter_mem_client) {
+ plane->icc_mem_vfilter = devm_of_icc_get(dev, "winb-vfilter");
+ err = PTR_ERR_OR_ZERO(plane->icc_mem_vfilter);
+ if (err) {
+ dev_err_probe(dev, err, "failed to get %s interconnect\n",
+ "winb-vfilter");
+ return err;
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/tegra/plane.h b/drivers/gpu/drm/tegra/plane.h
new file mode 100644
index 000000000..e33a581e6
--- /dev/null
+++ b/drivers/gpu/drm/tegra/plane.h
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2017 NVIDIA CORPORATION. All rights reserved.
+ */
+
+#ifndef TEGRA_PLANE_H
+#define TEGRA_PLANE_H 1
+
+#include <drm/drm_plane.h>
+
+struct icc_path;
+struct tegra_bo;
+struct tegra_dc;
+
+struct tegra_plane {
+ struct drm_plane base;
+ struct tegra_dc *dc;
+ unsigned int offset;
+ unsigned int index;
+
+ struct icc_path *icc_mem;
+ struct icc_path *icc_mem_vfilter;
+};
+
+struct tegra_cursor {
+ struct tegra_plane base;
+
+ struct tegra_bo *bo;
+ unsigned int width;
+ unsigned int height;
+};
+
+static inline struct tegra_plane *to_tegra_plane(struct drm_plane *plane)
+{
+ return container_of(plane, struct tegra_plane, base);
+}
+
+struct tegra_plane_legacy_blending_state {
+ bool alpha;
+ bool top;
+};
+
+struct tegra_plane_state {
+ struct drm_plane_state base;
+
+ struct host1x_bo_mapping *map[3];
+ dma_addr_t iova[3];
+
+ struct tegra_bo_tiling tiling;
+ u32 format;
+ u32 swap;
+
+ bool reflect_x;
+ bool reflect_y;
+
+ /* used for legacy blending support only */
+ struct tegra_plane_legacy_blending_state blending[2];
+ bool opaque;
+
+ /* bandwidths are in ICC units, i.e. kbytes/sec */
+ u32 total_peak_memory_bandwidth;
+ u32 peak_memory_bandwidth;
+ u32 avg_memory_bandwidth;
+};
+
+static inline struct tegra_plane_state *
+to_tegra_plane_state(struct drm_plane_state *state)
+{
+ if (state)
+ return container_of(state, struct tegra_plane_state, base);
+
+ return NULL;
+}
+
+static inline const struct tegra_plane_state *
+to_const_tegra_plane_state(const struct drm_plane_state *state)
+{
+ return to_tegra_plane_state((struct drm_plane_state *)state);
+}
+
+extern const struct drm_plane_funcs tegra_plane_funcs;
+
+int tegra_plane_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *state);
+void tegra_plane_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *state);
+
+int tegra_plane_state_add(struct tegra_plane *plane,
+ struct drm_plane_state *state);
+
+int tegra_plane_format(u32 fourcc, u32 *format, u32 *swap);
+bool tegra_plane_format_is_indexed(unsigned int format);
+bool tegra_plane_format_is_yuv(unsigned int format, unsigned int *planes, unsigned int *bpc);
+int tegra_plane_setup_legacy_state(struct tegra_plane *tegra,
+ struct tegra_plane_state *state);
+int tegra_plane_interconnect_init(struct tegra_plane *plane);
+
+#endif /* TEGRA_PLANE_H */
diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c
new file mode 100644
index 000000000..ff8fce36d
--- /dev/null
+++ b/drivers/gpu/drm/tegra/rgb.c
@@ -0,0 +1,355 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <linux/clk.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge_connector.h>
+#include <drm/drm_simple_kms_helper.h>
+
+#include "drm.h"
+#include "dc.h"
+
+struct tegra_rgb {
+ struct tegra_output output;
+ struct tegra_dc *dc;
+
+ struct clk *pll_d_out0;
+ struct clk *pll_d2_out0;
+ struct clk *clk_parent;
+ struct clk *clk;
+};
+
+static inline struct tegra_rgb *to_rgb(struct tegra_output *output)
+{
+ return container_of(output, struct tegra_rgb, output);
+}
+
+struct reg_entry {
+ unsigned long offset;
+ unsigned long value;
+};
+
+static const struct reg_entry rgb_enable[] = {
+ { DC_COM_PIN_OUTPUT_ENABLE(0), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_ENABLE(1), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_ENABLE(2), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_ENABLE(3), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_POLARITY(0), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_POLARITY(1), 0x01000000 },
+ { DC_COM_PIN_OUTPUT_POLARITY(2), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_POLARITY(3), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_DATA(0), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_DATA(1), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_DATA(2), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_DATA(3), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_SELECT(0), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_SELECT(1), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_SELECT(2), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_SELECT(3), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_SELECT(4), 0x00210222 },
+ { DC_COM_PIN_OUTPUT_SELECT(5), 0x00002200 },
+ { DC_COM_PIN_OUTPUT_SELECT(6), 0x00020000 },
+};
+
+static const struct reg_entry rgb_disable[] = {
+ { DC_COM_PIN_OUTPUT_SELECT(6), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_SELECT(5), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_SELECT(4), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_SELECT(3), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_SELECT(2), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_SELECT(1), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_SELECT(0), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_DATA(3), 0xaaaaaaaa },
+ { DC_COM_PIN_OUTPUT_DATA(2), 0xaaaaaaaa },
+ { DC_COM_PIN_OUTPUT_DATA(1), 0xaaaaaaaa },
+ { DC_COM_PIN_OUTPUT_DATA(0), 0xaaaaaaaa },
+ { DC_COM_PIN_OUTPUT_POLARITY(3), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_POLARITY(2), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_POLARITY(1), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_POLARITY(0), 0x00000000 },
+ { DC_COM_PIN_OUTPUT_ENABLE(3), 0x55555555 },
+ { DC_COM_PIN_OUTPUT_ENABLE(2), 0x55555555 },
+ { DC_COM_PIN_OUTPUT_ENABLE(1), 0x55150005 },
+ { DC_COM_PIN_OUTPUT_ENABLE(0), 0x55555555 },
+};
+
+static void tegra_dc_write_regs(struct tegra_dc *dc,
+ const struct reg_entry *table,
+ unsigned int num)
+{
+ unsigned int i;
+
+ for (i = 0; i < num; i++)
+ tegra_dc_writel(dc, table[i].value, table[i].offset);
+}
+
+static void tegra_rgb_encoder_disable(struct drm_encoder *encoder)
+{
+ struct tegra_output *output = encoder_to_output(encoder);
+ struct tegra_rgb *rgb = to_rgb(output);
+
+ tegra_dc_write_regs(rgb->dc, rgb_disable, ARRAY_SIZE(rgb_disable));
+ tegra_dc_commit(rgb->dc);
+}
+
+static void tegra_rgb_encoder_enable(struct drm_encoder *encoder)
+{
+ struct tegra_output *output = encoder_to_output(encoder);
+ struct tegra_rgb *rgb = to_rgb(output);
+ u32 value;
+
+ tegra_dc_write_regs(rgb->dc, rgb_enable, ARRAY_SIZE(rgb_enable));
+
+ value = DE_SELECT_ACTIVE | DE_CONTROL_NORMAL;
+ tegra_dc_writel(rgb->dc, value, DC_DISP_DATA_ENABLE_OPTIONS);
+
+ /* XXX: parameterize? */
+ value = tegra_dc_readl(rgb->dc, DC_COM_PIN_OUTPUT_POLARITY(1));
+ value &= ~LVS_OUTPUT_POLARITY_LOW;
+ value &= ~LHS_OUTPUT_POLARITY_LOW;
+ tegra_dc_writel(rgb->dc, value, DC_COM_PIN_OUTPUT_POLARITY(1));
+
+ /* XXX: parameterize? */
+ value = DISP_DATA_FORMAT_DF1P1C | DISP_ALIGNMENT_MSB |
+ DISP_ORDER_RED_BLUE;
+ tegra_dc_writel(rgb->dc, value, DC_DISP_DISP_INTERFACE_CONTROL);
+
+ tegra_dc_commit(rgb->dc);
+}
+
+static bool tegra_rgb_pll_rate_change_allowed(struct tegra_rgb *rgb)
+{
+ if (!rgb->pll_d2_out0)
+ return false;
+
+ if (!clk_is_match(rgb->clk_parent, rgb->pll_d_out0) &&
+ !clk_is_match(rgb->clk_parent, rgb->pll_d2_out0))
+ return false;
+
+ return true;
+}
+
+static int
+tegra_rgb_encoder_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct tegra_output *output = encoder_to_output(encoder);
+ struct tegra_dc *dc = to_tegra_dc(conn_state->crtc);
+ unsigned long pclk = crtc_state->mode.clock * 1000;
+ struct tegra_rgb *rgb = to_rgb(output);
+ unsigned int div;
+ int err;
+
+ /*
+ * We may not want to change the frequency of the parent clock, since
+ * it may be a parent for other peripherals. This is due to the fact
+ * that on Tegra20 there's only a single clock dedicated to display
+ * (pll_d_out0), whereas later generations have a second one that can
+ * be used to independently drive a second output (pll_d2_out0).
+ *
+ * As a way to support multiple outputs on Tegra20 as well, pll_p is
+ * typically used as the parent clock for the display controllers.
+ * But this comes at a cost: pll_p is the parent of several other
+ * peripherals, so its frequency shouldn't change out of the blue.
+ *
+ * The best we can do at this point is to use the shift clock divider
+ * and hope that the desired frequency can be matched (or at least
+ * matched sufficiently close that the panel will still work).
+ */
+ if (tegra_rgb_pll_rate_change_allowed(rgb)) {
+ /*
+		 * Set the display controller clock to 2x the pixel clock in
+		 * order to produce higher-resolution pulse positions.
+ */
+ div = 2;
+ pclk *= 2;
+ } else {
+ div = ((clk_get_rate(rgb->clk) * 2) / pclk) - 2;
+ pclk = 0;
+ }
+
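+	/*
+	 * Worked example for the divider path above (assumed rates): with
+	 * the display clock running at 216 MHz and a 54 MHz pixel clock
+	 * requested, the divider becomes ((216000000 * 2) / 54000000) - 2 =
+	 * 6, which inverts to pclk = rate * 2 / (div + 2) = 54 MHz.
+	 */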
+ err = tegra_dc_state_setup_clock(dc, crtc_state, rgb->clk_parent,
+ pclk, div);
+ if (err < 0) {
+ dev_err(output->dev, "failed to setup CRTC state: %d\n", err);
+ return err;
+ }
+
+ return err;
+}
+
+static const struct drm_encoder_helper_funcs tegra_rgb_encoder_helper_funcs = {
+ .disable = tegra_rgb_encoder_disable,
+ .enable = tegra_rgb_encoder_enable,
+ .atomic_check = tegra_rgb_encoder_atomic_check,
+};
+
+int tegra_dc_rgb_probe(struct tegra_dc *dc)
+{
+ struct device_node *np;
+ struct tegra_rgb *rgb;
+ int err;
+
+ np = of_get_child_by_name(dc->dev->of_node, "rgb");
+ if (!np || !of_device_is_available(np))
+ return -ENODEV;
+
+ rgb = devm_kzalloc(dc->dev, sizeof(*rgb), GFP_KERNEL);
+ if (!rgb)
+ return -ENOMEM;
+
+ rgb->output.dev = dc->dev;
+ rgb->output.of_node = np;
+ rgb->dc = dc;
+
+ err = tegra_output_probe(&rgb->output);
+ if (err < 0)
+ return err;
+
+ rgb->clk = devm_clk_get(dc->dev, NULL);
+ if (IS_ERR(rgb->clk)) {
+ dev_err(dc->dev, "failed to get clock\n");
+ return PTR_ERR(rgb->clk);
+ }
+
+ rgb->clk_parent = devm_clk_get(dc->dev, "parent");
+ if (IS_ERR(rgb->clk_parent)) {
+ dev_err(dc->dev, "failed to get parent clock\n");
+ return PTR_ERR(rgb->clk_parent);
+ }
+
+ err = clk_set_parent(rgb->clk, rgb->clk_parent);
+ if (err < 0) {
+ dev_err(dc->dev, "failed to set parent clock: %d\n", err);
+ return err;
+ }
+
+ rgb->pll_d_out0 = clk_get_sys(NULL, "pll_d_out0");
+ if (IS_ERR(rgb->pll_d_out0)) {
+ err = PTR_ERR(rgb->pll_d_out0);
+ dev_err(dc->dev, "failed to get pll_d_out0: %d\n", err);
+ return err;
+ }
+
+ if (dc->soc->has_pll_d2_out0) {
+ rgb->pll_d2_out0 = clk_get_sys(NULL, "pll_d2_out0");
+ if (IS_ERR(rgb->pll_d2_out0)) {
+ err = PTR_ERR(rgb->pll_d2_out0);
+ dev_err(dc->dev, "failed to get pll_d2_out0: %d\n", err);
+ return err;
+ }
+ }
+
+ dc->rgb = &rgb->output;
+
+ return 0;
+}
+
+int tegra_dc_rgb_remove(struct tegra_dc *dc)
+{
+ struct tegra_rgb *rgb;
+
+ if (!dc->rgb)
+ return 0;
+
+ rgb = to_rgb(dc->rgb);
+ clk_put(rgb->pll_d2_out0);
+ clk_put(rgb->pll_d_out0);
+
+ tegra_output_remove(dc->rgb);
+ dc->rgb = NULL;
+
+ return 0;
+}
+
+int tegra_dc_rgb_init(struct drm_device *drm, struct tegra_dc *dc)
+{
+ struct tegra_output *output = dc->rgb;
+ struct drm_connector *connector;
+ int err;
+
+ if (!dc->rgb)
+ return -ENODEV;
+
+ drm_simple_encoder_init(drm, &output->encoder, DRM_MODE_ENCODER_LVDS);
+ drm_encoder_helper_add(&output->encoder,
+ &tegra_rgb_encoder_helper_funcs);
+
+ /*
+	 * Wrap a directly-connected panel into a DRM bridge in order to let
+	 * the DRM core handle the panel for us.
+ */
+ if (output->panel) {
+ output->bridge = devm_drm_panel_bridge_add(output->dev,
+ output->panel);
+ if (IS_ERR(output->bridge)) {
+ dev_err(output->dev,
+ "failed to wrap panel into bridge: %pe\n",
+ output->bridge);
+ return PTR_ERR(output->bridge);
+ }
+
+ output->panel = NULL;
+ }
+
+ /*
+ * Tegra devices that have an LVDS panel use an LVDS encoder bridge
+ * to convert up to 28 LCD LVTTL lanes into 5/4 LVDS lanes that go
+ * to the display panel's receiver.
+ *
+ * The encoder usually has a power-down control which needs to be
+ * enabled in order to transmit data to the panel. Historically,
+ * devices that use an older device-tree version didn't model the
+ * bridge, assuming that the encoder is turned on by default, while
+ * today's DRM allows us to model the LVDS encoder properly.
+ *
+ * Newer device-trees utilize the LVDS encoder bridge, which provides
+ * us with a connector and handles the display panel.
+ *
+ * For older device-trees we wrapped the panel into the panel-bridge
+ * above.
+ */
+ if (output->bridge) {
+ err = drm_bridge_attach(&output->encoder, output->bridge,
+ NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ if (err)
+ return err;
+
+ connector = drm_bridge_connector_init(drm, &output->encoder);
+ if (IS_ERR(connector)) {
+ dev_err(output->dev,
+ "failed to initialize bridge connector: %pe\n",
+ connector);
+ return PTR_ERR(connector);
+ }
+
+ drm_connector_attach_encoder(connector, &output->encoder);
+ }
+
+ err = tegra_output_init(drm, output);
+ if (err < 0) {
+ dev_err(output->dev, "failed to initialize output: %d\n", err);
+ return err;
+ }
+
+ /*
+ * Other outputs can be attached to either display controller. The RGB
+ * outputs are an exception and work only with their parent display
+ * controller.
+ */
+ output->encoder.possible_crtcs = drm_crtc_mask(&dc->base);
+
+ return 0;
+}
+
+int tegra_dc_rgb_exit(struct tegra_dc *dc)
+{
+ if (dc->rgb)
+ tegra_output_exit(dc->rgb);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
new file mode 100644
index 000000000..77723d5f1
--- /dev/null
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -0,0 +1,4063 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 NVIDIA Corporation
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/debugfs.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+
+#include <soc/tegra/pmc.h>
+
+#include <drm/display/drm_dp_helper.h>
+#include <drm/display/drm_scdc_helper.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_debugfs.h>
+#include <drm/drm_file.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_simple_kms_helper.h>
+
+#include "dc.h"
+#include "dp.h"
+#include "drm.h"
+#include "hda.h"
+#include "sor.h"
+#include "trace.h"
+
+#define SOR_REKEY 0x38
+
+struct tegra_sor_hdmi_settings {
+ unsigned long frequency;
+
+ u8 vcocap;
+ u8 filter;
+ u8 ichpmp;
+ u8 loadadj;
+ u8 tmds_termadj;
+ u8 tx_pu_value;
+ u8 bg_temp_coef;
+ u8 bg_vref_level;
+ u8 avdd10_level;
+ u8 avdd14_level;
+ u8 sparepll;
+
+ u8 drive_current[4];
+ u8 preemphasis[4];
+};
+
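+/*
+ * Two alternative TMDS tuning tables are kept for Tegra210; the "#if 1"
+ * below selects the first and compiles the second out.
+ */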
+#if 1
+static const struct tegra_sor_hdmi_settings tegra210_sor_hdmi_defaults[] = {
+ {
+ .frequency = 54000000,
+ .vcocap = 0x0,
+ .filter = 0x0,
+ .ichpmp = 0x1,
+ .loadadj = 0x3,
+ .tmds_termadj = 0x9,
+ .tx_pu_value = 0x10,
+ .bg_temp_coef = 0x3,
+ .bg_vref_level = 0x8,
+ .avdd10_level = 0x4,
+ .avdd14_level = 0x4,
+ .sparepll = 0x0,
+ .drive_current = { 0x33, 0x3a, 0x3a, 0x3a },
+ .preemphasis = { 0x00, 0x00, 0x00, 0x00 },
+ }, {
+ .frequency = 75000000,
+ .vcocap = 0x3,
+ .filter = 0x0,
+ .ichpmp = 0x1,
+ .loadadj = 0x3,
+ .tmds_termadj = 0x9,
+ .tx_pu_value = 0x40,
+ .bg_temp_coef = 0x3,
+ .bg_vref_level = 0x8,
+ .avdd10_level = 0x4,
+ .avdd14_level = 0x4,
+ .sparepll = 0x0,
+ .drive_current = { 0x33, 0x3a, 0x3a, 0x3a },
+ .preemphasis = { 0x00, 0x00, 0x00, 0x00 },
+ }, {
+ .frequency = 150000000,
+ .vcocap = 0x3,
+ .filter = 0x0,
+ .ichpmp = 0x1,
+ .loadadj = 0x3,
+ .tmds_termadj = 0x9,
+ .tx_pu_value = 0x66,
+ .bg_temp_coef = 0x3,
+ .bg_vref_level = 0x8,
+ .avdd10_level = 0x4,
+ .avdd14_level = 0x4,
+ .sparepll = 0x0,
+ .drive_current = { 0x33, 0x3a, 0x3a, 0x3a },
+ .preemphasis = { 0x00, 0x00, 0x00, 0x00 },
+ }, {
+ .frequency = 300000000,
+ .vcocap = 0x3,
+ .filter = 0x0,
+ .ichpmp = 0x1,
+ .loadadj = 0x3,
+ .tmds_termadj = 0x9,
+ .tx_pu_value = 0x66,
+ .bg_temp_coef = 0x3,
+ .bg_vref_level = 0xa,
+ .avdd10_level = 0x4,
+ .avdd14_level = 0x4,
+ .sparepll = 0x0,
+ .drive_current = { 0x33, 0x3f, 0x3f, 0x3f },
+ .preemphasis = { 0x00, 0x17, 0x17, 0x17 },
+ }, {
+ .frequency = 600000000,
+ .vcocap = 0x3,
+ .filter = 0x0,
+ .ichpmp = 0x1,
+ .loadadj = 0x3,
+ .tmds_termadj = 0x9,
+ .tx_pu_value = 0x66,
+ .bg_temp_coef = 0x3,
+ .bg_vref_level = 0x8,
+ .avdd10_level = 0x4,
+ .avdd14_level = 0x4,
+ .sparepll = 0x0,
+ .drive_current = { 0x33, 0x3f, 0x3f, 0x3f },
+ .preemphasis = { 0x00, 0x00, 0x00, 0x00 },
+ },
+};
+#else
+static const struct tegra_sor_hdmi_settings tegra210_sor_hdmi_defaults[] = {
+ {
+ .frequency = 75000000,
+ .vcocap = 0x3,
+ .filter = 0x0,
+ .ichpmp = 0x1,
+ .loadadj = 0x3,
+ .tmds_termadj = 0x9,
+ .tx_pu_value = 0x40,
+ .bg_temp_coef = 0x3,
+ .bg_vref_level = 0x8,
+ .avdd10_level = 0x4,
+ .avdd14_level = 0x4,
+ .sparepll = 0x0,
+ .drive_current = { 0x29, 0x29, 0x29, 0x29 },
+ .preemphasis = { 0x00, 0x00, 0x00, 0x00 },
+ }, {
+ .frequency = 150000000,
+ .vcocap = 0x3,
+ .filter = 0x0,
+ .ichpmp = 0x1,
+ .loadadj = 0x3,
+ .tmds_termadj = 0x9,
+ .tx_pu_value = 0x66,
+ .bg_temp_coef = 0x3,
+ .bg_vref_level = 0x8,
+ .avdd10_level = 0x4,
+ .avdd14_level = 0x4,
+ .sparepll = 0x0,
+ .drive_current = { 0x30, 0x37, 0x37, 0x37 },
+ .preemphasis = { 0x01, 0x02, 0x02, 0x02 },
+ }, {
+ .frequency = 300000000,
+ .vcocap = 0x3,
+ .filter = 0x0,
+ .ichpmp = 0x6,
+ .loadadj = 0x3,
+ .tmds_termadj = 0x9,
+ .tx_pu_value = 0x66,
+ .bg_temp_coef = 0x3,
+ .bg_vref_level = 0xf,
+ .avdd10_level = 0x4,
+ .avdd14_level = 0x4,
+ .sparepll = 0x0,
+ .drive_current = { 0x30, 0x37, 0x37, 0x37 },
+ .preemphasis = { 0x10, 0x3e, 0x3e, 0x3e },
+ }, {
+ .frequency = 600000000,
+ .vcocap = 0x3,
+ .filter = 0x0,
+ .ichpmp = 0xa,
+ .loadadj = 0x3,
+ .tmds_termadj = 0xb,
+ .tx_pu_value = 0x66,
+ .bg_temp_coef = 0x3,
+ .bg_vref_level = 0xe,
+ .avdd10_level = 0x4,
+ .avdd14_level = 0x4,
+ .sparepll = 0x0,
+ .drive_current = { 0x35, 0x3e, 0x3e, 0x3e },
+ .preemphasis = { 0x02, 0x3f, 0x3f, 0x3f },
+ },
+};
+#endif
+
+static const struct tegra_sor_hdmi_settings tegra186_sor_hdmi_defaults[] = {
+ {
+ .frequency = 54000000,
+ .vcocap = 0,
+ .filter = 5,
+ .ichpmp = 5,
+ .loadadj = 3,
+ .tmds_termadj = 0xf,
+ .tx_pu_value = 0,
+ .bg_temp_coef = 3,
+ .bg_vref_level = 8,
+ .avdd10_level = 4,
+ .avdd14_level = 4,
+ .sparepll = 0x54,
+ .drive_current = { 0x3a, 0x3a, 0x3a, 0x33 },
+ .preemphasis = { 0x00, 0x00, 0x00, 0x00 },
+ }, {
+ .frequency = 75000000,
+ .vcocap = 1,
+ .filter = 5,
+ .ichpmp = 5,
+ .loadadj = 3,
+ .tmds_termadj = 0xf,
+ .tx_pu_value = 0,
+ .bg_temp_coef = 3,
+ .bg_vref_level = 8,
+ .avdd10_level = 4,
+ .avdd14_level = 4,
+ .sparepll = 0x44,
+ .drive_current = { 0x3a, 0x3a, 0x3a, 0x33 },
+ .preemphasis = { 0x00, 0x00, 0x00, 0x00 },
+ }, {
+ .frequency = 150000000,
+ .vcocap = 3,
+ .filter = 5,
+ .ichpmp = 5,
+ .loadadj = 3,
+ .tmds_termadj = 15,
+ .tx_pu_value = 0x66 /* 0 */,
+ .bg_temp_coef = 3,
+ .bg_vref_level = 8,
+ .avdd10_level = 4,
+ .avdd14_level = 4,
+ .sparepll = 0x00, /* 0x34 */
+ .drive_current = { 0x3a, 0x3a, 0x3a, 0x37 },
+ .preemphasis = { 0x00, 0x00, 0x00, 0x00 },
+ }, {
+ .frequency = 300000000,
+ .vcocap = 3,
+ .filter = 5,
+ .ichpmp = 5,
+ .loadadj = 3,
+ .tmds_termadj = 15,
+ .tx_pu_value = 64,
+ .bg_temp_coef = 3,
+ .bg_vref_level = 8,
+ .avdd10_level = 4,
+ .avdd14_level = 4,
+ .sparepll = 0x34,
+ .drive_current = { 0x3d, 0x3d, 0x3d, 0x33 },
+ .preemphasis = { 0x00, 0x00, 0x00, 0x00 },
+ }, {
+ .frequency = 600000000,
+ .vcocap = 3,
+ .filter = 5,
+ .ichpmp = 5,
+ .loadadj = 3,
+ .tmds_termadj = 12,
+ .tx_pu_value = 96,
+ .bg_temp_coef = 3,
+ .bg_vref_level = 8,
+ .avdd10_level = 4,
+ .avdd14_level = 4,
+ .sparepll = 0x34,
+ .drive_current = { 0x3d, 0x3d, 0x3d, 0x33 },
+ .preemphasis = { 0x00, 0x00, 0x00, 0x00 },
+ }
+};
+
+static const struct tegra_sor_hdmi_settings tegra194_sor_hdmi_defaults[] = {
+ {
+ .frequency = 54000000,
+ .vcocap = 0,
+ .filter = 5,
+ .ichpmp = 5,
+ .loadadj = 3,
+ .tmds_termadj = 0xf,
+ .tx_pu_value = 0,
+ .bg_temp_coef = 3,
+ .bg_vref_level = 8,
+ .avdd10_level = 4,
+ .avdd14_level = 4,
+ .sparepll = 0x54,
+ .drive_current = { 0x3a, 0x3a, 0x3a, 0x33 },
+ .preemphasis = { 0x00, 0x00, 0x00, 0x00 },
+ }, {
+ .frequency = 75000000,
+ .vcocap = 1,
+ .filter = 5,
+ .ichpmp = 5,
+ .loadadj = 3,
+ .tmds_termadj = 0xf,
+ .tx_pu_value = 0,
+ .bg_temp_coef = 3,
+ .bg_vref_level = 8,
+ .avdd10_level = 4,
+ .avdd14_level = 4,
+ .sparepll = 0x44,
+ .drive_current = { 0x3a, 0x3a, 0x3a, 0x33 },
+ .preemphasis = { 0x00, 0x00, 0x00, 0x00 },
+ }, {
+ .frequency = 150000000,
+ .vcocap = 3,
+ .filter = 5,
+ .ichpmp = 5,
+ .loadadj = 3,
+ .tmds_termadj = 15,
+ .tx_pu_value = 0x66 /* 0 */,
+ .bg_temp_coef = 3,
+ .bg_vref_level = 8,
+ .avdd10_level = 4,
+ .avdd14_level = 4,
+ .sparepll = 0x00, /* 0x34 */
+ .drive_current = { 0x3a, 0x3a, 0x3a, 0x37 },
+ .preemphasis = { 0x00, 0x00, 0x00, 0x00 },
+ }, {
+ .frequency = 300000000,
+ .vcocap = 3,
+ .filter = 5,
+ .ichpmp = 5,
+ .loadadj = 3,
+ .tmds_termadj = 15,
+ .tx_pu_value = 64,
+ .bg_temp_coef = 3,
+ .bg_vref_level = 8,
+ .avdd10_level = 4,
+ .avdd14_level = 4,
+ .sparepll = 0x34,
+ .drive_current = { 0x3d, 0x3d, 0x3d, 0x33 },
+ .preemphasis = { 0x00, 0x00, 0x00, 0x00 },
+ }, {
+ .frequency = 600000000,
+ .vcocap = 3,
+ .filter = 5,
+ .ichpmp = 5,
+ .loadadj = 3,
+ .tmds_termadj = 12,
+ .tx_pu_value = 96,
+ .bg_temp_coef = 3,
+ .bg_vref_level = 8,
+ .avdd10_level = 4,
+ .avdd14_level = 4,
+ .sparepll = 0x34,
+ .drive_current = { 0x3d, 0x3d, 0x3d, 0x33 },
+ .preemphasis = { 0x00, 0x00, 0x00, 0x00 },
+ }
+};
+
+struct tegra_sor_regs {
+ unsigned int head_state0;
+ unsigned int head_state1;
+ unsigned int head_state2;
+ unsigned int head_state3;
+ unsigned int head_state4;
+ unsigned int head_state5;
+ unsigned int pll0;
+ unsigned int pll1;
+ unsigned int pll2;
+ unsigned int pll3;
+ unsigned int dp_padctl0;
+ unsigned int dp_padctl2;
+};
+
+struct tegra_sor_soc {
+ bool supports_lvds;
+ bool supports_hdmi;
+ bool supports_dp;
+ bool supports_audio;
+ bool supports_hdcp;
+
+ const struct tegra_sor_regs *regs;
+ bool has_nvdisplay;
+
+ const struct tegra_sor_hdmi_settings *settings;
+ unsigned int num_settings;
+
+ const u8 *xbar_cfg;
+ const u8 *lane_map;
+
+ const u8 (*voltage_swing)[4][4];
+ const u8 (*pre_emphasis)[4][4];
+ const u8 (*post_cursor)[4][4];
+ const u8 (*tx_pu)[4][4];
+};
+
+struct tegra_sor;
+
+struct tegra_sor_ops {
+ const char *name;
+ int (*probe)(struct tegra_sor *sor);
+ void (*audio_enable)(struct tegra_sor *sor);
+ void (*audio_disable)(struct tegra_sor *sor);
+};
+
+struct tegra_sor {
+ struct host1x_client client;
+ struct tegra_output output;
+ struct device *dev;
+
+ const struct tegra_sor_soc *soc;
+ void __iomem *regs;
+ unsigned int index;
+ unsigned int irq;
+
+ struct reset_control *rst;
+ struct clk *clk_parent;
+ struct clk *clk_safe;
+ struct clk *clk_out;
+ struct clk *clk_pad;
+ struct clk *clk_dp;
+ struct clk *clk;
+
+ u8 xbar_cfg[5];
+
+ struct drm_dp_link link;
+ struct drm_dp_aux *aux;
+
+ struct drm_info_list *debugfs_files;
+
+ const struct tegra_sor_ops *ops;
+ enum tegra_io_pad pad;
+
+ /* for HDMI 2.0 */
+ struct tegra_sor_hdmi_settings *settings;
+ unsigned int num_settings;
+
+ struct regulator *avdd_io_supply;
+ struct regulator *vdd_pll_supply;
+ struct regulator *hdmi_supply;
+
+ struct delayed_work scdc;
+ bool scdc_enabled;
+
+ struct tegra_hda_format format;
+};
+
+struct tegra_sor_state {
+ struct drm_connector_state base;
+
+ unsigned int link_speed;
+ unsigned long pclk;
+ unsigned int bpc;
+};
+
+static inline struct tegra_sor_state *
+to_sor_state(struct drm_connector_state *state)
+{
+ return container_of(state, struct tegra_sor_state, base);
+}
+
+struct tegra_sor_config {
+ u32 bits_per_pixel;
+
+ u32 active_polarity;
+ u32 active_count;
+ u32 tu_size;
+ u32 active_frac;
+ u32 watermark;
+
+ u32 hblank_symbols;
+ u32 vblank_symbols;
+};
+
+static inline struct tegra_sor *
+host1x_client_to_sor(struct host1x_client *client)
+{
+ return container_of(client, struct tegra_sor, client);
+}
+
+static inline struct tegra_sor *to_sor(struct tegra_output *output)
+{
+ return container_of(output, struct tegra_sor, output);
+}
+
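+/*
+ * SOR register offsets are word indices rather than byte offsets, hence
+ * the "offset << 2" when forming the MMIO address: word offset 0x54, for
+ * example, accesses the byte at sor->regs + 0x150.
+ */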
+static inline u32 tegra_sor_readl(struct tegra_sor *sor, unsigned int offset)
+{
+ u32 value = readl(sor->regs + (offset << 2));
+
+ trace_sor_readl(sor->dev, offset, value);
+
+ return value;
+}
+
+static inline void tegra_sor_writel(struct tegra_sor *sor, u32 value,
+ unsigned int offset)
+{
+ trace_sor_writel(sor->dev, offset, value);
+ writel(value, sor->regs + (offset << 2));
+}
+
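+/*
+ * The output clock is reparented with the module clock gated, presumably
+ * to avoid glitching downstream logic while the mux switches. If
+ * clk_prepare_enable() fails, the clock is left disabled for the caller
+ * to handle.
+ */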
+static int tegra_sor_set_parent_clock(struct tegra_sor *sor, struct clk *parent)
+{
+ int err;
+
+ clk_disable_unprepare(sor->clk);
+
+ err = clk_set_parent(sor->clk_out, parent);
+ if (err < 0)
+ return err;
+
+ err = clk_prepare_enable(sor->clk);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+struct tegra_clk_sor_pad {
+ struct clk_hw hw;
+ struct tegra_sor *sor;
+};
+
+static inline struct tegra_clk_sor_pad *to_pad(struct clk_hw *hw)
+{
+ return container_of(hw, struct tegra_clk_sor_pad, hw);
+}
+
+static const char * const tegra_clk_sor_pad_parents[2][2] = {
+ { "pll_d_out0", "pll_dp" },
+ { "pll_d2_out0", "pll_dp" },
+};
+
+/*
+ * Implementing ->set_parent() here isn't really required because the parent
+ * will be explicitly selected in the driver code via the DP_CLK_SEL mux in
+ * the SOR_CLK_CNTRL register. This is primarily for compatibility with the
+ * Tegra186 and later SoC generations where the BPMP implements this clock
+ * and doesn't expose the mux via the common clock framework.
+ */
+
+static int tegra_clk_sor_pad_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct tegra_clk_sor_pad *pad = to_pad(hw);
+ struct tegra_sor *sor = pad->sor;
+ u32 value;
+
+ value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
+ value &= ~SOR_CLK_CNTRL_DP_CLK_SEL_MASK;
+
+ switch (index) {
+ case 0:
+ value |= SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_PCLK;
+ break;
+
+ case 1:
+ value |= SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_DPCLK;
+ break;
+ }
+
+ tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
+
+ return 0;
+}
+
+static u8 tegra_clk_sor_pad_get_parent(struct clk_hw *hw)
+{
+ struct tegra_clk_sor_pad *pad = to_pad(hw);
+ struct tegra_sor *sor = pad->sor;
+ u8 parent = U8_MAX;
+ u32 value;
+
+ value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
+
+ switch (value & SOR_CLK_CNTRL_DP_CLK_SEL_MASK) {
+ case SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_PCLK:
+ case SOR_CLK_CNTRL_DP_CLK_SEL_DIFF_PCLK:
+ parent = 0;
+ break;
+
+ case SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_DPCLK:
+ case SOR_CLK_CNTRL_DP_CLK_SEL_DIFF_DPCLK:
+ parent = 1;
+ break;
+ }
+
+ return parent;
+}
+
+static const struct clk_ops tegra_clk_sor_pad_ops = {
+ .set_parent = tegra_clk_sor_pad_set_parent,
+ .get_parent = tegra_clk_sor_pad_get_parent,
+};
+
+static struct clk *tegra_clk_sor_pad_register(struct tegra_sor *sor,
+ const char *name)
+{
+ struct tegra_clk_sor_pad *pad;
+ struct clk_init_data init;
+ struct clk *clk;
+
+ pad = devm_kzalloc(sor->dev, sizeof(*pad), GFP_KERNEL);
+ if (!pad)
+ return ERR_PTR(-ENOMEM);
+
+ pad->sor = sor;
+
+ init.name = name;
+ init.flags = 0;
+ init.parent_names = tegra_clk_sor_pad_parents[sor->index];
+ init.num_parents = ARRAY_SIZE(tegra_clk_sor_pad_parents[sor->index]);
+ init.ops = &tegra_clk_sor_pad_ops;
+
+ pad->hw.init = &init;
+
+ clk = devm_clk_register(sor->dev, &pad->hw);
+
+ return clk;
+}
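+
+/*
+ * A minimal usage sketch for tegra_clk_sor_pad_register() (the clock name
+ * here is hypothetical): the returned clock is registered with the common
+ * clock framework and can then be handed out to consumers, e.g.:
+ *
+ *   clk = tegra_clk_sor_pad_register(sor, "sor1_pad_clkout");
+ *   if (IS_ERR(clk))
+ *           return PTR_ERR(clk);
+ */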
+
+static void tegra_sor_filter_rates(struct tegra_sor *sor)
+{
+ struct drm_dp_link *link = &sor->link;
+ unsigned int i;
+
+ /* Tegra only supports RBR, HBR and HBR2 */
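+ /*
+ * An eDP 1.4 sink may also advertise intermediate rates such as
+ * 2160000, 2430000, 3240000 or 4320000 kHz; those entries are zeroed
+ * here and drm_dp_link_update_rates() then compacts the table.
+ */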
+ for (i = 0; i < link->num_rates; i++) {
+ switch (link->rates[i]) {
+ case 1620000:
+ case 2700000:
+ case 5400000:
+ break;
+
+ default:
+ DRM_DEBUG_KMS("link rate %lu kHz not supported\n",
+ link->rates[i]);
+ link->rates[i] = 0;
+ break;
+ }
+ }
+
+ drm_dp_link_update_rates(link);
+}
+
+static int tegra_sor_power_up_lanes(struct tegra_sor *sor, unsigned int lanes)
+{
+ unsigned long timeout;
+ u32 value;
+
+ /*
+ * Clear or set the PD_TXD bit corresponding to each lane, depending
+ * on whether it is used or not.
+ */
+ value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
+
+ if (lanes <= 2)
+ value &= ~(SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[3]) |
+ SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[2]));
+ else
+ value |= SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[3]) |
+ SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[2]);
+
+ if (lanes <= 1)
+ value &= ~SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[1]);
+ else
+ value |= SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[1]);
+
+ if (lanes == 0)
+ value &= ~SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[0]);
+ else
+ value |= SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[0]);
+
+ tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
+
+ /* start lane sequencer */
+ value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_DOWN |
+ SOR_LANE_SEQ_CTL_POWER_STATE_UP;
+ tegra_sor_writel(sor, value, SOR_LANE_SEQ_CTL);
+
+ timeout = jiffies + msecs_to_jiffies(250);
+
+ while (time_before(jiffies, timeout)) {
+ value = tegra_sor_readl(sor, SOR_LANE_SEQ_CTL);
+ if ((value & SOR_LANE_SEQ_CTL_TRIGGER) == 0)
+ break;
+
+ usleep_range(250, 1000);
+ }
+
+ if ((value & SOR_LANE_SEQ_CTL_TRIGGER) != 0)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int tegra_sor_power_down_lanes(struct tegra_sor *sor)
+{
+ unsigned long timeout;
+ u32 value;
+
+ /* power down all lanes */
+ value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
+ value &= ~(SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_0 |
+ SOR_DP_PADCTL_PD_TXD_1 | SOR_DP_PADCTL_PD_TXD_2);
+ tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
+
+ /* start lane sequencer */
+ value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_UP |
+ SOR_LANE_SEQ_CTL_POWER_STATE_DOWN;
+ tegra_sor_writel(sor, value, SOR_LANE_SEQ_CTL);
+
+ timeout = jiffies + msecs_to_jiffies(250);
+
+ while (time_before(jiffies, timeout)) {
+ value = tegra_sor_readl(sor, SOR_LANE_SEQ_CTL);
+ if ((value & SOR_LANE_SEQ_CTL_TRIGGER) == 0)
+ break;
+
+ usleep_range(25, 100);
+ }
+
+ if ((value & SOR_LANE_SEQ_CTL_TRIGGER) != 0)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static void tegra_sor_dp_precharge(struct tegra_sor *sor, unsigned int lanes)
+{
+ u32 value;
+
+ /* pre-charge all used lanes */
+ value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
+
+ if (lanes <= 2)
+ value &= ~(SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[3]) |
+ SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[2]));
+ else
+ value |= SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[3]) |
+ SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[2]);
+
+ if (lanes <= 1)
+ value &= ~SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[1]);
+ else
+ value |= SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[1]);
+
+ if (lanes == 0)
+ value &= ~SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[0]);
+ else
+ value |= SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[0]);
+
+ tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
+
+ usleep_range(15, 100);
+
+ value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
+ value &= ~(SOR_DP_PADCTL_CM_TXD_3 | SOR_DP_PADCTL_CM_TXD_2 |
+ SOR_DP_PADCTL_CM_TXD_1 | SOR_DP_PADCTL_CM_TXD_0);
+ tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
+}
+
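+/*
+ * Calibrate the TMDS termination using successive approximation over the
+ * 4-bit TMDS_TERMADJ field: each trial sets one more bit, starting at the
+ * MSB (mask 0x08), waits for the comparator to settle and keeps the bit
+ * only if TERM_COMPOUT reads back low, converging in four iterations.
+ */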
+static void tegra_sor_dp_term_calibrate(struct tegra_sor *sor)
+{
+ u32 mask = 0x08, adj = 0, value;
+
+ /* enable pad calibration logic */
+ value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
+ value &= ~SOR_DP_PADCTL_PAD_CAL_PD;
+ tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
+
+ value = tegra_sor_readl(sor, sor->soc->regs->pll1);
+ value |= SOR_PLL1_TMDS_TERM;
+ tegra_sor_writel(sor, value, sor->soc->regs->pll1);
+
+ while (mask) {
+ adj |= mask;
+
+ value = tegra_sor_readl(sor, sor->soc->regs->pll1);
+ value &= ~SOR_PLL1_TMDS_TERMADJ_MASK;
+ value |= SOR_PLL1_TMDS_TERMADJ(adj);
+ tegra_sor_writel(sor, value, sor->soc->regs->pll1);
+
+ usleep_range(100, 200);
+
+ value = tegra_sor_readl(sor, sor->soc->regs->pll1);
+ if (value & SOR_PLL1_TERM_COMPOUT)
+ adj &= ~mask;
+
+ mask >>= 1;
+ }
+
+ value = tegra_sor_readl(sor, sor->soc->regs->pll1);
+ value &= ~SOR_PLL1_TMDS_TERMADJ_MASK;
+ value |= SOR_PLL1_TMDS_TERMADJ(adj);
+ tegra_sor_writel(sor, value, sor->soc->regs->pll1);
+
+ /* disable pad calibration logic */
+ value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
+ value |= SOR_DP_PADCTL_PAD_CAL_PD;
+ tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
+}
+
+static int tegra_sor_dp_link_apply_training(struct drm_dp_link *link)
+{
+ struct tegra_sor *sor = container_of(link, struct tegra_sor, link);
+ u32 voltage_swing = 0, pre_emphasis = 0, post_cursor = 0;
+ const struct tegra_sor_soc *soc = sor->soc;
+ u32 pattern = 0, tx_pu = 0, value;
+ unsigned int i;
+
+ for (value = 0, i = 0; i < link->lanes; i++) {
+ u8 vs = link->train.request.voltage_swing[i];
+ u8 pe = link->train.request.pre_emphasis[i];
+ u8 pc = link->train.request.post_cursor[i];
+ u8 shift = sor->soc->lane_map[i] << 3;
+
+ voltage_swing |= soc->voltage_swing[pc][vs][pe] << shift;
+ pre_emphasis |= soc->pre_emphasis[pc][vs][pe] << shift;
+ post_cursor |= soc->post_cursor[pc][vs][pe] << shift;
+
+ if (sor->soc->tx_pu[pc][vs][pe] > tx_pu)
+ tx_pu = sor->soc->tx_pu[pc][vs][pe];
+
+ switch (link->train.pattern) {
+ case DP_TRAINING_PATTERN_DISABLE:
+ value = SOR_DP_TPG_SCRAMBLER_GALIOS |
+ SOR_DP_TPG_PATTERN_NONE;
+ break;
+
+ case DP_TRAINING_PATTERN_1:
+ value = SOR_DP_TPG_SCRAMBLER_NONE |
+ SOR_DP_TPG_PATTERN_TRAIN1;
+ break;
+
+ case DP_TRAINING_PATTERN_2:
+ value = SOR_DP_TPG_SCRAMBLER_NONE |
+ SOR_DP_TPG_PATTERN_TRAIN2;
+ break;
+
+ case DP_TRAINING_PATTERN_3:
+ value = SOR_DP_TPG_SCRAMBLER_NONE |
+ SOR_DP_TPG_PATTERN_TRAIN3;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (link->caps.channel_coding)
+ value |= SOR_DP_TPG_CHANNEL_CODING;
+
+ pattern = pattern << 8 | value;
+ }
+
+ tegra_sor_writel(sor, voltage_swing, SOR_LANE_DRIVE_CURRENT0);
+ tegra_sor_writel(sor, pre_emphasis, SOR_LANE_PREEMPHASIS0);
+
+ if (link->caps.tps3_supported)
+ tegra_sor_writel(sor, post_cursor, SOR_LANE_POSTCURSOR0);
+
+ tegra_sor_writel(sor, pattern, SOR_DP_TPG);
+
+ value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
+ value &= ~SOR_DP_PADCTL_TX_PU_MASK;
+ value |= SOR_DP_PADCTL_TX_PU_ENABLE;
+ value |= SOR_DP_PADCTL_TX_PU(tx_pu);
+ tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
+
+ usleep_range(20, 100);
+
+ return 0;
+}
+
+static int tegra_sor_dp_link_configure(struct drm_dp_link *link)
+{
+ struct tegra_sor *sor = container_of(link, struct tegra_sor, link);
+ unsigned int rate, lanes;
+ u32 value;
+ int err;
+
+ rate = drm_dp_link_rate_to_bw_code(link->rate);
+ lanes = link->lanes;
+
+ /* configure link speed and lane count */
+ value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
+ value &= ~SOR_CLK_CNTRL_DP_LINK_SPEED_MASK;
+ value |= SOR_CLK_CNTRL_DP_LINK_SPEED(rate);
+ tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
+
+ value = tegra_sor_readl(sor, SOR_DP_LINKCTL0);
+ value &= ~SOR_DP_LINKCTL_LANE_COUNT_MASK;
+ value |= SOR_DP_LINKCTL_LANE_COUNT(lanes);
+
+ if (link->caps.enhanced_framing)
+ value |= SOR_DP_LINKCTL_ENHANCED_FRAME;
+
+ tegra_sor_writel(sor, value, SOR_DP_LINKCTL0);
+
+ usleep_range(400, 1000);
+
+ /* configure load pulse position adjustment */
+ value = tegra_sor_readl(sor, sor->soc->regs->pll1);
+ value &= ~SOR_PLL1_LOADADJ_MASK;
+
+ switch (rate) {
+ case DP_LINK_BW_1_62:
+ value |= SOR_PLL1_LOADADJ(0x3);
+ break;
+
+ case DP_LINK_BW_2_7:
+ value |= SOR_PLL1_LOADADJ(0x4);
+ break;
+
+ case DP_LINK_BW_5_4:
+ value |= SOR_PLL1_LOADADJ(0x6);
+ break;
+ }
+
+ tegra_sor_writel(sor, value, sor->soc->regs->pll1);
+
+ /* use alternate scrambler reset for eDP */
+ value = tegra_sor_readl(sor, SOR_DP_SPARE0);
+
+ if (link->edp == 0)
+ value &= ~SOR_DP_SPARE_PANEL_INTERNAL;
+ else
+ value |= SOR_DP_SPARE_PANEL_INTERNAL;
+
+ tegra_sor_writel(sor, value, SOR_DP_SPARE0);
+
+ err = tegra_sor_power_down_lanes(sor);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to power down lanes: %d\n", err);
+ return err;
+ }
+
+ /* power up and pre-charge lanes */
+ err = tegra_sor_power_up_lanes(sor, lanes);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to power up %u lane%s: %d\n",
+ lanes, (lanes != 1) ? "s" : "", err);
+ return err;
+ }
+
+ tegra_sor_dp_precharge(sor, lanes);
+
+ return 0;
+}
+
+static const struct drm_dp_link_ops tegra_sor_dp_link_ops = {
+ .apply_training = tegra_sor_dp_link_apply_training,
+ .configure = tegra_sor_dp_link_configure,
+};
+
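+/*
+ * Writing 0 -> 1 -> 0 to SOR_SUPER_STATE0 (or SOR_STATE0) below produces
+ * an edge that latches the settings armed in SOR_SUPER_STATE1 (or
+ * SOR_STATE1) into the active hardware state.
+ */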
+static void tegra_sor_super_update(struct tegra_sor *sor)
+{
+ tegra_sor_writel(sor, 0, SOR_SUPER_STATE0);
+ tegra_sor_writel(sor, 1, SOR_SUPER_STATE0);
+ tegra_sor_writel(sor, 0, SOR_SUPER_STATE0);
+}
+
+static void tegra_sor_update(struct tegra_sor *sor)
+{
+ tegra_sor_writel(sor, 0, SOR_STATE0);
+ tegra_sor_writel(sor, 1, SOR_STATE0);
+ tegra_sor_writel(sor, 0, SOR_STATE0);
+}
+
+static int tegra_sor_setup_pwm(struct tegra_sor *sor, unsigned long timeout)
+{
+ u32 value;
+
+ value = tegra_sor_readl(sor, SOR_PWM_DIV);
+ value &= ~SOR_PWM_DIV_MASK;
+ value |= 0x400; /* period */
+ tegra_sor_writel(sor, value, SOR_PWM_DIV);
+
+ value = tegra_sor_readl(sor, SOR_PWM_CTL);
+ value &= ~SOR_PWM_CTL_DUTY_CYCLE_MASK;
+ value |= 0x400; /* duty cycle */
+ value &= ~SOR_PWM_CTL_CLK_SEL; /* clock source: PCLK */
+ value |= SOR_PWM_CTL_TRIGGER;
+ tegra_sor_writel(sor, value, SOR_PWM_CTL);
+
+ timeout = jiffies + msecs_to_jiffies(timeout);
+
+ while (time_before(jiffies, timeout)) {
+ value = tegra_sor_readl(sor, SOR_PWM_CTL);
+ if ((value & SOR_PWM_CTL_TRIGGER) == 0)
+ return 0;
+
+ usleep_range(25, 100);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int tegra_sor_attach(struct tegra_sor *sor)
+{
+ unsigned long value, timeout;
+
+ /* wake up in normal mode */
+ value = tegra_sor_readl(sor, SOR_SUPER_STATE1);
+ value |= SOR_SUPER_STATE_HEAD_MODE_AWAKE;
+ value |= SOR_SUPER_STATE_MODE_NORMAL;
+ tegra_sor_writel(sor, value, SOR_SUPER_STATE1);
+ tegra_sor_super_update(sor);
+
+ /* attach */
+ value = tegra_sor_readl(sor, SOR_SUPER_STATE1);
+ value |= SOR_SUPER_STATE_ATTACHED;
+ tegra_sor_writel(sor, value, SOR_SUPER_STATE1);
+ tegra_sor_super_update(sor);
+
+ timeout = jiffies + msecs_to_jiffies(250);
+
+ while (time_before(jiffies, timeout)) {
+ value = tegra_sor_readl(sor, SOR_TEST);
+ if ((value & SOR_TEST_ATTACHED) != 0)
+ return 0;
+
+ usleep_range(25, 100);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int tegra_sor_wakeup(struct tegra_sor *sor)
+{
+ unsigned long value, timeout;
+
+ timeout = jiffies + msecs_to_jiffies(250);
+
+ /* wait for head to wake up */
+ while (time_before(jiffies, timeout)) {
+ value = tegra_sor_readl(sor, SOR_TEST);
+ value &= SOR_TEST_HEAD_MODE_MASK;
+
+ if (value == SOR_TEST_HEAD_MODE_AWAKE)
+ return 0;
+
+ usleep_range(25, 100);
+ }
+
+ return -ETIMEDOUT;
+}
+
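+/*
+ * SOR_PWR_TRIGGER is self-clearing: the hardware drops the bit once the
+ * requested power state has been applied, so the loop below polls for
+ * that acknowledgement, with @timeout given in milliseconds.
+ */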
+static int tegra_sor_power_up(struct tegra_sor *sor, unsigned long timeout)
+{
+ u32 value;
+
+ value = tegra_sor_readl(sor, SOR_PWR);
+ value |= SOR_PWR_TRIGGER | SOR_PWR_NORMAL_STATE_PU;
+ tegra_sor_writel(sor, value, SOR_PWR);
+
+ timeout = jiffies + msecs_to_jiffies(timeout);
+
+ while (time_before(jiffies, timeout)) {
+ value = tegra_sor_readl(sor, SOR_PWR);
+ if ((value & SOR_PWR_TRIGGER) == 0)
+ return 0;
+
+ usleep_range(25, 100);
+ }
+
+ return -ETIMEDOUT;
+}
+
+struct tegra_sor_params {
+ /* number of link clocks per line */
+ unsigned int num_clocks;
+ /* ratio between input and output */
+ u64 ratio;
+ /* precision factor */
+ u64 precision;
+
+ unsigned int active_polarity;
+ unsigned int active_count;
+ unsigned int active_frac;
+ unsigned int tu_size;
+ unsigned int error;
+};
+
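+/*
+ * Fixed-point helper for the DP transfer unit search: all ratios are
+ * scaled by params->precision (f = 100000 in the caller) so the
+ * fractional number of active symbols per TU can be handled with integer
+ * math. The best parameters found so far are recorded in @params; a true
+ * return signals an exact (zero-error) match, allowing the caller to
+ * stop scanning tu_size values early.
+ */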
+static bool tegra_sor_compute_params(struct tegra_sor *sor,
+ struct tegra_sor_params *params,
+ unsigned int tu_size)
+{
+ u64 active_sym, active_count, frac, approx;
+ u32 active_polarity, active_frac = 0;
+ const u64 f = params->precision;
+ s64 error;
+
+ active_sym = params->ratio * tu_size;
+ active_count = div_u64(active_sym, f) * f;
+ frac = active_sym - active_count;
+
+ /* fraction < 0.5 */
+ if (frac >= (f / 2)) {
+ active_polarity = 1;
+ frac = f - frac;
+ } else {
+ active_polarity = 0;
+ }
+
+ if (frac != 0) {
+ frac = div_u64(f * f, frac); /* 1/fraction */
+ if (frac <= (15 * f)) {
+ active_frac = div_u64(frac, f);
+
+ /* round up */
+ if (active_polarity)
+ active_frac++;
+ } else {
+ active_frac = active_polarity ? 1 : 15;
+ }
+ }
+
+ if (active_frac == 1)
+ active_polarity = 0;
+
+ if (active_polarity == 1) {
+ if (active_frac) {
+ approx = active_count + (active_frac * (f - 1)) * f;
+ approx = div_u64(approx, active_frac * f);
+ } else {
+ approx = active_count + f;
+ }
+ } else {
+ if (active_frac)
+ approx = active_count + div_u64(f, active_frac);
+ else
+ approx = active_count;
+ }
+
+ error = div_s64(active_sym - approx, tu_size);
+ error *= params->num_clocks;
+
+ if (error <= 0 && abs(error) < params->error) {
+ params->active_count = div_u64(active_count, f);
+ params->active_polarity = active_polarity;
+ params->active_frac = active_frac;
+ params->error = abs(error);
+ params->tu_size = tu_size;
+
+ if (error == 0)
+ return true;
+ }
+
+ return false;
+}
+
+static int tegra_sor_compute_config(struct tegra_sor *sor,
+ const struct drm_display_mode *mode,
+ struct tegra_sor_config *config,
+ struct drm_dp_link *link)
+{
+ const u64 f = 100000, link_rate = link->rate * 1000;
+ const u64 pclk = (u64)mode->clock * 1000;
+ u64 input, output, watermark, num;
+ struct tegra_sor_params params;
+ u32 num_syms_per_line;
+ unsigned int i;
+
+ if (!link_rate || !link->lanes || !pclk || !config->bits_per_pixel)
+ return -EINVAL;
+
+ input = pclk * config->bits_per_pixel;
+ output = link_rate * 8 * link->lanes;
+
+ if (input >= output)
+ return -ERANGE;
+
+ memset(&params, 0, sizeof(params));
+ params.ratio = div64_u64(input * f, output);
+ params.num_clocks = div_u64(link_rate * mode->hdisplay, pclk);
+ params.precision = f;
+ params.error = 64 * f;
+ params.tu_size = 64;
+
+ for (i = params.tu_size; i >= 32; i--)
+ if (tegra_sor_compute_params(sor, &params, i))
+ break;
+
+ if (params.active_frac == 0) {
+ config->active_polarity = 0;
+ config->active_count = params.active_count;
+
+ if (!params.active_polarity)
+ config->active_count--;
+
+ config->tu_size = params.tu_size;
+ config->active_frac = 1;
+ } else {
+ config->active_polarity = params.active_polarity;
+ config->active_count = params.active_count;
+ config->active_frac = params.active_frac;
+ config->tu_size = params.tu_size;
+ }
+
+ dev_dbg(sor->dev,
+ "polarity: %d active count: %d tu size: %d active frac: %d\n",
+ config->active_polarity, config->active_count,
+ config->tu_size, config->active_frac);
+
+ watermark = params.ratio * config->tu_size * (f - params.ratio);
+ watermark = div_u64(watermark, f);
+
+ watermark = div_u64(watermark + params.error, f);
+ config->watermark = watermark + (config->bits_per_pixel / 8) + 2;
+ num_syms_per_line = (mode->hdisplay * config->bits_per_pixel) /
+ (link->lanes * 8);
+
+ if (config->watermark > 30) {
+ config->watermark = 30;
+ dev_err(sor->dev,
+ "unable to compute TU size, forcing watermark to %u\n",
+ config->watermark);
+ } else if (config->watermark > num_syms_per_line) {
+ config->watermark = num_syms_per_line;
+ dev_err(sor->dev, "watermark too high, forcing to %u\n",
+ config->watermark);
+ }
+
+ /* compute the number of symbols per horizontal blanking interval */
+ num = ((mode->htotal - mode->hdisplay) - 7) * link_rate;
+ config->hblank_symbols = div_u64(num, pclk);
+
+ if (link->caps.enhanced_framing)
+ config->hblank_symbols -= 3;
+
+ config->hblank_symbols -= 12 / link->lanes;
+
+ /* compute the number of symbols per vertical blanking interval */
+ num = (mode->hdisplay - 25) * link_rate;
+ config->vblank_symbols = div_u64(num, pclk);
+ config->vblank_symbols -= 36 / link->lanes + 4;
+
+ dev_dbg(sor->dev, "blank symbols: H:%u V:%u\n", config->hblank_symbols,
+ config->vblank_symbols);
+
+ return 0;
+}
+
+static void tegra_sor_apply_config(struct tegra_sor *sor,
+ const struct tegra_sor_config *config)
+{
+ u32 value;
+
+ value = tegra_sor_readl(sor, SOR_DP_LINKCTL0);
+ value &= ~SOR_DP_LINKCTL_TU_SIZE_MASK;
+ value |= SOR_DP_LINKCTL_TU_SIZE(config->tu_size);
+ tegra_sor_writel(sor, value, SOR_DP_LINKCTL0);
+
+ value = tegra_sor_readl(sor, SOR_DP_CONFIG0);
+ value &= ~SOR_DP_CONFIG_WATERMARK_MASK;
+ value |= SOR_DP_CONFIG_WATERMARK(config->watermark);
+
+ value &= ~SOR_DP_CONFIG_ACTIVE_SYM_COUNT_MASK;
+ value |= SOR_DP_CONFIG_ACTIVE_SYM_COUNT(config->active_count);
+
+ value &= ~SOR_DP_CONFIG_ACTIVE_SYM_FRAC_MASK;
+ value |= SOR_DP_CONFIG_ACTIVE_SYM_FRAC(config->active_frac);
+
+ if (config->active_polarity)
+ value |= SOR_DP_CONFIG_ACTIVE_SYM_POLARITY;
+ else
+ value &= ~SOR_DP_CONFIG_ACTIVE_SYM_POLARITY;
+
+ value |= SOR_DP_CONFIG_ACTIVE_SYM_ENABLE;
+ value |= SOR_DP_CONFIG_DISPARITY_NEGATIVE;
+ tegra_sor_writel(sor, value, SOR_DP_CONFIG0);
+
+ value = tegra_sor_readl(sor, SOR_DP_AUDIO_HBLANK_SYMBOLS);
+ value &= ~SOR_DP_AUDIO_HBLANK_SYMBOLS_MASK;
+ value |= config->hblank_symbols & 0xffff;
+ tegra_sor_writel(sor, value, SOR_DP_AUDIO_HBLANK_SYMBOLS);
+
+ value = tegra_sor_readl(sor, SOR_DP_AUDIO_VBLANK_SYMBOLS);
+ value &= ~SOR_DP_AUDIO_VBLANK_SYMBOLS_MASK;
+ value |= config->vblank_symbols & 0xffff;
+ tegra_sor_writel(sor, value, SOR_DP_AUDIO_VBLANK_SYMBOLS);
+}
+
+static void tegra_sor_mode_set(struct tegra_sor *sor,
+ const struct drm_display_mode *mode,
+ struct tegra_sor_state *state)
+{
+ struct tegra_dc *dc = to_tegra_dc(sor->output.encoder.crtc);
+ unsigned int vbe, vse, hbe, hse, vbs, hbs;
+ u32 value;
+
+ value = tegra_sor_readl(sor, SOR_STATE1);
+ value &= ~SOR_STATE_ASY_PIXELDEPTH_MASK;
+ value &= ~SOR_STATE_ASY_CRC_MODE_MASK;
+ value &= ~SOR_STATE_ASY_OWNER_MASK;
+
+ value |= SOR_STATE_ASY_CRC_MODE_COMPLETE |
+ SOR_STATE_ASY_OWNER(dc->pipe + 1);
+
+ if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+ value &= ~SOR_STATE_ASY_HSYNCPOL;
+
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ value |= SOR_STATE_ASY_HSYNCPOL;
+
+ if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+ value &= ~SOR_STATE_ASY_VSYNCPOL;
+
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ value |= SOR_STATE_ASY_VSYNCPOL;
+
+ switch (state->bpc) {
+ case 16:
+ value |= SOR_STATE_ASY_PIXELDEPTH_BPP_48_444;
+ break;
+
+ case 12:
+ value |= SOR_STATE_ASY_PIXELDEPTH_BPP_36_444;
+ break;
+
+ case 10:
+ value |= SOR_STATE_ASY_PIXELDEPTH_BPP_30_444;
+ break;
+
+ case 8:
+ value |= SOR_STATE_ASY_PIXELDEPTH_BPP_24_444;
+ break;
+
+ case 6:
+ value |= SOR_STATE_ASY_PIXELDEPTH_BPP_18_444;
+ break;
+
+ default:
+ value |= SOR_STATE_ASY_PIXELDEPTH_BPP_24_444;
+ break;
+ }
+
+ tegra_sor_writel(sor, value, SOR_STATE1);
+
+ /*
+ * TODO: The video timing programming below doesn't seem to match the
+ * register definitions.
+ */
+
+ value = ((mode->vtotal & 0x7fff) << 16) | (mode->htotal & 0x7fff);
+ tegra_sor_writel(sor, value, sor->soc->regs->head_state1 + dc->pipe);
+
+ /* sync end = sync width - 1 */
+ vse = mode->vsync_end - mode->vsync_start - 1;
+ hse = mode->hsync_end - mode->hsync_start - 1;
+
+ value = ((vse & 0x7fff) << 16) | (hse & 0x7fff);
+ tegra_sor_writel(sor, value, sor->soc->regs->head_state2 + dc->pipe);
+
+ /* blank end = sync end + back porch */
+ vbe = vse + (mode->vtotal - mode->vsync_end);
+ hbe = hse + (mode->htotal - mode->hsync_end);
+
+ value = ((vbe & 0x7fff) << 16) | (hbe & 0x7fff);
+ tegra_sor_writel(sor, value, sor->soc->regs->head_state3 + dc->pipe);
+
+ /* blank start = blank end + active */
+ vbs = vbe + mode->vdisplay;
+ hbs = hbe + mode->hdisplay;
+
+ value = ((vbs & 0x7fff) << 16) | (hbs & 0x7fff);
+ tegra_sor_writel(sor, value, sor->soc->regs->head_state4 + dc->pipe);
+
+ /* XXX interlacing support */
+ tegra_sor_writel(sor, 0x001, sor->soc->regs->head_state5 + dc->pipe);
+}
+
+static int tegra_sor_detach(struct tegra_sor *sor)
+{
+ unsigned long value, timeout;
+
+ /* switch to safe mode */
+ value = tegra_sor_readl(sor, SOR_SUPER_STATE1);
+ value &= ~SOR_SUPER_STATE_MODE_NORMAL;
+ tegra_sor_writel(sor, value, SOR_SUPER_STATE1);
+ tegra_sor_super_update(sor);
+
+ timeout = jiffies + msecs_to_jiffies(250);
+
+ while (time_before(jiffies, timeout)) {
+ value = tegra_sor_readl(sor, SOR_PWR);
+ if (value & SOR_PWR_MODE_SAFE)
+ break;
+ }
+
+ if ((value & SOR_PWR_MODE_SAFE) == 0)
+ return -ETIMEDOUT;
+
+ /* go to sleep */
+ value = tegra_sor_readl(sor, SOR_SUPER_STATE1);
+ value &= ~SOR_SUPER_STATE_HEAD_MODE_MASK;
+ tegra_sor_writel(sor, value, SOR_SUPER_STATE1);
+ tegra_sor_super_update(sor);
+
+ /* detach */
+ value = tegra_sor_readl(sor, SOR_SUPER_STATE1);
+ value &= ~SOR_SUPER_STATE_ATTACHED;
+ tegra_sor_writel(sor, value, SOR_SUPER_STATE1);
+ tegra_sor_super_update(sor);
+
+ timeout = jiffies + msecs_to_jiffies(250);
+
+ while (time_before(jiffies, timeout)) {
+ value = tegra_sor_readl(sor, SOR_TEST);
+ if ((value & SOR_TEST_ATTACHED) == 0)
+ break;
+
+ usleep_range(25, 100);
+ }
+
+ if ((value & SOR_TEST_ATTACHED) != 0)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int tegra_sor_power_down(struct tegra_sor *sor)
+{
+ unsigned long value, timeout;
+ int err;
+
+ value = tegra_sor_readl(sor, SOR_PWR);
+ value &= ~SOR_PWR_NORMAL_STATE_PU;
+ value |= SOR_PWR_TRIGGER;
+ tegra_sor_writel(sor, value, SOR_PWR);
+
+ timeout = jiffies + msecs_to_jiffies(250);
+
+ while (time_before(jiffies, timeout)) {
+ value = tegra_sor_readl(sor, SOR_PWR);
+ if ((value & SOR_PWR_TRIGGER) == 0)
+ break;
+
+ usleep_range(25, 100);
+ }
+
+ if ((value & SOR_PWR_TRIGGER) != 0)
+ return -ETIMEDOUT;
+
+ /* switch to safe parent clock */
+ err = tegra_sor_set_parent_clock(sor, sor->clk_safe);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to set safe parent clock: %d\n", err);
+ return err;
+ }
+
+ value = tegra_sor_readl(sor, sor->soc->regs->pll2);
+ value |= SOR_PLL2_PORT_POWERDOWN;
+ tegra_sor_writel(sor, value, sor->soc->regs->pll2);
+
+ usleep_range(20, 100);
+
+ value = tegra_sor_readl(sor, sor->soc->regs->pll0);
+ value |= SOR_PLL0_VCOPD | SOR_PLL0_PWR;
+ tegra_sor_writel(sor, value, sor->soc->regs->pll0);
+
+ value = tegra_sor_readl(sor, sor->soc->regs->pll2);
+ value |= SOR_PLL2_SEQ_PLLCAPPD;
+ value |= SOR_PLL2_SEQ_PLLCAPPD_ENFORCE;
+ tegra_sor_writel(sor, value, sor->soc->regs->pll2);
+
+ usleep_range(20, 100);
+
+ return 0;
+}
+
+static int tegra_sor_crc_wait(struct tegra_sor *sor, unsigned long timeout)
+{
+ u32 value;
+
+ timeout = jiffies + msecs_to_jiffies(timeout);
+
+ while (time_before(jiffies, timeout)) {
+ value = tegra_sor_readl(sor, SOR_CRCA);
+ if (value & SOR_CRCA_VALID)
+ return 0;
+
+ usleep_range(100, 200);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int tegra_sor_show_crc(struct seq_file *s, void *data)
+{
+ struct drm_info_node *node = s->private;
+ struct tegra_sor *sor = node->info_ent->data;
+ struct drm_crtc *crtc = sor->output.encoder.crtc;
+ struct drm_device *drm = node->minor->dev;
+ int err = 0;
+ u32 value;
+
+ drm_modeset_lock_all(drm);
+
+ if (!crtc || !crtc->state->active) {
+ err = -EBUSY;
+ goto unlock;
+ }
+
+ value = tegra_sor_readl(sor, SOR_STATE1);
+ value &= ~SOR_STATE_ASY_CRC_MODE_MASK;
+ tegra_sor_writel(sor, value, SOR_STATE1);
+
+ value = tegra_sor_readl(sor, SOR_CRC_CNTRL);
+ value |= SOR_CRC_CNTRL_ENABLE;
+ tegra_sor_writel(sor, value, SOR_CRC_CNTRL);
+
+ value = tegra_sor_readl(sor, SOR_TEST);
+ value &= ~SOR_TEST_CRC_POST_SERIALIZE;
+ tegra_sor_writel(sor, value, SOR_TEST);
+
+ err = tegra_sor_crc_wait(sor, 100);
+ if (err < 0)
+ goto unlock;
+
+ tegra_sor_writel(sor, SOR_CRCA_RESET, SOR_CRCA);
+ value = tegra_sor_readl(sor, SOR_CRCB);
+
+ seq_printf(s, "%08x\n", value);
+
+unlock:
+ drm_modeset_unlock_all(drm);
+ return err;
+}
+
+#define DEBUGFS_REG32(_name) { .name = #_name, .offset = _name }
+
+static const struct debugfs_reg32 tegra_sor_regs[] = {
+ DEBUGFS_REG32(SOR_CTXSW),
+ DEBUGFS_REG32(SOR_SUPER_STATE0),
+ DEBUGFS_REG32(SOR_SUPER_STATE1),
+ DEBUGFS_REG32(SOR_STATE0),
+ DEBUGFS_REG32(SOR_STATE1),
+ DEBUGFS_REG32(SOR_HEAD_STATE0(0)),
+ DEBUGFS_REG32(SOR_HEAD_STATE0(1)),
+ DEBUGFS_REG32(SOR_HEAD_STATE1(0)),
+ DEBUGFS_REG32(SOR_HEAD_STATE1(1)),
+ DEBUGFS_REG32(SOR_HEAD_STATE2(0)),
+ DEBUGFS_REG32(SOR_HEAD_STATE2(1)),
+ DEBUGFS_REG32(SOR_HEAD_STATE3(0)),
+ DEBUGFS_REG32(SOR_HEAD_STATE3(1)),
+ DEBUGFS_REG32(SOR_HEAD_STATE4(0)),
+ DEBUGFS_REG32(SOR_HEAD_STATE4(1)),
+ DEBUGFS_REG32(SOR_HEAD_STATE5(0)),
+ DEBUGFS_REG32(SOR_HEAD_STATE5(1)),
+ DEBUGFS_REG32(SOR_CRC_CNTRL),
+ DEBUGFS_REG32(SOR_DP_DEBUG_MVID),
+ DEBUGFS_REG32(SOR_CLK_CNTRL),
+ DEBUGFS_REG32(SOR_CAP),
+ DEBUGFS_REG32(SOR_PWR),
+ DEBUGFS_REG32(SOR_TEST),
+ DEBUGFS_REG32(SOR_PLL0),
+ DEBUGFS_REG32(SOR_PLL1),
+ DEBUGFS_REG32(SOR_PLL2),
+ DEBUGFS_REG32(SOR_PLL3),
+ DEBUGFS_REG32(SOR_CSTM),
+ DEBUGFS_REG32(SOR_LVDS),
+ DEBUGFS_REG32(SOR_CRCA),
+ DEBUGFS_REG32(SOR_CRCB),
+ DEBUGFS_REG32(SOR_BLANK),
+ DEBUGFS_REG32(SOR_SEQ_CTL),
+ DEBUGFS_REG32(SOR_LANE_SEQ_CTL),
+ DEBUGFS_REG32(SOR_SEQ_INST(0)),
+ DEBUGFS_REG32(SOR_SEQ_INST(1)),
+ DEBUGFS_REG32(SOR_SEQ_INST(2)),
+ DEBUGFS_REG32(SOR_SEQ_INST(3)),
+ DEBUGFS_REG32(SOR_SEQ_INST(4)),
+ DEBUGFS_REG32(SOR_SEQ_INST(5)),
+ DEBUGFS_REG32(SOR_SEQ_INST(6)),
+ DEBUGFS_REG32(SOR_SEQ_INST(7)),
+ DEBUGFS_REG32(SOR_SEQ_INST(8)),
+ DEBUGFS_REG32(SOR_SEQ_INST(9)),
+ DEBUGFS_REG32(SOR_SEQ_INST(10)),
+ DEBUGFS_REG32(SOR_SEQ_INST(11)),
+ DEBUGFS_REG32(SOR_SEQ_INST(12)),
+ DEBUGFS_REG32(SOR_SEQ_INST(13)),
+ DEBUGFS_REG32(SOR_SEQ_INST(14)),
+ DEBUGFS_REG32(SOR_SEQ_INST(15)),
+ DEBUGFS_REG32(SOR_PWM_DIV),
+ DEBUGFS_REG32(SOR_PWM_CTL),
+ DEBUGFS_REG32(SOR_VCRC_A0),
+ DEBUGFS_REG32(SOR_VCRC_A1),
+ DEBUGFS_REG32(SOR_VCRC_B0),
+ DEBUGFS_REG32(SOR_VCRC_B1),
+ DEBUGFS_REG32(SOR_CCRC_A0),
+ DEBUGFS_REG32(SOR_CCRC_A1),
+ DEBUGFS_REG32(SOR_CCRC_B0),
+ DEBUGFS_REG32(SOR_CCRC_B1),
+ DEBUGFS_REG32(SOR_EDATA_A0),
+ DEBUGFS_REG32(SOR_EDATA_A1),
+ DEBUGFS_REG32(SOR_EDATA_B0),
+ DEBUGFS_REG32(SOR_EDATA_B1),
+ DEBUGFS_REG32(SOR_COUNT_A0),
+ DEBUGFS_REG32(SOR_COUNT_A1),
+ DEBUGFS_REG32(SOR_COUNT_B0),
+ DEBUGFS_REG32(SOR_COUNT_B1),
+ DEBUGFS_REG32(SOR_DEBUG_A0),
+ DEBUGFS_REG32(SOR_DEBUG_A1),
+ DEBUGFS_REG32(SOR_DEBUG_B0),
+ DEBUGFS_REG32(SOR_DEBUG_B1),
+ DEBUGFS_REG32(SOR_TRIG),
+ DEBUGFS_REG32(SOR_MSCHECK),
+ DEBUGFS_REG32(SOR_XBAR_CTRL),
+ DEBUGFS_REG32(SOR_XBAR_POL),
+ DEBUGFS_REG32(SOR_DP_LINKCTL0),
+ DEBUGFS_REG32(SOR_DP_LINKCTL1),
+ DEBUGFS_REG32(SOR_LANE_DRIVE_CURRENT0),
+ DEBUGFS_REG32(SOR_LANE_DRIVE_CURRENT1),
+ DEBUGFS_REG32(SOR_LANE4_DRIVE_CURRENT0),
+ DEBUGFS_REG32(SOR_LANE4_DRIVE_CURRENT1),
+ DEBUGFS_REG32(SOR_LANE_PREEMPHASIS0),
+ DEBUGFS_REG32(SOR_LANE_PREEMPHASIS1),
+ DEBUGFS_REG32(SOR_LANE4_PREEMPHASIS0),
+ DEBUGFS_REG32(SOR_LANE4_PREEMPHASIS1),
+ DEBUGFS_REG32(SOR_LANE_POSTCURSOR0),
+ DEBUGFS_REG32(SOR_LANE_POSTCURSOR1),
+ DEBUGFS_REG32(SOR_DP_CONFIG0),
+ DEBUGFS_REG32(SOR_DP_CONFIG1),
+ DEBUGFS_REG32(SOR_DP_MN0),
+ DEBUGFS_REG32(SOR_DP_MN1),
+ DEBUGFS_REG32(SOR_DP_PADCTL0),
+ DEBUGFS_REG32(SOR_DP_PADCTL1),
+ DEBUGFS_REG32(SOR_DP_PADCTL2),
+ DEBUGFS_REG32(SOR_DP_DEBUG0),
+ DEBUGFS_REG32(SOR_DP_DEBUG1),
+ DEBUGFS_REG32(SOR_DP_SPARE0),
+ DEBUGFS_REG32(SOR_DP_SPARE1),
+ DEBUGFS_REG32(SOR_DP_AUDIO_CTRL),
+ DEBUGFS_REG32(SOR_DP_AUDIO_HBLANK_SYMBOLS),
+ DEBUGFS_REG32(SOR_DP_AUDIO_VBLANK_SYMBOLS),
+ DEBUGFS_REG32(SOR_DP_GENERIC_INFOFRAME_HEADER),
+ DEBUGFS_REG32(SOR_DP_GENERIC_INFOFRAME_SUBPACK0),
+ DEBUGFS_REG32(SOR_DP_GENERIC_INFOFRAME_SUBPACK1),
+ DEBUGFS_REG32(SOR_DP_GENERIC_INFOFRAME_SUBPACK2),
+ DEBUGFS_REG32(SOR_DP_GENERIC_INFOFRAME_SUBPACK3),
+ DEBUGFS_REG32(SOR_DP_GENERIC_INFOFRAME_SUBPACK4),
+ DEBUGFS_REG32(SOR_DP_GENERIC_INFOFRAME_SUBPACK5),
+ DEBUGFS_REG32(SOR_DP_GENERIC_INFOFRAME_SUBPACK6),
+ DEBUGFS_REG32(SOR_DP_TPG),
+ DEBUGFS_REG32(SOR_DP_TPG_CONFIG),
+ DEBUGFS_REG32(SOR_DP_LQ_CSTM0),
+ DEBUGFS_REG32(SOR_DP_LQ_CSTM1),
+ DEBUGFS_REG32(SOR_DP_LQ_CSTM2),
+};
+
+static int tegra_sor_show_regs(struct seq_file *s, void *data)
+{
+ struct drm_info_node *node = s->private;
+ struct tegra_sor *sor = node->info_ent->data;
+ struct drm_crtc *crtc = sor->output.encoder.crtc;
+ struct drm_device *drm = node->minor->dev;
+ unsigned int i;
+ int err = 0;
+
+ drm_modeset_lock_all(drm);
+
+ if (!crtc || !crtc->state->active) {
+ err = -EBUSY;
+ goto unlock;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(tegra_sor_regs); i++) {
+ unsigned int offset = tegra_sor_regs[i].offset;
+
+ seq_printf(s, "%-38s %#05x %08x\n", tegra_sor_regs[i].name,
+ offset, tegra_sor_readl(sor, offset));
+ }
+
+unlock:
+ drm_modeset_unlock_all(drm);
+ return err;
+}
+
+static const struct drm_info_list debugfs_files[] = {
+ { "crc", tegra_sor_show_crc, 0, NULL },
+ { "regs", tegra_sor_show_regs, 0, NULL },
+};
+
+static int tegra_sor_late_register(struct drm_connector *connector)
+{
+ struct tegra_output *output = connector_to_output(connector);
+ unsigned int i, count = ARRAY_SIZE(debugfs_files);
+ struct drm_minor *minor = connector->dev->primary;
+ struct dentry *root = connector->debugfs_entry;
+ struct tegra_sor *sor = to_sor(output);
+
+ sor->debugfs_files = kmemdup(debugfs_files, sizeof(debugfs_files),
+ GFP_KERNEL);
+ if (!sor->debugfs_files)
+ return -ENOMEM;
+
+ for (i = 0; i < count; i++)
+ sor->debugfs_files[i].data = sor;
+
+ drm_debugfs_create_files(sor->debugfs_files, count, root, minor);
+
+ return 0;
+}
+
+static void tegra_sor_early_unregister(struct drm_connector *connector)
+{
+ struct tegra_output *output = connector_to_output(connector);
+ unsigned int count = ARRAY_SIZE(debugfs_files);
+ struct tegra_sor *sor = to_sor(output);
+
+ drm_debugfs_remove_files(sor->debugfs_files, count,
+ connector->dev->primary);
+ kfree(sor->debugfs_files);
+ sor->debugfs_files = NULL;
+}
+
+static void tegra_sor_connector_reset(struct drm_connector *connector)
+{
+ struct tegra_sor_state *state;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return;
+
+ if (connector->state) {
+ __drm_atomic_helper_connector_destroy_state(connector->state);
+ kfree(connector->state);
+ }
+
+ __drm_atomic_helper_connector_reset(connector, &state->base);
+}
+
+static enum drm_connector_status
+tegra_sor_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct tegra_output *output = connector_to_output(connector);
+ struct tegra_sor *sor = to_sor(output);
+
+ if (sor->aux)
+ return drm_dp_aux_detect(sor->aux);
+
+ return tegra_output_connector_detect(connector, force);
+}
+
+static struct drm_connector_state *
+tegra_sor_connector_duplicate_state(struct drm_connector *connector)
+{
+ struct tegra_sor_state *state = to_sor_state(connector->state);
+ struct tegra_sor_state *copy;
+
+ copy = kmemdup(state, sizeof(*state), GFP_KERNEL);
+ if (!copy)
+ return NULL;
+
+ __drm_atomic_helper_connector_duplicate_state(connector, &copy->base);
+
+ return &copy->base;
+}
+
+static const struct drm_connector_funcs tegra_sor_connector_funcs = {
+ .reset = tegra_sor_connector_reset,
+ .detect = tegra_sor_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = tegra_output_connector_destroy,
+ .atomic_duplicate_state = tegra_sor_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .late_register = tegra_sor_late_register,
+ .early_unregister = tegra_sor_early_unregister,
+};
+
+static int tegra_sor_connector_get_modes(struct drm_connector *connector)
+{
+ struct tegra_output *output = connector_to_output(connector);
+ struct tegra_sor *sor = to_sor(output);
+ int err;
+
+ if (sor->aux)
+ drm_dp_aux_enable(sor->aux);
+
+ err = tegra_output_connector_get_modes(connector);
+
+ if (sor->aux)
+ drm_dp_aux_disable(sor->aux);
+
+ return err;
+}
+
+static enum drm_mode_status
+tegra_sor_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ return MODE_OK;
+}
+
+static const struct drm_connector_helper_funcs tegra_sor_connector_helper_funcs = {
+ .get_modes = tegra_sor_connector_get_modes,
+ .mode_valid = tegra_sor_connector_mode_valid,
+};
+
+static int
+tegra_sor_encoder_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct tegra_output *output = encoder_to_output(encoder);
+ struct tegra_sor_state *state = to_sor_state(conn_state);
+ struct tegra_dc *dc = to_tegra_dc(conn_state->crtc);
+ unsigned long pclk = crtc_state->mode.clock * 1000;
+ struct tegra_sor *sor = to_sor(output);
+ struct drm_display_info *info;
+ int err;
+
+ info = &output->connector.display_info;
+
+ /*
+ * For HBR2 modes, the SOR brick needs to use the x20 multiplier, so
+ * the pixel clock must be corrected accordingly.
+ */
+ if (pclk >= 340000000) {
+ state->link_speed = 20;
+ state->pclk = pclk / 2;
+ } else {
+ state->link_speed = 10;
+ state->pclk = pclk;
+ }
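+
+ /*
+ * A 594 MHz pixel clock (4K at 60 Hz), for example, exceeds the
+ * 340 MHz threshold, so link_speed is set to 20 and state->pclk is
+ * halved to 297 MHz.
+ */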
+
+ err = tegra_dc_state_setup_clock(dc, crtc_state, sor->clk_parent,
+ pclk, 0);
+ if (err < 0) {
+ dev_err(output->dev, "failed to setup CRTC state: %d\n", err);
+ return err;
+ }
+
+ switch (info->bpc) {
+ case 8:
+ case 6:
+ state->bpc = info->bpc;
+ break;
+
+ default:
+ DRM_DEBUG_KMS("%u bits-per-color not supported\n", info->bpc);
+ state->bpc = 8;
+ break;
+ }
+
+ return 0;
+}
+
+static inline u32 tegra_sor_hdmi_subpack(const u8 *ptr, size_t size)
+{
+ u32 value = 0;
+ size_t i;
+
+ for (i = size; i > 0; i--)
+ value = (value << 8) | ptr[i - 1];
+
+ return value;
+}
+
+static void tegra_sor_hdmi_write_infopack(struct tegra_sor *sor,
+ const void *data, size_t size)
+{
+ const u8 *ptr = data;
+ unsigned long offset;
+ size_t i, j;
+ u32 value;
+
+ switch (ptr[0]) {
+ case HDMI_INFOFRAME_TYPE_AVI:
+ offset = SOR_HDMI_AVI_INFOFRAME_HEADER;
+ break;
+
+ case HDMI_INFOFRAME_TYPE_AUDIO:
+ offset = SOR_HDMI_AUDIO_INFOFRAME_HEADER;
+ break;
+
+ case HDMI_INFOFRAME_TYPE_VENDOR:
+ offset = SOR_HDMI_VSI_INFOFRAME_HEADER;
+ break;
+
+ default:
+ dev_err(sor->dev, "unsupported infoframe type: %02x\n",
+ ptr[0]);
+ return;
+ }
+
+ value = INFOFRAME_HEADER_TYPE(ptr[0]) |
+ INFOFRAME_HEADER_VERSION(ptr[1]) |
+ INFOFRAME_HEADER_LEN(ptr[2]);
+ tegra_sor_writel(sor, value, offset);
+ offset++;
+
+ /*
+ * Each subpack contains 7 bytes, divided into:
+ * - subpack_low: bytes 0 - 3
+ * - subpack_high: bytes 4 - 6 (with byte 7 padded to 0x00)
+ */
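+ /*
+ * A packed AVI infoframe, for example, is 17 bytes: three header
+ * bytes are written above and the remaining 14 bytes (checksum plus
+ * payload) fill exactly two subpack pairs, i.e. four register writes.
+ */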
+ for (i = 3, j = 0; i < size; i += 7, j += 8) {
+ size_t rem = size - i, num = min_t(size_t, rem, 4);
+
+ value = tegra_sor_hdmi_subpack(&ptr[i], num);
+ tegra_sor_writel(sor, value, offset++);
+
+ num = min_t(size_t, rem - num, 3);
+
+ value = tegra_sor_hdmi_subpack(&ptr[i + 4], num);
+ tegra_sor_writel(sor, value, offset++);
+ }
+}
+
+static int
+tegra_sor_hdmi_setup_avi_infoframe(struct tegra_sor *sor,
+ const struct drm_display_mode *mode)
+{
+ u8 buffer[HDMI_INFOFRAME_SIZE(AVI)];
+ struct hdmi_avi_infoframe frame;
+ u32 value;
+ int err;
+
+ /* disable AVI infoframe */
+ value = tegra_sor_readl(sor, SOR_HDMI_AVI_INFOFRAME_CTRL);
+ value &= ~INFOFRAME_CTRL_SINGLE;
+ value &= ~INFOFRAME_CTRL_OTHER;
+ value &= ~INFOFRAME_CTRL_ENABLE;
+ tegra_sor_writel(sor, value, SOR_HDMI_AVI_INFOFRAME_CTRL);
+
+ err = drm_hdmi_avi_infoframe_from_display_mode(&frame,
+ &sor->output.connector, mode);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to setup AVI infoframe: %d\n", err);
+ return err;
+ }
+
+ err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
+ if (err < 0) {
+ dev_err(sor->dev, "failed to pack AVI infoframe: %d\n", err);
+ return err;
+ }
+
+ tegra_sor_hdmi_write_infopack(sor, buffer, err);
+
+ /* enable AVI infoframe */
+ value = tegra_sor_readl(sor, SOR_HDMI_AVI_INFOFRAME_CTRL);
+ value |= INFOFRAME_CTRL_CHECKSUM_ENABLE;
+ value |= INFOFRAME_CTRL_ENABLE;
+ tegra_sor_writel(sor, value, SOR_HDMI_AVI_INFOFRAME_CTRL);
+
+ return 0;
+}
+
+static void tegra_sor_write_eld(struct tegra_sor *sor)
+{
+ size_t length = drm_eld_size(sor->output.connector.eld), i;
+
+ for (i = 0; i < length; i++)
+ tegra_sor_writel(sor, i << 8 | sor->output.connector.eld[i],
+ SOR_AUDIO_HDA_ELD_BUFWR);
+
+ /*
+ * The HDA codec will always report an ELD buffer size of 96 bytes and
+ * the HDA codec driver will check that each byte read from the buffer
+ * is valid. Therefore every byte must be written, even if fewer than
+ * 96 bytes were parsed from the EDID.
+ */
+ for (i = length; i < 96; i++)
+ tegra_sor_writel(sor, i << 8 | 0, SOR_AUDIO_HDA_ELD_BUFWR);
+}
+
+static void tegra_sor_audio_prepare(struct tegra_sor *sor)
+{
+ u32 value;
+
+ /*
+ * Enable and unmask the HDA codec SCRATCH0 register interrupt. This
+ * is used for interoperability between the HDA codec driver and the
+ * HDMI/DP driver.
+ */
+ value = SOR_INT_CODEC_SCRATCH1 | SOR_INT_CODEC_SCRATCH0;
+ tegra_sor_writel(sor, value, SOR_INT_ENABLE);
+ tegra_sor_writel(sor, value, SOR_INT_MASK);
+
+ tegra_sor_write_eld(sor);
+
+ value = SOR_AUDIO_HDA_PRESENSE_ELDV | SOR_AUDIO_HDA_PRESENSE_PD;
+ tegra_sor_writel(sor, value, SOR_AUDIO_HDA_PRESENSE);
+}
+
+static void tegra_sor_audio_unprepare(struct tegra_sor *sor)
+{
+ tegra_sor_writel(sor, 0, SOR_AUDIO_HDA_PRESENSE);
+ tegra_sor_writel(sor, 0, SOR_INT_MASK);
+ tegra_sor_writel(sor, 0, SOR_INT_ENABLE);
+}
+
+static void tegra_sor_audio_enable(struct tegra_sor *sor)
+{
+ u32 value;
+
+ value = tegra_sor_readl(sor, SOR_AUDIO_CNTRL);
+
+ /* select HDA audio input */
+ value &= ~SOR_AUDIO_CNTRL_SOURCE_SELECT(SOURCE_SELECT_MASK);
+ value |= SOR_AUDIO_CNTRL_SOURCE_SELECT(SOURCE_SELECT_HDA);
+
+ /* inject null samples */
+ if (sor->format.channels != 2)
+ value &= ~SOR_AUDIO_CNTRL_INJECT_NULLSMPL;
+ else
+ value |= SOR_AUDIO_CNTRL_INJECT_NULLSMPL;
+
+ value |= SOR_AUDIO_CNTRL_AFIFO_FLUSH;
+
+ tegra_sor_writel(sor, value, SOR_AUDIO_CNTRL);
+
+ /* enable advertising HBR capability */
+ tegra_sor_writel(sor, SOR_AUDIO_SPARE_HBR_ENABLE, SOR_AUDIO_SPARE);
+}
+
+static int tegra_sor_hdmi_enable_audio_infoframe(struct tegra_sor *sor)
+{
+ u8 buffer[HDMI_INFOFRAME_SIZE(AUDIO)];
+ struct hdmi_audio_infoframe frame;
+ u32 value;
+ int err;
+
+ err = hdmi_audio_infoframe_init(&frame);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to setup audio infoframe: %d\n", err);
+ return err;
+ }
+
+ frame.channels = sor->format.channels;
+
+ err = hdmi_audio_infoframe_pack(&frame, buffer, sizeof(buffer));
+ if (err < 0) {
+ dev_err(sor->dev, "failed to pack audio infoframe: %d\n", err);
+ return err;
+ }
+
+ tegra_sor_hdmi_write_infopack(sor, buffer, err);
+
+ value = tegra_sor_readl(sor, SOR_HDMI_AUDIO_INFOFRAME_CTRL);
+ value |= INFOFRAME_CTRL_CHECKSUM_ENABLE;
+ value |= INFOFRAME_CTRL_ENABLE;
+ tegra_sor_writel(sor, value, SOR_HDMI_AUDIO_INFOFRAME_CTRL);
+
+ return 0;
+}
+
+static void tegra_sor_hdmi_audio_enable(struct tegra_sor *sor)
+{
+ u32 value;
+
+ tegra_sor_audio_enable(sor);
+
+ tegra_sor_writel(sor, 0, SOR_HDMI_ACR_CTRL);
+
+ value = SOR_HDMI_SPARE_ACR_PRIORITY_HIGH |
+ SOR_HDMI_SPARE_CTS_RESET(1) |
+ SOR_HDMI_SPARE_HW_CTS_ENABLE;
+ tegra_sor_writel(sor, value, SOR_HDMI_SPARE);
+
+ /* enable HW CTS */
+ value = SOR_HDMI_ACR_SUBPACK_LOW_SB1(0);
+ tegra_sor_writel(sor, value, SOR_HDMI_ACR_0441_SUBPACK_LOW);
+
+ /* allow packet to be sent */
+ value = SOR_HDMI_ACR_SUBPACK_HIGH_ENABLE;
+ tegra_sor_writel(sor, value, SOR_HDMI_ACR_0441_SUBPACK_HIGH);
+
+ /* reset N counter and enable lookup */
+ value = SOR_HDMI_AUDIO_N_RESET | SOR_HDMI_AUDIO_N_LOOKUP;
+ tegra_sor_writel(sor, value, SOR_HDMI_AUDIO_N);
+
+ value = (24000 * 4096) / (128 * sor->format.sample_rate / 1000);
+ tegra_sor_writel(sor, value, SOR_AUDIO_AVAL_0320);
+ tegra_sor_writel(sor, 4096, SOR_AUDIO_NVAL_0320);
+
+ tegra_sor_writel(sor, 20000, SOR_AUDIO_AVAL_0441);
+ tegra_sor_writel(sor, 4704, SOR_AUDIO_NVAL_0441);
+
+ tegra_sor_writel(sor, 20000, SOR_AUDIO_AVAL_0882);
+ tegra_sor_writel(sor, 9408, SOR_AUDIO_NVAL_0882);
+
+ tegra_sor_writel(sor, 20000, SOR_AUDIO_AVAL_1764);
+ tegra_sor_writel(sor, 18816, SOR_AUDIO_NVAL_1764);
+
+ value = (24000 * 6144) / (128 * sor->format.sample_rate / 1000);
+ tegra_sor_writel(sor, value, SOR_AUDIO_AVAL_0480);
+ tegra_sor_writel(sor, 6144, SOR_AUDIO_NVAL_0480);
+
+ value = (24000 * 12288) / (128 * sor->format.sample_rate / 1000);
+ tegra_sor_writel(sor, value, SOR_AUDIO_AVAL_0960);
+ tegra_sor_writel(sor, 12288, SOR_AUDIO_NVAL_0960);
+
+ value = (24000 * 24576) / (128 * sor->format.sample_rate / 1000);
+ tegra_sor_writel(sor, value, SOR_AUDIO_AVAL_1920);
+ tegra_sor_writel(sor, 24576, SOR_AUDIO_NVAL_1920);
+
+ value = tegra_sor_readl(sor, SOR_HDMI_AUDIO_N);
+ value &= ~SOR_HDMI_AUDIO_N_RESET;
+ tegra_sor_writel(sor, value, SOR_HDMI_AUDIO_N);
+
+ tegra_sor_hdmi_enable_audio_infoframe(sor);
+}
+
+static void tegra_sor_hdmi_disable_audio_infoframe(struct tegra_sor *sor)
+{
+ u32 value;
+
+ value = tegra_sor_readl(sor, SOR_HDMI_AUDIO_INFOFRAME_CTRL);
+ value &= ~INFOFRAME_CTRL_ENABLE;
+ tegra_sor_writel(sor, value, SOR_HDMI_AUDIO_INFOFRAME_CTRL);
+}
+
+static void tegra_sor_hdmi_audio_disable(struct tegra_sor *sor)
+{
+ tegra_sor_hdmi_disable_audio_infoframe(sor);
+}
+
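+/*
+ * Pick TMDS tuning values for the given frequency. This relies on the
+ * per-SoC tables above being sorted by ascending frequency: the first
+ * entry whose frequency is greater than or equal to the requested one is
+ * returned, so a 297 MHz mode, for example, picks the 300 MHz entry.
+ */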
+static struct tegra_sor_hdmi_settings *
+tegra_sor_hdmi_find_settings(struct tegra_sor *sor, unsigned long frequency)
+{
+ unsigned int i;
+
+ for (i = 0; i < sor->num_settings; i++)
+ if (frequency <= sor->settings[i].frequency)
+ return &sor->settings[i];
+
+ return NULL;
+}
+
+static void tegra_sor_hdmi_disable_scrambling(struct tegra_sor *sor)
+{
+ u32 value;
+
+ value = tegra_sor_readl(sor, SOR_HDMI2_CTRL);
+ value &= ~SOR_HDMI2_CTRL_CLOCK_MODE_DIV_BY_4;
+ value &= ~SOR_HDMI2_CTRL_SCRAMBLE;
+ tegra_sor_writel(sor, value, SOR_HDMI2_CTRL);
+}
+
+static void tegra_sor_hdmi_scdc_disable(struct tegra_sor *sor)
+{
+ struct i2c_adapter *ddc = sor->output.ddc;
+
+ drm_scdc_set_high_tmds_clock_ratio(ddc, false);
+ drm_scdc_set_scrambling(ddc, false);
+
+ tegra_sor_hdmi_disable_scrambling(sor);
+}
+
+static void tegra_sor_hdmi_scdc_stop(struct tegra_sor *sor)
+{
+ if (sor->scdc_enabled) {
+ cancel_delayed_work_sync(&sor->scdc);
+ tegra_sor_hdmi_scdc_disable(sor);
+ }
+}
+
+static void tegra_sor_hdmi_enable_scrambling(struct tegra_sor *sor)
+{
+ u32 value;
+
+ value = tegra_sor_readl(sor, SOR_HDMI2_CTRL);
+ value |= SOR_HDMI2_CTRL_CLOCK_MODE_DIV_BY_4;
+ value |= SOR_HDMI2_CTRL_SCRAMBLE;
+ tegra_sor_writel(sor, value, SOR_HDMI2_CTRL);
+}
+
+static void tegra_sor_hdmi_scdc_enable(struct tegra_sor *sor)
+{
+ struct i2c_adapter *ddc = sor->output.ddc;
+
+ drm_scdc_set_high_tmds_clock_ratio(ddc, true);
+ drm_scdc_set_scrambling(ddc, true);
+
+ tegra_sor_hdmi_enable_scrambling(sor);
+}
+
+static void tegra_sor_hdmi_scdc_work(struct work_struct *work)
+{
+ struct tegra_sor *sor = container_of(work, struct tegra_sor, scdc.work);
+ struct i2c_adapter *ddc = sor->output.ddc;
+
+ if (!drm_scdc_get_scrambling_status(ddc)) {
+ DRM_DEBUG_KMS("SCDC not scrambled\n");
+ tegra_sor_hdmi_scdc_enable(sor);
+ }
+
+ schedule_delayed_work(&sor->scdc, msecs_to_jiffies(5000));
+}
+
+static void tegra_sor_hdmi_scdc_start(struct tegra_sor *sor)
+{
+ struct drm_scdc *scdc = &sor->output.connector.display_info.hdmi.scdc;
+ struct drm_display_mode *mode;
+
+ mode = &sor->output.encoder.crtc->state->adjusted_mode;
+
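+	/*
+	 * HDMI 2.0 requires scrambling and the 1/40 TMDS character clock
+	 * ratio for rates above 340 MHz, both of which are negotiated with
+	 * the sink via SCDC, hence the additional gate on SCDC support.
+	 */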
+ if (mode->clock >= 340000 && scdc->supported) {
+ schedule_delayed_work(&sor->scdc, msecs_to_jiffies(5000));
+ tegra_sor_hdmi_scdc_enable(sor);
+ sor->scdc_enabled = true;
+ }
+}
+
+static void tegra_sor_hdmi_disable(struct drm_encoder *encoder)
+{
+ struct tegra_output *output = encoder_to_output(encoder);
+ struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
+ struct tegra_sor *sor = to_sor(output);
+ u32 value;
+ int err;
+
+ tegra_sor_audio_unprepare(sor);
+ tegra_sor_hdmi_scdc_stop(sor);
+
+ err = tegra_sor_detach(sor);
+ if (err < 0)
+ dev_err(sor->dev, "failed to detach SOR: %d\n", err);
+
+ tegra_sor_writel(sor, 0, SOR_STATE1);
+ tegra_sor_update(sor);
+
+ /* disable display to SOR clock */
+ value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
+
+ if (!sor->soc->has_nvdisplay)
+ value &= ~SOR1_TIMING_CYA;
+
+ value &= ~SOR_ENABLE(sor->index);
+
+ tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
+
+ tegra_dc_commit(dc);
+
+ err = tegra_sor_power_down(sor);
+ if (err < 0)
+ dev_err(sor->dev, "failed to power down SOR: %d\n", err);
+
+ err = tegra_io_pad_power_disable(sor->pad);
+ if (err < 0)
+ dev_err(sor->dev, "failed to power off I/O pad: %d\n", err);
+
+ host1x_client_suspend(&sor->client);
+}
+
+static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
+{
+ struct tegra_output *output = encoder_to_output(encoder);
+ unsigned int h_ref_to_sync = 1, pulse_start, max_ac;
+ struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
+ struct tegra_sor_hdmi_settings *settings;
+ struct tegra_sor *sor = to_sor(output);
+ struct tegra_sor_state *state;
+ struct drm_display_mode *mode;
+ unsigned long rate, pclk;
+ unsigned int div, i;
+ u32 value;
+ int err;
+
+ state = to_sor_state(output->connector.state);
+ mode = &encoder->crtc->state->adjusted_mode;
+ pclk = mode->clock * 1000;
+
+ err = host1x_client_resume(&sor->client);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to resume: %d\n", err);
+ return;
+ }
+
+ /* switch to safe parent clock */
+ err = tegra_sor_set_parent_clock(sor, sor->clk_safe);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to set safe parent clock: %d\n", err);
+ return;
+ }
+
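+	/*
+	 * Express the module clock rate (sourced from the safe parent at
+	 * this point) in quarter-MHz units for the integer/fractional
+	 * SOR_REFCLK divider programmed further below.
+	 */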
+ div = clk_get_rate(sor->clk) / 1000000 * 4;
+
+ err = tegra_io_pad_power_enable(sor->pad);
+ if (err < 0)
+ dev_err(sor->dev, "failed to power on I/O pad: %d\n", err);
+
+ usleep_range(20, 100);
+
+ value = tegra_sor_readl(sor, sor->soc->regs->pll2);
+ value &= ~SOR_PLL2_BANDGAP_POWERDOWN;
+ tegra_sor_writel(sor, value, sor->soc->regs->pll2);
+
+ usleep_range(20, 100);
+
+ value = tegra_sor_readl(sor, sor->soc->regs->pll3);
+ value &= ~SOR_PLL3_PLL_VDD_MODE_3V3;
+ tegra_sor_writel(sor, value, sor->soc->regs->pll3);
+
+ value = tegra_sor_readl(sor, sor->soc->regs->pll0);
+ value &= ~SOR_PLL0_VCOPD;
+ value &= ~SOR_PLL0_PWR;
+ tegra_sor_writel(sor, value, sor->soc->regs->pll0);
+
+ value = tegra_sor_readl(sor, sor->soc->regs->pll2);
+ value &= ~SOR_PLL2_SEQ_PLLCAPPD_ENFORCE;
+ tegra_sor_writel(sor, value, sor->soc->regs->pll2);
+
+ usleep_range(200, 400);
+
+ value = tegra_sor_readl(sor, sor->soc->regs->pll2);
+ value &= ~SOR_PLL2_POWERDOWN_OVERRIDE;
+ value &= ~SOR_PLL2_PORT_POWERDOWN;
+ tegra_sor_writel(sor, value, sor->soc->regs->pll2);
+
+ usleep_range(20, 100);
+
+ value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
+ value |= SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_0 |
+ SOR_DP_PADCTL_PD_TXD_1 | SOR_DP_PADCTL_PD_TXD_2;
+ tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
+
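+	/* wait for the lane sequencer to go idle before triggering it */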
+ while (true) {
+ value = tegra_sor_readl(sor, SOR_LANE_SEQ_CTL);
+ if ((value & SOR_LANE_SEQ_CTL_STATE_BUSY) == 0)
+ break;
+
+ usleep_range(250, 1000);
+ }
+
+ value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_DOWN |
+ SOR_LANE_SEQ_CTL_POWER_STATE_UP | SOR_LANE_SEQ_CTL_DELAY(5);
+ tegra_sor_writel(sor, value, SOR_LANE_SEQ_CTL);
+
+ while (true) {
+ value = tegra_sor_readl(sor, SOR_LANE_SEQ_CTL);
+ if ((value & SOR_LANE_SEQ_CTL_TRIGGER) == 0)
+ break;
+
+ usleep_range(250, 1000);
+ }
+
+ value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
+ value &= ~SOR_CLK_CNTRL_DP_LINK_SPEED_MASK;
+ value &= ~SOR_CLK_CNTRL_DP_CLK_SEL_MASK;
+
+ if (mode->clock < 340000) {
+ DRM_DEBUG_KMS("setting 2.7 GHz link speed\n");
+ value |= SOR_CLK_CNTRL_DP_LINK_SPEED_G2_70;
+ } else {
+ DRM_DEBUG_KMS("setting 5.4 GHz link speed\n");
+ value |= SOR_CLK_CNTRL_DP_LINK_SPEED_G5_40;
+ }
+
+ value |= SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_PCLK;
+ tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
+
+ /* SOR pad PLL stabilization time */
+ usleep_range(250, 1000);
+
+ value = tegra_sor_readl(sor, SOR_DP_LINKCTL0);
+ value &= ~SOR_DP_LINKCTL_LANE_COUNT_MASK;
+ value |= SOR_DP_LINKCTL_LANE_COUNT(4);
+ tegra_sor_writel(sor, value, SOR_DP_LINKCTL0);
+
+ value = tegra_sor_readl(sor, SOR_DP_SPARE0);
+ value &= ~SOR_DP_SPARE_DISP_VIDEO_PREAMBLE;
+ value &= ~SOR_DP_SPARE_PANEL_INTERNAL;
+ value &= ~SOR_DP_SPARE_SEQ_ENABLE;
+ value &= ~SOR_DP_SPARE_MACRO_SOR_CLK;
+ tegra_sor_writel(sor, value, SOR_DP_SPARE0);
+
+ value = SOR_SEQ_CTL_PU_PC(0) | SOR_SEQ_CTL_PU_PC_ALT(0) |
+ SOR_SEQ_CTL_PD_PC(8) | SOR_SEQ_CTL_PD_PC_ALT(8);
+ tegra_sor_writel(sor, value, SOR_SEQ_CTL);
+
+ value = SOR_SEQ_INST_DRIVE_PWM_OUT_LO | SOR_SEQ_INST_HALT |
+ SOR_SEQ_INST_WAIT_VSYNC | SOR_SEQ_INST_WAIT(1);
+ tegra_sor_writel(sor, value, SOR_SEQ_INST(0));
+ tegra_sor_writel(sor, value, SOR_SEQ_INST(8));
+
+ if (!sor->soc->has_nvdisplay) {
+ /* program the reference clock */
+ value = SOR_REFCLK_DIV_INT(div) | SOR_REFCLK_DIV_FRAC(div);
+ tegra_sor_writel(sor, value, SOR_REFCLK);
+ }
+
+ /* XXX not in TRM */
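+	/* route link 0 through the configured lane crossbar, link 1 one-to-one */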
+ for (value = 0, i = 0; i < 5; i++)
+ value |= SOR_XBAR_CTRL_LINK0_XSEL(i, sor->xbar_cfg[i]) |
+ SOR_XBAR_CTRL_LINK1_XSEL(i, i);
+
+ tegra_sor_writel(sor, 0x00000000, SOR_XBAR_POL);
+ tegra_sor_writel(sor, value, SOR_XBAR_CTRL);
+
+ /*
+ * Switch the pad clock to the DP clock. Note that we cannot actually
+ * do this because Tegra186 and later don't support clk_set_parent()
+ * on the sorX_pad_clkout clocks. We already do the equivalent above
+ * using the DP_CLK_SEL mux of the SOR_CLK_CNTRL register.
+ */
+#if 0
+ err = clk_set_parent(sor->clk_pad, sor->clk_dp);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to select pad parent clock: %d\n",
+ err);
+ return;
+ }
+#endif
+
+ /* switch the SOR clock to the pad clock */
+ err = tegra_sor_set_parent_clock(sor, sor->clk_pad);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to select SOR parent clock: %d\n",
+ err);
+ return;
+ }
+
+ /* switch the output clock to the parent pixel clock */
+ err = clk_set_parent(sor->clk, sor->clk_parent);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to select output parent clock: %d\n",
+ err);
+ return;
+ }
+
+ /* adjust clock rate for HDMI 2.0 modes */
+ rate = clk_get_rate(sor->clk_parent);
+
+ if (mode->clock >= 340000)
+ rate /= 2;
+
+ DRM_DEBUG_KMS("setting clock to %lu Hz, mode: %lu Hz\n", rate, pclk);
+
+ clk_set_rate(sor->clk, rate);
+
+ if (!sor->soc->has_nvdisplay) {
+ value = SOR_INPUT_CONTROL_HDMI_SRC_SELECT(dc->pipe);
+
+ /* XXX is this the proper check? */
+ if (mode->clock < 75000)
+ value |= SOR_INPUT_CONTROL_ARM_VIDEO_RANGE_LIMITED;
+
+ tegra_sor_writel(sor, value, SOR_INPUT_CONTROL);
+ }
+
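+	/*
+	 * Size the maximum auxiliary (data island) packet count that fits
+	 * into the horizontal blanking interval: packets are 32 pixels
+	 * wide, and the constant 18 presumably accounts for the preamble
+	 * and guard bands surrounding the data island period.
+	 */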
+ max_ac = ((mode->htotal - mode->hdisplay) - SOR_REKEY - 18) / 32;
+
+ value = SOR_HDMI_CTRL_ENABLE | SOR_HDMI_CTRL_MAX_AC_PACKET(max_ac) |
+ SOR_HDMI_CTRL_AUDIO_LAYOUT | SOR_HDMI_CTRL_REKEY(SOR_REKEY);
+ tegra_sor_writel(sor, value, SOR_HDMI_CTRL);
+
+ if (!dc->soc->has_nvdisplay) {
+ /* H_PULSE2 setup */
+ pulse_start = h_ref_to_sync +
+ (mode->hsync_end - mode->hsync_start) +
+ (mode->htotal - mode->hsync_end) - 10;
+
+ value = PULSE_LAST_END_A | PULSE_QUAL_VACTIVE |
+ PULSE_POLARITY_HIGH | PULSE_MODE_NORMAL;
+ tegra_dc_writel(dc, value, DC_DISP_H_PULSE2_CONTROL);
+
+ value = PULSE_END(pulse_start + 8) | PULSE_START(pulse_start);
+ tegra_dc_writel(dc, value, DC_DISP_H_PULSE2_POSITION_A);
+
+ value = tegra_dc_readl(dc, DC_DISP_DISP_SIGNAL_OPTIONS0);
+ value |= H_PULSE2_ENABLE;
+ tegra_dc_writel(dc, value, DC_DISP_DISP_SIGNAL_OPTIONS0);
+ }
+
+ /* infoframe setup */
+ err = tegra_sor_hdmi_setup_avi_infoframe(sor, mode);
+ if (err < 0)
+ dev_err(sor->dev, "failed to setup AVI infoframe: %d\n", err);
+
+ /* XXX HDMI audio support not implemented yet */
+ tegra_sor_hdmi_disable_audio_infoframe(sor);
+
+ /* use single TMDS protocol */
+ value = tegra_sor_readl(sor, SOR_STATE1);
+ value &= ~SOR_STATE_ASY_PROTOCOL_MASK;
+ value |= SOR_STATE_ASY_PROTOCOL_SINGLE_TMDS_A;
+ tegra_sor_writel(sor, value, SOR_STATE1);
+
+ /* power up pad calibration */
+ value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
+ value &= ~SOR_DP_PADCTL_PAD_CAL_PD;
+ tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
+
+ /* production settings */
+ settings = tegra_sor_hdmi_find_settings(sor, mode->clock * 1000);
+ if (!settings) {
+ dev_err(sor->dev, "no settings for pixel clock %d Hz\n",
+ mode->clock * 1000);
+ return;
+ }
+
+ value = tegra_sor_readl(sor, sor->soc->regs->pll0);
+ value &= ~SOR_PLL0_ICHPMP_MASK;
+ value &= ~SOR_PLL0_FILTER_MASK;
+ value &= ~SOR_PLL0_VCOCAP_MASK;
+ value |= SOR_PLL0_ICHPMP(settings->ichpmp);
+ value |= SOR_PLL0_FILTER(settings->filter);
+ value |= SOR_PLL0_VCOCAP(settings->vcocap);
+ tegra_sor_writel(sor, value, sor->soc->regs->pll0);
+
+ /* XXX not in TRM */
+ value = tegra_sor_readl(sor, sor->soc->regs->pll1);
+ value &= ~SOR_PLL1_LOADADJ_MASK;
+ value &= ~SOR_PLL1_TMDS_TERMADJ_MASK;
+ value |= SOR_PLL1_LOADADJ(settings->loadadj);
+ value |= SOR_PLL1_TMDS_TERMADJ(settings->tmds_termadj);
+ value |= SOR_PLL1_TMDS_TERM;
+ tegra_sor_writel(sor, value, sor->soc->regs->pll1);
+
+ value = tegra_sor_readl(sor, sor->soc->regs->pll3);
+ value &= ~SOR_PLL3_BG_TEMP_COEF_MASK;
+ value &= ~SOR_PLL3_BG_VREF_LEVEL_MASK;
+ value &= ~SOR_PLL3_AVDD10_LEVEL_MASK;
+ value &= ~SOR_PLL3_AVDD14_LEVEL_MASK;
+ value |= SOR_PLL3_BG_TEMP_COEF(settings->bg_temp_coef);
+ value |= SOR_PLL3_BG_VREF_LEVEL(settings->bg_vref_level);
+ value |= SOR_PLL3_AVDD10_LEVEL(settings->avdd10_level);
+ value |= SOR_PLL3_AVDD14_LEVEL(settings->avdd14_level);
+ tegra_sor_writel(sor, value, sor->soc->regs->pll3);
+
+ value = settings->drive_current[3] << 24 |
+ settings->drive_current[2] << 16 |
+ settings->drive_current[1] << 8 |
+ settings->drive_current[0] << 0;
+ tegra_sor_writel(sor, value, SOR_LANE_DRIVE_CURRENT0);
+
+ value = settings->preemphasis[3] << 24 |
+ settings->preemphasis[2] << 16 |
+ settings->preemphasis[1] << 8 |
+ settings->preemphasis[0] << 0;
+ tegra_sor_writel(sor, value, SOR_LANE_PREEMPHASIS0);
+
+ value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
+ value &= ~SOR_DP_PADCTL_TX_PU_MASK;
+ value |= SOR_DP_PADCTL_TX_PU_ENABLE;
+ value |= SOR_DP_PADCTL_TX_PU(settings->tx_pu_value);
+ tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
+
+ value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl2);
+ value &= ~SOR_DP_PADCTL_SPAREPLL_MASK;
+ value |= SOR_DP_PADCTL_SPAREPLL(settings->sparepll);
+ tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl2);
+
+ /* power down pad calibration */
+ value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
+ value |= SOR_DP_PADCTL_PAD_CAL_PD;
+ tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
+
+ if (!dc->soc->has_nvdisplay) {
+ /* miscellaneous display controller settings */
+ value = VSYNC_H_POSITION(1);
+ tegra_dc_writel(dc, value, DC_DISP_DISP_TIMING_OPTIONS);
+ }
+
+ value = tegra_dc_readl(dc, DC_DISP_DISP_COLOR_CONTROL);
+ value &= ~DITHER_CONTROL_MASK;
+ value &= ~BASE_COLOR_SIZE_MASK;
+
+ switch (state->bpc) {
+ case 6:
+ value |= BASE_COLOR_SIZE_666;
+ break;
+
+ case 8:
+ value |= BASE_COLOR_SIZE_888;
+ break;
+
+ case 10:
+ value |= BASE_COLOR_SIZE_101010;
+ break;
+
+ case 12:
+ value |= BASE_COLOR_SIZE_121212;
+ break;
+
+ default:
+ WARN(1, "%u bits-per-color not supported\n", state->bpc);
+ value |= BASE_COLOR_SIZE_888;
+ break;
+ }
+
+ tegra_dc_writel(dc, value, DC_DISP_DISP_COLOR_CONTROL);
+
+ /* XXX set display head owner */
+ value = tegra_sor_readl(sor, SOR_STATE1);
+ value &= ~SOR_STATE_ASY_OWNER_MASK;
+ value |= SOR_STATE_ASY_OWNER(1 + dc->pipe);
+ tegra_sor_writel(sor, value, SOR_STATE1);
+
+ err = tegra_sor_power_up(sor, 250);
+ if (err < 0)
+ dev_err(sor->dev, "failed to power up SOR: %d\n", err);
+
+ /* configure dynamic range of output */
+ value = tegra_sor_readl(sor, sor->soc->regs->head_state0 + dc->pipe);
+ value &= ~SOR_HEAD_STATE_RANGECOMPRESS_MASK;
+ value &= ~SOR_HEAD_STATE_DYNRANGE_MASK;
+ tegra_sor_writel(sor, value, sor->soc->regs->head_state0 + dc->pipe);
+
+ /* configure colorspace */
+ value = tegra_sor_readl(sor, sor->soc->regs->head_state0 + dc->pipe);
+ value &= ~SOR_HEAD_STATE_COLORSPACE_MASK;
+ value |= SOR_HEAD_STATE_COLORSPACE_RGB;
+ tegra_sor_writel(sor, value, sor->soc->regs->head_state0 + dc->pipe);
+
+ tegra_sor_mode_set(sor, mode, state);
+
+ tegra_sor_update(sor);
+
+ /* program preamble timing in SOR (XXX) */
+ value = tegra_sor_readl(sor, SOR_DP_SPARE0);
+ value &= ~SOR_DP_SPARE_DISP_VIDEO_PREAMBLE;
+ tegra_sor_writel(sor, value, SOR_DP_SPARE0);
+
+ err = tegra_sor_attach(sor);
+ if (err < 0)
+ dev_err(sor->dev, "failed to attach SOR: %d\n", err);
+
+ /* enable display to SOR clock and generate HDMI preamble */
+ value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
+
+ if (!sor->soc->has_nvdisplay)
+ value |= SOR1_TIMING_CYA;
+
+ value |= SOR_ENABLE(sor->index);
+
+ tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
+
+ if (dc->soc->has_nvdisplay) {
+ value = tegra_dc_readl(dc, DC_DISP_CORE_SOR_SET_CONTROL(sor->index));
+ value &= ~PROTOCOL_MASK;
+ value |= PROTOCOL_SINGLE_TMDS_A;
+ tegra_dc_writel(dc, value, DC_DISP_CORE_SOR_SET_CONTROL(sor->index));
+ }
+
+ tegra_dc_commit(dc);
+
+ err = tegra_sor_wakeup(sor);
+ if (err < 0)
+ dev_err(sor->dev, "failed to wakeup SOR: %d\n", err);
+
+ tegra_sor_hdmi_scdc_start(sor);
+ tegra_sor_audio_prepare(sor);
+}
+
+static const struct drm_encoder_helper_funcs tegra_sor_hdmi_helpers = {
+ .disable = tegra_sor_hdmi_disable,
+ .enable = tegra_sor_hdmi_enable,
+ .atomic_check = tegra_sor_encoder_atomic_check,
+};
+
+static void tegra_sor_dp_disable(struct drm_encoder *encoder)
+{
+ struct tegra_output *output = encoder_to_output(encoder);
+ struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
+ struct tegra_sor *sor = to_sor(output);
+ u32 value;
+ int err;
+
+ if (output->panel)
+ drm_panel_disable(output->panel);
+
+ /*
+ * Do not attempt to power down a DP link if we're not connected since
+ * the AUX transactions would just be timing out.
+ */
+ if (output->connector.status != connector_status_disconnected) {
+ err = drm_dp_link_power_down(sor->aux, &sor->link);
+ if (err < 0)
+ dev_err(sor->dev, "failed to power down link: %d\n",
+ err);
+ }
+
+ err = tegra_sor_detach(sor);
+ if (err < 0)
+ dev_err(sor->dev, "failed to detach SOR: %d\n", err);
+
+ tegra_sor_writel(sor, 0, SOR_STATE1);
+ tegra_sor_update(sor);
+
+ value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
+ value &= ~SOR_ENABLE(sor->index);
+ tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
+ tegra_dc_commit(dc);
+
+ value = tegra_sor_readl(sor, SOR_STATE1);
+ value &= ~SOR_STATE_ASY_PROTOCOL_MASK;
+ value &= ~SOR_STATE_ASY_SUBOWNER_MASK;
+ value &= ~SOR_STATE_ASY_OWNER_MASK;
+ tegra_sor_writel(sor, value, SOR_STATE1);
+ tegra_sor_update(sor);
+
+ /* switch to safe parent clock */
+ err = tegra_sor_set_parent_clock(sor, sor->clk_safe);
+ if (err < 0)
+ dev_err(sor->dev, "failed to set safe clock: %d\n", err);
+
+ err = tegra_sor_power_down(sor);
+ if (err < 0)
+ dev_err(sor->dev, "failed to power down SOR: %d\n", err);
+
+ err = tegra_io_pad_power_disable(sor->pad);
+ if (err < 0)
+ dev_err(sor->dev, "failed to power off I/O pad: %d\n", err);
+
+ err = drm_dp_aux_disable(sor->aux);
+ if (err < 0)
+		dev_err(sor->dev, "failed to disable DPAUX: %d\n", err);
+
+ if (output->panel)
+ drm_panel_unprepare(output->panel);
+
+ host1x_client_suspend(&sor->client);
+}
+
+static void tegra_sor_dp_enable(struct drm_encoder *encoder)
+{
+ struct tegra_output *output = encoder_to_output(encoder);
+ struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
+ struct tegra_sor *sor = to_sor(output);
+ struct tegra_sor_config config;
+ struct tegra_sor_state *state;
+ struct drm_display_mode *mode;
+ struct drm_display_info *info;
+ unsigned int i;
+ u32 value;
+ int err;
+
+ state = to_sor_state(output->connector.state);
+ mode = &encoder->crtc->state->adjusted_mode;
+ info = &output->connector.display_info;
+
+ err = host1x_client_resume(&sor->client);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to resume: %d\n", err);
+ return;
+ }
+
+ /* switch to safe parent clock */
+ err = tegra_sor_set_parent_clock(sor, sor->clk_safe);
+ if (err < 0)
+ dev_err(sor->dev, "failed to set safe parent clock: %d\n", err);
+
+ err = tegra_io_pad_power_enable(sor->pad);
+ if (err < 0)
+ dev_err(sor->dev, "failed to power on LVDS rail: %d\n", err);
+
+ usleep_range(20, 100);
+
+ err = drm_dp_aux_enable(sor->aux);
+ if (err < 0)
+ dev_err(sor->dev, "failed to enable DPAUX: %d\n", err);
+
+ err = drm_dp_link_probe(sor->aux, &sor->link);
+ if (err < 0)
+ dev_err(sor->dev, "failed to probe DP link: %d\n", err);
+
+ tegra_sor_filter_rates(sor);
+
+ err = drm_dp_link_choose(&sor->link, mode, info);
+ if (err < 0)
+ dev_err(sor->dev, "failed to choose link: %d\n", err);
+
+ if (output->panel)
+ drm_panel_prepare(output->panel);
+
+ value = tegra_sor_readl(sor, sor->soc->regs->pll2);
+ value &= ~SOR_PLL2_BANDGAP_POWERDOWN;
+ tegra_sor_writel(sor, value, sor->soc->regs->pll2);
+
+ usleep_range(20, 40);
+
+ value = tegra_sor_readl(sor, sor->soc->regs->pll3);
+ value |= SOR_PLL3_PLL_VDD_MODE_3V3;
+ tegra_sor_writel(sor, value, sor->soc->regs->pll3);
+
+ value = tegra_sor_readl(sor, sor->soc->regs->pll0);
+ value &= ~(SOR_PLL0_VCOPD | SOR_PLL0_PWR);
+ tegra_sor_writel(sor, value, sor->soc->regs->pll0);
+
+ value = tegra_sor_readl(sor, sor->soc->regs->pll2);
+ value &= ~SOR_PLL2_SEQ_PLLCAPPD_ENFORCE;
+ value |= SOR_PLL2_SEQ_PLLCAPPD;
+ tegra_sor_writel(sor, value, sor->soc->regs->pll2);
+
+ usleep_range(200, 400);
+
+ value = tegra_sor_readl(sor, sor->soc->regs->pll2);
+ value &= ~SOR_PLL2_POWERDOWN_OVERRIDE;
+ value &= ~SOR_PLL2_PORT_POWERDOWN;
+ tegra_sor_writel(sor, value, sor->soc->regs->pll2);
+
+ value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
+ value &= ~SOR_CLK_CNTRL_DP_CLK_SEL_MASK;
+
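+	/*
+	 * eDP (internal panel) uses the single-ended DP clock, external DP
+	 * the differential one.
+	 */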
+ if (output->panel)
+ value |= SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_DPCLK;
+ else
+ value |= SOR_CLK_CNTRL_DP_CLK_SEL_DIFF_DPCLK;
+
+ tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
+
+ usleep_range(200, 400);
+
+ value = tegra_sor_readl(sor, SOR_DP_SPARE0);
+ /* XXX not in TRM */
+ if (output->panel)
+ value |= SOR_DP_SPARE_PANEL_INTERNAL;
+ else
+ value &= ~SOR_DP_SPARE_PANEL_INTERNAL;
+
+ value |= SOR_DP_SPARE_SEQ_ENABLE;
+ tegra_sor_writel(sor, value, SOR_DP_SPARE0);
+
+ /* XXX not in TRM */
+ tegra_sor_writel(sor, 0, SOR_LVDS);
+
+ value = tegra_sor_readl(sor, sor->soc->regs->pll0);
+ value &= ~SOR_PLL0_ICHPMP_MASK;
+ value &= ~SOR_PLL0_VCOCAP_MASK;
+ value |= SOR_PLL0_ICHPMP(0x1);
+ value |= SOR_PLL0_VCOCAP(0x3);
+ value |= SOR_PLL0_RESISTOR_EXT;
+ tegra_sor_writel(sor, value, sor->soc->regs->pll0);
+
+ /* XXX not in TRM */
+ for (value = 0, i = 0; i < 5; i++)
+ value |= SOR_XBAR_CTRL_LINK0_XSEL(i, sor->soc->xbar_cfg[i]) |
+ SOR_XBAR_CTRL_LINK1_XSEL(i, i);
+
+ tegra_sor_writel(sor, 0x00000000, SOR_XBAR_POL);
+ tegra_sor_writel(sor, value, SOR_XBAR_CTRL);
+
+ /*
+ * Switch the pad clock to the DP clock. Note that we cannot actually
+ * do this because Tegra186 and later don't support clk_set_parent()
+ * on the sorX_pad_clkout clocks. We already do the equivalent above
+ * using the DP_CLK_SEL mux of the SOR_CLK_CNTRL register.
+ */
+#if 0
+ err = clk_set_parent(sor->clk_pad, sor->clk_parent);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to select pad parent clock: %d\n",
+ err);
+ return;
+ }
+#endif
+
+ /* switch the SOR clock to the pad clock */
+ err = tegra_sor_set_parent_clock(sor, sor->clk_pad);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to select SOR parent clock: %d\n",
+ err);
+ return;
+ }
+
+ /* switch the output clock to the parent pixel clock */
+ err = clk_set_parent(sor->clk, sor->clk_parent);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to select output parent clock: %d\n",
+ err);
+ return;
+ }
+
+ /* use DP-A protocol */
+ value = tegra_sor_readl(sor, SOR_STATE1);
+ value &= ~SOR_STATE_ASY_PROTOCOL_MASK;
+ value |= SOR_STATE_ASY_PROTOCOL_DP_A;
+ tegra_sor_writel(sor, value, SOR_STATE1);
+
+ /* enable port */
+ value = tegra_sor_readl(sor, SOR_DP_LINKCTL0);
+ value |= SOR_DP_LINKCTL_ENABLE;
+ tegra_sor_writel(sor, value, SOR_DP_LINKCTL0);
+
+ tegra_sor_dp_term_calibrate(sor);
+
+ err = drm_dp_link_train(&sor->link);
+ if (err < 0)
+ dev_err(sor->dev, "link training failed: %d\n", err);
+ else
+ dev_dbg(sor->dev, "link training succeeded\n");
+
+ err = drm_dp_link_power_up(sor->aux, &sor->link);
+ if (err < 0)
+ dev_err(sor->dev, "failed to power up DP link: %d\n", err);
+
+ /* compute configuration */
+ memset(&config, 0, sizeof(config));
+ config.bits_per_pixel = state->bpc * 3;
+
+ err = tegra_sor_compute_config(sor, mode, &config, &sor->link);
+ if (err < 0)
+ dev_err(sor->dev, "failed to compute configuration: %d\n", err);
+
+ tegra_sor_apply_config(sor, &config);
+ tegra_sor_mode_set(sor, mode, state);
+
+ if (output->panel) {
+ /* CSTM (LVDS, link A/B, upper) */
+ value = SOR_CSTM_LVDS | SOR_CSTM_LINK_ACT_A | SOR_CSTM_LINK_ACT_B |
+ SOR_CSTM_UPPER;
+ tegra_sor_writel(sor, value, SOR_CSTM);
+
+ /* PWM setup */
+ err = tegra_sor_setup_pwm(sor, 250);
+ if (err < 0)
+ dev_err(sor->dev, "failed to setup PWM: %d\n", err);
+ }
+
+ tegra_sor_update(sor);
+
+ err = tegra_sor_power_up(sor, 250);
+ if (err < 0)
+ dev_err(sor->dev, "failed to power up SOR: %d\n", err);
+
+ /* attach and wake up */
+ err = tegra_sor_attach(sor);
+ if (err < 0)
+ dev_err(sor->dev, "failed to attach SOR: %d\n", err);
+
+ value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
+ value |= SOR_ENABLE(sor->index);
+ tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
+
+ tegra_dc_commit(dc);
+
+ err = tegra_sor_wakeup(sor);
+ if (err < 0)
+ dev_err(sor->dev, "failed to wakeup SOR: %d\n", err);
+
+ if (output->panel)
+ drm_panel_enable(output->panel);
+}
+
+static const struct drm_encoder_helper_funcs tegra_sor_dp_helpers = {
+ .disable = tegra_sor_dp_disable,
+ .enable = tegra_sor_dp_enable,
+ .atomic_check = tegra_sor_encoder_atomic_check,
+};
+
+static void tegra_sor_disable_regulator(void *data)
+{
+ struct regulator *reg = data;
+
+ regulator_disable(reg);
+}
+
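+/*
+ * Enable a regulator and register a devm action so that it is automatically
+ * disabled again on probe failure or driver unbind.
+ */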
+static int tegra_sor_enable_regulator(struct tegra_sor *sor, struct regulator *reg)
+{
+ int err;
+
+ err = regulator_enable(reg);
+ if (err)
+ return err;
+
+ return devm_add_action_or_reset(sor->dev, tegra_sor_disable_regulator, reg);
+}
+
+static int tegra_sor_hdmi_probe(struct tegra_sor *sor)
+{
+ int err;
+
+ sor->avdd_io_supply = devm_regulator_get(sor->dev, "avdd-io-hdmi-dp");
+ if (IS_ERR(sor->avdd_io_supply)) {
+ dev_err(sor->dev, "cannot get AVDD I/O supply: %ld\n",
+ PTR_ERR(sor->avdd_io_supply));
+ return PTR_ERR(sor->avdd_io_supply);
+ }
+
+ err = tegra_sor_enable_regulator(sor, sor->avdd_io_supply);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to enable AVDD I/O supply: %d\n",
+ err);
+ return err;
+ }
+
+ sor->vdd_pll_supply = devm_regulator_get(sor->dev, "vdd-hdmi-dp-pll");
+ if (IS_ERR(sor->vdd_pll_supply)) {
+ dev_err(sor->dev, "cannot get VDD PLL supply: %ld\n",
+ PTR_ERR(sor->vdd_pll_supply));
+ return PTR_ERR(sor->vdd_pll_supply);
+ }
+
+ err = tegra_sor_enable_regulator(sor, sor->vdd_pll_supply);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to enable VDD PLL supply: %d\n",
+ err);
+ return err;
+ }
+
+ sor->hdmi_supply = devm_regulator_get(sor->dev, "hdmi");
+ if (IS_ERR(sor->hdmi_supply)) {
+ dev_err(sor->dev, "cannot get HDMI supply: %ld\n",
+ PTR_ERR(sor->hdmi_supply));
+ return PTR_ERR(sor->hdmi_supply);
+ }
+
+ err = tegra_sor_enable_regulator(sor, sor->hdmi_supply);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to enable HDMI supply: %d\n", err);
+ return err;
+ }
+
+ INIT_DELAYED_WORK(&sor->scdc, tegra_sor_hdmi_scdc_work);
+
+ return 0;
+}
+
+static const struct tegra_sor_ops tegra_sor_hdmi_ops = {
+ .name = "HDMI",
+ .probe = tegra_sor_hdmi_probe,
+ .audio_enable = tegra_sor_hdmi_audio_enable,
+ .audio_disable = tegra_sor_hdmi_audio_disable,
+};
+
+static int tegra_sor_dp_probe(struct tegra_sor *sor)
+{
+ int err;
+
+ sor->avdd_io_supply = devm_regulator_get(sor->dev, "avdd-io-hdmi-dp");
+ if (IS_ERR(sor->avdd_io_supply))
+ return PTR_ERR(sor->avdd_io_supply);
+
+ err = tegra_sor_enable_regulator(sor, sor->avdd_io_supply);
+ if (err < 0)
+ return err;
+
+ sor->vdd_pll_supply = devm_regulator_get(sor->dev, "vdd-hdmi-dp-pll");
+ if (IS_ERR(sor->vdd_pll_supply))
+ return PTR_ERR(sor->vdd_pll_supply);
+
+ err = tegra_sor_enable_regulator(sor, sor->vdd_pll_supply);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static const struct tegra_sor_ops tegra_sor_dp_ops = {
+ .name = "DP",
+ .probe = tegra_sor_dp_probe,
+};
+
+static int tegra_sor_init(struct host1x_client *client)
+{
+ struct drm_device *drm = dev_get_drvdata(client->host);
+ const struct drm_encoder_helper_funcs *helpers = NULL;
+ struct tegra_sor *sor = host1x_client_to_sor(client);
+ int connector = DRM_MODE_CONNECTOR_Unknown;
+ int encoder = DRM_MODE_ENCODER_NONE;
+ int err;
+
+ if (!sor->aux) {
+ if (sor->ops == &tegra_sor_hdmi_ops) {
+ connector = DRM_MODE_CONNECTOR_HDMIA;
+ encoder = DRM_MODE_ENCODER_TMDS;
+ helpers = &tegra_sor_hdmi_helpers;
+ } else if (sor->soc->supports_lvds) {
+ connector = DRM_MODE_CONNECTOR_LVDS;
+ encoder = DRM_MODE_ENCODER_LVDS;
+ }
+ } else {
+ if (sor->output.panel) {
+ connector = DRM_MODE_CONNECTOR_eDP;
+ encoder = DRM_MODE_ENCODER_TMDS;
+ helpers = &tegra_sor_dp_helpers;
+ } else {
+ connector = DRM_MODE_CONNECTOR_DisplayPort;
+ encoder = DRM_MODE_ENCODER_TMDS;
+ helpers = &tegra_sor_dp_helpers;
+ }
+
+ sor->link.ops = &tegra_sor_dp_link_ops;
+ sor->link.aux = sor->aux;
+ }
+
+ sor->output.dev = sor->dev;
+
+ drm_connector_init_with_ddc(drm, &sor->output.connector,
+ &tegra_sor_connector_funcs,
+ connector,
+ sor->output.ddc);
+ drm_connector_helper_add(&sor->output.connector,
+ &tegra_sor_connector_helper_funcs);
+ sor->output.connector.dpms = DRM_MODE_DPMS_OFF;
+
+ drm_simple_encoder_init(drm, &sor->output.encoder, encoder);
+ drm_encoder_helper_add(&sor->output.encoder, helpers);
+
+ drm_connector_attach_encoder(&sor->output.connector,
+ &sor->output.encoder);
+ drm_connector_register(&sor->output.connector);
+
+ err = tegra_output_init(drm, &sor->output);
+ if (err < 0) {
+ dev_err(client->dev, "failed to initialize output: %d\n", err);
+ return err;
+ }
+
+ tegra_output_find_possible_crtcs(&sor->output, drm);
+
+ if (sor->aux) {
+ err = drm_dp_aux_attach(sor->aux, &sor->output);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to attach DP: %d\n", err);
+ return err;
+ }
+ }
+
+ /*
+ * XXX: Remove this reset once proper hand-over from firmware to
+ * kernel is possible.
+ */
+ if (sor->rst) {
+ err = pm_runtime_resume_and_get(sor->dev);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to get runtime PM: %d\n", err);
+ return err;
+ }
+
+ err = reset_control_acquire(sor->rst);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to acquire SOR reset: %d\n",
+ err);
+ goto rpm_put;
+ }
+
+ err = reset_control_assert(sor->rst);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to assert SOR reset: %d\n",
+ err);
+ goto rpm_put;
+ }
+ }
+
+ err = clk_prepare_enable(sor->clk);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to enable clock: %d\n", err);
+ goto rpm_put;
+ }
+
+ usleep_range(1000, 3000);
+
+ if (sor->rst) {
+ err = reset_control_deassert(sor->rst);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to deassert SOR reset: %d\n",
+ err);
+ clk_disable_unprepare(sor->clk);
+ goto rpm_put;
+ }
+
+ reset_control_release(sor->rst);
+ pm_runtime_put(sor->dev);
+ }
+
+ err = clk_prepare_enable(sor->clk_safe);
+ if (err < 0) {
+ clk_disable_unprepare(sor->clk);
+ return err;
+ }
+
+ err = clk_prepare_enable(sor->clk_dp);
+ if (err < 0) {
+ clk_disable_unprepare(sor->clk_safe);
+ clk_disable_unprepare(sor->clk);
+ return err;
+ }
+
+ return 0;
+
+rpm_put:
+ if (sor->rst)
+ pm_runtime_put(sor->dev);
+
+ return err;
+}
+
+static int tegra_sor_exit(struct host1x_client *client)
+{
+ struct tegra_sor *sor = host1x_client_to_sor(client);
+ int err;
+
+ tegra_output_exit(&sor->output);
+
+ if (sor->aux) {
+ err = drm_dp_aux_detach(sor->aux);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to detach DP: %d\n", err);
+ return err;
+ }
+ }
+
+ clk_disable_unprepare(sor->clk_safe);
+ clk_disable_unprepare(sor->clk_dp);
+ clk_disable_unprepare(sor->clk);
+
+ return 0;
+}
+
+static int tegra_sor_runtime_suspend(struct host1x_client *client)
+{
+ struct tegra_sor *sor = host1x_client_to_sor(client);
+ struct device *dev = client->dev;
+ int err;
+
+ if (sor->rst) {
+ err = reset_control_assert(sor->rst);
+ if (err < 0) {
+ dev_err(dev, "failed to assert reset: %d\n", err);
+ return err;
+ }
+
+ reset_control_release(sor->rst);
+ }
+
+ usleep_range(1000, 2000);
+
+ clk_disable_unprepare(sor->clk);
+ pm_runtime_put_sync(dev);
+
+ return 0;
+}
+
+static int tegra_sor_runtime_resume(struct host1x_client *client)
+{
+ struct tegra_sor *sor = host1x_client_to_sor(client);
+ struct device *dev = client->dev;
+ int err;
+
+ err = pm_runtime_resume_and_get(dev);
+ if (err < 0) {
+ dev_err(dev, "failed to get runtime PM: %d\n", err);
+ return err;
+ }
+
+ err = clk_prepare_enable(sor->clk);
+ if (err < 0) {
+ dev_err(dev, "failed to enable clock: %d\n", err);
+ goto put_rpm;
+ }
+
+ usleep_range(1000, 2000);
+
+ if (sor->rst) {
+ err = reset_control_acquire(sor->rst);
+ if (err < 0) {
+ dev_err(dev, "failed to acquire reset: %d\n", err);
+ goto disable_clk;
+ }
+
+ err = reset_control_deassert(sor->rst);
+ if (err < 0) {
+ dev_err(dev, "failed to deassert reset: %d\n", err);
+ goto release_reset;
+ }
+ }
+
+ return 0;
+
+release_reset:
+ reset_control_release(sor->rst);
+disable_clk:
+ clk_disable_unprepare(sor->clk);
+put_rpm:
+ pm_runtime_put_sync(dev);
+ return err;
+}
+
+static const struct host1x_client_ops sor_client_ops = {
+ .init = tegra_sor_init,
+ .exit = tegra_sor_exit,
+ .suspend = tegra_sor_runtime_suspend,
+ .resume = tegra_sor_runtime_resume,
+};
+
+static const u8 tegra124_sor_xbar_cfg[5] = {
+ 0, 1, 2, 3, 4
+};
+
+static const struct tegra_sor_regs tegra124_sor_regs = {
+ .head_state0 = 0x05,
+ .head_state1 = 0x07,
+ .head_state2 = 0x09,
+ .head_state3 = 0x0b,
+ .head_state4 = 0x0d,
+ .head_state5 = 0x0f,
+ .pll0 = 0x17,
+ .pll1 = 0x18,
+ .pll2 = 0x19,
+ .pll3 = 0x1a,
+ .dp_padctl0 = 0x5c,
+ .dp_padctl2 = 0x73,
+};
+
+/* Tegra124 and Tegra132 have lanes 0 and 2 swapped. */
+static const u8 tegra124_sor_lane_map[4] = {
+ 2, 1, 0, 3,
+};
+
+static const u8 tegra124_sor_voltage_swing[4][4][4] = {
+ {
+ { 0x13, 0x19, 0x1e, 0x28 },
+ { 0x1e, 0x25, 0x2d, },
+ { 0x28, 0x32, },
+ { 0x3c, },
+ }, {
+ { 0x12, 0x17, 0x1b, 0x25 },
+ { 0x1c, 0x23, 0x2a, },
+ { 0x25, 0x2f, },
+ { 0x39, }
+ }, {
+ { 0x12, 0x16, 0x1a, 0x22 },
+ { 0x1b, 0x20, 0x27, },
+ { 0x24, 0x2d, },
+ { 0x36, },
+ }, {
+ { 0x11, 0x14, 0x17, 0x1f },
+ { 0x19, 0x1e, 0x24, },
+ { 0x22, 0x2a, },
+ { 0x32, },
+ },
+};
+
+static const u8 tegra124_sor_pre_emphasis[4][4][4] = {
+ {
+ { 0x00, 0x09, 0x13, 0x25 },
+ { 0x00, 0x0f, 0x1e, },
+ { 0x00, 0x14, },
+ { 0x00, },
+ }, {
+ { 0x00, 0x0a, 0x14, 0x28 },
+ { 0x00, 0x0f, 0x1e, },
+ { 0x00, 0x14, },
+ { 0x00 },
+ }, {
+ { 0x00, 0x0a, 0x14, 0x28 },
+ { 0x00, 0x0f, 0x1e, },
+ { 0x00, 0x14, },
+ { 0x00, },
+ }, {
+ { 0x00, 0x0a, 0x14, 0x28 },
+ { 0x00, 0x0f, 0x1e, },
+ { 0x00, 0x14, },
+ { 0x00, },
+ },
+};
+
+static const u8 tegra124_sor_post_cursor[4][4][4] = {
+ {
+ { 0x00, 0x00, 0x00, 0x00 },
+ { 0x00, 0x00, 0x00, },
+ { 0x00, 0x00, },
+ { 0x00, },
+ }, {
+ { 0x02, 0x02, 0x04, 0x05 },
+ { 0x02, 0x04, 0x05, },
+ { 0x04, 0x05, },
+ { 0x05, },
+ }, {
+ { 0x04, 0x05, 0x08, 0x0b },
+ { 0x05, 0x09, 0x0b, },
+ { 0x08, 0x0a, },
+ { 0x0b, },
+ }, {
+ { 0x05, 0x09, 0x0b, 0x12 },
+ { 0x09, 0x0d, 0x12, },
+ { 0x0b, 0x0f, },
+ { 0x12, },
+ },
+};
+
+static const u8 tegra124_sor_tx_pu[4][4][4] = {
+ {
+ { 0x20, 0x30, 0x40, 0x60 },
+ { 0x30, 0x40, 0x60, },
+ { 0x40, 0x60, },
+ { 0x60, },
+ }, {
+ { 0x20, 0x20, 0x30, 0x50 },
+ { 0x30, 0x40, 0x50, },
+ { 0x40, 0x50, },
+ { 0x60, },
+ }, {
+ { 0x20, 0x20, 0x30, 0x40, },
+ { 0x30, 0x30, 0x40, },
+ { 0x40, 0x50, },
+ { 0x60, },
+ }, {
+ { 0x20, 0x20, 0x20, 0x40, },
+ { 0x30, 0x30, 0x40, },
+ { 0x40, 0x40, },
+ { 0x60, },
+ },
+};
+
+static const struct tegra_sor_soc tegra124_sor = {
+ .supports_lvds = true,
+ .supports_hdmi = false,
+ .supports_dp = true,
+ .supports_audio = false,
+ .supports_hdcp = false,
+ .regs = &tegra124_sor_regs,
+ .has_nvdisplay = false,
+ .xbar_cfg = tegra124_sor_xbar_cfg,
+ .lane_map = tegra124_sor_lane_map,
+ .voltage_swing = tegra124_sor_voltage_swing,
+ .pre_emphasis = tegra124_sor_pre_emphasis,
+ .post_cursor = tegra124_sor_post_cursor,
+ .tx_pu = tegra124_sor_tx_pu,
+};
+
+static const u8 tegra132_sor_pre_emphasis[4][4][4] = {
+ {
+ { 0x00, 0x08, 0x12, 0x24 },
+ { 0x01, 0x0e, 0x1d, },
+ { 0x01, 0x13, },
+ { 0x00, },
+ }, {
+ { 0x00, 0x08, 0x12, 0x24 },
+ { 0x00, 0x0e, 0x1d, },
+ { 0x00, 0x13, },
+ { 0x00 },
+ }, {
+ { 0x00, 0x08, 0x12, 0x24 },
+ { 0x00, 0x0e, 0x1d, },
+ { 0x00, 0x13, },
+ { 0x00, },
+ }, {
+ { 0x00, 0x08, 0x12, 0x24 },
+ { 0x00, 0x0e, 0x1d, },
+ { 0x00, 0x13, },
+ { 0x00, },
+ },
+};
+
+static const struct tegra_sor_soc tegra132_sor = {
+ .supports_lvds = true,
+ .supports_hdmi = false,
+ .supports_dp = true,
+ .supports_audio = false,
+ .supports_hdcp = false,
+ .regs = &tegra124_sor_regs,
+ .has_nvdisplay = false,
+ .xbar_cfg = tegra124_sor_xbar_cfg,
+ .lane_map = tegra124_sor_lane_map,
+ .voltage_swing = tegra124_sor_voltage_swing,
+ .pre_emphasis = tegra132_sor_pre_emphasis,
+ .post_cursor = tegra124_sor_post_cursor,
+ .tx_pu = tegra124_sor_tx_pu,
+};
+
+static const struct tegra_sor_regs tegra210_sor_regs = {
+ .head_state0 = 0x05,
+ .head_state1 = 0x07,
+ .head_state2 = 0x09,
+ .head_state3 = 0x0b,
+ .head_state4 = 0x0d,
+ .head_state5 = 0x0f,
+ .pll0 = 0x17,
+ .pll1 = 0x18,
+ .pll2 = 0x19,
+ .pll3 = 0x1a,
+ .dp_padctl0 = 0x5c,
+ .dp_padctl2 = 0x73,
+};
+
+static const u8 tegra210_sor_xbar_cfg[5] = {
+ 2, 1, 0, 3, 4
+};
+
+static const u8 tegra210_sor_lane_map[4] = {
+ 0, 1, 2, 3,
+};
+
+static const struct tegra_sor_soc tegra210_sor = {
+ .supports_lvds = false,
+ .supports_hdmi = false,
+ .supports_dp = true,
+ .supports_audio = false,
+ .supports_hdcp = false,
+
+ .regs = &tegra210_sor_regs,
+ .has_nvdisplay = false,
+
+ .xbar_cfg = tegra210_sor_xbar_cfg,
+ .lane_map = tegra210_sor_lane_map,
+ .voltage_swing = tegra124_sor_voltage_swing,
+ .pre_emphasis = tegra124_sor_pre_emphasis,
+ .post_cursor = tegra124_sor_post_cursor,
+ .tx_pu = tegra124_sor_tx_pu,
+};
+
+static const struct tegra_sor_soc tegra210_sor1 = {
+ .supports_lvds = false,
+ .supports_hdmi = true,
+ .supports_dp = true,
+ .supports_audio = true,
+ .supports_hdcp = true,
+
+ .regs = &tegra210_sor_regs,
+ .has_nvdisplay = false,
+
+ .num_settings = ARRAY_SIZE(tegra210_sor_hdmi_defaults),
+ .settings = tegra210_sor_hdmi_defaults,
+ .xbar_cfg = tegra210_sor_xbar_cfg,
+ .lane_map = tegra210_sor_lane_map,
+ .voltage_swing = tegra124_sor_voltage_swing,
+ .pre_emphasis = tegra124_sor_pre_emphasis,
+ .post_cursor = tegra124_sor_post_cursor,
+ .tx_pu = tegra124_sor_tx_pu,
+};
+
+static const struct tegra_sor_regs tegra186_sor_regs = {
+ .head_state0 = 0x151,
+ .head_state1 = 0x154,
+ .head_state2 = 0x157,
+ .head_state3 = 0x15a,
+ .head_state4 = 0x15d,
+ .head_state5 = 0x160,
+ .pll0 = 0x163,
+ .pll1 = 0x164,
+ .pll2 = 0x165,
+ .pll3 = 0x166,
+ .dp_padctl0 = 0x168,
+ .dp_padctl2 = 0x16a,
+};
+
+static const u8 tegra186_sor_voltage_swing[4][4][4] = {
+ {
+ { 0x13, 0x19, 0x1e, 0x28 },
+ { 0x1e, 0x25, 0x2d, },
+ { 0x28, 0x32, },
+ { 0x39, },
+ }, {
+ { 0x12, 0x16, 0x1b, 0x25 },
+ { 0x1c, 0x23, 0x2a, },
+ { 0x25, 0x2f, },
+ { 0x37, }
+ }, {
+ { 0x12, 0x16, 0x1a, 0x22 },
+ { 0x1b, 0x20, 0x27, },
+ { 0x24, 0x2d, },
+ { 0x35, },
+ }, {
+ { 0x11, 0x14, 0x17, 0x1f },
+ { 0x19, 0x1e, 0x24, },
+ { 0x22, 0x2a, },
+ { 0x32, },
+ },
+};
+
+static const u8 tegra186_sor_pre_emphasis[4][4][4] = {
+ {
+ { 0x00, 0x08, 0x12, 0x24 },
+ { 0x01, 0x0e, 0x1d, },
+ { 0x01, 0x13, },
+ { 0x00, },
+ }, {
+ { 0x00, 0x08, 0x12, 0x24 },
+ { 0x00, 0x0e, 0x1d, },
+ { 0x00, 0x13, },
+ { 0x00 },
+ }, {
+ { 0x00, 0x08, 0x14, 0x24 },
+ { 0x00, 0x0e, 0x1d, },
+ { 0x00, 0x13, },
+ { 0x00, },
+ }, {
+ { 0x00, 0x08, 0x12, 0x24 },
+ { 0x00, 0x0e, 0x1d, },
+ { 0x00, 0x13, },
+ { 0x00, },
+ },
+};
+
+static const struct tegra_sor_soc tegra186_sor = {
+ .supports_lvds = false,
+ .supports_hdmi = true,
+ .supports_dp = true,
+ .supports_audio = true,
+ .supports_hdcp = true,
+
+ .regs = &tegra186_sor_regs,
+ .has_nvdisplay = true,
+
+ .num_settings = ARRAY_SIZE(tegra186_sor_hdmi_defaults),
+ .settings = tegra186_sor_hdmi_defaults,
+ .xbar_cfg = tegra124_sor_xbar_cfg,
+ .lane_map = tegra124_sor_lane_map,
+ .voltage_swing = tegra186_sor_voltage_swing,
+ .pre_emphasis = tegra186_sor_pre_emphasis,
+ .post_cursor = tegra124_sor_post_cursor,
+ .tx_pu = tegra124_sor_tx_pu,
+};
+
+static const struct tegra_sor_regs tegra194_sor_regs = {
+ .head_state0 = 0x151,
+ .head_state1 = 0x155,
+ .head_state2 = 0x159,
+ .head_state3 = 0x15d,
+ .head_state4 = 0x161,
+ .head_state5 = 0x165,
+ .pll0 = 0x169,
+ .pll1 = 0x16a,
+ .pll2 = 0x16b,
+ .pll3 = 0x16c,
+ .dp_padctl0 = 0x16e,
+ .dp_padctl2 = 0x16f,
+};
+
+static const struct tegra_sor_soc tegra194_sor = {
+ .supports_lvds = false,
+ .supports_hdmi = true,
+ .supports_dp = true,
+ .supports_audio = true,
+ .supports_hdcp = true,
+
+ .regs = &tegra194_sor_regs,
+ .has_nvdisplay = true,
+
+ .num_settings = ARRAY_SIZE(tegra194_sor_hdmi_defaults),
+ .settings = tegra194_sor_hdmi_defaults,
+
+ .xbar_cfg = tegra210_sor_xbar_cfg,
+ .lane_map = tegra124_sor_lane_map,
+ .voltage_swing = tegra186_sor_voltage_swing,
+ .pre_emphasis = tegra186_sor_pre_emphasis,
+ .post_cursor = tegra124_sor_post_cursor,
+ .tx_pu = tegra124_sor_tx_pu,
+};
+
+static const struct of_device_id tegra_sor_of_match[] = {
+ { .compatible = "nvidia,tegra194-sor", .data = &tegra194_sor },
+ { .compatible = "nvidia,tegra186-sor", .data = &tegra186_sor },
+ { .compatible = "nvidia,tegra210-sor1", .data = &tegra210_sor1 },
+ { .compatible = "nvidia,tegra210-sor", .data = &tegra210_sor },
+ { .compatible = "nvidia,tegra132-sor", .data = &tegra132_sor },
+ { .compatible = "nvidia,tegra124-sor", .data = &tegra124_sor },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tegra_sor_of_match);
+
+static int tegra_sor_parse_dt(struct tegra_sor *sor)
+{
+ struct device_node *np = sor->dev->of_node;
+ u32 xbar_cfg[5];
+ unsigned int i;
+ u32 value;
+ int err;
+
+ if (sor->soc->has_nvdisplay) {
+ err = of_property_read_u32(np, "nvidia,interface", &value);
+ if (err < 0)
+ return err;
+
+ sor->index = value;
+
+ /*
+ * override the default that we already set for Tegra210 and
+ * earlier
+ */
+ sor->pad = TEGRA_IO_PAD_HDMI_DP0 + sor->index;
+ } else {
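+		/*
+		 * Before nvdisplay, only the audio-capable instance (SOR1 on
+		 * Tegra210) has a non-zero index, so infer the index from
+		 * the supports_audio flag.
+		 */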
+ if (!sor->soc->supports_audio)
+ sor->index = 0;
+ else
+ sor->index = 1;
+ }
+
+ err = of_property_read_u32_array(np, "nvidia,xbar-cfg", xbar_cfg, 5);
+ if (err < 0) {
+ /* fall back to default per-SoC XBAR configuration */
+ for (i = 0; i < 5; i++)
+ sor->xbar_cfg[i] = sor->soc->xbar_cfg[i];
+ } else {
+ /* copy cells to SOR XBAR configuration */
+ for (i = 0; i < 5; i++)
+ sor->xbar_cfg[i] = xbar_cfg[i];
+ }
+
+ return 0;
+}
+
+static irqreturn_t tegra_sor_irq(int irq, void *data)
+{
+ struct tegra_sor *sor = data;
+ u32 value;
+
+ value = tegra_sor_readl(sor, SOR_INT_STATUS);
+ tegra_sor_writel(sor, value, SOR_INT_STATUS);
+
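+	/*
+	 * The HDA codec reports audio stream changes through the scratch
+	 * register: if the VALID bit is set the payload carries the new
+	 * stream format, otherwise the stream has been shut down.
+	 */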
+ if (value & SOR_INT_CODEC_SCRATCH0) {
+ value = tegra_sor_readl(sor, SOR_AUDIO_HDA_CODEC_SCRATCH0);
+
+ if (value & SOR_AUDIO_HDA_CODEC_SCRATCH0_VALID) {
+ unsigned int format;
+
+ format = value & SOR_AUDIO_HDA_CODEC_SCRATCH0_FMT_MASK;
+
+ tegra_hda_parse_format(format, &sor->format);
+
+ if (sor->ops->audio_enable)
+ sor->ops->audio_enable(sor);
+ } else {
+ if (sor->ops->audio_disable)
+ sor->ops->audio_disable(sor);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int tegra_sor_probe(struct platform_device *pdev)
+{
+ struct device_node *np;
+ struct tegra_sor *sor;
+ struct resource *regs;
+ int err;
+
+ sor = devm_kzalloc(&pdev->dev, sizeof(*sor), GFP_KERNEL);
+ if (!sor)
+ return -ENOMEM;
+
+ sor->soc = of_device_get_match_data(&pdev->dev);
+ sor->output.dev = sor->dev = &pdev->dev;
+
+ sor->settings = devm_kmemdup(&pdev->dev, sor->soc->settings,
+ sor->soc->num_settings *
+ sizeof(*sor->settings),
+ GFP_KERNEL);
+ if (!sor->settings)
+ return -ENOMEM;
+
+ sor->num_settings = sor->soc->num_settings;
+
+ np = of_parse_phandle(pdev->dev.of_node, "nvidia,dpaux", 0);
+ if (np) {
+ sor->aux = drm_dp_aux_find_by_of_node(np);
+ of_node_put(np);
+
+ if (!sor->aux)
+ return -EPROBE_DEFER;
+
+ if (get_device(sor->aux->dev))
+ sor->output.ddc = &sor->aux->ddc;
+ }
+
+ if (!sor->aux) {
+ if (sor->soc->supports_hdmi) {
+ sor->ops = &tegra_sor_hdmi_ops;
+ sor->pad = TEGRA_IO_PAD_HDMI;
+ } else if (sor->soc->supports_lvds) {
+ dev_err(&pdev->dev, "LVDS not supported yet\n");
+ return -ENODEV;
+ } else {
+			dev_err(&pdev->dev, "unknown (non-DP) output type\n");
+ return -ENODEV;
+ }
+ } else {
+ np = of_parse_phandle(pdev->dev.of_node, "nvidia,panel", 0);
+ /*
+ * No need to keep this around since we only use it as a check
+ * to see if a panel is connected (eDP) or not (DP).
+ */
+ of_node_put(np);
+
+ sor->ops = &tegra_sor_dp_ops;
+ sor->pad = TEGRA_IO_PAD_LVDS;
+ }
+
+ err = tegra_sor_parse_dt(sor);
+ if (err < 0)
+ goto put_aux;
+
+ err = tegra_output_probe(&sor->output);
+ if (err < 0) {
+ dev_err_probe(&pdev->dev, err, "failed to probe output\n");
+ goto put_aux;
+ }
+
+ if (sor->ops && sor->ops->probe) {
+ err = sor->ops->probe(sor);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to probe %s: %d\n",
+ sor->ops->name, err);
+ goto remove;
+ }
+ }
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ sor->regs = devm_ioremap_resource(&pdev->dev, regs);
+ if (IS_ERR(sor->regs)) {
+ err = PTR_ERR(sor->regs);
+ goto remove;
+ }
+
+ err = platform_get_irq(pdev, 0);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to get IRQ: %d\n", err);
+ goto remove;
+ }
+
+ sor->irq = err;
+
+ err = devm_request_irq(sor->dev, sor->irq, tegra_sor_irq, 0,
+ dev_name(sor->dev), sor);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
+ goto remove;
+ }
+
+ sor->rst = devm_reset_control_get_exclusive_released(&pdev->dev, "sor");
+ if (IS_ERR(sor->rst)) {
+ err = PTR_ERR(sor->rst);
+
+ if (err != -EBUSY || WARN_ON(!pdev->dev.pm_domain)) {
+ dev_err(&pdev->dev, "failed to get reset control: %d\n",
+ err);
+ goto remove;
+ }
+
+ /*
+ * At this point, the reset control is most likely being used
+ * by the generic power domain implementation. With any luck
+ * the power domain will have taken care of resetting the SOR
+ * and we don't have to do anything.
+ */
+ sor->rst = NULL;
+ }
+
+ sor->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(sor->clk)) {
+ err = PTR_ERR(sor->clk);
+ dev_err(&pdev->dev, "failed to get module clock: %d\n", err);
+ goto remove;
+ }
+
+ if (sor->soc->supports_hdmi || sor->soc->supports_dp) {
+ struct device_node *np = pdev->dev.of_node;
+ const char *name;
+
+ /*
+ * For backwards compatibility with Tegra210 device trees,
+ * fall back to the old clock name "source" if the new "out"
+ * clock is not available.
+ */
+ if (of_property_match_string(np, "clock-names", "out") < 0)
+ name = "source";
+ else
+ name = "out";
+
+ sor->clk_out = devm_clk_get(&pdev->dev, name);
+ if (IS_ERR(sor->clk_out)) {
+ err = PTR_ERR(sor->clk_out);
+ dev_err(sor->dev, "failed to get %s clock: %d\n",
+ name, err);
+ goto remove;
+ }
+ } else {
+ /* fall back to the module clock on SOR0 (eDP/LVDS only) */
+ sor->clk_out = sor->clk;
+ }
+
+ sor->clk_parent = devm_clk_get(&pdev->dev, "parent");
+ if (IS_ERR(sor->clk_parent)) {
+ err = PTR_ERR(sor->clk_parent);
+ dev_err(&pdev->dev, "failed to get parent clock: %d\n", err);
+ goto remove;
+ }
+
+ sor->clk_safe = devm_clk_get(&pdev->dev, "safe");
+ if (IS_ERR(sor->clk_safe)) {
+ err = PTR_ERR(sor->clk_safe);
+ dev_err(&pdev->dev, "failed to get safe clock: %d\n", err);
+ goto remove;
+ }
+
+ sor->clk_dp = devm_clk_get(&pdev->dev, "dp");
+ if (IS_ERR(sor->clk_dp)) {
+ err = PTR_ERR(sor->clk_dp);
+ dev_err(&pdev->dev, "failed to get DP clock: %d\n", err);
+ goto remove;
+ }
+
+ /*
+ * Starting with Tegra186, the BPMP provides an implementation for
+ * the pad output clock, so we have to look it up from device tree.
+ */
+ sor->clk_pad = devm_clk_get(&pdev->dev, "pad");
+ if (IS_ERR(sor->clk_pad)) {
+ if (sor->clk_pad != ERR_PTR(-ENOENT)) {
+ err = PTR_ERR(sor->clk_pad);
+ goto remove;
+ }
+
+ /*
+ * If the pad output clock is not available, then we assume
+ * we're on Tegra210 or earlier and have to provide our own
+ * implementation.
+ */
+ sor->clk_pad = NULL;
+ }
+
+ /*
+	 * The bootloader may have set up the SOR such that its module clock
+	 * is sourced by one of the display PLLs. However, that doesn't work
+	 * without the rest of the SOR having been properly set up.
+ */
+ err = clk_set_parent(sor->clk_out, sor->clk_safe);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to use safe clock: %d\n", err);
+ goto remove;
+ }
+
+ platform_set_drvdata(pdev, sor);
+ pm_runtime_enable(&pdev->dev);
+
+ host1x_client_init(&sor->client);
+ sor->client.ops = &sor_client_ops;
+ sor->client.dev = &pdev->dev;
+
+ /*
+ * On Tegra210 and earlier, provide our own implementation for the
+ * pad output clock.
+ */
+ if (!sor->clk_pad) {
+ char *name;
+
+ name = devm_kasprintf(sor->dev, GFP_KERNEL, "sor%u_pad_clkout",
+ sor->index);
+ if (!name) {
+ err = -ENOMEM;
+ goto uninit;
+ }
+
+ err = host1x_client_resume(&sor->client);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to resume: %d\n", err);
+ goto uninit;
+ }
+
+ sor->clk_pad = tegra_clk_sor_pad_register(sor, name);
+ host1x_client_suspend(&sor->client);
+ }
+
+ if (IS_ERR(sor->clk_pad)) {
+ err = PTR_ERR(sor->clk_pad);
+ dev_err(sor->dev, "failed to register SOR pad clock: %d\n",
+ err);
+ goto uninit;
+ }
+
+ err = __host1x_client_register(&sor->client);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to register host1x client: %d\n",
+ err);
+ goto uninit;
+ }
+
+ return 0;
+
+uninit:
+ host1x_client_exit(&sor->client);
+ pm_runtime_disable(&pdev->dev);
+remove:
+ if (sor->aux)
+ sor->output.ddc = NULL;
+
+ tegra_output_remove(&sor->output);
+put_aux:
+ if (sor->aux)
+ put_device(sor->aux->dev);
+
+ return err;
+}
+
+static int tegra_sor_remove(struct platform_device *pdev)
+{
+ struct tegra_sor *sor = platform_get_drvdata(pdev);
+ int err;
+
+ err = host1x_client_unregister(&sor->client);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
+ err);
+ return err;
+ }
+
+ pm_runtime_disable(&pdev->dev);
+
+ if (sor->aux) {
+ put_device(sor->aux->dev);
+ sor->output.ddc = NULL;
+ }
+
+ tegra_output_remove(&sor->output);
+
+ return 0;
+}
+
+static int __maybe_unused tegra_sor_suspend(struct device *dev)
+{
+ struct tegra_sor *sor = dev_get_drvdata(dev);
+ int err;
+
+ err = tegra_output_suspend(&sor->output);
+ if (err < 0) {
+ dev_err(dev, "failed to suspend output: %d\n", err);
+ return err;
+ }
+
+ if (sor->hdmi_supply) {
+ err = regulator_disable(sor->hdmi_supply);
+ if (err < 0) {
+ tegra_output_resume(&sor->output);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int __maybe_unused tegra_sor_resume(struct device *dev)
+{
+ struct tegra_sor *sor = dev_get_drvdata(dev);
+ int err;
+
+ if (sor->hdmi_supply) {
+ err = regulator_enable(sor->hdmi_supply);
+ if (err < 0)
+ return err;
+ }
+
+ err = tegra_output_resume(&sor->output);
+ if (err < 0) {
+ dev_err(dev, "failed to resume output: %d\n", err);
+
+ if (sor->hdmi_supply)
+ regulator_disable(sor->hdmi_supply);
+
+ return err;
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops tegra_sor_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(tegra_sor_suspend, tegra_sor_resume)
+};
+
+struct platform_driver tegra_sor_driver = {
+ .driver = {
+ .name = "tegra-sor",
+ .of_match_table = tegra_sor_of_match,
+ .pm = &tegra_sor_pm_ops,
+ },
+ .probe = tegra_sor_probe,
+ .remove = tegra_sor_remove,
+};
diff --git a/drivers/gpu/drm/tegra/sor.h b/drivers/gpu/drm/tegra/sor.h
new file mode 100644
index 000000000..00e09d5dc
--- /dev/null
+++ b/drivers/gpu/drm/tegra/sor.h
@@ -0,0 +1,457 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2013 NVIDIA Corporation
+ */
+
+#ifndef DRM_TEGRA_SOR_H
+#define DRM_TEGRA_SOR_H
+
+#define SOR_CTXSW 0x00
+
+#define SOR_SUPER_STATE0 0x01
+
+#define SOR_SUPER_STATE1 0x02
+#define SOR_SUPER_STATE_ATTACHED (1 << 3)
+#define SOR_SUPER_STATE_MODE_NORMAL (1 << 2)
+#define SOR_SUPER_STATE_HEAD_MODE_MASK (3 << 0)
+#define SOR_SUPER_STATE_HEAD_MODE_AWAKE (2 << 0)
+#define SOR_SUPER_STATE_HEAD_MODE_SNOOZE (1 << 0)
+#define SOR_SUPER_STATE_HEAD_MODE_SLEEP (0 << 0)
+
+#define SOR_STATE0 0x03
+
+#define SOR_STATE1 0x04
+#define SOR_STATE_ASY_PIXELDEPTH_MASK (0xf << 17)
+#define SOR_STATE_ASY_PIXELDEPTH_BPP_18_444 (0x2 << 17)
+#define SOR_STATE_ASY_PIXELDEPTH_BPP_24_444 (0x5 << 17)
+#define SOR_STATE_ASY_PIXELDEPTH_BPP_30_444 (0x6 << 17)
+#define SOR_STATE_ASY_PIXELDEPTH_BPP_36_444 (0x8 << 17)
+#define SOR_STATE_ASY_PIXELDEPTH_BPP_48_444 (0x9 << 17)
+#define SOR_STATE_ASY_VSYNCPOL (1 << 13)
+#define SOR_STATE_ASY_HSYNCPOL (1 << 12)
+#define SOR_STATE_ASY_PROTOCOL_MASK (0xf << 8)
+#define SOR_STATE_ASY_PROTOCOL_CUSTOM (0xf << 8)
+#define SOR_STATE_ASY_PROTOCOL_DP_A (0x8 << 8)
+#define SOR_STATE_ASY_PROTOCOL_DP_B (0x9 << 8)
+#define SOR_STATE_ASY_PROTOCOL_SINGLE_TMDS_A (0x1 << 8)
+#define SOR_STATE_ASY_PROTOCOL_LVDS (0x0 << 8)
+#define SOR_STATE_ASY_CRC_MODE_MASK (0x3 << 6)
+#define SOR_STATE_ASY_CRC_MODE_NON_ACTIVE (0x2 << 6)
+#define SOR_STATE_ASY_CRC_MODE_COMPLETE (0x1 << 6)
+#define SOR_STATE_ASY_CRC_MODE_ACTIVE (0x0 << 6)
+#define SOR_STATE_ASY_SUBOWNER_MASK (0x3 << 4)
+#define SOR_STATE_ASY_OWNER_MASK 0xf
+#define SOR_STATE_ASY_OWNER(x) (((x) & 0xf) << 0)
+
+#define SOR_HEAD_STATE0(x) (0x05 + (x))
+#define SOR_HEAD_STATE_RANGECOMPRESS_MASK (0x1 << 3)
+#define SOR_HEAD_STATE_DYNRANGE_MASK (0x1 << 2)
+#define SOR_HEAD_STATE_DYNRANGE_VESA (0 << 2)
+#define SOR_HEAD_STATE_DYNRANGE_CEA (1 << 2)
+#define SOR_HEAD_STATE_COLORSPACE_MASK (0x3 << 0)
+#define SOR_HEAD_STATE_COLORSPACE_RGB (0 << 0)
+#define SOR_HEAD_STATE1(x) (0x07 + (x))
+#define SOR_HEAD_STATE2(x) (0x09 + (x))
+#define SOR_HEAD_STATE3(x) (0x0b + (x))
+#define SOR_HEAD_STATE4(x) (0x0d + (x))
+#define SOR_HEAD_STATE5(x) (0x0f + (x))
+#define SOR_CRC_CNTRL 0x11
+#define SOR_CRC_CNTRL_ENABLE (1 << 0)
+#define SOR_DP_DEBUG_MVID 0x12
+
+#define SOR_CLK_CNTRL 0x13
+#define SOR_CLK_CNTRL_DP_LINK_SPEED_MASK (0x1f << 2)
+#define SOR_CLK_CNTRL_DP_LINK_SPEED(x) (((x) & 0x1f) << 2)
+#define SOR_CLK_CNTRL_DP_LINK_SPEED_G1_62 (0x06 << 2)
+#define SOR_CLK_CNTRL_DP_LINK_SPEED_G2_70 (0x0a << 2)
+#define SOR_CLK_CNTRL_DP_LINK_SPEED_G5_40 (0x14 << 2)
+#define SOR_CLK_CNTRL_DP_CLK_SEL_MASK (3 << 0)
+#define SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_PCLK (0 << 0)
+#define SOR_CLK_CNTRL_DP_CLK_SEL_DIFF_PCLK (1 << 0)
+#define SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_DPCLK (2 << 0)
+#define SOR_CLK_CNTRL_DP_CLK_SEL_DIFF_DPCLK (3 << 0)
+
+#define SOR_CAP 0x14
+
+#define SOR_PWR 0x15
+#define SOR_PWR_TRIGGER (1 << 31)
+#define SOR_PWR_MODE_SAFE (1 << 28)
+#define SOR_PWR_NORMAL_STATE_PU (1 << 0)
+
+#define SOR_TEST 0x16
+#define SOR_TEST_CRC_POST_SERIALIZE (1 << 23)
+#define SOR_TEST_ATTACHED (1 << 10)
+#define SOR_TEST_HEAD_MODE_MASK (3 << 8)
+#define SOR_TEST_HEAD_MODE_AWAKE (2 << 8)
+
+#define SOR_PLL0 0x17
+#define SOR_PLL0_ICHPMP_MASK (0xf << 24)
+#define SOR_PLL0_ICHPMP(x) (((x) & 0xf) << 24)
+#define SOR_PLL0_FILTER_MASK (0xf << 16)
+#define SOR_PLL0_FILTER(x) (((x) & 0xf) << 16)
+#define SOR_PLL0_VCOCAP_MASK (0xf << 8)
+#define SOR_PLL0_VCOCAP(x) (((x) & 0xf) << 8)
+#define SOR_PLL0_VCOCAP_RST SOR_PLL0_VCOCAP(3)
+#define SOR_PLL0_PLLREG_MASK (0x3 << 6)
+#define SOR_PLL0_PLLREG_LEVEL(x) (((x) & 0x3) << 6)
+#define SOR_PLL0_PLLREG_LEVEL_V25 SOR_PLL0_PLLREG_LEVEL(0)
+#define SOR_PLL0_PLLREG_LEVEL_V15 SOR_PLL0_PLLREG_LEVEL(1)
+#define SOR_PLL0_PLLREG_LEVEL_V35 SOR_PLL0_PLLREG_LEVEL(2)
+#define SOR_PLL0_PLLREG_LEVEL_V45 SOR_PLL0_PLLREG_LEVEL(3)
+#define SOR_PLL0_PULLDOWN (1 << 5)
+#define SOR_PLL0_RESISTOR_EXT (1 << 4)
+#define SOR_PLL0_VCOPD (1 << 2)
+#define SOR_PLL0_PWR (1 << 0)
+
+#define SOR_PLL1 0x18
+/* XXX: read-only bit? */
+#define SOR_PLL1_LOADADJ_MASK (0xf << 20)
+#define SOR_PLL1_LOADADJ(x) (((x) & 0xf) << 20)
+#define SOR_PLL1_TERM_COMPOUT (1 << 15)
+#define SOR_PLL1_TMDS_TERMADJ_MASK (0xf << 9)
+#define SOR_PLL1_TMDS_TERMADJ(x) (((x) & 0xf) << 9)
+#define SOR_PLL1_TMDS_TERM (1 << 8)
+
+#define SOR_PLL2 0x19
+#define SOR_PLL2_LVDS_ENABLE (1 << 25)
+#define SOR_PLL2_SEQ_PLLCAPPD_ENFORCE (1 << 24)
+#define SOR_PLL2_PORT_POWERDOWN (1 << 23)
+#define SOR_PLL2_BANDGAP_POWERDOWN (1 << 22)
+#define SOR_PLL2_POWERDOWN_OVERRIDE (1 << 18)
+#define SOR_PLL2_SEQ_PLLCAPPD (1 << 17)
+#define SOR_PLL2_SEQ_PLL_PULLDOWN (1 << 16)
+
+#define SOR_PLL3 0x1a
+#define SOR_PLL3_BG_TEMP_COEF_MASK (0xf << 28)
+#define SOR_PLL3_BG_TEMP_COEF(x) (((x) & 0xf) << 28)
+#define SOR_PLL3_BG_VREF_LEVEL_MASK (0xf << 24)
+#define SOR_PLL3_BG_VREF_LEVEL(x) (((x) & 0xf) << 24)
+#define SOR_PLL3_PLL_VDD_MODE_1V8 (0 << 13)
+#define SOR_PLL3_PLL_VDD_MODE_3V3 (1 << 13)
+#define SOR_PLL3_AVDD10_LEVEL_MASK (0xf << 8)
+#define SOR_PLL3_AVDD10_LEVEL(x) (((x) & 0xf) << 8)
+#define SOR_PLL3_AVDD14_LEVEL_MASK (0xf << 4)
+#define SOR_PLL3_AVDD14_LEVEL(x) (((x) & 0xf) << 4)
+
+#define SOR_CSTM 0x1b
+#define SOR_CSTM_ROTCLK_MASK (0xf << 24)
+#define SOR_CSTM_ROTCLK(x) (((x) & 0xf) << 24)
+#define SOR_CSTM_LVDS (1 << 16)
+#define SOR_CSTM_LINK_ACT_B (1 << 15)
+#define SOR_CSTM_LINK_ACT_A (1 << 14)
+#define SOR_CSTM_UPPER (1 << 11)
+
+#define SOR_LVDS 0x1c
+#define SOR_CRCA 0x1d
+#define SOR_CRCA_VALID (1 << 0)
+#define SOR_CRCA_RESET (1 << 0)
+#define SOR_CRCB 0x1e
+#define SOR_BLANK 0x1f
+#define SOR_SEQ_CTL 0x20
+#define SOR_SEQ_CTL_PD_PC_ALT(x) (((x) & 0xf) << 12)
+#define SOR_SEQ_CTL_PD_PC(x) (((x) & 0xf) << 8)
+#define SOR_SEQ_CTL_PU_PC_ALT(x) (((x) & 0xf) << 4)
+#define SOR_SEQ_CTL_PU_PC(x) (((x) & 0xf) << 0)
+
+#define SOR_LANE_SEQ_CTL 0x21
+#define SOR_LANE_SEQ_CTL_TRIGGER (1 << 31)
+#define SOR_LANE_SEQ_CTL_STATE_BUSY (1 << 28)
+#define SOR_LANE_SEQ_CTL_SEQUENCE_UP (0 << 20)
+#define SOR_LANE_SEQ_CTL_SEQUENCE_DOWN (1 << 20)
+#define SOR_LANE_SEQ_CTL_POWER_STATE_UP (0 << 16)
+#define SOR_LANE_SEQ_CTL_POWER_STATE_DOWN (1 << 16)
+#define SOR_LANE_SEQ_CTL_DELAY(x) (((x) & 0xf) << 12)
+
+#define SOR_SEQ_INST(x) (0x22 + (x))
+#define SOR_SEQ_INST_PLL_PULLDOWN (1 << 31)
+#define SOR_SEQ_INST_POWERDOWN_MACRO (1 << 30)
+#define SOR_SEQ_INST_ASSERT_PLL_RESET (1 << 29)
+#define SOR_SEQ_INST_BLANK_V (1 << 28)
+#define SOR_SEQ_INST_BLANK_H (1 << 27)
+#define SOR_SEQ_INST_BLANK_DE (1 << 26)
+#define SOR_SEQ_INST_BLACK_DATA (1 << 25)
+#define SOR_SEQ_INST_TRISTATE_IOS (1 << 24)
+#define SOR_SEQ_INST_DRIVE_PWM_OUT_LO (1 << 23)
+#define SOR_SEQ_INST_PIN_B_LOW (0 << 22)
+#define SOR_SEQ_INST_PIN_B_HIGH (1 << 22)
+#define SOR_SEQ_INST_PIN_A_LOW (0 << 21)
+#define SOR_SEQ_INST_PIN_A_HIGH (1 << 21)
+#define SOR_SEQ_INST_SEQUENCE_UP (0 << 19)
+#define SOR_SEQ_INST_SEQUENCE_DOWN (1 << 19)
+#define SOR_SEQ_INST_LANE_SEQ_STOP (0 << 18)
+#define SOR_SEQ_INST_LANE_SEQ_RUN (1 << 18)
+#define SOR_SEQ_INST_PORT_POWERDOWN (1 << 17)
+#define SOR_SEQ_INST_PLL_POWERDOWN (1 << 16)
+#define SOR_SEQ_INST_HALT (1 << 15)
+#define SOR_SEQ_INST_WAIT_US (0 << 12)
+#define SOR_SEQ_INST_WAIT_MS (1 << 12)
+#define SOR_SEQ_INST_WAIT_VSYNC (2 << 12)
+#define SOR_SEQ_INST_WAIT(x) (((x) & 0x3ff) << 0)
+
+#define SOR_PWM_DIV 0x32
+#define SOR_PWM_DIV_MASK 0xffffff
+
+#define SOR_PWM_CTL 0x33
+#define SOR_PWM_CTL_TRIGGER (1 << 31)
+#define SOR_PWM_CTL_CLK_SEL (1 << 30)
+#define SOR_PWM_CTL_DUTY_CYCLE_MASK 0xffffff
+
+#define SOR_VCRC_A0 0x34
+#define SOR_VCRC_A1 0x35
+#define SOR_VCRC_B0 0x36
+#define SOR_VCRC_B1 0x37
+#define SOR_CCRC_A0 0x38
+#define SOR_CCRC_A1 0x39
+#define SOR_CCRC_B0 0x3a
+#define SOR_CCRC_B1 0x3b
+#define SOR_EDATA_A0 0x3c
+#define SOR_EDATA_A1 0x3d
+#define SOR_EDATA_B0 0x3e
+#define SOR_EDATA_B1 0x3f
+#define SOR_COUNT_A0 0x40
+#define SOR_COUNT_A1 0x41
+#define SOR_COUNT_B0 0x42
+#define SOR_COUNT_B1 0x43
+#define SOR_DEBUG_A0 0x44
+#define SOR_DEBUG_A1 0x45
+#define SOR_DEBUG_B0 0x46
+#define SOR_DEBUG_B1 0x47
+#define SOR_TRIG 0x48
+#define SOR_MSCHECK 0x49
+#define SOR_XBAR_CTRL 0x4a
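+/*
+ * Each link provides one 3-bit lane-select field per channel: the LINK0
+ * fields start at bit 2 and the LINK1 fields at bit 17, so for example
+ * SOR_XBAR_CTRL_LINK0_XSEL(2, 5) places the value 5 in bits 10:8.
+ */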
+#define SOR_XBAR_CTRL_LINK1_XSEL(channel, value) ((((value) & 0x7) << ((channel) * 3)) << 17)
+#define SOR_XBAR_CTRL_LINK0_XSEL(channel, value) ((((value) & 0x7) << ((channel) * 3)) << 2)
+#define SOR_XBAR_CTRL_LINK_SWAP (1 << 1)
+#define SOR_XBAR_CTRL_BYPASS (1 << 0)
+#define SOR_XBAR_POL 0x4b
+
+#define SOR_DP_LINKCTL0 0x4c
+#define SOR_DP_LINKCTL_LANE_COUNT_MASK (0x1f << 16)
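+/*
+ * The lane count is encoded as a contiguous mask of (x) set bits rather
+ * than a plain integer: LANE_COUNT(1) = 0x1 << 16, LANE_COUNT(2) = 0x3 << 16
+ * and LANE_COUNT(4) = 0xf << 16.
+ */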
+#define SOR_DP_LINKCTL_LANE_COUNT(x) (((1 << (x)) - 1) << 16)
+#define SOR_DP_LINKCTL_ENHANCED_FRAME (1 << 14)
+#define SOR_DP_LINKCTL_TU_SIZE_MASK (0x7f << 2)
+#define SOR_DP_LINKCTL_TU_SIZE(x) (((x) & 0x7f) << 2)
+#define SOR_DP_LINKCTL_ENABLE (1 << 0)
+
+#define SOR_DP_LINKCTL1 0x4d
+
+#define SOR_LANE_DRIVE_CURRENT0 0x4e
+#define SOR_LANE_DRIVE_CURRENT1 0x4f
+#define SOR_LANE4_DRIVE_CURRENT0 0x50
+#define SOR_LANE4_DRIVE_CURRENT1 0x51
+#define SOR_LANE_DRIVE_CURRENT_LANE3(x) (((x) & 0xff) << 24)
+#define SOR_LANE_DRIVE_CURRENT_LANE2(x) (((x) & 0xff) << 16)
+#define SOR_LANE_DRIVE_CURRENT_LANE1(x) (((x) & 0xff) << 8)
+#define SOR_LANE_DRIVE_CURRENT_LANE0(x) (((x) & 0xff) << 0)
+
+#define SOR_LANE_PREEMPHASIS0 0x52
+#define SOR_LANE_PREEMPHASIS1 0x53
+#define SOR_LANE4_PREEMPHASIS0 0x54
+#define SOR_LANE4_PREEMPHASIS1 0x55
+#define SOR_LANE_PREEMPHASIS_LANE3(x) (((x) & 0xff) << 24)
+#define SOR_LANE_PREEMPHASIS_LANE2(x) (((x) & 0xff) << 16)
+#define SOR_LANE_PREEMPHASIS_LANE1(x) (((x) & 0xff) << 8)
+#define SOR_LANE_PREEMPHASIS_LANE0(x) (((x) & 0xff) << 0)
+
+#define SOR_LANE_POSTCURSOR0 0x56
+#define SOR_LANE_POSTCURSOR1 0x57
+#define SOR_LANE_POSTCURSOR_LANE3(x) (((x) & 0xff) << 24)
+#define SOR_LANE_POSTCURSOR_LANE2(x) (((x) & 0xff) << 16)
+#define SOR_LANE_POSTCURSOR_LANE1(x) (((x) & 0xff) << 8)
+#define SOR_LANE_POSTCURSOR_LANE0(x) (((x) & 0xff) << 0)
+
+#define SOR_DP_CONFIG0 0x58
+#define SOR_DP_CONFIG_DISPARITY_NEGATIVE (1 << 31)
+#define SOR_DP_CONFIG_ACTIVE_SYM_ENABLE (1 << 26)
+#define SOR_DP_CONFIG_ACTIVE_SYM_POLARITY (1 << 24)
+#define SOR_DP_CONFIG_ACTIVE_SYM_FRAC_MASK (0xf << 16)
+#define SOR_DP_CONFIG_ACTIVE_SYM_FRAC(x) (((x) & 0xf) << 16)
+#define SOR_DP_CONFIG_ACTIVE_SYM_COUNT_MASK (0x7f << 8)
+#define SOR_DP_CONFIG_ACTIVE_SYM_COUNT(x) (((x) & 0x7f) << 8)
+#define SOR_DP_CONFIG_WATERMARK_MASK (0x3f << 0)
+#define SOR_DP_CONFIG_WATERMARK(x) (((x) & 0x3f) << 0)
+
+#define SOR_DP_CONFIG1 0x59
+#define SOR_DP_MN0 0x5a
+#define SOR_DP_MN1 0x5b
+
+#define SOR_DP_PADCTL0 0x5c
+#define SOR_DP_PADCTL_PAD_CAL_PD (1 << 23)
+#define SOR_DP_PADCTL_TX_PU_ENABLE (1 << 22)
+#define SOR_DP_PADCTL_TX_PU_MASK (0xff << 8)
+#define SOR_DP_PADCTL_TX_PU(x) (((x) & 0xff) << 8)
+#define SOR_DP_PADCTL_CM_TXD_3 (1 << 7)
+#define SOR_DP_PADCTL_CM_TXD_2 (1 << 6)
+#define SOR_DP_PADCTL_CM_TXD_1 (1 << 5)
+#define SOR_DP_PADCTL_CM_TXD_0 (1 << 4)
+#define SOR_DP_PADCTL_CM_TXD(x) (1 << (4 + (x)))
+#define SOR_DP_PADCTL_PD_TXD_3 (1 << 3)
+#define SOR_DP_PADCTL_PD_TXD_0 (1 << 2)
+#define SOR_DP_PADCTL_PD_TXD_1 (1 << 1)
+#define SOR_DP_PADCTL_PD_TXD_2 (1 << 0)
+#define SOR_DP_PADCTL_PD_TXD(x) (1 << (0 + (x)))
+
+#define SOR_DP_PADCTL1 0x5d
+
+#define SOR_DP_DEBUG0 0x5e
+#define SOR_DP_DEBUG1 0x5f
+
+#define SOR_DP_SPARE0 0x60
+#define SOR_DP_SPARE_DISP_VIDEO_PREAMBLE (1 << 3)
+#define SOR_DP_SPARE_MACRO_SOR_CLK (1 << 2)
+#define SOR_DP_SPARE_PANEL_INTERNAL (1 << 1)
+#define SOR_DP_SPARE_SEQ_ENABLE (1 << 0)
+
+#define SOR_DP_SPARE1 0x61
+#define SOR_DP_AUDIO_CTRL 0x62
+
+#define SOR_DP_AUDIO_HBLANK_SYMBOLS 0x63
+#define SOR_DP_AUDIO_HBLANK_SYMBOLS_MASK (0x01ffff << 0)
+
+#define SOR_DP_AUDIO_VBLANK_SYMBOLS 0x64
+#define SOR_DP_AUDIO_VBLANK_SYMBOLS_MASK (0x1fffff << 0)
+
+#define SOR_DP_GENERIC_INFOFRAME_HEADER 0x65
+#define SOR_DP_GENERIC_INFOFRAME_SUBPACK0 0x66
+#define SOR_DP_GENERIC_INFOFRAME_SUBPACK1 0x67
+#define SOR_DP_GENERIC_INFOFRAME_SUBPACK2 0x68
+#define SOR_DP_GENERIC_INFOFRAME_SUBPACK3 0x69
+#define SOR_DP_GENERIC_INFOFRAME_SUBPACK4 0x6a
+#define SOR_DP_GENERIC_INFOFRAME_SUBPACK5 0x6b
+#define SOR_DP_GENERIC_INFOFRAME_SUBPACK6 0x6c
+
+#define SOR_DP_TPG 0x6d
+#define SOR_DP_TPG_CHANNEL_CODING (1 << 6)
+#define SOR_DP_TPG_SCRAMBLER_MASK (3 << 4)
+#define SOR_DP_TPG_SCRAMBLER_FIBONACCI (2 << 4)
+#define SOR_DP_TPG_SCRAMBLER_GALIOS (1 << 4)
+#define SOR_DP_TPG_SCRAMBLER_NONE (0 << 4)
+#define SOR_DP_TPG_PATTERN_MASK (0xf << 0)
+#define SOR_DP_TPG_PATTERN_HBR2 (0x8 << 0)
+#define SOR_DP_TPG_PATTERN_CSTM (0x7 << 0)
+#define SOR_DP_TPG_PATTERN_PRBS7 (0x6 << 0)
+#define SOR_DP_TPG_PATTERN_SBLERRRATE (0x5 << 0)
+#define SOR_DP_TPG_PATTERN_D102 (0x4 << 0)
+#define SOR_DP_TPG_PATTERN_TRAIN3 (0x3 << 0)
+#define SOR_DP_TPG_PATTERN_TRAIN2 (0x2 << 0)
+#define SOR_DP_TPG_PATTERN_TRAIN1 (0x1 << 0)
+#define SOR_DP_TPG_PATTERN_NONE (0x0 << 0)
+
+#define SOR_DP_TPG_CONFIG 0x6e
+#define SOR_DP_LQ_CSTM0 0x6f
+#define SOR_DP_LQ_CSTM1 0x70
+#define SOR_DP_LQ_CSTM2 0x71
+
+#define SOR_DP_PADCTL2 0x73
+#define SOR_DP_PADCTL_SPAREPLL_MASK (0xff << 24)
+#define SOR_DP_PADCTL_SPAREPLL(x) (((x) & 0xff) << 24)
+
+#define SOR_HDMI_AUDIO_INFOFRAME_CTRL 0x9a
+#define SOR_HDMI_AUDIO_INFOFRAME_STATUS 0x9b
+#define SOR_HDMI_AUDIO_INFOFRAME_HEADER 0x9c
+
+#define SOR_HDMI_AVI_INFOFRAME_CTRL 0x9f
+#define INFOFRAME_CTRL_CHECKSUM_ENABLE (1 << 9)
+#define INFOFRAME_CTRL_SINGLE (1 << 8)
+#define INFOFRAME_CTRL_OTHER (1 << 4)
+#define INFOFRAME_CTRL_ENABLE (1 << 0)
+
+#define SOR_HDMI_AVI_INFOFRAME_STATUS 0xa0
+#define INFOFRAME_STATUS_DONE (1 << 0)
+
+#define SOR_HDMI_AVI_INFOFRAME_HEADER 0xa1
+#define INFOFRAME_HEADER_LEN(x) (((x) & 0xff) << 16)
+#define INFOFRAME_HEADER_VERSION(x) (((x) & 0xff) << 8)
+#define INFOFRAME_HEADER_TYPE(x) (((x) & 0xff) << 0)
+
+#define SOR_HDMI_ACR_CTRL 0xb1
+
+#define SOR_HDMI_ACR_0320_SUBPACK_LOW 0xb2
+#define SOR_HDMI_ACR_SUBPACK_LOW_SB1(x) (((x) & 0xff) << 24)
+
+#define SOR_HDMI_ACR_0320_SUBPACK_HIGH 0xb3
+#define SOR_HDMI_ACR_SUBPACK_HIGH_ENABLE (1 << 31)
+
+#define SOR_HDMI_ACR_0441_SUBPACK_LOW 0xb4
+#define SOR_HDMI_ACR_0441_SUBPACK_HIGH 0xb5
+
+#define SOR_HDMI_CTRL 0xc0
+#define SOR_HDMI_CTRL_ENABLE (1 << 30)
+#define SOR_HDMI_CTRL_MAX_AC_PACKET(x) (((x) & 0x1f) << 16)
+#define SOR_HDMI_CTRL_AUDIO_LAYOUT (1 << 10)
+#define SOR_HDMI_CTRL_REKEY(x) (((x) & 0x7f) << 0)
+
+#define SOR_HDMI_SPARE 0xcb
+#define SOR_HDMI_SPARE_ACR_PRIORITY_HIGH (1 << 31)
+#define SOR_HDMI_SPARE_CTS_RESET(x) (((x) & 0x7) << 16)
+#define SOR_HDMI_SPARE_HW_CTS_ENABLE (1 << 0)
+
+#define SOR_REFCLK 0xe6
+#define SOR_REFCLK_DIV_INT(x) ((((x) >> 2) & 0xff) << 8)
+#define SOR_REFCLK_DIV_FRAC(x) (((x) & 0x3) << 6)
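+/*
+ * The reference clock divider is programmed in quarter steps: x carries four
+ * times the divider, split into an 8-bit integer part and a 2-bit fraction,
+ * so for example x = 34 encodes a divisor of 8.5.
+ */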
+
+#define SOR_INPUT_CONTROL 0xe8
+#define SOR_INPUT_CONTROL_ARM_VIDEO_RANGE_LIMITED (1 << 1)
+#define SOR_INPUT_CONTROL_HDMI_SRC_SELECT(x) (((x) & 0x1) << 0)
+
+#define SOR_AUDIO_CNTRL 0xfc
+#define SOR_AUDIO_CNTRL_INJECT_NULLSMPL (1 << 29)
+#define SOR_AUDIO_CNTRL_SOURCE_SELECT(x) (((x) & 0x3) << 20)
+#define SOURCE_SELECT_MASK 0x3
+#define SOURCE_SELECT_HDA 0x2
+#define SOURCE_SELECT_SPDIF 0x1
+#define SOURCE_SELECT_AUTO 0x0
+#define SOR_AUDIO_CNTRL_AFIFO_FLUSH (1 << 12)
+
+#define SOR_AUDIO_SPARE 0xfe
+#define SOR_AUDIO_SPARE_HBR_ENABLE (1 << 27)
+
+#define SOR_AUDIO_NVAL_0320 0xff
+#define SOR_AUDIO_NVAL_0441 0x100
+#define SOR_AUDIO_NVAL_0882 0x101
+#define SOR_AUDIO_NVAL_1764 0x102
+#define SOR_AUDIO_NVAL_0480 0x103
+#define SOR_AUDIO_NVAL_0960 0x104
+#define SOR_AUDIO_NVAL_1920 0x105
+
+#define SOR_AUDIO_HDA_CODEC_SCRATCH0 0x10a
+#define SOR_AUDIO_HDA_CODEC_SCRATCH0_VALID (1 << 30)
+#define SOR_AUDIO_HDA_CODEC_SCRATCH0_FMT_MASK 0xffff
+
+#define SOR_AUDIO_HDA_ELD_BUFWR 0x10c
+#define SOR_AUDIO_HDA_ELD_BUFWR_INDEX(x) (((x) & 0xff) << 8)
+#define SOR_AUDIO_HDA_ELD_BUFWR_DATA(x) (((x) & 0xff) << 0)
+
+#define SOR_AUDIO_HDA_PRESENSE 0x10d
+#define SOR_AUDIO_HDA_PRESENSE_ELDV (1 << 1)
+#define SOR_AUDIO_HDA_PRESENSE_PD (1 << 0)
+
+#define SOR_AUDIO_AVAL_0320 0x10f
+#define SOR_AUDIO_AVAL_0441 0x110
+#define SOR_AUDIO_AVAL_0882 0x111
+#define SOR_AUDIO_AVAL_1764 0x112
+#define SOR_AUDIO_AVAL_0480 0x113
+#define SOR_AUDIO_AVAL_0960 0x114
+#define SOR_AUDIO_AVAL_1920 0x115
+
+#define SOR_INT_STATUS 0x11c
+#define SOR_INT_CODEC_CP_REQUEST (1 << 2)
+#define SOR_INT_CODEC_SCRATCH1 (1 << 1)
+#define SOR_INT_CODEC_SCRATCH0 (1 << 0)
+
+#define SOR_INT_MASK 0x11d
+#define SOR_INT_ENABLE 0x11e
+
+#define SOR_HDMI_VSI_INFOFRAME_CTRL 0x123
+#define SOR_HDMI_VSI_INFOFRAME_STATUS 0x124
+#define SOR_HDMI_VSI_INFOFRAME_HEADER 0x125
+
+#define SOR_HDMI_AUDIO_N 0x13c
+#define SOR_HDMI_AUDIO_N_LOOKUP (1 << 28)
+#define SOR_HDMI_AUDIO_N_RESET (1 << 20)
+
+#define SOR_HDMI2_CTRL 0x13e
+#define SOR_HDMI2_CTRL_CLOCK_MODE_DIV_BY_4 (1 << 1)
+#define SOR_HDMI2_CTRL_SCRAMBLE (1 << 0)
+
+#endif
diff --git a/drivers/gpu/drm/tegra/submit.c b/drivers/gpu/drm/tegra/submit.c
new file mode 100644
index 000000000..b24738bdf
--- /dev/null
+++ b/drivers/gpu/drm/tegra/submit.c
@@ -0,0 +1,698 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2020 NVIDIA Corporation */
+
+#include <linux/dma-fence-array.h>
+#include <linux/dma-mapping.h>
+#include <linux/file.h>
+#include <linux/host1x.h>
+#include <linux/iommu.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/nospec.h>
+#include <linux/pm_runtime.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/sync_file.h>
+
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_syncobj.h>
+
+#include "drm.h"
+#include "gem.h"
+#include "submit.h"
+#include "uapi.h"
+
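+/*
+ * Ratelimited error logging for job submission, tagged with the name of the
+ * submitting process and the device that owns the channel context.
+ */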
+#define SUBMIT_ERR(context, fmt, ...) \
+ dev_err_ratelimited(context->client->base.dev, \
+ "%s: job submission failed: " fmt "\n", \
+ current->comm, ##__VA_ARGS__)
+
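+/*
+ * A gather_bo wraps the kernel-owned DMA buffer that holds a copy of the
+ * userspace command stream, so that the host1x core can pin and map it like
+ * any other buffer object.
+ */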
+struct gather_bo {
+ struct host1x_bo base;
+
+ struct kref ref;
+
+ struct device *dev;
+ u32 *gather_data;
+ dma_addr_t gather_data_dma;
+ size_t gather_data_words;
+};
+
+static struct host1x_bo *gather_bo_get(struct host1x_bo *host_bo)
+{
+ struct gather_bo *bo = container_of(host_bo, struct gather_bo, base);
+
+ kref_get(&bo->ref);
+
+ return host_bo;
+}
+
+static void gather_bo_release(struct kref *ref)
+{
+ struct gather_bo *bo = container_of(ref, struct gather_bo, ref);
+
+ dma_free_attrs(bo->dev, bo->gather_data_words * 4, bo->gather_data, bo->gather_data_dma,
+ 0);
+ kfree(bo);
+}
+
+static void gather_bo_put(struct host1x_bo *host_bo)
+{
+ struct gather_bo *bo = container_of(host_bo, struct gather_bo, base);
+
+ kref_put(&bo->ref, gather_bo_release);
+}
+
+static struct host1x_bo_mapping *
+gather_bo_pin(struct device *dev, struct host1x_bo *bo, enum dma_data_direction direction)
+{
+ struct gather_bo *gather = container_of(bo, struct gather_bo, base);
+ struct host1x_bo_mapping *map;
+ int err;
+
+ map = kzalloc(sizeof(*map), GFP_KERNEL);
+ if (!map)
+ return ERR_PTR(-ENOMEM);
+
+ kref_init(&map->ref);
+ map->bo = host1x_bo_get(bo);
+ map->direction = direction;
+ map->dev = dev;
+
+ map->sgt = kzalloc(sizeof(*map->sgt), GFP_KERNEL);
+ if (!map->sgt) {
+ err = -ENOMEM;
+ goto free;
+ }
+
+ err = dma_get_sgtable(gather->dev, map->sgt, gather->gather_data, gather->gather_data_dma,
+ gather->gather_data_words * 4);
+ if (err)
+ goto free_sgt;
+
+ err = dma_map_sgtable(dev, map->sgt, direction, 0);
+ if (err)
+ goto free_sgt;
+
+ map->phys = sg_dma_address(map->sgt->sgl);
+ map->size = gather->gather_data_words * 4;
+ map->chunks = err;
+
+ return map;
+
+free_sgt:
+ sg_free_table(map->sgt);
+ kfree(map->sgt);
+free:
+ kfree(map);
+ return ERR_PTR(err);
+}
+
+static void gather_bo_unpin(struct host1x_bo_mapping *map)
+{
+ if (!map)
+ return;
+
+ dma_unmap_sgtable(map->dev, map->sgt, map->direction, 0);
+ sg_free_table(map->sgt);
+ kfree(map->sgt);
+ host1x_bo_put(map->bo);
+
+ kfree(map);
+}
+
+static void *gather_bo_mmap(struct host1x_bo *host_bo)
+{
+ struct gather_bo *bo = container_of(host_bo, struct gather_bo, base);
+
+ return bo->gather_data;
+}
+
+static void gather_bo_munmap(struct host1x_bo *host_bo, void *addr)
+{
+}
+
+const struct host1x_bo_ops gather_bo_ops = {
+ .get = gather_bo_get,
+ .put = gather_bo_put,
+ .pin = gather_bo_pin,
+ .unpin = gather_bo_unpin,
+ .mmap = gather_bo_mmap,
+ .munmap = gather_bo_munmap,
+};
+
+static struct tegra_drm_mapping *
+tegra_drm_mapping_get(struct tegra_drm_context *context, u32 id)
+{
+ struct tegra_drm_mapping *mapping;
+
+ xa_lock(&context->mappings);
+
+ mapping = xa_load(&context->mappings, id);
+ if (mapping)
+ kref_get(&mapping->ref);
+
+ xa_unlock(&context->mappings);
+
+ return mapping;
+}
+
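+/*
+ * Copy a userspace array into a kernel buffer, guarding against
+ * multiplication overflow and capping the total size at 16 KiB so that a
+ * hostile element count cannot trigger an oversized allocation.
+ */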
+static void *alloc_copy_user_array(void __user *from, size_t count, size_t size)
+{
+ size_t copy_len;
+ void *data;
+
+ if (check_mul_overflow(count, size, &copy_len))
+ return ERR_PTR(-EINVAL);
+
+ if (copy_len > 0x4000)
+ return ERR_PTR(-E2BIG);
+
+ data = kvmalloc(copy_len, GFP_KERNEL);
+ if (!data)
+ return ERR_PTR(-ENOMEM);
+
+ if (copy_from_user(data, from, copy_len)) {
+ kvfree(data);
+ return ERR_PTR(-EFAULT);
+ }
+
+ return data;
+}
+
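+/*
+ * Copy the userspace command stream into a freshly allocated DMA buffer.
+ * Validation and relocation patching then operate on this copy, which
+ * userspace can no longer modify behind the kernel's back.
+ */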
+static int submit_copy_gather_data(struct gather_bo **pbo, struct device *dev,
+ struct tegra_drm_context *context,
+ struct drm_tegra_channel_submit *args)
+{
+ struct gather_bo *bo;
+ size_t copy_len;
+
+ if (args->gather_data_words == 0) {
+ SUBMIT_ERR(context, "gather_data_words cannot be zero");
+ return -EINVAL;
+ }
+
+ if (check_mul_overflow((size_t)args->gather_data_words, (size_t)4, &copy_len)) {
+ SUBMIT_ERR(context, "gather_data_words is too large");
+ return -EINVAL;
+ }
+
+ bo = kzalloc(sizeof(*bo), GFP_KERNEL);
+ if (!bo) {
+ SUBMIT_ERR(context, "failed to allocate memory for bo info");
+ return -ENOMEM;
+ }
+
+ host1x_bo_init(&bo->base, &gather_bo_ops);
+ kref_init(&bo->ref);
+ bo->dev = dev;
+
+ bo->gather_data = dma_alloc_attrs(dev, copy_len, &bo->gather_data_dma,
+ GFP_KERNEL | __GFP_NOWARN, 0);
+ if (!bo->gather_data) {
+ SUBMIT_ERR(context, "failed to allocate memory for gather data");
+ kfree(bo);
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(bo->gather_data, u64_to_user_ptr(args->gather_data_ptr), copy_len)) {
+ SUBMIT_ERR(context, "failed to copy gather data from userspace");
+ dma_free_attrs(dev, copy_len, bo->gather_data, bo->gather_data_dma, 0);
+ kfree(bo);
+ return -EFAULT;
+ }
+
+ bo->gather_data_words = args->gather_data_words;
+
+ *pbo = bo;
+
+ return 0;
+}
+
+static int submit_write_reloc(struct tegra_drm_context *context, struct gather_bo *bo,
+ struct drm_tegra_submit_buf *buf, struct tegra_drm_mapping *mapping)
+{
+ /* TODO check that target_offset is within bounds */
+ dma_addr_t iova = mapping->iova + buf->reloc.target_offset;
+ u32 written_ptr;
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
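+	/*
+	 * DRM_TEGRA_SUBMIT_RELOC_SECTOR_LAYOUT requests the block-linear
+	 * sector layout; on configurations with 64-bit DMA addresses this is
+	 * signalled to the memory subsystem through bit 39 of the I/O
+	 * virtual address.
+	 */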
+ if (buf->flags & DRM_TEGRA_SUBMIT_RELOC_SECTOR_LAYOUT)
+ iova |= BIT_ULL(39);
+#endif
+
+ written_ptr = iova >> buf->reloc.shift;
+
+ if (buf->reloc.gather_offset_words >= bo->gather_data_words) {
+ SUBMIT_ERR(context,
+ "relocation has too large gather offset (%u vs gather length %zu)",
+ buf->reloc.gather_offset_words, bo->gather_data_words);
+ return -EINVAL;
+ }
+
+ buf->reloc.gather_offset_words = array_index_nospec(buf->reloc.gather_offset_words,
+ bo->gather_data_words);
+
+ bo->gather_data[buf->reloc.gather_offset_words] = written_ptr;
+
+ return 0;
+}
+
+static int submit_process_bufs(struct tegra_drm_context *context, struct gather_bo *bo,
+ struct drm_tegra_channel_submit *args,
+ struct tegra_drm_submit_data *job_data)
+{
+ struct tegra_drm_used_mapping *mappings;
+ struct drm_tegra_submit_buf *bufs;
+ int err;
+ u32 i;
+
+ bufs = alloc_copy_user_array(u64_to_user_ptr(args->bufs_ptr), args->num_bufs,
+ sizeof(*bufs));
+ if (IS_ERR(bufs)) {
+ SUBMIT_ERR(context, "failed to copy bufs array from userspace");
+ return PTR_ERR(bufs);
+ }
+
+ mappings = kcalloc(args->num_bufs, sizeof(*mappings), GFP_KERNEL);
+ if (!mappings) {
+ SUBMIT_ERR(context, "failed to allocate memory for mapping info");
+ err = -ENOMEM;
+ goto done;
+ }
+
+ for (i = 0; i < args->num_bufs; i++) {
+ struct drm_tegra_submit_buf *buf = &bufs[i];
+ struct tegra_drm_mapping *mapping;
+
+ if (buf->flags & ~DRM_TEGRA_SUBMIT_RELOC_SECTOR_LAYOUT) {
+ SUBMIT_ERR(context, "invalid flag specified for buffer");
+ err = -EINVAL;
+ goto drop_refs;
+ }
+
+ mapping = tegra_drm_mapping_get(context, buf->mapping);
+ if (!mapping) {
+ SUBMIT_ERR(context, "invalid mapping ID '%u' for buffer", buf->mapping);
+ err = -EINVAL;
+ goto drop_refs;
+ }
+
+ err = submit_write_reloc(context, bo, buf, mapping);
+ if (err) {
+ tegra_drm_mapping_put(mapping);
+ goto drop_refs;
+ }
+
+ mappings[i].mapping = mapping;
+ mappings[i].flags = buf->flags;
+ }
+
+ job_data->used_mappings = mappings;
+ job_data->num_used_mappings = i;
+
+ err = 0;
+
+ goto done;
+
+drop_refs:
+ while (i--)
+ tegra_drm_mapping_put(mappings[i].mapping);
+
+ kfree(mappings);
+ job_data->used_mappings = NULL;
+
+done:
+ kvfree(bufs);
+
+ return err;
+}
+
+static int submit_get_syncpt(struct tegra_drm_context *context, struct host1x_job *job,
+ struct xarray *syncpoints, struct drm_tegra_channel_submit *args)
+{
+ struct host1x_syncpt *sp;
+
+ if (args->syncpt.flags) {
+ SUBMIT_ERR(context, "invalid flag specified for syncpt");
+ return -EINVAL;
+ }
+
+ /* Syncpt ref will be dropped on job release */
+ sp = xa_load(syncpoints, args->syncpt.id);
+ if (!sp) {
+ SUBMIT_ERR(context, "syncpoint specified in syncpt was not allocated");
+ return -EINVAL;
+ }
+
+ job->syncpt = host1x_syncpt_get(sp);
+ job->syncpt_incrs = args->syncpt.increments;
+
+ return 0;
+}
+
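+/*
+ * Validate a GATHER_UPTR command and append the corresponding window of the
+ * gather buffer to the job. Each gather consumes the next cmd->words words
+ * of the copied gather data; *offset tracks the running position and is
+ * checked against the buffer size.
+ */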
+static int submit_job_add_gather(struct host1x_job *job, struct tegra_drm_context *context,
+ struct drm_tegra_submit_cmd_gather_uptr *cmd,
+ struct gather_bo *bo, u32 *offset,
+ struct tegra_drm_submit_data *job_data,
+ u32 *class)
+{
+ u32 next_offset;
+
+ if (cmd->reserved[0] || cmd->reserved[1] || cmd->reserved[2]) {
+ SUBMIT_ERR(context, "non-zero reserved field in GATHER_UPTR command");
+ return -EINVAL;
+ }
+
+	/*
+	 * Check for maximum gather size: the host1x GATHER opcode carries a
+	 * 14-bit word count, so a single gather cannot exceed 16383 words.
+	 */
+ if (cmd->words > 16383) {
+ SUBMIT_ERR(context, "too many words in GATHER_UPTR command");
+ return -EINVAL;
+ }
+
+ if (check_add_overflow(*offset, cmd->words, &next_offset)) {
+ SUBMIT_ERR(context, "too many total words in job");
+ return -EINVAL;
+ }
+
+ if (next_offset > bo->gather_data_words) {
+ SUBMIT_ERR(context, "GATHER_UPTR command overflows gather data");
+ return -EINVAL;
+ }
+
+ if (tegra_drm_fw_validate(context->client, bo->gather_data, *offset,
+ cmd->words, job_data, class)) {
+ SUBMIT_ERR(context, "job was rejected by firewall");
+ return -EINVAL;
+ }
+
+ host1x_job_add_gather(job, &bo->base, cmd->words, *offset * 4);
+
+ *offset = next_offset;
+
+ return 0;
+}
+
+static struct host1x_job *
+submit_create_job(struct tegra_drm_context *context, struct gather_bo *bo,
+ struct drm_tegra_channel_submit *args, struct tegra_drm_submit_data *job_data,
+ struct xarray *syncpoints)
+{
+ struct drm_tegra_submit_cmd *cmds;
+ u32 i, gather_offset = 0, class;
+ struct host1x_job *job;
+ int err;
+
+ /* Set initial class for firewall. */
+ class = context->client->base.class;
+
+ cmds = alloc_copy_user_array(u64_to_user_ptr(args->cmds_ptr), args->num_cmds,
+ sizeof(*cmds));
+ if (IS_ERR(cmds)) {
+ SUBMIT_ERR(context, "failed to copy cmds array from userspace");
+ return ERR_CAST(cmds);
+ }
+
+ job = host1x_job_alloc(context->channel, args->num_cmds, 0, true);
+ if (!job) {
+ SUBMIT_ERR(context, "failed to allocate memory for job");
+ job = ERR_PTR(-ENOMEM);
+ goto done;
+ }
+
+ err = submit_get_syncpt(context, job, syncpoints, args);
+ if (err < 0)
+ goto free_job;
+
+ job->client = &context->client->base;
+ job->class = context->client->base.class;
+ job->serialize = true;
+
+ for (i = 0; i < args->num_cmds; i++) {
+ struct drm_tegra_submit_cmd *cmd = &cmds[i];
+
+ if (cmd->flags) {
+ SUBMIT_ERR(context, "unknown flags given for cmd");
+ err = -EINVAL;
+ goto free_job;
+ }
+
+ if (cmd->type == DRM_TEGRA_SUBMIT_CMD_GATHER_UPTR) {
+ err = submit_job_add_gather(job, context, &cmd->gather_uptr, bo,
+ &gather_offset, job_data, &class);
+ if (err)
+ goto free_job;
+ } else if (cmd->type == DRM_TEGRA_SUBMIT_CMD_WAIT_SYNCPT) {
+ if (cmd->wait_syncpt.reserved[0] || cmd->wait_syncpt.reserved[1]) {
+ SUBMIT_ERR(context, "non-zero reserved value");
+ err = -EINVAL;
+ goto free_job;
+ }
+
+ host1x_job_add_wait(job, cmd->wait_syncpt.id, cmd->wait_syncpt.value,
+ false, class);
+ } else if (cmd->type == DRM_TEGRA_SUBMIT_CMD_WAIT_SYNCPT_RELATIVE) {
+ if (cmd->wait_syncpt.reserved[0] || cmd->wait_syncpt.reserved[1]) {
+ SUBMIT_ERR(context, "non-zero reserved value");
+ err = -EINVAL;
+ goto free_job;
+ }
+
+ if (cmd->wait_syncpt.id != args->syncpt.id) {
+ SUBMIT_ERR(context, "syncpoint ID in CMD_WAIT_SYNCPT_RELATIVE is not used by the job");
+ err = -EINVAL;
+ goto free_job;
+ }
+
+ host1x_job_add_wait(job, cmd->wait_syncpt.id, cmd->wait_syncpt.value,
+ true, class);
+ } else {
+ SUBMIT_ERR(context, "unknown cmd type");
+ err = -EINVAL;
+ goto free_job;
+ }
+ }
+
+ if (gather_offset == 0) {
+ SUBMIT_ERR(context, "job must have at least one gather");
+ err = -EINVAL;
+ goto free_job;
+ }
+
+ goto done;
+
+free_job:
+ host1x_job_put(job);
+ job = ERR_PTR(err);
+
+done:
+ kvfree(cmds);
+
+ return job;
+}
+
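+/*
+ * Called when the final reference to the job is dropped: releases the
+ * memory context and the buffer mappings pinned at submit time and allows
+ * the engine to runtime-suspend again.
+ */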
+static void release_job(struct host1x_job *job)
+{
+ struct tegra_drm_client *client = container_of(job->client, struct tegra_drm_client, base);
+ struct tegra_drm_submit_data *job_data = job->user_data;
+ u32 i;
+
+ if (job->memory_context)
+ host1x_memory_context_put(job->memory_context);
+
+ for (i = 0; i < job_data->num_used_mappings; i++)
+ tegra_drm_mapping_put(job_data->used_mappings[i].mapping);
+
+ kfree(job_data->used_mappings);
+ kfree(job_data);
+
+ pm_runtime_mark_last_busy(client->base.dev);
+ pm_runtime_put_autosuspend(client->base.dev);
+}
+
+int tegra_drm_ioctl_channel_submit(struct drm_device *drm, void *data,
+ struct drm_file *file)
+{
+ struct tegra_drm_file *fpriv = file->driver_priv;
+ struct drm_tegra_channel_submit *args = data;
+ struct tegra_drm_submit_data *job_data;
+ struct drm_syncobj *syncobj = NULL;
+ struct tegra_drm_context *context;
+ struct host1x_job *job;
+ struct gather_bo *bo;
+ u32 i;
+ int err;
+
+ mutex_lock(&fpriv->lock);
+
+ context = xa_load(&fpriv->contexts, args->context);
+ if (!context) {
+ mutex_unlock(&fpriv->lock);
+		pr_err_ratelimited("%s: %s: invalid channel context '%#x'\n", __func__,
+ current->comm, args->context);
+ return -EINVAL;
+ }
+
+ if (args->syncobj_in) {
+ struct dma_fence *fence;
+
+ err = drm_syncobj_find_fence(file, args->syncobj_in, 0, 0, &fence);
+ if (err) {
+ SUBMIT_ERR(context, "invalid syncobj_in '%#x'", args->syncobj_in);
+ goto unlock;
+ }
+
+		err = dma_fence_wait_timeout(fence, true, msecs_to_jiffies(10000));
+		dma_fence_put(fence);
+		/* dma_fence_wait_timeout() returns > 0 on success, 0 on timeout */
+		if (err <= 0) {
+			if (err == 0)
+				err = -ETIMEDOUT;
+
+			SUBMIT_ERR(context, "wait for syncobj_in timed out");
+			goto unlock;
+		}
+
+		err = 0;
+ }
+
+ if (args->syncobj_out) {
+ syncobj = drm_syncobj_find(file, args->syncobj_out);
+ if (!syncobj) {
+ SUBMIT_ERR(context, "invalid syncobj_out '%#x'", args->syncobj_out);
+ err = -ENOENT;
+ goto unlock;
+ }
+ }
+
+ /* Allocate gather BO and copy gather words in. */
+ err = submit_copy_gather_data(&bo, drm->dev, context, args);
+ if (err)
+ goto unlock;
+
+ job_data = kzalloc(sizeof(*job_data), GFP_KERNEL);
+ if (!job_data) {
+ SUBMIT_ERR(context, "failed to allocate memory for job data");
+ err = -ENOMEM;
+ goto put_bo;
+ }
+
+ /* Get data buffer mappings and do relocation patching. */
+ err = submit_process_bufs(context, bo, args, job_data);
+ if (err)
+ goto free_job_data;
+
+ /* Allocate host1x_job and add gathers and waits to it. */
+ job = submit_create_job(context, bo, args, job_data, &fpriv->syncpoints);
+ if (IS_ERR(job)) {
+ err = PTR_ERR(job);
+ goto free_job_data;
+ }
+
+ /* Map gather data for Host1x. */
+ err = host1x_job_pin(job, context->client->base.dev);
+ if (err) {
+ SUBMIT_ERR(context, "failed to pin job: %d", err);
+ goto put_job;
+ }
+
+ if (context->client->ops->get_streamid_offset) {
+ err = context->client->ops->get_streamid_offset(
+ context->client, &job->engine_streamid_offset);
+ if (err) {
+ SUBMIT_ERR(context, "failed to get streamid offset: %d", err);
+ goto unpin_job;
+ }
+ }
+
+ if (context->memory_context && context->client->ops->can_use_memory_ctx) {
+ bool supported;
+
+ err = context->client->ops->can_use_memory_ctx(context->client, &supported);
+ if (err) {
+ SUBMIT_ERR(context, "failed to detect if engine can use memory context: %d", err);
+ goto unpin_job;
+ }
+
+ if (supported) {
+ job->memory_context = context->memory_context;
+ host1x_memory_context_get(job->memory_context);
+ }
+ } else if (context->client->ops->get_streamid_offset) {
+#ifdef CONFIG_IOMMU_API
+ struct iommu_fwspec *spec;
+
+ /*
+		 * Job submission will need to temporarily change the stream
+		 * ID, so we need to tell it what to change it back to.
+ */
+ spec = dev_iommu_fwspec_get(context->client->base.dev);
+ if (spec && spec->num_ids > 0)
+ job->engine_fallback_streamid = spec->ids[0] & 0xffff;
+ else
+ job->engine_fallback_streamid = 0x7f;
+#else
+ job->engine_fallback_streamid = 0x7f;
+#endif
+ }
+
+ /* Boot engine. */
+ err = pm_runtime_resume_and_get(context->client->base.dev);
+ if (err < 0) {
+ SUBMIT_ERR(context, "could not power up engine: %d", err);
+ goto put_memory_context;
+ }
+
+ job->user_data = job_data;
+ job->release = release_job;
+ job->timeout = 10000;
+
+ /*
+ * job_data is now part of job reference counting, so don't release
+ * it from here.
+ */
+ job_data = NULL;
+
+ /* Submit job to hardware. */
+ err = host1x_job_submit(job);
+ if (err) {
+ SUBMIT_ERR(context, "host1x job submission failed: %d", err);
+ goto unpin_job;
+ }
+
+ /* Return postfences to userspace and add fences to DMA reservations. */
+ args->syncpt.value = job->syncpt_end;
+
+ if (syncobj) {
+		struct dma_fence *fence = host1x_fence_create(job->syncpt, job->syncpt_end);
+
+		/* never hand an ERR_PTR() to drm_syncobj_replace_fence() */
+		if (IS_ERR(fence)) {
+			err = PTR_ERR(fence);
+			SUBMIT_ERR(context, "failed to create postfence: %d", err);
+		} else {
+			drm_syncobj_replace_fence(syncobj, fence);
+		}
+ }
+
+ goto put_job;
+
+put_memory_context:
+ if (job->memory_context)
+ host1x_memory_context_put(job->memory_context);
+unpin_job:
+ host1x_job_unpin(job);
+put_job:
+ host1x_job_put(job);
+free_job_data:
+ if (job_data && job_data->used_mappings) {
+ for (i = 0; i < job_data->num_used_mappings; i++)
+ tegra_drm_mapping_put(job_data->used_mappings[i].mapping);
+
+ kfree(job_data->used_mappings);
+ }
+
+	kfree(job_data);
+put_bo:
+ gather_bo_put(&bo->base);
+unlock:
+ if (syncobj)
+ drm_syncobj_put(syncobj);
+
+ mutex_unlock(&fpriv->lock);
+ return err;
+}
diff --git a/drivers/gpu/drm/tegra/submit.h b/drivers/gpu/drm/tegra/submit.h
new file mode 100644
index 000000000..cf6a2f0a2
--- /dev/null
+++ b/drivers/gpu/drm/tegra/submit.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2020 NVIDIA Corporation */
+
+#ifndef _TEGRA_DRM_UAPI_SUBMIT_H
+#define _TEGRA_DRM_UAPI_SUBMIT_H
+
+struct tegra_drm_used_mapping {
+ struct tegra_drm_mapping *mapping;
+ u32 flags;
+};
+
+struct tegra_drm_submit_data {
+ struct tegra_drm_used_mapping *used_mappings;
+ u32 num_used_mappings;
+};
+
+int tegra_drm_fw_validate(struct tegra_drm_client *client, u32 *data, u32 start,
+ u32 words, struct tegra_drm_submit_data *submit,
+ u32 *job_class);
+
+#endif
diff --git a/drivers/gpu/drm/tegra/trace.c b/drivers/gpu/drm/tegra/trace.c
new file mode 100644
index 000000000..006f65c72
--- /dev/null
+++ b/drivers/gpu/drm/tegra/trace.c
@@ -0,0 +1,2 @@
+#define CREATE_TRACE_POINTS
+#include "trace.h"
diff --git a/drivers/gpu/drm/tegra/trace.h b/drivers/gpu/drm/tegra/trace.h
new file mode 100644
index 000000000..5a1ab4046
--- /dev/null
+++ b/drivers/gpu/drm/tegra/trace.h
@@ -0,0 +1,68 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM tegra
+
+#if !defined(DRM_TEGRA_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define DRM_TEGRA_TRACE_H 1
+
+#include <linux/device.h>
+#include <linux/tracepoint.h>
+
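+/*
+ * All register accessors share a single event class, so each IP block only
+ * needs the pair of DEFINE_EVENT() stubs below; the resulting tracepoints
+ * print the device name, register offset and value.
+ */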
+DECLARE_EVENT_CLASS(register_access,
+ TP_PROTO(struct device *dev, unsigned int offset, u32 value),
+ TP_ARGS(dev, offset, value),
+ TP_STRUCT__entry(
+ __field(struct device *, dev)
+ __field(unsigned int, offset)
+ __field(u32, value)
+ ),
+ TP_fast_assign(
+ __entry->dev = dev;
+ __entry->offset = offset;
+ __entry->value = value;
+ ),
+ TP_printk("%s %04x %08x", dev_name(__entry->dev), __entry->offset,
+ __entry->value)
+);
+
+DEFINE_EVENT(register_access, dc_writel,
+ TP_PROTO(struct device *dev, unsigned int offset, u32 value),
+ TP_ARGS(dev, offset, value));
+DEFINE_EVENT(register_access, dc_readl,
+ TP_PROTO(struct device *dev, unsigned int offset, u32 value),
+ TP_ARGS(dev, offset, value));
+
+DEFINE_EVENT(register_access, hdmi_writel,
+ TP_PROTO(struct device *dev, unsigned int offset, u32 value),
+ TP_ARGS(dev, offset, value));
+DEFINE_EVENT(register_access, hdmi_readl,
+ TP_PROTO(struct device *dev, unsigned int offset, u32 value),
+ TP_ARGS(dev, offset, value));
+
+DEFINE_EVENT(register_access, dsi_writel,
+ TP_PROTO(struct device *dev, unsigned int offset, u32 value),
+ TP_ARGS(dev, offset, value));
+DEFINE_EVENT(register_access, dsi_readl,
+ TP_PROTO(struct device *dev, unsigned int offset, u32 value),
+ TP_ARGS(dev, offset, value));
+
+DEFINE_EVENT(register_access, dpaux_writel,
+ TP_PROTO(struct device *dev, unsigned int offset, u32 value),
+ TP_ARGS(dev, offset, value));
+DEFINE_EVENT(register_access, dpaux_readl,
+ TP_PROTO(struct device *dev, unsigned int offset, u32 value),
+ TP_ARGS(dev, offset, value));
+
+DEFINE_EVENT(register_access, sor_writel,
+ TP_PROTO(struct device *dev, unsigned int offset, u32 value),
+ TP_ARGS(dev, offset, value));
+DEFINE_EVENT(register_access, sor_readl,
+ TP_PROTO(struct device *dev, unsigned int offset, u32 value),
+ TP_ARGS(dev, offset, value));
+
+#endif /* DRM_TEGRA_TRACE_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/tegra
+#define TRACE_INCLUDE_FILE trace
+#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/tegra/uapi.c b/drivers/gpu/drm/tegra/uapi.c
new file mode 100644
index 000000000..a98239cb0
--- /dev/null
+++ b/drivers/gpu/drm/tegra/uapi.c
@@ -0,0 +1,359 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2020 NVIDIA Corporation */
+
+#include <linux/host1x.h>
+#include <linux/iommu.h>
+#include <linux/list.h>
+
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_utils.h>
+
+#include "drm.h"
+#include "uapi.h"
+
+static void tegra_drm_mapping_release(struct kref *ref)
+{
+ struct tegra_drm_mapping *mapping =
+ container_of(ref, struct tegra_drm_mapping, ref);
+
+ host1x_bo_unpin(mapping->map);
+ host1x_bo_put(mapping->bo);
+
+ kfree(mapping);
+}
+
+void tegra_drm_mapping_put(struct tegra_drm_mapping *mapping)
+{
+ kref_put(&mapping->ref, tegra_drm_mapping_release);
+}
+
+static void tegra_drm_channel_context_close(struct tegra_drm_context *context)
+{
+ struct tegra_drm_mapping *mapping;
+ unsigned long id;
+
+ if (context->memory_context)
+ host1x_memory_context_put(context->memory_context);
+
+ xa_for_each(&context->mappings, id, mapping)
+ tegra_drm_mapping_put(mapping);
+
+ xa_destroy(&context->mappings);
+
+ host1x_channel_put(context->channel);
+
+ kfree(context);
+}
+
+void tegra_drm_uapi_close_file(struct tegra_drm_file *file)
+{
+ struct tegra_drm_context *context;
+ struct host1x_syncpt *sp;
+ unsigned long id;
+
+ xa_for_each(&file->contexts, id, context)
+ tegra_drm_channel_context_close(context);
+
+ xa_for_each(&file->syncpoints, id, sp)
+ host1x_syncpt_put(sp);
+
+ xa_destroy(&file->contexts);
+ xa_destroy(&file->syncpoints);
+}
+
+static struct tegra_drm_client *tegra_drm_find_client(struct tegra_drm *tegra, u32 class)
+{
+ struct tegra_drm_client *client;
+
+ list_for_each_entry(client, &tegra->clients, list)
+ if (client->base.class == class)
+ return client;
+
+ return NULL;
+}
+
+int tegra_drm_ioctl_channel_open(struct drm_device *drm, void *data, struct drm_file *file)
+{
+ struct host1x *host = tegra_drm_to_host1x(drm->dev_private);
+ struct tegra_drm_file *fpriv = file->driver_priv;
+ struct tegra_drm *tegra = drm->dev_private;
+ struct drm_tegra_channel_open *args = data;
+ struct tegra_drm_client *client = NULL;
+ struct tegra_drm_context *context;
+ int err;
+
+ if (args->flags)
+ return -EINVAL;
+
+ context = kzalloc(sizeof(*context), GFP_KERNEL);
+ if (!context)
+ return -ENOMEM;
+
+ client = tegra_drm_find_client(tegra, args->host1x_class);
+ if (!client) {
+ err = -ENODEV;
+ goto free;
+ }
+
+ if (client->shared_channel) {
+ context->channel = host1x_channel_get(client->shared_channel);
+ } else {
+ context->channel = host1x_channel_request(&client->base);
+ if (!context->channel) {
+ err = -EBUSY;
+ goto free;
+ }
+ }
+
+ /* Only allocate context if the engine supports context isolation. */
+ if (device_iommu_mapped(client->base.dev) && client->ops->can_use_memory_ctx) {
+ bool supported;
+
+ err = client->ops->can_use_memory_ctx(client, &supported);
+ if (err)
+ goto put_channel;
+
+ if (supported)
+ context->memory_context = host1x_memory_context_alloc(
+ host, get_task_pid(current, PIDTYPE_TGID));
+
+ if (IS_ERR(context->memory_context)) {
+ if (PTR_ERR(context->memory_context) != -EOPNOTSUPP) {
+ err = PTR_ERR(context->memory_context);
+ goto put_channel;
+ } else {
+ /*
+ * OK, HW does not support contexts or contexts
+ * are disabled.
+ */
+ context->memory_context = NULL;
+ }
+ }
+ }
+
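+	/* Allocate IDs from 1 upwards so that 0 never names a valid context. */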
+ err = xa_alloc(&fpriv->contexts, &args->context, context, XA_LIMIT(1, U32_MAX),
+ GFP_KERNEL);
+ if (err < 0)
+ goto put_memctx;
+
+ context->client = client;
+ xa_init_flags(&context->mappings, XA_FLAGS_ALLOC1);
+
+ args->version = client->version;
+ args->capabilities = 0;
+
+ if (device_get_dma_attr(client->base.dev) == DEV_DMA_COHERENT)
+ args->capabilities |= DRM_TEGRA_CHANNEL_CAP_CACHE_COHERENT;
+
+ return 0;
+
+put_memctx:
+ if (context->memory_context)
+ host1x_memory_context_put(context->memory_context);
+put_channel:
+ host1x_channel_put(context->channel);
+free:
+ kfree(context);
+
+ return err;
+}
+
+int tegra_drm_ioctl_channel_close(struct drm_device *drm, void *data, struct drm_file *file)
+{
+ struct tegra_drm_file *fpriv = file->driver_priv;
+ struct drm_tegra_channel_close *args = data;
+ struct tegra_drm_context *context;
+
+ mutex_lock(&fpriv->lock);
+
+ context = xa_load(&fpriv->contexts, args->context);
+ if (!context) {
+ mutex_unlock(&fpriv->lock);
+ return -EINVAL;
+ }
+
+ xa_erase(&fpriv->contexts, args->context);
+
+ mutex_unlock(&fpriv->lock);
+
+ tegra_drm_channel_context_close(context);
+
+ return 0;
+}
+
+int tegra_drm_ioctl_channel_map(struct drm_device *drm, void *data, struct drm_file *file)
+{
+ struct tegra_drm_file *fpriv = file->driver_priv;
+ struct drm_tegra_channel_map *args = data;
+ struct tegra_drm_mapping *mapping;
+ struct tegra_drm_context *context;
+ enum dma_data_direction direction;
+ struct device *mapping_dev;
+ int err = 0;
+
+ if (args->flags & ~DRM_TEGRA_CHANNEL_MAP_READ_WRITE)
+ return -EINVAL;
+
+ mutex_lock(&fpriv->lock);
+
+ context = xa_load(&fpriv->contexts, args->context);
+ if (!context) {
+ mutex_unlock(&fpriv->lock);
+ return -EINVAL;
+ }
+
+ mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
+ if (!mapping) {
+ err = -ENOMEM;
+ goto unlock;
+ }
+
+ kref_init(&mapping->ref);
+
+ if (context->memory_context)
+ mapping_dev = &context->memory_context->dev;
+ else
+ mapping_dev = context->client->base.dev;
+
+ mapping->bo = tegra_gem_lookup(file, args->handle);
+ if (!mapping->bo) {
+ err = -EINVAL;
+ goto free;
+ }
+
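+	/*
+	 * Map flags describe the mapping from the engine's point of view: an
+	 * engine that reads a buffer needs a DMA_TO_DEVICE mapping, one that
+	 * writes it needs DMA_FROM_DEVICE, and a read-write mapping must be
+	 * bidirectional.
+	 */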
+ switch (args->flags & DRM_TEGRA_CHANNEL_MAP_READ_WRITE) {
+ case DRM_TEGRA_CHANNEL_MAP_READ_WRITE:
+ direction = DMA_BIDIRECTIONAL;
+ break;
+
+ case DRM_TEGRA_CHANNEL_MAP_WRITE:
+ direction = DMA_FROM_DEVICE;
+ break;
+
+ case DRM_TEGRA_CHANNEL_MAP_READ:
+ direction = DMA_TO_DEVICE;
+ break;
+
+ default:
+ err = -EINVAL;
+ goto put_gem;
+ }
+
+ mapping->map = host1x_bo_pin(mapping_dev, mapping->bo, direction, NULL);
+ if (IS_ERR(mapping->map)) {
+ err = PTR_ERR(mapping->map);
+ goto put_gem;
+ }
+
+ mapping->iova = mapping->map->phys;
+ mapping->iova_end = mapping->iova + host1x_to_tegra_bo(mapping->bo)->gem.size;
+
+ err = xa_alloc(&context->mappings, &args->mapping, mapping, XA_LIMIT(1, U32_MAX),
+ GFP_KERNEL);
+ if (err < 0)
+ goto unpin;
+
+ mutex_unlock(&fpriv->lock);
+
+ return 0;
+
+unpin:
+ host1x_bo_unpin(mapping->map);
+put_gem:
+ host1x_bo_put(mapping->bo);
+free:
+ kfree(mapping);
+unlock:
+ mutex_unlock(&fpriv->lock);
+ return err;
+}
+
+int tegra_drm_ioctl_channel_unmap(struct drm_device *drm, void *data, struct drm_file *file)
+{
+ struct tegra_drm_file *fpriv = file->driver_priv;
+ struct drm_tegra_channel_unmap *args = data;
+ struct tegra_drm_mapping *mapping;
+ struct tegra_drm_context *context;
+
+ mutex_lock(&fpriv->lock);
+
+ context = xa_load(&fpriv->contexts, args->context);
+ if (!context) {
+ mutex_unlock(&fpriv->lock);
+ return -EINVAL;
+ }
+
+ mapping = xa_erase(&context->mappings, args->mapping);
+
+ mutex_unlock(&fpriv->lock);
+
+ if (!mapping)
+ return -EINVAL;
+
+ tegra_drm_mapping_put(mapping);
+ return 0;
+}
+
+int tegra_drm_ioctl_syncpoint_allocate(struct drm_device *drm, void *data, struct drm_file *file)
+{
+ struct host1x *host1x = tegra_drm_to_host1x(drm->dev_private);
+ struct tegra_drm_file *fpriv = file->driver_priv;
+ struct drm_tegra_syncpoint_allocate *args = data;
+ struct host1x_syncpt *sp;
+ int err;
+
+ if (args->id)
+ return -EINVAL;
+
+ sp = host1x_syncpt_alloc(host1x, HOST1X_SYNCPT_CLIENT_MANAGED, current->comm);
+ if (!sp)
+ return -EBUSY;
+
+ args->id = host1x_syncpt_id(sp);
+
+ err = xa_insert(&fpriv->syncpoints, args->id, sp, GFP_KERNEL);
+ if (err) {
+ host1x_syncpt_put(sp);
+ return err;
+ }
+
+ return 0;
+}
+
+int tegra_drm_ioctl_syncpoint_free(struct drm_device *drm, void *data, struct drm_file *file)
+{
+ struct tegra_drm_file *fpriv = file->driver_priv;
+ struct drm_tegra_syncpoint_allocate *args = data;
+ struct host1x_syncpt *sp;
+
+ mutex_lock(&fpriv->lock);
+ sp = xa_erase(&fpriv->syncpoints, args->id);
+ mutex_unlock(&fpriv->lock);
+
+ if (!sp)
+ return -EINVAL;
+
+ host1x_syncpt_put(sp);
+
+ return 0;
+}
+
+int tegra_drm_ioctl_syncpoint_wait(struct drm_device *drm, void *data, struct drm_file *file)
+{
+ struct host1x *host1x = tegra_drm_to_host1x(drm->dev_private);
+ struct drm_tegra_syncpoint_wait *args = data;
+ signed long timeout_jiffies;
+ struct host1x_syncpt *sp;
+
+ if (args->padding != 0)
+ return -EINVAL;
+
+ sp = host1x_syncpt_get_by_id_noref(host1x, args->id);
+ if (!sp)
+ return -EINVAL;
+
+ timeout_jiffies = drm_timeout_abs_to_jiffies(args->timeout_ns);
+
+ return host1x_syncpt_wait(sp, args->threshold, timeout_jiffies, &args->value);
+}
diff --git a/drivers/gpu/drm/tegra/uapi.h b/drivers/gpu/drm/tegra/uapi.h
new file mode 100644
index 000000000..92ff1e44f
--- /dev/null
+++ b/drivers/gpu/drm/tegra/uapi.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2020 NVIDIA Corporation */
+
+#ifndef _TEGRA_DRM_UAPI_H
+#define _TEGRA_DRM_UAPI_H
+
+#include <linux/dma-mapping.h>
+#include <linux/idr.h>
+#include <linux/kref.h>
+#include <linux/xarray.h>
+
+#include <drm/drm.h>
+
+struct drm_file;
+struct drm_device;
+
+struct tegra_drm_file {
+ /* Legacy UAPI state */
+ struct idr legacy_contexts;
+ struct mutex lock;
+
+ /* New UAPI state */
+ struct xarray contexts;
+ struct xarray syncpoints;
+};
+
+struct tegra_drm_mapping {
+ struct kref ref;
+
+ struct host1x_bo_mapping *map;
+ struct host1x_bo *bo;
+
+ dma_addr_t iova;
+ dma_addr_t iova_end;
+};
+
+int tegra_drm_ioctl_channel_open(struct drm_device *drm, void *data,
+ struct drm_file *file);
+int tegra_drm_ioctl_channel_close(struct drm_device *drm, void *data,
+ struct drm_file *file);
+int tegra_drm_ioctl_channel_map(struct drm_device *drm, void *data,
+ struct drm_file *file);
+int tegra_drm_ioctl_channel_unmap(struct drm_device *drm, void *data,
+ struct drm_file *file);
+int tegra_drm_ioctl_channel_submit(struct drm_device *drm, void *data,
+ struct drm_file *file);
+int tegra_drm_ioctl_syncpoint_allocate(struct drm_device *drm, void *data,
+ struct drm_file *file);
+int tegra_drm_ioctl_syncpoint_free(struct drm_device *drm, void *data,
+ struct drm_file *file);
+int tegra_drm_ioctl_syncpoint_wait(struct drm_device *drm, void *data,
+ struct drm_file *file);
+
+void tegra_drm_uapi_close_file(struct tegra_drm_file *file);
+void tegra_drm_mapping_put(struct tegra_drm_mapping *mapping);
+
+#endif
diff --git a/drivers/gpu/drm/tegra/vic.c b/drivers/gpu/drm/tegra/vic.c
new file mode 100644
index 000000000..7382ee132
--- /dev/null
+++ b/drivers/gpu/drm/tegra/vic.c
@@ -0,0 +1,597 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015, NVIDIA Corporation.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/host1x.h>
+#include <linux/iommu.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+
+#include <soc/tegra/pmc.h>
+
+#include "drm.h"
+#include "falcon.h"
+#include "vic.h"
+
+struct vic_config {
+ const char *firmware;
+ unsigned int version;
+ bool supports_sid;
+};
+
+struct vic {
+ struct falcon falcon;
+
+ void __iomem *regs;
+ struct tegra_drm_client client;
+ struct host1x_channel *channel;
+ struct device *dev;
+ struct clk *clk;
+ struct reset_control *rst;
+
+ bool can_use_context;
+
+ /* Platform configuration */
+ const struct vic_config *config;
+};
+
+static inline struct vic *to_vic(struct tegra_drm_client *client)
+{
+ return container_of(client, struct vic, client);
+}
+
+static void vic_writel(struct vic *vic, u32 value, unsigned int offset)
+{
+ writel(value, vic->regs + offset);
+}
+
+static int vic_boot(struct vic *vic)
+{
+#ifdef CONFIG_IOMMU_API
+ struct iommu_fwspec *spec = dev_iommu_fwspec_get(vic->dev);
+#endif
+ u32 fce_ucode_size, fce_bin_data_offset;
+ void *hdr;
+ int err = 0;
+
+#ifdef CONFIG_IOMMU_API
+ if (vic->config->supports_sid && spec) {
+ u32 value;
+
+ value = TRANSCFG_ATT(1, TRANSCFG_SID_FALCON) |
+ TRANSCFG_ATT(0, TRANSCFG_SID_HW);
+ vic_writel(vic, value, VIC_TFBIF_TRANSCFG);
+
+ if (spec->num_ids > 0) {
+ value = spec->ids[0] & 0xffff;
+
+ /*
+ * STREAMID0 is used for input/output buffers.
+ * Initialize it to SID_VIC in case context isolation
+ * is not enabled, and SID_VIC is used for both firmware
+ * and data buffers.
+ *
+ * If context isolation is enabled, it will be
+ * overridden by the SETSTREAMID opcode as part of
+ * each job.
+ */
+ vic_writel(vic, value, VIC_THI_STREAMID0);
+
+ /* STREAMID1 is used for firmware loading. */
+ vic_writel(vic, value, VIC_THI_STREAMID1);
+ }
+ }
+#endif
+
+ /* setup clockgating registers */
+ vic_writel(vic, CG_IDLE_CG_DLY_CNT(4) |
+ CG_IDLE_CG_EN |
+ CG_WAKEUP_DLY_CNT(4),
+ NV_PVIC_MISC_PRI_VIC_CG);
+
+ err = falcon_boot(&vic->falcon);
+ if (err < 0)
+ return err;
+
+ hdr = vic->falcon.firmware.virt;
+ fce_bin_data_offset = *(u32 *)(hdr + VIC_UCODE_FCE_DATA_OFFSET);
+
+ /* Old VIC firmware needs kernel help with setting up FCE microcode. */
+ if (fce_bin_data_offset != 0x0 && fce_bin_data_offset != 0xa5a5a5a5) {
+ hdr = vic->falcon.firmware.virt +
+ *(u32 *)(hdr + VIC_UCODE_FCE_HEADER_OFFSET);
+ fce_ucode_size = *(u32 *)(hdr + FCE_UCODE_SIZE_OFFSET);
+
+ falcon_execute_method(&vic->falcon, VIC_SET_FCE_UCODE_SIZE,
+ fce_ucode_size);
+ falcon_execute_method(
+ &vic->falcon, VIC_SET_FCE_UCODE_OFFSET,
+ (vic->falcon.firmware.iova + fce_bin_data_offset) >> 8);
+ }
+
+ err = falcon_wait_idle(&vic->falcon);
+ if (err < 0) {
+ dev_err(vic->dev,
+ "failed to set application ID and FCE base\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int vic_init(struct host1x_client *client)
+{
+ struct tegra_drm_client *drm = host1x_to_drm_client(client);
+ struct drm_device *dev = dev_get_drvdata(client->host);
+ struct tegra_drm *tegra = dev->dev_private;
+ struct vic *vic = to_vic(drm);
+ int err;
+
+ err = host1x_client_iommu_attach(client);
+ if (err < 0 && err != -ENODEV) {
+ dev_err(vic->dev, "failed to attach to domain: %d\n", err);
+ return err;
+ }
+
+ vic->channel = host1x_channel_request(client);
+ if (!vic->channel) {
+ err = -ENOMEM;
+ goto detach;
+ }
+
+ client->syncpts[0] = host1x_syncpt_request(client, 0);
+ if (!client->syncpts[0]) {
+ err = -ENOMEM;
+ goto free_channel;
+ }
+
+ pm_runtime_enable(client->dev);
+ pm_runtime_use_autosuspend(client->dev);
+ pm_runtime_set_autosuspend_delay(client->dev, 500);
+
+ err = tegra_drm_register_client(tegra, drm);
+ if (err < 0)
+ goto disable_rpm;
+
+ /*
+ * Inherit the DMA parameters (such as maximum segment size) from the
+ * parent host1x device.
+ */
+ client->dev->dma_parms = client->host->dma_parms;
+
+ return 0;
+
+disable_rpm:
+ pm_runtime_dont_use_autosuspend(client->dev);
+ pm_runtime_force_suspend(client->dev);
+
+ host1x_syncpt_put(client->syncpts[0]);
+free_channel:
+ host1x_channel_put(vic->channel);
+detach:
+ host1x_client_iommu_detach(client);
+
+ return err;
+}
+
+static int vic_exit(struct host1x_client *client)
+{
+ struct tegra_drm_client *drm = host1x_to_drm_client(client);
+ struct drm_device *dev = dev_get_drvdata(client->host);
+ struct tegra_drm *tegra = dev->dev_private;
+ struct vic *vic = to_vic(drm);
+ int err;
+
+ /* avoid a dangling pointer just in case this disappears */
+ client->dev->dma_parms = NULL;
+
+ err = tegra_drm_unregister_client(tegra, drm);
+ if (err < 0)
+ return err;
+
+ pm_runtime_dont_use_autosuspend(client->dev);
+ pm_runtime_force_suspend(client->dev);
+
+ host1x_syncpt_put(client->syncpts[0]);
+ host1x_channel_put(vic->channel);
+ host1x_client_iommu_detach(client);
+
+ vic->channel = NULL;
+
+ if (client->group) {
+ dma_unmap_single(vic->dev, vic->falcon.firmware.phys,
+ vic->falcon.firmware.size, DMA_TO_DEVICE);
+ tegra_drm_free(tegra, vic->falcon.firmware.size,
+ vic->falcon.firmware.virt,
+ vic->falcon.firmware.iova);
+ } else {
+ dma_free_coherent(vic->dev, vic->falcon.firmware.size,
+ vic->falcon.firmware.virt,
+ vic->falcon.firmware.iova);
+ }
+
+ return 0;
+}
+
+static const struct host1x_client_ops vic_client_ops = {
+ .init = vic_init,
+ .exit = vic_exit,
+};
+
+static int vic_load_firmware(struct vic *vic)
+{
+ struct host1x_client *client = &vic->client.base;
+ struct tegra_drm *tegra = vic->client.drm;
+ static DEFINE_MUTEX(lock);
+ u32 fce_bin_data_offset;
+ dma_addr_t iova;
+ size_t size;
+ void *virt;
+ int err;
+
+ mutex_lock(&lock);
+
+ if (vic->falcon.firmware.virt) {
+ err = 0;
+ goto unlock;
+ }
+
+ err = falcon_read_firmware(&vic->falcon, vic->config->firmware);
+ if (err < 0)
+ goto unlock;
+
+ size = vic->falcon.firmware.size;
+
+ if (!client->group) {
+ virt = dma_alloc_coherent(vic->dev, size, &iova, GFP_KERNEL);
+ if (!virt) {
+ err = -ENOMEM;
+ goto unlock;
+ }
+ } else {
+ virt = tegra_drm_alloc(tegra, size, &iova);
+ if (IS_ERR(virt)) {
+ err = PTR_ERR(virt);
+ goto unlock;
+ }
+ }
+
+ vic->falcon.firmware.virt = virt;
+ vic->falcon.firmware.iova = iova;
+
+ err = falcon_load_firmware(&vic->falcon);
+ if (err < 0)
+ goto cleanup;
+
+ /*
+ * In this case we have received an IOVA from the shared domain, so we
+ * need to make sure to get the physical address so that the DMA API
+ * knows what memory pages to flush the cache for.
+ */
+ if (client->group) {
+ dma_addr_t phys;
+
+ phys = dma_map_single(vic->dev, virt, size, DMA_TO_DEVICE);
+
+ err = dma_mapping_error(vic->dev, phys);
+ if (err < 0)
+ goto cleanup;
+
+ vic->falcon.firmware.phys = phys;
+ }
+
+ /*
+ * Check if firmware is new enough to not require mapping firmware
+ * to data buffer domains.
+ */
+ fce_bin_data_offset = *(u32 *)(virt + VIC_UCODE_FCE_DATA_OFFSET);
+
+ if (!vic->config->supports_sid) {
+ vic->can_use_context = false;
+ } else if (fce_bin_data_offset != 0x0 && fce_bin_data_offset != 0xa5a5a5a5) {
+ /*
+ * Firmware will access FCE through STREAMID0, so context
+ * isolation cannot be used.
+ */
+ vic->can_use_context = false;
+ dev_warn_once(vic->dev, "context isolation disabled due to old firmware\n");
+ } else {
+ vic->can_use_context = true;
+ }
+
+unlock:
+ mutex_unlock(&lock);
+ return err;
+
+cleanup:
+ if (!client->group)
+ dma_free_coherent(vic->dev, size, virt, iova);
+ else
+ tegra_drm_free(tegra, size, virt, iova);
+
+ mutex_unlock(&lock);
+ return err;
+}
+
+static int __maybe_unused vic_runtime_resume(struct device *dev)
+{
+ struct vic *vic = dev_get_drvdata(dev);
+ int err;
+
+ err = clk_prepare_enable(vic->clk);
+ if (err < 0)
+ return err;
+
+ usleep_range(10, 20);
+
+ err = reset_control_deassert(vic->rst);
+ if (err < 0)
+ goto disable;
+
+ usleep_range(10, 20);
+
+ err = vic_load_firmware(vic);
+ if (err < 0)
+ goto assert;
+
+ err = vic_boot(vic);
+ if (err < 0)
+ goto assert;
+
+ return 0;
+
+assert:
+ reset_control_assert(vic->rst);
+disable:
+ clk_disable_unprepare(vic->clk);
+ return err;
+}
+
+static int __maybe_unused vic_runtime_suspend(struct device *dev)
+{
+ struct vic *vic = dev_get_drvdata(dev);
+ int err;
+
+ host1x_channel_stop(vic->channel);
+
+ err = reset_control_assert(vic->rst);
+ if (err < 0)
+ return err;
+
+ usleep_range(2000, 4000);
+
+ clk_disable_unprepare(vic->clk);
+
+ return 0;
+}
+
+static int vic_open_channel(struct tegra_drm_client *client,
+ struct tegra_drm_context *context)
+{
+ struct vic *vic = to_vic(client);
+
+ context->channel = host1x_channel_get(vic->channel);
+ if (!context->channel)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void vic_close_channel(struct tegra_drm_context *context)
+{
+ host1x_channel_put(context->channel);
+}
+
+static int vic_can_use_memory_ctx(struct tegra_drm_client *client, bool *supported)
+{
+ struct vic *vic = to_vic(client);
+ int err;
+
+ /* This doesn't access HW so it's safe to call without powering up. */
+ err = vic_load_firmware(vic);
+ if (err < 0)
+ return err;
+
+ *supported = vic->can_use_context;
+
+ return 0;
+}
+
+static const struct tegra_drm_client_ops vic_ops = {
+ .open_channel = vic_open_channel,
+ .close_channel = vic_close_channel,
+ .submit = tegra_drm_submit,
+ .get_streamid_offset = tegra_drm_get_streamid_offset_thi,
+ .can_use_memory_ctx = vic_can_use_memory_ctx,
+};
+
+#define NVIDIA_TEGRA_124_VIC_FIRMWARE "nvidia/tegra124/vic03_ucode.bin"
+
+static const struct vic_config vic_t124_config = {
+ .firmware = NVIDIA_TEGRA_124_VIC_FIRMWARE,
+ .version = 0x40,
+ .supports_sid = false,
+};
+
+#define NVIDIA_TEGRA_210_VIC_FIRMWARE "nvidia/tegra210/vic04_ucode.bin"
+
+static const struct vic_config vic_t210_config = {
+ .firmware = NVIDIA_TEGRA_210_VIC_FIRMWARE,
+ .version = 0x21,
+ .supports_sid = false,
+};
+
+#define NVIDIA_TEGRA_186_VIC_FIRMWARE "nvidia/tegra186/vic04_ucode.bin"
+
+static const struct vic_config vic_t186_config = {
+ .firmware = NVIDIA_TEGRA_186_VIC_FIRMWARE,
+ .version = 0x18,
+ .supports_sid = true,
+};
+
+#define NVIDIA_TEGRA_194_VIC_FIRMWARE "nvidia/tegra194/vic.bin"
+
+static const struct vic_config vic_t194_config = {
+ .firmware = NVIDIA_TEGRA_194_VIC_FIRMWARE,
+ .version = 0x19,
+ .supports_sid = true,
+};
+
+#define NVIDIA_TEGRA_234_VIC_FIRMWARE "nvidia/tegra234/vic.bin"
+
+static const struct vic_config vic_t234_config = {
+ .firmware = NVIDIA_TEGRA_234_VIC_FIRMWARE,
+ .version = 0x23,
+ .supports_sid = true,
+};
+
+static const struct of_device_id tegra_vic_of_match[] = {
+ { .compatible = "nvidia,tegra124-vic", .data = &vic_t124_config },
+ { .compatible = "nvidia,tegra210-vic", .data = &vic_t210_config },
+ { .compatible = "nvidia,tegra186-vic", .data = &vic_t186_config },
+ { .compatible = "nvidia,tegra194-vic", .data = &vic_t194_config },
+ { .compatible = "nvidia,tegra234-vic", .data = &vic_t234_config },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tegra_vic_of_match);
+
+static int vic_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct host1x_syncpt **syncpts;
+ struct vic *vic;
+ int err;
+
+ /* inherit DMA mask from host1x parent */
+ err = dma_coerce_mask_and_coherent(dev, *dev->parent->dma_mask);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err);
+ return err;
+ }
+
+ vic = devm_kzalloc(dev, sizeof(*vic), GFP_KERNEL);
+ if (!vic)
+ return -ENOMEM;
+
+ vic->config = of_device_get_match_data(dev);
+
+ syncpts = devm_kzalloc(dev, sizeof(*syncpts), GFP_KERNEL);
+ if (!syncpts)
+ return -ENOMEM;
+
+ vic->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(vic->regs))
+ return PTR_ERR(vic->regs);
+
+ vic->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(vic->clk)) {
+ dev_err(&pdev->dev, "failed to get clock\n");
+ return PTR_ERR(vic->clk);
+ }
+
+ err = clk_set_rate(vic->clk, ULONG_MAX);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to set clock rate\n");
+ return err;
+ }
+
+ if (!dev->pm_domain) {
+ vic->rst = devm_reset_control_get(dev, "vic");
+ if (IS_ERR(vic->rst)) {
+ dev_err(&pdev->dev, "failed to get reset\n");
+ return PTR_ERR(vic->rst);
+ }
+ }
+
+ vic->falcon.dev = dev;
+ vic->falcon.regs = vic->regs;
+
+ err = falcon_init(&vic->falcon);
+ if (err < 0)
+ return err;
+
+ platform_set_drvdata(pdev, vic);
+
+ INIT_LIST_HEAD(&vic->client.base.list);
+ vic->client.base.ops = &vic_client_ops;
+ vic->client.base.dev = dev;
+ vic->client.base.class = HOST1X_CLASS_VIC;
+ vic->client.base.syncpts = syncpts;
+ vic->client.base.num_syncpts = 1;
+ vic->dev = dev;
+
+ INIT_LIST_HEAD(&vic->client.list);
+ vic->client.version = vic->config->version;
+ vic->client.ops = &vic_ops;
+
+ err = host1x_client_register(&vic->client.base);
+ if (err < 0) {
+ dev_err(dev, "failed to register host1x client: %d\n", err);
+ goto exit_falcon;
+ }
+
+ return 0;
+
+exit_falcon:
+ falcon_exit(&vic->falcon);
+
+ return err;
+}
+
+static int vic_remove(struct platform_device *pdev)
+{
+ struct vic *vic = platform_get_drvdata(pdev);
+ int err;
+
+ err = host1x_client_unregister(&vic->client.base);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
+ err);
+ return err;
+ }
+
+ falcon_exit(&vic->falcon);
+
+ return 0;
+}
+
+static const struct dev_pm_ops vic_pm_ops = {
+ RUNTIME_PM_OPS(vic_runtime_suspend, vic_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
+};
+
+struct platform_driver tegra_vic_driver = {
+ .driver = {
+ .name = "tegra-vic",
+ .of_match_table = tegra_vic_of_match,
+ .pm = &vic_pm_ops
+ },
+ .probe = vic_probe,
+ .remove = vic_remove,
+};
+
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_124_SOC)
+MODULE_FIRMWARE(NVIDIA_TEGRA_124_VIC_FIRMWARE);
+#endif
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
+MODULE_FIRMWARE(NVIDIA_TEGRA_210_VIC_FIRMWARE);
+#endif
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC)
+MODULE_FIRMWARE(NVIDIA_TEGRA_186_VIC_FIRMWARE);
+#endif
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC)
+MODULE_FIRMWARE(NVIDIA_TEGRA_194_VIC_FIRMWARE);
+#endif
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_234_SOC)
+MODULE_FIRMWARE(NVIDIA_TEGRA_234_VIC_FIRMWARE);
+#endif
diff --git a/drivers/gpu/drm/tegra/vic.h b/drivers/gpu/drm/tegra/vic.h
new file mode 100644
index 000000000..acf35aac9
--- /dev/null
+++ b/drivers/gpu/drm/tegra/vic.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2015, NVIDIA Corporation.
+ */
+
+#ifndef TEGRA_VIC_H
+#define TEGRA_VIC_H
+
+/* VIC methods */
+
+#define VIC_SET_FCE_UCODE_SIZE 0x0000071C
+#define VIC_SET_FCE_UCODE_OFFSET 0x0000072C
+
+/* VIC registers */
+
+#define VIC_THI_STREAMID0 0x00000030
+#define VIC_THI_STREAMID1 0x00000034
+
+#define NV_PVIC_MISC_PRI_VIC_CG 0x000016d0
+#define CG_IDLE_CG_DLY_CNT(val) ((val & 0x3f) << 0)
+#define CG_IDLE_CG_EN (1 << 6)
+#define CG_WAKEUP_DLY_CNT(val) ((val & 0xf) << 16)
+
+#define VIC_TFBIF_TRANSCFG 0x00002044
+#define TRANSCFG_ATT(i, v) (((v) & 0x3) << (i * 4))
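+/* e.g. TRANSCFG_ATT(1, TRANSCFG_SID_FALCON) places the value 2 in bits 5:4 */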
+#define TRANSCFG_SID_HW 0
+#define TRANSCFG_SID_PHY 1
+#define TRANSCFG_SID_FALCON 2
+
+/* Firmware offsets */
+
+#define VIC_UCODE_FCE_HEADER_OFFSET (6*4)
+#define VIC_UCODE_FCE_DATA_OFFSET (7*4)
+#define FCE_UCODE_SIZE_OFFSET (2*4)
+
+#endif /* TEGRA_VIC_H */