author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
commit    2c3c1048746a4622d8c89a29670120dc8fab93c4 (patch)
tree      848558de17fb3008cdf4d861b01ac7781903ce39 /drivers/gpu/drm/i915/display
parent    Initial commit. (diff)
Adding upstream version 6.1.76. (upstream/6.1.76)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/gpu/drm/i915/display')
-rw-r--r--  drivers/gpu/drm/i915/display/dvo_ch7017.c | 415
-rw-r--r--  drivers/gpu/drm/i915/display/dvo_ch7xxx.c | 367
-rw-r--r--  drivers/gpu/drm/i915/display/dvo_ivch.c | 503
-rw-r--r--  drivers/gpu/drm/i915/display/dvo_ns2501.c | 710
-rw-r--r--  drivers/gpu/drm/i915/display/dvo_sil164.c | 280
-rw-r--r--  drivers/gpu/drm/i915/display/dvo_tfp410.c | 319
-rw-r--r--  drivers/gpu/drm/i915/display/g4x_dp.c | 1404
-rw-r--r--  drivers/gpu/drm/i915/display/g4x_dp.h | 30
-rw-r--r--  drivers/gpu/drm/i915/display/g4x_hdmi.c | 606
-rw-r--r--  drivers/gpu/drm/i915/display/g4x_hdmi.h | 19
-rw-r--r--  drivers/gpu/drm/i915/display/hsw_ips.c | 271
-rw-r--r--  drivers/gpu/drm/i915/display/hsw_ips.h | 26
-rw-r--r--  drivers/gpu/drm/i915/display/i9xx_plane.c | 1056
-rw-r--r--  drivers/gpu/drm/i915/display/i9xx_plane.h | 28
-rw-r--r--  drivers/gpu/drm/i915/display/icl_dsi.c | 2127
-rw-r--r--  drivers/gpu/drm/i915/display/icl_dsi.h | 15
-rw-r--r--  drivers/gpu/drm/i915/display/icl_dsi_regs.h | 342
-rw-r--r--  drivers/gpu/drm/i915/display/intel_acpi.c | 360
-rw-r--r--  drivers/gpu/drm/i915/display/intel_acpi.h | 31
-rw-r--r--  drivers/gpu/drm/i915/display/intel_atomic.c | 351
-rw-r--r--  drivers/gpu/drm/i915/display/intel_atomic.h | 55
-rw-r--r--  drivers/gpu/drm/i915/display/intel_atomic_plane.c | 1120
-rw-r--r--  drivers/gpu/drm/i915/display/intel_atomic_plane.h | 69
-rw-r--r--  drivers/gpu/drm/i915/display/intel_audio.c | 1410
-rw-r--r--  drivers/gpu/drm/i915/display/intel_audio.h | 26
-rw-r--r--  drivers/gpu/drm/i915/display/intel_audio_regs.h | 160
-rw-r--r--  drivers/gpu/drm/i915/display/intel_backlight.c | 1794
-rw-r--r--  drivers/gpu/drm/i915/display/intel_backlight.h | 52
-rw-r--r--  drivers/gpu/drm/i915/display/intel_backlight_regs.h | 124
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bios.c | 3790
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bios.h | 280
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bw.c | 1203
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bw.h | 72
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cdclk.c | 3283
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cdclk.h | 86
-rw-r--r--  drivers/gpu/drm/i915/display/intel_color.c | 2295
-rw-r--r--  drivers/gpu/drm/i915/display/intel_color.h | 26
-rw-r--r--  drivers/gpu/drm/i915/display/intel_combo_phy.c | 439
-rw-r--r--  drivers/gpu/drm/i915/display/intel_combo_phy.h | 20
-rw-r--r--  drivers/gpu/drm/i915/display/intel_combo_phy_regs.h | 162
-rw-r--r--  drivers/gpu/drm/i915/display/intel_connector.c | 295
-rw-r--r--  drivers/gpu/drm/i915/display/intel_connector.h | 36
-rw-r--r--  drivers/gpu/drm/i915/display/intel_crt.c | 1124
-rw-r--r--  drivers/gpu/drm/i915/display/intel_crt.h | 20
-rw-r--r--  drivers/gpu/drm/i915/display/intel_crtc.c | 705
-rw-r--r--  drivers/gpu/drm/i915/display/intel_crtc.h | 39
-rw-r--r--  drivers/gpu/drm/i915/display/intel_crtc_state_dump.c | 315
-rw-r--r--  drivers/gpu/drm/i915/display/intel_crtc_state_dump.h | 16
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cursor.c | 828
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cursor.h | 17
-rw-r--r--  drivers/gpu/drm/i915/display/intel_ddi.c | 4525
-rw-r--r--  drivers/gpu/drm/i915/display/intel_ddi.h | 73
-rw-r--r--  drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c | 1662
-rw-r--r--  drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h | 73
-rw-r--r--  drivers/gpu/drm/i915/display/intel_de.h | 84
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.c | 9140
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.h | 721
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_core.h | 426
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_debugfs.c | 2254
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_debugfs.h | 23
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power.c | 2489
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power.h | 279
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power_map.c | 1614
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power_map.h | 14
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power_well.c | 1956
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power_well.h | 177
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_trace.c | 9
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_trace.h | 589
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_types.h | 2074
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dkl_phy.c | 109
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dkl_phy.h | 24
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dmc.c | 1132
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dmc.h | 61
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dmc_regs.h | 89
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp.c | 5475
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp.h | 124
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_aux.c | 771
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_aux.h | 14
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c | 519
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_aux_backlight.h | 13
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_hdcp.c | 823
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_hdcp.h | 15
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_link_training.c | 1456
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_link_training.h | 42
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_mst.c | 1076
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_mst.h | 26
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpio_phy.c | 1106
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpio_phy.h | 59
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpll.c | 2061
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpll.h | 47
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpll_mgr.c | 4564
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpll_mgr.h | 376
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpt.c | 316
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpt.h | 23
-rw-r--r--  drivers/gpu/drm/i915/display/intel_drrs.c | 299
-rw-r--r--  drivers/gpu/drm/i915/display/intel_drrs.h | 28
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dsb.c | 363
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dsb.h | 23
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dsi.c | 114
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dsi.h | 177
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c | 200
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.h | 13
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dsi_vbt.c | 1037
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dsi_vbt.h | 21
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dvo.c | 550
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dvo.h | 13
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dvo_dev.h | 140
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fb.c | 2081
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fb.h | 96
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fb_pin.c | 303
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fb_pin.h | 28
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fbc.c | 1846
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fbc.h | 49
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fbdev.c | 733
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fbdev.h | 60
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fdi.c | 1076
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fdi.h | 40
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fifo_underrun.c | 511
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fifo_underrun.h | 27
-rw-r--r--  drivers/gpu/drm/i915/display/intel_frontbuffer.c | 328
-rw-r--r--  drivers/gpu/drm/i915/display/intel_frontbuffer.h | 170
-rw-r--r--  drivers/gpu/drm/i915/display/intel_global_state.c | 257
-rw-r--r--  drivers/gpu/drm/i915/display/intel_global_state.h | 90
-rw-r--r--  drivers/gpu/drm/i915/display/intel_gmbus.c | 1003
-rw-r--r--  drivers/gpu/drm/i915/display/intel_gmbus.h | 49
-rw-r--r--  drivers/gpu/drm/i915/display/intel_gmbus_regs.h | 81
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdcp.c | 2586
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdcp.h | 46
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdcp_regs.h | 270
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdmi.c | 3237
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdmi.h | 58
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hotplug.c | 769
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hotplug.h | 32
-rw-r--r--  drivers/gpu/drm/i915/display/intel_lpe_audio.c | 368
-rw-r--r--  drivers/gpu/drm/i915/display/intel_lpe_audio.h | 22
-rw-r--r--  drivers/gpu/drm/i915/display/intel_lspcon.c | 716
-rw-r--r--  drivers/gpu/drm/i915/display/intel_lspcon.h | 47
-rw-r--r--  drivers/gpu/drm/i915/display/intel_lvds.c | 1019
-rw-r--r--  drivers/gpu/drm/i915/display/intel_lvds.h | 22
-rw-r--r--  drivers/gpu/drm/i915/display/intel_modeset_setup.c | 735
-rw-r--r--  drivers/gpu/drm/i915/display/intel_modeset_setup.h | 15
-rw-r--r--  drivers/gpu/drm/i915/display/intel_modeset_verify.c | 246
-rw-r--r--  drivers/gpu/drm/i915/display/intel_modeset_verify.h | 21
-rw-r--r--  drivers/gpu/drm/i915/display/intel_opregion.c | 1250
-rw-r--r--  drivers/gpu/drm/i915/display/intel_opregion.h | 139
-rw-r--r--  drivers/gpu/drm/i915/display/intel_overlay.c | 1533
-rw-r--r--  drivers/gpu/drm/i915/display/intel_overlay.h | 29
-rw-r--r--  drivers/gpu/drm/i915/display/intel_panel.c | 686
-rw-r--r--  drivers/gpu/drm/i915/display/intel_panel.h | 54
-rw-r--r--  drivers/gpu/drm/i915/display/intel_pch_display.c | 650
-rw-r--r--  drivers/gpu/drm/i915/display/intel_pch_display.h | 45
-rw-r--r--  drivers/gpu/drm/i915/display/intel_pch_refclk.c | 679
-rw-r--r--  drivers/gpu/drm/i915/display/intel_pch_refclk.h | 22
-rw-r--r--  drivers/gpu/drm/i915/display/intel_pipe_crc.c | 672
-rw-r--r--  drivers/gpu/drm/i915/display/intel_pipe_crc.h | 38
-rw-r--r--  drivers/gpu/drm/i915/display/intel_plane_initial.c | 323
-rw-r--r--  drivers/gpu/drm/i915/display/intel_plane_initial.h | 13
-rw-r--r--  drivers/gpu/drm/i915/display/intel_pps.c | 1554
-rw-r--r--  drivers/gpu/drm/i915/display/intel_pps.h | 56
-rw-r--r--  drivers/gpu/drm/i915/display/intel_psr.c | 2656
-rw-r--r--  drivers/gpu/drm/i915/display/intel_psr.h | 61
-rw-r--r--  drivers/gpu/drm/i915/display/intel_qp_tables.c | 309
-rw-r--r--  drivers/gpu/drm/i915/display/intel_qp_tables.h | 14
-rw-r--r--  drivers/gpu/drm/i915/display/intel_quirks.c | 230
-rw-r--r--  drivers/gpu/drm/i915/display/intel_quirks.h | 25
-rw-r--r--  drivers/gpu/drm/i915/display/intel_sdvo.c | 3446
-rw-r--r--  drivers/gpu/drm/i915/display/intel_sdvo.h | 22
-rw-r--r--  drivers/gpu/drm/i915/display/intel_sdvo_regs.h | 741
-rw-r--r--  drivers/gpu/drm/i915/display/intel_snps_phy.c | 2036
-rw-r--r--  drivers/gpu/drm/i915/display/intel_snps_phy.h | 38
-rw-r--r--  drivers/gpu/drm/i915/display/intel_snps_phy_regs.h | 75
-rw-r--r--  drivers/gpu/drm/i915/display/intel_sprite.c | 1848
-rw-r--r--  drivers/gpu/drm/i915/display/intel_sprite.h | 50
-rw-r--r--  drivers/gpu/drm/i915/display/intel_tc.c | 1007
-rw-r--r--  drivers/gpu/drm/i915/display/intel_tc.h | 41
-rw-r--r--  drivers/gpu/drm/i915/display/intel_tc_phy_regs.h | 280
-rw-r--r--  drivers/gpu/drm/i915/display/intel_tv.c | 2020
-rw-r--r--  drivers/gpu/drm/i915/display/intel_tv.h | 13
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vbt_defs.h | 1067
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vdsc.c | 1218
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vdsc.h | 30
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vga.c | 167
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vga.h | 18
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vrr.c | 262
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vrr.h | 33
-rw-r--r--  drivers/gpu/drm/i915/display/skl_scaler.c | 847
-rw-r--r--  drivers/gpu/drm/i915/display/skl_scaler.h | 35
-rw-r--r--  drivers/gpu/drm/i915/display/skl_universal_plane.c | 2525
-rw-r--r--  drivers/gpu/drm/i915/display/skl_universal_plane.h | 35
-rw-r--r--  drivers/gpu/drm/i915/display/skl_watermark.c | 3575
-rw-r--r--  drivers/gpu/drm/i915/display/skl_watermark.h | 80
-rw-r--r--  drivers/gpu/drm/i915/display/vlv_dsi.c | 2015
-rw-r--r--  drivers/gpu/drm/i915/display/vlv_dsi.h | 19
-rw-r--r--  drivers/gpu/drm/i915/display/vlv_dsi_pll.c | 624
-rw-r--r--  drivers/gpu/drm/i915/display/vlv_dsi_pll.h | 38
-rw-r--r--  drivers/gpu/drm/i915/display/vlv_dsi_pll_regs.h | 109
-rw-r--r--  drivers/gpu/drm/i915/display/vlv_dsi_regs.h | 482
197 files changed, 135473 insertions, 0 deletions
diff --git a/drivers/gpu/drm/i915/display/dvo_ch7017.c b/drivers/gpu/drm/i915/display/dvo_ch7017.c
new file mode 100644
index 000000000..0589994dd
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/dvo_ch7017.c
@@ -0,0 +1,415 @@
+/*
+ * Copyright © 2006 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#include "intel_display_types.h"
+#include "intel_dvo_dev.h"
+
+#define CH7017_TV_DISPLAY_MODE 0x00
+#define CH7017_FLICKER_FILTER 0x01
+#define CH7017_VIDEO_BANDWIDTH 0x02
+#define CH7017_TEXT_ENHANCEMENT 0x03
+#define CH7017_START_ACTIVE_VIDEO 0x04
+#define CH7017_HORIZONTAL_POSITION 0x05
+#define CH7017_VERTICAL_POSITION 0x06
+#define CH7017_BLACK_LEVEL 0x07
+#define CH7017_CONTRAST_ENHANCEMENT 0x08
+#define CH7017_TV_PLL 0x09
+#define CH7017_TV_PLL_M 0x0a
+#define CH7017_TV_PLL_N 0x0b
+#define CH7017_SUB_CARRIER_0 0x0c
+#define CH7017_CIV_CONTROL 0x10
+#define CH7017_CIV_0 0x11
+#define CH7017_CHROMA_BOOST 0x14
+#define CH7017_CLOCK_MODE 0x1c
+#define CH7017_INPUT_CLOCK 0x1d
+#define CH7017_GPIO_CONTROL 0x1e
+#define CH7017_INPUT_DATA_FORMAT 0x1f
+#define CH7017_CONNECTION_DETECT 0x20
+#define CH7017_DAC_CONTROL 0x21
+#define CH7017_BUFFERED_CLOCK_OUTPUT 0x22
+#define CH7017_DEFEAT_VSYNC 0x47
+#define CH7017_TEST_PATTERN 0x48
+
+#define CH7017_POWER_MANAGEMENT 0x49
+/** Enables the TV output path. */
+#define CH7017_TV_EN (1 << 0)
+#define CH7017_DAC0_POWER_DOWN (1 << 1)
+#define CH7017_DAC1_POWER_DOWN (1 << 2)
+#define CH7017_DAC2_POWER_DOWN (1 << 3)
+#define CH7017_DAC3_POWER_DOWN (1 << 4)
+/** Powers down the TV out block, and DAC0-3 */
+#define CH7017_TV_POWER_DOWN_EN (1 << 5)
+
+#define CH7017_VERSION_ID 0x4a
+
+#define CH7017_DEVICE_ID 0x4b
+#define CH7017_DEVICE_ID_VALUE 0x1b
+#define CH7018_DEVICE_ID_VALUE 0x1a
+#define CH7019_DEVICE_ID_VALUE 0x19
+
+#define CH7017_XCLK_D2_ADJUST 0x53
+#define CH7017_UP_SCALER_COEFF_0 0x55
+#define CH7017_UP_SCALER_COEFF_1 0x56
+#define CH7017_UP_SCALER_COEFF_2 0x57
+#define CH7017_UP_SCALER_COEFF_3 0x58
+#define CH7017_UP_SCALER_COEFF_4 0x59
+#define CH7017_UP_SCALER_VERTICAL_INC_0 0x5a
+#define CH7017_UP_SCALER_VERTICAL_INC_1 0x5b
+#define CH7017_GPIO_INVERT 0x5c
+#define CH7017_UP_SCALER_HORIZONTAL_INC_0 0x5d
+#define CH7017_UP_SCALER_HORIZONTAL_INC_1 0x5e
+
+#define CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT 0x5f
+/**< Low bits of horizontal active pixel input */
+
+#define CH7017_ACTIVE_INPUT_LINE_OUTPUT 0x60
+/** High bits of horizontal active pixel input */
+#define CH7017_LVDS_HAP_INPUT_MASK (0x7 << 0)
+/** High bits of vertical active line output */
+#define CH7017_LVDS_VAL_HIGH_MASK (0x7 << 3)
+
+#define CH7017_VERTICAL_ACTIVE_LINE_OUTPUT 0x61
+/**< Low bits of vertical active line output */
+
+#define CH7017_HORIZONTAL_ACTIVE_PIXEL_OUTPUT 0x62
+/**< Low bits of horizontal active pixel output */
+
+#define CH7017_LVDS_POWER_DOWN 0x63
+/** High bits of horizontal active pixel output */
+#define CH7017_LVDS_HAP_HIGH_MASK (0x7 << 0)
+/** Enables the LVDS power down state transition */
+#define CH7017_LVDS_POWER_DOWN_EN (1 << 6)
+/** Enables the LVDS upscaler */
+#define CH7017_LVDS_UPSCALER_EN (1 << 7)
+#define CH7017_LVDS_POWER_DOWN_DEFAULT_RESERVED 0x08
+
+#define CH7017_LVDS_ENCODING 0x64
+#define CH7017_LVDS_DITHER_2D (1 << 2)
+#define CH7017_LVDS_DITHER_DIS (1 << 3)
+#define CH7017_LVDS_DUAL_CHANNEL_EN (1 << 4)
+#define CH7017_LVDS_24_BIT (1 << 5)
+
+#define CH7017_LVDS_ENCODING_2 0x65
+
+#define CH7017_LVDS_PLL_CONTROL 0x66
+/** Enables the LVDS panel output path */
+#define CH7017_LVDS_PANEN (1 << 0)
+/** Enables the LVDS panel backlight */
+#define CH7017_LVDS_BKLEN (1 << 3)
+
+#define CH7017_POWER_SEQUENCING_T1 0x67
+#define CH7017_POWER_SEQUENCING_T2 0x68
+#define CH7017_POWER_SEQUENCING_T3 0x69
+#define CH7017_POWER_SEQUENCING_T4 0x6a
+#define CH7017_POWER_SEQUENCING_T5 0x6b
+#define CH7017_GPIO_DRIVER_TYPE 0x6c
+#define CH7017_GPIO_DATA 0x6d
+#define CH7017_GPIO_DIRECTION_CONTROL 0x6e
+
+#define CH7017_LVDS_PLL_FEEDBACK_DIV 0x71
+# define CH7017_LVDS_PLL_FEED_BACK_DIVIDER_SHIFT 4
+# define CH7017_LVDS_PLL_FEED_FORWARD_DIVIDER_SHIFT 0
+# define CH7017_LVDS_PLL_FEEDBACK_DEFAULT_RESERVED 0x80
+
+#define CH7017_LVDS_PLL_VCO_CONTROL 0x72
+# define CH7017_LVDS_PLL_VCO_DEFAULT_RESERVED 0x80
+# define CH7017_LVDS_PLL_VCO_SHIFT 4
+# define CH7017_LVDS_PLL_POST_SCALE_DIV_SHIFT 0
+
+#define CH7017_OUTPUTS_ENABLE 0x73
+# define CH7017_CHARGE_PUMP_LOW 0x0
+# define CH7017_CHARGE_PUMP_HIGH 0x3
+# define CH7017_LVDS_CHANNEL_A (1 << 3)
+# define CH7017_LVDS_CHANNEL_B (1 << 4)
+# define CH7017_TV_DAC_A (1 << 5)
+# define CH7017_TV_DAC_B (1 << 6)
+# define CH7017_DDC_SELECT_DC2 (1 << 7)
+
+#define CH7017_LVDS_OUTPUT_AMPLITUDE 0x74
+#define CH7017_LVDS_PLL_EMI_REDUCTION 0x75
+#define CH7017_LVDS_POWER_DOWN_FLICKER 0x76
+
+#define CH7017_LVDS_CONTROL_2 0x78
+# define CH7017_LOOP_FILTER_SHIFT 5
+# define CH7017_PHASE_DETECTOR_SHIFT 0
+
+#define CH7017_BANG_LIMIT_CONTROL 0x7f
+
+struct ch7017_priv {
+ u8 dummy;
+};
+
+static void ch7017_dump_regs(struct intel_dvo_device *dvo);
+static void ch7017_dpms(struct intel_dvo_device *dvo, bool enable);
+
+static bool ch7017_read(struct intel_dvo_device *dvo, u8 addr, u8 *val)
+{
+ struct i2c_msg msgs[] = {
+ {
+ .addr = dvo->slave_addr,
+ .flags = 0,
+ .len = 1,
+ .buf = &addr,
+ },
+ {
+ .addr = dvo->slave_addr,
+ .flags = I2C_M_RD,
+ .len = 1,
+ .buf = val,
+ }
+ };
+ return i2c_transfer(dvo->i2c_bus, msgs, 2) == 2;
+}
+
+static bool ch7017_write(struct intel_dvo_device *dvo, u8 addr, u8 val)
+{
+ u8 buf[2] = { addr, val };
+ struct i2c_msg msg = {
+ .addr = dvo->slave_addr,
+ .flags = 0,
+ .len = 2,
+ .buf = buf,
+ };
+ return i2c_transfer(dvo->i2c_bus, &msg, 1) == 1;
+}
+
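The dpms hook further down updates LVDS_POWER_DOWN by reading the register, masking one bit, and writing the result back. A minimal sketch of that read-modify-write pattern as a helper, built only on the ch7017_read()/ch7017_write() primitives above (the helper is illustrative, not part of this file):

static bool ch7017_update(struct intel_dvo_device *dvo, u8 addr,
			  u8 clear, u8 set)
{
	u8 val;

	/* Fetch the current value, drop the cleared bits, set the new ones. */
	if (!ch7017_read(dvo, addr, &val))
		return false;

	return ch7017_write(dvo, addr, (val & ~clear) | set);
}
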
+/** Probes for a CH7017 on the given bus and slave address. */
+static bool ch7017_init(struct intel_dvo_device *dvo,
+ struct i2c_adapter *adapter)
+{
+ struct ch7017_priv *priv;
+ const char *str;
+ u8 val;
+
+ priv = kzalloc(sizeof(struct ch7017_priv), GFP_KERNEL);
+ if (priv == NULL)
+ return false;
+
+ dvo->i2c_bus = adapter;
+ dvo->dev_priv = priv;
+
+ if (!ch7017_read(dvo, CH7017_DEVICE_ID, &val))
+ goto fail;
+
+ switch (val) {
+ case CH7017_DEVICE_ID_VALUE:
+ str = "ch7017";
+ break;
+ case CH7018_DEVICE_ID_VALUE:
+ str = "ch7018";
+ break;
+ case CH7019_DEVICE_ID_VALUE:
+ str = "ch7019";
+ break;
+ default:
+ DRM_DEBUG_KMS("ch701x not detected, got %d: from %s "
+ "slave %d.\n",
+ val, adapter->name, dvo->slave_addr);
+ goto fail;
+ }
+
+ DRM_DEBUG_KMS("%s detected on %s, addr %d\n",
+ str, adapter->name, dvo->slave_addr);
+ return true;
+
+fail:
+ kfree(priv);
+ return false;
+}
+
+static enum drm_connector_status ch7017_detect(struct intel_dvo_device *dvo)
+{
+ return connector_status_connected;
+}
+
+static enum drm_mode_status ch7017_mode_valid(struct intel_dvo_device *dvo,
+ struct drm_display_mode *mode)
+{
+ if (mode->clock > 160000)
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
+
+static void ch7017_mode_set(struct intel_dvo_device *dvo,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode)
+{
+ u8 lvds_pll_feedback_div, lvds_pll_vco_control;
+ u8 outputs_enable, lvds_control_2, lvds_power_down;
+ u8 horizontal_active_pixel_input;
+ u8 horizontal_active_pixel_output, vertical_active_line_output;
+ u8 active_input_line_output;
+
+ DRM_DEBUG_KMS("Registers before mode setting\n");
+ ch7017_dump_regs(dvo);
+
+ /* LVDS PLL settings from page 75 of 7017-7017ds.pdf */
+ if (mode->clock < 100000) {
+ outputs_enable = CH7017_LVDS_CHANNEL_A | CH7017_CHARGE_PUMP_LOW;
+ lvds_pll_feedback_div = CH7017_LVDS_PLL_FEEDBACK_DEFAULT_RESERVED |
+ (2 << CH7017_LVDS_PLL_FEED_BACK_DIVIDER_SHIFT) |
+ (13 << CH7017_LVDS_PLL_FEED_FORWARD_DIVIDER_SHIFT);
+ lvds_pll_vco_control = CH7017_LVDS_PLL_VCO_DEFAULT_RESERVED |
+ (2 << CH7017_LVDS_PLL_VCO_SHIFT) |
+ (3 << CH7017_LVDS_PLL_POST_SCALE_DIV_SHIFT);
+ lvds_control_2 = (1 << CH7017_LOOP_FILTER_SHIFT) |
+ (0 << CH7017_PHASE_DETECTOR_SHIFT);
+ } else {
+ outputs_enable = CH7017_LVDS_CHANNEL_A | CH7017_CHARGE_PUMP_HIGH;
+ lvds_pll_feedback_div =
+ CH7017_LVDS_PLL_FEEDBACK_DEFAULT_RESERVED |
+ (2 << CH7017_LVDS_PLL_FEED_BACK_DIVIDER_SHIFT) |
+ (3 << CH7017_LVDS_PLL_FEED_FORWARD_DIVIDER_SHIFT);
+ lvds_control_2 = (3 << CH7017_LOOP_FILTER_SHIFT) |
+ (0 << CH7017_PHASE_DETECTOR_SHIFT);
+ if (1) { /* XXX: dual channel panel detection. Assume yes for now. */
+ outputs_enable |= CH7017_LVDS_CHANNEL_B;
+ lvds_pll_vco_control = CH7017_LVDS_PLL_VCO_DEFAULT_RESERVED |
+ (2 << CH7017_LVDS_PLL_VCO_SHIFT) |
+ (13 << CH7017_LVDS_PLL_POST_SCALE_DIV_SHIFT);
+ } else {
+ lvds_pll_vco_control = CH7017_LVDS_PLL_VCO_DEFAULT_RESERVED |
+ (1 << CH7017_LVDS_PLL_VCO_SHIFT) |
+ (13 << CH7017_LVDS_PLL_POST_SCALE_DIV_SHIFT);
+ }
+ }
+
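+ /* The 11-bit H/V sizes are split: the low 8 bits go to dedicated
+  * registers, the high 3 bits are packed into ACTIVE_INPUT_LINE_OUTPUT
+  * and LVDS_POWER_DOWN below.
+  */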
+ horizontal_active_pixel_input = mode->hdisplay & 0x00ff;
+
+ vertical_active_line_output = mode->vdisplay & 0x00ff;
+ horizontal_active_pixel_output = mode->hdisplay & 0x00ff;
+
+ active_input_line_output = ((mode->hdisplay & 0x0700) >> 8) |
+ (((mode->vdisplay & 0x0700) >> 8) << 3);
+
+ lvds_power_down = CH7017_LVDS_POWER_DOWN_DEFAULT_RESERVED |
+ (mode->hdisplay & 0x0700) >> 8;
+
+ ch7017_dpms(dvo, false);
+ ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT,
+ horizontal_active_pixel_input);
+ ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_OUTPUT,
+ horizontal_active_pixel_output);
+ ch7017_write(dvo, CH7017_VERTICAL_ACTIVE_LINE_OUTPUT,
+ vertical_active_line_output);
+ ch7017_write(dvo, CH7017_ACTIVE_INPUT_LINE_OUTPUT,
+ active_input_line_output);
+ ch7017_write(dvo, CH7017_LVDS_PLL_VCO_CONTROL, lvds_pll_vco_control);
+ ch7017_write(dvo, CH7017_LVDS_PLL_FEEDBACK_DIV, lvds_pll_feedback_div);
+ ch7017_write(dvo, CH7017_LVDS_CONTROL_2, lvds_control_2);
+ ch7017_write(dvo, CH7017_OUTPUTS_ENABLE, outputs_enable);
+
+ /* Turn the LVDS back on with new settings. */
+ ch7017_write(dvo, CH7017_LVDS_POWER_DOWN, lvds_power_down);
+
+ DRM_DEBUG_KMS("Registers after mode setting\n");
+ ch7017_dump_regs(dvo);
+}
+
+/* set the CH7017 power state */
+static void ch7017_dpms(struct intel_dvo_device *dvo, bool enable)
+{
+ u8 val;
+
+ ch7017_read(dvo, CH7017_LVDS_POWER_DOWN, &val);
+
+ /* Turn off TV/VGA, and never turn it on since we don't support it. */
+ ch7017_write(dvo, CH7017_POWER_MANAGEMENT,
+ CH7017_DAC0_POWER_DOWN |
+ CH7017_DAC1_POWER_DOWN |
+ CH7017_DAC2_POWER_DOWN |
+ CH7017_DAC3_POWER_DOWN |
+ CH7017_TV_POWER_DOWN_EN);
+
+ if (enable) {
+ /* Turn on the LVDS */
+ ch7017_write(dvo, CH7017_LVDS_POWER_DOWN,
+ val & ~CH7017_LVDS_POWER_DOWN_EN);
+ } else {
+ /* Turn off the LVDS */
+ ch7017_write(dvo, CH7017_LVDS_POWER_DOWN,
+ val | CH7017_LVDS_POWER_DOWN_EN);
+ }
+
+ /* XXX: Should actually wait for update power status somehow */
+ msleep(20);
+}
+
+static bool ch7017_get_hw_state(struct intel_dvo_device *dvo)
+{
+ u8 val;
+
+ ch7017_read(dvo, CH7017_LVDS_POWER_DOWN, &val);
+
+ if (val & CH7017_LVDS_POWER_DOWN_EN)
+ return false;
+ else
+ return true;
+}
+
+static void ch7017_dump_regs(struct intel_dvo_device *dvo)
+{
+ u8 val;
+
+#define DUMP(reg) \
+do { \
+ ch7017_read(dvo, reg, &val); \
+ DRM_DEBUG_KMS(#reg ": %02x\n", val); \
+} while (0)
+
+ DUMP(CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT);
+ DUMP(CH7017_HORIZONTAL_ACTIVE_PIXEL_OUTPUT);
+ DUMP(CH7017_VERTICAL_ACTIVE_LINE_OUTPUT);
+ DUMP(CH7017_ACTIVE_INPUT_LINE_OUTPUT);
+ DUMP(CH7017_LVDS_PLL_VCO_CONTROL);
+ DUMP(CH7017_LVDS_PLL_FEEDBACK_DIV);
+ DUMP(CH7017_LVDS_CONTROL_2);
+ DUMP(CH7017_OUTPUTS_ENABLE);
+ DUMP(CH7017_LVDS_POWER_DOWN);
+}
+
+static void ch7017_destroy(struct intel_dvo_device *dvo)
+{
+ struct ch7017_priv *priv = dvo->dev_priv;
+
+ if (priv) {
+ kfree(priv);
+ dvo->dev_priv = NULL;
+ }
+}
+
+const struct intel_dvo_dev_ops ch7017_ops = {
+ .init = ch7017_init,
+ .detect = ch7017_detect,
+ .mode_valid = ch7017_mode_valid,
+ .mode_set = ch7017_mode_set,
+ .dpms = ch7017_dpms,
+ .get_hw_state = ch7017_get_hw_state,
+ .dump_regs = ch7017_dump_regs,
+ .destroy = ch7017_destroy,
+};
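The ops table above is the only surface the rest of the driver sees; intel_dvo.c (not in this hunk) calls through it. A sketch of how a caller might walk such a vtable; dvo_bring_up() is assumed for illustration and is not taken from the i915 sources:

static bool dvo_bring_up(const struct intel_dvo_dev_ops *ops,
			 struct intel_dvo_device *dvo,
			 struct i2c_adapter *adapter,
			 struct drm_display_mode *mode)
{
	/* init() probes the chip on the bus and allocates dev_priv. */
	if (!ops->init(dvo, adapter))
		return false;

	/* ch7017_detect() always reports connected; other chips probe pins. */
	if (ops->detect(dvo) != connector_status_connected)
		return false;

	if (ops->mode_valid(dvo, mode) != MODE_OK)
		return false;

	ops->mode_set(dvo, mode, mode);	/* no fixed-up mode in this sketch */
	ops->dpms(dvo, true);

	return ops->get_hw_state(dvo);
}
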
diff --git a/drivers/gpu/drm/i915/display/dvo_ch7xxx.c b/drivers/gpu/drm/i915/display/dvo_ch7xxx.c
new file mode 100644
index 000000000..54f58ba44
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/dvo_ch7xxx.c
@@ -0,0 +1,367 @@
+/**************************************************************************
+
+Copyright © 2006 Dave Airlie
+
+All Rights Reserved.
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sub license, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+**************************************************************************/
+
+#include "intel_display_types.h"
+#include "intel_dvo_dev.h"
+
+#define CH7xxx_REG_VID 0x4a
+#define CH7xxx_REG_DID 0x4b
+
+#define CH7011_VID 0x83 /* 7010 as well */
+#define CH7010B_VID 0x05
+#define CH7009A_VID 0x84
+#define CH7009B_VID 0x85
+#define CH7301_VID 0x95
+
+#define CH7xxx_VID 0x84
+#define CH7xxx_DID 0x17
+#define CH7010_DID 0x16
+
+#define CH7xxx_NUM_REGS 0x4c
+
+#define CH7xxx_CM 0x1c
+#define CH7xxx_CM_XCM (1<<0)
+#define CH7xxx_CM_MCP (1<<2)
+#define CH7xxx_INPUT_CLOCK 0x1d
+#define CH7xxx_GPIO 0x1e
+#define CH7xxx_GPIO_HPIR (1<<3)
+#define CH7xxx_IDF 0x1f
+
+#define CH7xxx_IDF_HSP (1<<3)
+#define CH7xxx_IDF_VSP (1<<4)
+
+#define CH7xxx_CONNECTION_DETECT 0x20
+#define CH7xxx_CDET_DVI (1<<5)
+
+#define CH7301_DAC_CNTL 0x21
+#define CH7301_HOTPLUG 0x23
+#define CH7xxx_TCTL 0x31
+#define CH7xxx_TVCO 0x32
+#define CH7xxx_TPCP 0x33
+#define CH7xxx_TPD 0x34
+#define CH7xxx_TPVT 0x35
+#define CH7xxx_TLPF 0x36
+#define CH7xxx_TCT 0x37
+#define CH7301_TEST_PATTERN 0x48
+
+#define CH7xxx_PM 0x49
+#define CH7xxx_PM_FPD (1<<0)
+#define CH7301_PM_DACPD0 (1<<1)
+#define CH7301_PM_DACPD1 (1<<2)
+#define CH7301_PM_DACPD2 (1<<3)
+#define CH7xxx_PM_DVIL (1<<6)
+#define CH7xxx_PM_DVIP (1<<7)
+
+#define CH7301_SYNC_POLARITY 0x56
+#define CH7301_SYNC_RGB_YUV (1<<0)
+#define CH7301_SYNC_POL_DVI (1<<5)
+
+/** @file
+ * driver for the Chrontel 7xxx DVI chip over DVO.
+ */
+
+static struct ch7xxx_id_struct {
+ u8 vid;
+ char *name;
+} ch7xxx_ids[] = {
+ { CH7011_VID, "CH7011" },
+ { CH7010B_VID, "CH7010B" },
+ { CH7009A_VID, "CH7009A" },
+ { CH7009B_VID, "CH7009B" },
+ { CH7301_VID, "CH7301" },
+};
+
+static struct ch7xxx_did_struct {
+ u8 did;
+ char *name;
+} ch7xxx_dids[] = {
+ { CH7xxx_DID, "CH7XXX" },
+ { CH7010_DID, "CH7010B" },
+};
+
+struct ch7xxx_priv {
+ bool quiet;
+};
+
+static char *ch7xxx_get_id(u8 vid)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ch7xxx_ids); i++) {
+ if (ch7xxx_ids[i].vid == vid)
+ return ch7xxx_ids[i].name;
+ }
+
+ return NULL;
+}
+
+static char *ch7xxx_get_did(u8 did)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ch7xxx_dids); i++) {
+ if (ch7xxx_dids[i].did == did)
+ return ch7xxx_dids[i].name;
+ }
+
+ return NULL;
+}
+
+/** Reads an 8 bit register */
+static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, u8 *ch)
+{
+ struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+ u8 out_buf[2];
+ u8 in_buf[2];
+
+ struct i2c_msg msgs[] = {
+ {
+ .addr = dvo->slave_addr,
+ .flags = 0,
+ .len = 1,
+ .buf = out_buf,
+ },
+ {
+ .addr = dvo->slave_addr,
+ .flags = I2C_M_RD,
+ .len = 1,
+ .buf = in_buf,
+ }
+ };
+
+ out_buf[0] = addr;
+ out_buf[1] = 0;
+
+ if (i2c_transfer(adapter, msgs, 2) == 2) {
+ *ch = in_buf[0];
+ return true;
+ }
+
+ if (!ch7xxx->quiet) {
+ DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
+ addr, adapter->name, dvo->slave_addr);
+ }
+ return false;
+}
+
+/** Writes an 8 bit register */
+static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, u8 ch)
+{
+ struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+ u8 out_buf[2];
+ struct i2c_msg msg = {
+ .addr = dvo->slave_addr,
+ .flags = 0,
+ .len = 2,
+ .buf = out_buf,
+ };
+
+ out_buf[0] = addr;
+ out_buf[1] = ch;
+
+ if (i2c_transfer(adapter, &msg, 1) == 1)
+ return true;
+
+ if (!ch7xxx->quiet) {
+ DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
+ addr, adapter->name, dvo->slave_addr);
+ }
+
+ return false;
+}
+
+static bool ch7xxx_init(struct intel_dvo_device *dvo,
+ struct i2c_adapter *adapter)
+{
+ /* this will detect the CH7xxx chip on the specified i2c bus */
+ struct ch7xxx_priv *ch7xxx;
+ u8 vendor, device;
+ char *name, *devid;
+
+ ch7xxx = kzalloc(sizeof(struct ch7xxx_priv), GFP_KERNEL);
+ if (ch7xxx == NULL)
+ return false;
+
+ dvo->i2c_bus = adapter;
+ dvo->dev_priv = ch7xxx;
+ ch7xxx->quiet = true;
+
+ if (!ch7xxx_readb(dvo, CH7xxx_REG_VID, &vendor))
+ goto out;
+
+ name = ch7xxx_get_id(vendor);
+ if (!name) {
+ DRM_DEBUG_KMS("ch7xxx not detected; got VID 0x%02x from %s slave %d.\n",
+ vendor, adapter->name, dvo->slave_addr);
+ goto out;
+ }
+
+
+ if (!ch7xxx_readb(dvo, CH7xxx_REG_DID, &device))
+ goto out;
+
+ devid = ch7xxx_get_did(device);
+ if (!devid) {
+ DRM_DEBUG_KMS("ch7xxx not detected; got DID 0x%02x from %s slave %d.\n",
+ device, adapter->name, dvo->slave_addr);
+ goto out;
+ }
+
+ ch7xxx->quiet = false;
+ DRM_DEBUG_KMS("Detected %s chipset, vendor/device ID 0x%02x/0x%02x\n",
+ name, vendor, device);
+ return true;
+out:
+ kfree(ch7xxx);
+ return false;
+}
+
+static enum drm_connector_status ch7xxx_detect(struct intel_dvo_device *dvo)
+{
+ u8 cdet, orig_pm, pm;
+
+ ch7xxx_readb(dvo, CH7xxx_PM, &orig_pm);
+
+ pm = orig_pm;
+ pm &= ~CH7xxx_PM_FPD;
+ pm |= CH7xxx_PM_DVIL | CH7xxx_PM_DVIP;
+
+ ch7xxx_writeb(dvo, CH7xxx_PM, pm);
+
+ ch7xxx_readb(dvo, CH7xxx_CONNECTION_DETECT, &cdet);
+
+ ch7xxx_writeb(dvo, CH7xxx_PM, orig_pm);
+
+ if (cdet & CH7xxx_CDET_DVI)
+ return connector_status_connected;
+ return connector_status_disconnected;
+}
+
+static enum drm_mode_status ch7xxx_mode_valid(struct intel_dvo_device *dvo,
+ struct drm_display_mode *mode)
+{
+ if (mode->clock > 165000)
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
+
+static void ch7xxx_mode_set(struct intel_dvo_device *dvo,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode)
+{
+ u8 tvco, tpcp, tpd, tlpf, idf;
+
+ if (mode->clock <= 65000) {
+ tvco = 0x23;
+ tpcp = 0x08;
+ tpd = 0x16;
+ tlpf = 0x60;
+ } else {
+ tvco = 0x2d;
+ tpcp = 0x06;
+ tpd = 0x26;
+ tlpf = 0xa0;
+ }
+
+ ch7xxx_writeb(dvo, CH7xxx_TCTL, 0x00);
+ ch7xxx_writeb(dvo, CH7xxx_TVCO, tvco);
+ ch7xxx_writeb(dvo, CH7xxx_TPCP, tpcp);
+ ch7xxx_writeb(dvo, CH7xxx_TPD, tpd);
+ ch7xxx_writeb(dvo, CH7xxx_TPVT, 0x30);
+ ch7xxx_writeb(dvo, CH7xxx_TLPF, tlpf);
+ ch7xxx_writeb(dvo, CH7xxx_TCT, 0x00);
+
+ ch7xxx_readb(dvo, CH7xxx_IDF, &idf);
+
+ idf &= ~(CH7xxx_IDF_HSP | CH7xxx_IDF_VSP);
+ if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+ idf |= CH7xxx_IDF_HSP;
+
+ if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+ idf |= CH7xxx_IDF_VSP;
+
+ ch7xxx_writeb(dvo, CH7xxx_IDF, idf);
+}
+
+/* set the CH7xxx power state */
+static void ch7xxx_dpms(struct intel_dvo_device *dvo, bool enable)
+{
+ if (enable)
+ ch7xxx_writeb(dvo, CH7xxx_PM, CH7xxx_PM_DVIL | CH7xxx_PM_DVIP);
+ else
+ ch7xxx_writeb(dvo, CH7xxx_PM, CH7xxx_PM_FPD);
+}
+
+static bool ch7xxx_get_hw_state(struct intel_dvo_device *dvo)
+{
+ u8 val;
+
+ ch7xxx_readb(dvo, CH7xxx_PM, &val);
+
+ if (val & (CH7xxx_PM_DVIL | CH7xxx_PM_DVIP))
+ return true;
+ else
+ return false;
+}
+
+static void ch7xxx_dump_regs(struct intel_dvo_device *dvo)
+{
+ int i;
+
+ for (i = 0; i < CH7xxx_NUM_REGS; i++) {
+ u8 val;
+ if ((i % 8) == 0)
+ DRM_DEBUG_KMS("\n %02X: ", i);
+ ch7xxx_readb(dvo, i, &val);
+ DRM_DEBUG_KMS("%02X ", val);
+ }
+}
+
+static void ch7xxx_destroy(struct intel_dvo_device *dvo)
+{
+ struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
+
+ if (ch7xxx) {
+ kfree(ch7xxx);
+ dvo->dev_priv = NULL;
+ }
+}
+
+const struct intel_dvo_dev_ops ch7xxx_ops = {
+ .init = ch7xxx_init,
+ .detect = ch7xxx_detect,
+ .mode_valid = ch7xxx_mode_valid,
+ .mode_set = ch7xxx_mode_set,
+ .dpms = ch7xxx_dpms,
+ .get_hw_state = ch7xxx_get_hw_state,
+ .dump_regs = ch7xxx_dump_regs,
+ .destroy = ch7xxx_destroy,
+};
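Both Chrontel drivers in this commit share a probe idiom: priv->quiet suppresses I2C error logging until a known chip has answered, so scanning an empty address stays silent. The idiom from ch7xxx_init(), restated as a standalone sketch (this helper itself does not exist in the file):

static bool ch7xxx_probe_id(struct intel_dvo_device *dvo)
{
	struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
	u8 vendor;

	ch7xxx->quiet = true;	/* failed reads are expected while probing */
	if (!ch7xxx_readb(dvo, CH7xxx_REG_VID, &vendor) ||
	    !ch7xxx_get_id(vendor))
		return false;	/* nothing, or an unknown chip, answered */

	ch7xxx->quiet = false;	/* chip found: log I2C errors from now on */
	return true;
}
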
diff --git a/drivers/gpu/drm/i915/display/dvo_ivch.c b/drivers/gpu/drm/i915/display/dvo_ivch.c
new file mode 100644
index 000000000..f43d8c610
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/dvo_ivch.c
@@ -0,0 +1,503 @@
+/*
+ * Copyright © 2006 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Thomas Richter <thor@math.tu-berlin.de>
+ *
+ * Minor modifications (Dithering enable):
+ * Thomas Richter <thor@math.tu-berlin.de>
+ *
+ */
+
+#include "intel_display_types.h"
+#include "intel_dvo_dev.h"
+
+/*
+ * register definitions for the i82807aa.
+ *
+ * Documentation on this chipset can be found in datasheet #29069001 at
+ * intel.com.
+ */
+
+/*
+ * VCH Revision & GMBus Base Addr
+ */
+#define VR00 0x00
+# define VR00_BASE_ADDRESS_MASK 0x007f
+
+/*
+ * Functionality Enable
+ */
+#define VR01 0x01
+
+/*
+ * Enable the panel fitter
+ */
+# define VR01_PANEL_FIT_ENABLE (1 << 3)
+/*
+ * Enables the LCD display.
+ *
+ * This must not be set while VR01_DVO_BYPASS_ENABLE is set.
+ */
+# define VR01_LCD_ENABLE (1 << 2)
+/* Enables the DVO repeater. */
+# define VR01_DVO_BYPASS_ENABLE (1 << 1)
+/* Enables the DVO clock */
+# define VR01_DVO_ENABLE (1 << 0)
+/* Enable dithering for 18bpp panels. Not documented. */
+# define VR01_DITHER_ENABLE (1 << 4)
+
+/*
+ * LCD Interface Format
+ */
+#define VR10 0x10
+/* Enables LVDS output instead of CMOS */
+# define VR10_LVDS_ENABLE (1 << 4)
+/* Enables 18-bit LVDS output. */
+# define VR10_INTERFACE_1X18 (0 << 2)
+/* Enables 24-bit LVDS or CMOS output */
+# define VR10_INTERFACE_1X24 (1 << 2)
+/* Enables 2x18-bit LVDS or CMOS output. */
+# define VR10_INTERFACE_2X18 (2 << 2)
+/* Enables 2x24-bit LVDS output */
+# define VR10_INTERFACE_2X24 (3 << 2)
+/* Mask that defines the depth of the pipeline */
+# define VR10_INTERFACE_DEPTH_MASK (3 << 2)
+
+/*
+ * VR20 LCD Horizontal Display Size
+ */
+#define VR20 0x20
+
+/*
+ * LCD Vertical Display Size
+ */
+#define VR21 0x21
+
+/*
+ * Panel power down status
+ */
+#define VR30 0x30
+/* Read only bit indicating that the panel is not in a safe poweroff state. */
+# define VR30_PANEL_ON (1 << 15)
+
+#define VR40 0x40
+# define VR40_STALL_ENABLE (1 << 13)
+# define VR40_VERTICAL_INTERP_ENABLE (1 << 12)
+# define VR40_ENHANCED_PANEL_FITTING (1 << 11)
+# define VR40_HORIZONTAL_INTERP_ENABLE (1 << 10)
+# define VR40_AUTO_RATIO_ENABLE (1 << 9)
+# define VR40_CLOCK_GATING_ENABLE (1 << 8)
+
+/*
+ * Panel Fitting Vertical Ratio
+ * (((image_height - 1) << 16) / ((panel_height - 1))) >> 2
+ */
+#define VR41 0x41
+
+/*
+ * Panel Fitting Horizontal Ratio
+ * (((image_width - 1) << 16) / ((panel_width - 1))) >> 2
+ */
+#define VR42 0x42
+
+/*
+ * Horizontal Image Size
+ */
+#define VR43 0x43
+
+/* VR80 GPIO 0
+ */
+#define VR80 0x80
+#define VR81 0x81
+#define VR82 0x82
+#define VR83 0x83
+#define VR84 0x84
+#define VR85 0x85
+#define VR86 0x86
+#define VR87 0x87
+
+/* VR88 GPIO 8
+ */
+#define VR88 0x88
+
+/* Graphics BIOS scratch 0
+ */
+#define VR8E 0x8E
+# define VR8E_PANEL_TYPE_MASK (0xf << 0)
+# define VR8E_PANEL_INTERFACE_CMOS (0 << 4)
+# define VR8E_PANEL_INTERFACE_LVDS (1 << 4)
+# define VR8E_FORCE_DEFAULT_PANEL (1 << 5)
+
+/* Graphics BIOS scratch 1
+ */
+#define VR8F 0x8F
+# define VR8F_VCH_PRESENT (1 << 0)
+# define VR8F_DISPLAY_CONN (1 << 1)
+# define VR8F_POWER_MASK (0x3c)
+# define VR8F_POWER_POS (2)
+
+/* Some BIOS implementations do not restore the DVO state upon
+ * resume from standby. Thus, this driver has to handle it
+ * instead. The following list contains all registers that
+ * require saving.
+ */
+static const u16 backup_addresses[] = {
+ 0x11, 0x12,
+ 0x18, 0x19, 0x1a, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+ 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+ 0x8e, 0x8f,
+ 0x10 /* this must come last */
+};
+
+
+struct ivch_priv {
+ bool quiet;
+
+ u16 width, height;
+
+ /* Register backup */
+
+ u16 reg_backup[ARRAY_SIZE(backup_addresses)];
+};
+
+
+static void ivch_dump_regs(struct intel_dvo_device *dvo);
+/*
+ * Reads a register on the ivch.
+ *
+ * Each of the 256 registers are 16 bits long.
+ */
+static bool ivch_read(struct intel_dvo_device *dvo, int addr, u16 *data)
+{
+ struct ivch_priv *priv = dvo->dev_priv;
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+ u8 out_buf[1];
+ u8 in_buf[2];
+
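+ /* Three-part transfer: a zero-length read addressed to the chip,
+  * the register index sent without a repeated START, then the
+  * 16-bit value read back, low byte first.
+  */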
+ struct i2c_msg msgs[] = {
+ {
+ .addr = dvo->slave_addr,
+ .flags = I2C_M_RD,
+ .len = 0,
+ },
+ {
+ .addr = 0,
+ .flags = I2C_M_NOSTART,
+ .len = 1,
+ .buf = out_buf,
+ },
+ {
+ .addr = dvo->slave_addr,
+ .flags = I2C_M_RD | I2C_M_NOSTART,
+ .len = 2,
+ .buf = in_buf,
+ }
+ };
+
+ out_buf[0] = addr;
+
+ if (i2c_transfer(adapter, msgs, 3) == 3) {
+ *data = (in_buf[1] << 8) | in_buf[0];
+ return true;
+ }
+
+ if (!priv->quiet) {
+ DRM_DEBUG_KMS("Unable to read register 0x%02x from "
+ "%s:%02x.\n",
+ addr, adapter->name, dvo->slave_addr);
+ }
+ return false;
+}
+
+/* Writes a 16-bit register on the ivch */
+static bool ivch_write(struct intel_dvo_device *dvo, int addr, u16 data)
+{
+ struct ivch_priv *priv = dvo->dev_priv;
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+ u8 out_buf[3];
+ struct i2c_msg msg = {
+ .addr = dvo->slave_addr,
+ .flags = 0,
+ .len = 3,
+ .buf = out_buf,
+ };
+
+ out_buf[0] = addr;
+ out_buf[1] = data & 0xff;
+ out_buf[2] = data >> 8;
+
+ if (i2c_transfer(adapter, &msg, 1) == 1)
+ return true;
+
+ if (!priv->quiet) {
+ DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
+ addr, adapter->name, dvo->slave_addr);
+ }
+
+ return false;
+}
+
+/* Probes the given bus and slave address for an ivch */
+static bool ivch_init(struct intel_dvo_device *dvo,
+ struct i2c_adapter *adapter)
+{
+ struct ivch_priv *priv;
+ u16 temp;
+ int i;
+
+ priv = kzalloc(sizeof(struct ivch_priv), GFP_KERNEL);
+ if (priv == NULL)
+ return false;
+
+ dvo->i2c_bus = adapter;
+ dvo->dev_priv = priv;
+ priv->quiet = true;
+
+ if (!ivch_read(dvo, VR00, &temp))
+ goto out;
+ priv->quiet = false;
+
+ /* Since the identification bits are probably zeroes, which doesn't seem
+ * very unique, check that the value in the base address field matches
+ * the address it's responding on.
+ */
+ if ((temp & VR00_BASE_ADDRESS_MASK) != dvo->slave_addr) {
+ DRM_DEBUG_KMS("ivch detect failed due to address mismatch "
+ "(%d vs %d)\n",
+ (temp & VR00_BASE_ADDRESS_MASK), dvo->slave_addr);
+ goto out;
+ }
+
+ ivch_read(dvo, VR20, &priv->width);
+ ivch_read(dvo, VR21, &priv->height);
+
+ /* Make a backup of the registers to be able to restore them
+ * upon suspend.
+ */
+ for (i = 0; i < ARRAY_SIZE(backup_addresses); i++)
+ ivch_read(dvo, backup_addresses[i], priv->reg_backup + i);
+
+ ivch_dump_regs(dvo);
+
+ return true;
+
+out:
+ kfree(priv);
+ return false;
+}
+
+static enum drm_connector_status ivch_detect(struct intel_dvo_device *dvo)
+{
+ return connector_status_connected;
+}
+
+static enum drm_mode_status ivch_mode_valid(struct intel_dvo_device *dvo,
+ struct drm_display_mode *mode)
+{
+ if (mode->clock > 112000)
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
+
+/* Restore the DVO registers after a resume
+ * from RAM. Registers have been saved during
+ * the initialization.
+ */
+static void ivch_reset(struct intel_dvo_device *dvo)
+{
+ struct ivch_priv *priv = dvo->dev_priv;
+ int i;
+
+ DRM_DEBUG_KMS("Resetting the IVCH registers\n");
+
+ ivch_write(dvo, VR10, 0x0000);
+
+ for (i = 0; i < ARRAY_SIZE(backup_addresses); i++)
+ ivch_write(dvo, backup_addresses[i], priv->reg_backup[i]);
+}
+
+/* Sets the power state of the panel connected to the ivch */
+static void ivch_dpms(struct intel_dvo_device *dvo, bool enable)
+{
+ int i;
+ u16 vr01, vr30, backlight;
+
+ ivch_reset(dvo);
+
+ /* Set the new power state of the panel. */
+ if (!ivch_read(dvo, VR01, &vr01))
+ return;
+
+ if (enable)
+ backlight = 1;
+ else
+ backlight = 0;
+
+ ivch_write(dvo, VR80, backlight);
+
+ if (enable)
+ vr01 |= VR01_LCD_ENABLE | VR01_DVO_ENABLE;
+ else
+ vr01 &= ~(VR01_LCD_ENABLE | VR01_DVO_ENABLE);
+
+ ivch_write(dvo, VR01, vr01);
+
+ /* Wait for the panel to make its state transition */
+ for (i = 0; i < 100; i++) {
+ if (!ivch_read(dvo, VR30, &vr30))
+ break;
+
+ if (((vr30 & VR30_PANEL_ON) != 0) == enable)
+ break;
+ udelay(1000);
+ }
+ /* wait some more; vch may fail to resync sometimes without this */
+ udelay(16 * 1000);
+}
+
+static bool ivch_get_hw_state(struct intel_dvo_device *dvo)
+{
+ u16 vr01;
+
+ ivch_reset(dvo);
+
+ /* Set the new power state of the panel. */
+ if (!ivch_read(dvo, VR01, &vr01))
+ return false;
+
+ if (vr01 & VR01_LCD_ENABLE)
+ return true;
+ else
+ return false;
+}
+
+static void ivch_mode_set(struct intel_dvo_device *dvo,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode)
+{
+ struct ivch_priv *priv = dvo->dev_priv;
+ u16 vr40 = 0;
+ u16 vr01 = 0;
+ u16 vr10;
+
+ ivch_reset(dvo);
+
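+ /* VR10 is deliberately last in backup_addresses, so the saved LCD
+  * interface format sits at the tail of reg_backup.
+  */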
+ vr10 = priv->reg_backup[ARRAY_SIZE(backup_addresses) - 1];
+
+ /* Enable dithering for 18 bpp pipelines */
+ vr10 &= VR10_INTERFACE_DEPTH_MASK;
+ if (vr10 == VR10_INTERFACE_2X18 || vr10 == VR10_INTERFACE_1X18)
+ vr01 = VR01_DITHER_ENABLE;
+
+ vr40 = (VR40_STALL_ENABLE | VR40_VERTICAL_INTERP_ENABLE |
+ VR40_HORIZONTAL_INTERP_ENABLE);
+
+ if (mode->hdisplay != adjusted_mode->crtc_hdisplay ||
+ mode->vdisplay != adjusted_mode->crtc_vdisplay) {
+ u16 x_ratio, y_ratio;
+
+ vr01 |= VR01_PANEL_FIT_ENABLE;
+ vr40 |= VR40_CLOCK_GATING_ENABLE;
+ x_ratio = (((mode->hdisplay - 1) << 16) /
+ (adjusted_mode->crtc_hdisplay - 1)) >> 2;
+ y_ratio = (((mode->vdisplay - 1) << 16) /
+ (adjusted_mode->crtc_vdisplay - 1)) >> 2;
+ ivch_write(dvo, VR42, x_ratio);
+ ivch_write(dvo, VR41, y_ratio);
+ } else {
+ vr01 &= ~VR01_PANEL_FIT_ENABLE;
+ vr40 &= ~VR40_CLOCK_GATING_ENABLE;
+ }
+ vr40 &= ~VR40_AUTO_RATIO_ENABLE;
+
+ ivch_write(dvo, VR01, vr01);
+ ivch_write(dvo, VR40, vr40);
+}
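Plugging numbers into the VR41/VR42 ratio formula above (the arithmetic is an illustrative check, not from the commit): panel-fitting a 640x480 mode onto a 1024x768 panel gives

	/* x_ratio = (((640 - 1) << 16) / (1024 - 1)) >> 2
	 *         = (41877504 / 1023) >> 2 = 40935 >> 2 = 10233
	 * A panel-sized source would give ((1023 << 16) / 1023) >> 2 = 16384,
	 * so 10233/16384 ~= 0.625 = 639/1023: a 14-bit fixed-point ratio.
	 */
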
+
+static void ivch_dump_regs(struct intel_dvo_device *dvo)
+{
+ u16 val;
+
+ ivch_read(dvo, VR00, &val);
+ DRM_DEBUG_KMS("VR00: 0x%04x\n", val);
+ ivch_read(dvo, VR01, &val);
+ DRM_DEBUG_KMS("VR01: 0x%04x\n", val);
+ ivch_read(dvo, VR10, &val);
+ DRM_DEBUG_KMS("VR10: 0x%04x\n", val);
+ ivch_read(dvo, VR30, &val);
+ DRM_DEBUG_KMS("VR30: 0x%04x\n", val);
+ ivch_read(dvo, VR40, &val);
+ DRM_DEBUG_KMS("VR40: 0x%04x\n", val);
+
+ /* GPIO registers */
+ ivch_read(dvo, VR80, &val);
+ DRM_DEBUG_KMS("VR80: 0x%04x\n", val);
+ ivch_read(dvo, VR81, &val);
+ DRM_DEBUG_KMS("VR81: 0x%04x\n", val);
+ ivch_read(dvo, VR82, &val);
+ DRM_DEBUG_KMS("VR82: 0x%04x\n", val);
+ ivch_read(dvo, VR83, &val);
+ DRM_DEBUG_KMS("VR83: 0x%04x\n", val);
+ ivch_read(dvo, VR84, &val);
+ DRM_DEBUG_KMS("VR84: 0x%04x\n", val);
+ ivch_read(dvo, VR85, &val);
+ DRM_DEBUG_KMS("VR85: 0x%04x\n", val);
+ ivch_read(dvo, VR86, &val);
+ DRM_DEBUG_KMS("VR86: 0x%04x\n", val);
+ ivch_read(dvo, VR87, &val);
+ DRM_DEBUG_KMS("VR87: 0x%04x\n", val);
+ ivch_read(dvo, VR88, &val);
+ DRM_DEBUG_KMS("VR88: 0x%04x\n", val);
+
+ /* Scratch register 0 - AIM Panel type */
+ ivch_read(dvo, VR8E, &val);
+ DRM_DEBUG_KMS("VR8E: 0x%04x\n", val);
+
+ /* Scratch register 1 - Status register */
+ ivch_read(dvo, VR8F, &val);
+ DRM_DEBUG_KMS("VR8F: 0x%04x\n", val);
+}
+
+static void ivch_destroy(struct intel_dvo_device *dvo)
+{
+ struct ivch_priv *priv = dvo->dev_priv;
+
+ if (priv) {
+ kfree(priv);
+ dvo->dev_priv = NULL;
+ }
+}
+
+const struct intel_dvo_dev_ops ivch_ops = {
+ .init = ivch_init,
+ .dpms = ivch_dpms,
+ .get_hw_state = ivch_get_hw_state,
+ .mode_valid = ivch_mode_valid,
+ .mode_set = ivch_mode_set,
+ .detect = ivch_detect,
+ .dump_regs = ivch_dump_regs,
+ .destroy = ivch_destroy,
+};
diff --git a/drivers/gpu/drm/i915/display/dvo_ns2501.c b/drivers/gpu/drm/i915/display/dvo_ns2501.c
new file mode 100644
index 000000000..a724a8755
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/dvo_ns2501.c
@@ -0,0 +1,710 @@
+/*
+ *
+ * Copyright (c) 2012 Gilles Dartiguelongue, Thomas Richter
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "i915_drv.h"
+#include "i915_reg.h"
+#include "intel_display_types.h"
+#include "intel_dvo_dev.h"
+
+#define NS2501_VID 0x1305
+#define NS2501_DID 0x6726
+
+#define NS2501_VID_LO 0x00
+#define NS2501_VID_HI 0x01
+#define NS2501_DID_LO 0x02
+#define NS2501_DID_HI 0x03
+#define NS2501_REV 0x04
+#define NS2501_RSVD 0x05
+#define NS2501_FREQ_LO 0x06
+#define NS2501_FREQ_HI 0x07
+
+#define NS2501_REG8 0x08
+#define NS2501_8_VEN (1<<5)
+#define NS2501_8_HEN (1<<4)
+#define NS2501_8_DSEL (1<<3)
+#define NS2501_8_BPAS (1<<2)
+#define NS2501_8_RSVD (1<<1)
+#define NS2501_8_PD (1<<0)
+
+#define NS2501_REG9 0x09
+#define NS2501_9_VLOW (1<<7)
+#define NS2501_9_MSEL_MASK (0x7<<4)
+#define NS2501_9_TSEL (1<<3)
+#define NS2501_9_RSEN (1<<2)
+#define NS2501_9_RSVD (1<<1)
+#define NS2501_9_MDI (1<<0)
+
+#define NS2501_REGC 0x0c
+
+/*
+ * The following registers are not part of the official datasheet
+ * and are the result of reverse engineering.
+ */
+
+/*
+ * Register c0 controls how the DVO synchronizes with
+ * its input.
+ */
+#define NS2501_REGC0 0xc0
+#define NS2501_C0_ENABLE (1<<0) /* enable the DVO sync in general */
+#define NS2501_C0_HSYNC (1<<1) /* synchronize horizontal with input */
+#define NS2501_C0_VSYNC (1<<2) /* synchronize vertical with input */
+#define NS2501_C0_RESET (1<<7) /* reset the synchronization flip/flops */
+
+/*
+ * Register 41 is somehow related to the sync register and sync
+ * configuration. It should be 0x32 whenever regC0 is 0x05 (hsync off)
+ * and 0x00 otherwise.
+ */
+#define NS2501_REG41 0x41
+
+/*
+ * This register controls the dithering of the DVO.
+ * One bit enables it, the others define the dithering depth.
+ * The higher the value, the lower the dithering depth.
+ */
+#define NS2501_F9_REG 0xf9
+#define NS2501_F9_ENABLE (1<<0) /* if set, dithering is enabled */
+#define NS2501_F9_DITHER_MASK (0x7f<<1) /* controls the dither depth */
+#define NS2501_F9_DITHER_SHIFT 1 /* shifts the dither mask */
+
+/*
+ * PLL configuration register. This is a pair of registers,
+ * one single byte register at 1B, and a pair at 1C,1D.
+ * These registers are counters/dividers.
+ */
+#define NS2501_REG1B 0x1b /* one byte PLL control register */
+#define NS2501_REG1C 0x1c /* low-part of the second register */
+#define NS2501_REG1D 0x1d /* high-part of the second register */
+
+/*
+ * Scaler control registers. Horizontal at b8,b9,
+ * vertical at 10,11. The scale factor is computed as
+ * 2^16/control-value. The low-byte comes first.
+ */
+#define NS2501_REG10 0x10 /* low-byte vertical scaler */
+#define NS2501_REG11 0x11 /* high-byte vertical scaler */
+#define NS2501_REGB8 0xb8 /* low-byte horizontal scaler */
+#define NS2501_REGB9 0xb9 /* high-byte horizontal scaler */
+
+/*
+ * Display window definition. This consists of four registers
+ * per dimension. One register pair defines the start of the
+ * display, one the end.
+ * As far as I understand, this defines the window within which
+ * the scaler samples the input.
+ */
+#define NS2501_REGC1 0xc1 /* low-byte horizontal display start */
+#define NS2501_REGC2 0xc2 /* high-byte horizontal display start */
+#define NS2501_REGC3 0xc3 /* low-byte horizontal display stop */
+#define NS2501_REGC4 0xc4 /* high-byte horizontal display stop */
+#define NS2501_REGC5 0xc5 /* low-byte vertical display start */
+#define NS2501_REGC6 0xc6 /* high-byte vertical display start */
+#define NS2501_REGC7 0xc7 /* low-byte vertical display stop */
+#define NS2501_REGC8 0xc8 /* high-byte vertical display stop */
+
+/*
+ * The following register pair seems to define the start of
+ * the vertical sync. If automatic syncing is enabled, and the
+ * register value defines a sync pulse that is later than the
+ * incoming sync, then the register value is ignored and the
+ * external hsync triggers the synchronization.
+ */
+#define NS2501_REG80 0x80 /* low-byte vsync-start */
+#define NS2501_REG81 0x81 /* high-byte vsync-start */
+
+/*
+ * The following register pair seems to define the total number
+ * of lines created at the output side of the scaler.
+ * This is again a low-high register pair.
+ */
+#define NS2501_REG82 0x82 /* output display height, low byte */
+#define NS2501_REG83 0x83 /* output display height, high byte */
+
+/*
+ * The following registers define the end of the front-porch
+ * in horizontal and vertical position and hence allow to shift
+ * the image left/right or up/down.
+ */
+#define NS2501_REG98 0x98 /* horizontal start of display + 256, low */
+#define NS2501_REG99 0x99 /* horizontal start of display + 256, high */
+#define NS2501_REG8E 0x8e /* vertical start of the display, low byte */
+#define NS2501_REG8F 0x8f /* vertical start of the display, high byte */
+
+/*
+ * The following register pair control the function of the
+ * backlight and the DVO output. To enable the corresponding
+ * function, the corresponding bit must be set in both registers.
+ */
+#define NS2501_REG34 0x34 /* DVO enable functions, first register */
+#define NS2501_REG35 0x35 /* DVO enable functions, second register */
+#define NS2501_34_ENABLE_OUTPUT (1<<0) /* enable DVO output */
+#define NS2501_34_ENABLE_BACKLIGHT (1<<1) /* enable backlight */
+
+/*
+ * Registers 9C and 9D define the vertical output offset
+ * of the visible region.
+ */
+#define NS2501_REG9C 0x9c
+#define NS2501_REG9D 0x9d
+
+/*
+ * The register 9F defines the dithering. This requires the
+ * scaler to be ON. Bit 0 enables dithering, the remaining
+ * bits control the depth of the dither. The higher the value,
+ * the LOWER the dithering amplitude. A good value seems to be
+ * 15 (total register value).
+ */
+#define NS2501_REGF9 0xf9
+#define NS2501_F9_ENABLE_DITHER (1<<0) /* enable dithering */
+#define NS2501_F9_DITHER_MASK (0x7f<<1) /* dither masking */
+#define NS2501_F9_DITHER_SHIFT 1 /* upshift of the dither mask */
+
+enum {
+ MODE_640x480,
+ MODE_800x600,
+ MODE_1024x768,
+};
+
+struct ns2501_reg {
+ u8 offset;
+ u8 value;
+};
+
+/*
+ * The following structure keeps the complete configuration of
+ * the DVO, given a specific output configuration.
+ * This is pretty much guess-work from reverse-engineering, so
+ * read all this with a grain of salt.
+ */
+struct ns2501_configuration {
+ u8 sync; /* configuration of the C0 register */
+ u8 conf; /* configuration register 8 */
+ u8 syncb; /* configuration register 41 */
+ u8 dither; /* configuration of the dithering */
+ u8 pll_a; /* PLL configuration, register A, 1B */
+ u16 pll_b; /* PLL configuration, register B, 1C/1D */
+ u16 hstart; /* horizontal start, registers C1/C2 */
+ u16 hstop; /* horizontal total, registers C3/C4 */
+ u16 vstart; /* vertical start, registers C5/C6 */
+ u16 vstop; /* vertical total, registers C7/C8 */
+ u16 vsync; /* manual vertical sync start, 80/81 */
+ u16 vtotal; /* number of lines generated, 82/83 */
+ u16 hpos; /* horizontal position + 256, 98/99 */
+ u16 vpos; /* vertical position, 8e/8f */
+ u16 voffs; /* vertical output offset, 9c/9d */
+ u16 hscale; /* horizontal scaling factor, b8/b9 */
+ u16 vscale; /* vertical scaling factor, 10/11 */
+};
+
+/*
+ * DVO configuration values, partially based on what the BIOS
+ * of the Fujitsu Lifebook S6010 writes into registers,
+ * partially found by manual tweaking. These configurations assume
+ * a 1024x768 panel.
+ */
+static const struct ns2501_configuration ns2501_modes[] = {
+ [MODE_640x480] = {
+ .sync = NS2501_C0_ENABLE | NS2501_C0_VSYNC,
+ .conf = NS2501_8_VEN | NS2501_8_HEN | NS2501_8_PD,
+ .syncb = 0x32,
+ .dither = 0x0f,
+ .pll_a = 17,
+ .pll_b = 852,
+ .hstart = 144,
+ .hstop = 783,
+ .vstart = 22,
+ .vstop = 514,
+ .vsync = 2047, /* actually, ignored with this config */
+ .vtotal = 1341,
+ .hpos = 0,
+ .vpos = 16,
+ .voffs = 36,
+ .hscale = 40960,
+ .vscale = 40960
+ },
+ [MODE_800x600] = {
+ .sync = NS2501_C0_ENABLE |
+ NS2501_C0_HSYNC | NS2501_C0_VSYNC,
+ .conf = NS2501_8_VEN | NS2501_8_HEN | NS2501_8_PD,
+ .syncb = 0x00,
+ .dither = 0x0f,
+ .pll_a = 25,
+ .pll_b = 612,
+ .hstart = 215,
+ .hstop = 1016,
+ .vstart = 26,
+ .vstop = 627,
+ .vsync = 807,
+ .vtotal = 1341,
+ .hpos = 0,
+ .vpos = 4,
+ .voffs = 35,
+ .hscale = 51248,
+ .vscale = 51232
+ },
+ [MODE_1024x768] = {
+ .sync = NS2501_C0_ENABLE | NS2501_C0_VSYNC,
+ .conf = NS2501_8_VEN | NS2501_8_HEN | NS2501_8_PD,
+ .syncb = 0x32,
+ .dither = 0x0f,
+ .pll_a = 11,
+ .pll_b = 1350,
+ .hstart = 276,
+ .hstop = 1299,
+ .vstart = 15,
+ .vstop = 1056,
+ .vsync = 2047,
+ .vtotal = 1341,
+ .hpos = 0,
+ .vpos = 7,
+ .voffs = 27,
+ .hscale = 65535,
+ .vscale = 65535
+ }
+};
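The .hscale/.vscale entries can be checked against the scale-factor comment above: scale = 2^16 / control-value, so control = 2^16 * input / output (an illustrative cross-check, not from the commit):

	/* MODE_640x480 on the 1024x768 panel:
	 *   hscale = 65536 * 640 / 1024 = 40960; vscale = 65536 * 480 / 768 = 40960
	 * MODE_800x600 computes to 51200 both ways; the table's 51248/51232
	 * reflect the manual tweaking mentioned above.
	 * MODE_1024x768 runs at (near) unity: 65535.
	 */
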
+
+/*
+ * Other configuration values left by the BIOS of the
+ * Fujitsu S6010 in the DVO control registers. Their
+ * value does not depend on the BIOS and their meaning
+ * is unknown.
+ */
+
+static const struct ns2501_reg mode_agnostic_values[] = {
+ /* 08 is mode specific */
+ [0] = { .offset = 0x0a, .value = 0x81, },
+ /* 10,11 are part of the mode specific configuration */
+ [1] = { .offset = 0x12, .value = 0x02, },
+ [2] = { .offset = 0x18, .value = 0x07, },
+ [3] = { .offset = 0x19, .value = 0x00, },
+ [4] = { .offset = 0x1a, .value = 0x00, }, /* PLL?, ignored */
+ /* 1b,1c,1d are part of the mode specific configuration */
+ [5] = { .offset = 0x1e, .value = 0x02, },
+ [6] = { .offset = 0x1f, .value = 0x40, },
+ [7] = { .offset = 0x20, .value = 0x00, },
+ [8] = { .offset = 0x21, .value = 0x00, },
+ [9] = { .offset = 0x22, .value = 0x00, },
+ [10] = { .offset = 0x23, .value = 0x00, },
+ [11] = { .offset = 0x24, .value = 0x00, },
+ [12] = { .offset = 0x25, .value = 0x00, },
+ [13] = { .offset = 0x26, .value = 0x00, },
+ [14] = { .offset = 0x27, .value = 0x00, },
+ [15] = { .offset = 0x7e, .value = 0x18, },
+ /* 80-84 are part of the mode-specific configuration */
+ [16] = { .offset = 0x84, .value = 0x00, },
+ [17] = { .offset = 0x85, .value = 0x00, },
+ [18] = { .offset = 0x86, .value = 0x00, },
+ [19] = { .offset = 0x87, .value = 0x00, },
+ [20] = { .offset = 0x88, .value = 0x00, },
+ [21] = { .offset = 0x89, .value = 0x00, },
+ [22] = { .offset = 0x8a, .value = 0x00, },
+ [23] = { .offset = 0x8b, .value = 0x00, },
+ [24] = { .offset = 0x8c, .value = 0x10, },
+ [25] = { .offset = 0x8d, .value = 0x02, },
+ /* 8e,8f are part of the mode-specific configuration */
+ [26] = { .offset = 0x90, .value = 0xff, },
+ [27] = { .offset = 0x91, .value = 0x07, },
+ [28] = { .offset = 0x92, .value = 0xa0, },
+ [29] = { .offset = 0x93, .value = 0x02, },
+ [30] = { .offset = 0x94, .value = 0x00, },
+ [31] = { .offset = 0x95, .value = 0x00, },
+ [32] = { .offset = 0x96, .value = 0x05, },
+ [33] = { .offset = 0x97, .value = 0x00, },
+ /* 98,99 are part of the mode-specific configuration */
+ [34] = { .offset = 0x9a, .value = 0x88, },
+ [35] = { .offset = 0x9b, .value = 0x00, },
+ /* 9c,9d are part of the mode-specific configuration */
+ [36] = { .offset = 0x9e, .value = 0x25, },
+ [37] = { .offset = 0x9f, .value = 0x03, },
+ [38] = { .offset = 0xa0, .value = 0x28, },
+ [39] = { .offset = 0xa1, .value = 0x01, },
+ [40] = { .offset = 0xa2, .value = 0x28, },
+ [41] = { .offset = 0xa3, .value = 0x05, },
+ /* register 0xa4 is mode specific, but 0x80..0x84 works always */
+ [42] = { .offset = 0xa4, .value = 0x84, },
+ [43] = { .offset = 0xa5, .value = 0x00, },
+ [44] = { .offset = 0xa6, .value = 0x00, },
+ [45] = { .offset = 0xa7, .value = 0x00, },
+ [46] = { .offset = 0xa8, .value = 0x00, },
+ /* 0xa9 to 0xab are mode specific, but have no visible effect */
+ [47] = { .offset = 0xa9, .value = 0x04, },
+ [48] = { .offset = 0xaa, .value = 0x70, },
+ [49] = { .offset = 0xab, .value = 0x4f, },
+ [50] = { .offset = 0xac, .value = 0x00, },
+ [51] = { .offset = 0xad, .value = 0x00, },
+ [52] = { .offset = 0xb6, .value = 0x09, },
+ [53] = { .offset = 0xb7, .value = 0x03, },
+ /* b8,b9 are part of the mode-specific configuration */
+ [54] = { .offset = 0xba, .value = 0x00, },
+ [55] = { .offset = 0xbb, .value = 0x20, },
+ [56] = { .offset = 0xf3, .value = 0x90, },
+ [57] = { .offset = 0xf4, .value = 0x00, },
+ [58] = { .offset = 0xf7, .value = 0x88, },
+ /* f8 is mode specific, but the value does not matter */
+ [59] = { .offset = 0xf8, .value = 0x0a, },
+ [60] = { .offset = 0xf9, .value = 0x00, }
+};
+
+static const struct ns2501_reg regs_init[] = {
+ [0] = { .offset = 0x35, .value = 0xff, },
+ [1] = { .offset = 0x34, .value = 0x00, },
+ [2] = { .offset = 0x08, .value = 0x30, },
+};
+
+struct ns2501_priv {
+ bool quiet;
+ const struct ns2501_configuration *conf;
+};
+
+#define NSPTR(d) ((NS2501Ptr)(d->DriverPrivate.ptr))
+
+/*
+ * Read a register from the ns2501.
+ * Returns true if successful, false otherwise.
+ * If it returns false, it might be wise to enable the
+ * DVO first: the chip's PLL must be running before its
+ * registers can be read.
+ */
+static bool ns2501_readb(struct intel_dvo_device *dvo, int addr, u8 *ch)
+{
+ struct ns2501_priv *ns = dvo->dev_priv;
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+ u8 out_buf[2];
+ u8 in_buf[2];
+
+ struct i2c_msg msgs[] = {
+ {
+ .addr = dvo->slave_addr,
+ .flags = 0,
+ .len = 1,
+ .buf = out_buf,
+ },
+ {
+ .addr = dvo->slave_addr,
+ .flags = I2C_M_RD,
+ .len = 1,
+ .buf = in_buf,
+ }
+ };
+
+ out_buf[0] = addr;
+ out_buf[1] = 0;
+
+ if (i2c_transfer(adapter, msgs, 2) == 2) {
+ *ch = in_buf[0];
+ return true;
+ }
+
+ if (!ns->quiet) {
+ DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:0x%02x.\n",
+ addr, adapter->name, dvo->slave_addr);
+ }
+
+ return false;
+}
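
A short usage sketch (assuming the NS2501_VID_LO/NS2501_VID_HI offsets defined
near the top of this file): assembling the 16-bit vendor ID from the two
byte-wide ID registers:

    u8 lo, hi;
    u16 vid = 0;

    /* combine the two byte-wide ID registers into one word */
    if (ns2501_readb(dvo, NS2501_VID_LO, &lo) &&
        ns2501_readb(dvo, NS2501_VID_HI, &hi))
        vid = (hi << 8) | lo;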
+
+/*
+ * Write a register to the ns2501.
+ * Returns true if successful, false otherwise.
+ * If it returns false, it might be wise to enable the
+ * DVO first: the chip's PLL must be running before its
+ * registers can be accessed.
+ */
+static bool ns2501_writeb(struct intel_dvo_device *dvo, int addr, u8 ch)
+{
+ struct ns2501_priv *ns = dvo->dev_priv;
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+ u8 out_buf[2];
+
+ struct i2c_msg msg = {
+ .addr = dvo->slave_addr,
+ .flags = 0,
+ .len = 2,
+ .buf = out_buf,
+ };
+
+ out_buf[0] = addr;
+ out_buf[1] = ch;
+
+ if (i2c_transfer(adapter, &msg, 1) == 1) {
+ return true;
+ }
+
+ if (!ns->quiet) {
+ DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d\n",
+ addr, adapter->name, dvo->slave_addr);
+ }
+
+ return false;
+}
+
+/*
+ * National Semiconductor 2501 driver for a chip on the i2c bus:
+ * scan for the chip on the specified bus.
+ * Hope the VBIOS initialized the PLL correctly so we can
+ * talk to it. If not, it will not respond and cannot be detected.
+ * Bummer!
+ */
+static bool ns2501_init(struct intel_dvo_device *dvo,
+ struct i2c_adapter *adapter)
+{
+ /* this will detect the NS2501 chip on the specified i2c bus */
+ struct ns2501_priv *ns;
+ unsigned char ch;
+
+ ns = kzalloc(sizeof(struct ns2501_priv), GFP_KERNEL);
+ if (ns == NULL)
+ return false;
+
+ dvo->i2c_bus = adapter;
+ dvo->dev_priv = ns;
+ ns->quiet = true;
+
+ if (!ns2501_readb(dvo, NS2501_VID_LO, &ch))
+ goto out;
+
+ if (ch != (NS2501_VID & 0xff)) {
+ DRM_DEBUG_KMS("ns2501 not detected got %d: from %s Slave %d.\n",
+ ch, adapter->name, dvo->slave_addr);
+ goto out;
+ }
+
+ if (!ns2501_readb(dvo, NS2501_DID_LO, &ch))
+ goto out;
+
+ if (ch != (NS2501_DID & 0xff)) {
+ DRM_DEBUG_KMS("ns2501 not detected got %d: from %s Slave %d.\n",
+ ch, adapter->name, dvo->slave_addr);
+ goto out;
+ }
+ ns->quiet = false;
+
+ DRM_DEBUG_KMS("init ns2501 dvo controller successfully!\n");
+
+ return true;
+
+out:
+ kfree(ns);
+ return false;
+}
+
+static enum drm_connector_status ns2501_detect(struct intel_dvo_device *dvo)
+{
+ /*
+ * This is a laptop display; it doesn't have hotplugging.
+ * Even if it did, the detection bit of the 2501 is unreliable as
+ * it only works for some display types.
+ * It is even more unreliable as the PLL must be active to
+ * allow reading from the chip.
+ */
+ return connector_status_connected;
+}
+
+static enum drm_mode_status ns2501_mode_valid(struct intel_dvo_device *dvo,
+ struct drm_display_mode *mode)
+{
+ DRM_DEBUG_KMS("is mode valid (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d)\n",
+ mode->hdisplay, mode->htotal, mode->vdisplay, mode->vtotal);
+
+ /*
+ * Currently, these are all the modes we have data for.
+ * More might exist. It is unclear how to find the native
+ * resolution of the panel from here; if we could, we could
+ * always accept a mode by disabling the scaler.
+ */
+ if ((mode->hdisplay == 640 && mode->vdisplay == 480 && mode->clock == 25175) ||
+ (mode->hdisplay == 800 && mode->vdisplay == 600 && mode->clock == 40000) ||
+ (mode->hdisplay == 1024 && mode->vdisplay == 768 && mode->clock == 65000)) {
+ return MODE_OK;
+ } else {
+ return MODE_ONE_SIZE; /* Is this a reasonable error? */
+ }
+}
+
+static void ns2501_mode_set(struct intel_dvo_device *dvo,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode)
+{
+ const struct ns2501_configuration *conf;
+ struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
+ int mode_idx, i;
+
+ DRM_DEBUG_KMS("set mode (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d).\n",
+ mode->hdisplay, mode->htotal, mode->vdisplay, mode->vtotal);
+
+ DRM_DEBUG_KMS("Detailed requested mode settings are:\n"
+ "clock : %d kHz\n"
+ "hdisplay : %d\n"
+ "hblank start : %d\n"
+ "hblank end : %d\n"
+ "hsync start : %d\n"
+ "hsync end : %d\n"
+ "htotal : %d\n"
+ "hskew : %d\n"
+ "vdisplay : %d\n"
+ "vblank start : %d\n"
+ "hblank end : %d\n"
+ "vsync start : %d\n"
+ "vsync end : %d\n"
+ "vtotal : %d\n",
+ adjusted_mode->crtc_clock,
+ adjusted_mode->crtc_hdisplay,
+ adjusted_mode->crtc_hblank_start,
+ adjusted_mode->crtc_hblank_end,
+ adjusted_mode->crtc_hsync_start,
+ adjusted_mode->crtc_hsync_end,
+ adjusted_mode->crtc_htotal,
+ adjusted_mode->crtc_hskew,
+ adjusted_mode->crtc_vdisplay,
+ adjusted_mode->crtc_vblank_start,
+ adjusted_mode->crtc_vblank_end,
+ adjusted_mode->crtc_vsync_start,
+ adjusted_mode->crtc_vsync_end,
+ adjusted_mode->crtc_vtotal);
+
+ if (mode->hdisplay == 640 && mode->vdisplay == 480)
+ mode_idx = MODE_640x480;
+ else if (mode->hdisplay == 800 && mode->vdisplay == 600)
+ mode_idx = MODE_800x600;
+ else if (mode->hdisplay == 1024 && mode->vdisplay == 768)
+ mode_idx = MODE_1024x768;
+ else
+ return;
+
+ /* Hopefully doing it every time won't hurt... */
+ for (i = 0; i < ARRAY_SIZE(regs_init); i++)
+ ns2501_writeb(dvo, regs_init[i].offset, regs_init[i].value);
+
+ /* Write the mode-agnostic values */
+ for (i = 0; i < ARRAY_SIZE(mode_agnostic_values); i++)
+ ns2501_writeb(dvo, mode_agnostic_values[i].offset,
+ mode_agnostic_values[i].value);
+
+ /* Write now the mode-specific configuration */
+ conf = ns2501_modes + mode_idx;
+ ns->conf = conf;
+
+ ns2501_writeb(dvo, NS2501_REG8, conf->conf);
+ ns2501_writeb(dvo, NS2501_REG1B, conf->pll_a);
+ ns2501_writeb(dvo, NS2501_REG1C, conf->pll_b & 0xff);
+ ns2501_writeb(dvo, NS2501_REG1D, conf->pll_b >> 8);
+ ns2501_writeb(dvo, NS2501_REGC1, conf->hstart & 0xff);
+ ns2501_writeb(dvo, NS2501_REGC2, conf->hstart >> 8);
+ ns2501_writeb(dvo, NS2501_REGC3, conf->hstop & 0xff);
+ ns2501_writeb(dvo, NS2501_REGC4, conf->hstop >> 8);
+ ns2501_writeb(dvo, NS2501_REGC5, conf->vstart & 0xff);
+ ns2501_writeb(dvo, NS2501_REGC6, conf->vstart >> 8);
+ ns2501_writeb(dvo, NS2501_REGC7, conf->vstop & 0xff);
+ ns2501_writeb(dvo, NS2501_REGC8, conf->vstop >> 8);
+ ns2501_writeb(dvo, NS2501_REG80, conf->vsync & 0xff);
+ ns2501_writeb(dvo, NS2501_REG81, conf->vsync >> 8);
+ ns2501_writeb(dvo, NS2501_REG82, conf->vtotal & 0xff);
+ ns2501_writeb(dvo, NS2501_REG83, conf->vtotal >> 8);
+ ns2501_writeb(dvo, NS2501_REG98, conf->hpos & 0xff);
+ ns2501_writeb(dvo, NS2501_REG99, conf->hpos >> 8);
+ ns2501_writeb(dvo, NS2501_REG8E, conf->vpos & 0xff);
+ ns2501_writeb(dvo, NS2501_REG8F, conf->vpos >> 8);
+ ns2501_writeb(dvo, NS2501_REG9C, conf->voffs & 0xff);
+ ns2501_writeb(dvo, NS2501_REG9D, conf->voffs >> 8);
+ ns2501_writeb(dvo, NS2501_REGB8, conf->hscale & 0xff);
+ ns2501_writeb(dvo, NS2501_REGB9, conf->hscale >> 8);
+ ns2501_writeb(dvo, NS2501_REG10, conf->vscale & 0xff);
+ ns2501_writeb(dvo, NS2501_REG11, conf->vscale >> 8);
+ ns2501_writeb(dvo, NS2501_REGF9, conf->dither);
+ ns2501_writeb(dvo, NS2501_REG41, conf->syncb);
+ ns2501_writeb(dvo, NS2501_REGC0, conf->sync);
+}
+
+/* read back the current NS2501 power state */
+static bool ns2501_get_hw_state(struct intel_dvo_device *dvo)
+{
+ unsigned char ch;
+
+ if (!ns2501_readb(dvo, NS2501_REG8, &ch))
+ return false;
+
+ return ch & NS2501_8_PD;
+}
+
+/* set the NS2501 power state */
+static void ns2501_dpms(struct intel_dvo_device *dvo, bool enable)
+{
+ struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
+
+ DRM_DEBUG_KMS("Trying set the dpms of the DVO to %i\n", enable);
+
+ if (enable) {
+ ns2501_writeb(dvo, NS2501_REGC0, ns->conf->sync | 0x08);
+
+ ns2501_writeb(dvo, NS2501_REG41, ns->conf->syncb);
+
+ ns2501_writeb(dvo, NS2501_REG34, NS2501_34_ENABLE_OUTPUT);
+ msleep(15);
+
+ ns2501_writeb(dvo, NS2501_REG8,
+ ns->conf->conf | NS2501_8_BPAS);
+ if (!(ns->conf->conf & NS2501_8_BPAS))
+ ns2501_writeb(dvo, NS2501_REG8, ns->conf->conf);
+ msleep(200);
+
+ ns2501_writeb(dvo, NS2501_REG34,
+ NS2501_34_ENABLE_OUTPUT | NS2501_34_ENABLE_BACKLIGHT);
+
+ ns2501_writeb(dvo, NS2501_REGC0, ns->conf->sync);
+ } else {
+ ns2501_writeb(dvo, NS2501_REG34, NS2501_34_ENABLE_OUTPUT);
+ msleep(200);
+
+ ns2501_writeb(dvo, NS2501_REG8, NS2501_8_VEN | NS2501_8_HEN |
+ NS2501_8_BPAS);
+ msleep(15);
+
+ ns2501_writeb(dvo, NS2501_REG34, 0x00);
+ }
+}
+
+static void ns2501_destroy(struct intel_dvo_device *dvo)
+{
+ struct ns2501_priv *ns = dvo->dev_priv;
+
+ if (ns) {
+ kfree(ns);
+ dvo->dev_priv = NULL;
+ }
+}
+
+const struct intel_dvo_dev_ops ns2501_ops = {
+ .init = ns2501_init,
+ .detect = ns2501_detect,
+ .mode_valid = ns2501_mode_valid,
+ .mode_set = ns2501_mode_set,
+ .dpms = ns2501_dpms,
+ .get_hw_state = ns2501_get_hw_state,
+ .destroy = ns2501_destroy,
+};
diff --git a/drivers/gpu/drm/i915/display/dvo_sil164.c b/drivers/gpu/drm/i915/display/dvo_sil164.c
new file mode 100644
index 000000000..0dfa0a020
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/dvo_sil164.c
@@ -0,0 +1,280 @@
+/**************************************************************************
+
+Copyright © 2006 Dave Airlie
+
+All Rights Reserved.
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sub license, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+**************************************************************************/
+
+#include "intel_display_types.h"
+#include "intel_dvo_dev.h"
+
+#define SIL164_VID 0x0001
+#define SIL164_DID 0x0006
+
+#define SIL164_VID_LO 0x00
+#define SIL164_VID_HI 0x01
+#define SIL164_DID_LO 0x02
+#define SIL164_DID_HI 0x03
+#define SIL164_REV 0x04
+#define SIL164_RSVD 0x05
+#define SIL164_FREQ_LO 0x06
+#define SIL164_FREQ_HI 0x07
+
+#define SIL164_REG8 0x08
+#define SIL164_8_VEN (1<<5)
+#define SIL164_8_HEN (1<<4)
+#define SIL164_8_DSEL (1<<3)
+#define SIL164_8_BSEL (1<<2)
+#define SIL164_8_EDGE (1<<1)
+#define SIL164_8_PD (1<<0)
+
+#define SIL164_REG9 0x09
+#define SIL164_9_VLOW (1<<7)
+#define SIL164_9_MSEL_MASK (0x7<<4)
+#define SIL164_9_TSEL (1<<3)
+#define SIL164_9_RSEN (1<<2)
+#define SIL164_9_HTPLG (1<<1)
+#define SIL164_9_MDI (1<<0)
+
+#define SIL164_REGC 0x0c
+
+struct sil164_priv {
+ bool quiet;
+};
+
+#define SILPTR(d) ((SIL164Ptr)(d->DriverPrivate.ptr))
+
+static bool sil164_readb(struct intel_dvo_device *dvo, int addr, u8 *ch)
+{
+ struct sil164_priv *sil = dvo->dev_priv;
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+ u8 out_buf[2];
+ u8 in_buf[2];
+
+ struct i2c_msg msgs[] = {
+ {
+ .addr = dvo->slave_addr,
+ .flags = 0,
+ .len = 1,
+ .buf = out_buf,
+ },
+ {
+ .addr = dvo->slave_addr,
+ .flags = I2C_M_RD,
+ .len = 1,
+ .buf = in_buf,
+ }
+ };
+
+ out_buf[0] = addr;
+ out_buf[1] = 0;
+
+ if (i2c_transfer(adapter, msgs, 2) == 2) {
+ *ch = in_buf[0];
+ return true;
+ }
+
+ if (!sil->quiet) {
+ DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
+ addr, adapter->name, dvo->slave_addr);
+ }
+ return false;
+}
+
+static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, u8 ch)
+{
+ struct sil164_priv *sil = dvo->dev_priv;
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+ u8 out_buf[2];
+ struct i2c_msg msg = {
+ .addr = dvo->slave_addr,
+ .flags = 0,
+ .len = 2,
+ .buf = out_buf,
+ };
+
+ out_buf[0] = addr;
+ out_buf[1] = ch;
+
+ if (i2c_transfer(adapter, &msg, 1) == 1)
+ return true;
+
+ if (!sil->quiet) {
+ DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
+ addr, adapter->name, dvo->slave_addr);
+ }
+
+ return false;
+}
+
+/* Silicon Image 164 driver for chip on i2c bus */
+static bool sil164_init(struct intel_dvo_device *dvo,
+ struct i2c_adapter *adapter)
+{
+ /* this will detect the SIL164 chip on the specified i2c bus */
+ struct sil164_priv *sil;
+ unsigned char ch;
+
+ sil = kzalloc(sizeof(struct sil164_priv), GFP_KERNEL);
+ if (sil == NULL)
+ return false;
+
+ dvo->i2c_bus = adapter;
+ dvo->dev_priv = sil;
+ sil->quiet = true;
+
+ if (!sil164_readb(dvo, SIL164_VID_LO, &ch))
+ goto out;
+
+ if (ch != (SIL164_VID & 0xff)) {
+ DRM_DEBUG_KMS("sil164 not detected got %d: from %s Slave %d.\n",
+ ch, adapter->name, dvo->slave_addr);
+ goto out;
+ }
+
+ if (!sil164_readb(dvo, SIL164_DID_LO, &ch))
+ goto out;
+
+ if (ch != (SIL164_DID & 0xff)) {
+ DRM_DEBUG_KMS("sil164 not detected got %d: from %s Slave %d.\n",
+ ch, adapter->name, dvo->slave_addr);
+ goto out;
+ }
+ sil->quiet = false;
+
+ DRM_DEBUG_KMS("init sil164 dvo controller successfully!\n");
+ return true;
+
+out:
+ kfree(sil);
+ return false;
+}
+
+static enum drm_connector_status sil164_detect(struct intel_dvo_device *dvo)
+{
+ u8 reg9;
+
+ sil164_readb(dvo, SIL164_REG9, &reg9);
+
+ if (reg9 & SIL164_9_HTPLG)
+ return connector_status_connected;
+ else
+ return connector_status_disconnected;
+}
+
+static enum drm_mode_status sil164_mode_valid(struct intel_dvo_device *dvo,
+ struct drm_display_mode *mode)
+{
+ return MODE_OK;
+}
+
+static void sil164_mode_set(struct intel_dvo_device *dvo,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode)
+{
+ /* As long as the basics are set up, since we don't have clock
+ * dependencies in the mode setup, we can just leave the
+ * registers alone and everything will work fine.
+ */
+ /*
+ * Recommended programming sequence from the documentation:
+ *
+ * sil164_writeb(sil, 0x08, 0x30);
+ * sil164_writeb(sil, 0x09, 0x00);
+ * sil164_writeb(sil, 0x0a, 0x90);
+ * sil164_writeb(sil, 0x0c, 0x89);
+ * sil164_writeb(sil, 0x08, 0x31);
+ *
+ * For now, don't do much.
+ */
+}
+
+/* set the SIL164 power state */
+static void sil164_dpms(struct intel_dvo_device *dvo, bool enable)
+{
+ unsigned char ch;
+
+ if (!sil164_readb(dvo, SIL164_REG8, &ch))
+ return;
+
+ if (enable)
+ ch |= SIL164_8_PD;
+ else
+ ch &= ~SIL164_8_PD;
+
+ sil164_writeb(dvo, SIL164_REG8, ch);
+}
+
+static bool sil164_get_hw_state(struct intel_dvo_device *dvo)
+{
+ unsigned char ch;
+
+ if (!sil164_readb(dvo, SIL164_REG8, &ch))
+ return false;
+
+ return ch & SIL164_8_PD;
+}
+
+static void sil164_dump_regs(struct intel_dvo_device *dvo)
+{
+ u8 val;
+
+ sil164_readb(dvo, SIL164_FREQ_LO, &val);
+ DRM_DEBUG_KMS("SIL164_FREQ_LO: 0x%02x\n", val);
+ sil164_readb(dvo, SIL164_FREQ_HI, &val);
+ DRM_DEBUG_KMS("SIL164_FREQ_HI: 0x%02x\n", val);
+ sil164_readb(dvo, SIL164_REG8, &val);
+ DRM_DEBUG_KMS("SIL164_REG8: 0x%02x\n", val);
+ sil164_readb(dvo, SIL164_REG9, &val);
+ DRM_DEBUG_KMS("SIL164_REG9: 0x%02x\n", val);
+ sil164_readb(dvo, SIL164_REGC, &val);
+ DRM_DEBUG_KMS("SIL164_REGC: 0x%02x\n", val);
+}
+
+static void sil164_destroy(struct intel_dvo_device *dvo)
+{
+ struct sil164_priv *sil = dvo->dev_priv;
+
+ if (sil) {
+ kfree(sil);
+ dvo->dev_priv = NULL;
+ }
+}
+
+const struct intel_dvo_dev_ops sil164_ops = {
+ .init = sil164_init,
+ .detect = sil164_detect,
+ .mode_valid = sil164_mode_valid,
+ .mode_set = sil164_mode_set,
+ .dpms = sil164_dpms,
+ .get_hw_state = sil164_get_hw_state,
+ .dump_regs = sil164_dump_regs,
+ .destroy = sil164_destroy,
+};
diff --git a/drivers/gpu/drm/i915/display/dvo_tfp410.c b/drivers/gpu/drm/i915/display/dvo_tfp410.c
new file mode 100644
index 000000000..009d65b0f
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/dvo_tfp410.c
@@ -0,0 +1,319 @@
+/*
+ * Copyright © 2007 Dave Mueller
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Dave Mueller <dave.mueller@gmx.ch>
+ *
+ */
+
+#include "intel_display_types.h"
+#include "intel_dvo_dev.h"
+
+/* register definitions according to the TFP410 data sheet */
+#define TFP410_VID 0x014C
+#define TFP410_DID 0x0410
+
+#define TFP410_VID_LO 0x00
+#define TFP410_VID_HI 0x01
+#define TFP410_DID_LO 0x02
+#define TFP410_DID_HI 0x03
+#define TFP410_REV 0x04
+
+#define TFP410_CTL_1 0x08
+#define TFP410_CTL_1_TDIS (1<<6)
+#define TFP410_CTL_1_VEN (1<<5)
+#define TFP410_CTL_1_HEN (1<<4)
+#define TFP410_CTL_1_DSEL (1<<3)
+#define TFP410_CTL_1_BSEL (1<<2)
+#define TFP410_CTL_1_EDGE (1<<1)
+#define TFP410_CTL_1_PD (1<<0)
+
+#define TFP410_CTL_2 0x09
+#define TFP410_CTL_2_VLOW (1<<7)
+#define TFP410_CTL_2_MSEL_MASK (0x7<<4)
+#define TFP410_CTL_2_MSEL (1<<4)
+#define TFP410_CTL_2_TSEL (1<<3)
+#define TFP410_CTL_2_RSEN (1<<2)
+#define TFP410_CTL_2_HTPLG (1<<1)
+#define TFP410_CTL_2_MDI (1<<0)
+
+#define TFP410_CTL_3 0x0A
+#define TFP410_CTL_3_DK_MASK (0x7<<5)
+#define TFP410_CTL_3_DK (1<<5)
+#define TFP410_CTL_3_DKEN (1<<4)
+#define TFP410_CTL_3_CTL_MASK (0x7<<1)
+#define TFP410_CTL_3_CTL (1<<1)
+
+#define TFP410_USERCFG 0x0B
+
+#define TFP410_DE_DLY 0x32
+
+#define TFP410_DE_CTL 0x33
+#define TFP410_DE_CTL_DEGEN (1<<6)
+#define TFP410_DE_CTL_VSPOL (1<<5)
+#define TFP410_DE_CTL_HSPOL (1<<4)
+#define TFP410_DE_CTL_DEDLY8 (1<<0)
+
+#define TFP410_DE_TOP 0x34
+
+#define TFP410_DE_CNT_LO 0x36
+#define TFP410_DE_CNT_HI 0x37
+
+#define TFP410_DE_LIN_LO 0x38
+#define TFP410_DE_LIN_HI 0x39
+
+#define TFP410_H_RES_LO 0x3A
+#define TFP410_H_RES_HI 0x3B
+
+#define TFP410_V_RES_LO 0x3C
+#define TFP410_V_RES_HI 0x3D
+
+struct tfp410_priv {
+ bool quiet;
+};
+
+static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, u8 *ch)
+{
+ struct tfp410_priv *tfp = dvo->dev_priv;
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+ u8 out_buf[2];
+ u8 in_buf[2];
+
+ struct i2c_msg msgs[] = {
+ {
+ .addr = dvo->slave_addr,
+ .flags = 0,
+ .len = 1,
+ .buf = out_buf,
+ },
+ {
+ .addr = dvo->slave_addr,
+ .flags = I2C_M_RD,
+ .len = 1,
+ .buf = in_buf,
+ }
+ };
+
+ out_buf[0] = addr;
+ out_buf[1] = 0;
+
+ if (i2c_transfer(adapter, msgs, 2) == 2) {
+ *ch = in_buf[0];
+ return true;
+ }
+
+ if (!tfp->quiet) {
+ DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
+ addr, adapter->name, dvo->slave_addr);
+ }
+ return false;
+}
+
+static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, u8 ch)
+{
+ struct tfp410_priv *tfp = dvo->dev_priv;
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+ u8 out_buf[2];
+ struct i2c_msg msg = {
+ .addr = dvo->slave_addr,
+ .flags = 0,
+ .len = 2,
+ .buf = out_buf,
+ };
+
+ out_buf[0] = addr;
+ out_buf[1] = ch;
+
+ if (i2c_transfer(adapter, &msg, 1) == 1)
+ return true;
+
+ if (!tfp->quiet) {
+ DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
+ addr, adapter->name, dvo->slave_addr);
+ }
+
+ return false;
+}
+
+static int tfp410_getid(struct intel_dvo_device *dvo, int addr)
+{
+ u8 ch1, ch2;
+
+ if (tfp410_readb(dvo, addr+0, &ch1) &&
+ tfp410_readb(dvo, addr+1, &ch2))
+ return ((ch2 << 8) & 0xFF00) | (ch1 & 0x00FF);
+
+ return -1;
+}
+
+/* Ti TFP410 driver for chip on i2c bus */
+static bool tfp410_init(struct intel_dvo_device *dvo,
+ struct i2c_adapter *adapter)
+{
+ /* this will detect the tfp410 chip on the specified i2c bus */
+ struct tfp410_priv *tfp;
+ int id;
+
+ tfp = kzalloc(sizeof(struct tfp410_priv), GFP_KERNEL);
+ if (tfp == NULL)
+ return false;
+
+ dvo->i2c_bus = adapter;
+ dvo->dev_priv = tfp;
+ tfp->quiet = true;
+
+ if ((id = tfp410_getid(dvo, TFP410_VID_LO)) != TFP410_VID) {
+ DRM_DEBUG_KMS("tfp410 not detected got VID %X: from %s "
+ "Slave %d.\n",
+ id, adapter->name, dvo->slave_addr);
+ goto out;
+ }
+
+ if ((id = tfp410_getid(dvo, TFP410_DID_LO)) != TFP410_DID) {
+ DRM_DEBUG_KMS("tfp410 not detected got DID %X: from %s "
+ "Slave %d.\n",
+ id, adapter->name, dvo->slave_addr);
+ goto out;
+ }
+ tfp->quiet = false;
+ return true;
+out:
+ kfree(tfp);
+ return false;
+}
+
+static enum drm_connector_status tfp410_detect(struct intel_dvo_device *dvo)
+{
+ enum drm_connector_status ret = connector_status_disconnected;
+ u8 ctl2;
+
+ if (tfp410_readb(dvo, TFP410_CTL_2, &ctl2)) {
+ if (ctl2 & TFP410_CTL_2_RSEN)
+ ret = connector_status_connected;
+ else
+ ret = connector_status_disconnected;
+ }
+
+ return ret;
+}
+
+static enum drm_mode_status tfp410_mode_valid(struct intel_dvo_device *dvo,
+ struct drm_display_mode *mode)
+{
+ return MODE_OK;
+}
+
+static void tfp410_mode_set(struct intel_dvo_device *dvo,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode)
+{
+ /* As long as the basics are set up, since we don't have clock dependencies
+ * in the mode setup, we can just leave the registers alone and everything
+ * will work fine.
+ */
+ /* don't do much */
+}
+
+/* set the tfp410 power state */
+static void tfp410_dpms(struct intel_dvo_device *dvo, bool enable)
+{
+ u8 ctl1;
+
+ if (!tfp410_readb(dvo, TFP410_CTL_1, &ctl1))
+ return;
+
+ if (enable)
+ ctl1 |= TFP410_CTL_1_PD;
+ else
+ ctl1 &= ~TFP410_CTL_1_PD;
+
+ tfp410_writeb(dvo, TFP410_CTL_1, ctl1);
+}
+
+static bool tfp410_get_hw_state(struct intel_dvo_device *dvo)
+{
+ u8 ctl1;
+
+ if (!tfp410_readb(dvo, TFP410_CTL_1, &ctl1))
+ return false;
+
+ return ctl1 & TFP410_CTL_1_PD;
+}
+
+static void tfp410_dump_regs(struct intel_dvo_device *dvo)
+{
+ u8 val, val2;
+
+ tfp410_readb(dvo, TFP410_REV, &val);
+ DRM_DEBUG_KMS("TFP410_REV: 0x%02X\n", val);
+ tfp410_readb(dvo, TFP410_CTL_1, &val);
+ DRM_DEBUG_KMS("TFP410_CTL1: 0x%02X\n", val);
+ tfp410_readb(dvo, TFP410_CTL_2, &val);
+ DRM_DEBUG_KMS("TFP410_CTL2: 0x%02X\n", val);
+ tfp410_readb(dvo, TFP410_CTL_3, &val);
+ DRM_DEBUG_KMS("TFP410_CTL3: 0x%02X\n", val);
+ tfp410_readb(dvo, TFP410_USERCFG, &val);
+ DRM_DEBUG_KMS("TFP410_USERCFG: 0x%02X\n", val);
+ tfp410_readb(dvo, TFP410_DE_DLY, &val);
+ DRM_DEBUG_KMS("TFP410_DE_DLY: 0x%02X\n", val);
+ tfp410_readb(dvo, TFP410_DE_CTL, &val);
+ DRM_DEBUG_KMS("TFP410_DE_CTL: 0x%02X\n", val);
+ tfp410_readb(dvo, TFP410_DE_TOP, &val);
+ DRM_DEBUG_KMS("TFP410_DE_TOP: 0x%02X\n", val);
+ tfp410_readb(dvo, TFP410_DE_CNT_LO, &val);
+ tfp410_readb(dvo, TFP410_DE_CNT_HI, &val2);
+ DRM_DEBUG_KMS("TFP410_DE_CNT: 0x%02X%02X\n", val2, val);
+ tfp410_readb(dvo, TFP410_DE_LIN_LO, &val);
+ tfp410_readb(dvo, TFP410_DE_LIN_HI, &val2);
+ DRM_DEBUG_KMS("TFP410_DE_LIN: 0x%02X%02X\n", val2, val);
+ tfp410_readb(dvo, TFP410_H_RES_LO, &val);
+ tfp410_readb(dvo, TFP410_H_RES_HI, &val2);
+ DRM_DEBUG_KMS("TFP410_H_RES: 0x%02X%02X\n", val2, val);
+ tfp410_readb(dvo, TFP410_V_RES_LO, &val);
+ tfp410_readb(dvo, TFP410_V_RES_HI, &val2);
+ DRM_DEBUG_KMS("TFP410_V_RES: 0x%02X%02X\n", val2, val);
+}
+
+static void tfp410_destroy(struct intel_dvo_device *dvo)
+{
+ struct tfp410_priv *tfp = dvo->dev_priv;
+
+ if (tfp) {
+ kfree(tfp);
+ dvo->dev_priv = NULL;
+ }
+}
+
+const struct intel_dvo_dev_ops tfp410_ops = {
+ .init = tfp410_init,
+ .detect = tfp410_detect,
+ .mode_valid = tfp410_mode_valid,
+ .mode_set = tfp410_mode_set,
+ .dpms = tfp410_dpms,
+ .get_hw_state = tfp410_get_hw_state,
+ .dump_regs = tfp410_dump_regs,
+ .destroy = tfp410_destroy,
+};
diff --git a/drivers/gpu/drm/i915/display/g4x_dp.c b/drivers/gpu/drm/i915/display/g4x_dp.c
new file mode 100644
index 000000000..e3e3d27ff
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/g4x_dp.c
@@ -0,0 +1,1404 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ *
+ * DisplayPort support for G4x,ILK,SNB,IVB,VLV,CHV (HSW+ handled by the DDI code).
+ */
+
+#include <linux/string_helpers.h>
+
+#include "g4x_dp.h"
+#include "intel_audio.h"
+#include "intel_backlight.h"
+#include "intel_connector.h"
+#include "intel_crtc.h"
+#include "intel_de.h"
+#include "intel_display_power.h"
+#include "intel_display_types.h"
+#include "intel_dp.h"
+#include "intel_dp_link_training.h"
+#include "intel_dpio_phy.h"
+#include "intel_fifo_underrun.h"
+#include "intel_hdmi.h"
+#include "intel_hotplug.h"
+#include "intel_pch_display.h"
+#include "intel_pps.h"
+#include "vlv_sideband.h"
+
+static const struct dpll g4x_dpll[] = {
+ { .dot = 162000, .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8, },
+ { .dot = 270000, .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2, },
+};
+
+static const struct dpll pch_dpll[] = {
+ { .dot = 162000, .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9, },
+ { .dot = 270000, .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8, },
+};
+
+static const struct dpll vlv_dpll[] = {
+ { .dot = 162000, .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81, },
+ { .dot = 270000, .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27, },
+};
+
+static const struct dpll chv_dpll[] = {
+ /* m2 is .22 binary fixed point */
+ { .dot = 162000, .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
+ { .dot = 270000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
+};
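
As a worked example of the .22 fixed-point encoding (illustration only;
integer math is used since kernel code avoids floating point): m2 = 32.4 is
stored as round(32.4 * 2^22):

    u32 m2 = (324u * (1u << 22) + 5) / 10;  /* = 135895450 = 0x819999a */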
+
+const struct dpll *vlv_get_dpll(struct drm_i915_private *i915)
+{
+ return IS_CHERRYVIEW(i915) ? &chv_dpll[0] : &vlv_dpll[0];
+}
+
+void g4x_dp_set_clock(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ const struct dpll *divisor = NULL;
+ int i, count = 0;
+
+ if (IS_G4X(dev_priv)) {
+ divisor = g4x_dpll;
+ count = ARRAY_SIZE(g4x_dpll);
+ } else if (HAS_PCH_SPLIT(dev_priv)) {
+ divisor = pch_dpll;
+ count = ARRAY_SIZE(pch_dpll);
+ } else if (IS_CHERRYVIEW(dev_priv)) {
+ divisor = chv_dpll;
+ count = ARRAY_SIZE(chv_dpll);
+ } else if (IS_VALLEYVIEW(dev_priv)) {
+ divisor = vlv_dpll;
+ count = ARRAY_SIZE(vlv_dpll);
+ }
+
+ if (divisor && count) {
+ for (i = 0; i < count; i++) {
+ if (pipe_config->port_clock == divisor[i].dot) {
+ pipe_config->dpll = divisor[i];
+ pipe_config->clock_set = true;
+ break;
+ }
+ }
+ }
+}
+
+static void intel_dp_prepare(struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ enum port port = encoder->port;
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+ const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
+
+ intel_dp_set_link_params(intel_dp,
+ pipe_config->port_clock,
+ pipe_config->lane_count);
+
+ /*
+ * There are four kinds of DP registers:
+ * IBX PCH
+ * SNB CPU
+ * IVB CPU
+ * CPT PCH
+ *
+ * IBX PCH and CPU are the same for almost everything,
+ * except that the CPU DP PLL is configured in this
+ * register
+ *
+ * CPT PCH is quite different, having many bits moved
+ * to the TRANS_DP_CTL register instead. That
+ * configuration happens (oddly) in ilk_pch_enable
+ */
+
+ /* Preserve the BIOS-computed detected bit. This is
+ * supposed to be read-only.
+ */
+ intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED;
+
+ /* Handle DP bits in common between all three register formats */
+ intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
+ intel_dp->DP |= DP_PORT_WIDTH(pipe_config->lane_count);
+
+ /* Split out the IBX/CPU vs CPT settings */
+
+ if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ intel_dp->DP |= DP_SYNC_HS_HIGH;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ intel_dp->DP |= DP_SYNC_VS_HIGH;
+ intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
+
+ if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
+ intel_dp->DP |= DP_ENHANCED_FRAMING;
+
+ intel_dp->DP |= DP_PIPE_SEL_IVB(crtc->pipe);
+ } else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
+ u32 trans_dp;
+
+ intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
+
+ trans_dp = intel_de_read(dev_priv, TRANS_DP_CTL(crtc->pipe));
+ if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
+ trans_dp |= TRANS_DP_ENH_FRAMING;
+ else
+ trans_dp &= ~TRANS_DP_ENH_FRAMING;
+ intel_de_write(dev_priv, TRANS_DP_CTL(crtc->pipe), trans_dp);
+ } else {
+ if (IS_G4X(dev_priv) && pipe_config->limited_color_range)
+ intel_dp->DP |= DP_COLOR_RANGE_16_235;
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ intel_dp->DP |= DP_SYNC_HS_HIGH;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ intel_dp->DP |= DP_SYNC_VS_HIGH;
+ intel_dp->DP |= DP_LINK_TRAIN_OFF;
+
+ if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
+ intel_dp->DP |= DP_ENHANCED_FRAMING;
+
+ if (IS_CHERRYVIEW(dev_priv))
+ intel_dp->DP |= DP_PIPE_SEL_CHV(crtc->pipe);
+ else
+ intel_dp->DP |= DP_PIPE_SEL(crtc->pipe);
+ }
+}
+
+static void assert_dp_port(struct intel_dp *intel_dp, bool state)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+ bool cur_state = intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN;
+
+ I915_STATE_WARN(cur_state != state,
+ "[ENCODER:%d:%s] state assertion failure (expected %s, current %s)\n",
+ dig_port->base.base.base.id, dig_port->base.base.name,
+ str_on_off(state), str_on_off(cur_state));
+}
+#define assert_dp_port_disabled(d) assert_dp_port((d), false)
+
+static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
+{
+ bool cur_state = intel_de_read(dev_priv, DP_A) & DP_PLL_ENABLE;
+
+ I915_STATE_WARN(cur_state != state,
+ "eDP PLL state assertion failure (expected %s, current %s)\n",
+ str_on_off(state), str_on_off(cur_state));
+}
+#define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
+#define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
+
+static void ilk_edp_pll_on(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *pipe_config)
+{
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+ assert_transcoder_disabled(dev_priv, pipe_config->cpu_transcoder);
+ assert_dp_port_disabled(intel_dp);
+ assert_edp_pll_disabled(dev_priv);
+
+ drm_dbg_kms(&dev_priv->drm, "enabling eDP PLL for clock %d\n",
+ pipe_config->port_clock);
+
+ intel_dp->DP &= ~DP_PLL_FREQ_MASK;
+
+ if (pipe_config->port_clock == 162000)
+ intel_dp->DP |= DP_PLL_FREQ_162MHZ;
+ else
+ intel_dp->DP |= DP_PLL_FREQ_270MHZ;
+
+ intel_de_write(dev_priv, DP_A, intel_dp->DP);
+ intel_de_posting_read(dev_priv, DP_A);
+ udelay(500);
+
+ /*
+ * [DevILK] Work around required when enabling DP PLL
+ * while a pipe is enabled going to FDI:
+ * 1. Wait for the start of vertical blank on the enabled pipe going to FDI
+ * 2. Program DP PLL enable
+ */
+ if (IS_IRONLAKE(dev_priv))
+ intel_wait_for_vblank_if_active(dev_priv, !crtc->pipe);
+
+ intel_dp->DP |= DP_PLL_ENABLE;
+
+ intel_de_write(dev_priv, DP_A, intel_dp->DP);
+ intel_de_posting_read(dev_priv, DP_A);
+ udelay(200);
+}
+
+static void ilk_edp_pll_off(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *old_crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+ assert_transcoder_disabled(dev_priv, old_crtc_state->cpu_transcoder);
+ assert_dp_port_disabled(intel_dp);
+ assert_edp_pll_enabled(dev_priv);
+
+ drm_dbg_kms(&dev_priv->drm, "disabling eDP PLL\n");
+
+ intel_dp->DP &= ~DP_PLL_ENABLE;
+
+ intel_de_write(dev_priv, DP_A, intel_dp->DP);
+ intel_de_posting_read(dev_priv, DP_A);
+ udelay(200);
+}
+
+static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv,
+ enum port port, enum pipe *pipe)
+{
+ enum pipe p;
+
+ for_each_pipe(dev_priv, p) {
+ u32 val = intel_de_read(dev_priv, TRANS_DP_CTL(p));
+
+ if ((val & TRANS_DP_PORT_SEL_MASK) == TRANS_DP_PORT_SEL(port)) {
+ *pipe = p;
+ return true;
+ }
+ }
+
+ drm_dbg_kms(&dev_priv->drm, "No pipe for DP port %c found\n",
+ port_name(port));
+
+ /* must initialize pipe to something for the asserts */
+ *pipe = PIPE_A;
+
+ return false;
+}
+
+bool g4x_dp_port_enabled(struct drm_i915_private *dev_priv,
+ i915_reg_t dp_reg, enum port port,
+ enum pipe *pipe)
+{
+ bool ret;
+ u32 val;
+
+ val = intel_de_read(dev_priv, dp_reg);
+
+ ret = val & DP_PORT_EN;
+
+ /* asserts want to know the pipe even if the port is disabled */
+ if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
+ *pipe = (val & DP_PIPE_SEL_MASK_IVB) >> DP_PIPE_SEL_SHIFT_IVB;
+ else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
+ ret &= cpt_dp_port_selected(dev_priv, port, pipe);
+ else if (IS_CHERRYVIEW(dev_priv))
+ *pipe = (val & DP_PIPE_SEL_MASK_CHV) >> DP_PIPE_SEL_SHIFT_CHV;
+ else
+ *pipe = (val & DP_PIPE_SEL_MASK) >> DP_PIPE_SEL_SHIFT;
+
+ return ret;
+}
+
+static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ intel_wakeref_t wakeref;
+ bool ret;
+
+ wakeref = intel_display_power_get_if_enabled(dev_priv,
+ encoder->power_domain);
+ if (!wakeref)
+ return false;
+
+ ret = g4x_dp_port_enabled(dev_priv, intel_dp->output_reg,
+ encoder->port, pipe);
+
+ intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
+
+ return ret;
+}
+
+static void g4x_dp_get_m_n(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ if (crtc_state->has_pch_encoder) {
+ intel_pch_transcoder_get_m1_n1(crtc, &crtc_state->dp_m_n);
+ intel_pch_transcoder_get_m2_n2(crtc, &crtc_state->dp_m2_n2);
+ } else {
+ intel_cpu_transcoder_get_m1_n1(crtc, crtc_state->cpu_transcoder,
+ &crtc_state->dp_m_n);
+ intel_cpu_transcoder_get_m2_n2(crtc, crtc_state->cpu_transcoder,
+ &crtc_state->dp_m2_n2);
+ }
+}
+
+static void intel_dp_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ u32 tmp, flags = 0;
+ enum port port = encoder->port;
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+
+ if (encoder->type == INTEL_OUTPUT_EDP)
+ pipe_config->output_types |= BIT(INTEL_OUTPUT_EDP);
+ else
+ pipe_config->output_types |= BIT(INTEL_OUTPUT_DP);
+
+ tmp = intel_de_read(dev_priv, intel_dp->output_reg);
+
+ pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
+
+ if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
+ u32 trans_dp = intel_de_read(dev_priv,
+ TRANS_DP_CTL(crtc->pipe));
+
+ if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
+ flags |= DRM_MODE_FLAG_PHSYNC;
+ else
+ flags |= DRM_MODE_FLAG_NHSYNC;
+
+ if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
+ flags |= DRM_MODE_FLAG_PVSYNC;
+ else
+ flags |= DRM_MODE_FLAG_NVSYNC;
+ } else {
+ if (tmp & DP_SYNC_HS_HIGH)
+ flags |= DRM_MODE_FLAG_PHSYNC;
+ else
+ flags |= DRM_MODE_FLAG_NHSYNC;
+
+ if (tmp & DP_SYNC_VS_HIGH)
+ flags |= DRM_MODE_FLAG_PVSYNC;
+ else
+ flags |= DRM_MODE_FLAG_NVSYNC;
+ }
+
+ pipe_config->hw.adjusted_mode.flags |= flags;
+
+ if (IS_G4X(dev_priv) && tmp & DP_COLOR_RANGE_16_235)
+ pipe_config->limited_color_range = true;
+
+ pipe_config->lane_count =
+ ((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;
+
+ g4x_dp_get_m_n(pipe_config);
+
+ if (port == PORT_A) {
+ if ((intel_de_read(dev_priv, DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
+ pipe_config->port_clock = 162000;
+ else
+ pipe_config->port_clock = 270000;
+ }
+
+ pipe_config->hw.adjusted_mode.crtc_clock =
+ intel_dotclock_calculate(pipe_config->port_clock,
+ &pipe_config->dp_m_n);
+
+ if (intel_dp_is_edp(intel_dp))
+ intel_edp_fixup_vbt_bpp(encoder, pipe_config->pipe_bpp);
+}
+
+static void
+intel_dp_link_down(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
+ enum port port = encoder->port;
+
+ if (drm_WARN_ON(&dev_priv->drm,
+ (intel_de_read(dev_priv, intel_dp->output_reg) &
+ DP_PORT_EN) == 0))
+ return;
+
+ drm_dbg_kms(&dev_priv->drm, "\n");
+
+ if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
+ (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
+ intel_dp->DP &= ~DP_LINK_TRAIN_MASK_CPT;
+ intel_dp->DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
+ } else {
+ intel_dp->DP &= ~DP_LINK_TRAIN_MASK;
+ intel_dp->DP |= DP_LINK_TRAIN_PAT_IDLE;
+ }
+ intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
+ intel_de_posting_read(dev_priv, intel_dp->output_reg);
+
+ intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
+ intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
+ intel_de_posting_read(dev_priv, intel_dp->output_reg);
+
+ /*
+ * HW workaround for IBX, we need to move the port
+ * to transcoder A after disabling it to allow the
+ * matching HDMI port to be enabled on transcoder A.
+ */
+ if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B && port != PORT_A) {
+ /*
+ * We get CPU/PCH FIFO underruns on the other pipe when
+ * doing the workaround. Sweep them under the rug.
+ */
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
+ intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
+
+ /* always enable with pattern 1 (as per spec) */
+ intel_dp->DP &= ~(DP_PIPE_SEL_MASK | DP_LINK_TRAIN_MASK);
+ intel_dp->DP |= DP_PORT_EN | DP_PIPE_SEL(PIPE_A) |
+ DP_LINK_TRAIN_PAT_1;
+ intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
+ intel_de_posting_read(dev_priv, intel_dp->output_reg);
+
+ intel_dp->DP &= ~DP_PORT_EN;
+ intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
+ intel_de_posting_read(dev_priv, intel_dp->output_reg);
+
+ intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
+ intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
+ }
+
+ msleep(intel_dp->pps.panel_power_down_delay);
+
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ intel_wakeref_t wakeref;
+
+ with_intel_pps_lock(intel_dp, wakeref)
+ intel_dp->pps.active_pipe = INVALID_PIPE;
+ }
+}
+
+static void intel_disable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ intel_dp->link_trained = false;
+
+ intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state);
+
+ /*
+ * Make sure the panel is off before trying to change the mode.
+ * But also ensure that we have vdd while we switch off the panel.
+ */
+ intel_pps_vdd_on(intel_dp);
+ intel_edp_backlight_off(old_conn_state);
+ intel_dp_set_power(intel_dp, DP_SET_POWER_D3);
+ intel_pps_off(intel_dp);
+}
+
+static void g4x_disable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ intel_disable_dp(state, encoder, old_crtc_state, old_conn_state);
+}
+
+static void vlv_disable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ intel_disable_dp(state, encoder, old_crtc_state, old_conn_state);
+}
+
+static void g4x_post_disable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ enum port port = encoder->port;
+
+ /*
+ * Bspec does not list a specific disable sequence for g4x DP.
+ * Follow the ilk+ sequence (disable pipe before the port) for
+ * g4x DP as it does not suffer from underruns like the normal
+ * g4x modeset sequence (disable pipe after the port).
+ */
+ intel_dp_link_down(encoder, old_crtc_state);
+
+ /* Only ilk+ has port A */
+ if (port == PORT_A)
+ ilk_edp_pll_off(intel_dp, old_crtc_state);
+}
+
+static void vlv_post_disable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ intel_dp_link_down(encoder, old_crtc_state);
+}
+
+static void chv_post_disable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
+ intel_dp_link_down(encoder, old_crtc_state);
+
+ vlv_dpio_get(dev_priv);
+
+ /* Assert data lane reset */
+ chv_data_lane_soft_reset(encoder, old_crtc_state, true);
+
+ vlv_dpio_put(dev_priv);
+}
+
+static void
+cpt_set_link_train(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ u8 dp_train_pat)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ intel_dp->DP &= ~DP_LINK_TRAIN_MASK_CPT;
+
+ switch (intel_dp_training_pattern_symbol(dp_train_pat)) {
+ case DP_TRAINING_PATTERN_DISABLE:
+ intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
+ break;
+ case DP_TRAINING_PATTERN_1:
+ intel_dp->DP |= DP_LINK_TRAIN_PAT_1_CPT;
+ break;
+ case DP_TRAINING_PATTERN_2:
+ intel_dp->DP |= DP_LINK_TRAIN_PAT_2_CPT;
+ break;
+ default:
+ MISSING_CASE(intel_dp_training_pattern_symbol(dp_train_pat));
+ return;
+ }
+
+ intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
+ intel_de_posting_read(dev_priv, intel_dp->output_reg);
+}
+
+static void
+g4x_set_link_train(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ u8 dp_train_pat)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ intel_dp->DP &= ~DP_LINK_TRAIN_MASK;
+
+ switch (intel_dp_training_pattern_symbol(dp_train_pat)) {
+ case DP_TRAINING_PATTERN_DISABLE:
+ intel_dp->DP |= DP_LINK_TRAIN_OFF;
+ break;
+ case DP_TRAINING_PATTERN_1:
+ intel_dp->DP |= DP_LINK_TRAIN_PAT_1;
+ break;
+ case DP_TRAINING_PATTERN_2:
+ intel_dp->DP |= DP_LINK_TRAIN_PAT_2;
+ break;
+ default:
+ MISSING_CASE(intel_dp_training_pattern_symbol(dp_train_pat));
+ return;
+ }
+
+ intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
+ intel_de_posting_read(dev_priv, intel_dp->output_reg);
+}
+
+static void intel_dp_enable_port(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ /* enable with pattern 1 (as per spec) */
+
+ intel_dp_program_link_training_pattern(intel_dp, crtc_state,
+ DP_PHY_DPRX, DP_TRAINING_PATTERN_1);
+
+ /*
+ * Magic for VLV/CHV. We _must_ first set up the register
+ * without actually enabling the port, and then do another
+ * write to enable the port. Otherwise link training will
+ * fail when the power sequencer is freshly used for this port.
+ */
+ intel_dp->DP |= DP_PORT_EN;
+ if (crtc_state->has_audio)
+ intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
+
+ intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
+ intel_de_posting_read(dev_priv, intel_dp->output_reg);
+}
+
+static void intel_enable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ u32 dp_reg = intel_de_read(dev_priv, intel_dp->output_reg);
+ intel_wakeref_t wakeref;
+
+ if (drm_WARN_ON(&dev_priv->drm, dp_reg & DP_PORT_EN))
+ return;
+
+ with_intel_pps_lock(intel_dp, wakeref) {
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ vlv_pps_init(encoder, pipe_config);
+
+ intel_dp_enable_port(intel_dp, pipe_config);
+
+ intel_pps_vdd_on_unlocked(intel_dp);
+ intel_pps_on_unlocked(intel_dp);
+ intel_pps_vdd_off_unlocked(intel_dp, true);
+ }
+
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ unsigned int lane_mask = 0x0;
+
+ if (IS_CHERRYVIEW(dev_priv))
+ lane_mask = intel_dp_unused_lane_mask(pipe_config->lane_count);
+
+ vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
+ lane_mask);
+ }
+
+ intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
+ intel_dp_configure_protocol_converter(intel_dp, pipe_config);
+ intel_dp_check_frl_training(intel_dp);
+ intel_dp_pcon_dsc_configure(intel_dp, pipe_config);
+ intel_dp_start_link_train(intel_dp, pipe_config);
+ intel_dp_stop_link_train(intel_dp, pipe_config);
+
+ intel_audio_codec_enable(encoder, pipe_config, conn_state);
+}
+
+static void g4x_enable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
+{
+ intel_enable_dp(state, encoder, pipe_config, conn_state);
+ intel_edp_backlight_on(pipe_config, conn_state);
+}
+
+static void vlv_enable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
+{
+ intel_edp_backlight_on(pipe_config, conn_state);
+}
+
+static void g4x_pre_enable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ enum port port = encoder->port;
+
+ intel_dp_prepare(encoder, pipe_config);
+
+ /* Only ilk+ has port A */
+ if (port == PORT_A)
+ ilk_edp_pll_on(intel_dp, pipe_config);
+}
+
+static void vlv_pre_enable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
+{
+ vlv_phy_pre_encoder_enable(encoder, pipe_config);
+
+ intel_enable_dp(state, encoder, pipe_config, conn_state);
+}
+
+static void vlv_dp_pre_pll_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
+{
+ intel_dp_prepare(encoder, pipe_config);
+
+ vlv_phy_pre_pll_enable(encoder, pipe_config);
+}
+
+static void chv_pre_enable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
+{
+ chv_phy_pre_encoder_enable(encoder, pipe_config);
+
+ intel_enable_dp(state, encoder, pipe_config, conn_state);
+
+ /* Second common lane will stay alive on its own now */
+ chv_phy_release_cl2_override(encoder);
+}
+
+static void chv_dp_pre_pll_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
+{
+ intel_dp_prepare(encoder, pipe_config);
+
+ chv_phy_pre_pll_enable(encoder, pipe_config);
+}
+
+static void chv_dp_post_pll_disable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ chv_phy_post_pll_disable(encoder, old_crtc_state);
+}
+
+static u8 intel_dp_voltage_max_2(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
+}
+
+static u8 intel_dp_voltage_max_3(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
+}
+
+static u8 intel_dp_preemph_max_2(struct intel_dp *intel_dp)
+{
+ return DP_TRAIN_PRE_EMPH_LEVEL_2;
+}
+
+static u8 intel_dp_preemph_max_3(struct intel_dp *intel_dp)
+{
+ return DP_TRAIN_PRE_EMPH_LEVEL_3;
+}
+
+static void vlv_set_signal_levels(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ unsigned long demph_reg_value, preemph_reg_value,
+ uniqtranscale_reg_value;
+ u8 train_set = intel_dp->train_set[0];
+
+ switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
+ case DP_TRAIN_PRE_EMPH_LEVEL_0:
+ preemph_reg_value = 0x0004000;
+ switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
+ demph_reg_value = 0x2B405555;
+ uniqtranscale_reg_value = 0x552AB83A;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
+ demph_reg_value = 0x2B404040;
+ uniqtranscale_reg_value = 0x5548B83A;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
+ demph_reg_value = 0x2B245555;
+ uniqtranscale_reg_value = 0x5560B83A;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
+ demph_reg_value = 0x2B405555;
+ uniqtranscale_reg_value = 0x5598DA3A;
+ break;
+ default:
+ return;
+ }
+ break;
+ case DP_TRAIN_PRE_EMPH_LEVEL_1:
+ preemph_reg_value = 0x0002000;
+ switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
+ demph_reg_value = 0x2B404040;
+ uniqtranscale_reg_value = 0x5552B83A;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
+ demph_reg_value = 0x2B404848;
+ uniqtranscale_reg_value = 0x5580B83A;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
+ demph_reg_value = 0x2B404040;
+ uniqtranscale_reg_value = 0x55ADDA3A;
+ break;
+ default:
+ return;
+ }
+ break;
+ case DP_TRAIN_PRE_EMPH_LEVEL_2:
+ preemph_reg_value = 0x0000000;
+ switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
+ demph_reg_value = 0x2B305555;
+ uniqtranscale_reg_value = 0x5570B83A;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
+ demph_reg_value = 0x2B2B4040;
+ uniqtranscale_reg_value = 0x55ADDA3A;
+ break;
+ default:
+ return;
+ }
+ break;
+ case DP_TRAIN_PRE_EMPH_LEVEL_3:
+ preemph_reg_value = 0x0006000;
+ switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
+ demph_reg_value = 0x1B405555;
+ uniqtranscale_reg_value = 0x55ADDA3A;
+ break;
+ default:
+ return;
+ }
+ break;
+ default:
+ return;
+ }
+
+ vlv_set_phy_signal_level(encoder, crtc_state,
+ demph_reg_value, preemph_reg_value,
+ uniqtranscale_reg_value, 0);
+}
+
+static void chv_set_signal_levels(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ u32 deemph_reg_value, margin_reg_value;
+ bool uniq_trans_scale = false;
+ u8 train_set = intel_dp->train_set[0];
+
+ switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
+ case DP_TRAIN_PRE_EMPH_LEVEL_0:
+ switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
+ deemph_reg_value = 128;
+ margin_reg_value = 52;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
+ deemph_reg_value = 128;
+ margin_reg_value = 77;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
+ deemph_reg_value = 128;
+ margin_reg_value = 102;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
+ deemph_reg_value = 128;
+ margin_reg_value = 154;
+ uniq_trans_scale = true;
+ break;
+ default:
+ return;
+ }
+ break;
+ case DP_TRAIN_PRE_EMPH_LEVEL_1:
+ switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
+ deemph_reg_value = 85;
+ margin_reg_value = 78;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
+ deemph_reg_value = 85;
+ margin_reg_value = 116;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
+ deemph_reg_value = 85;
+ margin_reg_value = 154;
+ break;
+ default:
+ return;
+ }
+ break;
+ case DP_TRAIN_PRE_EMPH_LEVEL_2:
+ switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
+ deemph_reg_value = 64;
+ margin_reg_value = 104;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
+ deemph_reg_value = 64;
+ margin_reg_value = 154;
+ break;
+ default:
+ return;
+ }
+ break;
+ case DP_TRAIN_PRE_EMPH_LEVEL_3:
+ switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
+ deemph_reg_value = 43;
+ margin_reg_value = 154;
+ break;
+ default:
+ return;
+ }
+ break;
+ default:
+ return;
+ }
+
+ chv_set_phy_signal_level(encoder, crtc_state,
+ deemph_reg_value, margin_reg_value,
+ uniq_trans_scale);
+}
+
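+/*
+ * G4x: voltage swing and pre-emphasis map directly to DP port register
+ * bits; the four swing levels correspond to 0.4/0.6/0.8/1.2 V and the
+ * pre-emphasis levels to 0/3.5/6/9.5 dB, per the DP spec.
+ */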
+static u32 g4x_signal_levels(u8 train_set)
+{
+ u32 signal_levels = 0;
+
+ switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
+ default:
+ signal_levels |= DP_VOLTAGE_0_4;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
+ signal_levels |= DP_VOLTAGE_0_6;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
+ signal_levels |= DP_VOLTAGE_0_8;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
+ signal_levels |= DP_VOLTAGE_1_2;
+ break;
+ }
+ switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
+ case DP_TRAIN_PRE_EMPH_LEVEL_0:
+ default:
+ signal_levels |= DP_PRE_EMPHASIS_0;
+ break;
+ case DP_TRAIN_PRE_EMPH_LEVEL_1:
+ signal_levels |= DP_PRE_EMPHASIS_3_5;
+ break;
+ case DP_TRAIN_PRE_EMPH_LEVEL_2:
+ signal_levels |= DP_PRE_EMPHASIS_6;
+ break;
+ case DP_TRAIN_PRE_EMPH_LEVEL_3:
+ signal_levels |= DP_PRE_EMPHASIS_9_5;
+ break;
+ }
+ return signal_levels;
+}
+
+static void
+g4x_set_signal_levels(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ u8 train_set = intel_dp->train_set[0];
+ u32 signal_levels;
+
+ signal_levels = g4x_signal_levels(train_set);
+
+ drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
+ signal_levels);
+
+ intel_dp->DP &= ~(DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK);
+ intel_dp->DP |= signal_levels;
+
+ intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
+ intel_de_posting_read(dev_priv, intel_dp->output_reg);
+}
+
+/* SNB CPU eDP voltage swing and pre-emphasis control */
+static u32 snb_cpu_edp_signal_levels(u8 train_set)
+{
+ u8 signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
+ DP_TRAIN_PRE_EMPHASIS_MASK);
+
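+	/*
+	 * SNB CPU eDP supports only a subset of the swing/pre-emphasis
+	 * combinations, so several requests share a register value
+	 * (hence the grouped case labels).
+	 */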
+ switch (signal_levels) {
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
+ return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
+ return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
+ return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
+ return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
+ return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
+ default:
+ MISSING_CASE(signal_levels);
+ return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
+ }
+}
+
+static void
+snb_cpu_edp_set_signal_levels(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ u8 train_set = intel_dp->train_set[0];
+ u32 signal_levels;
+
+ signal_levels = snb_cpu_edp_signal_levels(train_set);
+
+ drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
+ signal_levels);
+
+ intel_dp->DP &= ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
+ intel_dp->DP |= signal_levels;
+
+ intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
+ intel_de_posting_read(dev_priv, intel_dp->output_reg);
+}
+
+/* IVB CPU eDP voltage swing and pre-emphasis control */
+static u32 ivb_cpu_edp_signal_levels(u8 train_set)
+{
+ u8 signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
+ DP_TRAIN_PRE_EMPHASIS_MASK);
+
+ switch (signal_levels) {
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
+ return EDP_LINK_TRAIN_400MV_0DB_IVB;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
+ return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
+ return EDP_LINK_TRAIN_400MV_6DB_IVB;
+
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
+ return EDP_LINK_TRAIN_600MV_0DB_IVB;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
+ return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
+
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
+ return EDP_LINK_TRAIN_800MV_0DB_IVB;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
+ return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
+
+ default:
+ MISSING_CASE(signal_levels);
+ return EDP_LINK_TRAIN_500MV_0DB_IVB;
+ }
+}
+
+static void
+ivb_cpu_edp_set_signal_levels(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ u8 train_set = intel_dp->train_set[0];
+ u32 signal_levels;
+
+ signal_levels = ivb_cpu_edp_signal_levels(train_set);
+
+ drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
+ signal_levels);
+
+ intel_dp->DP &= ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
+ intel_dp->DP |= signal_levels;
+
+ intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
+ intel_de_posting_read(dev_priv, intel_dp->output_reg);
+}
+
+/*
+ * If the display is now connected, check the link status; there have
+ * been known issues of link loss triggering a long pulse.
+ *
+ * Some sinks (e.g. ASUS PB287Q) seem to perform a weird HPD ping-pong
+ * during modesets: HPD can go low during a modeset and come back up
+ * soon after. Once that happens we must retrain the link to get a
+ * picture, in case no userspace component reacted to the intermittent
+ * HPD dip.
+ */
+static enum intel_hotplug_state
+intel_dp_hotplug(struct intel_encoder *encoder,
+ struct intel_connector *connector)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct drm_modeset_acquire_ctx ctx;
+ enum intel_hotplug_state state;
+ int ret;
+
+ if (intel_dp->compliance.test_active &&
+ intel_dp->compliance.test_type == DP_TEST_LINK_PHY_TEST_PATTERN) {
+ intel_dp_phy_test(encoder);
+ /* just do the PHY test and nothing else */
+ return INTEL_HOTPLUG_UNCHANGED;
+ }
+
+ state = intel_encoder_hotplug(encoder, connector);
+
+ drm_modeset_acquire_init(&ctx, 0);
+
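+	/* Standard drm modeset locking dance: back off and retry on -EDEADLK. */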
+ for (;;) {
+ ret = intel_dp_retrain_link(encoder, &ctx);
+
+ if (ret == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ continue;
+ }
+
+ break;
+ }
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+ drm_WARN(encoder->base.dev, ret,
+ "Acquiring modeset locks failed with %i\n", ret);
+
+ /*
+ * Keeping it consistent with intel_ddi_hotplug() and
+ * intel_hdmi_hotplug().
+ */
+ if (state == INTEL_HOTPLUG_UNCHANGED && !connector->hotplug_retries)
+ state = INTEL_HOTPLUG_RETRY;
+
+ return state;
+}
+
+static bool ibx_digital_port_connected(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ u32 bit = dev_priv->display.hotplug.pch_hpd[encoder->hpd_pin];
+
+ return intel_de_read(dev_priv, SDEISR) & bit;
+}
+
+static bool g4x_digital_port_connected(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ u32 bit;
+
+ switch (encoder->hpd_pin) {
+ case HPD_PORT_B:
+ bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
+ break;
+ case HPD_PORT_C:
+ bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
+ break;
+ case HPD_PORT_D:
+ bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
+ break;
+ default:
+ MISSING_CASE(encoder->hpd_pin);
+ return false;
+ }
+
+ return intel_de_read(dev_priv, PORT_HOTPLUG_STAT) & bit;
+}
+
+static bool gm45_digital_port_connected(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ u32 bit;
+
+ switch (encoder->hpd_pin) {
+ case HPD_PORT_B:
+ bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
+ break;
+ case HPD_PORT_C:
+ bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
+ break;
+ case HPD_PORT_D:
+ bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
+ break;
+ default:
+ MISSING_CASE(encoder->hpd_pin);
+ return false;
+ }
+
+ return intel_de_read(dev_priv, PORT_HOTPLUG_STAT) & bit;
+}
+
+static bool ilk_digital_port_connected(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ u32 bit = dev_priv->display.hotplug.hpd[encoder->hpd_pin];
+
+ return intel_de_read(dev_priv, DEISR) & bit;
+}
+
+static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
+{
+ intel_dp_encoder_flush_work(encoder);
+
+ drm_encoder_cleanup(encoder);
+ kfree(enc_to_dig_port(to_intel_encoder(encoder)));
+}
+
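+/*
+ * Figure out which pipe, if any, is currently driving this DP port by
+ * reading the port register's pipe select bits.
+ */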
+enum pipe vlv_active_pipe(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ enum pipe pipe;
+
+ if (g4x_dp_port_enabled(dev_priv, intel_dp->output_reg,
+ encoder->port, &pipe))
+ return pipe;
+
+ return INVALID_PIPE;
+}
+
+static void intel_dp_encoder_reset(struct drm_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(encoder));
+
+ intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg);
+
+ intel_dp->reset_link_params = true;
+
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ intel_wakeref_t wakeref;
+
+ with_intel_pps_lock(intel_dp, wakeref)
+ intel_dp->pps.active_pipe = vlv_active_pipe(intel_dp);
+ }
+
+ intel_pps_encoder_reset(intel_dp);
+}
+
+static const struct drm_encoder_funcs intel_dp_enc_funcs = {
+ .reset = intel_dp_encoder_reset,
+ .destroy = intel_dp_encoder_destroy,
+};
+
+bool g4x_dp_init(struct drm_i915_private *dev_priv,
+ i915_reg_t output_reg, enum port port)
+{
+ struct intel_digital_port *dig_port;
+ struct intel_encoder *intel_encoder;
+ struct drm_encoder *encoder;
+ struct intel_connector *intel_connector;
+
+ dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL);
+ if (!dig_port)
+ return false;
+
+ intel_connector = intel_connector_alloc();
+ if (!intel_connector)
+ goto err_connector_alloc;
+
+ intel_encoder = &dig_port->base;
+ encoder = &intel_encoder->base;
+
+ mutex_init(&dig_port->hdcp_mutex);
+
+ if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
+ &intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS,
+ "DP %c", port_name(port)))
+ goto err_encoder_init;
+
+ intel_encoder->hotplug = intel_dp_hotplug;
+ intel_encoder->compute_config = intel_dp_compute_config;
+ intel_encoder->get_hw_state = intel_dp_get_hw_state;
+ intel_encoder->get_config = intel_dp_get_config;
+ intel_encoder->sync_state = intel_dp_sync_state;
+ intel_encoder->initial_fastset_check = intel_dp_initial_fastset_check;
+ intel_encoder->update_pipe = intel_backlight_update;
+ intel_encoder->suspend = intel_dp_encoder_suspend;
+ intel_encoder->shutdown = intel_dp_encoder_shutdown;
+ if (IS_CHERRYVIEW(dev_priv)) {
+ intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
+ intel_encoder->pre_enable = chv_pre_enable_dp;
+ intel_encoder->enable = vlv_enable_dp;
+ intel_encoder->disable = vlv_disable_dp;
+ intel_encoder->post_disable = chv_post_disable_dp;
+ intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
+ } else if (IS_VALLEYVIEW(dev_priv)) {
+ intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
+ intel_encoder->pre_enable = vlv_pre_enable_dp;
+ intel_encoder->enable = vlv_enable_dp;
+ intel_encoder->disable = vlv_disable_dp;
+ intel_encoder->post_disable = vlv_post_disable_dp;
+ } else {
+ intel_encoder->pre_enable = g4x_pre_enable_dp;
+ intel_encoder->enable = g4x_enable_dp;
+ intel_encoder->disable = g4x_disable_dp;
+ intel_encoder->post_disable = g4x_post_disable_dp;
+ }
+
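+	/*
+	 * IVB CPU eDP and CPT PCH DP ports use a different (CPT-style)
+	 * link training pattern field in the DP register, hence the
+	 * dedicated hook.
+	 */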
+ if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
+ (HAS_PCH_CPT(dev_priv) && port != PORT_A))
+ dig_port->dp.set_link_train = cpt_set_link_train;
+ else
+ dig_port->dp.set_link_train = g4x_set_link_train;
+
+ if (IS_CHERRYVIEW(dev_priv))
+ intel_encoder->set_signal_levels = chv_set_signal_levels;
+ else if (IS_VALLEYVIEW(dev_priv))
+ intel_encoder->set_signal_levels = vlv_set_signal_levels;
+ else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
+ intel_encoder->set_signal_levels = ivb_cpu_edp_set_signal_levels;
+ else if (IS_SANDYBRIDGE(dev_priv) && port == PORT_A)
+ intel_encoder->set_signal_levels = snb_cpu_edp_set_signal_levels;
+ else
+ intel_encoder->set_signal_levels = g4x_set_signal_levels;
+
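+	/*
+	 * Max supported swing/pre-emphasis: level 3 on VLV/CHV and PCH
+	 * ports, level 2 on g4x and CPU eDP (port A).
+	 */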
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv) ||
+ (HAS_PCH_SPLIT(dev_priv) && port != PORT_A)) {
+ dig_port->dp.preemph_max = intel_dp_preemph_max_3;
+ dig_port->dp.voltage_max = intel_dp_voltage_max_3;
+ } else {
+ dig_port->dp.preemph_max = intel_dp_preemph_max_2;
+ dig_port->dp.voltage_max = intel_dp_voltage_max_2;
+ }
+
+ dig_port->dp.output_reg = output_reg;
+ dig_port->max_lanes = 4;
+
+ intel_encoder->type = INTEL_OUTPUT_DP;
+ intel_encoder->power_domain = intel_display_power_ddi_lanes_domain(dev_priv, port);
+ if (IS_CHERRYVIEW(dev_priv)) {
+ if (port == PORT_D)
+ intel_encoder->pipe_mask = BIT(PIPE_C);
+ else
+ intel_encoder->pipe_mask = BIT(PIPE_A) | BIT(PIPE_B);
+ } else {
+ intel_encoder->pipe_mask = ~0;
+ }
+ intel_encoder->cloneable = 0;
+ intel_encoder->port = port;
+ intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);
+
+ dig_port->hpd_pulse = intel_dp_hpd_pulse;
+
+ if (HAS_GMCH(dev_priv)) {
+ if (IS_GM45(dev_priv))
+ dig_port->connected = gm45_digital_port_connected;
+ else
+ dig_port->connected = g4x_digital_port_connected;
+ } else {
+ if (port == PORT_A)
+ dig_port->connected = ilk_digital_port_connected;
+ else
+ dig_port->connected = ibx_digital_port_connected;
+ }
+
+ if (port != PORT_A)
+ intel_infoframe_init(dig_port);
+
+ dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
+ if (!intel_dp_init_connector(dig_port, intel_connector))
+ goto err_init_connector;
+
+ return true;
+
+err_init_connector:
+ drm_encoder_cleanup(encoder);
+err_encoder_init:
+ kfree(intel_connector);
+err_connector_alloc:
+ kfree(dig_port);
+ return false;
+}
diff --git a/drivers/gpu/drm/i915/display/g4x_dp.h b/drivers/gpu/drm/i915/display/g4x_dp.h
new file mode 100644
index 000000000..e1f50263a
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/g4x_dp.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef _G4X_DP_H_
+#define _G4X_DP_H_
+
+#include <linux/types.h>
+
+#include "i915_reg.h"
+
+enum pipe;
+enum port;
+struct drm_i915_private;
+struct intel_crtc_state;
+struct intel_dp;
+struct intel_encoder;
+
+const struct dpll *vlv_get_dpll(struct drm_i915_private *i915);
+enum pipe vlv_active_pipe(struct intel_dp *intel_dp);
+void g4x_dp_set_clock(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config);
+bool g4x_dp_port_enabled(struct drm_i915_private *dev_priv,
+ i915_reg_t dp_reg, enum port port,
+ enum pipe *pipe);
+bool g4x_dp_init(struct drm_i915_private *dev_priv,
+ i915_reg_t output_reg, enum port port);
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/g4x_hdmi.c b/drivers/gpu/drm/i915/display/g4x_hdmi.c
new file mode 100644
index 000000000..2b73f5ff0
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/g4x_hdmi.c
@@ -0,0 +1,606 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ *
+ * HDMI support for G4x, ILK, SNB, IVB, VLV and CHV (HSW+ is handled by the DDI code).
+ */
+
+#include "g4x_hdmi.h"
+#include "intel_audio.h"
+#include "intel_connector.h"
+#include "intel_crtc.h"
+#include "intel_de.h"
+#include "intel_display_power.h"
+#include "intel_display_types.h"
+#include "intel_dpio_phy.h"
+#include "intel_fifo_underrun.h"
+#include "intel_hdmi.h"
+#include "intel_hotplug.h"
+#include "intel_sdvo.h"
+#include "vlv_sideband.h"
+
+static void intel_hdmi_prepare(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
+ u32 hdmi_val;
+
+ intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
+
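+	/*
+	 * Assemble the port control value; SDVO_ENABLE itself is set
+	 * later by the enable hooks.
+	 */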
+ hdmi_val = SDVO_ENCODING_HDMI;
+ if (!HAS_PCH_SPLIT(dev_priv) && crtc_state->limited_color_range)
+ hdmi_val |= HDMI_COLOR_RANGE_16_235;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ hdmi_val |= SDVO_VSYNC_ACTIVE_HIGH;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ hdmi_val |= SDVO_HSYNC_ACTIVE_HIGH;
+
+ if (crtc_state->pipe_bpp > 24)
+ hdmi_val |= HDMI_COLOR_FORMAT_12bpc;
+ else
+ hdmi_val |= SDVO_COLOR_FORMAT_8bpc;
+
+ if (crtc_state->has_hdmi_sink)
+ hdmi_val |= HDMI_MODE_SELECT_HDMI;
+
+ if (HAS_PCH_CPT(dev_priv))
+ hdmi_val |= SDVO_PIPE_SEL_CPT(crtc->pipe);
+ else if (IS_CHERRYVIEW(dev_priv))
+ hdmi_val |= SDVO_PIPE_SEL_CHV(crtc->pipe);
+ else
+ hdmi_val |= SDVO_PIPE_SEL(crtc->pipe);
+
+ intel_de_write(dev_priv, intel_hdmi->hdmi_reg, hdmi_val);
+ intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
+}
+
+static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ intel_wakeref_t wakeref;
+ bool ret;
+
+ wakeref = intel_display_power_get_if_enabled(dev_priv,
+ encoder->power_domain);
+ if (!wakeref)
+ return false;
+
+ ret = intel_sdvo_port_enabled(dev_priv, intel_hdmi->hdmi_reg, pipe);
+
+ intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
+
+ return ret;
+}
+
+static void intel_hdmi_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config)
+{
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ u32 tmp, flags = 0;
+ int dotclock;
+
+ pipe_config->output_types |= BIT(INTEL_OUTPUT_HDMI);
+
+ tmp = intel_de_read(dev_priv, intel_hdmi->hdmi_reg);
+
+ if (tmp & SDVO_HSYNC_ACTIVE_HIGH)
+ flags |= DRM_MODE_FLAG_PHSYNC;
+ else
+ flags |= DRM_MODE_FLAG_NHSYNC;
+
+ if (tmp & SDVO_VSYNC_ACTIVE_HIGH)
+ flags |= DRM_MODE_FLAG_PVSYNC;
+ else
+ flags |= DRM_MODE_FLAG_NVSYNC;
+
+ if (tmp & HDMI_MODE_SELECT_HDMI)
+ pipe_config->has_hdmi_sink = true;
+
+ pipe_config->infoframes.enable |=
+ intel_hdmi_infoframes_enabled(encoder, pipe_config);
+
+ if (pipe_config->infoframes.enable)
+ pipe_config->has_infoframe = true;
+
+ if (tmp & HDMI_AUDIO_ENABLE)
+ pipe_config->has_audio = true;
+
+ if (!HAS_PCH_SPLIT(dev_priv) &&
+ tmp & HDMI_COLOR_RANGE_16_235)
+ pipe_config->limited_color_range = true;
+
+ pipe_config->hw.adjusted_mode.flags |= flags;
+
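+	/* 12bpc carries 1.5x the data of 8bpc, so dotclock = port_clock * 2/3. */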
+ if ((tmp & SDVO_COLOR_FORMAT_MASK) == HDMI_COLOR_FORMAT_12bpc)
+ dotclock = DIV_ROUND_CLOSEST(pipe_config->port_clock * 2, 3);
+ else
+ dotclock = pipe_config->port_clock;
+
+ if (pipe_config->pixel_multiplier)
+ dotclock /= pipe_config->pixel_multiplier;
+
+ pipe_config->hw.adjusted_mode.crtc_clock = dotclock;
+
+ pipe_config->lane_count = 4;
+
+ intel_hdmi_read_gcp_infoframe(encoder, pipe_config);
+
+ intel_read_infoframe(encoder, pipe_config,
+ HDMI_INFOFRAME_TYPE_AVI,
+ &pipe_config->infoframes.avi);
+ intel_read_infoframe(encoder, pipe_config,
+ HDMI_INFOFRAME_TYPE_SPD,
+ &pipe_config->infoframes.spd);
+ intel_read_infoframe(encoder, pipe_config,
+ HDMI_INFOFRAME_TYPE_VENDOR,
+ &pipe_config->infoframes.hdmi);
+}
+
+static void g4x_enable_hdmi(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ u32 temp;
+
+ temp = intel_de_read(dev_priv, intel_hdmi->hdmi_reg);
+
+ temp |= SDVO_ENABLE;
+ if (pipe_config->has_audio)
+ temp |= HDMI_AUDIO_ENABLE;
+
+ intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
+ intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
+
+ drm_WARN_ON(&dev_priv->drm, pipe_config->has_audio &&
+ !pipe_config->has_hdmi_sink);
+ intel_audio_codec_enable(encoder, pipe_config, conn_state);
+}
+
+static void ibx_enable_hdmi(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ u32 temp;
+
+ temp = intel_de_read(dev_priv, intel_hdmi->hdmi_reg);
+
+ temp |= SDVO_ENABLE;
+ if (pipe_config->has_audio)
+ temp |= HDMI_AUDIO_ENABLE;
+
+	/*
+	 * HW workaround: write the register twice, as the first
+	 * write may get masked.
+	 */
+ intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
+ intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
+ intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
+ intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
+
+	/*
+	 * HW workaround: toggle the enable bit off and on for
+	 * 12bpc with pixel repeat.
+	 *
+	 * FIXME: BSpec says this should be done at the end of
+	 * the modeset sequence, so this may be too soon.
+	 */
+ if (pipe_config->pipe_bpp > 24 &&
+ pipe_config->pixel_multiplier > 1) {
+ intel_de_write(dev_priv, intel_hdmi->hdmi_reg,
+ temp & ~SDVO_ENABLE);
+ intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
+
+		/*
+		 * HW workaround: write the register twice, as the first
+		 * write may get masked.
+		 */
+ intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
+ intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
+ intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
+ intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
+ }
+
+ drm_WARN_ON(&dev_priv->drm, pipe_config->has_audio &&
+ !pipe_config->has_hdmi_sink);
+ intel_audio_codec_enable(encoder, pipe_config, conn_state);
+}
+
+static void cpt_enable_hdmi(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ enum pipe pipe = crtc->pipe;
+ u32 temp;
+
+ temp = intel_de_read(dev_priv, intel_hdmi->hdmi_reg);
+
+ temp |= SDVO_ENABLE;
+ if (pipe_config->has_audio)
+ temp |= HDMI_AUDIO_ENABLE;
+
+ /*
+ * WaEnableHDMI8bpcBefore12bpc:snb,ivb
+ *
+ * The procedure for 12bpc is as follows:
+ * 1. disable HDMI clock gating
+ * 2. enable HDMI with 8bpc
+ * 3. enable HDMI with 12bpc
+ * 4. enable HDMI clock gating
+ */
+
+ if (pipe_config->pipe_bpp > 24) {
+ intel_de_write(dev_priv, TRANS_CHICKEN1(pipe),
+ intel_de_read(dev_priv, TRANS_CHICKEN1(pipe)) | TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE);
+
+ temp &= ~SDVO_COLOR_FORMAT_MASK;
+ temp |= SDVO_COLOR_FORMAT_8bpc;
+ }
+
+ intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
+ intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
+
+ if (pipe_config->pipe_bpp > 24) {
+ temp &= ~SDVO_COLOR_FORMAT_MASK;
+ temp |= HDMI_COLOR_FORMAT_12bpc;
+
+ intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
+ intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
+
+ intel_de_write(dev_priv, TRANS_CHICKEN1(pipe),
+ intel_de_read(dev_priv, TRANS_CHICKEN1(pipe)) & ~TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE);
+ }
+
+ drm_WARN_ON(&dev_priv->drm, pipe_config->has_audio &&
+ !pipe_config->has_hdmi_sink);
+ intel_audio_codec_enable(encoder, pipe_config, conn_state);
+}
+
+static void vlv_enable_hdmi(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
+{
+}
+
+static void intel_disable_hdmi(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ struct intel_digital_port *dig_port =
+ hdmi_to_dig_port(intel_hdmi);
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
+ u32 temp;
+
+ temp = intel_de_read(dev_priv, intel_hdmi->hdmi_reg);
+
+ temp &= ~(SDVO_ENABLE | HDMI_AUDIO_ENABLE);
+ intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
+ intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
+
+	/*
+	 * HW workaround for IBX: we need to move the port
+	 * to transcoder A after disabling it, to allow the
+	 * matching DP port to be enabled on transcoder A.
+	 */
+ if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B) {
+ /*
+ * We get CPU/PCH FIFO underruns on the other pipe when
+ * doing the workaround. Sweep them under the rug.
+ */
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
+ intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
+
+ temp &= ~SDVO_PIPE_SEL_MASK;
+ temp |= SDVO_ENABLE | SDVO_PIPE_SEL(PIPE_A);
+		/*
+		 * HW workaround: write the register twice, as the first
+		 * write may get masked.
+		 */
+ intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
+ intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
+ intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
+ intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
+
+ temp &= ~SDVO_ENABLE;
+ intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
+ intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
+
+ intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
+ intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
+ }
+
+ dig_port->set_infoframes(encoder,
+ false,
+ old_crtc_state, old_conn_state);
+
+ intel_dp_dual_mode_set_tmds_output(intel_hdmi, false);
+}
+
+static void g4x_disable_hdmi(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state);
+
+ intel_disable_hdmi(state, encoder, old_crtc_state, old_conn_state);
+}
+
+static void pch_disable_hdmi(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state);
+}
+
+static void pch_post_disable_hdmi(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ intel_disable_hdmi(state, encoder, old_crtc_state, old_conn_state);
+}
+
+static void intel_hdmi_pre_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_digital_port *dig_port =
+ enc_to_dig_port(encoder);
+
+ intel_hdmi_prepare(encoder, pipe_config);
+
+ dig_port->set_infoframes(encoder,
+ pipe_config->has_infoframe,
+ pipe_config, conn_state);
+}
+
+static void vlv_hdmi_pre_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
+ vlv_phy_pre_encoder_enable(encoder, pipe_config);
+
+ /* HDMI 1.0V-2dB */
+ vlv_set_phy_signal_level(encoder, pipe_config,
+ 0x2b245f5f, 0x00002000,
+ 0x5578b83a, 0x2b247878);
+
+ dig_port->set_infoframes(encoder,
+ pipe_config->has_infoframe,
+ pipe_config, conn_state);
+
+ g4x_enable_hdmi(state, encoder, pipe_config, conn_state);
+
+ vlv_wait_port_ready(dev_priv, dig_port, 0x0);
+}
+
+static void vlv_hdmi_pre_pll_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
+{
+ intel_hdmi_prepare(encoder, pipe_config);
+
+ vlv_phy_pre_pll_enable(encoder, pipe_config);
+}
+
+static void chv_hdmi_pre_pll_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
+{
+ intel_hdmi_prepare(encoder, pipe_config);
+
+ chv_phy_pre_pll_enable(encoder, pipe_config);
+}
+
+static void chv_hdmi_post_pll_disable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ chv_phy_post_pll_disable(encoder, old_crtc_state);
+}
+
+static void vlv_hdmi_post_disable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ /* Reset lanes to avoid HDMI flicker (VLV w/a) */
+ vlv_phy_reset_lanes(encoder, old_crtc_state);
+}
+
+static void chv_hdmi_post_disable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+
+ vlv_dpio_get(dev_priv);
+
+ /* Assert data lane reset */
+ chv_data_lane_soft_reset(encoder, old_crtc_state, true);
+
+ vlv_dpio_put(dev_priv);
+}
+
+static void chv_hdmi_pre_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+
+ chv_phy_pre_encoder_enable(encoder, pipe_config);
+
+	/* FIXME: Program the supported xxx V-dB setting */
+	/* Use 800mV-0dB */
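+	/*
+	 * deemph 128 / margin 102 matches the DP 800mV/0dB entry in
+	 * chv_set_signal_levels().
+	 */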
+ chv_set_phy_signal_level(encoder, pipe_config, 128, 102, false);
+
+ dig_port->set_infoframes(encoder,
+ pipe_config->has_infoframe,
+ pipe_config, conn_state);
+
+ g4x_enable_hdmi(state, encoder, pipe_config, conn_state);
+
+ vlv_wait_port_ready(dev_priv, dig_port, 0x0);
+
+ /* Second common lane will stay alive on its own now */
+ chv_phy_release_cl2_override(encoder);
+}
+
+static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
+ .destroy = intel_encoder_destroy,
+};
+
+static enum intel_hotplug_state
+intel_hdmi_hotplug(struct intel_encoder *encoder,
+ struct intel_connector *connector)
+{
+ enum intel_hotplug_state state;
+
+ state = intel_encoder_hotplug(encoder, connector);
+
+ /*
+ * On many platforms the HDMI live state signal is known to be
+ * unreliable, so we can't use it to detect if a sink is connected or
+ * not. Instead we detect if it's connected based on whether we can
+ * read the EDID or not. That in turn has a problem during disconnect,
+ * since the HPD interrupt may be raised before the DDC lines get
+	 * disconnected (due to how the required lengths of the DDC vs. HPD
+	 * connector pins are specified) and so we'll still be able to get a
+ * valid EDID. To solve this schedule another detection cycle if this
+ * time around we didn't detect any change in the sink's connection
+ * status.
+ */
+ if (state == INTEL_HOTPLUG_UNCHANGED && !connector->hotplug_retries)
+ state = INTEL_HOTPLUG_RETRY;
+
+ return state;
+}
+
+void g4x_hdmi_init(struct drm_i915_private *dev_priv,
+ i915_reg_t hdmi_reg, enum port port)
+{
+ struct intel_digital_port *dig_port;
+ struct intel_encoder *intel_encoder;
+ struct intel_connector *intel_connector;
+
+ dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL);
+ if (!dig_port)
+ return;
+
+ intel_connector = intel_connector_alloc();
+ if (!intel_connector) {
+ kfree(dig_port);
+ return;
+ }
+
+ intel_encoder = &dig_port->base;
+
+ mutex_init(&dig_port->hdcp_mutex);
+
+ drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
+ &intel_hdmi_enc_funcs, DRM_MODE_ENCODER_TMDS,
+ "HDMI %c", port_name(port));
+
+ intel_encoder->hotplug = intel_hdmi_hotplug;
+ intel_encoder->compute_config = intel_hdmi_compute_config;
+ if (HAS_PCH_SPLIT(dev_priv)) {
+ intel_encoder->disable = pch_disable_hdmi;
+ intel_encoder->post_disable = pch_post_disable_hdmi;
+ } else {
+ intel_encoder->disable = g4x_disable_hdmi;
+ }
+ intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
+ intel_encoder->get_config = intel_hdmi_get_config;
+ if (IS_CHERRYVIEW(dev_priv)) {
+ intel_encoder->pre_pll_enable = chv_hdmi_pre_pll_enable;
+ intel_encoder->pre_enable = chv_hdmi_pre_enable;
+ intel_encoder->enable = vlv_enable_hdmi;
+ intel_encoder->post_disable = chv_hdmi_post_disable;
+ intel_encoder->post_pll_disable = chv_hdmi_post_pll_disable;
+ } else if (IS_VALLEYVIEW(dev_priv)) {
+ intel_encoder->pre_pll_enable = vlv_hdmi_pre_pll_enable;
+ intel_encoder->pre_enable = vlv_hdmi_pre_enable;
+ intel_encoder->enable = vlv_enable_hdmi;
+ intel_encoder->post_disable = vlv_hdmi_post_disable;
+ } else {
+ intel_encoder->pre_enable = intel_hdmi_pre_enable;
+ if (HAS_PCH_CPT(dev_priv))
+ intel_encoder->enable = cpt_enable_hdmi;
+ else if (HAS_PCH_IBX(dev_priv))
+ intel_encoder->enable = ibx_enable_hdmi;
+ else
+ intel_encoder->enable = g4x_enable_hdmi;
+ }
+ intel_encoder->shutdown = intel_hdmi_encoder_shutdown;
+
+ intel_encoder->type = INTEL_OUTPUT_HDMI;
+ intel_encoder->power_domain = intel_display_power_ddi_lanes_domain(dev_priv, port);
+ intel_encoder->port = port;
+ if (IS_CHERRYVIEW(dev_priv)) {
+ if (port == PORT_D)
+ intel_encoder->pipe_mask = BIT(PIPE_C);
+ else
+ intel_encoder->pipe_mask = BIT(PIPE_A) | BIT(PIPE_B);
+ } else {
+ intel_encoder->pipe_mask = ~0;
+ }
+ intel_encoder->cloneable = 1 << INTEL_OUTPUT_ANALOG;
+ intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);
+ /*
+ * BSpec is unclear about HDMI+HDMI cloning on g4x, but it seems
+ * to work on real hardware. And since g4x can send infoframes to
+ * only one port anyway, nothing is lost by allowing it.
+ */
+ if (IS_G4X(dev_priv))
+ intel_encoder->cloneable |= 1 << INTEL_OUTPUT_HDMI;
+
+ dig_port->hdmi.hdmi_reg = hdmi_reg;
+ dig_port->dp.output_reg = INVALID_MMIO_REG;
+ dig_port->max_lanes = 4;
+
+ intel_infoframe_init(dig_port);
+
+ dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
+ intel_hdmi_init_connector(dig_port, intel_connector);
+}
diff --git a/drivers/gpu/drm/i915/display/g4x_hdmi.h b/drivers/gpu/drm/i915/display/g4x_hdmi.h
new file mode 100644
index 000000000..db9a93bc9
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/g4x_hdmi.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef _G4X_HDMI_H_
+#define _G4X_HDMI_H_
+
+#include <linux/types.h>
+
+#include "i915_reg_defs.h"
+
+enum port;
+struct drm_i915_private;
+
+void g4x_hdmi_init(struct drm_i915_private *dev_priv,
+ i915_reg_t hdmi_reg, enum port port);
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/hsw_ips.c b/drivers/gpu/drm/i915/display/hsw_ips.c
new file mode 100644
index 000000000..a5be4af79
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/hsw_ips.c
@@ -0,0 +1,271 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include "hsw_ips.h"
+#include "i915_drv.h"
+#include "i915_reg.h"
+#include "intel_de.h"
+#include "intel_display_types.h"
+#include "intel_pcode.h"
+
+static void hsw_ips_enable(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+
+ if (!crtc_state->ips_enabled)
+ return;
+
+	/*
+	 * We can only enable IPS after we enable a plane and wait for a vblank.
+	 * This function is called from post_plane_update, which is run after
+	 * a vblank wait.
+	 */
+ drm_WARN_ON(&i915->drm,
+ !(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));
+
+ if (IS_BROADWELL(i915)) {
+ drm_WARN_ON(&i915->drm,
+ snb_pcode_write(&i915->uncore, DISPLAY_IPS_CONTROL,
+ IPS_ENABLE | IPS_PCODE_CONTROL));
+ /*
+		 * Quoting Art Runyan: "it's not safe to expect any particular
+ * value in IPS_CTL bit 31 after enabling IPS through the
+ * mailbox." Moreover, the mailbox may return a bogus state,
+ * so we need to just enable it and continue on.
+ */
+ } else {
+ intel_de_write(i915, IPS_CTL, IPS_ENABLE);
+ /*
+ * The bit only becomes 1 in the next vblank, so this wait here
+ * is essentially intel_wait_for_vblank. If we don't have this
+ * and don't wait for vblanks until the end of crtc_enable, then
+ * the HW state readout code will complain that the expected
+ * IPS_CTL value is not the one we read.
+ */
+ if (intel_de_wait_for_set(i915, IPS_CTL, IPS_ENABLE, 50))
+ drm_err(&i915->drm,
+ "Timed out waiting for IPS enable\n");
+ }
+}
+
+bool hsw_ips_disable(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ bool need_vblank_wait = false;
+
+ if (!crtc_state->ips_enabled)
+ return need_vblank_wait;
+
+ if (IS_BROADWELL(i915)) {
+ drm_WARN_ON(&i915->drm,
+ snb_pcode_write(&i915->uncore, DISPLAY_IPS_CONTROL, 0));
+ /*
+		 * Wait for PCODE to finish disabling IPS. The BSpec-specified
+		 * 42ms timeout value leads to occasional timeouts, so use
+		 * 100ms instead.
+ */
+ if (intel_de_wait_for_clear(i915, IPS_CTL, IPS_ENABLE, 100))
+ drm_err(&i915->drm,
+ "Timed out waiting for IPS disable\n");
+ } else {
+ intel_de_write(i915, IPS_CTL, 0);
+ intel_de_posting_read(i915, IPS_CTL);
+ }
+
+ /* We need to wait for a vblank before we can disable the plane. */
+ need_vblank_wait = true;
+
+ return need_vblank_wait;
+}
+
+static bool hsw_ips_need_disable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ if (!old_crtc_state->ips_enabled)
+ return false;
+
+ if (intel_crtc_needs_modeset(new_crtc_state))
+ return true;
+
+ /*
+	 * Workaround: Do not read or write the pipe palette/gamma data while
+ * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
+ *
+ * Disable IPS before we program the LUT.
+ */
+ if (IS_HASWELL(i915) &&
+ (new_crtc_state->uapi.color_mgmt_changed ||
+ new_crtc_state->update_pipe) &&
+ new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
+ return true;
+
+ return !new_crtc_state->ips_enabled;
+}
+
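+/*
+ * Returns true if IPS was disabled and the caller must wait for a
+ * vblank before touching the planes.
+ */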
+bool hsw_ips_pre_update(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+
+ if (!hsw_ips_need_disable(state, crtc))
+ return false;
+
+ return hsw_ips_disable(old_crtc_state);
+}
+
+static bool hsw_ips_need_enable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ if (!new_crtc_state->ips_enabled)
+ return false;
+
+ if (intel_crtc_needs_modeset(new_crtc_state))
+ return true;
+
+ /*
+	 * Workaround: Do not read or write the pipe palette/gamma data while
+ * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
+ *
+ * Re-enable IPS after the LUT has been programmed.
+ */
+ if (IS_HASWELL(i915) &&
+ (new_crtc_state->uapi.color_mgmt_changed ||
+ new_crtc_state->update_pipe) &&
+ new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
+ return true;
+
+ /*
+	 * We can't read out IPS on Broadwell, so assume the worst and
+	 * forcibly enable IPS on the first fastset.
+ */
+ if (new_crtc_state->update_pipe && old_crtc_state->inherited)
+ return true;
+
+ return !old_crtc_state->ips_enabled;
+}
+
+void hsw_ips_post_update(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ if (!hsw_ips_need_enable(state, crtc))
+ return;
+
+ hsw_ips_enable(new_crtc_state);
+}
+
+/* IPS only exists on ULT machines and is tied to pipe A. */
+bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
+{
+ return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
+}
+
+bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+
+ if (!hsw_crtc_supports_ips(crtc))
+ return false;
+
+ if (!i915->params.enable_ips)
+ return false;
+
+ if (crtc_state->pipe_bpp > 24)
+ return false;
+
+ /*
+	 * We compare against the max cdclk, which means we must take
+	 * the increased cdclk requirement into account when
+	 * calculating the new cdclk.
+	 *
+	 * Should measure whether using a lower cdclk w/o IPS would be
+	 * preferable.
+	 */
+ if (IS_BROADWELL(i915) &&
+ crtc_state->pixel_rate > i915->display.cdclk.max_cdclk_freq * 95 / 100)
+ return false;
+
+ return true;
+}
+
+int hsw_ips_compute_config(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ crtc_state->ips_enabled = false;
+
+ if (!hsw_crtc_state_ips_capable(crtc_state))
+ return 0;
+
+ /*
+ * When IPS gets enabled, the pipe CRC changes. Since IPS gets
+ * enabled and disabled dynamically based on package C states,
+ * user space can't make reliable use of the CRCs, so let's just
+ * completely disable it.
+ */
+ if (crtc_state->crc_enabled)
+ return 0;
+
+ /* IPS should be fine as long as at least one plane is enabled. */
+ if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
+ return 0;
+
+ if (IS_BROADWELL(i915)) {
+ const struct intel_cdclk_state *cdclk_state;
+
+ cdclk_state = intel_atomic_get_cdclk_state(state);
+ if (IS_ERR(cdclk_state))
+ return PTR_ERR(cdclk_state);
+
+ /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
+ if (crtc_state->pixel_rate > cdclk_state->logical.cdclk * 95 / 100)
+ return 0;
+ }
+
+ crtc_state->ips_enabled = true;
+
+ return 0;
+}
+
+void hsw_ips_get_config(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+
+ if (!hsw_crtc_supports_ips(crtc))
+ return;
+
+ if (IS_HASWELL(i915)) {
+ crtc_state->ips_enabled = intel_de_read(i915, IPS_CTL) & IPS_ENABLE;
+ } else {
+		/*
+		 * We cannot read out the IPS state on Broadwell, so set it
+		 * to true so we can bring it to a defined state on the
+		 * first commit.
+		 */
+ crtc_state->ips_enabled = true;
+ }
+}
diff --git a/drivers/gpu/drm/i915/display/hsw_ips.h b/drivers/gpu/drm/i915/display/hsw_ips.h
new file mode 100644
index 000000000..4564dee49
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/hsw_ips.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __HSW_IPS_H__
+#define __HSW_IPS_H__
+
+#include <linux/types.h>
+
+struct intel_atomic_state;
+struct intel_crtc;
+struct intel_crtc_state;
+
+bool hsw_ips_disable(const struct intel_crtc_state *crtc_state);
+bool hsw_ips_pre_update(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+void hsw_ips_post_update(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+bool hsw_crtc_supports_ips(struct intel_crtc *crtc);
+bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state);
+int hsw_ips_compute_config(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+void hsw_ips_get_config(struct intel_crtc_state *crtc_state);
+
+#endif /* __HSW_IPS_H__ */
diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.c b/drivers/gpu/drm/i915/display/i9xx_plane.c
new file mode 100644
index 000000000..5afbe3e98
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/i9xx_plane.c
@@ -0,0 +1,1056 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+#include <linux/kernel.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_blend.h>
+#include <drm/drm_fourcc.h>
+
+#include "intel_atomic.h"
+#include "intel_atomic_plane.h"
+#include "intel_de.h"
+#include "intel_display_types.h"
+#include "intel_fb.h"
+#include "intel_fbc.h"
+#include "intel_sprite.h"
+#include "i9xx_plane.h"
+
+/* Primary plane formats for gen <= 3 */
+static const u32 i8xx_primary_formats[] = {
+ DRM_FORMAT_C8,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+};
+
+/* Primary plane formats for ivb (no fp16 due to hw issue) */
+static const u32 ivb_primary_formats[] = {
+ DRM_FORMAT_C8,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
+};
+
+/* Primary plane formats for gen >= 4, except ivb */
+static const u32 i965_primary_formats[] = {
+ DRM_FORMAT_C8,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_XBGR16161616F,
+};
+
+/* Primary plane formats for vlv/chv */
+static const u32 vlv_primary_formats[] = {
+ DRM_FORMAT_C8,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_ARGB2101010,
+ DRM_FORMAT_ABGR2101010,
+ DRM_FORMAT_XBGR16161616F,
+};
+
+static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane,
+ u32 format, u64 modifier)
+{
+ if (!intel_fb_plane_supports_modifier(to_intel_plane(_plane), modifier))
+ return false;
+
+ switch (format) {
+ case DRM_FORMAT_C8:
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_XRGB1555:
+ case DRM_FORMAT_XRGB8888:
+ return modifier == DRM_FORMAT_MOD_LINEAR ||
+ modifier == I915_FORMAT_MOD_X_TILED;
+ default:
+ return false;
+ }
+}
+
+static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
+ u32 format, u64 modifier)
+{
+ if (!intel_fb_plane_supports_modifier(to_intel_plane(_plane), modifier))
+ return false;
+
+ switch (format) {
+ case DRM_FORMAT_C8:
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_ARGB2101010:
+ case DRM_FORMAT_ABGR2101010:
+ case DRM_FORMAT_XBGR16161616F:
+ return modifier == DRM_FORMAT_MOD_LINEAR ||
+ modifier == I915_FORMAT_MOD_X_TILED;
+ default:
+ return false;
+ }
+}
+
+static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
+ enum i9xx_plane_id i9xx_plane)
+{
+ if (!HAS_FBC(dev_priv))
+ return false;
+
+ if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ return i9xx_plane == PLANE_A; /* tied to pipe A */
+ else if (IS_IVYBRIDGE(dev_priv))
+ return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B ||
+ i9xx_plane == PLANE_C;
+ else if (DISPLAY_VER(dev_priv) >= 4)
+ return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B;
+ else
+ return i9xx_plane == PLANE_A;
+}
+
+static struct intel_fbc *i9xx_plane_fbc(struct drm_i915_private *dev_priv,
+ enum i9xx_plane_id i9xx_plane)
+{
+ if (i9xx_plane_has_fbc(dev_priv, i9xx_plane))
+ return dev_priv->display.fbc[INTEL_FBC_A];
+ else
+ return NULL;
+}
+
+static bool i9xx_plane_has_windowing(struct intel_plane *plane)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
+
+ if (IS_CHERRYVIEW(dev_priv))
+ return i9xx_plane == PLANE_B;
+ else if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv))
+ return false;
+ else if (DISPLAY_VER(dev_priv) == 4)
+ return i9xx_plane == PLANE_C;
+ else
+ return i9xx_plane == PLANE_B ||
+ i9xx_plane == PLANE_C;
+}
+
+static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(plane_state->uapi.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int rotation = plane_state->hw.rotation;
+ u32 dspcntr;
+
+ dspcntr = DISP_ENABLE;
+
+ if (IS_G4X(dev_priv) || IS_IRONLAKE(dev_priv) ||
+ IS_SANDYBRIDGE(dev_priv) || IS_IVYBRIDGE(dev_priv))
+ dspcntr |= DISP_TRICKLE_FEED_DISABLE;
+
+ switch (fb->format->format) {
+ case DRM_FORMAT_C8:
+ dspcntr |= DISP_FORMAT_8BPP;
+ break;
+ case DRM_FORMAT_XRGB1555:
+ dspcntr |= DISP_FORMAT_BGRX555;
+ break;
+ case DRM_FORMAT_ARGB1555:
+ dspcntr |= DISP_FORMAT_BGRA555;
+ break;
+ case DRM_FORMAT_RGB565:
+ dspcntr |= DISP_FORMAT_BGRX565;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ dspcntr |= DISP_FORMAT_BGRX888;
+ break;
+ case DRM_FORMAT_XBGR8888:
+ dspcntr |= DISP_FORMAT_RGBX888;
+ break;
+ case DRM_FORMAT_ARGB8888:
+ dspcntr |= DISP_FORMAT_BGRA888;
+ break;
+ case DRM_FORMAT_ABGR8888:
+ dspcntr |= DISP_FORMAT_RGBA888;
+ break;
+ case DRM_FORMAT_XRGB2101010:
+ dspcntr |= DISP_FORMAT_BGRX101010;
+ break;
+ case DRM_FORMAT_XBGR2101010:
+ dspcntr |= DISP_FORMAT_RGBX101010;
+ break;
+ case DRM_FORMAT_ARGB2101010:
+ dspcntr |= DISP_FORMAT_BGRA101010;
+ break;
+ case DRM_FORMAT_ABGR2101010:
+ dspcntr |= DISP_FORMAT_RGBA101010;
+ break;
+ case DRM_FORMAT_XBGR16161616F:
+ dspcntr |= DISP_FORMAT_RGBX161616;
+ break;
+ default:
+ MISSING_CASE(fb->format->format);
+ return 0;
+ }
+
+ if (DISPLAY_VER(dev_priv) >= 4 &&
+ fb->modifier == I915_FORMAT_MOD_X_TILED)
+ dspcntr |= DISP_TILED;
+
+ if (rotation & DRM_MODE_ROTATE_180)
+ dspcntr |= DISP_ROTATE_180;
+
+ if (rotation & DRM_MODE_REFLECT_X)
+ dspcntr |= DISP_MIRROR;
+
+ return dspcntr;
+}
+
+int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(plane_state->uapi.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ int src_x, src_y, src_w;
+ u32 offset;
+ int ret;
+
+ ret = intel_plane_compute_gtt(plane_state);
+ if (ret)
+ return ret;
+
+ if (!plane_state->uapi.visible)
+ return 0;
+
+ src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+ src_x = plane_state->uapi.src.x1 >> 16;
+ src_y = plane_state->uapi.src.y1 >> 16;
+
+ /* Undocumented hardware limit on i965/g4x/vlv/chv */
+ if (HAS_GMCH(dev_priv) && fb->format->cpp[0] == 8 && src_w > 2048)
+ return -EINVAL;
+
+ intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
+
+ if (DISPLAY_VER(dev_priv) >= 4)
+ offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
+ plane_state, 0);
+ else
+ offset = 0;
+
+ /*
+ * When using an X-tiled surface the plane starts to
+ * misbehave if the x offset + width exceeds the stride.
+ * hsw/bdw: underrun galore
+ * ilk/snb/ivb: wrap to the next tile row mid scanout
+ * i965/g4x: so far appear immune to this
+ * vlv/chv: TODO check
+ *
+ * Linear surfaces seem to work just fine, even on hsw/bdw
+ * despite them not using the linear offset anymore.
+ */
+ if (DISPLAY_VER(dev_priv) >= 4 && fb->modifier == I915_FORMAT_MOD_X_TILED) {
+ u32 alignment = intel_surf_alignment(fb, 0);
+ int cpp = fb->format->cpp[0];
+
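+		/*
+		 * Step the tile-aligned base offset back until the x
+		 * offset + width fits within the stride.
+		 */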
+ while ((src_x + src_w) * cpp > plane_state->view.color_plane[0].mapping_stride) {
+ if (offset == 0) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Unable to find suitable display surface offset due to X-tiling\n");
+ return -EINVAL;
+ }
+
+ offset = intel_plane_adjust_aligned_offset(&src_x, &src_y, plane_state, 0,
+ offset, offset - alignment);
+ }
+ }
+
+ /*
+ * Put the final coordinates back so that the src
+ * coordinate checks will see the right values.
+ */
+ drm_rect_translate_to(&plane_state->uapi.src,
+ src_x << 16, src_y << 16);
+
+ /* HSW/BDW do this automagically in hardware */
+ if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
+ unsigned int rotation = plane_state->hw.rotation;
+ int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+ int src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
+
+ if (rotation & DRM_MODE_ROTATE_180) {
+ src_x += src_w - 1;
+ src_y += src_h - 1;
+ } else if (rotation & DRM_MODE_REFLECT_X) {
+ src_x += src_w - 1;
+ }
+ }
+
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
+ drm_WARN_ON(&dev_priv->drm, src_x > 8191 || src_y > 4095);
+ } else if (DISPLAY_VER(dev_priv) >= 4 &&
+ fb->modifier == I915_FORMAT_MOD_X_TILED) {
+ drm_WARN_ON(&dev_priv->drm, src_x > 4095 || src_y > 4095);
+ }
+
+ plane_state->view.color_plane[0].offset = offset;
+ plane_state->view.color_plane[0].x = src_x;
+ plane_state->view.color_plane[0].y = src_y;
+
+ return 0;
+}
+
+static int
+i9xx_plane_check(struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ int ret;
+
+ ret = chv_plane_check_rotation(plane_state);
+ if (ret)
+ return ret;
+
+ ret = intel_atomic_plane_check_clipping(plane_state, crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ i9xx_plane_has_windowing(plane));
+ if (ret)
+ return ret;
+
+ ret = i9xx_check_plane_surface(plane_state);
+ if (ret)
+ return ret;
+
+ if (!plane_state->uapi.visible)
+ return 0;
+
+ ret = intel_plane_check_src_coordinates(plane_state);
+ if (ret)
+ return ret;
+
+ plane_state->ctl = i9xx_plane_ctl(crtc_state, plane_state);
+
+ return 0;
+}
+
+static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 dspcntr = 0;
+
+ if (crtc_state->gamma_enable)
+ dspcntr |= DISP_PIPE_GAMMA_ENABLE;
+
+ if (crtc_state->csc_enable)
+ dspcntr |= DISP_PIPE_CSC_ENABLE;
+
+ if (DISPLAY_VER(dev_priv) < 5)
+ dspcntr |= DISP_PIPE_SEL(crtc->pipe);
+
+ return dspcntr;
+}
+
+static void i9xx_plane_ratio(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ unsigned int *num, unsigned int *den)
+{
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int cpp = fb->format->cpp[0];
+
+ /*
+ * g4x bspec says 64bpp pixel rate can't exceed 80%
+ * of cdclk when the sprite plane is enabled on the
+ * same pipe. ilk/snb bspec says 64bpp pixel rate is
+ * never allowed to exceed 80% of cdclk. Let's just go
+ * with the ilk/snb limit always.
+ */
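+	/* num/den = 10/8 caps the pixel rate at 80% of cdclk (cdclk >= 1.25x). */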
+ if (cpp == 8) {
+ *num = 10;
+ *den = 8;
+ } else {
+ *num = 1;
+ *den = 1;
+ }
+}
+
+static int i9xx_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ unsigned int pixel_rate;
+ unsigned int num, den;
+
+ /*
+ * Note that crtc_state->pixel_rate accounts for both
+ * horizontal and vertical panel fitter downscaling factors.
+ * Pre-HSW bspec tells us to only consider the horizontal
+ * downscaling factor here. We ignore that and just consider
+ * both for simplicity.
+ */
+ pixel_rate = crtc_state->pixel_rate;
+
+ i9xx_plane_ratio(crtc_state, plane_state, &num, &den);
+
+ /* two pixels per clock with double wide pipe */
+ if (crtc_state->double_wide)
+ den *= 2;
+
+ return DIV_ROUND_UP(pixel_rate * num, den);
+}
+
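+/*
+ * The "noarm" hook writes registers that only take effect once the
+ * plane is armed; the "arm" hook below then latches everything with
+ * the DSPCNTR + surface address writes.
+ */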
+static void i9xx_plane_update_noarm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
+
+ intel_de_write_fw(dev_priv, DSPSTRIDE(i9xx_plane),
+ plane_state->view.color_plane[0].mapping_stride);
+
+ if (DISPLAY_VER(dev_priv) < 4) {
+ int crtc_x = plane_state->uapi.dst.x1;
+ int crtc_y = plane_state->uapi.dst.y1;
+ int crtc_w = drm_rect_width(&plane_state->uapi.dst);
+ int crtc_h = drm_rect_height(&plane_state->uapi.dst);
+
+ /*
+ * PLANE_A doesn't actually have a full window
+ * generator but let's assume we still need to
+ * program whatever is there.
+ */
+ intel_de_write_fw(dev_priv, DSPPOS(i9xx_plane),
+ DISP_POS_Y(crtc_y) | DISP_POS_X(crtc_x));
+ intel_de_write_fw(dev_priv, DSPSIZE(i9xx_plane),
+ DISP_HEIGHT(crtc_h - 1) | DISP_WIDTH(crtc_w - 1));
+ }
+}
+
+static void i9xx_plane_update_arm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
+ int x = plane_state->view.color_plane[0].x;
+ int y = plane_state->view.color_plane[0].y;
+ u32 dspcntr, dspaddr_offset, linear_offset;
+
+ dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state);
+
+ linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
+
+ if (DISPLAY_VER(dev_priv) >= 4)
+ dspaddr_offset = plane_state->view.color_plane[0].offset;
+ else
+ dspaddr_offset = linear_offset;
+
+ if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
+ int crtc_x = plane_state->uapi.dst.x1;
+ int crtc_y = plane_state->uapi.dst.y1;
+ int crtc_w = drm_rect_width(&plane_state->uapi.dst);
+ int crtc_h = drm_rect_height(&plane_state->uapi.dst);
+
+ intel_de_write_fw(dev_priv, PRIMPOS(i9xx_plane),
+ PRIM_POS_Y(crtc_y) | PRIM_POS_X(crtc_x));
+ intel_de_write_fw(dev_priv, PRIMSIZE(i9xx_plane),
+ PRIM_HEIGHT(crtc_h - 1) | PRIM_WIDTH(crtc_w - 1));
+ intel_de_write_fw(dev_priv, PRIMCNSTALPHA(i9xx_plane), 0);
+ }
+
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
+ intel_de_write_fw(dev_priv, DSPOFFSET(i9xx_plane),
+ DISP_OFFSET_Y(y) | DISP_OFFSET_X(x));
+ } else if (DISPLAY_VER(dev_priv) >= 4) {
+ intel_de_write_fw(dev_priv, DSPLINOFF(i9xx_plane),
+ linear_offset);
+ intel_de_write_fw(dev_priv, DSPTILEOFF(i9xx_plane),
+ DISP_OFFSET_Y(y) | DISP_OFFSET_X(x));
+ }
+
+ /*
+ * The control register self-arms if the plane was previously
+ * disabled. Try to make the plane enable atomic by writing
+ * the control register just before the surface register.
+ */
+ intel_de_write_fw(dev_priv, DSPCNTR(i9xx_plane), dspcntr);
+
+ if (DISPLAY_VER(dev_priv) >= 4)
+ intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane),
+ intel_plane_ggtt_offset(plane_state) + dspaddr_offset);
+ else
+ intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane),
+ intel_plane_ggtt_offset(plane_state) + dspaddr_offset);
+}
+
+static void i830_plane_update_arm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ /*
+ * On i830/i845 all registers are self-arming [ALM040].
+ *
+ * Additional breakage on i830 causes register reads to return
+ * the last latched value instead of the last written value [ALM026].
+ */
+ i9xx_plane_update_noarm(plane, crtc_state, plane_state);
+ i9xx_plane_update_arm(plane, crtc_state, plane_state);
+}
+
+static void i9xx_plane_disable_arm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
+ u32 dspcntr;
+
+ /*
+ * DSPCNTR pipe gamma enable on g4x+ and pipe csc
+ * enable on ilk+ affect the pipe bottom color as
+ * well, so we must configure them even if the plane
+ * is disabled.
+ *
+ * On pre-g4x there is no way to gamma correct the
+ * pipe bottom color but we'll keep on doing this
+ * anyway so that the crtc state readout works correctly.
+ */
+ dspcntr = i9xx_plane_ctl_crtc(crtc_state);
+
+ intel_de_write_fw(dev_priv, DSPCNTR(i9xx_plane), dspcntr);
+
+ if (DISPLAY_VER(dev_priv) >= 4)
+ intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane), 0);
+ else
+ intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane), 0);
+}
+
+static void
+g4x_primary_async_flip(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ bool async_flip)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ u32 dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state);
+ u32 dspaddr_offset = plane_state->view.color_plane[0].offset;
+ enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
+
+ if (async_flip)
+ dspcntr |= DISP_ASYNC_FLIP;
+
+ intel_de_write_fw(dev_priv, DSPCNTR(i9xx_plane), dspcntr);
+
+ intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane),
+ intel_plane_ggtt_offset(plane_state) + dspaddr_offset);
+}
+
+static void
+vlv_primary_async_flip(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ bool async_flip)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ u32 dspaddr_offset = plane_state->view.color_plane[0].offset;
+ enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
+
+ intel_de_write_fw(dev_priv, DSPADDR_VLV(i9xx_plane),
+ intel_plane_ggtt_offset(plane_state) + dspaddr_offset);
+}
+
+static void
+bdw_primary_enable_flip_done(struct intel_plane *plane)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+
+ spin_lock_irq(&i915->irq_lock);
+ bdw_enable_pipe_irq(i915, pipe, GEN8_PIPE_PRIMARY_FLIP_DONE);
+ spin_unlock_irq(&i915->irq_lock);
+}
+
+static void
+bdw_primary_disable_flip_done(struct intel_plane *plane)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+
+ spin_lock_irq(&i915->irq_lock);
+ bdw_disable_pipe_irq(i915, pipe, GEN8_PIPE_PRIMARY_FLIP_DONE);
+ spin_unlock_irq(&i915->irq_lock);
+}
+
+static void
+ivb_primary_enable_flip_done(struct intel_plane *plane)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+
+ spin_lock_irq(&i915->irq_lock);
+ ilk_enable_display_irq(i915, DE_PLANE_FLIP_DONE_IVB(plane->i9xx_plane));
+ spin_unlock_irq(&i915->irq_lock);
+}
+
+static void
+ivb_primary_disable_flip_done(struct intel_plane *plane)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+
+ spin_lock_irq(&i915->irq_lock);
+ ilk_disable_display_irq(i915, DE_PLANE_FLIP_DONE_IVB(plane->i9xx_plane));
+ spin_unlock_irq(&i915->irq_lock);
+}
+
+static void
+ilk_primary_enable_flip_done(struct intel_plane *plane)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+
+ spin_lock_irq(&i915->irq_lock);
+ ilk_enable_display_irq(i915, DE_PLANE_FLIP_DONE(plane->i9xx_plane));
+ spin_unlock_irq(&i915->irq_lock);
+}
+
+static void
+ilk_primary_disable_flip_done(struct intel_plane *plane)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+
+ spin_lock_irq(&i915->irq_lock);
+ ilk_disable_display_irq(i915, DE_PLANE_FLIP_DONE(plane->i9xx_plane));
+ spin_unlock_irq(&i915->irq_lock);
+}
+
+static void
+vlv_primary_enable_flip_done(struct intel_plane *plane)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+
+ spin_lock_irq(&i915->irq_lock);
+ i915_enable_pipestat(i915, pipe, PLANE_FLIP_DONE_INT_STATUS_VLV);
+ spin_unlock_irq(&i915->irq_lock);
+}
+
+static void
+vlv_primary_disable_flip_done(struct intel_plane *plane)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+
+ spin_lock_irq(&i915->irq_lock);
+ i915_disable_pipestat(i915, pipe, PLANE_FLIP_DONE_INT_STATUS_VLV);
+ spin_unlock_irq(&i915->irq_lock);
+}
+
+static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
+ enum pipe *pipe)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum intel_display_power_domain power_domain;
+ enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
+ intel_wakeref_t wakeref;
+ bool ret;
+ u32 val;
+
+ /*
+ * Not 100% correct for planes that can move between pipes,
+ * but that's only the case for gen2-4 which don't have any
+ * display power wells.
+ */
+ power_domain = POWER_DOMAIN_PIPE(plane->pipe);
+ wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ if (!wakeref)
+ return false;
+
+ val = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));
+
+ ret = val & DISP_ENABLE;
+
+ if (DISPLAY_VER(dev_priv) >= 5)
+ *pipe = plane->pipe;
+ else
+ *pipe = REG_FIELD_GET(DISP_PIPE_SEL_MASK, val);
+
+ intel_display_power_put(dev_priv, power_domain, wakeref);
+
+ return ret;
+}
+
+static unsigned int
+hsw_primary_max_stride(struct intel_plane *plane,
+ u32 pixel_format, u64 modifier,
+ unsigned int rotation)
+{
+ const struct drm_format_info *info = drm_format_info(pixel_format);
+ int cpp = info->cpp[0];
+
+ /* Limit to 8k pixels to guarantee OFFSET.x doesn't get too big. */
+ return min(8192 * cpp, 32 * 1024);
+}
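+
+/*
+ * For the limit above (illustrative arithmetic only): at 32bpp
+ * (cpp == 4) both terms meet at 8192 * 4 = 32768 bytes, while at
+ * 64bpp (cpp == 8) the 32k byte cap wins and limits the stride
+ * to 4096 pixels.
+ */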
+
+static unsigned int
+ilk_primary_max_stride(struct intel_plane *plane,
+ u32 pixel_format, u64 modifier,
+ unsigned int rotation)
+{
+ const struct drm_format_info *info = drm_format_info(pixel_format);
+ int cpp = info->cpp[0];
+
+ /* Limit to 4k pixels to guarantee TILEOFF.x doesn't get too big. */
+ if (modifier == I915_FORMAT_MOD_X_TILED)
+ return min(4096 * cpp, 32 * 1024);
+ else
+ return 32 * 1024;
+}
+
+unsigned int
+i965_plane_max_stride(struct intel_plane *plane,
+ u32 pixel_format, u64 modifier,
+ unsigned int rotation)
+{
+ const struct drm_format_info *info = drm_format_info(pixel_format);
+ int cpp = info->cpp[0];
+
+ /* Limit to 4k pixels to guarantee TILEOFF.x doesn't get too big. */
+ if (modifier == I915_FORMAT_MOD_X_TILED)
+ return min(4096 * cpp, 16 * 1024);
+ else
+ return 32 * 1024;
+}
+
+static unsigned int
+i9xx_plane_max_stride(struct intel_plane *plane,
+ u32 pixel_format, u64 modifier,
+ unsigned int rotation)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+
+ if (DISPLAY_VER(dev_priv) >= 3) {
+ if (modifier == I915_FORMAT_MOD_X_TILED)
+ return 8*1024;
+ else
+ return 16*1024;
+ } else {
+ if (plane->i9xx_plane == PLANE_C)
+ return 4*1024;
+ else
+ return 8*1024;
+ }
+}
+
+static const struct drm_plane_funcs i965_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = intel_plane_destroy,
+ .atomic_duplicate_state = intel_plane_duplicate_state,
+ .atomic_destroy_state = intel_plane_destroy_state,
+ .format_mod_supported = i965_plane_format_mod_supported,
+};
+
+static const struct drm_plane_funcs i8xx_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = intel_plane_destroy,
+ .atomic_duplicate_state = intel_plane_duplicate_state,
+ .atomic_destroy_state = intel_plane_destroy_state,
+ .format_mod_supported = i8xx_plane_format_mod_supported,
+};
+
+struct intel_plane *
+intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+ struct intel_plane *plane;
+ const struct drm_plane_funcs *plane_funcs;
+ unsigned int supported_rotations;
+ const u64 *modifiers;
+ const u32 *formats;
+ int num_formats;
+ int ret, zpos;
+
+ plane = intel_plane_alloc();
+ if (IS_ERR(plane))
+ return plane;
+
+ plane->pipe = pipe;
+ /*
+ * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
+ * port are hooked to pipe B. Hence we want plane A feeding pipe B.
+ */
+ if (HAS_FBC(dev_priv) && DISPLAY_VER(dev_priv) < 4 &&
+ INTEL_NUM_PIPES(dev_priv) == 2)
+ plane->i9xx_plane = (enum i9xx_plane_id) !pipe;
+ else
+ plane->i9xx_plane = (enum i9xx_plane_id) pipe;
+ plane->id = PLANE_PRIMARY;
+ plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);
+
+ intel_fbc_add_plane(i9xx_plane_fbc(dev_priv, plane->i9xx_plane), plane);
+
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ formats = vlv_primary_formats;
+ num_formats = ARRAY_SIZE(vlv_primary_formats);
+ } else if (DISPLAY_VER(dev_priv) >= 4) {
+ /*
+ * WaFP16GammaEnabling:ivb
+ * "Workaround : When using the 64-bit format, the plane
+ * output on each color channel has one quarter amplitude.
+ * It can be brought up to full amplitude by using pipe
+ * gamma correction or pipe color space conversion to
+ * multiply the plane output by four."
+ *
+ * There is no dedicated plane gamma for the primary plane,
+ * and using the pipe gamma/csc could conflict with other
+ * planes, so we choose not to expose fp16 on IVB primary
+ * planes. HSW primary planes no longer have this problem.
+ */
+ if (IS_IVYBRIDGE(dev_priv)) {
+ formats = ivb_primary_formats;
+ num_formats = ARRAY_SIZE(ivb_primary_formats);
+ } else {
+ formats = i965_primary_formats;
+ num_formats = ARRAY_SIZE(i965_primary_formats);
+ }
+ } else {
+ formats = i8xx_primary_formats;
+ num_formats = ARRAY_SIZE(i8xx_primary_formats);
+ }
+
+ if (DISPLAY_VER(dev_priv) >= 4)
+ plane_funcs = &i965_plane_funcs;
+ else
+ plane_funcs = &i8xx_plane_funcs;
+
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ plane->min_cdclk = vlv_plane_min_cdclk;
+ else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ plane->min_cdclk = hsw_plane_min_cdclk;
+ else if (IS_IVYBRIDGE(dev_priv))
+ plane->min_cdclk = ivb_plane_min_cdclk;
+ else
+ plane->min_cdclk = i9xx_plane_min_cdclk;
+
+ if (HAS_GMCH(dev_priv)) {
+ if (DISPLAY_VER(dev_priv) >= 4)
+ plane->max_stride = i965_plane_max_stride;
+ else
+ plane->max_stride = i9xx_plane_max_stride;
+ } else {
+ if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ plane->max_stride = hsw_primary_max_stride;
+ else
+ plane->max_stride = ilk_primary_max_stride;
+ }
+
+ if (IS_I830(dev_priv) || IS_I845G(dev_priv)) {
+ plane->update_arm = i830_plane_update_arm;
+ } else {
+ plane->update_noarm = i9xx_plane_update_noarm;
+ plane->update_arm = i9xx_plane_update_arm;
+ }
+ plane->disable_arm = i9xx_plane_disable_arm;
+ plane->get_hw_state = i9xx_plane_get_hw_state;
+ plane->check_plane = i9xx_plane_check;
+
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ plane->async_flip = vlv_primary_async_flip;
+ plane->enable_flip_done = vlv_primary_enable_flip_done;
+ plane->disable_flip_done = vlv_primary_disable_flip_done;
+ } else if (IS_BROADWELL(dev_priv)) {
+ plane->need_async_flip_disable_wa = true;
+ plane->async_flip = g4x_primary_async_flip;
+ plane->enable_flip_done = bdw_primary_enable_flip_done;
+ plane->disable_flip_done = bdw_primary_disable_flip_done;
+ } else if (DISPLAY_VER(dev_priv) >= 7) {
+ plane->async_flip = g4x_primary_async_flip;
+ plane->enable_flip_done = ivb_primary_enable_flip_done;
+ plane->disable_flip_done = ivb_primary_disable_flip_done;
+ } else if (DISPLAY_VER(dev_priv) >= 5) {
+ plane->async_flip = g4x_primary_async_flip;
+ plane->enable_flip_done = ilk_primary_enable_flip_done;
+ plane->disable_flip_done = ilk_primary_disable_flip_done;
+ }
+
+ modifiers = intel_fb_plane_get_modifiers(dev_priv, INTEL_PLANE_CAP_TILING_X);
+
+ if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv))
+ ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
+ 0, plane_funcs,
+ formats, num_formats,
+ modifiers,
+ DRM_PLANE_TYPE_PRIMARY,
+ "primary %c", pipe_name(pipe));
+ else
+ ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
+ 0, plane_funcs,
+ formats, num_formats,
+ modifiers,
+ DRM_PLANE_TYPE_PRIMARY,
+ "plane %c",
+ plane_name(plane->i9xx_plane));
+
+ kfree(modifiers);
+
+ if (ret)
+ goto fail;
+
+ if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
+ supported_rotations =
+ DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
+ DRM_MODE_REFLECT_X;
+ } else if (DISPLAY_VER(dev_priv) >= 4) {
+ supported_rotations =
+ DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
+ } else {
+ supported_rotations = DRM_MODE_ROTATE_0;
+ }
+
+ if (DISPLAY_VER(dev_priv) >= 4)
+ drm_plane_create_rotation_property(&plane->base,
+ DRM_MODE_ROTATE_0,
+ supported_rotations);
+
+ zpos = 0;
+ drm_plane_create_zpos_immutable_property(&plane->base, zpos);
+
+ intel_plane_helper_add(plane);
+
+ return plane;
+
+fail:
+ intel_plane_free(plane);
+
+ return ERR_PTR(ret);
+}
+
+static int i9xx_format_to_fourcc(int format)
+{
+ switch (format) {
+ case DISP_FORMAT_8BPP:
+ return DRM_FORMAT_C8;
+ case DISP_FORMAT_BGRA555:
+ return DRM_FORMAT_ARGB1555;
+ case DISP_FORMAT_BGRX555:
+ return DRM_FORMAT_XRGB1555;
+ case DISP_FORMAT_BGRX565:
+ return DRM_FORMAT_RGB565;
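+ /* any unrecognized format falls back to XRGB8888 */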
+ default:
+ case DISP_FORMAT_BGRX888:
+ return DRM_FORMAT_XRGB8888;
+ case DISP_FORMAT_RGBX888:
+ return DRM_FORMAT_XBGR8888;
+ case DISP_FORMAT_BGRA888:
+ return DRM_FORMAT_ARGB8888;
+ case DISP_FORMAT_RGBA888:
+ return DRM_FORMAT_ABGR8888;
+ case DISP_FORMAT_BGRX101010:
+ return DRM_FORMAT_XRGB2101010;
+ case DISP_FORMAT_RGBX101010:
+ return DRM_FORMAT_XBGR2101010;
+ case DISP_FORMAT_BGRA101010:
+ return DRM_FORMAT_ARGB2101010;
+ case DISP_FORMAT_RGBA101010:
+ return DRM_FORMAT_ABGR2101010;
+ case DISP_FORMAT_RGBX161616:
+ return DRM_FORMAT_XBGR16161616F;
+ }
+}
+
+void
+i9xx_get_initial_plane_config(struct intel_crtc *crtc,
+ struct intel_initial_plane_config *plane_config)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_plane *plane = to_intel_plane(crtc->base.primary);
+ enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
+ enum pipe pipe;
+ u32 val, base, offset;
+ int fourcc, pixel_format;
+ unsigned int aligned_height;
+ struct drm_framebuffer *fb;
+ struct intel_framebuffer *intel_fb;
+
+ if (!plane->get_hw_state(plane, &pipe))
+ return;
+
+ drm_WARN_ON(dev, pipe != crtc->pipe);
+
+ intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
+ if (!intel_fb) {
+ drm_dbg_kms(&dev_priv->drm, "failed to alloc fb\n");
+ return;
+ }
+
+ fb = &intel_fb->base;
+
+ fb->dev = dev;
+
+ val = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));
+
+ if (DISPLAY_VER(dev_priv) >= 4) {
+ if (val & DISP_TILED) {
+ plane_config->tiling = I915_TILING_X;
+ fb->modifier = I915_FORMAT_MOD_X_TILED;
+ }
+
+ if (val & DISP_ROTATE_180)
+ plane_config->rotation = DRM_MODE_ROTATE_180;
+ }
+
+ if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B &&
+ val & DISP_MIRROR)
+ plane_config->rotation |= DRM_MODE_REFLECT_X;
+
+ pixel_format = val & DISP_FORMAT_MASK;
+ fourcc = i9xx_format_to_fourcc(pixel_format);
+ fb->format = drm_format_info(fourcc);
+
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
+ offset = intel_de_read(dev_priv, DSPOFFSET(i9xx_plane));
+ base = intel_de_read(dev_priv, DSPSURF(i9xx_plane)) & DISP_ADDR_MASK;
+ } else if (DISPLAY_VER(dev_priv) >= 4) {
+ if (plane_config->tiling)
+ offset = intel_de_read(dev_priv,
+ DSPTILEOFF(i9xx_plane));
+ else
+ offset = intel_de_read(dev_priv,
+ DSPLINOFF(i9xx_plane));
+ base = intel_de_read(dev_priv, DSPSURF(i9xx_plane)) & DISP_ADDR_MASK;
+ } else {
+ base = intel_de_read(dev_priv, DSPADDR(i9xx_plane));
+ }
+ plane_config->base = base;
+
+ val = intel_de_read(dev_priv, PIPESRC(pipe));
+ fb->width = REG_FIELD_GET(PIPESRC_WIDTH_MASK, val) + 1;
+ fb->height = REG_FIELD_GET(PIPESRC_HEIGHT_MASK, val) + 1;
+
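+ /*
+ * The stride is in bytes; masking off the low six bits keeps
+ * the 64 byte aligned stride value the register holds.
+ */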
+ val = intel_de_read(dev_priv, DSPSTRIDE(i9xx_plane));
+ fb->pitches[0] = val & 0xffffffc0;
+
+ aligned_height = intel_fb_align_height(fb, 0, fb->height);
+
+ plane_config->size = fb->pitches[0] * aligned_height;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
+ crtc->base.name, plane->base.name, fb->width, fb->height,
+ fb->format->cpp[0] * 8, base, fb->pitches[0],
+ plane_config->size);
+
+ plane_config->fb = intel_fb;
+}
diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.h b/drivers/gpu/drm/i915/display/i9xx_plane.h
new file mode 100644
index 000000000..027b66053
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/i9xx_plane.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef _I9XX_PLANE_H_
+#define _I9XX_PLANE_H_
+
+#include <linux/types.h>
+
+enum pipe;
+struct drm_i915_private;
+struct intel_crtc;
+struct intel_initial_plane_config;
+struct intel_plane;
+struct intel_plane_state;
+
+unsigned int i965_plane_max_stride(struct intel_plane *plane,
+ u32 pixel_format, u64 modifier,
+ unsigned int rotation);
+int i9xx_check_plane_surface(struct intel_plane_state *plane_state);
+
+struct intel_plane *
+intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe);
+
+void i9xx_get_initial_plane_config(struct intel_crtc *crtc,
+ struct intel_initial_plane_config *plane_config);
+#endif
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
new file mode 100644
index 000000000..f7422f0cf
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -0,0 +1,2127 @@
+/*
+ * Copyright © 2018 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Madhav Chauhan <madhav.chauhan@intel.com>
+ * Jani Nikula <jani.nikula@intel.com>
+ */
+
+#include <drm/display/drm_dsc_helper.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_mipi_dsi.h>
+
+#include "icl_dsi.h"
+#include "icl_dsi_regs.h"
+#include "intel_atomic.h"
+#include "intel_backlight.h"
+#include "intel_backlight_regs.h"
+#include "intel_combo_phy.h"
+#include "intel_combo_phy_regs.h"
+#include "intel_connector.h"
+#include "intel_crtc.h"
+#include "intel_ddi.h"
+#include "intel_de.h"
+#include "intel_dsi.h"
+#include "intel_dsi_vbt.h"
+#include "intel_panel.h"
+#include "intel_vdsc.h"
+#include "skl_scaler.h"
+#include "skl_universal_plane.h"
+
+static int header_credits_available(struct drm_i915_private *dev_priv,
+ enum transcoder dsi_trans)
+{
+ return (intel_de_read(dev_priv, DSI_CMD_TXCTL(dsi_trans)) & FREE_HEADER_CREDIT_MASK)
+ >> FREE_HEADER_CREDIT_SHIFT;
+}
+
+static int payload_credits_available(struct drm_i915_private *dev_priv,
+ enum transcoder dsi_trans)
+{
+ return (intel_de_read(dev_priv, DSI_CMD_TXCTL(dsi_trans)) & FREE_PLOAD_CREDIT_MASK)
+ >> FREE_PLOAD_CREDIT_SHIFT;
+}
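+
+/*
+ * One header credit covers a single queued command header and one
+ * payload credit a single 4-byte payload DW; dsi_send_pkt_payld()
+ * below therefore bounds a payload at MAX_PLOAD_CREDIT * 4 = 256 bytes.
+ */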
+
+static bool wait_for_header_credits(struct drm_i915_private *dev_priv,
+ enum transcoder dsi_trans, int hdr_credit)
+{
+ if (wait_for_us(header_credits_available(dev_priv, dsi_trans) >=
+ hdr_credit, 100)) {
+ drm_err(&dev_priv->drm, "DSI header credits not released\n");
+ return false;
+ }
+
+ return true;
+}
+
+static bool wait_for_payload_credits(struct drm_i915_private *dev_priv,
+ enum transcoder dsi_trans, int payld_credit)
+{
+ if (wait_for_us(payload_credits_available(dev_priv, dsi_trans) >=
+ payld_credit, 100)) {
+ drm_err(&dev_priv->drm, "DSI payload credits not released\n");
+ return false;
+ }
+
+ return true;
+}
+
+static enum transcoder dsi_port_to_transcoder(enum port port)
+{
+ if (port == PORT_A)
+ return TRANSCODER_DSI_0;
+ else
+ return TRANSCODER_DSI_1;
+}
+
+static void wait_for_cmds_dispatched_to_panel(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ struct mipi_dsi_device *dsi;
+ enum port port;
+ enum transcoder dsi_trans;
+ int ret;
+
+ /* wait for header/payload credits to be released */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+ wait_for_header_credits(dev_priv, dsi_trans, MAX_HEADER_CREDIT);
+ wait_for_payload_credits(dev_priv, dsi_trans, MAX_PLOAD_CREDIT);
+ }
+
+ /* send nop DCS command */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi = intel_dsi->dsi_hosts[port]->device;
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+ dsi->channel = 0;
+ ret = mipi_dsi_dcs_nop(dsi);
+ if (ret < 0)
+ drm_err(&dev_priv->drm,
+ "error sending DCS NOP command\n");
+ }
+
+ /* wait for header credits to be released */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+ wait_for_header_credits(dev_priv, dsi_trans, MAX_HEADER_CREDIT);
+ }
+
+ /* wait for LP TX in progress bit to be cleared */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+ if (wait_for_us(!(intel_de_read(dev_priv, DSI_LP_MSG(dsi_trans)) &
+ LPTX_IN_PROGRESS), 20))
+ drm_err(&dev_priv->drm, "LPTX bit not cleared\n");
+ }
+}
+
+static int dsi_send_pkt_payld(struct intel_dsi_host *host,
+ const struct mipi_dsi_packet *packet)
+{
+ struct intel_dsi *intel_dsi = host->intel_dsi;
+ struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev);
+ enum transcoder dsi_trans = dsi_port_to_transcoder(host->port);
+ const u8 *data = packet->payload;
+ u32 len = packet->payload_length;
+ int i, j;
+
+ /* the payload queue can accept at most 256 bytes, so check the limit */
+ if (len > MAX_PLOAD_CREDIT * 4) {
+ drm_err(&i915->drm, "payload size exceeds max queue limit\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < len; i += 4) {
+ u32 tmp = 0;
+
+ if (!wait_for_payload_credits(i915, dsi_trans, 1))
+ return -EBUSY;
+
+ for (j = 0; j < min_t(u32, len - i, 4); j++)
+ tmp |= *data++ << 8 * j;
+
+ intel_de_write(i915, DSI_CMD_TXPYLD(dsi_trans), tmp);
+ }
+
+ return 0;
+}
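+
+/*
+ * Illustrative packing example: a 3-byte payload { 0xaa, 0xbb, 0xcc }
+ * is sent as a single DW write of 0x00ccbbaa, since byte j is shifted
+ * left by 8 * j.
+ */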
+
+static int dsi_send_pkt_hdr(struct intel_dsi_host *host,
+ const struct mipi_dsi_packet *packet,
+ bool enable_lpdt)
+{
+ struct intel_dsi *intel_dsi = host->intel_dsi;
+ struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
+ enum transcoder dsi_trans = dsi_port_to_transcoder(host->port);
+ u32 tmp;
+
+ if (!wait_for_header_credits(dev_priv, dsi_trans, 1))
+ return -EBUSY;
+
+ tmp = intel_de_read(dev_priv, DSI_CMD_TXHDR(dsi_trans));
+
+ if (packet->payload)
+ tmp |= PAYLOAD_PRESENT;
+ else
+ tmp &= ~PAYLOAD_PRESENT;
+
+ tmp &= ~VBLANK_FENCE;
+
+ if (enable_lpdt)
+ tmp |= LP_DATA_TRANSFER;
+ else
+ tmp &= ~LP_DATA_TRANSFER;
+
+ tmp &= ~(PARAM_WC_MASK | VC_MASK | DT_MASK);
+ tmp |= ((packet->header[0] & VC_MASK) << VC_SHIFT);
+ tmp |= ((packet->header[0] & DT_MASK) << DT_SHIFT);
+ tmp |= (packet->header[1] << PARAM_WC_LOWER_SHIFT);
+ tmp |= (packet->header[2] << PARAM_WC_UPPER_SHIFT);
+ intel_de_write(dev_priv, DSI_CMD_TXHDR(dsi_trans), tmp);
+
+ return 0;
+}
+
+void icl_dsi_frame_update(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 tmp, mode_flags;
+ enum port port;
+
+ mode_flags = crtc_state->mode_flags;
+
+ /*
+ * The PORT_A case also covers dual link; in dual link mode the
+ * frame update must be requested on DSI_0.
+ */
+ if (mode_flags & I915_MODE_FLAG_DSI_USE_TE0)
+ port = PORT_A;
+ else if (mode_flags & I915_MODE_FLAG_DSI_USE_TE1)
+ port = PORT_B;
+ else
+ return;
+
+ tmp = intel_de_read(dev_priv, DSI_CMD_FRMCTL(port));
+ tmp |= DSI_FRAME_UPDATE_REQUEST;
+ intel_de_write(dev_priv, DSI_CMD_FRMCTL(port), tmp);
+}
+
+static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ enum phy phy;
+ u32 tmp;
+ int lane;
+
+ for_each_dsi_phy(phy, intel_dsi->phys) {
+ /*
+ * Program voltage swing and pre-emphasis level values as per
+ * table in BSPEC under DDI buffer programming
+ */
+ tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy));
+ tmp &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK);
+ tmp |= SCALING_MODE_SEL(0x2);
+ tmp |= TAP2_DISABLE | TAP3_DISABLE;
+ tmp |= RTERM_SELECT(0x6);
+ intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), tmp);
+
+ tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_AUX(phy));
+ tmp &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK);
+ tmp |= SCALING_MODE_SEL(0x2);
+ tmp |= TAP2_DISABLE | TAP3_DISABLE;
+ tmp |= RTERM_SELECT(0x6);
+ intel_de_write(dev_priv, ICL_PORT_TX_DW5_AUX(phy), tmp);
+
+ tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_LN(0, phy));
+ tmp &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
+ RCOMP_SCALAR_MASK);
+ tmp |= SWING_SEL_UPPER(0x2);
+ tmp |= SWING_SEL_LOWER(0x2);
+ tmp |= RCOMP_SCALAR(0x98);
+ intel_de_write(dev_priv, ICL_PORT_TX_DW2_GRP(phy), tmp);
+
+ tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_AUX(phy));
+ tmp &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
+ RCOMP_SCALAR_MASK);
+ tmp |= SWING_SEL_UPPER(0x2);
+ tmp |= SWING_SEL_LOWER(0x2);
+ tmp |= RCOMP_SCALAR(0x98);
+ intel_de_write(dev_priv, ICL_PORT_TX_DW2_AUX(phy), tmp);
+
+ tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW4_AUX(phy));
+ tmp &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
+ CURSOR_COEFF_MASK);
+ tmp |= POST_CURSOR_1(0x0);
+ tmp |= POST_CURSOR_2(0x0);
+ tmp |= CURSOR_COEFF(0x3f);
+ intel_de_write(dev_priv, ICL_PORT_TX_DW4_AUX(phy), tmp);
+
+ for (lane = 0; lane <= 3; lane++) {
+ /* Bspec: must not use GRP register for write */
+ tmp = intel_de_read(dev_priv,
+ ICL_PORT_TX_DW4_LN(lane, phy));
+ tmp &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
+ CURSOR_COEFF_MASK);
+ tmp |= POST_CURSOR_1(0x0);
+ tmp |= POST_CURSOR_2(0x0);
+ tmp |= CURSOR_COEFF(0x3f);
+ intel_de_write(dev_priv,
+ ICL_PORT_TX_DW4_LN(lane, phy), tmp);
+ }
+ }
+}
+
+static void configure_dual_link_mode(struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ i915_reg_t dss_ctl1_reg, dss_ctl2_reg;
+ u32 dss_ctl1;
+
+ /* FIXME: Move all DSS handling to intel_vdsc.c */
+ if (DISPLAY_VER(dev_priv) >= 12) {
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+
+ dss_ctl1_reg = ICL_PIPE_DSS_CTL1(crtc->pipe);
+ dss_ctl2_reg = ICL_PIPE_DSS_CTL2(crtc->pipe);
+ } else {
+ dss_ctl1_reg = DSS_CTL1;
+ dss_ctl2_reg = DSS_CTL2;
+ }
+
+ dss_ctl1 = intel_de_read(dev_priv, dss_ctl1_reg);
+ dss_ctl1 |= SPLITTER_ENABLE;
+ dss_ctl1 &= ~OVERLAP_PIXELS_MASK;
+ dss_ctl1 |= OVERLAP_PIXELS(intel_dsi->pixel_overlap);
+
+ if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) {
+ const struct drm_display_mode *adjusted_mode =
+ &pipe_config->hw.adjusted_mode;
+ u32 dss_ctl2;
+ u16 hactive = adjusted_mode->crtc_hdisplay;
+ u16 dl_buffer_depth;
+
+ dss_ctl1 &= ~DUAL_LINK_MODE_INTERLEAVE;
+ dl_buffer_depth = hactive / 2 + intel_dsi->pixel_overlap;
+
+ if (dl_buffer_depth > MAX_DL_BUFFER_TARGET_DEPTH)
+ drm_err(&dev_priv->drm,
+ "DL buffer depth exceed max value\n");
+
+ dss_ctl1 &= ~LEFT_DL_BUF_TARGET_DEPTH_MASK;
+ dss_ctl1 |= LEFT_DL_BUF_TARGET_DEPTH(dl_buffer_depth);
+ dss_ctl2 = intel_de_read(dev_priv, dss_ctl2_reg);
+ dss_ctl2 &= ~RIGHT_DL_BUF_TARGET_DEPTH_MASK;
+ dss_ctl2 |= RIGHT_DL_BUF_TARGET_DEPTH(dl_buffer_depth);
+ intel_de_write(dev_priv, dss_ctl2_reg, dss_ctl2);
+ } else {
+ /* Interleave */
+ dss_ctl1 |= DUAL_LINK_MODE_INTERLEAVE;
+ }
+
+ intel_de_write(dev_priv, dss_ctl1_reg, dss_ctl1);
+}
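+
+/*
+ * Front-back split with hypothetical numbers: a 1920 pixel wide mode
+ * and a 4 pixel overlap give dl_buffer_depth = 1920 / 2 + 4 = 964,
+ * programmed into both the left and right DL buffer target depths.
+ */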
+
+/* aka DSI 8X clock */
+static int afe_clk(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ int bpp;
+
+ if (crtc_state->dsc.compression_enable)
+ bpp = crtc_state->dsc.compressed_bpp;
+ else
+ bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
+
+ return DIV_ROUND_CLOSEST(intel_dsi->pclk * bpp, intel_dsi->lane_count);
+}
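+
+/*
+ * E.g. (illustrative numbers only): uncompressed RGB888 (24 bpp) at a
+ * 100000 kHz pixel clock on 4 lanes gives an 8X clock of
+ * 100000 * 24 / 4 = 600000 kHz.
+ */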
+
+static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ enum port port;
+ int afe_clk_khz;
+ int theo_word_clk, act_word_clk;
+ u32 esc_clk_div_m, esc_clk_div_m_phy;
+
+ afe_clk_khz = afe_clk(encoder, crtc_state);
+
+ if (IS_ALDERLAKE_S(dev_priv) || IS_ALDERLAKE_P(dev_priv)) {
+ theo_word_clk = DIV_ROUND_UP(afe_clk_khz, 8 * DSI_MAX_ESC_CLK);
+ act_word_clk = max(3, theo_word_clk + (theo_word_clk + 1) % 2);
+ esc_clk_div_m = act_word_clk * 8;
+ esc_clk_div_m_phy = (act_word_clk - 1) / 2;
+ } else {
+ esc_clk_div_m = DIV_ROUND_UP(afe_clk_khz, DSI_MAX_ESC_CLK);
+ }
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ intel_de_write(dev_priv, ICL_DSI_ESC_CLK_DIV(port),
+ esc_clk_div_m & ICL_ESC_CLK_DIV_MASK);
+ intel_de_posting_read(dev_priv, ICL_DSI_ESC_CLK_DIV(port));
+ }
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ intel_de_write(dev_priv, ICL_DPHY_ESC_CLK_DIV(port),
+ esc_clk_div_m & ICL_ESC_CLK_DIV_MASK);
+ intel_de_posting_read(dev_priv, ICL_DPHY_ESC_CLK_DIV(port));
+ }
+
+ if (IS_ALDERLAKE_S(dev_priv) || IS_ALDERLAKE_P(dev_priv)) {
+ for_each_dsi_port(port, intel_dsi->ports) {
+ intel_de_write(dev_priv, ADL_MIPIO_DW(port, 8),
+ esc_clk_div_m_phy & TX_ESC_CLK_DIV_PHY);
+ intel_de_posting_read(dev_priv, ADL_MIPIO_DW(port, 8));
+ }
+ }
+}
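+
+/*
+ * ADL example with made-up numbers: a theoretical word clock of 4 is
+ * rounded up to the next odd value, 4 + (4 + 1) % 2 = 5, giving
+ * esc_clk_div_m = 5 * 8 = 40 and esc_clk_div_m_phy = (5 - 1) / 2 = 2.
+ */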
+
+static void get_dsi_io_power_domains(struct drm_i915_private *dev_priv,
+ struct intel_dsi *intel_dsi)
+{
+ enum port port;
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ drm_WARN_ON(&dev_priv->drm, intel_dsi->io_wakeref[port]);
+ intel_dsi->io_wakeref[port] =
+ intel_display_power_get(dev_priv,
+ port == PORT_A ?
+ POWER_DOMAIN_PORT_DDI_IO_A :
+ POWER_DOMAIN_PORT_DDI_IO_B);
+ }
+}
+
+static void gen11_dsi_enable_io_power(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ enum port port;
+ u32 tmp;
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ tmp = intel_de_read(dev_priv, ICL_DSI_IO_MODECTL(port));
+ tmp |= COMBO_PHY_MODE_DSI;
+ intel_de_write(dev_priv, ICL_DSI_IO_MODECTL(port), tmp);
+ }
+
+ get_dsi_io_power_domains(dev_priv, intel_dsi);
+}
+
+static void gen11_dsi_power_up_lanes(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ enum phy phy;
+
+ for_each_dsi_phy(phy, intel_dsi->phys)
+ intel_combo_phy_power_up_lanes(dev_priv, phy, true,
+ intel_dsi->lane_count, false);
+}
+
+static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ enum phy phy;
+ u32 tmp;
+ int lane;
+
+ /* Step 4b(i) set loadgen select for transmit and aux lanes */
+ for_each_dsi_phy(phy, intel_dsi->phys) {
+ tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW4_AUX(phy));
+ tmp &= ~LOADGEN_SELECT;
+ intel_de_write(dev_priv, ICL_PORT_TX_DW4_AUX(phy), tmp);
+ for (lane = 0; lane <= 3; lane++) {
+ tmp = intel_de_read(dev_priv,
+ ICL_PORT_TX_DW4_LN(lane, phy));
+ tmp &= ~LOADGEN_SELECT;
+ if (lane != 2)
+ tmp |= LOADGEN_SELECT;
+ intel_de_write(dev_priv,
+ ICL_PORT_TX_DW4_LN(lane, phy), tmp);
+ }
+ }
+
+ /* Step 4b(ii) set latency optimization for transmit and aux lanes */
+ for_each_dsi_phy(phy, intel_dsi->phys) {
+ tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_AUX(phy));
+ tmp &= ~FRC_LATENCY_OPTIM_MASK;
+ tmp |= FRC_LATENCY_OPTIM_VAL(0x5);
+ intel_de_write(dev_priv, ICL_PORT_TX_DW2_AUX(phy), tmp);
+ tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_LN(0, phy));
+ tmp &= ~FRC_LATENCY_OPTIM_MASK;
+ tmp |= FRC_LATENCY_OPTIM_VAL(0x5);
+ intel_de_write(dev_priv, ICL_PORT_TX_DW2_GRP(phy), tmp);
+
+ /* For EHL, TGL, set latency optimization for PCS_DW1 lanes */
+ if (IS_JSL_EHL(dev_priv) || (DISPLAY_VER(dev_priv) >= 12)) {
+ tmp = intel_de_read(dev_priv,
+ ICL_PORT_PCS_DW1_AUX(phy));
+ tmp &= ~LATENCY_OPTIM_MASK;
+ tmp |= LATENCY_OPTIM_VAL(0);
+ intel_de_write(dev_priv, ICL_PORT_PCS_DW1_AUX(phy),
+ tmp);
+
+ tmp = intel_de_read(dev_priv,
+ ICL_PORT_PCS_DW1_LN(0, phy));
+ tmp &= ~LATENCY_OPTIM_MASK;
+ tmp |= LATENCY_OPTIM_VAL(0x1);
+ intel_de_write(dev_priv, ICL_PORT_PCS_DW1_GRP(phy),
+ tmp);
+ }
+ }
+}
+
+static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ u32 tmp;
+ enum phy phy;
+
+ /* clear common keeper enable bit */
+ for_each_dsi_phy(phy, intel_dsi->phys) {
+ tmp = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_LN(0, phy));
+ tmp &= ~COMMON_KEEPER_EN;
+ intel_de_write(dev_priv, ICL_PORT_PCS_DW1_GRP(phy), tmp);
+ tmp = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_AUX(phy));
+ tmp &= ~COMMON_KEEPER_EN;
+ intel_de_write(dev_priv, ICL_PORT_PCS_DW1_AUX(phy), tmp);
+ }
+
+ /*
+ * Set SUS Clock Config bitfield to 11b
+ * Note: loadgen select programming is done
+ * as part of the lane phy sequence configuration
+ */
+ for_each_dsi_phy(phy, intel_dsi->phys) {
+ tmp = intel_de_read(dev_priv, ICL_PORT_CL_DW5(phy));
+ tmp |= SUS_CLOCK_CONFIG;
+ intel_de_write(dev_priv, ICL_PORT_CL_DW5(phy), tmp);
+ }
+
+ /* Clear training enable to change swing values */
+ for_each_dsi_phy(phy, intel_dsi->phys) {
+ tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy));
+ tmp &= ~TX_TRAINING_EN;
+ intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), tmp);
+ tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_AUX(phy));
+ tmp &= ~TX_TRAINING_EN;
+ intel_de_write(dev_priv, ICL_PORT_TX_DW5_AUX(phy), tmp);
+ }
+
+ /* Program swing and de-emphasis */
+ dsi_program_swing_and_deemphasis(encoder);
+
+ /* Set training enable to trigger update */
+ for_each_dsi_phy(phy, intel_dsi->phys) {
+ tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy));
+ tmp |= TX_TRAINING_EN;
+ intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), tmp);
+ tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_AUX(phy));
+ tmp |= TX_TRAINING_EN;
+ intel_de_write(dev_priv, ICL_PORT_TX_DW5_AUX(phy), tmp);
+ }
+}
+
+static void gen11_dsi_enable_ddi_buffer(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ u32 tmp;
+ enum port port;
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ tmp = intel_de_read(dev_priv, DDI_BUF_CTL(port));
+ tmp |= DDI_BUF_CTL_ENABLE;
+ intel_de_write(dev_priv, DDI_BUF_CTL(port), tmp);
+
+ if (wait_for_us(!(intel_de_read(dev_priv, DDI_BUF_CTL(port)) &
+ DDI_BUF_IS_IDLE),
+ 500))
+ drm_err(&dev_priv->drm, "DDI port:%c buffer idle\n",
+ port_name(port));
+ }
+}
+
+static void
+gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ u32 tmp;
+ enum port port;
+ enum phy phy;
+
+ /* Program T-INIT master registers */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ tmp = intel_de_read(dev_priv, ICL_DSI_T_INIT_MASTER(port));
+ tmp &= ~DSI_T_INIT_MASTER_MASK;
+ tmp |= intel_dsi->init_count;
+ intel_de_write(dev_priv, ICL_DSI_T_INIT_MASTER(port), tmp);
+ }
+
+ /* Program DPHY clock lanes timings */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ intel_de_write(dev_priv, DPHY_CLK_TIMING_PARAM(port),
+ intel_dsi->dphy_reg);
+
+ /* shadow register inside display core */
+ intel_de_write(dev_priv, DSI_CLK_TIMING_PARAM(port),
+ intel_dsi->dphy_reg);
+ }
+
+ /* Program DPHY data lanes timings */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ intel_de_write(dev_priv, DPHY_DATA_TIMING_PARAM(port),
+ intel_dsi->dphy_data_lane_reg);
+
+ /* shadow register inside display core */
+ intel_de_write(dev_priv, DSI_DATA_TIMING_PARAM(port),
+ intel_dsi->dphy_data_lane_reg);
+ }
+
+ /*
+ * If the DSI link is operating at or below 800 MHz,
+ * TA_SURE should be overridden and programmed to
+ * a value of '0' inside TA_PARAM_REGISTERS; otherwise
+ * leave all fields at their HW default values.
+ */
+ if (DISPLAY_VER(dev_priv) == 11) {
+ if (afe_clk(encoder, crtc_state) <= 800000) {
+ for_each_dsi_port(port, intel_dsi->ports) {
+ tmp = intel_de_read(dev_priv,
+ DPHY_TA_TIMING_PARAM(port));
+ tmp &= ~TA_SURE_MASK;
+ tmp |= TA_SURE_OVERRIDE | TA_SURE(0);
+ intel_de_write(dev_priv,
+ DPHY_TA_TIMING_PARAM(port),
+ tmp);
+
+ /* shadow register inside display core */
+ tmp = intel_de_read(dev_priv,
+ DSI_TA_TIMING_PARAM(port));
+ tmp &= ~TA_SURE_MASK;
+ tmp |= TA_SURE_OVERRIDE | TA_SURE(0);
+ intel_de_write(dev_priv,
+ DSI_TA_TIMING_PARAM(port), tmp);
+ }
+ }
+ }
+
+ if (IS_JSL_EHL(dev_priv)) {
+ for_each_dsi_phy(phy, intel_dsi->phys) {
+ tmp = intel_de_read(dev_priv, ICL_DPHY_CHKN(phy));
+ tmp |= ICL_DPHY_CHKN_AFE_OVER_PPI_STRAP;
+ intel_de_write(dev_priv, ICL_DPHY_CHKN(phy), tmp);
+ }
+ }
+}
+
+static void gen11_dsi_gate_clocks(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ u32 tmp;
+ enum phy phy;
+
+ mutex_lock(&dev_priv->display.dpll.lock);
+ tmp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0);
+ for_each_dsi_phy(phy, intel_dsi->phys)
+ tmp |= ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
+
+ intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, tmp);
+ mutex_unlock(&dev_priv->display.dpll.lock);
+}
+
+static void gen11_dsi_ungate_clocks(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ u32 tmp;
+ enum phy phy;
+
+ mutex_lock(&dev_priv->display.dpll.lock);
+ tmp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0);
+ for_each_dsi_phy(phy, intel_dsi->phys)
+ tmp &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
+
+ intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, tmp);
+ mutex_unlock(&dev_priv->display.dpll.lock);
+}
+
+static bool gen11_dsi_is_clock_enabled(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ bool clock_enabled = false;
+ enum phy phy;
+ u32 tmp;
+
+ tmp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0);
+
+ for_each_dsi_phy(phy, intel_dsi->phys) {
+ if (!(tmp & ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy)))
+ clock_enabled = true;
+ }
+
+ return clock_enabled;
+}
+
+static void gen11_dsi_map_pll(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ enum phy phy;
+ u32 val;
+
+ mutex_lock(&dev_priv->display.dpll.lock);
+
+ val = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0);
+ for_each_dsi_phy(phy, intel_dsi->phys) {
+ val &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
+ val |= ICL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy);
+ }
+ intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val);
+
+ for_each_dsi_phy(phy, intel_dsi->phys) {
+ val &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
+ }
+ intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val);
+
+ intel_de_posting_read(dev_priv, ICL_DPCLKA_CFGCR0);
+
+ mutex_unlock(&dev_priv->display.dpll.lock);
+}
+
+static void
+gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+ enum pipe pipe = crtc->pipe;
+ u32 tmp;
+ enum port port;
+ enum transcoder dsi_trans;
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+ tmp = intel_de_read(dev_priv, DSI_TRANS_FUNC_CONF(dsi_trans));
+
+ if (intel_dsi->eotp_pkt)
+ tmp &= ~EOTP_DISABLED;
+ else
+ tmp |= EOTP_DISABLED;
+
+ /* enable link calibration if freq >= 1.5 Gbps */
+ if (afe_clk(encoder, pipe_config) >= 1500 * 1000) {
+ tmp &= ~LINK_CALIBRATION_MASK;
+ tmp |= CALIBRATION_ENABLED_INITIAL_ONLY;
+ }
+
+ /* configure continuous clock */
+ tmp &= ~CONTINUOUS_CLK_MASK;
+ if (intel_dsi->clock_stop)
+ tmp |= CLK_ENTER_LP_AFTER_DATA;
+ else
+ tmp |= CLK_HS_CONTINUOUS;
+
+ /* configure buffer threshold limit to minimum */
+ tmp &= ~PIX_BUF_THRESHOLD_MASK;
+ tmp |= PIX_BUF_THRESHOLD_1_4;
+
+ /* set virtual channel to '0' */
+ tmp &= ~PIX_VIRT_CHAN_MASK;
+ tmp |= PIX_VIRT_CHAN(0);
+
+ /* program BGR transmission */
+ if (intel_dsi->bgr_enabled)
+ tmp |= BGR_TRANSMISSION;
+
+ /* select pixel format */
+ tmp &= ~PIX_FMT_MASK;
+ if (pipe_config->dsc.compression_enable) {
+ tmp |= PIX_FMT_COMPRESSED;
+ } else {
+ switch (intel_dsi->pixel_format) {
+ default:
+ MISSING_CASE(intel_dsi->pixel_format);
+ fallthrough;
+ case MIPI_DSI_FMT_RGB565:
+ tmp |= PIX_FMT_RGB565;
+ break;
+ case MIPI_DSI_FMT_RGB666_PACKED:
+ tmp |= PIX_FMT_RGB666_PACKED;
+ break;
+ case MIPI_DSI_FMT_RGB666:
+ tmp |= PIX_FMT_RGB666_LOOSE;
+ break;
+ case MIPI_DSI_FMT_RGB888:
+ tmp |= PIX_FMT_RGB888;
+ break;
+ }
+ }
+
+ if (DISPLAY_VER(dev_priv) >= 12) {
+ if (is_vid_mode(intel_dsi))
+ tmp |= BLANKING_PACKET_ENABLE;
+ }
+
+ /* program DSI operation mode */
+ if (is_vid_mode(intel_dsi)) {
+ tmp &= ~OP_MODE_MASK;
+ switch (intel_dsi->video_mode) {
+ default:
+ MISSING_CASE(intel_dsi->video_mode);
+ fallthrough;
+ case NON_BURST_SYNC_EVENTS:
+ tmp |= VIDEO_MODE_SYNC_EVENT;
+ break;
+ case NON_BURST_SYNC_PULSE:
+ tmp |= VIDEO_MODE_SYNC_PULSE;
+ break;
+ }
+ } else {
+ /*
+ * FIXME: Retrieve this info from VBT.
+ * As per the spec, when the DSI transcoder operates
+ * in TE GATE mode, TE comes from a GPIO, which is
+ * the UTIL PIN for DSI 0.
+ * We also assume that this GPIO is not used for
+ * any other purpose.
+ */
+ tmp &= ~OP_MODE_MASK;
+ tmp |= CMD_MODE_TE_GATE;
+ tmp |= TE_SOURCE_GPIO;
+ }
+
+ intel_de_write(dev_priv, DSI_TRANS_FUNC_CONF(dsi_trans), tmp);
+ }
+
+ /* enable port sync mode if dual link */
+ if (intel_dsi->dual_link) {
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+ tmp = intel_de_read(dev_priv,
+ TRANS_DDI_FUNC_CTL2(dsi_trans));
+ tmp |= PORT_SYNC_MODE_ENABLE;
+ intel_de_write(dev_priv,
+ TRANS_DDI_FUNC_CTL2(dsi_trans), tmp);
+ }
+
+ /* configure stream splitting */
+ configure_dual_link_mode(encoder, pipe_config);
+ }
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+
+ /* select data lane width */
+ tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(dsi_trans));
+ tmp &= ~DDI_PORT_WIDTH_MASK;
+ tmp |= DDI_PORT_WIDTH(intel_dsi->lane_count);
+
+ /* select input pipe */
+ tmp &= ~TRANS_DDI_EDP_INPUT_MASK;
+ switch (pipe) {
+ default:
+ MISSING_CASE(pipe);
+ fallthrough;
+ case PIPE_A:
+ tmp |= TRANS_DDI_EDP_INPUT_A_ON;
+ break;
+ case PIPE_B:
+ tmp |= TRANS_DDI_EDP_INPUT_B_ONOFF;
+ break;
+ case PIPE_C:
+ tmp |= TRANS_DDI_EDP_INPUT_C_ONOFF;
+ break;
+ case PIPE_D:
+ tmp |= TRANS_DDI_EDP_INPUT_D_ONOFF;
+ break;
+ }
+
+ /* enable DDI buffer */
+ tmp |= TRANS_DDI_FUNC_ENABLE;
+ intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(dsi_trans), tmp);
+ }
+
+ /* wait for link ready */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+ if (wait_for_us((intel_de_read(dev_priv, DSI_TRANS_FUNC_CONF(dsi_trans)) &
+ LINK_READY), 2500))
+ drm_err(&dev_priv->drm, "DSI link not ready\n");
+ }
+}
+
+static void
+gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+ enum port port;
+ enum transcoder dsi_trans;
+ /* horizontal timings */
+ u16 htotal, hactive, hsync_start, hsync_end, hsync_size;
+ u16 hback_porch;
+ /* vertical timings */
+ u16 vtotal, vactive, vsync_start, vsync_end, vsync_shift;
+ int mul = 1, div = 1;
+
+ /*
+ * Adjust horizontal timings (htotal, hsync_start, hsync_end) to account
+ * for slower link speed if DSC is enabled.
+ *
+ * The compression frequency ratio is the ratio between compressed and
+ * non-compressed link speeds, and simplifies down to the ratio between
+ * compressed and non-compressed bpp.
+ */
+ if (crtc_state->dsc.compression_enable) {
+ mul = crtc_state->dsc.compressed_bpp;
+ div = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
+ }
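+
+ /*
+ * E.g. compressing RGB888 (24 bpp) down to 8 bpp gives
+ * mul / div = 8 / 24, i.e. the horizontal timings shrink to
+ * roughly a third of their uncompressed values.
+ */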
+
+ hactive = adjusted_mode->crtc_hdisplay;
+
+ if (is_vid_mode(intel_dsi))
+ htotal = DIV_ROUND_UP(adjusted_mode->crtc_htotal * mul, div);
+ else
+ htotal = DIV_ROUND_UP((hactive + 160) * mul, div);
+
+ hsync_start = DIV_ROUND_UP(adjusted_mode->crtc_hsync_start * mul, div);
+ hsync_end = DIV_ROUND_UP(adjusted_mode->crtc_hsync_end * mul, div);
+ hsync_size = hsync_end - hsync_start;
+ hback_porch = (adjusted_mode->crtc_htotal -
+ adjusted_mode->crtc_hsync_end);
+ vactive = adjusted_mode->crtc_vdisplay;
+
+ if (is_vid_mode(intel_dsi)) {
+ vtotal = adjusted_mode->crtc_vtotal;
+ } else {
+ int bpp, line_time_us, byte_clk_period_ns;
+
+ if (crtc_state->dsc.compression_enable)
+ bpp = crtc_state->dsc.compressed_bpp;
+ else
+ bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
+
+ byte_clk_period_ns = 1000000 / afe_clk(encoder, crtc_state);
+ line_time_us = (htotal * (bpp / 8) * byte_clk_period_ns) / (1000 * intel_dsi->lane_count);
+ vtotal = vactive + DIV_ROUND_UP(400, line_time_us);
+ }
+ vsync_start = adjusted_mode->crtc_vsync_start;
+ vsync_end = adjusted_mode->crtc_vsync_end;
+ vsync_shift = hsync_start - htotal / 2;
+
+ if (intel_dsi->dual_link) {
+ hactive /= 2;
+ if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK)
+ hactive += intel_dsi->pixel_overlap;
+ htotal /= 2;
+ }
+
+ /* minimum hactive as per bspec: 256 pixels */
+ if (adjusted_mode->crtc_hdisplay < 256)
+ drm_err(&dev_priv->drm, "hactive is less then 256 pixels\n");
+
+ /* if RGB666 format, then hactive must be a multiple of 4 pixels */
+ if (intel_dsi->pixel_format == MIPI_DSI_FMT_RGB666 && hactive % 4 != 0)
+ drm_err(&dev_priv->drm,
+ "hactive pixels are not a multiple of 4\n");
+
+ /* program TRANS_HTOTAL register */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+ intel_de_write(dev_priv, HTOTAL(dsi_trans),
+ (hactive - 1) | ((htotal - 1) << 16));
+ }
+
+ /* TRANS_HSYNC register to be programmed only for video mode */
+ if (is_vid_mode(intel_dsi)) {
+ if (intel_dsi->video_mode == NON_BURST_SYNC_PULSE) {
+ /* BSPEC: hsync size should be at least 16 pixels */
+ if (hsync_size < 16)
+ drm_err(&dev_priv->drm,
+ "hsync size < 16 pixels\n");
+ }
+
+ if (hback_porch < 16)
+ drm_err(&dev_priv->drm, "hback porch < 16 pixels\n");
+
+ if (intel_dsi->dual_link) {
+ hsync_start /= 2;
+ hsync_end /= 2;
+ }
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+ intel_de_write(dev_priv, HSYNC(dsi_trans),
+ (hsync_start - 1) | ((hsync_end - 1) << 16));
+ }
+ }
+
+ /* program TRANS_VTOTAL register */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+ /*
+ * FIXME: Programming this by assuming progressive mode, since
+ * non-interlaced info from VBT is not saved inside
+ * struct drm_display_mode.
+ * For interlaced mode: program the required pixel count minus 2.
+ */
+ intel_de_write(dev_priv, VTOTAL(dsi_trans),
+ (vactive - 1) | ((vtotal - 1) << 16));
+ }
+
+ if (vsync_end < vsync_start || vsync_end > vtotal)
+ drm_err(&dev_priv->drm, "Invalid vsync_end value\n");
+
+ if (vsync_start < vactive)
+ drm_err(&dev_priv->drm, "vsync_start less than vactive\n");
+
+ /* program TRANS_VSYNC register for video mode only */
+ if (is_vid_mode(intel_dsi)) {
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+ intel_de_write(dev_priv, VSYNC(dsi_trans),
+ (vsync_start - 1) | ((vsync_end - 1) << 16));
+ }
+ }
+
+ /*
+ * Program the TRANS_VSYNCSHIFT register.
+ * FIXME: It has to be programmed only for video and interlaced
+ * modes. Put the check condition here once the interlaced
+ * info is available, as described above.
+ */
+ if (is_vid_mode(intel_dsi)) {
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+ intel_de_write(dev_priv, VSYNCSHIFT(dsi_trans),
+ vsync_shift);
+ }
+ }
+
+ /* program TRANS_VBLANK register, should be same as vtotal programmed */
+ if (DISPLAY_VER(dev_priv) >= 12) {
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+ intel_de_write(dev_priv, VBLANK(dsi_trans),
+ (vactive - 1) | ((vtotal - 1) << 16));
+ }
+ }
+}
+
+static void gen11_dsi_enable_transcoder(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ enum port port;
+ enum transcoder dsi_trans;
+ u32 tmp;
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+ tmp = intel_de_read(dev_priv, PIPECONF(dsi_trans));
+ tmp |= PIPECONF_ENABLE;
+ intel_de_write(dev_priv, PIPECONF(dsi_trans), tmp);
+
+ /* wait for transcoder to be enabled */
+ if (intel_de_wait_for_set(dev_priv, PIPECONF(dsi_trans),
+ PIPECONF_STATE_ENABLE, 10))
+ drm_err(&dev_priv->drm,
+ "DSI transcoder not enabled\n");
+ }
+}
+
+static void gen11_dsi_setup_timeouts(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ enum port port;
+ enum transcoder dsi_trans;
+ u32 tmp, hs_tx_timeout, lp_rx_timeout, ta_timeout, divisor, mul;
+
+ /*
+ * escape clock count calculation:
+ * BYTE_CLK_COUNT = TIME_NS/(8 * UI)
+ * UI (nsec) = (10^6)/Bitrate
+ * TIME_NS = (BYTE_CLK_COUNT * 8 * 10^6)/ Bitrate
+ * ESCAPE_CLK_COUNT = TIME_NS/ESC_CLK_NS
+ */
+ divisor = intel_dsi_tlpx_ns(intel_dsi) * afe_clk(encoder, crtc_state) * 1000;
+ mul = 8 * 1000000;
+ hs_tx_timeout = DIV_ROUND_UP(intel_dsi->hs_tx_timeout * mul,
+ divisor);
+ lp_rx_timeout = DIV_ROUND_UP(intel_dsi->lp_rx_timeout * mul, divisor);
+ ta_timeout = DIV_ROUND_UP(intel_dsi->turn_arnd_val * mul, divisor);
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+
+ /* program hst_tx_timeout */
+ tmp = intel_de_read(dev_priv, DSI_HSTX_TO(dsi_trans));
+ tmp &= ~HSTX_TIMEOUT_VALUE_MASK;
+ tmp |= HSTX_TIMEOUT_VALUE(hs_tx_timeout);
+ intel_de_write(dev_priv, DSI_HSTX_TO(dsi_trans), tmp);
+
+ /* FIXME: DSI_CALIB_TO */
+
+ /* program lp_rx_host timeout */
+ tmp = intel_de_read(dev_priv, DSI_LPRX_HOST_TO(dsi_trans));
+ tmp &= ~LPRX_TIMEOUT_VALUE_MASK;
+ tmp |= LPRX_TIMEOUT_VALUE(lp_rx_timeout);
+ intel_de_write(dev_priv, DSI_LPRX_HOST_TO(dsi_trans), tmp);
+
+ /* FIXME: DSI_PWAIT_TO */
+
+ /* program turn around timeout */
+ tmp = intel_de_read(dev_priv, DSI_TA_TO(dsi_trans));
+ tmp &= ~TA_TIMEOUT_VALUE_MASK;
+ tmp |= TA_TIMEOUT_VALUE(ta_timeout);
+ intel_de_write(dev_priv, DSI_TA_TO(dsi_trans), tmp);
+ }
+}
+
+static void gen11_dsi_config_util_pin(struct intel_encoder *encoder,
+ bool enable)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ u32 tmp;
+
+ /*
+ * The UTIL pin is used as the TE input for DSI0;
+ * for dual link/DSI1, TE comes from the slave DSI1
+ * through a GPIO.
+ */
+ if (is_vid_mode(intel_dsi) || (intel_dsi->ports & BIT(PORT_B)))
+ return;
+
+ tmp = intel_de_read(dev_priv, UTIL_PIN_CTL);
+
+ if (enable) {
+ tmp |= UTIL_PIN_DIRECTION_INPUT;
+ tmp |= UTIL_PIN_ENABLE;
+ } else {
+ tmp &= ~UTIL_PIN_ENABLE;
+ }
+ intel_de_write(dev_priv, UTIL_PIN_CTL, tmp);
+}
+
+static void
+gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ /* step 4a: power up all lanes of the DDI used by DSI */
+ gen11_dsi_power_up_lanes(encoder);
+
+ /* step 4b: configure lane sequencing of the Combo-PHY transmitters */
+ gen11_dsi_config_phy_lanes_sequence(encoder);
+
+ /* step 4c: configure voltage swing and skew */
+ gen11_dsi_voltage_swing_program_seq(encoder);
+
+ /* enable DDI buffer */
+ gen11_dsi_enable_ddi_buffer(encoder);
+
+ /* setup D-PHY timings */
+ gen11_dsi_setup_dphy_timings(encoder, crtc_state);
+
+ /* Since transcoder is configured to take events from GPIO */
+ gen11_dsi_config_util_pin(encoder, true);
+
+ /* step 4h: setup DSI protocol timeouts */
+ gen11_dsi_setup_timeouts(encoder, crtc_state);
+
+ /* Step (4h, 4i, 4j, 4k): Configure transcoder */
+ gen11_dsi_configure_transcoder(encoder, crtc_state);
+
+ /* Step 4l: Gate DDI clocks */
+ gen11_dsi_gate_clocks(encoder);
+}
+
+static void gen11_dsi_powerup_panel(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ struct mipi_dsi_device *dsi;
+ enum port port;
+ enum transcoder dsi_trans;
+ u32 tmp;
+ int ret;
+
+ /* set maximum return packet size */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+
+ /*
+ * FIXME: This uses the number of DWs currently in the payload
+ * receive queue. This is probably not what we want here.
+ */
+ tmp = intel_de_read(dev_priv, DSI_CMD_RXCTL(dsi_trans));
+ tmp &= NUMBER_RX_PLOAD_DW_MASK;
+ /* multiply "Number Rx Payload DW" by 4 to get max value */
+ tmp = tmp * 4;
+ dsi = intel_dsi->dsi_hosts[port]->device;
+ ret = mipi_dsi_set_maximum_return_packet_size(dsi, tmp);
+ if (ret < 0)
+ drm_err(&dev_priv->drm,
+ "error setting max return pkt size%d\n", tmp);
+ }
+
+ /* panel power on related mipi dsi vbt sequences */
+ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_ON);
+ msleep(intel_dsi->panel_on_delay);
+ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET);
+ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_INIT_OTP);
+ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON);
+
+ /* ensure all panel commands dispatched before enabling transcoder */
+ wait_for_cmds_dispatched_to_panel(encoder);
+}
+
+static void gen11_dsi_pre_pll_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ /* step2: enable IO power */
+ gen11_dsi_enable_io_power(encoder);
+
+ /* step3: enable DSI PLL */
+ gen11_dsi_program_esc_clk_div(encoder, crtc_state);
+}
+
+static void gen11_dsi_pre_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
+{
+ /* step3b */
+ gen11_dsi_map_pll(encoder, pipe_config);
+
+ /* step4: enable DSI port and DPHY */
+ gen11_dsi_enable_port_and_phy(encoder, pipe_config);
+
+ /* step5: program and powerup panel */
+ gen11_dsi_powerup_panel(encoder);
+
+ intel_dsc_dsi_pps_write(encoder, pipe_config);
+
+ /* step6c: configure transcoder timings */
+ gen11_dsi_set_transcoder_timings(encoder, pipe_config);
+}
+
+/*
+ * Wa_1409054076:icl,jsl,ehl
+ * When pipe A is disabled and MIPI DSI is enabled on pipe B,
+ * the AMT KVMR feature will incorrectly see pipe A as enabled.
+ * Set 0x42080 bit 23=1 before enabling DSI on pipe B and leave
+ * it set while DSI is enabled on pipe B.
+ */
+static void icl_apply_kvmr_pipe_a_wa(struct intel_encoder *encoder,
+ enum pipe pipe, bool enable)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
+ if (DISPLAY_VER(dev_priv) == 11 && pipe == PIPE_B)
+ intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
+ IGNORE_KVMR_PIPE_A,
+ enable ? IGNORE_KVMR_PIPE_A : 0);
+}
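+
+/*
+ * intel_de_rmw(i915, reg, clear, set) reads the register, clears the
+ * bits in @clear, ORs in @set and writes the value back, so the call in
+ * icl_apply_kvmr_pipe_a_wa() above sets or clears IGNORE_KVMR_PIPE_A
+ * depending on @enable.
+ */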
+
+/*
+ * Wa_16012360555:adl-p
+ * SW will have to program the "LP to HS Wakeup Guardband"
+ * to account for the repeaters on the HS Request/Ready
+ * PPI signaling between the Display engine and the DPHY.
+ */
+static void adlp_set_lp_hs_wakeup_gb(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ enum port port;
+
+ if (DISPLAY_VER(i915) == 13) {
+ for_each_dsi_port(port, intel_dsi->ports)
+ intel_de_rmw(i915, TGL_DSI_CHKN_REG(port),
+ TGL_DSI_CHKN_LSHS_GB_MASK,
+ TGL_DSI_CHKN_LSHS_GB(4));
+ }
+}
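+
+/*
+ * In adlp_set_lp_hs_wakeup_gb() above, TGL_DSI_CHKN_LSHS_GB(4) uses
+ * REG_FIELD_PREP() to place the value 4 in the guardband field (bits
+ * 15:12 of TGL_DSI_CHKN_REG); DISPLAY_VER() == 13 covers ADL-P here.
+ */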
+
+static void gen11_dsi_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ struct intel_crtc *crtc = to_intel_crtc(conn_state->crtc);
+
+ drm_WARN_ON(state->base.dev, crtc_state->has_pch_encoder);
+
+ /* Wa_1409054076:icl,jsl,ehl */
+ icl_apply_kvmr_pipe_a_wa(encoder, crtc->pipe, true);
+
+ /* Wa_16012360555:adl-p */
+ adlp_set_lp_hs_wakeup_gb(encoder);
+
+ /* step6d: enable dsi transcoder */
+ gen11_dsi_enable_transcoder(encoder);
+
+ /* step7: enable backlight */
+ intel_backlight_enable(crtc_state, conn_state);
+ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_ON);
+
+ intel_crtc_vblank_on(crtc_state);
+}
+
+static void gen11_dsi_disable_transcoder(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ enum port port;
+ enum transcoder dsi_trans;
+ u32 tmp;
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+
+ /* disable transcoder */
+ tmp = intel_de_read(dev_priv, PIPECONF(dsi_trans));
+ tmp &= ~PIPECONF_ENABLE;
+ intel_de_write(dev_priv, PIPECONF(dsi_trans), tmp);
+
+ /* wait for transcoder to be disabled */
+ if (intel_de_wait_for_clear(dev_priv, PIPECONF(dsi_trans),
+ PIPECONF_STATE_ENABLE, 50))
+ drm_err(&dev_priv->drm,
+ "DSI trancoder not disabled\n");
+ }
+}
+
+static void gen11_dsi_powerdown_panel(struct intel_encoder *encoder)
+{
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+
+ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_OFF);
+ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_ASSERT_RESET);
+ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_OFF);
+
+ /* ensure cmds dispatched to panel */
+ wait_for_cmds_dispatched_to_panel(encoder);
+}
+
+static void gen11_dsi_deconfigure_transcoder(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ enum port port;
+ enum transcoder dsi_trans;
+ u32 tmp;
+
+ /* disable periodic update mode */
+ if (is_cmd_mode(intel_dsi)) {
+ for_each_dsi_port(port, intel_dsi->ports) {
+ tmp = intel_de_read(dev_priv, DSI_CMD_FRMCTL(port));
+ tmp &= ~DSI_PERIODIC_FRAME_UPDATE_ENABLE;
+ intel_de_write(dev_priv, DSI_CMD_FRMCTL(port), tmp);
+ }
+ }
+
+ /* put the DSI link in ULPS (Ultra Low Power State) */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+ tmp = intel_de_read(dev_priv, DSI_LP_MSG(dsi_trans));
+ tmp |= LINK_ENTER_ULPS;
+ tmp &= ~LINK_ULPS_TYPE_LP11;
+ intel_de_write(dev_priv, DSI_LP_MSG(dsi_trans), tmp);
+
+ if (wait_for_us((intel_de_read(dev_priv, DSI_LP_MSG(dsi_trans)) &
+ LINK_IN_ULPS),
+ 10))
+ drm_err(&dev_priv->drm, "DSI link not in ULPS\n");
+ }
+
+ /* disable ddi function */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+ tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(dsi_trans));
+ tmp &= ~TRANS_DDI_FUNC_ENABLE;
+ intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(dsi_trans), tmp);
+ }
+
+ /* disable port sync mode if dual link */
+ if (intel_dsi->dual_link) {
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+ tmp = intel_de_read(dev_priv,
+ TRANS_DDI_FUNC_CTL2(dsi_trans));
+ tmp &= ~PORT_SYNC_MODE_ENABLE;
+ intel_de_write(dev_priv,
+ TRANS_DDI_FUNC_CTL2(dsi_trans), tmp);
+ }
+ }
+}
+
+static void gen11_dsi_disable_port(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ u32 tmp;
+ enum port port;
+
+ gen11_dsi_ungate_clocks(encoder);
+ for_each_dsi_port(port, intel_dsi->ports) {
+ tmp = intel_de_read(dev_priv, DDI_BUF_CTL(port));
+ tmp &= ~DDI_BUF_CTL_ENABLE;
+ intel_de_write(dev_priv, DDI_BUF_CTL(port), tmp);
+
+ if (wait_for_us((intel_de_read(dev_priv, DDI_BUF_CTL(port)) &
+ DDI_BUF_IS_IDLE),
+ 8))
+ drm_err(&dev_priv->drm,
+ "DDI port:%c buffer not idle\n",
+ port_name(port));
+ }
+ gen11_dsi_gate_clocks(encoder);
+}
+
+static void gen11_dsi_disable_io_power(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ enum port port;
+ u32 tmp;
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ intel_wakeref_t wakeref;
+
+ wakeref = fetch_and_zero(&intel_dsi->io_wakeref[port]);
+ intel_display_power_put(dev_priv,
+ port == PORT_A ?
+ POWER_DOMAIN_PORT_DDI_IO_A :
+ POWER_DOMAIN_PORT_DDI_IO_B,
+ wakeref);
+ }
+
+ /* set mode to DDI */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ tmp = intel_de_read(dev_priv, ICL_DSI_IO_MODECTL(port));
+ tmp &= ~COMBO_PHY_MODE_DSI;
+ intel_de_write(dev_priv, ICL_DSI_IO_MODECTL(port), tmp);
+ }
+}
+
+static void gen11_dsi_disable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ struct intel_crtc *crtc = to_intel_crtc(old_conn_state->crtc);
+
+ /* step1: turn off backlight */
+ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_OFF);
+ intel_backlight_disable(old_conn_state);
+
+ /* step2d,e: disable transcoder and wait */
+ gen11_dsi_disable_transcoder(encoder);
+
+ /* Wa_1409054076:icl,jsl,ehl */
+ icl_apply_kvmr_pipe_a_wa(encoder, crtc->pipe, false);
+
+ /* step2f,g: powerdown panel */
+ gen11_dsi_powerdown_panel(encoder);
+
+ /* step2h,i,j: deconfigure transcoder */
+ gen11_dsi_deconfigure_transcoder(encoder);
+
+ /* step3: disable port */
+ gen11_dsi_disable_port(encoder);
+
+ gen11_dsi_config_util_pin(encoder, false);
+
+ /* step4: disable IO power */
+ gen11_dsi_disable_io_power(encoder);
+}
+
+static void gen11_dsi_post_disable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ intel_crtc_vblank_off(old_crtc_state);
+
+ intel_dsc_disable(old_crtc_state);
+
+ skl_scaler_disable(old_crtc_state);
+}
+
+static enum drm_mode_status gen11_dsi_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct drm_i915_private *i915 = to_i915(connector->dev);
+ enum drm_mode_status status;
+
+ status = intel_cpu_transcoder_mode_valid(i915, mode);
+ if (status != MODE_OK)
+ return status;
+
+ /* FIXME: DSC? */
+ return intel_dsi_mode_valid(connector, mode);
+}
+
+static void gen11_dsi_get_timings(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config)
+{
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ struct drm_display_mode *adjusted_mode =
+ &pipe_config->hw.adjusted_mode;
+
+ if (pipe_config->dsc.compressed_bpp) {
+ int div = pipe_config->dsc.compressed_bpp;
+ int mul = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
+
+ adjusted_mode->crtc_htotal =
+ DIV_ROUND_UP(adjusted_mode->crtc_htotal * mul, div);
+ adjusted_mode->crtc_hsync_start =
+ DIV_ROUND_UP(adjusted_mode->crtc_hsync_start * mul, div);
+ adjusted_mode->crtc_hsync_end =
+ DIV_ROUND_UP(adjusted_mode->crtc_hsync_end * mul, div);
+ }
+
+ if (intel_dsi->dual_link) {
+ adjusted_mode->crtc_hdisplay *= 2;
+ if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK)
+ adjusted_mode->crtc_hdisplay -=
+ intel_dsi->pixel_overlap;
+ adjusted_mode->crtc_htotal *= 2;
+ }
+ adjusted_mode->crtc_hblank_start = adjusted_mode->crtc_hdisplay;
+ adjusted_mode->crtc_hblank_end = adjusted_mode->crtc_htotal;
+
+ if (intel_dsi->operation_mode == INTEL_DSI_VIDEO_MODE) {
+ if (intel_dsi->dual_link) {
+ adjusted_mode->crtc_hsync_start *= 2;
+ adjusted_mode->crtc_hsync_end *= 2;
+ }
+ }
+ adjusted_mode->crtc_vblank_start = adjusted_mode->crtc_vdisplay;
+ adjusted_mode->crtc_vblank_end = adjusted_mode->crtc_vtotal;
+}
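+
+/*
+ * Worked example for the DSC scaling in gen11_dsi_get_timings() above,
+ * with purely illustrative numbers: RGB888 (mul = 24) compressed to
+ * 8 bpp (div = 8) stretches a transmitted crtc_htotal of 800 to
+ * DIV_ROUND_UP(800 * 24, 8) = 2400 uncompressed pixels, before any
+ * dual link doubling.
+ */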
+
+static bool gen11_dsi_is_periodic_cmd_mode(struct intel_dsi *intel_dsi)
+{
+ struct drm_device *dev = intel_dsi->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ enum transcoder dsi_trans;
+ u32 val;
+
+ if (intel_dsi->ports == BIT(PORT_B))
+ dsi_trans = TRANSCODER_DSI_1;
+ else
+ dsi_trans = TRANSCODER_DSI_0;
+
+ val = intel_de_read(dev_priv, DSI_TRANS_FUNC_CONF(dsi_trans));
+ return (val & DSI_PERIODIC_FRAME_UPDATE_ENABLE);
+}
+
+static void gen11_dsi_get_cmd_mode_config(struct intel_dsi *intel_dsi,
+ struct intel_crtc_state *pipe_config)
+{
+ if (intel_dsi->ports == (BIT(PORT_B) | BIT(PORT_A)))
+ pipe_config->mode_flags |= I915_MODE_FLAG_DSI_USE_TE1 |
+ I915_MODE_FLAG_DSI_USE_TE0;
+ else if (intel_dsi->ports == BIT(PORT_B))
+ pipe_config->mode_flags |= I915_MODE_FLAG_DSI_USE_TE1;
+ else
+ pipe_config->mode_flags |= I915_MODE_FLAG_DSI_USE_TE0;
+}
+
+static void gen11_dsi_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config)
+{
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+
+ intel_ddi_get_clock(encoder, pipe_config, icl_ddi_combo_get_pll(encoder));
+
+ pipe_config->hw.adjusted_mode.crtc_clock = intel_dsi->pclk;
+ if (intel_dsi->dual_link)
+ pipe_config->hw.adjusted_mode.crtc_clock *= 2;
+
+ gen11_dsi_get_timings(encoder, pipe_config);
+ pipe_config->output_types |= BIT(INTEL_OUTPUT_DSI);
+ pipe_config->pipe_bpp = bdw_get_pipemisc_bpp(crtc);
+
+ /* Get the details on which TE should be enabled */
+ if (is_cmd_mode(intel_dsi))
+ gen11_dsi_get_cmd_mode_config(intel_dsi, pipe_config);
+
+ if (gen11_dsi_is_periodic_cmd_mode(intel_dsi))
+ pipe_config->mode_flags |= I915_MODE_FLAG_DSI_PERIODIC_CMD_MODE;
+}
+
+static void gen11_dsi_sync_state(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_crtc *intel_crtc;
+ enum pipe pipe;
+
+ if (!crtc_state)
+ return;
+
+ intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ pipe = intel_crtc->pipe;
+
+ /* wa verify 1409054076:icl,jsl,ehl */
+ if (DISPLAY_VER(dev_priv) == 11 && pipe == PIPE_B &&
+ !(intel_de_read(dev_priv, CHICKEN_PAR1_1) & IGNORE_KVMR_PIPE_A))
+ drm_dbg_kms(&dev_priv->drm,
+ "[ENCODER:%d:%s] BIOS left IGNORE_KVMR_PIPE_A cleared with pipe B enabled\n",
+ encoder->base.base.id,
+ encoder->base.name);
+}
+
+static int gen11_dsi_dsc_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
+ int dsc_max_bpc = DISPLAY_VER(dev_priv) >= 12 ? 12 : 10;
+ bool use_dsc;
+ int ret;
+
+ use_dsc = intel_bios_get_dsc_params(encoder, crtc_state, dsc_max_bpc);
+ if (!use_dsc)
+ return 0;
+
+ if (crtc_state->pipe_bpp < 8 * 3)
+ return -EINVAL;
+
+ /* FIXME: split only when necessary */
+ if (crtc_state->dsc.slice_count > 1)
+ crtc_state->dsc.dsc_split = true;
+
+ vdsc_cfg->convert_rgb = true;
+
+ /* FIXME: initialize from VBT */
+ vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST;
+
+ vdsc_cfg->pic_height = crtc_state->hw.adjusted_mode.crtc_vdisplay;
+
+ ret = intel_dsc_compute_params(crtc_state);
+ if (ret)
+ return ret;
+
+ /* DSI specific sanity checks on the common code */
+ drm_WARN_ON(&dev_priv->drm, vdsc_cfg->vbr_enable);
+ drm_WARN_ON(&dev_priv->drm, vdsc_cfg->simple_422);
+ drm_WARN_ON(&dev_priv->drm,
+ vdsc_cfg->pic_width % vdsc_cfg->slice_width);
+ drm_WARN_ON(&dev_priv->drm, vdsc_cfg->slice_height < 8);
+ drm_WARN_ON(&dev_priv->drm,
+ vdsc_cfg->pic_height % vdsc_cfg->slice_height);
+
+ ret = drm_dsc_compute_rc_parameters(vdsc_cfg);
+ if (ret)
+ return ret;
+
+ crtc_state->dsc.compression_enable = true;
+
+ return 0;
+}
+
+static int gen11_dsi_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
+ base);
+ struct intel_connector *intel_connector = intel_dsi->attached_connector;
+ struct drm_display_mode *adjusted_mode =
+ &pipe_config->hw.adjusted_mode;
+ int ret;
+
+ pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
+
+ ret = intel_panel_compute_config(intel_connector, adjusted_mode);
+ if (ret)
+ return ret;
+
+ ret = intel_panel_fitting(pipe_config, conn_state);
+ if (ret)
+ return ret;
+
+ adjusted_mode->flags = 0;
+
+ /* Dual link goes to transcoder DSI '0' */
+ if (intel_dsi->ports == BIT(PORT_B))
+ pipe_config->cpu_transcoder = TRANSCODER_DSI_1;
+ else
+ pipe_config->cpu_transcoder = TRANSCODER_DSI_0;
+
+ if (intel_dsi->pixel_format == MIPI_DSI_FMT_RGB888)
+ pipe_config->pipe_bpp = 24;
+ else
+ pipe_config->pipe_bpp = 18;
+
+ pipe_config->clock_set = true;
+
+ if (gen11_dsi_dsc_compute_config(encoder, pipe_config))
+ drm_dbg_kms(&i915->drm, "Attempting to use DSC failed\n");
+
+ pipe_config->port_clock = afe_clk(encoder, pipe_config) / 5;
+
+ /*
+ * In TE GATE command mode we receive TE from the slave if dual
+ * link is enabled.
+ */
+ if (is_cmd_mode(intel_dsi))
+ gen11_dsi_get_cmd_mode_config(intel_dsi, pipe_config);
+
+ return 0;
+}
+
+static void gen11_dsi_get_power_domains(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+ get_dsi_io_power_domains(i915,
+ enc_to_intel_dsi(encoder));
+}
+
+static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ enum transcoder dsi_trans;
+ intel_wakeref_t wakeref;
+ enum port port;
+ bool ret = false;
+ u32 tmp;
+
+ wakeref = intel_display_power_get_if_enabled(dev_priv,
+ encoder->power_domain);
+ if (!wakeref)
+ return false;
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+ tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(dsi_trans));
+ switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
+ case TRANS_DDI_EDP_INPUT_A_ON:
+ *pipe = PIPE_A;
+ break;
+ case TRANS_DDI_EDP_INPUT_B_ONOFF:
+ *pipe = PIPE_B;
+ break;
+ case TRANS_DDI_EDP_INPUT_C_ONOFF:
+ *pipe = PIPE_C;
+ break;
+ case TRANS_DDI_EDP_INPUT_D_ONOFF:
+ *pipe = PIPE_D;
+ break;
+ default:
+ drm_err(&dev_priv->drm, "Invalid PIPE input\n");
+ goto out;
+ }
+
+ tmp = intel_de_read(dev_priv, PIPECONF(dsi_trans));
+ ret = tmp & PIPECONF_ENABLE;
+ }
+out:
+ intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
+ return ret;
+}
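+
+/*
+ * Note the wakeref pattern in gen11_dsi_get_hw_state() above: registers
+ * are only read while intel_display_power_get_if_enabled() confirms the
+ * power domain is up, and the wakeref is released on every exit path.
+ */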
+
+static bool gen11_dsi_initial_fastset_check(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
+{
+ if (crtc_state->dsc.compression_enable) {
+ drm_dbg_kms(encoder->base.dev, "Forcing full modeset due to DSC being enabled\n");
+ crtc_state->uapi.mode_changed = true;
+
+ return false;
+ }
+
+ return true;
+}
+
+static void gen11_dsi_encoder_destroy(struct drm_encoder *encoder)
+{
+ intel_encoder_destroy(encoder);
+}
+
+static const struct drm_encoder_funcs gen11_dsi_encoder_funcs = {
+ .destroy = gen11_dsi_encoder_destroy,
+};
+
+static const struct drm_connector_funcs gen11_dsi_connector_funcs = {
+ .detect = intel_panel_detect,
+ .late_register = intel_connector_register,
+ .early_unregister = intel_connector_unregister,
+ .destroy = intel_connector_destroy,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .atomic_get_property = intel_digital_connector_atomic_get_property,
+ .atomic_set_property = intel_digital_connector_atomic_set_property,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_duplicate_state = intel_digital_connector_duplicate_state,
+};
+
+static const struct drm_connector_helper_funcs gen11_dsi_connector_helper_funcs = {
+ .get_modes = intel_dsi_get_modes,
+ .mode_valid = gen11_dsi_mode_valid,
+ .atomic_check = intel_digital_connector_atomic_check,
+};
+
+static int gen11_dsi_host_attach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *dsi)
+{
+ return 0;
+}
+
+static int gen11_dsi_host_detach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *dsi)
+{
+ return 0;
+}
+
+static ssize_t gen11_dsi_host_transfer(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
+{
+ struct intel_dsi_host *intel_dsi_host = to_intel_dsi_host(host);
+ struct mipi_dsi_packet dsi_pkt;
+ ssize_t ret;
+ bool enable_lpdt = false;
+
+ ret = mipi_dsi_create_packet(&dsi_pkt, msg);
+ if (ret < 0)
+ return ret;
+
+ if (msg->flags & MIPI_DSI_MSG_USE_LPM)
+ enable_lpdt = true;
+
+ /* only long packets contain a payload */
+ if (mipi_dsi_packet_format_is_long(msg->type)) {
+ ret = dsi_send_pkt_payld(intel_dsi_host, &dsi_pkt);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* send packet header */
+ ret = dsi_send_pkt_hdr(intel_dsi_host, &dsi_pkt, enable_lpdt);
+ if (ret < 0)
+ return ret;
+
+ /* TODO: add payload receive code if needed */
+
+ ret = sizeof(dsi_pkt.header) + dsi_pkt.payload_length;
+
+ return ret;
+}
+
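+/*
+ * For context: packets reach gen11_dsi_host_transfer() above through the
+ * standard mipi_dsi helpers. A hypothetical panel command (values purely
+ * illustrative) could be sent as:
+ *
+ *	u8 data[] = { MIPI_DCS_SET_DISPLAY_BRIGHTNESS, 0xff };
+ *	mipi_dsi_dcs_write_buffer(dsi, data, sizeof(data));
+ *
+ * mipi_dsi_create_packet() then produces the header/payload split that
+ * dsi_send_pkt_hdr()/dsi_send_pkt_payld() transmit.
+ */
+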
+static const struct mipi_dsi_host_ops gen11_dsi_host_ops = {
+ .attach = gen11_dsi_host_attach,
+ .detach = gen11_dsi_host_detach,
+ .transfer = gen11_dsi_host_transfer,
+};
+
+#define ICL_PREPARE_CNT_MAX 0x7
+#define ICL_CLK_ZERO_CNT_MAX 0xf
+#define ICL_TRAIL_CNT_MAX 0x7
+#define ICL_TCLK_PRE_CNT_MAX 0x3
+#define ICL_TCLK_POST_CNT_MAX 0x7
+#define ICL_HS_ZERO_CNT_MAX 0xf
+#define ICL_EXIT_ZERO_CNT_MAX 0x7
+
+static void icl_dphy_param_init(struct intel_dsi *intel_dsi)
+{
+ struct drm_device *dev = intel_dsi->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_connector *connector = intel_dsi->attached_connector;
+ struct mipi_config *mipi_config = connector->panel.vbt.dsi.config;
+ u32 tlpx_ns;
+ u32 prepare_cnt, exit_zero_cnt, clk_zero_cnt, trail_cnt;
+ u32 ths_prepare_ns, tclk_trail_ns;
+ u32 hs_zero_cnt;
+ u32 tclk_pre_cnt, tclk_post_cnt;
+
+ tlpx_ns = intel_dsi_tlpx_ns(intel_dsi);
+
+ tclk_trail_ns = max(mipi_config->tclk_trail, mipi_config->ths_trail);
+ ths_prepare_ns = max(mipi_config->ths_prepare,
+ mipi_config->tclk_prepare);
+
+ /*
+ * prepare cnt in escape clocks.
+ * This field is a fixed-point value with 1.2 precision, i.e. the
+ * most significant bit is the integer part and the 2 least
+ * significant bits are fraction bits, so the field can represent
+ * values from 0.25 to 1.75 in steps of 0.25.
+ */
+ prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * 4, tlpx_ns);
+ if (prepare_cnt > ICL_PREPARE_CNT_MAX) {
+ drm_dbg_kms(&dev_priv->drm, "prepare_cnt out of range (%d)\n",
+ prepare_cnt);
+ prepare_cnt = ICL_PREPARE_CNT_MAX;
+ }
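+
+	/*
+	 * Worked example with illustrative numbers: ths_prepare_ns = 60
+	 * and tlpx_ns = 50 gives prepare_cnt = DIV_ROUND_UP(60 * 4, 50)
+	 * = 5, i.e. binary 101 = 1.25 escape clocks in the 1.2
+	 * fixed-point encoding.
+	 */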
+
+ /* clk zero count in escape clocks */
+ clk_zero_cnt = DIV_ROUND_UP(mipi_config->tclk_prepare_clkzero -
+ ths_prepare_ns, tlpx_ns);
+ if (clk_zero_cnt > ICL_CLK_ZERO_CNT_MAX) {
+ drm_dbg_kms(&dev_priv->drm,
+ "clk_zero_cnt out of range (%d)\n", clk_zero_cnt);
+ clk_zero_cnt = ICL_CLK_ZERO_CNT_MAX;
+ }
+
+ /* trail cnt in escape clocks */
+ trail_cnt = DIV_ROUND_UP(tclk_trail_ns, tlpx_ns);
+ if (trail_cnt > ICL_TRAIL_CNT_MAX) {
+ drm_dbg_kms(&dev_priv->drm, "trail_cnt out of range (%d)\n",
+ trail_cnt);
+ trail_cnt = ICL_TRAIL_CNT_MAX;
+ }
+
+ /* tclk pre count in escape clocks */
+ tclk_pre_cnt = DIV_ROUND_UP(mipi_config->tclk_pre, tlpx_ns);
+ if (tclk_pre_cnt > ICL_TCLK_PRE_CNT_MAX) {
+ drm_dbg_kms(&dev_priv->drm,
+ "tclk_pre_cnt out of range (%d)\n", tclk_pre_cnt);
+ tclk_pre_cnt = ICL_TCLK_PRE_CNT_MAX;
+ }
+
+ /* tclk post count in escape clocks */
+ tclk_post_cnt = DIV_ROUND_UP(mipi_config->tclk_post, tlpx_ns);
+ if (tclk_post_cnt > ICL_TCLK_POST_CNT_MAX) {
+ drm_dbg_kms(&dev_priv->drm,
+ "tclk_post_cnt out of range (%d)\n",
+ tclk_post_cnt);
+ tclk_post_cnt = ICL_TCLK_POST_CNT_MAX;
+ }
+
+ /* hs zero cnt in escape clocks */
+ hs_zero_cnt = DIV_ROUND_UP(mipi_config->ths_prepare_hszero -
+ ths_prepare_ns, tlpx_ns);
+ if (hs_zero_cnt > ICL_HS_ZERO_CNT_MAX) {
+ drm_dbg_kms(&dev_priv->drm, "hs_zero_cnt out of range (%d)\n",
+ hs_zero_cnt);
+ hs_zero_cnt = ICL_HS_ZERO_CNT_MAX;
+ }
+
+ /* hs exit zero cnt in escape clocks */
+ exit_zero_cnt = DIV_ROUND_UP(mipi_config->ths_exit, tlpx_ns);
+ if (exit_zero_cnt > ICL_EXIT_ZERO_CNT_MAX) {
+ drm_dbg_kms(&dev_priv->drm,
+ "exit_zero_cnt out of range (%d)\n",
+ exit_zero_cnt);
+ exit_zero_cnt = ICL_EXIT_ZERO_CNT_MAX;
+ }
+
+ /* clock lane dphy timings */
+ intel_dsi->dphy_reg = (CLK_PREPARE_OVERRIDE |
+ CLK_PREPARE(prepare_cnt) |
+ CLK_ZERO_OVERRIDE |
+ CLK_ZERO(clk_zero_cnt) |
+ CLK_PRE_OVERRIDE |
+ CLK_PRE(tclk_pre_cnt) |
+ CLK_POST_OVERRIDE |
+ CLK_POST(tclk_post_cnt) |
+ CLK_TRAIL_OVERRIDE |
+ CLK_TRAIL(trail_cnt));
+
+ /* data lanes dphy timings */
+ intel_dsi->dphy_data_lane_reg = (HS_PREPARE_OVERRIDE |
+ HS_PREPARE(prepare_cnt) |
+ HS_ZERO_OVERRIDE |
+ HS_ZERO(hs_zero_cnt) |
+ HS_TRAIL_OVERRIDE |
+ HS_TRAIL(trail_cnt) |
+ HS_EXIT_OVERRIDE |
+ HS_EXIT(exit_zero_cnt));
+
+ intel_dsi_log_params(intel_dsi);
+}
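+
+/*
+ * The CLK_*() and HS_*() field encodings used in icl_dphy_param_init()
+ * above are defined in icl_dsi_regs.h; each count is shifted into its
+ * field and paired with an *_OVERRIDE bit so that the programmed value
+ * is used instead of the hardware default.
+ */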
+
+static void icl_dsi_add_properties(struct intel_connector *connector)
+{
+ const struct drm_display_mode *fixed_mode =
+ intel_panel_preferred_fixed_mode(connector);
+ u32 allowed_scalers;
+
+ allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) |
+ BIT(DRM_MODE_SCALE_FULLSCREEN) |
+ BIT(DRM_MODE_SCALE_CENTER);
+
+ drm_connector_attach_scaling_mode_property(&connector->base,
+ allowed_scalers);
+
+ connector->base.state->scaling_mode = DRM_MODE_SCALE_ASPECT;
+
+ drm_connector_set_panel_orientation_with_quirk(&connector->base,
+ intel_dsi_get_panel_orientation(connector),
+ fixed_mode->hdisplay,
+ fixed_mode->vdisplay);
+}
+
+void icl_dsi_init(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = &dev_priv->drm;
+ struct intel_dsi *intel_dsi;
+ struct intel_encoder *encoder;
+ struct intel_connector *intel_connector;
+ struct drm_connector *connector;
+ enum port port;
+
+ if (!intel_bios_is_dsi_present(dev_priv, &port))
+ return;
+
+ intel_dsi = kzalloc(sizeof(*intel_dsi), GFP_KERNEL);
+ if (!intel_dsi)
+ return;
+
+ intel_connector = intel_connector_alloc();
+ if (!intel_connector) {
+ kfree(intel_dsi);
+ return;
+ }
+
+ encoder = &intel_dsi->base;
+ intel_dsi->attached_connector = intel_connector;
+ connector = &intel_connector->base;
+
+ /* register DSI encoder with DRM subsystem */
+ drm_encoder_init(dev, &encoder->base, &gen11_dsi_encoder_funcs,
+ DRM_MODE_ENCODER_DSI, "DSI %c", port_name(port));
+
+ encoder->pre_pll_enable = gen11_dsi_pre_pll_enable;
+ encoder->pre_enable = gen11_dsi_pre_enable;
+ encoder->enable = gen11_dsi_enable;
+ encoder->disable = gen11_dsi_disable;
+ encoder->post_disable = gen11_dsi_post_disable;
+ encoder->port = port;
+ encoder->get_config = gen11_dsi_get_config;
+ encoder->sync_state = gen11_dsi_sync_state;
+ encoder->update_pipe = intel_backlight_update;
+ encoder->compute_config = gen11_dsi_compute_config;
+ encoder->get_hw_state = gen11_dsi_get_hw_state;
+ encoder->initial_fastset_check = gen11_dsi_initial_fastset_check;
+ encoder->type = INTEL_OUTPUT_DSI;
+ encoder->cloneable = 0;
+ encoder->pipe_mask = ~0;
+ encoder->power_domain = POWER_DOMAIN_PORT_DSI;
+ encoder->get_power_domains = gen11_dsi_get_power_domains;
+ encoder->disable_clock = gen11_dsi_gate_clocks;
+ encoder->is_clock_enabled = gen11_dsi_is_clock_enabled;
+
+ /* register DSI connector with DRM subsystem */
+ drm_connector_init(dev, connector, &gen11_dsi_connector_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ drm_connector_helper_add(connector, &gen11_dsi_connector_helper_funcs);
+ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
+ intel_connector->get_hw_state = intel_connector_get_hw_state;
+
+ /* attach connector to encoder */
+ intel_connector_attach_encoder(intel_connector, encoder);
+
+ encoder->devdata = intel_bios_encoder_data_lookup(dev_priv, port);
+ intel_bios_init_panel_late(dev_priv, &intel_connector->panel, encoder->devdata, NULL);
+
+ mutex_lock(&dev->mode_config.mutex);
+ intel_panel_add_vbt_lfp_fixed_mode(intel_connector);
+ mutex_unlock(&dev->mode_config.mutex);
+
+ if (!intel_panel_preferred_fixed_mode(intel_connector)) {
+ drm_err(&dev_priv->drm, "DSI fixed mode info missing\n");
+ goto err;
+ }
+
+ intel_panel_init(intel_connector);
+
+ intel_backlight_setup(intel_connector, INVALID_PIPE);
+
+ if (intel_connector->panel.vbt.dsi.config->dual_link)
+ intel_dsi->ports = BIT(PORT_A) | BIT(PORT_B);
+ else
+ intel_dsi->ports = BIT(port);
+
+ if (drm_WARN_ON(&dev_priv->drm, intel_connector->panel.vbt.dsi.bl_ports & ~intel_dsi->ports))
+ intel_connector->panel.vbt.dsi.bl_ports &= intel_dsi->ports;
+
+ if (drm_WARN_ON(&dev_priv->drm, intel_connector->panel.vbt.dsi.cabc_ports & ~intel_dsi->ports))
+ intel_connector->panel.vbt.dsi.cabc_ports &= intel_dsi->ports;
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ struct intel_dsi_host *host;
+
+ host = intel_dsi_host_init(intel_dsi, &gen11_dsi_host_ops, port);
+ if (!host)
+ goto err;
+
+ intel_dsi->dsi_hosts[port] = host;
+ }
+
+ if (!intel_dsi_vbt_init(intel_dsi, MIPI_DSI_GENERIC_PANEL_ID)) {
+ drm_dbg_kms(&dev_priv->drm, "no device found\n");
+ goto err;
+ }
+
+ icl_dphy_param_init(intel_dsi);
+
+ icl_dsi_add_properties(intel_connector);
+ return;
+
+err:
+ drm_connector_cleanup(connector);
+ drm_encoder_cleanup(&encoder->base);
+ kfree(intel_dsi);
+ kfree(intel_connector);
+}
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.h b/drivers/gpu/drm/i915/display/icl_dsi.h
new file mode 100644
index 000000000..b4861b56b
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/icl_dsi.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef __ICL_DSI_H__
+#define __ICL_DSI_H__
+
+struct drm_i915_private;
+struct intel_crtc_state;
+
+void icl_dsi_init(struct drm_i915_private *i915);
+void icl_dsi_frame_update(struct intel_crtc_state *crtc_state);
+
+#endif /* __ICL_DSI_H__ */
diff --git a/drivers/gpu/drm/i915/display/icl_dsi_regs.h b/drivers/gpu/drm/i915/display/icl_dsi_regs.h
new file mode 100644
index 000000000..f78f28b8d
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/icl_dsi_regs.h
@@ -0,0 +1,342 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __ICL_DSI_REGS_H__
+#define __ICL_DSI_REGS_H__
+
+#include "i915_reg_defs.h"
+
+/* Gen11 DSI */
+#define _MMIO_DSI(tc, dsi0, dsi1) _MMIO_TRANS((tc) - TRANSCODER_DSI_0, \
+ dsi0, dsi1)
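+/*
+ * Example: for TRANSCODER_DSI_1 this macro selects the second register,
+ * e.g. DSI_TRANS_FUNC_CONF(TRANSCODER_DSI_1) below resolves to 0x6b830.
+ */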
+#define _ICL_DSI_ESC_CLK_DIV0 0x6b090
+#define _ICL_DSI_ESC_CLK_DIV1 0x6b890
+#define ICL_DSI_ESC_CLK_DIV(port) _MMIO_PORT((port), \
+ _ICL_DSI_ESC_CLK_DIV0, \
+ _ICL_DSI_ESC_CLK_DIV1)
+#define _ICL_DPHY_ESC_CLK_DIV0 0x162190
+#define _ICL_DPHY_ESC_CLK_DIV1 0x6C190
+#define ICL_DPHY_ESC_CLK_DIV(port) _MMIO_PORT((port), \
+ _ICL_DPHY_ESC_CLK_DIV0, \
+ _ICL_DPHY_ESC_CLK_DIV1)
+#define ICL_BYTE_CLK_PER_ESC_CLK_MASK (0x1f << 16)
+#define ICL_BYTE_CLK_PER_ESC_CLK_SHIFT 16
+#define ICL_ESC_CLK_DIV_MASK 0x1ff
+#define ICL_ESC_CLK_DIV_SHIFT 0
+#define DSI_MAX_ESC_CLK 20000 /* in KHz */
+
+#define _ADL_MIPIO_REG 0x180
+#define ADL_MIPIO_DW(port, dw) _MMIO(_ICL_COMBOPHY(port) + _ADL_MIPIO_REG + 4 * (dw))
+#define TX_ESC_CLK_DIV_PHY_SEL REGBIT(16)
+#define TX_ESC_CLK_DIV_PHY_MASK REG_GENMASK(23, 16)
+#define TX_ESC_CLK_DIV_PHY REG_FIELD_PREP(TX_ESC_CLK_DIV_PHY_MASK, 0x7f)
+
+#define _DSI_CMD_FRMCTL_0 0x6b034
+#define _DSI_CMD_FRMCTL_1 0x6b834
+#define DSI_CMD_FRMCTL(port) _MMIO_PORT(port, \
+ _DSI_CMD_FRMCTL_0,\
+ _DSI_CMD_FRMCTL_1)
+#define DSI_FRAME_UPDATE_REQUEST (1 << 31)
+#define DSI_PERIODIC_FRAME_UPDATE_ENABLE (1 << 29)
+#define DSI_NULL_PACKET_ENABLE (1 << 28)
+#define DSI_FRAME_IN_PROGRESS (1 << 0)
+
+#define _DSI_INTR_MASK_REG_0 0x6b070
+#define _DSI_INTR_MASK_REG_1 0x6b870
+#define DSI_INTR_MASK_REG(port) _MMIO_PORT(port, \
+ _DSI_INTR_MASK_REG_0,\
+ _DSI_INTR_MASK_REG_1)
+
+#define _DSI_INTR_IDENT_REG_0 0x6b074
+#define _DSI_INTR_IDENT_REG_1 0x6b874
+#define DSI_INTR_IDENT_REG(port) _MMIO_PORT(port, \
+ _DSI_INTR_IDENT_REG_0,\
+ _DSI_INTR_IDENT_REG_1)
+#define DSI_TE_EVENT (1 << 31)
+#define DSI_RX_DATA_OR_BTA_TERMINATED (1 << 30)
+#define DSI_TX_DATA (1 << 29)
+#define DSI_ULPS_ENTRY_DONE (1 << 28)
+#define DSI_NON_TE_TRIGGER_RECEIVED (1 << 27)
+#define DSI_HOST_CHKSUM_ERROR (1 << 26)
+#define DSI_HOST_MULTI_ECC_ERROR (1 << 25)
+#define DSI_HOST_SINGL_ECC_ERROR (1 << 24)
+#define DSI_HOST_CONTENTION_DETECTED (1 << 23)
+#define DSI_HOST_FALSE_CONTROL_ERROR (1 << 22)
+#define DSI_HOST_TIMEOUT_ERROR (1 << 21)
+#define DSI_HOST_LOW_POWER_TX_SYNC_ERROR (1 << 20)
+#define DSI_HOST_ESCAPE_MODE_ENTRY_ERROR (1 << 19)
+#define DSI_FRAME_UPDATE_DONE (1 << 16)
+#define DSI_PROTOCOL_VIOLATION_REPORTED (1 << 15)
+#define DSI_INVALID_TX_LENGTH (1 << 13)
+#define DSI_INVALID_VC (1 << 12)
+#define DSI_INVALID_DATA_TYPE (1 << 11)
+#define DSI_PERIPHERAL_CHKSUM_ERROR (1 << 10)
+#define DSI_PERIPHERAL_MULTI_ECC_ERROR (1 << 9)
+#define DSI_PERIPHERAL_SINGLE_ECC_ERROR (1 << 8)
+#define DSI_PERIPHERAL_CONTENTION_DETECTED (1 << 7)
+#define DSI_PERIPHERAL_FALSE_CTRL_ERROR (1 << 6)
+#define DSI_PERIPHERAL_TIMEOUT_ERROR (1 << 5)
+#define DSI_PERIPHERAL_LP_TX_SYNC_ERROR (1 << 4)
+#define DSI_PERIPHERAL_ESC_MODE_ENTRY_CMD_ERR (1 << 3)
+#define DSI_EOT_SYNC_ERROR (1 << 2)
+#define DSI_SOT_SYNC_ERROR (1 << 1)
+#define DSI_SOT_ERROR (1 << 0)
+
+/* ICL DSI MODE control */
+#define _ICL_DSI_IO_MODECTL_0 0x6B094
+#define _ICL_DSI_IO_MODECTL_1 0x6B894
+#define ICL_DSI_IO_MODECTL(port) _MMIO_PORT(port, \
+ _ICL_DSI_IO_MODECTL_0, \
+ _ICL_DSI_IO_MODECTL_1)
+#define COMBO_PHY_MODE_DSI (1 << 0)
+
+/* TGL DSI Chicken register */
+#define _TGL_DSI_CHKN_REG_0 0x6B0C0
+#define _TGL_DSI_CHKN_REG_1 0x6B8C0
+#define TGL_DSI_CHKN_REG(port) _MMIO_PORT(port, \
+ _TGL_DSI_CHKN_REG_0, \
+ _TGL_DSI_CHKN_REG_1)
+#define TGL_DSI_CHKN_LSHS_GB_MASK REG_GENMASK(15, 12)
+#define TGL_DSI_CHKN_LSHS_GB(byte_clocks) REG_FIELD_PREP(TGL_DSI_CHKN_LSHS_GB_MASK, \
+ (byte_clocks))
+#define _ICL_DSI_T_INIT_MASTER_0 0x6b088
+#define _ICL_DSI_T_INIT_MASTER_1 0x6b888
+#define ICL_DSI_T_INIT_MASTER(port) _MMIO_PORT(port, \
+ _ICL_DSI_T_INIT_MASTER_0,\
+ _ICL_DSI_T_INIT_MASTER_1)
+#define DSI_T_INIT_MASTER_MASK REG_GENMASK(15, 0)
+
+#define _DPHY_CLK_TIMING_PARAM_0 0x162180
+#define _DPHY_CLK_TIMING_PARAM_1 0x6c180
+#define DPHY_CLK_TIMING_PARAM(port) _MMIO_PORT(port, \
+ _DPHY_CLK_TIMING_PARAM_0,\
+ _DPHY_CLK_TIMING_PARAM_1)
+#define _DSI_CLK_TIMING_PARAM_0 0x6b080
+#define _DSI_CLK_TIMING_PARAM_1 0x6b880
+#define DSI_CLK_TIMING_PARAM(port) _MMIO_PORT(port, \
+ _DSI_CLK_TIMING_PARAM_0,\
+ _DSI_CLK_TIMING_PARAM_1)
+#define CLK_PREPARE_OVERRIDE (1 << 31)
+#define CLK_PREPARE(x) ((x) << 28)
+#define CLK_PREPARE_MASK (0x7 << 28)
+#define CLK_PREPARE_SHIFT 28
+#define CLK_ZERO_OVERRIDE (1 << 27)
+#define CLK_ZERO(x) ((x) << 20)
+#define CLK_ZERO_MASK (0xf << 20)
+#define CLK_ZERO_SHIFT 20
+#define CLK_PRE_OVERRIDE (1 << 19)
+#define CLK_PRE(x) ((x) << 16)
+#define CLK_PRE_MASK (0x3 << 16)
+#define CLK_PRE_SHIFT 16
+#define CLK_POST_OVERRIDE (1 << 15)
+#define CLK_POST(x) ((x) << 8)
+#define CLK_POST_MASK (0x7 << 8)
+#define CLK_POST_SHIFT 8
+#define CLK_TRAIL_OVERRIDE (1 << 7)
+#define CLK_TRAIL(x) ((x) << 0)
+#define CLK_TRAIL_MASK (0xf << 0)
+#define CLK_TRAIL_SHIFT 0
+
+#define _DPHY_DATA_TIMING_PARAM_0 0x162184
+#define _DPHY_DATA_TIMING_PARAM_1 0x6c184
+#define DPHY_DATA_TIMING_PARAM(port) _MMIO_PORT(port, \
+ _DPHY_DATA_TIMING_PARAM_0,\
+ _DPHY_DATA_TIMING_PARAM_1)
+#define _DSI_DATA_TIMING_PARAM_0 0x6B084
+#define _DSI_DATA_TIMING_PARAM_1 0x6B884
+#define DSI_DATA_TIMING_PARAM(port) _MMIO_PORT(port, \
+ _DSI_DATA_TIMING_PARAM_0,\
+ _DSI_DATA_TIMING_PARAM_1)
+#define HS_PREPARE_OVERRIDE (1 << 31)
+#define HS_PREPARE(x) ((x) << 24)
+#define HS_PREPARE_MASK (0x7 << 24)
+#define HS_PREPARE_SHIFT 24
+#define HS_ZERO_OVERRIDE (1 << 23)
+#define HS_ZERO(x) ((x) << 16)
+#define HS_ZERO_MASK (0xf << 16)
+#define HS_ZERO_SHIFT 16
+#define HS_TRAIL_OVERRIDE (1 << 15)
+#define HS_TRAIL(x) ((x) << 8)
+#define HS_TRAIL_MASK (0x7 << 8)
+#define HS_TRAIL_SHIFT 8
+#define HS_EXIT_OVERRIDE (1 << 7)
+#define HS_EXIT(x) ((x) << 0)
+#define HS_EXIT_MASK (0x7 << 0)
+#define HS_EXIT_SHIFT 0
+
+#define _DPHY_TA_TIMING_PARAM_0 0x162188
+#define _DPHY_TA_TIMING_PARAM_1 0x6c188
+#define DPHY_TA_TIMING_PARAM(port) _MMIO_PORT(port, \
+ _DPHY_TA_TIMING_PARAM_0,\
+ _DPHY_TA_TIMING_PARAM_1)
+#define _DSI_TA_TIMING_PARAM_0 0x6b098
+#define _DSI_TA_TIMING_PARAM_1 0x6b898
+#define DSI_TA_TIMING_PARAM(port) _MMIO_PORT(port, \
+ _DSI_TA_TIMING_PARAM_0,\
+ _DSI_TA_TIMING_PARAM_1)
+#define TA_SURE_OVERRIDE (1 << 31)
+#define TA_SURE(x) ((x) << 16)
+#define TA_SURE_MASK (0x1f << 16)
+#define TA_SURE_SHIFT 16
+#define TA_GO_OVERRIDE (1 << 15)
+#define TA_GO(x) ((x) << 8)
+#define TA_GO_MASK (0xf << 8)
+#define TA_GO_SHIFT 8
+#define TA_GET_OVERRIDE (1 << 7)
+#define TA_GET(x) ((x) << 0)
+#define TA_GET_MASK (0xf << 0)
+#define TA_GET_SHIFT 0
+
+/* DSI transcoder configuration */
+#define _DSI_TRANS_FUNC_CONF_0 0x6b030
+#define _DSI_TRANS_FUNC_CONF_1 0x6b830
+#define DSI_TRANS_FUNC_CONF(tc) _MMIO_DSI(tc, \
+ _DSI_TRANS_FUNC_CONF_0,\
+ _DSI_TRANS_FUNC_CONF_1)
+#define OP_MODE_MASK (0x3 << 28)
+#define OP_MODE_SHIFT 28
+#define CMD_MODE_NO_GATE (0x0 << 28)
+#define CMD_MODE_TE_GATE (0x1 << 28)
+#define VIDEO_MODE_SYNC_EVENT (0x2 << 28)
+#define VIDEO_MODE_SYNC_PULSE (0x3 << 28)
+#define TE_SOURCE_GPIO (1 << 27)
+#define LINK_READY (1 << 20)
+#define PIX_FMT_MASK (0x3 << 16)
+#define PIX_FMT_SHIFT 16
+#define PIX_FMT_RGB565 (0x0 << 16)
+#define PIX_FMT_RGB666_PACKED (0x1 << 16)
+#define PIX_FMT_RGB666_LOOSE (0x2 << 16)
+#define PIX_FMT_RGB888 (0x3 << 16)
+#define PIX_FMT_RGB101010 (0x4 << 16)
+#define PIX_FMT_RGB121212 (0x5 << 16)
+#define PIX_FMT_COMPRESSED (0x6 << 16)
+#define BGR_TRANSMISSION (1 << 15)
+#define PIX_VIRT_CHAN(x) ((x) << 12)
+#define PIX_VIRT_CHAN_MASK (0x3 << 12)
+#define PIX_VIRT_CHAN_SHIFT 12
+#define PIX_BUF_THRESHOLD_MASK (0x3 << 10)
+#define PIX_BUF_THRESHOLD_SHIFT 10
+#define PIX_BUF_THRESHOLD_1_4 (0x0 << 10)
+#define PIX_BUF_THRESHOLD_1_2 (0x1 << 10)
+#define PIX_BUF_THRESHOLD_3_4 (0x2 << 10)
+#define PIX_BUF_THRESHOLD_FULL (0x3 << 10)
+#define CONTINUOUS_CLK_MASK (0x3 << 8)
+#define CONTINUOUS_CLK_SHIFT 8
+#define CLK_ENTER_LP_AFTER_DATA (0x0 << 8)
+#define CLK_HS_OR_LP (0x2 << 8)
+#define CLK_HS_CONTINUOUS (0x3 << 8)
+#define LINK_CALIBRATION_MASK (0x3 << 4)
+#define LINK_CALIBRATION_SHIFT 4
+#define CALIBRATION_DISABLED (0x0 << 4)
+#define CALIBRATION_ENABLED_INITIAL_ONLY (0x2 << 4)
+#define CALIBRATION_ENABLED_INITIAL_PERIODIC (0x3 << 4)
+#define BLANKING_PACKET_ENABLE (1 << 2)
+#define S3D_ORIENTATION_LANDSCAPE (1 << 1)
+#define EOTP_DISABLED (1 << 0)
+
+#define _DSI_CMD_RXCTL_0 0x6b0d4
+#define _DSI_CMD_RXCTL_1 0x6b8d4
+#define DSI_CMD_RXCTL(tc) _MMIO_DSI(tc, \
+ _DSI_CMD_RXCTL_0,\
+ _DSI_CMD_RXCTL_1)
+#define READ_UNLOADS_DW (1 << 16)
+#define RECEIVED_UNASSIGNED_TRIGGER (1 << 15)
+#define RECEIVED_ACKNOWLEDGE_TRIGGER (1 << 14)
+#define RECEIVED_TEAR_EFFECT_TRIGGER (1 << 13)
+#define RECEIVED_RESET_TRIGGER (1 << 12)
+#define RECEIVED_PAYLOAD_WAS_LOST (1 << 11)
+#define RECEIVED_CRC_WAS_LOST (1 << 10)
+#define NUMBER_RX_PLOAD_DW_MASK (0xff << 0)
+#define NUMBER_RX_PLOAD_DW_SHIFT 0
+
+#define _DSI_CMD_TXCTL_0 0x6b0d0
+#define _DSI_CMD_TXCTL_1 0x6b8d0
+#define DSI_CMD_TXCTL(tc) _MMIO_DSI(tc, \
+ _DSI_CMD_TXCTL_0,\
+ _DSI_CMD_TXCTL_1)
+#define KEEP_LINK_IN_HS (1 << 24)
+#define FREE_HEADER_CREDIT_MASK (0x1f << 8)
+#define FREE_HEADER_CREDIT_SHIFT 8
+#define FREE_PLOAD_CREDIT_MASK (0xff << 0)
+#define FREE_PLOAD_CREDIT_SHIFT 0
+#define MAX_HEADER_CREDIT 0x10
+#define MAX_PLOAD_CREDIT 0x40
+
+#define _DSI_CMD_TXHDR_0 0x6b100
+#define _DSI_CMD_TXHDR_1 0x6b900
+#define DSI_CMD_TXHDR(tc) _MMIO_DSI(tc, \
+ _DSI_CMD_TXHDR_0,\
+ _DSI_CMD_TXHDR_1)
+#define PAYLOAD_PRESENT (1 << 31)
+#define LP_DATA_TRANSFER (1 << 30)
+#define VBLANK_FENCE (1 << 29)
+#define PARAM_WC_MASK (0xffff << 8)
+#define PARAM_WC_LOWER_SHIFT 8
+#define PARAM_WC_UPPER_SHIFT 16
+#define VC_MASK (0x3 << 6)
+#define VC_SHIFT 6
+#define DT_MASK (0x3f << 0)
+#define DT_SHIFT 0
+
+#define _DSI_CMD_TXPYLD_0 0x6b104
+#define _DSI_CMD_TXPYLD_1 0x6b904
+#define DSI_CMD_TXPYLD(tc) _MMIO_DSI(tc, \
+ _DSI_CMD_TXPYLD_0,\
+ _DSI_CMD_TXPYLD_1)
+
+#define _DSI_LP_MSG_0 0x6b0d8
+#define _DSI_LP_MSG_1 0x6b8d8
+#define DSI_LP_MSG(tc) _MMIO_DSI(tc, \
+ _DSI_LP_MSG_0,\
+ _DSI_LP_MSG_1)
+#define LPTX_IN_PROGRESS (1 << 17)
+#define LINK_IN_ULPS (1 << 16)
+#define LINK_ULPS_TYPE_LP11 (1 << 8)
+#define LINK_ENTER_ULPS (1 << 0)
+
+/* DSI timeout registers */
+#define _DSI_HSTX_TO_0 0x6b044
+#define _DSI_HSTX_TO_1 0x6b844
+#define DSI_HSTX_TO(tc) _MMIO_DSI(tc, \
+ _DSI_HSTX_TO_0,\
+ _DSI_HSTX_TO_1)
+#define HSTX_TIMEOUT_VALUE_MASK (0xffff << 16)
+#define HSTX_TIMEOUT_VALUE_SHIFT 16
+#define HSTX_TIMEOUT_VALUE(x) ((x) << 16)
+#define HSTX_TIMED_OUT (1 << 0)
+
+#define _DSI_LPRX_HOST_TO_0 0x6b048
+#define _DSI_LPRX_HOST_TO_1 0x6b848
+#define DSI_LPRX_HOST_TO(tc) _MMIO_DSI(tc, \
+ _DSI_LPRX_HOST_TO_0,\
+ _DSI_LPRX_HOST_TO_1)
+#define LPRX_TIMED_OUT (1 << 16)
+#define LPRX_TIMEOUT_VALUE_MASK (0xffff << 0)
+#define LPRX_TIMEOUT_VALUE_SHIFT 0
+#define LPRX_TIMEOUT_VALUE(x) ((x) << 0)
+
+#define _DSI_PWAIT_TO_0 0x6b040
+#define _DSI_PWAIT_TO_1 0x6b840
+#define DSI_PWAIT_TO(tc) _MMIO_DSI(tc, \
+ _DSI_PWAIT_TO_0,\
+ _DSI_PWAIT_TO_1)
+#define PRESET_TIMEOUT_VALUE_MASK (0xffff << 16)
+#define PRESET_TIMEOUT_VALUE_SHIFT 16
+#define PRESET_TIMEOUT_VALUE(x) ((x) << 16)
+#define PRESPONSE_TIMEOUT_VALUE_MASK (0xffff << 0)
+#define PRESPONSE_TIMEOUT_VALUE_SHIFT 0
+#define PRESPONSE_TIMEOUT_VALUE(x) ((x) << 0)
+
+#define _DSI_TA_TO_0 0x6b04c
+#define _DSI_TA_TO_1 0x6b84c
+#define DSI_TA_TO(tc) _MMIO_DSI(tc, \
+ _DSI_TA_TO_0,\
+ _DSI_TA_TO_1)
+#define TA_TIMED_OUT (1 << 16)
+#define TA_TIMEOUT_VALUE_MASK (0xffff << 0)
+#define TA_TIMEOUT_VALUE_SHIFT 0
+#define TA_TIMEOUT_VALUE(x) ((x) << 0)
+
+#endif /* __ICL_DSI_REGS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_acpi.c b/drivers/gpu/drm/i915/display/intel_acpi.c
new file mode 100644
index 000000000..9df78e7ca
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_acpi.c
@@ -0,0 +1,360 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel ACPI functions
+ *
+ * _DSM related code stolen from nouveau_acpi.c.
+ */
+
+#include <linux/pci.h>
+#include <linux/acpi.h>
+#include <acpi/video.h>
+
+#include "i915_drv.h"
+#include "intel_acpi.h"
+#include "intel_display_types.h"
+
+#define INTEL_DSM_REVISION_ID 1 /* For Calpella anyway... */
+#define INTEL_DSM_FN_PLATFORM_MUX_INFO 1 /* No args */
+
+static const guid_t intel_dsm_guid =
+ GUID_INIT(0x7ed873d3, 0xc2d0, 0x4e4f,
+ 0xa8, 0x54, 0x0f, 0x13, 0x17, 0xb0, 0x1c, 0x2c);
+
+#define INTEL_DSM_FN_GET_BIOS_DATA_FUNCS_SUPPORTED 0 /* No args */
+
+static const guid_t intel_dsm_guid2 =
+ GUID_INIT(0x3e5b41c6, 0xeb1d, 0x4260,
+ 0x9d, 0x15, 0xc7, 0x1f, 0xba, 0xda, 0xe4, 0x14);
+
+static char *intel_dsm_port_name(u8 id)
+{
+ switch (id) {
+ case 0:
+ return "Reserved";
+ case 1:
+ return "Analog VGA";
+ case 2:
+ return "LVDS";
+ case 3:
+ return "Reserved";
+ case 4:
+ return "HDMI/DVI_B";
+ case 5:
+ return "HDMI/DVI_C";
+ case 6:
+ return "HDMI/DVI_D";
+ case 7:
+ return "DisplayPort_A";
+ case 8:
+ return "DisplayPort_B";
+ case 9:
+ return "DisplayPort_C";
+ case 0xa:
+ return "DisplayPort_D";
+ case 0xb:
+ case 0xc:
+ case 0xd:
+ return "Reserved";
+ case 0xe:
+ return "WiDi";
+ default:
+ return "bad type";
+ }
+}
+
+static char *intel_dsm_mux_type(u8 type)
+{
+ switch (type) {
+ case 0:
+ return "unknown";
+ case 1:
+ return "No MUX, iGPU only";
+ case 2:
+ return "No MUX, dGPU only";
+ case 3:
+ return "MUXed between iGPU and dGPU";
+ default:
+ return "bad type";
+ }
+}
+
+static void intel_dsm_platform_mux_info(acpi_handle dhandle)
+{
+ int i;
+ union acpi_object *pkg, *connector_count;
+
+ pkg = acpi_evaluate_dsm_typed(dhandle, &intel_dsm_guid,
+ INTEL_DSM_REVISION_ID, INTEL_DSM_FN_PLATFORM_MUX_INFO,
+ NULL, ACPI_TYPE_PACKAGE);
+ if (!pkg) {
+ DRM_DEBUG_DRIVER("failed to evaluate _DSM\n");
+ return;
+ }
+
+ if (!pkg->package.count) {
+ DRM_DEBUG_DRIVER("no connection in _DSM\n");
+ ACPI_FREE(pkg);
+ return;
+ }
+
+ connector_count = &pkg->package.elements[0];
+ DRM_DEBUG_DRIVER("MUX info connectors: %lld\n",
+ (unsigned long long)connector_count->integer.value);
+ for (i = 1; i < pkg->package.count; i++) {
+ union acpi_object *obj = &pkg->package.elements[i];
+ union acpi_object *connector_id;
+ union acpi_object *info;
+
+ if (obj->type != ACPI_TYPE_PACKAGE || obj->package.count < 2) {
+ DRM_DEBUG_DRIVER("Invalid object for MUX #%d\n", i);
+ continue;
+ }
+
+ connector_id = &obj->package.elements[0];
+ info = &obj->package.elements[1];
+ if (info->type != ACPI_TYPE_BUFFER || info->buffer.length < 4) {
+ DRM_DEBUG_DRIVER("Invalid info for MUX obj #%d\n", i);
+ continue;
+ }
+
+ DRM_DEBUG_DRIVER("Connector id: 0x%016llx\n",
+ (unsigned long long)connector_id->integer.value);
+ DRM_DEBUG_DRIVER(" port id: %s\n",
+ intel_dsm_port_name(info->buffer.pointer[0]));
+ DRM_DEBUG_DRIVER(" display mux info: %s\n",
+ intel_dsm_mux_type(info->buffer.pointer[1]));
+ DRM_DEBUG_DRIVER(" aux/dc mux info: %s\n",
+ intel_dsm_mux_type(info->buffer.pointer[2]));
+ DRM_DEBUG_DRIVER(" hpd mux info: %s\n",
+ intel_dsm_mux_type(info->buffer.pointer[3]));
+ }
+
+ ACPI_FREE(pkg);
+}
+
+static acpi_handle intel_dsm_pci_probe(struct pci_dev *pdev)
+{
+ acpi_handle dhandle;
+
+ dhandle = ACPI_HANDLE(&pdev->dev);
+ if (!dhandle)
+ return NULL;
+
+ if (!acpi_check_dsm(dhandle, &intel_dsm_guid, INTEL_DSM_REVISION_ID,
+ 1 << INTEL_DSM_FN_PLATFORM_MUX_INFO)) {
+ DRM_DEBUG_KMS("no _DSM method for intel device\n");
+ return NULL;
+ }
+
+ intel_dsm_platform_mux_info(dhandle);
+
+ return dhandle;
+}
+
+static bool intel_dsm_detect(void)
+{
+ acpi_handle dhandle = NULL;
+ char acpi_method_name[255] = { 0 };
+ struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name};
+ struct pci_dev *pdev = NULL;
+ int vga_count = 0;
+
+ while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
+ vga_count++;
+ dhandle = intel_dsm_pci_probe(pdev) ?: dhandle;
+ }
+
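+ /*
+ * Two VGA-class devices plus a valid handle indicate a hybrid
+ * (iGPU + dGPU) system, the case the mux _DSM is meant for.
+ */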
+ if (vga_count == 2 && dhandle) {
+ acpi_get_name(dhandle, ACPI_FULL_PATHNAME, &buffer);
+ DRM_DEBUG_DRIVER("vga_switcheroo: detected DSM switching method %s handle\n",
+ acpi_method_name);
+ return true;
+ }
+
+ return false;
+}
+
+void intel_register_dsm_handler(void)
+{
+ if (!intel_dsm_detect())
+ return;
+}
+
+void intel_unregister_dsm_handler(void)
+{
+}
+
+void intel_dsm_get_bios_data_funcs_supported(struct drm_i915_private *i915)
+{
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
+ acpi_handle dhandle;
+ union acpi_object *obj;
+
+ dhandle = ACPI_HANDLE(&pdev->dev);
+ if (!dhandle)
+ return;
+
+ obj = acpi_evaluate_dsm(dhandle, &intel_dsm_guid2, INTEL_DSM_REVISION_ID,
+ INTEL_DSM_FN_GET_BIOS_DATA_FUNCS_SUPPORTED, NULL);
+ if (obj)
+ ACPI_FREE(obj);
+}
+
+/*
+ * ACPI Specification, Revision 5.0, Appendix B.3.2 _DOD (Enumerate All Devices
+ * Attached to the Display Adapter).
+ */
+#define ACPI_DISPLAY_INDEX_SHIFT 0
+#define ACPI_DISPLAY_INDEX_MASK (0xf << 0)
+#define ACPI_DISPLAY_PORT_ATTACHMENT_SHIFT 4
+#define ACPI_DISPLAY_PORT_ATTACHMENT_MASK (0xf << 4)
+#define ACPI_DISPLAY_TYPE_SHIFT 8
+#define ACPI_DISPLAY_TYPE_MASK (0xf << 8)
+#define ACPI_DISPLAY_TYPE_OTHER (0 << 8)
+#define ACPI_DISPLAY_TYPE_VGA (1 << 8)
+#define ACPI_DISPLAY_TYPE_TV (2 << 8)
+#define ACPI_DISPLAY_TYPE_EXTERNAL_DIGITAL (3 << 8)
+#define ACPI_DISPLAY_TYPE_INTERNAL_DIGITAL (4 << 8)
+#define ACPI_VENDOR_SPECIFIC_SHIFT 12
+#define ACPI_VENDOR_SPECIFIC_MASK (0xf << 12)
+#define ACPI_BIOS_CAN_DETECT (1 << 16)
+#define ACPI_DEPENDS_ON_VGA (1 << 17)
+#define ACPI_PIPE_ID_SHIFT 18
+#define ACPI_PIPE_ID_MASK (7 << 18)
+#define ACPI_DEVICE_ID_SCHEME (1ULL << 31)
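+
+/*
+ * Example: a device id of 0x0400 decodes as display index 0, port
+ * attachment 0 and display type ACPI_DISPLAY_TYPE_INTERNAL_DIGITAL.
+ */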
+
+static u32 acpi_display_type(struct intel_connector *connector)
+{
+ u32 display_type;
+
+ switch (connector->base.connector_type) {
+ case DRM_MODE_CONNECTOR_VGA:
+ case DRM_MODE_CONNECTOR_DVIA:
+ display_type = ACPI_DISPLAY_TYPE_VGA;
+ break;
+ case DRM_MODE_CONNECTOR_Composite:
+ case DRM_MODE_CONNECTOR_SVIDEO:
+ case DRM_MODE_CONNECTOR_Component:
+ case DRM_MODE_CONNECTOR_9PinDIN:
+ case DRM_MODE_CONNECTOR_TV:
+ display_type = ACPI_DISPLAY_TYPE_TV;
+ break;
+ case DRM_MODE_CONNECTOR_DVII:
+ case DRM_MODE_CONNECTOR_DVID:
+ case DRM_MODE_CONNECTOR_DisplayPort:
+ case DRM_MODE_CONNECTOR_HDMIA:
+ case DRM_MODE_CONNECTOR_HDMIB:
+ display_type = ACPI_DISPLAY_TYPE_EXTERNAL_DIGITAL;
+ break;
+ case DRM_MODE_CONNECTOR_LVDS:
+ case DRM_MODE_CONNECTOR_eDP:
+ case DRM_MODE_CONNECTOR_DSI:
+ display_type = ACPI_DISPLAY_TYPE_INTERNAL_DIGITAL;
+ break;
+ case DRM_MODE_CONNECTOR_Unknown:
+ case DRM_MODE_CONNECTOR_VIRTUAL:
+ display_type = ACPI_DISPLAY_TYPE_OTHER;
+ break;
+ default:
+ MISSING_CASE(connector->base.connector_type);
+ display_type = ACPI_DISPLAY_TYPE_OTHER;
+ break;
+ }
+
+ return display_type;
+}
+
+void intel_acpi_device_id_update(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *drm_dev = &dev_priv->drm;
+ struct intel_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+ u8 display_index[16] = {};
+
+ /* Populate the ACPI IDs for all connectors for a given drm_device */
+ drm_connector_list_iter_begin(drm_dev, &conn_iter);
+ for_each_intel_connector_iter(connector, &conn_iter) {
+ u32 device_id, type;
+
+ device_id = acpi_display_type(connector);
+
+ /* Use display type specific display index. */
+ type = (device_id & ACPI_DISPLAY_TYPE_MASK)
+ >> ACPI_DISPLAY_TYPE_SHIFT;
+ device_id |= display_index[type]++ << ACPI_DISPLAY_INDEX_SHIFT;
+
+ connector->acpi_device_id = device_id;
+ }
+ drm_connector_list_iter_end(&conn_iter);
+}
+
+/* NOTE: The connector order must be final before this is called. */
+void intel_acpi_assign_connector_fwnodes(struct drm_i915_private *i915)
+{
+ struct drm_connector_list_iter conn_iter;
+ struct drm_device *drm_dev = &i915->drm;
+ struct fwnode_handle *fwnode = NULL;
+ struct drm_connector *connector;
+ struct acpi_device *adev;
+
+ drm_connector_list_iter_begin(drm_dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ /* Always getting the next, even when the last was not used. */
+ fwnode = device_get_next_child_node(drm_dev->dev, fwnode);
+ if (!fwnode)
+ break;
+
+ switch (connector->connector_type) {
+ case DRM_MODE_CONNECTOR_LVDS:
+ case DRM_MODE_CONNECTOR_eDP:
+ case DRM_MODE_CONNECTOR_DSI:
+ /*
+ * Integrated displays have a specific address 0x1f on
+ * most Intel platforms, but not on all of them.
+ */
+ adev = acpi_find_child_device(ACPI_COMPANION(drm_dev->dev),
+ 0x1f, 0);
+ if (adev) {
+ connector->fwnode =
+ fwnode_handle_get(acpi_fwnode_handle(adev));
+ break;
+ }
+ fallthrough;
+ default:
+ connector->fwnode = fwnode_handle_get(fwnode);
+ break;
+ }
+ }
+ drm_connector_list_iter_end(&conn_iter);
+ /*
+ * device_get_next_child_node() takes a reference on the fwnode.
+ * If we stopped iterating because we ran out of connectors, we
+ * need to put it; otherwise fwnode is NULL and the put is a no-op.
+ */
+ fwnode_handle_put(fwnode);
+}
+
+void intel_acpi_video_register(struct drm_i915_private *i915)
+{
+ struct drm_connector_list_iter conn_iter;
+ struct drm_connector *connector;
+
+ acpi_video_register();
+
+ /*
+ * If i915 is driving an internal panel without registering its native
+ * backlight handler, try to register the acpi_video backlight.
+ * For panels not driven by i915, another GPU driver may still register
+ * a native backlight later, and acpi_video_register_backlight() should
+ * only be called after any native backlights have been registered.
+ */
+ drm_connector_list_iter_begin(&i915->drm, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ struct intel_panel *panel = &to_intel_connector(connector)->panel;
+
+ if (panel->backlight.funcs && !panel->backlight.device) {
+ acpi_video_register_backlight();
+ break;
+ }
+ }
+ drm_connector_list_iter_end(&conn_iter);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_acpi.h b/drivers/gpu/drm/i915/display/intel_acpi.h
new file mode 100644
index 000000000..6a0007452
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_acpi.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_ACPI_H__
+#define __INTEL_ACPI_H__
+
+struct drm_i915_private;
+
+#ifdef CONFIG_ACPI
+void intel_register_dsm_handler(void);
+void intel_unregister_dsm_handler(void);
+void intel_dsm_get_bios_data_funcs_supported(struct drm_i915_private *i915);
+void intel_acpi_device_id_update(struct drm_i915_private *i915);
+void intel_acpi_assign_connector_fwnodes(struct drm_i915_private *i915);
+void intel_acpi_video_register(struct drm_i915_private *i915);
+#else
+static inline void intel_register_dsm_handler(void) { return; }
+static inline void intel_unregister_dsm_handler(void) { return; }
+static inline
+void intel_dsm_get_bios_data_funcs_supported(struct drm_i915_private *i915) { return; }
+static inline
+void intel_acpi_device_id_update(struct drm_i915_private *i915) { return; }
+static inline
+void intel_acpi_assign_connector_fwnodes(struct drm_i915_private *i915) { return; }
+static inline
+void intel_acpi_video_register(struct drm_i915_private *i915) { return; }
+#endif /* CONFIG_ACPI */
+
+#endif /* __INTEL_ACPI_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c
new file mode 100644
index 000000000..a502af0b6
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_atomic.c
@@ -0,0 +1,351 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * DOC: atomic modeset support
+ *
+ * The functions here implement the state management and hardware programming
+ * dispatch required by the atomic modeset infrastructure.
+ * See intel_atomic_plane.c for the plane-specific atomic functionality.
+ */
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fourcc.h>
+
+#include "i915_drv.h"
+#include "i915_reg.h"
+#include "intel_atomic.h"
+#include "intel_cdclk.h"
+#include "intel_display_types.h"
+#include "intel_global_state.h"
+#include "intel_hdcp.h"
+#include "intel_psr.h"
+#include "intel_fb.h"
+#include "skl_universal_plane.h"
+
+/**
+ * intel_digital_connector_atomic_get_property - hook for connector->atomic_get_property.
+ * @connector: Connector to get the property for.
+ * @state: Connector state to retrieve the property from.
+ * @property: Property to retrieve.
+ * @val: Return value for the property.
+ *
+ * Returns the atomic property value for a digital connector.
+ */
+int intel_digital_connector_atomic_get_property(struct drm_connector *connector,
+ const struct drm_connector_state *state,
+ struct drm_property *property,
+ u64 *val)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_digital_connector_state *intel_conn_state =
+ to_intel_digital_connector_state(state);
+
+ if (property == dev_priv->display.properties.force_audio)
+ *val = intel_conn_state->force_audio;
+ else if (property == dev_priv->display.properties.broadcast_rgb)
+ *val = intel_conn_state->broadcast_rgb;
+ else {
+ drm_dbg_atomic(&dev_priv->drm,
+ "Unknown property [PROP:%d:%s]\n",
+ property->base.id, property->name);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * intel_digital_connector_atomic_set_property - hook for connector->atomic_set_property.
+ * @connector: Connector to set the property for.
+ * @state: Connector state to set the property on.
+ * @property: Property to set.
+ * @val: New value for the property.
+ *
+ * Sets the atomic property value for a digital connector.
+ */
+int intel_digital_connector_atomic_set_property(struct drm_connector *connector,
+ struct drm_connector_state *state,
+ struct drm_property *property,
+ u64 val)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_digital_connector_state *intel_conn_state =
+ to_intel_digital_connector_state(state);
+
+ if (property == dev_priv->display.properties.force_audio) {
+ intel_conn_state->force_audio = val;
+ return 0;
+ }
+
+ if (property == dev_priv->display.properties.broadcast_rgb) {
+ intel_conn_state->broadcast_rgb = val;
+ return 0;
+ }
+
+ drm_dbg_atomic(&dev_priv->drm, "Unknown property [PROP:%d:%s]\n",
+ property->base.id, property->name);
+ return -EINVAL;
+}
+
+int intel_digital_connector_atomic_check(struct drm_connector *conn,
+ struct drm_atomic_state *state)
+{
+ struct drm_connector_state *new_state =
+ drm_atomic_get_new_connector_state(state, conn);
+ struct intel_digital_connector_state *new_conn_state =
+ to_intel_digital_connector_state(new_state);
+ struct drm_connector_state *old_state =
+ drm_atomic_get_old_connector_state(state, conn);
+ struct intel_digital_connector_state *old_conn_state =
+ to_intel_digital_connector_state(old_state);
+ struct drm_crtc_state *crtc_state;
+
+ intel_hdcp_atomic_check(conn, old_state, new_state);
+
+ if (!new_state->crtc)
+ return 0;
+
+ crtc_state = drm_atomic_get_new_crtc_state(state, new_state->crtc);
+
+ /*
+ * These properties are handled by fastset, and might not end
+ * up in a modeset.
+ */
+ if (new_conn_state->force_audio != old_conn_state->force_audio ||
+ new_conn_state->broadcast_rgb != old_conn_state->broadcast_rgb ||
+ new_conn_state->base.colorspace != old_conn_state->base.colorspace ||
+ new_conn_state->base.picture_aspect_ratio != old_conn_state->base.picture_aspect_ratio ||
+ new_conn_state->base.content_type != old_conn_state->base.content_type ||
+ new_conn_state->base.scaling_mode != old_conn_state->base.scaling_mode ||
+ new_conn_state->base.privacy_screen_sw_state != old_conn_state->base.privacy_screen_sw_state ||
+ !drm_connector_atomic_hdr_metadata_equal(old_state, new_state))
+ crtc_state->mode_changed = true;
+
+ return 0;
+}
+
+/**
+ * intel_digital_connector_duplicate_state - duplicate connector state
+ * @connector: digital connector
+ *
+ * Allocates and returns a copy of the connector state (both common and
+ * digital connector specific) for the specified connector.
+ *
+ * Returns: The newly allocated connector state, or NULL on failure.
+ */
+struct drm_connector_state *
+intel_digital_connector_duplicate_state(struct drm_connector *connector)
+{
+ struct intel_digital_connector_state *state;
+
+ state = kmemdup(connector->state, sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
+
+ __drm_atomic_helper_connector_duplicate_state(connector, &state->base);
+ return &state->base;
+}
+
+/**
+ * intel_connector_needs_modeset - check if connector needs a modeset
+ * @state: the atomic state corresponding to this modeset
+ * @connector: the connector
+ *
+ * Returns: True if @connector is assigned to a different CRTC, or if its
+ * new CRTC needs a modeset.
+ */
+bool
+intel_connector_needs_modeset(struct intel_atomic_state *state,
+ struct drm_connector *connector)
+{
+ const struct drm_connector_state *old_conn_state, *new_conn_state;
+
+ old_conn_state = drm_atomic_get_old_connector_state(&state->base, connector);
+ new_conn_state = drm_atomic_get_new_connector_state(&state->base, connector);
+
+ return old_conn_state->crtc != new_conn_state->crtc ||
+ (new_conn_state->crtc &&
+ drm_atomic_crtc_needs_modeset(drm_atomic_get_new_crtc_state(&state->base,
+ new_conn_state->crtc)));
+}
+
+/**
+ * intel_any_crtc_needs_modeset - check if any CRTC needs a modeset
+ * @state: the atomic state corresponding to this modeset
+ *
+ * Returns true if any CRTC in @state needs a modeset.
+ */
+bool intel_any_crtc_needs_modeset(struct intel_atomic_state *state)
+{
+ struct intel_crtc *crtc;
+ struct intel_crtc_state *crtc_state;
+ int i;
+
+ for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
+ if (intel_crtc_needs_modeset(crtc_state))
+ return true;
+ }
+
+ return false;
+}
+
+struct intel_digital_connector_state *
+intel_atomic_get_digital_connector_state(struct intel_atomic_state *state,
+ struct intel_connector *connector)
+{
+ struct drm_connector_state *conn_state;
+
+ conn_state = drm_atomic_get_connector_state(&state->base,
+ &connector->base);
+ if (IS_ERR(conn_state))
+ return ERR_CAST(conn_state);
+
+ return to_intel_digital_connector_state(conn_state);
+}
+
+/**
+ * intel_crtc_duplicate_state - duplicate crtc state
+ * @crtc: drm crtc
+ *
+ * Allocates and returns a copy of the crtc state (both common and
+ * Intel-specific) for the specified crtc.
+ *
+ * Returns: The newly allocated crtc state, or NULL on failure.
+ */
+struct drm_crtc_state *
+intel_crtc_duplicate_state(struct drm_crtc *crtc)
+{
+ const struct intel_crtc_state *old_crtc_state = to_intel_crtc_state(crtc->state);
+ struct intel_crtc_state *crtc_state;
+
+ crtc_state = kmemdup(old_crtc_state, sizeof(*crtc_state), GFP_KERNEL);
+ if (!crtc_state)
+ return NULL;
+
+ __drm_atomic_helper_crtc_duplicate_state(crtc, &crtc_state->uapi);
+
+ /* copy color blobs */
+ if (crtc_state->hw.degamma_lut)
+ drm_property_blob_get(crtc_state->hw.degamma_lut);
+ if (crtc_state->hw.ctm)
+ drm_property_blob_get(crtc_state->hw.ctm);
+ if (crtc_state->hw.gamma_lut)
+ drm_property_blob_get(crtc_state->hw.gamma_lut);
+
+ crtc_state->update_pipe = false;
+ crtc_state->disable_lp_wm = false;
+ crtc_state->disable_cxsr = false;
+ crtc_state->update_wm_pre = false;
+ crtc_state->update_wm_post = false;
+ crtc_state->fifo_changed = false;
+ crtc_state->preload_luts = false;
+ crtc_state->inherited = false;
+ crtc_state->wm.need_postvbl_update = false;
+ crtc_state->do_async_flip = false;
+ crtc_state->fb_bits = 0;
+ crtc_state->update_planes = 0;
+ crtc_state->dsb = NULL;
+
+ return &crtc_state->uapi;
+}
+
+static void intel_crtc_put_color_blobs(struct intel_crtc_state *crtc_state)
+{
+ drm_property_blob_put(crtc_state->hw.degamma_lut);
+ drm_property_blob_put(crtc_state->hw.gamma_lut);
+ drm_property_blob_put(crtc_state->hw.ctm);
+}
+
+void intel_crtc_free_hw_state(struct intel_crtc_state *crtc_state)
+{
+ intel_crtc_put_color_blobs(crtc_state);
+}
+
+/**
+ * intel_crtc_destroy_state - destroy crtc state
+ * @crtc: drm crtc
+ * @state: the state to destroy
+ *
+ * Destroys the crtc state (both common and Intel-specific) for the
+ * specified crtc.
+ */
+void
+intel_crtc_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct intel_crtc_state *crtc_state = to_intel_crtc_state(state);
+
+ drm_WARN_ON(crtc->dev, crtc_state->dsb);
+
+ __drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi);
+ intel_crtc_free_hw_state(crtc_state);
+ kfree(crtc_state);
+}
+
+struct drm_atomic_state *
+intel_atomic_state_alloc(struct drm_device *dev)
+{
+ struct intel_atomic_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
+
+ if (!state || drm_atomic_state_init(dev, &state->base) < 0) {
+ kfree(state);
+ return NULL;
+ }
+
+ return &state->base;
+}
+
+void intel_atomic_state_free(struct drm_atomic_state *_state)
+{
+ struct intel_atomic_state *state = to_intel_atomic_state(_state);
+
+ drm_atomic_state_default_release(&state->base);
+ kfree(state->global_objs);
+
+ i915_sw_fence_fini(&state->commit_ready);
+
+ kfree(state);
+}
+
+void intel_atomic_state_clear(struct drm_atomic_state *s)
+{
+ struct intel_atomic_state *state = to_intel_atomic_state(s);
+
+ drm_atomic_state_default_clear(&state->base);
+ intel_atomic_clear_global_state(state);
+
+ state->dpll_set = state->modeset = false;
+}
+
+struct intel_crtc_state *
+intel_atomic_get_crtc_state(struct drm_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_crtc_state *crtc_state;
+
+ crtc_state = drm_atomic_get_crtc_state(state, &crtc->base);
+ if (IS_ERR(crtc_state))
+ return ERR_CAST(crtc_state);
+
+ return to_intel_crtc_state(crtc_state);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_atomic.h b/drivers/gpu/drm/i915/display/intel_atomic.h
new file mode 100644
index 000000000..e506f6a87
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_atomic.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_ATOMIC_H__
+#define __INTEL_ATOMIC_H__
+
+#include <linux/types.h>
+
+struct drm_atomic_state;
+struct drm_connector;
+struct drm_connector_state;
+struct drm_crtc;
+struct drm_crtc_state;
+struct drm_device;
+struct drm_i915_private;
+struct drm_property;
+struct intel_atomic_state;
+struct intel_connector;
+struct intel_crtc;
+struct intel_crtc_state;
+
+int intel_digital_connector_atomic_get_property(struct drm_connector *connector,
+ const struct drm_connector_state *state,
+ struct drm_property *property,
+ u64 *val);
+int intel_digital_connector_atomic_set_property(struct drm_connector *connector,
+ struct drm_connector_state *state,
+ struct drm_property *property,
+ u64 val);
+int intel_digital_connector_atomic_check(struct drm_connector *conn,
+ struct drm_atomic_state *state);
+struct drm_connector_state *
+intel_digital_connector_duplicate_state(struct drm_connector *connector);
+bool intel_connector_needs_modeset(struct intel_atomic_state *state,
+ struct drm_connector *connector);
+bool intel_any_crtc_needs_modeset(struct intel_atomic_state *state);
+struct intel_digital_connector_state *
+intel_atomic_get_digital_connector_state(struct intel_atomic_state *state,
+ struct intel_connector *connector);
+
+struct drm_crtc_state *intel_crtc_duplicate_state(struct drm_crtc *crtc);
+void intel_crtc_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state);
+void intel_crtc_free_hw_state(struct intel_crtc_state *crtc_state);
+struct drm_atomic_state *intel_atomic_state_alloc(struct drm_device *dev);
+void intel_atomic_state_free(struct drm_atomic_state *state);
+void intel_atomic_state_clear(struct drm_atomic_state *state);
+
+struct intel_crtc_state *
+intel_atomic_get_crtc_state(struct drm_atomic_state *state,
+ struct intel_crtc *crtc);
+
+#endif /* __INTEL_ATOMIC_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
new file mode 100644
index 000000000..82826454b
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
@@ -0,0 +1,1120 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * DOC: atomic plane helpers
+ *
+ * The functions here are used by the atomic plane helper functions to
+ * implement legacy plane updates (i.e., drm_plane->update_plane() and
+ * drm_plane->disable_plane()). This allows plane updates to use the
+ * atomic state infrastructure and perform plane updates as separate
+ * prepare/check/commit/cleanup steps.
+ */
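+/*
+ * Rough call flow (an assumed, simplified sketch; exact entry points vary):
+ *
+ *   legacy update_plane()/disable_plane() ioctl
+ *     -> atomic check phase: intel_plane_atomic_check()
+ *     -> commit phase: intel_prepare_plane_fb()   (pin fb, sync fences)
+ *        -> intel_plane_update_noarm()/intel_plane_update_arm()
+ *        -> intel_cleanup_plane_fb()              (unpin the old fb)
+ */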
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fourcc.h>
+
+#include "gt/intel_rps.h"
+
+#include "intel_atomic_plane.h"
+#include "intel_cdclk.h"
+#include "intel_display_trace.h"
+#include "intel_display_types.h"
+#include "intel_fb.h"
+#include "intel_fb_pin.h"
+#include "intel_sprite.h"
+#include "skl_scaler.h"
+#include "skl_watermark.h"
+
+static void intel_plane_state_reset(struct intel_plane_state *plane_state,
+ struct intel_plane *plane)
+{
+ memset(plane_state, 0, sizeof(*plane_state));
+
+ __drm_atomic_helper_plane_state_reset(&plane_state->uapi, &plane->base);
+
+ plane_state->scaler_id = -1;
+}
+
+struct intel_plane *intel_plane_alloc(void)
+{
+ struct intel_plane_state *plane_state;
+ struct intel_plane *plane;
+
+ plane = kzalloc(sizeof(*plane), GFP_KERNEL);
+ if (!plane)
+ return ERR_PTR(-ENOMEM);
+
+ plane_state = kzalloc(sizeof(*plane_state), GFP_KERNEL);
+ if (!plane_state) {
+ kfree(plane);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ intel_plane_state_reset(plane_state, plane);
+
+ plane->base.state = &plane_state->uapi;
+
+ return plane;
+}
+
+void intel_plane_free(struct intel_plane *plane)
+{
+ intel_plane_destroy_state(&plane->base, plane->base.state);
+ kfree(plane);
+}
+
+/**
+ * intel_plane_duplicate_state - duplicate plane state
+ * @plane: drm plane
+ *
+ * Allocates and returns a copy of the plane state (both common and
+ * Intel-specific) for the specified plane.
+ *
+ * Returns: The newly allocated plane state, or NULL on failure.
+ */
+struct drm_plane_state *
+intel_plane_duplicate_state(struct drm_plane *plane)
+{
+ struct intel_plane_state *intel_state;
+
+ intel_state = to_intel_plane_state(plane->state);
+ intel_state = kmemdup(intel_state, sizeof(*intel_state), GFP_KERNEL);
+
+ if (!intel_state)
+ return NULL;
+
+ __drm_atomic_helper_plane_duplicate_state(plane, &intel_state->uapi);
+
+ intel_state->ggtt_vma = NULL;
+ intel_state->dpt_vma = NULL;
+ intel_state->flags = 0;
+
+ /* add reference to fb */
+ if (intel_state->hw.fb)
+ drm_framebuffer_get(intel_state->hw.fb);
+
+ return &intel_state->uapi;
+}
+
+/**
+ * intel_plane_destroy_state - destroy plane state
+ * @plane: drm plane
+ * @state: state object to destroy
+ *
+ * Destroys the plane state (both common and Intel-specific) for the
+ * specified plane.
+ */
+void
+intel_plane_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct intel_plane_state *plane_state = to_intel_plane_state(state);
+
+ drm_WARN_ON(plane->dev, plane_state->ggtt_vma);
+ drm_WARN_ON(plane->dev, plane_state->dpt_vma);
+
+ __drm_atomic_helper_plane_destroy_state(&plane_state->uapi);
+ if (plane_state->hw.fb)
+ drm_framebuffer_put(plane_state->hw.fb);
+ kfree(plane_state);
+}
+
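+/*
+ * Worked example (illustrative): with a 3840x2160 source rectangle and a
+ * 1920x1080 destination, the rate is scaled by (3840*2160)/(1920*1080) = 4.
+ * Upscaling never reduces the rate, since dst is clamped to src per axis
+ * below.
+ */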
+unsigned int intel_adjusted_rate(const struct drm_rect *src,
+ const struct drm_rect *dst,
+ unsigned int rate)
+{
+ unsigned int src_w, src_h, dst_w, dst_h;
+
+ src_w = drm_rect_width(src) >> 16;
+ src_h = drm_rect_height(src) >> 16;
+ dst_w = drm_rect_width(dst);
+ dst_h = drm_rect_height(dst);
+
+ /* Downscaling limits the maximum pixel rate */
+ dst_w = min(src_w, dst_w);
+ dst_h = min(src_h, dst_h);
+
+ return DIV_ROUND_UP_ULL(mul_u32_u32(rate, src_w * src_h),
+ dst_w * dst_h);
+}
+
+unsigned int intel_plane_pixel_rate(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ /*
+ * Note we don't check for plane visibility here as
+ * we want to use this when calculating the cursor
+ * watermarks even if the cursor is fully offscreen.
+ * That depends on the src/dst rectangles being
+ * correctly populated whenever the watermark code
+ * considers the cursor to be visible, whether or not
+ * it is actually visible.
+ *
+ * See: intel_wm_plane_visible() and intel_check_cursor()
+ */
+
+ return intel_adjusted_rate(&plane_state->uapi.src,
+ &plane_state->uapi.dst,
+ crtc_state->pixel_rate);
+}
+
+unsigned int intel_plane_data_rate(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ int color_plane)
+{
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+
+ if (!plane_state->uapi.visible)
+ return 0;
+
+ return intel_plane_pixel_rate(crtc_state, plane_state) *
+ fb->format->cpp[color_plane];
+}
+
+static bool
+use_min_ddb(const struct intel_crtc_state *crtc_state,
+ struct intel_plane *plane)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+
+ return DISPLAY_VER(i915) >= 13 &&
+ crtc_state->uapi.async_flip &&
+ plane->async_flip;
+}
+
+static unsigned int
+intel_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ int color_plane)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ int width, height;
+
+ if (plane->id == PLANE_CURSOR)
+ return 0;
+
+ if (!plane_state->uapi.visible)
+ return 0;
+
+ /*
+ * Extra ddb is allocated based on the ratio of the plane's data rate
+ * to the total data rate. In some cases the plane should not get any
+ * extra ddb, so do not count its data rate in those cases.
+ */
+ if (use_min_ddb(crtc_state, plane))
+ return 0;
+
+ /*
+ * Src coordinates are already rotated by 270 degrees for
+ * the 90/270 degree plane rotation cases (to match the
+ * GTT mapping), hence no need to account for rotation here.
+ */
+ width = drm_rect_width(&plane_state->uapi.src) >> 16;
+ height = drm_rect_height(&plane_state->uapi.src) >> 16;
+
+ /* UV plane does 1/2 pixel sub-sampling */
+ if (color_plane == 1) {
+ width /= 2;
+ height /= 2;
+ }
+
+ return width * height * fb->format->cpp[color_plane];
+}
+
+int intel_plane_calc_min_cdclk(struct intel_atomic_state *state,
+ struct intel_plane *plane,
+ bool *need_cdclk_calc)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ const struct intel_plane_state *plane_state =
+ intel_atomic_get_new_plane_state(state, plane);
+ struct intel_crtc *crtc = to_intel_crtc(plane_state->hw.crtc);
+ const struct intel_cdclk_state *cdclk_state;
+ const struct intel_crtc_state *old_crtc_state;
+ struct intel_crtc_state *new_crtc_state;
+
+ if (!plane_state->uapi.visible || !plane->min_cdclk)
+ return 0;
+
+ old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
+ new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
+
+ new_crtc_state->min_cdclk[plane->id] =
+ plane->min_cdclk(new_crtc_state, plane_state);
+
+ /*
+ * No need to check against the cdclk state if
+ * the min cdclk for the plane doesn't increase.
+ *
+ * I.e. we only ever increase the cdclk due to plane
+ * requirements. This can reduce back and forth
+ * display blinking due to constant cdclk changes.
+ */
+ if (new_crtc_state->min_cdclk[plane->id] <=
+ old_crtc_state->min_cdclk[plane->id])
+ return 0;
+
+ cdclk_state = intel_atomic_get_cdclk_state(state);
+ if (IS_ERR(cdclk_state))
+ return PTR_ERR(cdclk_state);
+
+ /*
+ * No need to recalculate the cdclk state if
+ * the min cdclk for the pipe doesn't increase.
+ *
+ * I.e. we only ever increase the cdclk due to plane
+ * requirements. This can reduce back and forth
+ * display blinking due to constant cdclk changes.
+ */
+ if (new_crtc_state->min_cdclk[plane->id] <=
+ cdclk_state->min_cdclk[crtc->pipe])
+ return 0;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "[PLANE:%d:%s] min cdclk (%d kHz) > [CRTC:%d:%s] min cdclk (%d kHz)\n",
+ plane->base.base.id, plane->base.name,
+ new_crtc_state->min_cdclk[plane->id],
+ crtc->base.base.id, crtc->base.name,
+ cdclk_state->min_cdclk[crtc->pipe]);
+ *need_cdclk_calc = true;
+
+ return 0;
+}
+
+static void intel_plane_clear_hw_state(struct intel_plane_state *plane_state)
+{
+ if (plane_state->hw.fb)
+ drm_framebuffer_put(plane_state->hw.fb);
+
+ memset(&plane_state->hw, 0, sizeof(plane_state->hw));
+}
+
+void intel_plane_copy_uapi_to_hw_state(struct intel_plane_state *plane_state,
+ const struct intel_plane_state *from_plane_state,
+ struct intel_crtc *crtc)
+{
+ intel_plane_clear_hw_state(plane_state);
+
+ /*
+ * For the bigjoiner slave, uapi.crtc will point at
+ * the master crtc, so we explicitly assign the right
+ * slave crtc to hw.crtc. uapi.crtc != NULL simply indicates
+ * the plane is logically enabled on the uapi level.
+ */
+ plane_state->hw.crtc = from_plane_state->uapi.crtc ? &crtc->base : NULL;
+
+ plane_state->hw.fb = from_plane_state->uapi.fb;
+ if (plane_state->hw.fb)
+ drm_framebuffer_get(plane_state->hw.fb);
+
+ plane_state->hw.alpha = from_plane_state->uapi.alpha;
+ plane_state->hw.pixel_blend_mode =
+ from_plane_state->uapi.pixel_blend_mode;
+ plane_state->hw.rotation = from_plane_state->uapi.rotation;
+ plane_state->hw.color_encoding = from_plane_state->uapi.color_encoding;
+ plane_state->hw.color_range = from_plane_state->uapi.color_range;
+ plane_state->hw.scaling_filter = from_plane_state->uapi.scaling_filter;
+
+ plane_state->uapi.src = drm_plane_state_src(&from_plane_state->uapi);
+ plane_state->uapi.dst = drm_plane_state_dest(&from_plane_state->uapi);
+}
+
+void intel_plane_copy_hw_state(struct intel_plane_state *plane_state,
+ const struct intel_plane_state *from_plane_state)
+{
+ intel_plane_clear_hw_state(plane_state);
+
+ memcpy(&plane_state->hw, &from_plane_state->hw,
+ sizeof(plane_state->hw));
+
+ if (plane_state->hw.fb)
+ drm_framebuffer_get(plane_state->hw.fb);
+}
+
+void intel_plane_set_invisible(struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+
+ crtc_state->active_planes &= ~BIT(plane->id);
+ crtc_state->scaled_planes &= ~BIT(plane->id);
+ crtc_state->nv12_planes &= ~BIT(plane->id);
+ crtc_state->c8_planes &= ~BIT(plane->id);
+ crtc_state->data_rate[plane->id] = 0;
+ crtc_state->data_rate_y[plane->id] = 0;
+ crtc_state->rel_data_rate[plane->id] = 0;
+ crtc_state->rel_data_rate_y[plane->id] = 0;
+ crtc_state->min_cdclk[plane->id] = 0;
+
+ plane_state->uapi.visible = false;
+}
+
+/* FIXME nuke when all wm code is atomic */
+static bool intel_wm_need_update(const struct intel_plane_state *cur,
+ struct intel_plane_state *new)
+{
+ /* Update watermarks on tiling or size changes. */
+ if (new->uapi.visible != cur->uapi.visible)
+ return true;
+
+ if (!cur->hw.fb || !new->hw.fb)
+ return false;
+
+ if (cur->hw.fb->modifier != new->hw.fb->modifier ||
+ cur->hw.rotation != new->hw.rotation ||
+ drm_rect_width(&new->uapi.src) != drm_rect_width(&cur->uapi.src) ||
+ drm_rect_height(&new->uapi.src) != drm_rect_height(&cur->uapi.src) ||
+ drm_rect_width(&new->uapi.dst) != drm_rect_width(&cur->uapi.dst) ||
+ drm_rect_height(&new->uapi.dst) != drm_rect_height(&cur->uapi.dst))
+ return true;
+
+ return false;
+}
+
+static bool intel_plane_is_scaled(const struct intel_plane_state *plane_state)
+{
+ int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+ int src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
+ int dst_w = drm_rect_width(&plane_state->uapi.dst);
+ int dst_h = drm_rect_height(&plane_state->uapi.dst);
+
+ return src_w != dst_w || src_h != dst_h;
+}
+
+static bool intel_plane_do_async_flip(struct intel_plane *plane,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct intel_crtc_state *new_crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+
+ if (!plane->async_flip)
+ return false;
+
+ if (!new_crtc_state->uapi.async_flip)
+ return false;
+
+ /*
+ * On display version 13 and newer we might need to override the
+ * first async flip in order to change the watermark levels as part
+ * of an optimization, so check whether this is the first async flip.
+ * Platforms earlier than that always do the async flip.
+ */
+ return DISPLAY_VER(i915) < 13 || old_crtc_state->uapi.async_flip;
+}
+
+static int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
+ struct intel_crtc_state *new_crtc_state,
+ const struct intel_plane_state *old_plane_state,
+ struct intel_plane_state *new_plane_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
+ struct intel_plane *plane = to_intel_plane(new_plane_state->uapi.plane);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ bool mode_changed = intel_crtc_needs_modeset(new_crtc_state);
+ bool was_crtc_enabled = old_crtc_state->hw.active;
+ bool is_crtc_enabled = new_crtc_state->hw.active;
+ bool turn_off, turn_on, visible, was_visible;
+ int ret;
+
+ if (DISPLAY_VER(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
+ ret = skl_update_scaler_plane(new_crtc_state, new_plane_state);
+ if (ret)
+ return ret;
+ }
+
+ was_visible = old_plane_state->uapi.visible;
+ visible = new_plane_state->uapi.visible;
+
+ if (!was_crtc_enabled && drm_WARN_ON(&dev_priv->drm, was_visible))
+ was_visible = false;
+
+ /*
+ * Visibility is calculated as if the crtc was on, but
+ * after scaler setup everything depends on it being off
+ * when the crtc isn't active.
+ *
+ * FIXME this is wrong for watermarks. Watermarks should also
+ * be computed as if the pipe would be active. Perhaps move
+ * per-plane wm computation to the .check_plane() hook, and
+ * only combine the results from all planes in the current place?
+ */
+ if (!is_crtc_enabled) {
+ intel_plane_set_invisible(new_crtc_state, new_plane_state);
+ visible = false;
+ }
+
+ if (!was_visible && !visible)
+ return 0;
+
+ turn_off = was_visible && (!visible || mode_changed);
+ turn_on = visible && (!was_visible || mode_changed);
+
+ drm_dbg_atomic(&dev_priv->drm,
+ "[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
+ crtc->base.base.id, crtc->base.name,
+ plane->base.base.id, plane->base.name,
+ was_visible, visible,
+ turn_off, turn_on, mode_changed);
+
+ if (turn_on) {
+ if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
+ new_crtc_state->update_wm_pre = true;
+
+ /* must disable cxsr around plane enable/disable */
+ if (plane->id != PLANE_CURSOR)
+ new_crtc_state->disable_cxsr = true;
+ } else if (turn_off) {
+ if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
+ new_crtc_state->update_wm_post = true;
+
+ /* must disable cxsr around plane enable/disable */
+ if (plane->id != PLANE_CURSOR)
+ new_crtc_state->disable_cxsr = true;
+ } else if (intel_wm_need_update(old_plane_state, new_plane_state)) {
+ if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv)) {
+ /* FIXME bollocks */
+ new_crtc_state->update_wm_pre = true;
+ new_crtc_state->update_wm_post = true;
+ }
+ }
+
+ if (visible || was_visible)
+ new_crtc_state->fb_bits |= plane->frontbuffer_bit;
+
+ /*
+ * ILK/SNB DVSACNTR/Sprite Enable
+ * IVB SPR_CTL/Sprite Enable
+ * "When in Self Refresh Big FIFO mode, a write to enable the
+ * plane will be internally buffered and delayed while Big FIFO
+ * mode is exiting."
+ *
+ * Which means that enabling the sprite can take an extra frame
+ * when we start in big FIFO mode (LP1+). Thus we need to drop
+ * down to LP0 and wait for vblank in order to make sure the
+ * sprite gets enabled on the next vblank after the register write.
+ * Doing otherwise would risk enabling the sprite one frame after
+ * we've already signalled flip completion. We can resume LP1+
+ * once the sprite has been enabled.
+ *
+ * WaCxSRDisabledForSpriteScaling:ivb
+ * IVB SPR_SCALE/Scaling Enable
+ * "Low Power watermarks must be disabled for at least one
+ * frame before enabling sprite scaling, and kept disabled
+ * until sprite scaling is disabled."
+ *
+ * ILK/SNB DVSASCALE/Scaling Enable
+ * "When in Self Refresh Big FIFO mode, scaling enable will be
+ * masked off while Big FIFO mode is exiting."
+ *
+ * Despite the w/a only being listed for IVB we assume that
+ * the ILK/SNB note has similar ramifications, hence we apply
+ * the w/a on all three platforms.
+ *
+ * Experimental results suggest this is needed also for the primary
+ * plane, not only the sprite plane.
+ */
+ if (plane->id != PLANE_CURSOR &&
+ (IS_IRONLAKE(dev_priv) || IS_SANDYBRIDGE(dev_priv) ||
+ IS_IVYBRIDGE(dev_priv)) &&
+ (turn_on || (!intel_plane_is_scaled(old_plane_state) &&
+ intel_plane_is_scaled(new_plane_state))))
+ new_crtc_state->disable_lp_wm = true;
+
+ if (intel_plane_do_async_flip(plane, old_crtc_state, new_crtc_state))
+ new_crtc_state->do_async_flip = true;
+
+ return 0;
+}
+
+int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state,
+ struct intel_crtc_state *new_crtc_state,
+ const struct intel_plane_state *old_plane_state,
+ struct intel_plane_state *new_plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(new_plane_state->uapi.plane);
+ const struct drm_framebuffer *fb = new_plane_state->hw.fb;
+ int ret;
+
+ intel_plane_set_invisible(new_crtc_state, new_plane_state);
+ new_crtc_state->enabled_planes &= ~BIT(plane->id);
+
+ if (!new_plane_state->hw.crtc && !old_plane_state->hw.crtc)
+ return 0;
+
+ ret = plane->check_plane(new_crtc_state, new_plane_state);
+ if (ret)
+ return ret;
+
+ if (fb)
+ new_crtc_state->enabled_planes |= BIT(plane->id);
+
+ /* FIXME pre-g4x don't work like this */
+ if (new_plane_state->uapi.visible)
+ new_crtc_state->active_planes |= BIT(plane->id);
+
+ if (new_plane_state->uapi.visible &&
+ intel_plane_is_scaled(new_plane_state))
+ new_crtc_state->scaled_planes |= BIT(plane->id);
+
+ if (new_plane_state->uapi.visible &&
+ intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
+ new_crtc_state->nv12_planes |= BIT(plane->id);
+
+ if (new_plane_state->uapi.visible &&
+ fb->format->format == DRM_FORMAT_C8)
+ new_crtc_state->c8_planes |= BIT(plane->id);
+
+ if (new_plane_state->uapi.visible || old_plane_state->uapi.visible)
+ new_crtc_state->update_planes |= BIT(plane->id);
+
+ if (new_plane_state->uapi.visible &&
+ intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier)) {
+ new_crtc_state->data_rate_y[plane->id] =
+ intel_plane_data_rate(new_crtc_state, new_plane_state, 0);
+ new_crtc_state->data_rate[plane->id] =
+ intel_plane_data_rate(new_crtc_state, new_plane_state, 1);
+
+ new_crtc_state->rel_data_rate_y[plane->id] =
+ intel_plane_relative_data_rate(new_crtc_state,
+ new_plane_state, 0);
+ new_crtc_state->rel_data_rate[plane->id] =
+ intel_plane_relative_data_rate(new_crtc_state,
+ new_plane_state, 1);
+ } else if (new_plane_state->uapi.visible) {
+ new_crtc_state->data_rate[plane->id] =
+ intel_plane_data_rate(new_crtc_state, new_plane_state, 0);
+
+ new_crtc_state->rel_data_rate[plane->id] =
+ intel_plane_relative_data_rate(new_crtc_state,
+ new_plane_state, 0);
+ }
+
+ return intel_plane_atomic_calc_changes(old_crtc_state, new_crtc_state,
+ old_plane_state, new_plane_state);
+}
+
+static struct intel_plane *
+intel_crtc_get_plane(struct intel_crtc *crtc, enum plane_id plane_id)
+{
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_plane *plane;
+
+ for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
+ if (plane->id == plane_id)
+ return plane;
+ }
+
+ return NULL;
+}
+
+int intel_plane_atomic_check(struct intel_atomic_state *state,
+ struct intel_plane *plane)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_plane_state *new_plane_state =
+ intel_atomic_get_new_plane_state(state, plane);
+ const struct intel_plane_state *old_plane_state =
+ intel_atomic_get_old_plane_state(state, plane);
+ const struct intel_plane_state *new_master_plane_state;
+ struct intel_crtc *crtc = intel_crtc_for_pipe(i915, plane->pipe);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ if (new_crtc_state && intel_crtc_is_bigjoiner_slave(new_crtc_state)) {
+ struct intel_crtc *master_crtc =
+ intel_master_crtc(new_crtc_state);
+ struct intel_plane *master_plane =
+ intel_crtc_get_plane(master_crtc, plane->id);
+
+ new_master_plane_state =
+ intel_atomic_get_new_plane_state(state, master_plane);
+ } else {
+ new_master_plane_state = new_plane_state;
+ }
+
+ intel_plane_copy_uapi_to_hw_state(new_plane_state,
+ new_master_plane_state,
+ crtc);
+
+ new_plane_state->uapi.visible = false;
+ if (!new_crtc_state)
+ return 0;
+
+ return intel_plane_atomic_check_with_state(old_crtc_state,
+ new_crtc_state,
+ old_plane_state,
+ new_plane_state);
+}
+
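+/*
+ * Pick the next plane that can be committed: on skl+ a plane's new ddb
+ * allocation must not overlap any other plane's still-live old allocation,
+ * so planes are committed one by one in an overlap-free order (a summary of
+ * the loop below, as understood from the code).
+ */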
+static struct intel_plane *
+skl_next_plane_to_commit(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct skl_ddb_entry ddb[I915_MAX_PLANES],
+ struct skl_ddb_entry ddb_y[I915_MAX_PLANES],
+ unsigned int *update_mask)
+{
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct intel_plane_state *plane_state;
+ struct intel_plane *plane;
+ int i;
+
+ if (*update_mask == 0)
+ return NULL;
+
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+ enum plane_id plane_id = plane->id;
+
+ if (crtc->pipe != plane->pipe ||
+ !(*update_mask & BIT(plane_id)))
+ continue;
+
+ if (skl_ddb_allocation_overlaps(&crtc_state->wm.skl.plane_ddb[plane_id],
+ ddb, I915_MAX_PLANES, plane_id) ||
+ skl_ddb_allocation_overlaps(&crtc_state->wm.skl.plane_ddb_y[plane_id],
+ ddb_y, I915_MAX_PLANES, plane_id))
+ continue;
+
+ *update_mask &= ~BIT(plane_id);
+ ddb[plane_id] = crtc_state->wm.skl.plane_ddb[plane_id];
+ ddb_y[plane_id] = crtc_state->wm.skl.plane_ddb_y[plane_id];
+
+ return plane;
+ }
+
+ /* should never happen */
+ drm_WARN_ON(state->base.dev, 1);
+
+ return NULL;
+}
+
+void intel_plane_update_noarm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ trace_intel_plane_update_noarm(&plane->base, crtc);
+
+ if (plane->update_noarm)
+ plane->update_noarm(plane, crtc_state, plane_state);
+}
+
+void intel_plane_update_arm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ trace_intel_plane_update_arm(&plane->base, crtc);
+
+ if (crtc_state->do_async_flip && plane->async_flip)
+ plane->async_flip(plane, crtc_state, plane_state, true);
+ else
+ plane->update_arm(plane, crtc_state, plane_state);
+}
+
+void intel_plane_disable_arm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ trace_intel_plane_disable_arm(&plane->base, crtc);
+ plane->disable_arm(plane, crtc_state);
+}
+
+void intel_crtc_planes_update_noarm(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ u32 update_mask = new_crtc_state->update_planes;
+ struct intel_plane_state *new_plane_state;
+ struct intel_plane *plane;
+ int i;
+
+ if (new_crtc_state->do_async_flip)
+ return;
+
+ /*
+ * Since we only write non-arming registers here,
+ * the order does not matter even for skl+.
+ */
+ for_each_new_intel_plane_in_state(state, plane, new_plane_state, i) {
+ if (crtc->pipe != plane->pipe ||
+ !(update_mask & BIT(plane->id)))
+ continue;
+
+ /* TODO: for mailbox updates this should be skipped */
+ if (new_plane_state->uapi.visible ||
+ new_plane_state->planar_slave)
+ intel_plane_update_noarm(plane, new_crtc_state, new_plane_state);
+ }
+}
+
+static void skl_crtc_planes_update_arm(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct skl_ddb_entry ddb[I915_MAX_PLANES];
+ struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
+ u32 update_mask = new_crtc_state->update_planes;
+ struct intel_plane *plane;
+
+ memcpy(ddb, old_crtc_state->wm.skl.plane_ddb,
+ sizeof(old_crtc_state->wm.skl.plane_ddb));
+ memcpy(ddb_y, old_crtc_state->wm.skl.plane_ddb_y,
+ sizeof(old_crtc_state->wm.skl.plane_ddb_y));
+
+ while ((plane = skl_next_plane_to_commit(state, crtc, ddb, ddb_y, &update_mask))) {
+ struct intel_plane_state *new_plane_state =
+ intel_atomic_get_new_plane_state(state, plane);
+
+ /*
+ * TODO: for mailbox updates intel_plane_update_noarm()
+ * would have to be called here as well.
+ */
+ if (new_plane_state->uapi.visible ||
+ new_plane_state->planar_slave)
+ intel_plane_update_arm(plane, new_crtc_state, new_plane_state);
+ else
+ intel_plane_disable_arm(plane, new_crtc_state);
+ }
+}
+
+static void i9xx_crtc_planes_update_arm(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ u32 update_mask = new_crtc_state->update_planes;
+ struct intel_plane_state *new_plane_state;
+ struct intel_plane *plane;
+ int i;
+
+ for_each_new_intel_plane_in_state(state, plane, new_plane_state, i) {
+ if (crtc->pipe != plane->pipe ||
+ !(update_mask & BIT(plane->id)))
+ continue;
+
+ /*
+ * TODO: for mailbox updates intel_plane_update_noarm()
+ * would have to be called here as well.
+ */
+ if (new_plane_state->uapi.visible)
+ intel_plane_update_arm(plane, new_crtc_state, new_plane_state);
+ else
+ intel_plane_disable_arm(plane, new_crtc_state);
+ }
+}
+
+void intel_crtc_planes_update_arm(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+
+ if (DISPLAY_VER(i915) >= 9)
+ skl_crtc_planes_update_arm(state, crtc);
+ else
+ i9xx_crtc_planes_update_arm(state, crtc);
+}
+
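+/*
+ * Note (assumed semantics, per drm_rect_calc_hscale()): min_scale/max_scale
+ * are 16.16 fixed point source/destination ratios, so DRM_PLANE_NO_SCALING
+ * (1 << 16) means 1:1, and e.g. max_scale = 2 << 16 would allow up to 2x
+ * downscaling.
+ */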
+int intel_atomic_plane_check_clipping(struct intel_plane_state *plane_state,
+ struct intel_crtc_state *crtc_state,
+ int min_scale, int max_scale,
+ bool can_position)
+{
+ struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
+ struct drm_framebuffer *fb = plane_state->hw.fb;
+ struct drm_rect *src = &plane_state->uapi.src;
+ struct drm_rect *dst = &plane_state->uapi.dst;
+ const struct drm_rect *clip = &crtc_state->pipe_src;
+ unsigned int rotation = plane_state->hw.rotation;
+ int hscale, vscale;
+
+ if (!fb) {
+ plane_state->uapi.visible = false;
+ return 0;
+ }
+
+ drm_rect_rotate(src, fb->width << 16, fb->height << 16, rotation);
+
+ /* Check scaling */
+ hscale = drm_rect_calc_hscale(src, dst, min_scale, max_scale);
+ vscale = drm_rect_calc_vscale(src, dst, min_scale, max_scale);
+ if (hscale < 0 || vscale < 0) {
+ drm_dbg_kms(&i915->drm, "Invalid scaling of plane\n");
+ drm_rect_debug_print("src: ", src, true);
+ drm_rect_debug_print("dst: ", dst, false);
+ return -ERANGE;
+ }
+
+ /*
+ * FIXME: This might need further adjustment for seamless scaling
+ * with phase information, for the 2p2 and 2p1 scenarios.
+ */
+ plane_state->uapi.visible = drm_rect_clip_scaled(src, dst, clip);
+
+ drm_rect_rotate_inv(src, fb->width << 16, fb->height << 16, rotation);
+
+ if (!can_position && plane_state->uapi.visible &&
+ !drm_rect_equals(dst, clip)) {
+ drm_dbg_kms(&i915->drm, "Plane must cover entire CRTC\n");
+ drm_rect_debug_print("dst: ", dst, false);
+ drm_rect_debug_print("clip: ", clip, false);
+ return -EINVAL;
+ }
+
+ /* final plane coordinates will be relative to the plane's pipe */
+ drm_rect_translate(dst, -clip->x1, -clip->y1);
+
+ return 0;
+}
+
+struct wait_rps_boost {
+ struct wait_queue_entry wait;
+
+ struct drm_crtc *crtc;
+ struct i915_request *request;
+};
+
+static int do_rps_boost(struct wait_queue_entry *_wait,
+ unsigned mode, int sync, void *key)
+{
+ struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
+ struct i915_request *rq = wait->request;
+
+ /*
+ * If we missed the vblank, but the request is already running it
+ * is reasonable to assume that it will complete before the next
+ * vblank without our intervention, so leave RPS alone.
+ */
+ if (!i915_request_started(rq))
+ intel_rps_boost(rq);
+ i915_request_put(rq);
+
+ drm_crtc_vblank_put(wait->crtc);
+
+ list_del(&wait->wait.entry);
+ kfree(wait);
+ return 1;
+}
+
+static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
+ struct dma_fence *fence)
+{
+ struct wait_rps_boost *wait;
+
+ if (!dma_fence_is_i915(fence))
+ return;
+
+ if (DISPLAY_VER(to_i915(crtc->dev)) < 6)
+ return;
+
+ if (drm_crtc_vblank_get(crtc))
+ return;
+
+ wait = kmalloc(sizeof(*wait), GFP_KERNEL);
+ if (!wait) {
+ drm_crtc_vblank_put(crtc);
+ return;
+ }
+
+ wait->request = to_request(dma_fence_get(fence));
+ wait->crtc = crtc;
+
+ wait->wait.func = do_rps_boost;
+ wait->wait.flags = 0;
+
+ add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
+}
+
+/**
+ * intel_prepare_plane_fb - Prepare fb for usage on plane
+ * @_plane: drm plane to prepare for
+ * @_new_plane_state: the plane state being prepared
+ *
+ * Prepares a framebuffer for usage on a display plane. Generally this
+ * involves pinning the underlying object and updating the frontbuffer tracking
+ * bits. Some older platforms need special physical address handling for
+ * cursor planes.
+ *
+ * Returns 0 on success, negative error code on failure.
+ */
+static int
+intel_prepare_plane_fb(struct drm_plane *_plane,
+ struct drm_plane_state *_new_plane_state)
+{
+ struct i915_sched_attr attr = { .priority = I915_PRIORITY_DISPLAY };
+ struct intel_plane *plane = to_intel_plane(_plane);
+ struct intel_plane_state *new_plane_state =
+ to_intel_plane_state(_new_plane_state);
+ struct intel_atomic_state *state =
+ to_intel_atomic_state(new_plane_state->uapi.state);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ const struct intel_plane_state *old_plane_state =
+ intel_atomic_get_old_plane_state(state, plane);
+ struct drm_i915_gem_object *obj = intel_fb_obj(new_plane_state->hw.fb);
+ struct drm_i915_gem_object *old_obj = intel_fb_obj(old_plane_state->hw.fb);
+ int ret;
+
+ if (old_obj) {
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state,
+ to_intel_crtc(old_plane_state->hw.crtc));
+
+ /* Big Hammer, we also need to ensure that any pending
+ * MI_WAIT_FOR_EVENT inside a user batch buffer on the
+ * current scanout is retired before unpinning the old
+ * framebuffer. Note that we rely on userspace rendering
+ * into the buffer attached to the pipe they are waiting
+ * on. If not, userspace generates a GPU hang with IPEHR
+ * pointing to the MI_WAIT_FOR_EVENT.
+ *
+ * This should only fail upon a hung GPU, in which case we
+ * can safely continue.
+ */
+ if (new_crtc_state && intel_crtc_needs_modeset(new_crtc_state)) {
+ ret = i915_sw_fence_await_reservation(&state->commit_ready,
+ old_obj->base.resv, NULL,
+ false, 0,
+ GFP_KERNEL);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ if (new_plane_state->uapi.fence) { /* explicit fencing */
+ i915_gem_fence_wait_priority(new_plane_state->uapi.fence,
+ &attr);
+ ret = i915_sw_fence_await_dma_fence(&state->commit_ready,
+ new_plane_state->uapi.fence,
+ i915_fence_timeout(dev_priv),
+ GFP_KERNEL);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (!obj)
+ return 0;
+
+ ret = intel_plane_pin_fb(new_plane_state);
+ if (ret)
+ return ret;
+
+ i915_gem_object_wait_priority(obj, 0, &attr);
+
+ if (!new_plane_state->uapi.fence) { /* implicit fencing */
+ struct dma_resv_iter cursor;
+ struct dma_fence *fence;
+
+ ret = i915_sw_fence_await_reservation(&state->commit_ready,
+ obj->base.resv, NULL,
+ false,
+ i915_fence_timeout(dev_priv),
+ GFP_KERNEL);
+ if (ret < 0)
+ goto unpin_fb;
+
+ dma_resv_iter_begin(&cursor, obj->base.resv,
+ DMA_RESV_USAGE_WRITE);
+ dma_resv_for_each_fence_unlocked(&cursor, fence) {
+ add_rps_boost_after_vblank(new_plane_state->hw.crtc,
+ fence);
+ }
+ dma_resv_iter_end(&cursor);
+ } else {
+ add_rps_boost_after_vblank(new_plane_state->hw.crtc,
+ new_plane_state->uapi.fence);
+ }
+
+ /*
+ * We declare pageflips to be interactive and so merit a small bias
+ * towards upclocking to deliver the frame on time. By only changing
+ * the RPS thresholds to sample more regularly and aim for higher
+ * clocks we can hopefully deliver low power workloads (like kodi)
+ * that are not quite steady state without resorting to forcing
+ * maximum clocks following a vblank miss (see do_rps_boost()).
+ */
+ if (!state->rps_interactive) {
+ intel_rps_mark_interactive(&to_gt(dev_priv)->rps, true);
+ state->rps_interactive = true;
+ }
+
+ return 0;
+
+unpin_fb:
+ intel_plane_unpin_fb(new_plane_state);
+
+ return ret;
+}
+
+/**
+ * intel_cleanup_plane_fb - Cleans up an fb after plane use
+ * @plane: drm plane to clean up for
+ * @_old_plane_state: the state from the previous modeset
+ *
+ * Cleans up a framebuffer that has just been removed from a plane.
+ */
+static void
+intel_cleanup_plane_fb(struct drm_plane *plane,
+ struct drm_plane_state *_old_plane_state)
+{
+ struct intel_plane_state *old_plane_state =
+ to_intel_plane_state(_old_plane_state);
+ struct intel_atomic_state *state =
+ to_intel_atomic_state(old_plane_state->uapi.state);
+ struct drm_i915_private *dev_priv = to_i915(plane->dev);
+ struct drm_i915_gem_object *obj = intel_fb_obj(old_plane_state->hw.fb);
+
+ if (!obj)
+ return;
+
+ if (state->rps_interactive) {
+ intel_rps_mark_interactive(&to_gt(dev_priv)->rps, false);
+ state->rps_interactive = false;
+ }
+
+ /* Should only be called after a successful intel_prepare_plane_fb()! */
+ intel_plane_unpin_fb(old_plane_state);
+}
+
+static const struct drm_plane_helper_funcs intel_plane_helper_funcs = {
+ .prepare_fb = intel_prepare_plane_fb,
+ .cleanup_fb = intel_cleanup_plane_fb,
+};
+
+void intel_plane_helper_add(struct intel_plane *plane)
+{
+ drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.h b/drivers/gpu/drm/i915/display/intel_atomic_plane.h
new file mode 100644
index 000000000..74b6d3b16
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_ATOMIC_PLANE_H__
+#define __INTEL_ATOMIC_PLANE_H__
+
+#include <linux/types.h>
+
+struct drm_plane;
+struct drm_property;
+struct drm_rect;
+struct intel_atomic_state;
+struct intel_crtc;
+struct intel_crtc_state;
+struct intel_plane;
+struct intel_plane_state;
+enum plane_id;
+
+unsigned int intel_adjusted_rate(const struct drm_rect *src,
+ const struct drm_rect *dst,
+ unsigned int rate);
+unsigned int intel_plane_pixel_rate(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state);
+
+unsigned int intel_plane_data_rate(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ int color_plane);
+void intel_plane_copy_uapi_to_hw_state(struct intel_plane_state *plane_state,
+ const struct intel_plane_state *from_plane_state,
+ struct intel_crtc *crtc);
+void intel_plane_copy_hw_state(struct intel_plane_state *plane_state,
+ const struct intel_plane_state *from_plane_state);
+void intel_plane_update_noarm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state);
+void intel_plane_update_arm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state);
+void intel_plane_disable_arm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state);
+struct intel_plane *intel_plane_alloc(void);
+void intel_plane_free(struct intel_plane *plane);
+struct drm_plane_state *intel_plane_duplicate_state(struct drm_plane *plane);
+void intel_plane_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state);
+void intel_crtc_planes_update_noarm(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+void intel_crtc_planes_update_arm(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state,
+ struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *old_plane_state,
+ struct intel_plane_state *intel_state);
+int intel_plane_atomic_check(struct intel_atomic_state *state,
+ struct intel_plane *plane);
+int intel_plane_calc_min_cdclk(struct intel_atomic_state *state,
+ struct intel_plane *plane,
+ bool *need_cdclk_calc);
+int intel_atomic_plane_check_clipping(struct intel_plane_state *plane_state,
+ struct intel_crtc_state *crtc_state,
+ int min_scale, int max_scale,
+ bool can_position);
+void intel_plane_set_invisible(struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state);
+void intel_plane_helper_add(struct intel_plane *plane);
+
+#endif /* __INTEL_ATOMIC_PLANE_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c
new file mode 100644
index 000000000..aacbc6da8
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_audio.c
@@ -0,0 +1,1410 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/component.h>
+#include <linux/kernel.h>
+
+#include <drm/drm_edid.h>
+#include <drm/i915_component.h>
+
+#include "i915_drv.h"
+#include "intel_atomic.h"
+#include "intel_audio.h"
+#include "intel_audio_regs.h"
+#include "intel_cdclk.h"
+#include "intel_crtc.h"
+#include "intel_de.h"
+#include "intel_display_types.h"
+#include "intel_lpe_audio.h"
+
+/**
+ * DOC: High Definition Audio over HDMI and Display Port
+ *
+ * The graphics and audio drivers together support High Definition Audio over
+ * HDMI and Display Port. The audio programming sequences are divided into audio
+ * codec and controller enable and disable sequences. The graphics driver
+ * handles the audio codec sequences, while the audio driver handles the audio
+ * controller sequences.
+ *
+ * The disable sequences must be performed before disabling the transcoder or
+ * port. The enable sequences may only be performed after enabling the
+ * transcoder and port, and after completed link training. Therefore the audio
+ * enable/disable sequences are part of the modeset sequence.
+ *
+ * The codec and controller sequences could be done either in parallel or serially,
+ * but generally the ELDV/PD change in the codec sequence indicates to the audio
+ * driver that the controller sequence should start. Indeed, most of the
+ * co-operation between the graphics and audio drivers is handled via audio
+ * related registers. (The notable exception is the power management, not
+ * covered here.)
+ *
+ * The struct &i915_audio_component is used to interact between the graphics
+ * and audio drivers. The struct &i915_audio_component_ops @ops in it is
+ * defined in the graphics driver and called by the audio driver, while the
+ * struct &i915_audio_component_audio_ops @audio_ops is called from the i915
+ * driver.
+ */
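+/*
+ * Summarizing the ordering above (illustrative, not normative):
+ *
+ *   enable:  transcoder/port enabled, link trained
+ *            -> codec enable here (ELDV/PD set)
+ *            -> audio driver runs the controller enable sequence
+ *   disable: codec disable here (ELDV/PD cleared)
+ *            -> audio driver controller disable
+ *            -> transcoder/port may be disabled
+ */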
+
+struct intel_audio_funcs {
+ void (*audio_codec_enable)(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
+ void (*audio_codec_disable)(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state);
+};
+
+/* DP N/M table */
+#define LC_810M 810000
+#define LC_540M 540000
+#define LC_270M 270000
+#define LC_162M 162000
+
+struct dp_aud_n_m {
+ int sample_rate;
+ int clock;
+ u16 m;
+ u16 n;
+};
+
+struct hdmi_aud_ncts {
+ int sample_rate;
+ int clock;
+ int n;
+ int cts;
+};
+
+/* Values according to DP 1.4 Table 2-104 */
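+/*
+ * For reference (DP 1.4, synchronous clock mode): Maud/Naud =
+ * 512 * fs / f_LS_Clk; e.g. 48 kHz at a 1.62 GHz link rate gives
+ * 512 * 48000 / 162000000 = 512/3375, matching the table below.
+ */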
+static const struct dp_aud_n_m dp_aud_n_m[] = {
+ { 32000, LC_162M, 1024, 10125 },
+ { 44100, LC_162M, 784, 5625 },
+ { 48000, LC_162M, 512, 3375 },
+ { 64000, LC_162M, 2048, 10125 },
+ { 88200, LC_162M, 1568, 5625 },
+ { 96000, LC_162M, 1024, 3375 },
+ { 128000, LC_162M, 4096, 10125 },
+ { 176400, LC_162M, 3136, 5625 },
+ { 192000, LC_162M, 2048, 3375 },
+ { 32000, LC_270M, 1024, 16875 },
+ { 44100, LC_270M, 784, 9375 },
+ { 48000, LC_270M, 512, 5625 },
+ { 64000, LC_270M, 2048, 16875 },
+ { 88200, LC_270M, 1568, 9375 },
+ { 96000, LC_270M, 1024, 5625 },
+ { 128000, LC_270M, 4096, 16875 },
+ { 176400, LC_270M, 3136, 9375 },
+ { 192000, LC_270M, 2048, 5625 },
+ { 32000, LC_540M, 1024, 33750 },
+ { 44100, LC_540M, 784, 18750 },
+ { 48000, LC_540M, 512, 11250 },
+ { 64000, LC_540M, 2048, 33750 },
+ { 88200, LC_540M, 1568, 18750 },
+ { 96000, LC_540M, 1024, 11250 },
+ { 128000, LC_540M, 4096, 33750 },
+ { 176400, LC_540M, 3136, 18750 },
+ { 192000, LC_540M, 2048, 11250 },
+ { 32000, LC_810M, 1024, 50625 },
+ { 44100, LC_810M, 784, 28125 },
+ { 48000, LC_810M, 512, 16875 },
+ { 64000, LC_810M, 2048, 50625 },
+ { 88200, LC_810M, 1568, 28125 },
+ { 96000, LC_810M, 1024, 16875 },
+ { 128000, LC_810M, 4096, 50625 },
+ { 176400, LC_810M, 3136, 28125 },
+ { 192000, LC_810M, 2048, 16875 },
+};
+
+static const struct dp_aud_n_m *
+audio_config_dp_get_n_m(const struct intel_crtc_state *crtc_state, int rate)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dp_aud_n_m); i++) {
+ if (rate == dp_aud_n_m[i].sample_rate &&
+ crtc_state->port_clock == dp_aud_n_m[i].clock)
+ return &dp_aud_n_m[i];
+ }
+
+ return NULL;
+}
+
+static const struct {
+ int clock;
+ u32 config;
+} hdmi_audio_clock[] = {
+ { 25175, AUD_CONFIG_PIXEL_CLOCK_HDMI_25175 },
+ { 25200, AUD_CONFIG_PIXEL_CLOCK_HDMI_25200 }, /* default per bspec */
+ { 27000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27000 },
+ { 27027, AUD_CONFIG_PIXEL_CLOCK_HDMI_27027 },
+ { 54000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54000 },
+ { 54054, AUD_CONFIG_PIXEL_CLOCK_HDMI_54054 },
+ { 74176, AUD_CONFIG_PIXEL_CLOCK_HDMI_74176 },
+ { 74250, AUD_CONFIG_PIXEL_CLOCK_HDMI_74250 },
+ { 148352, AUD_CONFIG_PIXEL_CLOCK_HDMI_148352 },
+ { 148500, AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 },
+ { 296703, AUD_CONFIG_PIXEL_CLOCK_HDMI_296703 },
+ { 297000, AUD_CONFIG_PIXEL_CLOCK_HDMI_297000 },
+ { 593407, AUD_CONFIG_PIXEL_CLOCK_HDMI_593407 },
+ { 594000, AUD_CONFIG_PIXEL_CLOCK_HDMI_594000 },
+};
+
+/* HDMI N/CTS table */
+#define TMDS_297M 297000
+#define TMDS_296M 296703
+#define TMDS_594M 594000
+#define TMDS_593M 593407
+
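+/*
+ * For reference (HDMI audio clock regeneration): 128 * fs =
+ * f_TMDS * N / CTS, i.e. CTS = f_TMDS * N / (128 * fs). E.g. 48 kHz at
+ * 594 MHz: 594000000 * 6144 / (128 * 48000) = 594000, matching the
+ * {48000, TMDS_594M, 6144, 594000} entry below.
+ */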
+static const struct hdmi_aud_ncts hdmi_aud_ncts_24bpp[] = {
+ { 32000, TMDS_296M, 5824, 421875 },
+ { 32000, TMDS_297M, 3072, 222750 },
+ { 32000, TMDS_593M, 5824, 843750 },
+ { 32000, TMDS_594M, 3072, 445500 },
+ { 44100, TMDS_296M, 4459, 234375 },
+ { 44100, TMDS_297M, 4704, 247500 },
+ { 44100, TMDS_593M, 8918, 937500 },
+ { 44100, TMDS_594M, 9408, 990000 },
+ { 88200, TMDS_296M, 8918, 234375 },
+ { 88200, TMDS_297M, 9408, 247500 },
+ { 88200, TMDS_593M, 17836, 937500 },
+ { 88200, TMDS_594M, 18816, 990000 },
+ { 176400, TMDS_296M, 17836, 234375 },
+ { 176400, TMDS_297M, 18816, 247500 },
+ { 176400, TMDS_593M, 35672, 937500 },
+ { 176400, TMDS_594M, 37632, 990000 },
+ { 48000, TMDS_296M, 5824, 281250 },
+ { 48000, TMDS_297M, 5120, 247500 },
+ { 48000, TMDS_593M, 5824, 562500 },
+ { 48000, TMDS_594M, 6144, 594000 },
+ { 96000, TMDS_296M, 11648, 281250 },
+ { 96000, TMDS_297M, 10240, 247500 },
+ { 96000, TMDS_593M, 11648, 562500 },
+ { 96000, TMDS_594M, 12288, 594000 },
+ { 192000, TMDS_296M, 23296, 281250 },
+ { 192000, TMDS_297M, 20480, 247500 },
+ { 192000, TMDS_593M, 23296, 562500 },
+ { 192000, TMDS_594M, 24576, 594000 },
+};
+
+/* Appendix C - N & CTS values for deep color from the HDMI 2.0 spec */
+/* HDMI N/CTS table for 10 bit deep color (30 bpp) */
+#define TMDS_371M 371250
+#define TMDS_370M 370878
+
+static const struct hdmi_aud_ncts hdmi_aud_ncts_30bpp[] = {
+ { 32000, TMDS_370M, 5824, 527344 },
+ { 32000, TMDS_371M, 6144, 556875 },
+ { 44100, TMDS_370M, 8918, 585938 },
+ { 44100, TMDS_371M, 4704, 309375 },
+ { 88200, TMDS_370M, 17836, 585938 },
+ { 88200, TMDS_371M, 9408, 309375 },
+ { 176400, TMDS_370M, 35672, 585938 },
+ { 176400, TMDS_371M, 18816, 309375 },
+ { 48000, TMDS_370M, 11648, 703125 },
+ { 48000, TMDS_371M, 5120, 309375 },
+ { 96000, TMDS_370M, 23296, 703125 },
+ { 96000, TMDS_371M, 10240, 309375 },
+ { 192000, TMDS_370M, 46592, 703125 },
+ { 192000, TMDS_371M, 20480, 309375 },
+};
+
+/* HDMI N/CTS table for 12 bit deep color (36 bpp) */
+#define TMDS_445_5M 445500
+#define TMDS_445M 445054
+
+static const struct hdmi_aud_ncts hdmi_aud_ncts_36bpp[] = {
+ { 32000, TMDS_445M, 5824, 632813 },
+ { 32000, TMDS_445_5M, 4096, 445500 },
+ { 44100, TMDS_445M, 8918, 703125 },
+ { 44100, TMDS_445_5M, 4704, 371250 },
+ { 88200, TMDS_445M, 17836, 703125 },
+ { 88200, TMDS_445_5M, 9408, 371250 },
+ { 176400, TMDS_445M, 35672, 703125 },
+ { 176400, TMDS_445_5M, 18816, 371250 },
+ { 48000, TMDS_445M, 5824, 421875 },
+ { 48000, TMDS_445_5M, 5120, 371250 },
+ { 96000, TMDS_445M, 11648, 421875 },
+ { 96000, TMDS_445_5M, 10240, 371250 },
+ { 192000, TMDS_445M, 23296, 421875 },
+ { 192000, TMDS_445_5M, 20480, 371250 },
+};
+
+/* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */
+static u32 audio_config_hdmi_pixel_clock(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hdmi_audio_clock); i++) {
+ if (adjusted_mode->crtc_clock == hdmi_audio_clock[i].clock)
+ break;
+ }
+
+ if (DISPLAY_VER(dev_priv) < 12 && adjusted_mode->crtc_clock > 148500)
+ i = ARRAY_SIZE(hdmi_audio_clock);
+
+ if (i == ARRAY_SIZE(hdmi_audio_clock)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "HDMI audio pixel clock setting for %d not found, falling back to defaults\n",
+ adjusted_mode->crtc_clock);
+ i = 1;
+ }
+
+ drm_dbg_kms(&dev_priv->drm,
+ "Configuring HDMI audio for pixel clock %d (0x%08x)\n",
+ hdmi_audio_clock[i].clock,
+ hdmi_audio_clock[i].config);
+
+ return hdmi_audio_clock[i].config;
+}
+
+static int audio_config_hdmi_get_n(const struct intel_crtc_state *crtc_state,
+ int rate)
+{
+ const struct hdmi_aud_ncts *hdmi_ncts_table;
+ int i, size;
+
+ if (crtc_state->pipe_bpp == 36) {
+ hdmi_ncts_table = hdmi_aud_ncts_36bpp;
+ size = ARRAY_SIZE(hdmi_aud_ncts_36bpp);
+ } else if (crtc_state->pipe_bpp == 30) {
+ hdmi_ncts_table = hdmi_aud_ncts_30bpp;
+ size = ARRAY_SIZE(hdmi_aud_ncts_30bpp);
+ } else {
+ hdmi_ncts_table = hdmi_aud_ncts_24bpp;
+ size = ARRAY_SIZE(hdmi_aud_ncts_24bpp);
+ }
+
+ for (i = 0; i < size; i++) {
+ if (rate == hdmi_ncts_table[i].sample_rate &&
+ crtc_state->port_clock == hdmi_ncts_table[i].clock) {
+ return hdmi_ncts_table[i].n;
+ }
+ }
+ return 0;
+}
+
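+/*
+ * Check whether the ELD (EDID-Like Data) currently programmed into the
+ * hardware buffer matches the connector's ELD: verify the ELD valid
+ * bit, reset the read address, then compare the buffer dword by dword.
+ */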
+static bool intel_eld_uptodate(struct drm_connector *connector,
+ i915_reg_t reg_eldv, u32 bits_eldv,
+ i915_reg_t reg_elda, u32 bits_elda,
+ i915_reg_t reg_edid)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
+ const u8 *eld = connector->eld;
+ u32 tmp;
+ int i;
+
+ tmp = intel_de_read(dev_priv, reg_eldv);
+ tmp &= bits_eldv;
+
+ if (!tmp)
+ return false;
+
+ tmp = intel_de_read(dev_priv, reg_elda);
+ tmp &= ~bits_elda;
+ intel_de_write(dev_priv, reg_elda, tmp);
+
+ for (i = 0; i < drm_eld_size(eld) / 4; i++)
+ if (intel_de_read(dev_priv, reg_edid) != *((const u32 *)eld + i))
+ return false;
+
+ return true;
+}
+
+static void g4x_audio_codec_disable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ u32 eldv, tmp;
+
+ tmp = intel_de_read(dev_priv, G4X_AUD_VID_DID);
+ if (tmp == INTEL_AUDIO_DEVBLC || tmp == INTEL_AUDIO_DEVCL)
+ eldv = G4X_ELDV_DEVCL_DEVBLC;
+ else
+ eldv = G4X_ELDV_DEVCTG;
+
+ /* Invalidate ELD */
+ tmp = intel_de_read(dev_priv, G4X_AUD_CNTL_ST);
+ tmp &= ~eldv;
+ intel_de_write(dev_priv, G4X_AUD_CNTL_ST, tmp);
+}
+
+static void g4x_audio_codec_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct drm_connector *connector = conn_state->connector;
+ const u8 *eld = connector->eld;
+ u32 eldv;
+ u32 tmp;
+ int len, i;
+
+ tmp = intel_de_read(dev_priv, G4X_AUD_VID_DID);
+ if (tmp == INTEL_AUDIO_DEVBLC || tmp == INTEL_AUDIO_DEVCL)
+ eldv = G4X_ELDV_DEVCL_DEVBLC;
+ else
+ eldv = G4X_ELDV_DEVCTG;
+
+ if (intel_eld_uptodate(connector,
+ G4X_AUD_CNTL_ST, eldv,
+ G4X_AUD_CNTL_ST, G4X_ELD_ADDR_MASK,
+ G4X_HDMIW_HDMIEDID))
+ return;
+
+ tmp = intel_de_read(dev_priv, G4X_AUD_CNTL_ST);
+ tmp &= ~(eldv | G4X_ELD_ADDR_MASK);
+ len = (tmp >> 9) & 0x1f; /* ELD buffer size */
+ intel_de_write(dev_priv, G4X_AUD_CNTL_ST, tmp);
+
+ len = min(drm_eld_size(eld) / 4, len);
+ for (i = 0; i < len; i++)
+ intel_de_write(dev_priv, G4X_HDMIW_HDMIEDID,
+ *((const u32 *)eld + i));
+
+ tmp = intel_de_read(dev_priv, G4X_AUD_CNTL_ST);
+ tmp |= eldv;
+ intel_de_write(dev_priv, G4X_AUD_CNTL_ST, tmp);
+}
+
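+/*
+ * For DP the sink recovers the audio sample clock from the Maud/Naud
+ * values in the audio timestamp packets (fs = Maud / Naud * link_clk /
+ * 512 per the DP spec). Program explicit values when the table has a
+ * match for the current rate, otherwise let the hardware pick them.
+ */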
+static void
+hsw_dp_audio_config_update(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct i915_audio_component *acomp = dev_priv->display.audio.component;
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ enum port port = encoder->port;
+ const struct dp_aud_n_m *nm;
+ int rate;
+ u32 tmp;
+
+ rate = acomp ? acomp->aud_sample_rate[port] : 0;
+ nm = audio_config_dp_get_n_m(crtc_state, rate);
+ if (nm)
+ drm_dbg_kms(&dev_priv->drm, "using Maud %u, Naud %u\n", nm->m,
+ nm->n);
+ else
+ drm_dbg_kms(&dev_priv->drm, "using automatic Maud, Naud\n");
+
+ tmp = intel_de_read(dev_priv, HSW_AUD_CFG(cpu_transcoder));
+ tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
+ tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
+ tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
+ tmp |= AUD_CONFIG_N_VALUE_INDEX;
+
+ if (nm) {
+ tmp &= ~AUD_CONFIG_N_MASK;
+ tmp |= AUD_CONFIG_N(nm->n);
+ tmp |= AUD_CONFIG_N_PROG_ENABLE;
+ }
+
+ intel_de_write(dev_priv, HSW_AUD_CFG(cpu_transcoder), tmp);
+
+ tmp = intel_de_read(dev_priv, HSW_AUD_M_CTS_ENABLE(cpu_transcoder));
+ tmp &= ~AUD_CONFIG_M_MASK;
+ tmp &= ~AUD_M_CTS_M_VALUE_INDEX;
+ tmp &= ~AUD_M_CTS_M_PROG_ENABLE;
+
+ if (nm) {
+ tmp |= nm->m;
+ tmp |= AUD_M_CTS_M_VALUE_INDEX;
+ tmp |= AUD_M_CTS_M_PROG_ENABLE;
+ }
+
+ intel_de_write(dev_priv, HSW_AUD_M_CTS_ENABLE(cpu_transcoder), tmp);
+}
+
+static void
+hsw_hdmi_audio_config_update(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct i915_audio_component *acomp = dev_priv->display.audio.component;
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ enum port port = encoder->port;
+ int n, rate;
+ u32 tmp;
+
+ rate = acomp ? acomp->aud_sample_rate[port] : 0;
+
+ tmp = intel_de_read(dev_priv, HSW_AUD_CFG(cpu_transcoder));
+ tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
+ tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
+ tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
+ tmp |= audio_config_hdmi_pixel_clock(crtc_state);
+
+ n = audio_config_hdmi_get_n(crtc_state, rate);
+ if (n != 0) {
+ drm_dbg_kms(&dev_priv->drm, "using N %d\n", n);
+
+ tmp &= ~AUD_CONFIG_N_MASK;
+ tmp |= AUD_CONFIG_N(n);
+ tmp |= AUD_CONFIG_N_PROG_ENABLE;
+ } else {
+ drm_dbg_kms(&dev_priv->drm, "using automatic N\n");
+ }
+
+ intel_de_write(dev_priv, HSW_AUD_CFG(cpu_transcoder), tmp);
+
+	/*
+	 * Disable "Enable CTS or M Prog bit"
+	 * and let the HW calculate the value
+	 */
+ tmp = intel_de_read(dev_priv, HSW_AUD_M_CTS_ENABLE(cpu_transcoder));
+ tmp &= ~AUD_M_CTS_M_PROG_ENABLE;
+ tmp &= ~AUD_M_CTS_M_VALUE_INDEX;
+ intel_de_write(dev_priv, HSW_AUD_M_CTS_ENABLE(cpu_transcoder), tmp);
+}
+
+static void
+hsw_audio_config_update(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ if (intel_crtc_has_dp_encoder(crtc_state))
+ hsw_dp_audio_config_update(encoder, crtc_state);
+ else
+ hsw_hdmi_audio_config_update(encoder, crtc_state);
+}
+
+static void hsw_audio_codec_disable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
+ u32 tmp;
+
+ mutex_lock(&dev_priv->display.audio.mutex);
+
+ /* Disable timestamps */
+ tmp = intel_de_read(dev_priv, HSW_AUD_CFG(cpu_transcoder));
+ tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
+ tmp |= AUD_CONFIG_N_PROG_ENABLE;
+ tmp &= ~AUD_CONFIG_UPPER_N_MASK;
+ tmp &= ~AUD_CONFIG_LOWER_N_MASK;
+ if (intel_crtc_has_dp_encoder(old_crtc_state))
+ tmp |= AUD_CONFIG_N_VALUE_INDEX;
+ intel_de_write(dev_priv, HSW_AUD_CFG(cpu_transcoder), tmp);
+
+ /* Invalidate ELD */
+ tmp = intel_de_read(dev_priv, HSW_AUD_PIN_ELD_CP_VLD);
+ tmp &= ~AUDIO_ELD_VALID(cpu_transcoder);
+ tmp &= ~AUDIO_OUTPUT_ENABLE(cpu_transcoder);
+ intel_de_write(dev_priv, HSW_AUD_PIN_ELD_CP_VLD, tmp);
+
+ mutex_unlock(&dev_priv->display.audio.mutex);
+}
+
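+/*
+ * With DSC enabled on 4k+ modes the compressed horizontal blanking can
+ * be too short for the audio hardware to fetch samples in time. Compute
+ * how early in the line hblank handling must start for audio to keep up.
+ */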
+static unsigned int calc_hblank_early_prog(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ unsigned int link_clks_available, link_clks_required;
+ unsigned int tu_data, tu_line, link_clks_active;
+ unsigned int h_active, h_total, hblank_delta, pixel_clk;
+ unsigned int fec_coeff, cdclk, vdsc_bpp;
+ unsigned int link_clk, lanes;
+ unsigned int hblank_rise;
+
+ h_active = crtc_state->hw.adjusted_mode.crtc_hdisplay;
+ h_total = crtc_state->hw.adjusted_mode.crtc_htotal;
+ pixel_clk = crtc_state->hw.adjusted_mode.crtc_clock;
+ vdsc_bpp = crtc_state->dsc.compressed_bpp;
+ cdclk = i915->display.cdclk.hw.cdclk;
+	/* fec = 0.972261, using rounding multiplier of 1000000 */
+ fec_coeff = 972261;
+ link_clk = crtc_state->port_clock;
+ lanes = crtc_state->lane_count;
+
+	drm_dbg_kms(&i915->drm, "h_active = %u link_clk = %u "
+		    "lanes = %u vdsc_bpp = %u cdclk = %u\n",
+ h_active, link_clk, lanes, vdsc_bpp, cdclk);
+
+ if (WARN_ON(!link_clk || !pixel_clk || !lanes || !vdsc_bpp || !cdclk))
+ return 0;
+
+ link_clks_available = (h_total - h_active) * link_clk / pixel_clk - 28;
+ link_clks_required = DIV_ROUND_UP(192000 * h_total, 1000 * pixel_clk) * (48 / lanes + 2);
+
+ if (link_clks_available > link_clks_required)
+ hblank_delta = 32;
+ else
+ hblank_delta = DIV64_U64_ROUND_UP(mul_u32_u32(5 * (link_clk + cdclk), pixel_clk),
+ mul_u32_u32(link_clk, cdclk));
+
+ tu_data = div64_u64(mul_u32_u32(pixel_clk * vdsc_bpp * 8, 1000000),
+ mul_u32_u32(link_clk * lanes, fec_coeff));
+ tu_line = div64_u64(h_active * mul_u32_u32(link_clk, fec_coeff),
+ mul_u32_u32(64 * pixel_clk, 1000000));
+ link_clks_active = (tu_line - 1) * 64 + tu_data;
+
+ hblank_rise = (link_clks_active + 6 * DIV_ROUND_UP(link_clks_active, 250) + 4) * pixel_clk / link_clk;
+
+ return h_active - hblank_rise + hblank_delta;
+}
+
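+/*
+ * Number of audio samples that fit in the horizontal blanking period
+ * at the current link rate; used to program NUMBER_SAMPLES_PER_LINE
+ * when fewer than 3 samples fit.
+ */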
+static unsigned int calc_samples_room(const struct intel_crtc_state *crtc_state)
+{
+ unsigned int h_active, h_total, pixel_clk;
+ unsigned int link_clk, lanes;
+
+ h_active = crtc_state->hw.adjusted_mode.hdisplay;
+ h_total = crtc_state->hw.adjusted_mode.htotal;
+ pixel_clk = crtc_state->hw.adjusted_mode.clock;
+ link_clk = crtc_state->port_clock;
+ lanes = crtc_state->lane_count;
+
+ return ((h_total - h_active) * link_clk - 12 * pixel_clk) /
+ (pixel_clk * (48 / lanes + 2));
+}
+
+static void enable_audio_dsc_wa(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ enum pipe pipe = crtc->pipe;
+ unsigned int hblank_early_prog, samples_room;
+ unsigned int val;
+
+ if (DISPLAY_VER(i915) < 11)
+ return;
+
+ val = intel_de_read(i915, AUD_CONFIG_BE);
+
+ if (DISPLAY_VER(i915) == 11)
+ val |= HBLANK_EARLY_ENABLE_ICL(pipe);
+ else if (DISPLAY_VER(i915) >= 12)
+ val |= HBLANK_EARLY_ENABLE_TGL(pipe);
+
+ if (crtc_state->dsc.compression_enable &&
+ crtc_state->hw.adjusted_mode.hdisplay >= 3840 &&
+ crtc_state->hw.adjusted_mode.vdisplay >= 2160) {
+ /* Get hblank early enable value required */
+ val &= ~HBLANK_START_COUNT_MASK(pipe);
+ hblank_early_prog = calc_hblank_early_prog(encoder, crtc_state);
+ if (hblank_early_prog < 32)
+ val |= HBLANK_START_COUNT(pipe, HBLANK_START_COUNT_32);
+ else if (hblank_early_prog < 64)
+ val |= HBLANK_START_COUNT(pipe, HBLANK_START_COUNT_64);
+ else if (hblank_early_prog < 96)
+ val |= HBLANK_START_COUNT(pipe, HBLANK_START_COUNT_96);
+ else
+ val |= HBLANK_START_COUNT(pipe, HBLANK_START_COUNT_128);
+
+ /* Get samples room value required */
+ val &= ~NUMBER_SAMPLES_PER_LINE_MASK(pipe);
+ samples_room = calc_samples_room(crtc_state);
+ if (samples_room < 3)
+ val |= NUMBER_SAMPLES_PER_LINE(pipe, samples_room);
+		else /* Program 0, i.e. "All samples available in buffer" */
+ val |= NUMBER_SAMPLES_PER_LINE(pipe, 0x0);
+ }
+
+ intel_de_write(i915, AUD_CONFIG_BE, val);
+}
+
+#undef ROUNDING_FACTOR
+
+static void hsw_audio_codec_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct drm_connector *connector = conn_state->connector;
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ const u8 *eld = connector->eld;
+ u32 tmp;
+ int len, i;
+
+ mutex_lock(&dev_priv->display.audio.mutex);
+
+ /* Enable Audio WA for 4k DSC usecases */
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP))
+ enable_audio_dsc_wa(encoder, crtc_state);
+
+ /* Enable audio presence detect, invalidate ELD */
+ tmp = intel_de_read(dev_priv, HSW_AUD_PIN_ELD_CP_VLD);
+ tmp |= AUDIO_OUTPUT_ENABLE(cpu_transcoder);
+ tmp &= ~AUDIO_ELD_VALID(cpu_transcoder);
+ intel_de_write(dev_priv, HSW_AUD_PIN_ELD_CP_VLD, tmp);
+
+ /*
+ * FIXME: We're supposed to wait for vblank here, but we have vblanks
+ * disabled during the mode set. The proper fix would be to push the
+ * rest of the setup into a vblank work item, queued here, but the
+ * infrastructure is not there yet.
+ */
+
+ /* Reset ELD write address */
+ tmp = intel_de_read(dev_priv, HSW_AUD_DIP_ELD_CTRL(cpu_transcoder));
+ tmp &= ~IBX_ELD_ADDRESS_MASK;
+ intel_de_write(dev_priv, HSW_AUD_DIP_ELD_CTRL(cpu_transcoder), tmp);
+
+ /* Up to 84 bytes of hw ELD buffer */
+ len = min(drm_eld_size(eld), 84);
+ for (i = 0; i < len / 4; i++)
+ intel_de_write(dev_priv, HSW_AUD_EDID_DATA(cpu_transcoder),
+ *((const u32 *)eld + i));
+
+ /* ELD valid */
+ tmp = intel_de_read(dev_priv, HSW_AUD_PIN_ELD_CP_VLD);
+ tmp |= AUDIO_ELD_VALID(cpu_transcoder);
+ intel_de_write(dev_priv, HSW_AUD_PIN_ELD_CP_VLD, tmp);
+
+ /* Enable timestamps */
+ hsw_audio_config_update(encoder, crtc_state);
+
+ mutex_unlock(&dev_priv->display.audio.mutex);
+}
+
+static void ilk_audio_codec_disable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
+ enum pipe pipe = crtc->pipe;
+ enum port port = encoder->port;
+ u32 tmp, eldv;
+ i915_reg_t aud_config, aud_cntrl_st2;
+
+ if (drm_WARN_ON(&dev_priv->drm, port == PORT_A))
+ return;
+
+ if (HAS_PCH_IBX(dev_priv)) {
+ aud_config = IBX_AUD_CFG(pipe);
+ aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
+ } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ aud_config = VLV_AUD_CFG(pipe);
+ aud_cntrl_st2 = VLV_AUD_CNTL_ST2;
+ } else {
+ aud_config = CPT_AUD_CFG(pipe);
+ aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
+ }
+
+ /* Disable timestamps */
+ tmp = intel_de_read(dev_priv, aud_config);
+ tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
+ tmp |= AUD_CONFIG_N_PROG_ENABLE;
+ tmp &= ~AUD_CONFIG_UPPER_N_MASK;
+ tmp &= ~AUD_CONFIG_LOWER_N_MASK;
+ if (intel_crtc_has_dp_encoder(old_crtc_state))
+ tmp |= AUD_CONFIG_N_VALUE_INDEX;
+ intel_de_write(dev_priv, aud_config, tmp);
+
+ eldv = IBX_ELD_VALID(port);
+
+ /* Invalidate ELD */
+ tmp = intel_de_read(dev_priv, aud_cntrl_st2);
+ tmp &= ~eldv;
+ intel_de_write(dev_priv, aud_cntrl_st2, tmp);
+}
+
+static void ilk_audio_codec_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_connector *connector = conn_state->connector;
+ enum pipe pipe = crtc->pipe;
+ enum port port = encoder->port;
+ const u8 *eld = connector->eld;
+ u32 tmp, eldv;
+ int len, i;
+ i915_reg_t hdmiw_hdmiedid, aud_config, aud_cntl_st, aud_cntrl_st2;
+
+ if (drm_WARN_ON(&dev_priv->drm, port == PORT_A))
+ return;
+
+ /*
+ * FIXME: We're supposed to wait for vblank here, but we have vblanks
+ * disabled during the mode set. The proper fix would be to push the
+ * rest of the setup into a vblank work item, queued here, but the
+ * infrastructure is not there yet.
+ */
+
+ if (HAS_PCH_IBX(dev_priv)) {
+ hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID(pipe);
+ aud_config = IBX_AUD_CFG(pipe);
+ aud_cntl_st = IBX_AUD_CNTL_ST(pipe);
+ aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
+ } else if (IS_VALLEYVIEW(dev_priv) ||
+ IS_CHERRYVIEW(dev_priv)) {
+ hdmiw_hdmiedid = VLV_HDMIW_HDMIEDID(pipe);
+ aud_config = VLV_AUD_CFG(pipe);
+ aud_cntl_st = VLV_AUD_CNTL_ST(pipe);
+ aud_cntrl_st2 = VLV_AUD_CNTL_ST2;
+ } else {
+ hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID(pipe);
+ aud_config = CPT_AUD_CFG(pipe);
+ aud_cntl_st = CPT_AUD_CNTL_ST(pipe);
+ aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
+ }
+
+ eldv = IBX_ELD_VALID(port);
+
+ /* Invalidate ELD */
+ tmp = intel_de_read(dev_priv, aud_cntrl_st2);
+ tmp &= ~eldv;
+ intel_de_write(dev_priv, aud_cntrl_st2, tmp);
+
+ /* Reset ELD write address */
+ tmp = intel_de_read(dev_priv, aud_cntl_st);
+ tmp &= ~IBX_ELD_ADDRESS_MASK;
+ intel_de_write(dev_priv, aud_cntl_st, tmp);
+
+ /* Up to 84 bytes of hw ELD buffer */
+ len = min(drm_eld_size(eld), 84);
+ for (i = 0; i < len / 4; i++)
+ intel_de_write(dev_priv, hdmiw_hdmiedid,
+ *((const u32 *)eld + i));
+
+ /* ELD valid */
+ tmp = intel_de_read(dev_priv, aud_cntrl_st2);
+ tmp |= eldv;
+ intel_de_write(dev_priv, aud_cntrl_st2, tmp);
+
+ /* Enable timestamps */
+ tmp = intel_de_read(dev_priv, aud_config);
+ tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
+ tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
+ tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
+ if (intel_crtc_has_dp_encoder(crtc_state))
+ tmp |= AUD_CONFIG_N_VALUE_INDEX;
+ else
+ tmp |= audio_config_hdmi_pixel_clock(crtc_state);
+ intel_de_write(dev_priv, aud_config, tmp);
+}
+
+/**
+ * intel_audio_codec_enable - Enable the audio codec for HD audio
+ * @encoder: encoder on which to enable audio
+ * @crtc_state: pointer to the current crtc state.
+ * @conn_state: pointer to the current connector state.
+ *
+ * The enable sequences may only be performed after enabling the transcoder and
+ * port, and after completed link training.
+ */
+void intel_audio_codec_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct i915_audio_component *acomp = dev_priv->display.audio.component;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_connector *connector = conn_state->connector;
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+ enum port port = encoder->port;
+ enum pipe pipe = crtc->pipe;
+
+ if (!crtc_state->has_audio)
+ return;
+
+ drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s][ENCODER:%d:%s] Enable audio codec on pipe %c, %u bytes ELD\n",
+ connector->base.id, connector->name,
+ encoder->base.base.id, encoder->base.name,
+ pipe_name(pipe), drm_eld_size(connector->eld));
+
+ /* FIXME precompute the ELD in .compute_config() */
+ if (!connector->eld[0])
+ drm_dbg_kms(&dev_priv->drm,
+ "Bogus ELD on [CONNECTOR:%d:%s]\n",
+ connector->base.id, connector->name);
+
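+	/* ELD byte 6 is the A/V sync delay, in units of 2 ms */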
+ connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
+
+ if (dev_priv->display.funcs.audio)
+ dev_priv->display.funcs.audio->audio_codec_enable(encoder,
+ crtc_state,
+ conn_state);
+
+ mutex_lock(&dev_priv->display.audio.mutex);
+ encoder->audio_connector = connector;
+
+ /* referred in audio callbacks */
+ dev_priv->display.audio.encoder_map[pipe] = encoder;
+ mutex_unlock(&dev_priv->display.audio.mutex);
+
+ if (acomp && acomp->base.audio_ops &&
+ acomp->base.audio_ops->pin_eld_notify) {
+ /* audio drivers expect pipe = -1 to indicate Non-MST cases */
+ if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST))
+ pipe = -1;
+ acomp->base.audio_ops->pin_eld_notify(acomp->base.audio_ops->audio_ptr,
+ (int) port, (int) pipe);
+ }
+
+ intel_lpe_audio_notify(dev_priv, pipe, port, connector->eld,
+ crtc_state->port_clock,
+ intel_crtc_has_dp_encoder(crtc_state));
+}
+
+/**
+ * intel_audio_codec_disable - Disable the audio codec for HD audio
+ * @encoder: encoder on which to disable audio
+ * @old_crtc_state: pointer to the old crtc state.
+ * @old_conn_state: pointer to the old connector state.
+ *
+ * The disable sequences must be performed before disabling the transcoder or
+ * port.
+ */
+void intel_audio_codec_disable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct i915_audio_component *acomp = dev_priv->display.audio.component;
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
+ struct drm_connector *connector = old_conn_state->connector;
+ enum port port = encoder->port;
+ enum pipe pipe = crtc->pipe;
+
+ if (!old_crtc_state->has_audio)
+ return;
+
+ drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s][ENCODER:%d:%s] Disable audio codec on pipe %c\n",
+ connector->base.id, connector->name,
+ encoder->base.base.id, encoder->base.name, pipe_name(pipe));
+
+ if (dev_priv->display.funcs.audio)
+ dev_priv->display.funcs.audio->audio_codec_disable(encoder,
+ old_crtc_state,
+ old_conn_state);
+
+ mutex_lock(&dev_priv->display.audio.mutex);
+ encoder->audio_connector = NULL;
+ dev_priv->display.audio.encoder_map[pipe] = NULL;
+ mutex_unlock(&dev_priv->display.audio.mutex);
+
+ if (acomp && acomp->base.audio_ops &&
+ acomp->base.audio_ops->pin_eld_notify) {
+ /* audio drivers expect pipe = -1 to indicate Non-MST cases */
+ if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST))
+ pipe = -1;
+ acomp->base.audio_ops->pin_eld_notify(acomp->base.audio_ops->audio_ptr,
+ (int) port, (int) pipe);
+ }
+
+ intel_lpe_audio_notify(dev_priv, pipe, port, NULL, 0, false);
+}
+
+static const struct intel_audio_funcs g4x_audio_funcs = {
+ .audio_codec_enable = g4x_audio_codec_enable,
+ .audio_codec_disable = g4x_audio_codec_disable,
+};
+
+static const struct intel_audio_funcs ilk_audio_funcs = {
+ .audio_codec_enable = ilk_audio_codec_enable,
+ .audio_codec_disable = ilk_audio_codec_disable,
+};
+
+static const struct intel_audio_funcs hsw_audio_funcs = {
+ .audio_codec_enable = hsw_audio_codec_enable,
+ .audio_codec_disable = hsw_audio_codec_disable,
+};
+
+/**
+ * intel_audio_hooks_init - Set up chip specific audio hooks
+ * @dev_priv: device private
+ */
+void intel_audio_hooks_init(struct drm_i915_private *dev_priv)
+{
+ if (IS_G4X(dev_priv)) {
+ dev_priv->display.funcs.audio = &g4x_audio_funcs;
+ } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ dev_priv->display.funcs.audio = &ilk_audio_funcs;
+ } else if (IS_HASWELL(dev_priv) || DISPLAY_VER(dev_priv) >= 8) {
+ dev_priv->display.funcs.audio = &hsw_audio_funcs;
+ } else if (HAS_PCH_SPLIT(dev_priv)) {
+ dev_priv->display.funcs.audio = &ilk_audio_funcs;
+ }
+}
+
+struct aud_ts_cdclk_m_n {
+ u8 m;
+ u16 n;
+};
+
+void intel_audio_cdclk_change_pre(struct drm_i915_private *i915)
+{
+ if (DISPLAY_VER(i915) >= 13)
+ intel_de_rmw(i915, AUD_TS_CDCLK_M, AUD_TS_CDCLK_M_EN, 0);
+}
+
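+/*
+ * Keep the audio timestamp reference at 24 MHz across CDCLK changes:
+ * N is chosen so that cdclk * M / N = 24000 kHz, e.g. M = 12 and
+ * N = 240 for a 480000 kHz CDCLK.
+ */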
+static void get_aud_ts_cdclk_m_n(int refclk, int cdclk, struct aud_ts_cdclk_m_n *aud_ts)
+{
+ if (refclk == 24000)
+ aud_ts->m = 12;
+ else
+ aud_ts->m = 15;
+
+ aud_ts->n = cdclk * aud_ts->m / 24000;
+}
+
+void intel_audio_cdclk_change_post(struct drm_i915_private *i915)
+{
+ struct aud_ts_cdclk_m_n aud_ts;
+
+ if (DISPLAY_VER(i915) >= 13) {
+ get_aud_ts_cdclk_m_n(i915->display.cdclk.hw.ref, i915->display.cdclk.hw.cdclk, &aud_ts);
+
+ intel_de_write(i915, AUD_TS_CDCLK_N, aud_ts.n);
+ intel_de_write(i915, AUD_TS_CDCLK_M, aud_ts.m | AUD_TS_CDCLK_M_EN);
+ drm_dbg_kms(&i915->drm, "aud_ts_cdclk set to M=%u, N=%u\n", aud_ts.m, aud_ts.n);
+ }
+}
+
+static int glk_force_audio_cdclk_commit(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ bool enable)
+{
+ struct intel_cdclk_state *cdclk_state;
+ int ret;
+
+ /* need to hold at least one crtc lock for the global state */
+ ret = drm_modeset_lock(&crtc->base.mutex, state->base.acquire_ctx);
+ if (ret)
+ return ret;
+
+ cdclk_state = intel_atomic_get_cdclk_state(state);
+ if (IS_ERR(cdclk_state))
+ return PTR_ERR(cdclk_state);
+
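+	/* BCLK is 96 MHz; keep CDCLK at or above 2 * BCLK (values in kHz) */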
+ cdclk_state->force_min_cdclk = enable ? 2 * 96000 : 0;
+
+ return drm_atomic_commit(&state->base);
+}
+
+static void glk_force_audio_cdclk(struct drm_i915_private *dev_priv,
+ bool enable)
+{
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_atomic_state *state;
+ struct intel_crtc *crtc;
+ int ret;
+
+ crtc = intel_first_crtc(dev_priv);
+ if (!crtc)
+ return;
+
+ drm_modeset_acquire_init(&ctx, 0);
+ state = drm_atomic_state_alloc(&dev_priv->drm);
+ if (drm_WARN_ON(&dev_priv->drm, !state))
+ return;
+
+ state->acquire_ctx = &ctx;
+
+retry:
+ ret = glk_force_audio_cdclk_commit(to_intel_atomic_state(state), crtc,
+ enable);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ drm_modeset_backoff(&ctx);
+ goto retry;
+ }
+
+ drm_WARN_ON(&dev_priv->drm, ret);
+
+ drm_atomic_state_put(state);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+}
+
+static unsigned long i915_audio_component_get_power(struct device *kdev)
+{
+ struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
+ intel_wakeref_t ret;
+
+ /* Catch potential impedance mismatches before they occur! */
+ BUILD_BUG_ON(sizeof(intel_wakeref_t) > sizeof(unsigned long));
+
+ ret = intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO_PLAYBACK);
+
+ if (dev_priv->display.audio.power_refcount++ == 0) {
+ if (DISPLAY_VER(dev_priv) >= 9) {
+ intel_de_write(dev_priv, AUD_FREQ_CNTRL,
+ dev_priv->display.audio.freq_cntrl);
+ drm_dbg_kms(&dev_priv->drm,
+ "restored AUD_FREQ_CNTRL to 0x%x\n",
+ dev_priv->display.audio.freq_cntrl);
+ }
+
+ /* Force CDCLK to 2*BCLK as long as we need audio powered. */
+ if (IS_GEMINILAKE(dev_priv))
+ glk_force_audio_cdclk(dev_priv, true);
+
+ if (DISPLAY_VER(dev_priv) >= 10)
+ intel_de_write(dev_priv, AUD_PIN_BUF_CTL,
+ (intel_de_read(dev_priv, AUD_PIN_BUF_CTL) | AUD_PIN_BUF_ENABLE));
+ }
+
+ return ret;
+}
+
+static void i915_audio_component_put_power(struct device *kdev,
+ unsigned long cookie)
+{
+ struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
+
+ /* Stop forcing CDCLK to 2*BCLK if no need for audio to be powered. */
+ if (--dev_priv->display.audio.power_refcount == 0)
+ if (IS_GEMINILAKE(dev_priv))
+ glk_force_audio_cdclk(dev_priv, false);
+
+ intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO_PLAYBACK, cookie);
+}
+
+static void i915_audio_component_codec_wake_override(struct device *kdev,
+ bool enable)
+{
+ struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
+ unsigned long cookie;
+ u32 tmp;
+
+ if (DISPLAY_VER(dev_priv) < 9)
+ return;
+
+ cookie = i915_audio_component_get_power(kdev);
+
+	/*
+	 * Enable/disable generation of the codec wake signal, overriding the
+	 * internal logic that generates the codec wake to the controller.
+	 */
+ tmp = intel_de_read(dev_priv, HSW_AUD_CHICKENBIT);
+ tmp &= ~SKL_AUD_CODEC_WAKE_SIGNAL;
+ intel_de_write(dev_priv, HSW_AUD_CHICKENBIT, tmp);
+ usleep_range(1000, 1500);
+
+ if (enable) {
+ tmp = intel_de_read(dev_priv, HSW_AUD_CHICKENBIT);
+ tmp |= SKL_AUD_CODEC_WAKE_SIGNAL;
+ intel_de_write(dev_priv, HSW_AUD_CHICKENBIT, tmp);
+ usleep_range(1000, 1500);
+ }
+
+ i915_audio_component_put_power(kdev, cookie);
+}
+
+/* Get CDCLK in kHz */
+static int i915_audio_component_get_cdclk_freq(struct device *kdev)
+{
+ struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
+
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_DDI(dev_priv)))
+ return -ENODEV;
+
+ return dev_priv->display.cdclk.hw.cdclk;
+}
+
+/*
+ * Look up the intel_encoder for the given port and pipe. Encoders are
+ * saved in encoder_map[], indexed by pipe.
+ * MST & (pipe >= 0): return audio.encoder_map[pipe] if its port matches
+ * MST & (pipe < 0): invalid
+ * Non-MST & (pipe >= 0): only pipe = 0 (the first device entry) returns
+ * the intel_encoder with a matching port
+ * Non-MST & (pipe < 0): return the intel_encoder with a matching port
+ */
+static struct intel_encoder *get_saved_enc(struct drm_i915_private *dev_priv,
+ int port, int pipe)
+{
+ struct intel_encoder *encoder;
+
+ /* MST */
+ if (pipe >= 0) {
+ if (drm_WARN_ON(&dev_priv->drm,
+ pipe >= ARRAY_SIZE(dev_priv->display.audio.encoder_map)))
+ return NULL;
+
+ encoder = dev_priv->display.audio.encoder_map[pipe];
+		/*
+		 * At boot the audio driver may not yet know whether the
+		 * connection is MST or not, so it polls all port & pipe
+		 * combinations.
+		 */
+ if (encoder != NULL && encoder->port == port &&
+ encoder->type == INTEL_OUTPUT_DP_MST)
+ return encoder;
+ }
+
+ /* Non-MST */
+ if (pipe > 0)
+ return NULL;
+
+ for_each_pipe(dev_priv, pipe) {
+ encoder = dev_priv->display.audio.encoder_map[pipe];
+ if (encoder == NULL)
+ continue;
+
+ if (encoder->type == INTEL_OUTPUT_DP_MST)
+ continue;
+
+ if (port == encoder->port)
+ return encoder;
+ }
+
+ return NULL;
+}
+
+static int i915_audio_component_sync_audio_rate(struct device *kdev, int port,
+ int pipe, int rate)
+{
+ struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
+ struct i915_audio_component *acomp = dev_priv->display.audio.component;
+ struct intel_encoder *encoder;
+ struct intel_crtc *crtc;
+ unsigned long cookie;
+ int err = 0;
+
+ if (!HAS_DDI(dev_priv))
+ return 0;
+
+ cookie = i915_audio_component_get_power(kdev);
+ mutex_lock(&dev_priv->display.audio.mutex);
+
+	/* get the saved encoder for this port/pipe */
+ encoder = get_saved_enc(dev_priv, port, pipe);
+ if (!encoder || !encoder->base.crtc) {
+ drm_dbg_kms(&dev_priv->drm, "Not valid for port %c\n",
+ port_name(port));
+ err = -ENODEV;
+ goto unlock;
+ }
+
+ crtc = to_intel_crtc(encoder->base.crtc);
+
+ /* port must be valid now, otherwise the pipe will be invalid */
+ acomp->aud_sample_rate[port] = rate;
+
+ hsw_audio_config_update(encoder, crtc->config);
+
+ unlock:
+ mutex_unlock(&dev_priv->display.audio.mutex);
+ i915_audio_component_put_power(kdev, cookie);
+ return err;
+}
+
+static int i915_audio_component_get_eld(struct device *kdev, int port,
+ int pipe, bool *enabled,
+ unsigned char *buf, int max_bytes)
+{
+ struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
+ struct intel_encoder *intel_encoder;
+ const u8 *eld;
+ int ret = -EINVAL;
+
+ mutex_lock(&dev_priv->display.audio.mutex);
+
+ intel_encoder = get_saved_enc(dev_priv, port, pipe);
+ if (!intel_encoder) {
+ drm_dbg_kms(&dev_priv->drm, "Not valid for port %c\n",
+ port_name(port));
+ mutex_unlock(&dev_priv->display.audio.mutex);
+ return ret;
+ }
+
+ ret = 0;
+ *enabled = intel_encoder->audio_connector != NULL;
+ if (*enabled) {
+ eld = intel_encoder->audio_connector->eld;
+ ret = drm_eld_size(eld);
+ memcpy(buf, eld, min(max_bytes, ret));
+ }
+
+ mutex_unlock(&dev_priv->display.audio.mutex);
+ return ret;
+}
+
+static const struct drm_audio_component_ops i915_audio_component_ops = {
+ .owner = THIS_MODULE,
+ .get_power = i915_audio_component_get_power,
+ .put_power = i915_audio_component_put_power,
+ .codec_wake_override = i915_audio_component_codec_wake_override,
+ .get_cdclk_freq = i915_audio_component_get_cdclk_freq,
+ .sync_audio_rate = i915_audio_component_sync_audio_rate,
+ .get_eld = i915_audio_component_get_eld,
+};
+
+static int i915_audio_component_bind(struct device *i915_kdev,
+ struct device *hda_kdev, void *data)
+{
+ struct i915_audio_component *acomp = data;
+ struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
+ int i;
+
+ if (drm_WARN_ON(&dev_priv->drm, acomp->base.ops || acomp->base.dev))
+ return -EEXIST;
+
+ if (drm_WARN_ON(&dev_priv->drm,
+ !device_link_add(hda_kdev, i915_kdev,
+ DL_FLAG_STATELESS)))
+ return -ENOMEM;
+
+ drm_modeset_lock_all(&dev_priv->drm);
+ acomp->base.ops = &i915_audio_component_ops;
+ acomp->base.dev = i915_kdev;
+ BUILD_BUG_ON(MAX_PORTS != I915_MAX_PORTS);
+ for (i = 0; i < ARRAY_SIZE(acomp->aud_sample_rate); i++)
+ acomp->aud_sample_rate[i] = 0;
+ dev_priv->display.audio.component = acomp;
+ drm_modeset_unlock_all(&dev_priv->drm);
+
+ return 0;
+}
+
+static void i915_audio_component_unbind(struct device *i915_kdev,
+ struct device *hda_kdev, void *data)
+{
+ struct i915_audio_component *acomp = data;
+ struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
+
+ drm_modeset_lock_all(&dev_priv->drm);
+ acomp->base.ops = NULL;
+ acomp->base.dev = NULL;
+ dev_priv->display.audio.component = NULL;
+ drm_modeset_unlock_all(&dev_priv->drm);
+
+ device_link_remove(hda_kdev, i915_kdev);
+
+ if (dev_priv->display.audio.power_refcount)
+ drm_err(&dev_priv->drm, "audio power refcount %d after unbind\n",
+ dev_priv->display.audio.power_refcount);
+}
+
+static const struct component_ops i915_audio_component_bind_ops = {
+ .bind = i915_audio_component_bind,
+ .unbind = i915_audio_component_unbind,
+};
+
+#define AUD_FREQ_TMODE_SHIFT 14
+#define AUD_FREQ_4T 0
+#define AUD_FREQ_8T (2 << AUD_FREQ_TMODE_SHIFT)
+#define AUD_FREQ_PULLCLKS(x) (((x) & 0x3) << 11)
+#define AUD_FREQ_BCLK_96M BIT(4)
+
+#define AUD_FREQ_GEN12 (AUD_FREQ_8T | AUD_FREQ_PULLCLKS(0) | AUD_FREQ_BCLK_96M)
+#define AUD_FREQ_TGL_BROKEN (AUD_FREQ_8T | AUD_FREQ_PULLCLKS(2) | AUD_FREQ_BCLK_96M)
+
+/**
+ * i915_audio_component_init - initialize and register the audio component
+ * @dev_priv: i915 device instance
+ *
+ * This will register with the component framework a child component which
+ * will bind dynamically to the snd_hda_intel driver's corresponding master
+ * component when the latter is registered. During binding the child
+ * initializes an instance of struct i915_audio_component which it receives
+ * from the master. The master can then start to use the interface defined by
+ * this struct. Each side can break the binding at any point by deregistering
+ * its own component after which each side's component unbind callback is
+ * called.
+ *
+ * We ignore any error during registration and continue with reduced
+ * functionality (i.e. without HDMI audio).
+ */
+static void i915_audio_component_init(struct drm_i915_private *dev_priv)
+{
+ u32 aud_freq, aud_freq_init;
+ int ret;
+
+ ret = component_add_typed(dev_priv->drm.dev,
+ &i915_audio_component_bind_ops,
+ I915_COMPONENT_AUDIO);
+ if (ret < 0) {
+ drm_err(&dev_priv->drm,
+ "failed to add audio component (%d)\n", ret);
+ /* continue with reduced functionality */
+ return;
+ }
+
+ if (DISPLAY_VER(dev_priv) >= 9) {
+ aud_freq_init = intel_de_read(dev_priv, AUD_FREQ_CNTRL);
+
+ if (DISPLAY_VER(dev_priv) >= 12)
+ aud_freq = AUD_FREQ_GEN12;
+ else
+ aud_freq = aud_freq_init;
+
+ /* use BIOS provided value for TGL and RKL unless it is a known bad value */
+ if ((IS_TIGERLAKE(dev_priv) || IS_ROCKETLAKE(dev_priv)) &&
+ aud_freq_init != AUD_FREQ_TGL_BROKEN)
+ aud_freq = aud_freq_init;
+
+ drm_dbg_kms(&dev_priv->drm, "use AUD_FREQ_CNTRL of 0x%x (init value 0x%x)\n",
+ aud_freq, aud_freq_init);
+
+ dev_priv->display.audio.freq_cntrl = aud_freq;
+ }
+
+ /* init with current cdclk */
+ intel_audio_cdclk_change_post(dev_priv);
+
+ dev_priv->display.audio.component_registered = true;
+}
+
+/**
+ * i915_audio_component_cleanup - deregister the audio component
+ * @dev_priv: i915 device instance
+ *
+ * Deregisters the audio component, breaking any existing binding to the
+ * corresponding snd_hda_intel driver's master component.
+ */
+static void i915_audio_component_cleanup(struct drm_i915_private *dev_priv)
+{
+ if (!dev_priv->display.audio.component_registered)
+ return;
+
+ component_del(dev_priv->drm.dev, &i915_audio_component_bind_ops);
+ dev_priv->display.audio.component_registered = false;
+}
+
+/**
+ * intel_audio_init() - Initialize the audio driver either using
+ * component framework or using lpe audio bridge
+ * @dev_priv: the i915 drm device private data
+ *
+ */
+void intel_audio_init(struct drm_i915_private *dev_priv)
+{
+ if (intel_lpe_audio_init(dev_priv) < 0)
+ i915_audio_component_init(dev_priv);
+}
+
+/**
+ * intel_audio_deinit() - deinitialize the audio driver
+ * @dev_priv: the i915 drm device private data
+ *
+ */
+void intel_audio_deinit(struct drm_i915_private *dev_priv)
+{
+ if (dev_priv->display.audio.lpe.platdev != NULL)
+ intel_lpe_audio_teardown(dev_priv);
+ else
+ i915_audio_component_cleanup(dev_priv);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_audio.h b/drivers/gpu/drm/i915/display/intel_audio.h
new file mode 100644
index 000000000..63b22131d
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_audio.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_AUDIO_H__
+#define __INTEL_AUDIO_H__
+
+struct drm_connector_state;
+struct drm_i915_private;
+struct intel_crtc_state;
+struct intel_encoder;
+
+void intel_audio_hooks_init(struct drm_i915_private *dev_priv);
+void intel_audio_codec_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
+void intel_audio_codec_disable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state);
+void intel_audio_cdclk_change_pre(struct drm_i915_private *dev_priv);
+void intel_audio_cdclk_change_post(struct drm_i915_private *dev_priv);
+void intel_audio_init(struct drm_i915_private *dev_priv);
+void intel_audio_deinit(struct drm_i915_private *dev_priv);
+
+#endif /* __INTEL_AUDIO_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_audio_regs.h b/drivers/gpu/drm/i915/display/intel_audio_regs.h
new file mode 100644
index 000000000..d1e5844e3
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_audio_regs.h
@@ -0,0 +1,160 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __INTEL_AUDIO_REGS_H__
+#define __INTEL_AUDIO_REGS_H__
+
+#include "i915_reg_defs.h"
+
+#define G4X_AUD_VID_DID _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x62020)
+#define INTEL_AUDIO_DEVCL 0x808629FB
+#define INTEL_AUDIO_DEVBLC 0x80862801
+#define INTEL_AUDIO_DEVCTG 0x80862802
+
+#define G4X_AUD_CNTL_ST _MMIO(0x620B4)
+#define G4X_ELDV_DEVCL_DEVBLC (1 << 13)
+#define G4X_ELDV_DEVCTG (1 << 14)
+#define G4X_ELD_ADDR_MASK (0xf << 5)
+#define G4X_ELD_ACK (1 << 4)
+#define G4X_HDMIW_HDMIEDID _MMIO(0x6210C)
+
+#define _IBX_HDMIW_HDMIEDID_A 0xE2050
+#define _IBX_HDMIW_HDMIEDID_B 0xE2150
+#define IBX_HDMIW_HDMIEDID(pipe) _MMIO_PIPE(pipe, _IBX_HDMIW_HDMIEDID_A, \
+ _IBX_HDMIW_HDMIEDID_B)
+#define _IBX_AUD_CNTL_ST_A 0xE20B4
+#define _IBX_AUD_CNTL_ST_B 0xE21B4
+#define IBX_AUD_CNTL_ST(pipe) _MMIO_PIPE(pipe, _IBX_AUD_CNTL_ST_A, \
+ _IBX_AUD_CNTL_ST_B)
+#define IBX_ELD_BUFFER_SIZE_MASK (0x1f << 10)
+#define IBX_ELD_ADDRESS_MASK (0x1f << 5)
+#define IBX_ELD_ACK (1 << 4)
+#define IBX_AUD_CNTL_ST2 _MMIO(0xE20C0)
+#define IBX_CP_READY(port) ((1 << 1) << (((port) - 1) * 4))
+#define IBX_ELD_VALID(port) ((1 << 0) << (((port) - 1) * 4))
+
+#define _CPT_HDMIW_HDMIEDID_A 0xE5050
+#define _CPT_HDMIW_HDMIEDID_B 0xE5150
+#define CPT_HDMIW_HDMIEDID(pipe) _MMIO_PIPE(pipe, _CPT_HDMIW_HDMIEDID_A, _CPT_HDMIW_HDMIEDID_B)
+#define _CPT_AUD_CNTL_ST_A 0xE50B4
+#define _CPT_AUD_CNTL_ST_B 0xE51B4
+#define CPT_AUD_CNTL_ST(pipe) _MMIO_PIPE(pipe, _CPT_AUD_CNTL_ST_A, _CPT_AUD_CNTL_ST_B)
+#define CPT_AUD_CNTRL_ST2 _MMIO(0xE50C0)
+
+#define _VLV_HDMIW_HDMIEDID_A (VLV_DISPLAY_BASE + 0x62050)
+#define _VLV_HDMIW_HDMIEDID_B (VLV_DISPLAY_BASE + 0x62150)
+#define VLV_HDMIW_HDMIEDID(pipe) _MMIO_PIPE(pipe, _VLV_HDMIW_HDMIEDID_A, _VLV_HDMIW_HDMIEDID_B)
+#define _VLV_AUD_CNTL_ST_A (VLV_DISPLAY_BASE + 0x620B4)
+#define _VLV_AUD_CNTL_ST_B (VLV_DISPLAY_BASE + 0x621B4)
+#define VLV_AUD_CNTL_ST(pipe) _MMIO_PIPE(pipe, _VLV_AUD_CNTL_ST_A, _VLV_AUD_CNTL_ST_B)
+#define VLV_AUD_CNTL_ST2 _MMIO(VLV_DISPLAY_BASE + 0x620C0)
+
+#define _IBX_AUD_CONFIG_A 0xe2000
+#define _IBX_AUD_CONFIG_B 0xe2100
+#define IBX_AUD_CFG(pipe) _MMIO_PIPE(pipe, _IBX_AUD_CONFIG_A, _IBX_AUD_CONFIG_B)
+#define _CPT_AUD_CONFIG_A 0xe5000
+#define _CPT_AUD_CONFIG_B 0xe5100
+#define CPT_AUD_CFG(pipe) _MMIO_PIPE(pipe, _CPT_AUD_CONFIG_A, _CPT_AUD_CONFIG_B)
+#define _VLV_AUD_CONFIG_A (VLV_DISPLAY_BASE + 0x62000)
+#define _VLV_AUD_CONFIG_B (VLV_DISPLAY_BASE + 0x62100)
+#define VLV_AUD_CFG(pipe) _MMIO_PIPE(pipe, _VLV_AUD_CONFIG_A, _VLV_AUD_CONFIG_B)
+
+#define AUD_CONFIG_N_VALUE_INDEX (1 << 29)
+#define AUD_CONFIG_N_PROG_ENABLE (1 << 28)
+#define AUD_CONFIG_UPPER_N_SHIFT 20
+#define AUD_CONFIG_UPPER_N_MASK (0xff << 20)
+#define AUD_CONFIG_LOWER_N_SHIFT 4
+#define AUD_CONFIG_LOWER_N_MASK (0xfff << 4)
+#define AUD_CONFIG_N_MASK (AUD_CONFIG_UPPER_N_MASK | AUD_CONFIG_LOWER_N_MASK)
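+/* bits [19:12] of N go in the upper field, bits [11:0] in the lower field */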
+#define AUD_CONFIG_N(n) \
+ (((((n) >> 12) & 0xff) << AUD_CONFIG_UPPER_N_SHIFT) | \
+ (((n) & 0xfff) << AUD_CONFIG_LOWER_N_SHIFT))
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_SHIFT 16
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK (0xf << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_25175 (0 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_25200 (1 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_27000 (2 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_27027 (3 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_54000 (4 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_54054 (5 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_74176 (6 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_74250 (7 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_148352 (8 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 (9 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_296703 (10 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_297000 (11 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_593407 (12 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_594000 (13 << 16)
+#define AUD_CONFIG_DISABLE_NCTS (1 << 3)
+
+#define _HSW_AUD_CONFIG_A 0x65000
+#define _HSW_AUD_CONFIG_B 0x65100
+#define HSW_AUD_CFG(trans) _MMIO_TRANS(trans, _HSW_AUD_CONFIG_A, _HSW_AUD_CONFIG_B)
+
+#define _HSW_AUD_MISC_CTRL_A 0x65010
+#define _HSW_AUD_MISC_CTRL_B 0x65110
+#define HSW_AUD_MISC_CTRL(trans) _MMIO_TRANS(trans, _HSW_AUD_MISC_CTRL_A, _HSW_AUD_MISC_CTRL_B)
+
+#define _HSW_AUD_M_CTS_ENABLE_A 0x65028
+#define _HSW_AUD_M_CTS_ENABLE_B 0x65128
+#define HSW_AUD_M_CTS_ENABLE(trans) _MMIO_TRANS(trans, _HSW_AUD_M_CTS_ENABLE_A, _HSW_AUD_M_CTS_ENABLE_B)
+#define AUD_M_CTS_M_VALUE_INDEX (1 << 21)
+#define AUD_M_CTS_M_PROG_ENABLE (1 << 20)
+#define AUD_CONFIG_M_MASK 0xfffff
+
+#define _HSW_AUD_DIP_ELD_CTRL_ST_A 0x650b4
+#define _HSW_AUD_DIP_ELD_CTRL_ST_B 0x651b4
+#define HSW_AUD_DIP_ELD_CTRL(trans) _MMIO_TRANS(trans, _HSW_AUD_DIP_ELD_CTRL_ST_A, _HSW_AUD_DIP_ELD_CTRL_ST_B)
+
+/* Audio Digital Converter */
+#define _HSW_AUD_DIG_CNVT_1 0x65080
+#define _HSW_AUD_DIG_CNVT_2 0x65180
+#define AUD_DIG_CNVT(trans) _MMIO_TRANS(trans, _HSW_AUD_DIG_CNVT_1, _HSW_AUD_DIG_CNVT_2)
+#define DIP_PORT_SEL_MASK 0x3
+
+#define _HSW_AUD_EDID_DATA_A 0x65050
+#define _HSW_AUD_EDID_DATA_B 0x65150
+#define HSW_AUD_EDID_DATA(trans) _MMIO_TRANS(trans, _HSW_AUD_EDID_DATA_A, _HSW_AUD_EDID_DATA_B)
+
+#define HSW_AUD_PIPE_CONV_CFG _MMIO(0x6507c)
+#define HSW_AUD_PIN_ELD_CP_VLD _MMIO(0x650c0)
+#define AUDIO_INACTIVE(trans) ((1 << 3) << ((trans) * 4))
+#define AUDIO_OUTPUT_ENABLE(trans) ((1 << 2) << ((trans) * 4))
+#define AUDIO_CP_READY(trans) ((1 << 1) << ((trans) * 4))
+#define AUDIO_ELD_VALID(trans) ((1 << 0) << ((trans) * 4))
+
+#define _AUD_TCA_DP_2DOT0_CTRL 0x650bc
+#define _AUD_TCB_DP_2DOT0_CTRL 0x651bc
+#define AUD_DP_2DOT0_CTRL(trans) _MMIO_TRANS(trans, _AUD_TCA_DP_2DOT0_CTRL, _AUD_TCB_DP_2DOT0_CTRL)
+#define AUD_ENABLE_SDP_SPLIT REG_BIT(31)
+
+#define HSW_AUD_CHICKENBIT _MMIO(0x65f10)
+#define SKL_AUD_CODEC_WAKE_SIGNAL (1 << 15)
+
+#define AUD_FREQ_CNTRL _MMIO(0x65900)
+#define AUD_PIN_BUF_CTL _MMIO(0x48414)
+#define AUD_PIN_BUF_ENABLE REG_BIT(31)
+
+#define AUD_TS_CDCLK_M _MMIO(0x65ea0)
+#define AUD_TS_CDCLK_M_EN REG_BIT(31)
+#define AUD_TS_CDCLK_N _MMIO(0x65ea4)
+
+/* Display Audio Config Reg */
+#define AUD_CONFIG_BE _MMIO(0x65ef0)
+#define HBLANK_EARLY_ENABLE_ICL(pipe) (0x1 << (20 - (pipe)))
+#define HBLANK_EARLY_ENABLE_TGL(pipe) (0x1 << (24 + (pipe)))
+#define HBLANK_START_COUNT_MASK(pipe) (0x7 << (3 + ((pipe) * 6)))
+#define HBLANK_START_COUNT(pipe, val) (((val) & 0x7) << (3 + ((pipe)) * 6))
+#define NUMBER_SAMPLES_PER_LINE_MASK(pipe) (0x3 << ((pipe) * 6))
+#define NUMBER_SAMPLES_PER_LINE(pipe, val) (((val) & 0x3) << ((pipe) * 6))
+
+#define HBLANK_START_COUNT_8 0
+#define HBLANK_START_COUNT_16 1
+#define HBLANK_START_COUNT_32 2
+#define HBLANK_START_COUNT_64 3
+#define HBLANK_START_COUNT_96 4
+#define HBLANK_START_COUNT_128 5
+
+#endif /* __INTEL_AUDIO_REGS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_backlight.c b/drivers/gpu/drm/i915/display/intel_backlight.c
new file mode 100644
index 000000000..beba39a38
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_backlight.c
@@ -0,0 +1,1794 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include <linux/backlight.h>
+#include <linux/kernel.h>
+#include <linux/pwm.h>
+#include <linux/string_helpers.h>
+
+#include <acpi/video.h>
+
+#include "intel_backlight.h"
+#include "intel_backlight_regs.h"
+#include "intel_connector.h"
+#include "intel_de.h"
+#include "intel_display_types.h"
+#include "intel_dp_aux_backlight.h"
+#include "intel_dsi_dcs_backlight.h"
+#include "intel_panel.h"
+#include "intel_pci_config.h"
+#include "intel_pps.h"
+#include "intel_quirks.h"
+
+/**
+ * scale - scale values from one range to another
+ * @source_val: value in range [@source_min..@source_max]
+ * @source_min: minimum legal value for @source_val
+ * @source_max: maximum legal value for @source_val
+ * @target_min: corresponding target value for @source_min
+ * @target_max: corresponding target value for @source_max
+ *
+ * Return @source_val in range [@source_min..@source_max] scaled to range
+ * [@target_min..@target_max].
+ */
+static u32 scale(u32 source_val,
+ u32 source_min, u32 source_max,
+ u32 target_min, u32 target_max)
+{
+ u64 target_val;
+
+ WARN_ON(source_min > source_max);
+ WARN_ON(target_min > target_max);
+
+ /* defensive */
+ source_val = clamp(source_val, source_min, source_max);
+
+ /* avoid overflows */
+ target_val = mul_u32_u32(source_val - source_min,
+ target_max - target_min);
+ target_val = DIV_ROUND_CLOSEST_ULL(target_val, source_max - source_min);
+ target_val += target_min;
+
+ return target_val;
+}
+
+/*
+ * Scale user_level in range [0..user_max] to [0..hw_max], clamping the result
+ * to [hw_min..hw_max].
+ */
+static u32 clamp_user_to_hw(struct intel_connector *connector,
+ u32 user_level, u32 user_max)
+{
+ struct intel_panel *panel = &connector->panel;
+ u32 hw_level;
+
+ hw_level = scale(user_level, 0, user_max, 0, panel->backlight.max);
+ hw_level = clamp(hw_level, panel->backlight.min, panel->backlight.max);
+
+ return hw_level;
+}
+
+/* Scale hw_level in range [hw_min..hw_max] to [0..user_max]. */
+static u32 scale_hw_to_user(struct intel_connector *connector,
+ u32 hw_level, u32 user_max)
+{
+ struct intel_panel *panel = &connector->panel;
+
+ return scale(hw_level, panel->backlight.min, panel->backlight.max,
+ 0, user_max);
+}
+
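+/*
+ * Apply the invert_brightness module parameter: < 0 never inverts,
+ * > 0 always inverts, and 0 inverts only when the panel has the
+ * QUIRK_INVERT_BRIGHTNESS quirk set.
+ */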
+u32 intel_backlight_invert_pwm_level(struct intel_connector *connector, u32 val)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+
+ drm_WARN_ON(&dev_priv->drm, panel->backlight.pwm_level_max == 0);
+
+ if (dev_priv->params.invert_brightness < 0)
+ return val;
+
+ if (dev_priv->params.invert_brightness > 0 ||
+ intel_has_quirk(dev_priv, QUIRK_INVERT_BRIGHTNESS)) {
+ return panel->backlight.pwm_level_max - val + panel->backlight.pwm_level_min;
+ }
+
+ return val;
+}
+
+void intel_backlight_set_pwm_level(const struct drm_connector_state *conn_state, u32 val)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+
+ drm_dbg_kms(&i915->drm, "set backlight PWM = %d\n", val);
+ panel->backlight.pwm_funcs->set(conn_state, val);
+}
+
+u32 intel_backlight_level_to_pwm(struct intel_connector *connector, u32 val)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+
+ drm_WARN_ON_ONCE(&dev_priv->drm,
+ panel->backlight.max == 0 || panel->backlight.pwm_level_max == 0);
+
+ val = scale(val, panel->backlight.min, panel->backlight.max,
+ panel->backlight.pwm_level_min, panel->backlight.pwm_level_max);
+
+ return intel_backlight_invert_pwm_level(connector, val);
+}
+
+u32 intel_backlight_level_from_pwm(struct intel_connector *connector, u32 val)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+
+ drm_WARN_ON_ONCE(&dev_priv->drm,
+ panel->backlight.max == 0 || panel->backlight.pwm_level_max == 0);
+
+ if (dev_priv->params.invert_brightness > 0 ||
+ (dev_priv->params.invert_brightness == 0 && intel_has_quirk(dev_priv, QUIRK_INVERT_BRIGHTNESS)))
+ val = panel->backlight.pwm_level_max - (val - panel->backlight.pwm_level_min);
+
+ return scale(val, panel->backlight.pwm_level_min, panel->backlight.pwm_level_max,
+ panel->backlight.min, panel->backlight.max);
+}
+
+static u32 lpt_get_backlight(struct intel_connector *connector, enum pipe unused)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+
+ return intel_de_read(dev_priv, BLC_PWM_PCH_CTL2) & BACKLIGHT_DUTY_CYCLE_MASK;
+}
+
+static u32 pch_get_backlight(struct intel_connector *connector, enum pipe unused)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+
+ return intel_de_read(dev_priv, BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
+}
+
+static u32 i9xx_get_backlight(struct intel_connector *connector, enum pipe unused)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ u32 val;
+
+ val = intel_de_read(dev_priv, BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
+ if (DISPLAY_VER(dev_priv) < 4)
+ val >>= 1;
+
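+	/*
+	 * In combination mode the PWM duty cycle is further scaled by the
+	 * legacy backlight brightness (LBPC) byte in PCI config space.
+	 */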
+ if (panel->backlight.combination_mode) {
+ u8 lbpc;
+
+ pci_read_config_byte(to_pci_dev(dev_priv->drm.dev), LBPC, &lbpc);
+ val *= lbpc;
+ }
+
+ return val;
+}
+
+static u32 vlv_get_backlight(struct intel_connector *connector, enum pipe pipe)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+
+ if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B))
+ return 0;
+
+ return intel_de_read(dev_priv, VLV_BLC_PWM_CTL(pipe)) & BACKLIGHT_DUTY_CYCLE_MASK;
+}
+
+static u32 bxt_get_backlight(struct intel_connector *connector, enum pipe unused)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+
+ return intel_de_read(dev_priv,
+ BXT_BLC_PWM_DUTY(panel->backlight.controller));
+}
+
+static u32 ext_pwm_get_backlight(struct intel_connector *connector, enum pipe unused)
+{
+ struct intel_panel *panel = &connector->panel;
+ struct pwm_state state;
+
+ pwm_get_state(panel->backlight.pwm, &state);
+ return pwm_get_relative_duty_cycle(&state, 100);
+}
+
+static void lpt_set_backlight(const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+
+ u32 val = intel_de_read(dev_priv, BLC_PWM_PCH_CTL2) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+ intel_de_write(dev_priv, BLC_PWM_PCH_CTL2, val | level);
+}
+
+static void pch_set_backlight(const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ u32 tmp;
+
+ tmp = intel_de_read(dev_priv, BLC_PWM_CPU_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+ intel_de_write(dev_priv, BLC_PWM_CPU_CTL, tmp | level);
+}
+
+static void i9xx_set_backlight(const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ u32 tmp, mask;
+
+ drm_WARN_ON(&dev_priv->drm, panel->backlight.pwm_level_max == 0);
+
+ if (panel->backlight.combination_mode) {
+ u8 lbpc;
+
+ lbpc = level * 0xfe / panel->backlight.pwm_level_max + 1;
+ level /= lbpc;
+ pci_write_config_byte(to_pci_dev(dev_priv->drm.dev), LBPC, lbpc);
+ }
+
+ if (DISPLAY_VER(dev_priv) == 4) {
+ mask = BACKLIGHT_DUTY_CYCLE_MASK;
+ } else {
+ level <<= 1;
+ mask = BACKLIGHT_DUTY_CYCLE_MASK_PNV;
+ }
+
+ tmp = intel_de_read(dev_priv, BLC_PWM_CTL) & ~mask;
+ intel_de_write(dev_priv, BLC_PWM_CTL, tmp | level);
+}
+
+static void vlv_set_backlight(const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ enum pipe pipe = to_intel_crtc(conn_state->crtc)->pipe;
+ u32 tmp;
+
+ tmp = intel_de_read(dev_priv, VLV_BLC_PWM_CTL(pipe)) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+ intel_de_write(dev_priv, VLV_BLC_PWM_CTL(pipe), tmp | level);
+}
+
+static void bxt_set_backlight(const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+
+ intel_de_write(dev_priv,
+ BXT_BLC_PWM_DUTY(panel->backlight.controller), level);
+}
+
+static void ext_pwm_set_backlight(const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_panel *panel = &to_intel_connector(conn_state->connector)->panel;
+
+ pwm_set_relative_duty_cycle(&panel->backlight.pwm_state, level, 100);
+ pwm_apply_state(panel->backlight.pwm, &panel->backlight.pwm_state);
+}
+
+static void
+intel_panel_actually_set_backlight(const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+
+ drm_dbg_kms(&i915->drm, "set backlight level = %d\n", level);
+
+ panel->backlight.funcs->set(conn_state, level);
+}
+
+/*
+ * Set backlight brightness to level in range [0..max], assuming hw min is
+ * respected.
+ */
+void intel_backlight_set_acpi(const struct drm_connector_state *conn_state,
+ u32 user_level, u32 user_max)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ u32 hw_level;
+
+ /*
+ * Lack of crtc may occur during driver init because
+ * connection_mutex isn't held across the entire backlight
+ * setup + modeset readout, and the BIOS can issue the
+ * requests at any time.
+ */
+ if (!panel->backlight.present || !conn_state->crtc)
+ return;
+
+ mutex_lock(&dev_priv->display.backlight.lock);
+
+ drm_WARN_ON(&dev_priv->drm, panel->backlight.max == 0);
+
+ hw_level = clamp_user_to_hw(connector, user_level, user_max);
+ panel->backlight.level = hw_level;
+
+ if (panel->backlight.device)
+ panel->backlight.device->props.brightness =
+ scale_hw_to_user(connector,
+ panel->backlight.level,
+ panel->backlight.device->props.max_brightness);
+
+ if (panel->backlight.enabled)
+ intel_panel_actually_set_backlight(conn_state, hw_level);
+
+ mutex_unlock(&dev_priv->display.backlight.lock);
+}
+
+static void lpt_disable_backlight(const struct drm_connector_state *old_conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ u32 tmp;
+
+ intel_backlight_set_pwm_level(old_conn_state, level);
+
+ /*
+ * Although we don't support or enable CPU PWM with LPT/SPT based
+ * systems, it may have been enabled prior to loading the
+ * driver. Disable to avoid warnings on LCPLL disable.
+ *
+ * This needs rework if we need to add support for CPU PWM on PCH split
+ * platforms.
+ */
+ tmp = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2);
+ if (tmp & BLM_PWM_ENABLE) {
+ drm_dbg_kms(&dev_priv->drm,
+ "cpu backlight was enabled, disabling\n");
+ intel_de_write(dev_priv, BLC_PWM_CPU_CTL2,
+ tmp & ~BLM_PWM_ENABLE);
+ }
+
+ tmp = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1);
+ intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE);
+}
+
+static void pch_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
+{
+ struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ u32 tmp;
+
+ intel_backlight_set_pwm_level(old_conn_state, val);
+
+ tmp = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2);
+ intel_de_write(dev_priv, BLC_PWM_CPU_CTL2, tmp & ~BLM_PWM_ENABLE);
+
+ tmp = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1);
+ intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE);
+}
+
+static void i9xx_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
+{
+ intel_backlight_set_pwm_level(old_conn_state, val);
+}
+
+static void i965_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
+{
+ struct drm_i915_private *dev_priv = to_i915(old_conn_state->connector->dev);
+ u32 tmp;
+
+ intel_backlight_set_pwm_level(old_conn_state, val);
+
+ tmp = intel_de_read(dev_priv, BLC_PWM_CTL2);
+ intel_de_write(dev_priv, BLC_PWM_CTL2, tmp & ~BLM_PWM_ENABLE);
+}
+
+static void vlv_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
+{
+ struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ enum pipe pipe = to_intel_crtc(old_conn_state->crtc)->pipe;
+ u32 tmp;
+
+ intel_backlight_set_pwm_level(old_conn_state, val);
+
+ tmp = intel_de_read(dev_priv, VLV_BLC_PWM_CTL2(pipe));
+ intel_de_write(dev_priv, VLV_BLC_PWM_CTL2(pipe),
+ tmp & ~BLM_PWM_ENABLE);
+}
+
+static void bxt_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
+{
+ struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ u32 tmp;
+
+ intel_backlight_set_pwm_level(old_conn_state, val);
+
+ tmp = intel_de_read(dev_priv,
+ BXT_BLC_PWM_CTL(panel->backlight.controller));
+ intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller),
+ tmp & ~BXT_BLC_PWM_ENABLE);
+
+ if (panel->backlight.controller == 1) {
+ val = intel_de_read(dev_priv, UTIL_PIN_CTL);
+ val &= ~UTIL_PIN_ENABLE;
+ intel_de_write(dev_priv, UTIL_PIN_CTL, val);
+ }
+}
+
+static void cnp_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
+{
+ struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ u32 tmp;
+
+ intel_backlight_set_pwm_level(old_conn_state, val);
+
+ tmp = intel_de_read(dev_priv,
+ BXT_BLC_PWM_CTL(panel->backlight.controller));
+ intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller),
+ tmp & ~BXT_BLC_PWM_ENABLE);
+}
+
+static void ext_pwm_disable_backlight(const struct drm_connector_state *old_conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
+ struct intel_panel *panel = &connector->panel;
+
+ intel_backlight_set_pwm_level(old_conn_state, level);
+
+ panel->backlight.pwm_state.enabled = false;
+ pwm_apply_state(panel->backlight.pwm, &panel->backlight.pwm_state);
+}
+
+void intel_backlight_disable(const struct drm_connector_state *old_conn_state)
+{
+ struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+
+ if (!panel->backlight.present)
+ return;
+
+	/*
+	 * Do not disable backlight on the vga_switcheroo path. When switching
+	 * away from i915, the other client may depend on i915 to handle the
+	 * backlight. The downside is that the backlight stays on unnecessarily
+	 * if no other client is ever activated.
+	 */
+ if (dev_priv->drm.switch_power_state == DRM_SWITCH_POWER_CHANGING) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Skipping backlight disable on vga switch\n");
+ return;
+ }
+
+ mutex_lock(&dev_priv->display.backlight.lock);
+
+ if (panel->backlight.device)
+ panel->backlight.device->props.power = FB_BLANK_POWERDOWN;
+ panel->backlight.enabled = false;
+ panel->backlight.funcs->disable(old_conn_state, 0);
+
+ mutex_unlock(&dev_priv->display.backlight.lock);
+}
+
+static void lpt_enable_backlight(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ u32 pch_ctl1, pch_ctl2, schicken;
+
+ pch_ctl1 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1);
+ if (pch_ctl1 & BLM_PCH_PWM_ENABLE) {
+ drm_dbg_kms(&dev_priv->drm, "pch backlight already enabled\n");
+ pch_ctl1 &= ~BLM_PCH_PWM_ENABLE;
+ intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, pch_ctl1);
+ }
+
+ if (HAS_PCH_LPT(dev_priv)) {
+ schicken = intel_de_read(dev_priv, SOUTH_CHICKEN2);
+ if (panel->backlight.alternate_pwm_increment)
+ schicken |= LPT_PWM_GRANULARITY;
+ else
+ schicken &= ~LPT_PWM_GRANULARITY;
+ intel_de_write(dev_priv, SOUTH_CHICKEN2, schicken);
+ } else {
+ schicken = intel_de_read(dev_priv, SOUTH_CHICKEN1);
+ if (panel->backlight.alternate_pwm_increment)
+ schicken |= SPT_PWM_GRANULARITY;
+ else
+ schicken &= ~SPT_PWM_GRANULARITY;
+ intel_de_write(dev_priv, SOUTH_CHICKEN1, schicken);
+ }
+
+ pch_ctl2 = panel->backlight.pwm_level_max << 16;
+ intel_de_write(dev_priv, BLC_PWM_PCH_CTL2, pch_ctl2);
+
+ pch_ctl1 = 0;
+ if (panel->backlight.active_low_pwm)
+ pch_ctl1 |= BLM_PCH_POLARITY;
+
+ /* After LPT, override is the default. */
+ if (HAS_PCH_LPT(dev_priv))
+ pch_ctl1 |= BLM_PCH_OVERRIDE_ENABLE;
+
+ intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, pch_ctl1);
+ intel_de_posting_read(dev_priv, BLC_PWM_PCH_CTL1);
+ intel_de_write(dev_priv, BLC_PWM_PCH_CTL1,
+ pch_ctl1 | BLM_PCH_PWM_ENABLE);
+
+ /* This won't stick until the above enable. */
+ intel_backlight_set_pwm_level(conn_state, level);
+}
+
+static void pch_enable_backlight(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ u32 cpu_ctl2, pch_ctl1, pch_ctl2;
+
+ cpu_ctl2 = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2);
+ if (cpu_ctl2 & BLM_PWM_ENABLE) {
+ drm_dbg_kms(&dev_priv->drm, "cpu backlight already enabled\n");
+ cpu_ctl2 &= ~BLM_PWM_ENABLE;
+ intel_de_write(dev_priv, BLC_PWM_CPU_CTL2, cpu_ctl2);
+ }
+
+ pch_ctl1 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1);
+ if (pch_ctl1 & BLM_PCH_PWM_ENABLE) {
+ drm_dbg_kms(&dev_priv->drm, "pch backlight already enabled\n");
+ pch_ctl1 &= ~BLM_PCH_PWM_ENABLE;
+ intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, pch_ctl1);
+ }
+
+ if (cpu_transcoder == TRANSCODER_EDP)
+ cpu_ctl2 = BLM_TRANSCODER_EDP;
+ else
+ cpu_ctl2 = BLM_PIPE(cpu_transcoder);
+ intel_de_write(dev_priv, BLC_PWM_CPU_CTL2, cpu_ctl2);
+ intel_de_posting_read(dev_priv, BLC_PWM_CPU_CTL2);
+ intel_de_write(dev_priv, BLC_PWM_CPU_CTL2, cpu_ctl2 | BLM_PWM_ENABLE);
+
+ /* This won't stick until the above enable. */
+ intel_backlight_set_pwm_level(conn_state, level);
+
+ pch_ctl2 = panel->backlight.pwm_level_max << 16;
+ intel_de_write(dev_priv, BLC_PWM_PCH_CTL2, pch_ctl2);
+
+ pch_ctl1 = 0;
+ if (panel->backlight.active_low_pwm)
+ pch_ctl1 |= BLM_PCH_POLARITY;
+
+ intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, pch_ctl1);
+ intel_de_posting_read(dev_priv, BLC_PWM_PCH_CTL1);
+ intel_de_write(dev_priv, BLC_PWM_PCH_CTL1,
+ pch_ctl1 | BLM_PCH_PWM_ENABLE);
+}
+
+static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ u32 ctl, freq;
+
+ ctl = intel_de_read(dev_priv, BLC_PWM_CTL);
+ if (ctl & BACKLIGHT_DUTY_CYCLE_MASK_PNV) {
+ drm_dbg_kms(&dev_priv->drm, "backlight already enabled\n");
+ intel_de_write(dev_priv, BLC_PWM_CTL, 0);
+ }
+
+ freq = panel->backlight.pwm_level_max;
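+	/*
+	 * In combination mode pwm_level_max was scaled up by 0xff at setup
+	 * time (fine-grained duty cycle control comes via the legacy LBPC
+	 * PCI config register), so divide that factor back out here.
+	 */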
+ if (panel->backlight.combination_mode)
+ freq /= 0xff;
+
+ ctl = freq << 17;
+ if (panel->backlight.combination_mode)
+ ctl |= BLM_LEGACY_MODE;
+ if (IS_PINEVIEW(dev_priv) && panel->backlight.active_low_pwm)
+ ctl |= BLM_POLARITY_PNV;
+
+ intel_de_write(dev_priv, BLC_PWM_CTL, ctl);
+ intel_de_posting_read(dev_priv, BLC_PWM_CTL);
+
+ /* XXX: combine this into above write? */
+ intel_backlight_set_pwm_level(conn_state, level);
+
+ /*
+ * Needed to enable backlight on some 855gm models. BLC_HIST_CTL is
+ * 855gm only, but checking for gen2 is safe, as 855gm is the only gen2
+ * that has backlight.
+ */
+ if (DISPLAY_VER(dev_priv) == 2)
+ intel_de_write(dev_priv, BLC_HIST_CTL, BLM_HISTOGRAM_ENABLE);
+}
+
+static void i965_enable_backlight(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ enum pipe pipe = to_intel_crtc(conn_state->crtc)->pipe;
+ u32 ctl, ctl2, freq;
+
+ ctl2 = intel_de_read(dev_priv, BLC_PWM_CTL2);
+ if (ctl2 & BLM_PWM_ENABLE) {
+ drm_dbg_kms(&dev_priv->drm, "backlight already enabled\n");
+ ctl2 &= ~BLM_PWM_ENABLE;
+ intel_de_write(dev_priv, BLC_PWM_CTL2, ctl2);
+ }
+
+ freq = panel->backlight.pwm_level_max;
+ if (panel->backlight.combination_mode)
+ freq /= 0xff;
+
+ ctl = freq << 16;
+ intel_de_write(dev_priv, BLC_PWM_CTL, ctl);
+
+ ctl2 = BLM_PIPE(pipe);
+ if (panel->backlight.combination_mode)
+ ctl2 |= BLM_COMBINATION_MODE;
+ if (panel->backlight.active_low_pwm)
+ ctl2 |= BLM_POLARITY_I965;
+ intel_de_write(dev_priv, BLC_PWM_CTL2, ctl2);
+ intel_de_posting_read(dev_priv, BLC_PWM_CTL2);
+ intel_de_write(dev_priv, BLC_PWM_CTL2, ctl2 | BLM_PWM_ENABLE);
+
+ intel_backlight_set_pwm_level(conn_state, level);
+}
+
+static void vlv_enable_backlight(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
+ u32 ctl, ctl2;
+
+ ctl2 = intel_de_read(dev_priv, VLV_BLC_PWM_CTL2(pipe));
+ if (ctl2 & BLM_PWM_ENABLE) {
+ drm_dbg_kms(&dev_priv->drm, "backlight already enabled\n");
+ ctl2 &= ~BLM_PWM_ENABLE;
+ intel_de_write(dev_priv, VLV_BLC_PWM_CTL2(pipe), ctl2);
+ }
+
+ ctl = panel->backlight.pwm_level_max << 16;
+ intel_de_write(dev_priv, VLV_BLC_PWM_CTL(pipe), ctl);
+
+ /* XXX: combine this into above write? */
+ intel_backlight_set_pwm_level(conn_state, level);
+
+ ctl2 = 0;
+ if (panel->backlight.active_low_pwm)
+ ctl2 |= BLM_POLARITY_I965;
+ intel_de_write(dev_priv, VLV_BLC_PWM_CTL2(pipe), ctl2);
+ intel_de_posting_read(dev_priv, VLV_BLC_PWM_CTL2(pipe));
+ intel_de_write(dev_priv, VLV_BLC_PWM_CTL2(pipe),
+ ctl2 | BLM_PWM_ENABLE);
+}
+
+static void bxt_enable_backlight(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
+ u32 pwm_ctl, val;
+
+ /* Controller 1 uses the utility pin. */
+ if (panel->backlight.controller == 1) {
+ val = intel_de_read(dev_priv, UTIL_PIN_CTL);
+ if (val & UTIL_PIN_ENABLE) {
+ drm_dbg_kms(&dev_priv->drm,
+ "util pin already enabled\n");
+ val &= ~UTIL_PIN_ENABLE;
+ intel_de_write(dev_priv, UTIL_PIN_CTL, val);
+ }
+
+ val = 0;
+ if (panel->backlight.util_pin_active_low)
+ val |= UTIL_PIN_POLARITY;
+ intel_de_write(dev_priv, UTIL_PIN_CTL,
+ val | UTIL_PIN_PIPE(pipe) | UTIL_PIN_MODE_PWM | UTIL_PIN_ENABLE);
+ }
+
+ pwm_ctl = intel_de_read(dev_priv,
+ BXT_BLC_PWM_CTL(panel->backlight.controller));
+ if (pwm_ctl & BXT_BLC_PWM_ENABLE) {
+ drm_dbg_kms(&dev_priv->drm, "backlight already enabled\n");
+ pwm_ctl &= ~BXT_BLC_PWM_ENABLE;
+ intel_de_write(dev_priv,
+ BXT_BLC_PWM_CTL(panel->backlight.controller),
+ pwm_ctl);
+ }
+
+ intel_de_write(dev_priv,
+ BXT_BLC_PWM_FREQ(panel->backlight.controller),
+ panel->backlight.pwm_level_max);
+
+ intel_backlight_set_pwm_level(conn_state, level);
+
+ pwm_ctl = 0;
+ if (panel->backlight.active_low_pwm)
+ pwm_ctl |= BXT_BLC_PWM_POLARITY;
+
+ intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller),
+ pwm_ctl);
+ intel_de_posting_read(dev_priv,
+ BXT_BLC_PWM_CTL(panel->backlight.controller));
+ intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller),
+ pwm_ctl | BXT_BLC_PWM_ENABLE);
+}
+
+static void cnp_enable_backlight(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ u32 pwm_ctl;
+
+ pwm_ctl = intel_de_read(dev_priv,
+ BXT_BLC_PWM_CTL(panel->backlight.controller));
+ if (pwm_ctl & BXT_BLC_PWM_ENABLE) {
+ drm_dbg_kms(&dev_priv->drm, "backlight already enabled\n");
+ pwm_ctl &= ~BXT_BLC_PWM_ENABLE;
+ intel_de_write(dev_priv,
+ BXT_BLC_PWM_CTL(panel->backlight.controller),
+ pwm_ctl);
+ }
+
+ intel_de_write(dev_priv,
+ BXT_BLC_PWM_FREQ(panel->backlight.controller),
+ panel->backlight.pwm_level_max);
+
+ intel_backlight_set_pwm_level(conn_state, level);
+
+ pwm_ctl = 0;
+ if (panel->backlight.active_low_pwm)
+ pwm_ctl |= BXT_BLC_PWM_POLARITY;
+
+ intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller),
+ pwm_ctl);
+ intel_de_posting_read(dev_priv,
+ BXT_BLC_PWM_CTL(panel->backlight.controller));
+ intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller),
+ pwm_ctl | BXT_BLC_PWM_ENABLE);
+}
+
+static void ext_pwm_enable_backlight(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct intel_panel *panel = &connector->panel;
+
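+	/* ext_pwm_setup_backlight() sets pwm_level_max to 100, so level is a percentage here. */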
+ pwm_set_relative_duty_cycle(&panel->backlight.pwm_state, level, 100);
+ panel->backlight.pwm_state.enabled = true;
+ pwm_apply_state(panel->backlight.pwm, &panel->backlight.pwm_state);
+}
+
+static void __intel_backlight_enable(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct intel_panel *panel = &connector->panel;
+
+ WARN_ON(panel->backlight.max == 0);
+
+ if (panel->backlight.level <= panel->backlight.min) {
+ panel->backlight.level = panel->backlight.max;
+ if (panel->backlight.device)
+ panel->backlight.device->props.brightness =
+ scale_hw_to_user(connector,
+ panel->backlight.level,
+ panel->backlight.device->props.max_brightness);
+ }
+
+ panel->backlight.funcs->enable(crtc_state, conn_state, panel->backlight.level);
+ panel->backlight.enabled = true;
+ if (panel->backlight.device)
+ panel->backlight.device->props.power = FB_BLANK_UNBLANK;
+}
+
+void intel_backlight_enable(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
+
+ if (!panel->backlight.present)
+ return;
+
+ drm_dbg_kms(&dev_priv->drm, "pipe %c\n", pipe_name(pipe));
+
+ mutex_lock(&dev_priv->display.backlight.lock);
+
+ __intel_backlight_enable(crtc_state, conn_state);
+
+ mutex_unlock(&dev_priv->display.backlight.lock);
+}
+
+#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
+static u32 intel_panel_get_backlight(struct intel_connector *connector)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ u32 val = 0;
+
+ mutex_lock(&dev_priv->display.backlight.lock);
+
+ if (panel->backlight.enabled)
+ val = panel->backlight.funcs->get(connector, intel_connector_get_pipe(connector));
+
+ mutex_unlock(&dev_priv->display.backlight.lock);
+
+ drm_dbg_kms(&dev_priv->drm, "get backlight PWM = %d\n", val);
+ return val;
+}
+
+/* Scale user_level in range [0..user_max] to [hw_min..hw_max]. */
+static u32 scale_user_to_hw(struct intel_connector *connector,
+ u32 user_level, u32 user_max)
+{
+ struct intel_panel *panel = &connector->panel;
+
+ return scale(user_level, 0, user_max,
+ panel->backlight.min, panel->backlight.max);
+}
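+
+/*
+ * Worked example (hypothetical values): with user_max = 100,
+ * backlight.min = 100 and backlight.max = 1000, a user_level of 50
+ * maps to 100 + 50 * (1000 - 100) / 100 = 550.
+ */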
+
+/* set backlight brightness to level in range [0..max], scaling wrt hw min */
+static void intel_panel_set_backlight(const struct drm_connector_state *conn_state,
+ u32 user_level, u32 user_max)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ u32 hw_level;
+
+ if (!panel->backlight.present)
+ return;
+
+ mutex_lock(&dev_priv->display.backlight.lock);
+
+ drm_WARN_ON(&dev_priv->drm, panel->backlight.max == 0);
+
+ hw_level = scale_user_to_hw(connector, user_level, user_max);
+ panel->backlight.level = hw_level;
+
+ if (panel->backlight.enabled)
+ intel_panel_actually_set_backlight(conn_state, hw_level);
+
+ mutex_unlock(&dev_priv->display.backlight.lock);
+}
+
+static int intel_backlight_device_update_status(struct backlight_device *bd)
+{
+ struct intel_connector *connector = bl_get_data(bd);
+ struct intel_panel *panel = &connector->panel;
+ struct drm_device *dev = connector->base.dev;
+
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+ DRM_DEBUG_KMS("updating intel_backlight, brightness=%d/%d\n",
+ bd->props.brightness, bd->props.max_brightness);
+ intel_panel_set_backlight(connector->base.state, bd->props.brightness,
+ bd->props.max_brightness);
+
+ /*
+ * Allow flipping bl_power as a sub-state of enabled. Sadly the
+ * backlight class device does not make it easy to differentiate
+ * between callbacks for brightness and bl_power, so our backlight_power
+ * callback needs to take this into account.
+ */
+ if (panel->backlight.enabled) {
+ if (panel->backlight.power) {
+ bool enable = bd->props.power == FB_BLANK_UNBLANK &&
+ bd->props.brightness != 0;
+ panel->backlight.power(connector, enable);
+ }
+ } else {
+ bd->props.power = FB_BLANK_POWERDOWN;
+ }
+
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
+ return 0;
+}
+
+static int intel_backlight_device_get_brightness(struct backlight_device *bd)
+{
+ struct intel_connector *connector = bl_get_data(bd);
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ intel_wakeref_t wakeref;
+ int ret = 0;
+
+ with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
+ u32 hw_level;
+
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+
+ hw_level = intel_panel_get_backlight(connector);
+ ret = scale_hw_to_user(connector,
+ hw_level, bd->props.max_brightness);
+
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
+ }
+
+ return ret;
+}
+
+static const struct backlight_ops intel_backlight_device_ops = {
+ .update_status = intel_backlight_device_update_status,
+ .get_brightness = intel_backlight_device_get_brightness,
+};
+
+int intel_backlight_device_register(struct intel_connector *connector)
+{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ struct backlight_properties props;
+ struct backlight_device *bd;
+ const char *name;
+ int ret = 0;
+
+ if (WARN_ON(panel->backlight.device))
+ return -ENODEV;
+
+ if (!panel->backlight.present)
+ return 0;
+
+ WARN_ON(panel->backlight.max == 0);
+
+ if (!acpi_video_backlight_use_native()) {
+ drm_info(&i915->drm, "Skipping intel_backlight registration\n");
+ return 0;
+ }
+
+ memset(&props, 0, sizeof(props));
+ props.type = BACKLIGHT_RAW;
+
+ /*
+ * Note: Everything should work even if the backlight device max
+	 * presented to userspace is arbitrarily chosen.
+ */
+ props.max_brightness = panel->backlight.max;
+ props.brightness = scale_hw_to_user(connector,
+ panel->backlight.level,
+ props.max_brightness);
+
+ if (panel->backlight.enabled)
+ props.power = FB_BLANK_UNBLANK;
+ else
+ props.power = FB_BLANK_POWERDOWN;
+
+ name = kstrdup("intel_backlight", GFP_KERNEL);
+ if (!name)
+ return -ENOMEM;
+
+ bd = backlight_device_get_by_name(name);
+ if (bd) {
+ put_device(&bd->dev);
+ /*
+ * Using the same name independent of the drm device or connector
+ * prevents registration of multiple backlight devices in the
+ * driver. However, we need to use the default name for backward
+ * compatibility. Use unique names for subsequent backlight devices as a
+ * fallback when the default name already exists.
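+		 * The fallback name then looks like e.g. "card0-eDP-1-backlight".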
+ */
+ kfree(name);
+ name = kasprintf(GFP_KERNEL, "card%d-%s-backlight",
+ i915->drm.primary->index, connector->base.name);
+ if (!name)
+ return -ENOMEM;
+ }
+ bd = backlight_device_register(name, connector->base.kdev, connector,
+ &intel_backlight_device_ops, &props);
+
+ if (IS_ERR(bd)) {
+ drm_err(&i915->drm,
+ "[CONNECTOR:%d:%s] backlight device %s register failed: %ld\n",
+ connector->base.base.id, connector->base.name, name, PTR_ERR(bd));
+ ret = PTR_ERR(bd);
+ goto out;
+ }
+
+ panel->backlight.device = bd;
+
+ drm_dbg_kms(&i915->drm,
+ "[CONNECTOR:%d:%s] backlight device %s registered\n",
+ connector->base.base.id, connector->base.name, name);
+
+out:
+ kfree(name);
+
+ return ret;
+}
+
+void intel_backlight_device_unregister(struct intel_connector *connector)
+{
+ struct intel_panel *panel = &connector->panel;
+
+ if (panel->backlight.device) {
+ backlight_device_unregister(panel->backlight.device);
+ panel->backlight.device = NULL;
+ }
+}
+#endif /* CONFIG_BACKLIGHT_CLASS_DEVICE */
+
+/*
+ * CNP: PWM clock frequency is 19.2 MHz or 24 MHz.
+ * PWM increment = 1
+ */
+static u32 cnp_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+
+ return DIV_ROUND_CLOSEST(KHz(RUNTIME_INFO(dev_priv)->rawclk_freq),
+ pwm_freq_hz);
+}
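+
+/*
+ * For example, with a 19.2 MHz raw clock and the 200 Hz fallback VBT
+ * frequency this yields 19,200,000 / 200 = 96,000 clock periods per
+ * PWM cycle.
+ */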
+
+/*
+ * BXT: PWM clock frequency = 19.2 MHz.
+ */
+static u32 bxt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
+{
+ return DIV_ROUND_CLOSEST(KHz(19200), pwm_freq_hz);
+}
+
+/*
+ * SPT: This value represents the period of the PWM stream in clock periods
+ * multiplied by 16 (default increment) or 128 (alternate increment selected in
+ * SCHICKEN_1 bit 0). PWM clock is 24 MHz.
+ */
+static u32 spt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
+{
+ struct intel_panel *panel = &connector->panel;
+ u32 mul;
+
+ if (panel->backlight.alternate_pwm_increment)
+ mul = 128;
+ else
+ mul = 16;
+
+ return DIV_ROUND_CLOSEST(MHz(24), pwm_freq_hz * mul);
+}
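+
+/*
+ * For example, with the default increment (mul = 16) and the 200 Hz
+ * fallback VBT frequency: 24,000,000 / (200 * 16) = 7500.
+ */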
+
+/*
+ * LPT: This value represents the period of the PWM stream in clock periods
+ * multiplied by 128 (default increment) or 16 (alternate increment, selected in
+ * LPT SOUTH_CHICKEN2 register bit 5).
+ */
+static u32 lpt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ u32 mul, clock;
+
+ if (panel->backlight.alternate_pwm_increment)
+ mul = 16;
+ else
+ mul = 128;
+
+ if (HAS_PCH_LPT_H(dev_priv))
+ clock = MHz(135); /* LPT:H */
+ else
+ clock = MHz(24); /* LPT:LP */
+
+ return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * mul);
+}
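+
+/*
+ * For example, on LPT:H with the default increment (mul = 128) and the
+ * 200 Hz fallback VBT frequency: 135,000,000 / (200 * 128) ~= 5273.
+ */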
+
+/*
+ * ILK/SNB/IVB: This value represents the period of the PWM stream in PCH
+ * display raw clocks multiplied by 128.
+ */
+static u32 pch_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+
+ return DIV_ROUND_CLOSEST(KHz(RUNTIME_INFO(dev_priv)->rawclk_freq),
+ pwm_freq_hz * 128);
+}
+
+/*
+ * Gen2: This field determines the number of time base events (display core
+ * clock frequency/32) in total for a complete cycle of modulated backlight
+ * control.
+ *
+ * Gen3: A time base event equals the display core clock ([DevPNV] HRAW clock)
+ * divided by 32.
+ */
+static u32 i9xx_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ int clock;
+
+ if (IS_PINEVIEW(dev_priv))
+ clock = KHz(RUNTIME_INFO(dev_priv)->rawclk_freq);
+ else
+ clock = KHz(dev_priv->display.cdclk.hw.cdclk);
+
+ return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * 32);
+}
+
+/*
+ * Gen4: This value represents the period of the PWM stream in display core
+ * clocks ([DevCTG] HRAW clocks) multiplied by 128.
+ */
+static u32 i965_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ int clock;
+
+ if (IS_G4X(dev_priv))
+ clock = KHz(RUNTIME_INFO(dev_priv)->rawclk_freq);
+ else
+ clock = KHz(dev_priv->display.cdclk.hw.cdclk);
+
+ return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * 128);
+}
+
+/*
+ * VLV: This value represents the period of the PWM stream in display core
+ * clocks ([DevCTG] 200MHz HRAW clocks) multiplied by 128 or 25MHz S0IX clocks
+ * multiplied by 16. CHV uses a 19.2MHz S0IX clock.
+ */
+static u32 vlv_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ int mul, clock;
+
+ if ((intel_de_read(dev_priv, CBR1_VLV) & CBR_PWM_CLOCK_MUX_SELECT) == 0) {
+ if (IS_CHERRYVIEW(dev_priv))
+ clock = KHz(19200);
+ else
+ clock = MHz(25);
+ mul = 16;
+ } else {
+ clock = KHz(RUNTIME_INFO(dev_priv)->rawclk_freq);
+ mul = 128;
+ }
+
+ return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * mul);
+}
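+
+/*
+ * For example, on CHV with the S0IX clock selected and the 200 Hz
+ * fallback VBT frequency: 19,200,000 / (200 * 16) = 6000.
+ */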
+
+static u16 get_vbt_pwm_freq(struct intel_connector *connector)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ u16 pwm_freq_hz = connector->panel.vbt.backlight.pwm_freq_hz;
+
+ if (pwm_freq_hz) {
+ drm_dbg_kms(&dev_priv->drm,
+ "VBT defined backlight frequency %u Hz\n",
+ pwm_freq_hz);
+ } else {
+ pwm_freq_hz = 200;
+ drm_dbg_kms(&dev_priv->drm,
+ "default backlight frequency %u Hz\n",
+ pwm_freq_hz);
+ }
+
+ return pwm_freq_hz;
+}
+
+static u32 get_backlight_max_vbt(struct intel_connector *connector)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ u16 pwm_freq_hz = get_vbt_pwm_freq(connector);
+ u32 pwm;
+
+ if (!panel->backlight.pwm_funcs->hz_to_pwm) {
+ drm_dbg_kms(&dev_priv->drm,
+ "backlight frequency conversion not supported\n");
+ return 0;
+ }
+
+ pwm = panel->backlight.pwm_funcs->hz_to_pwm(connector, pwm_freq_hz);
+ if (!pwm) {
+ drm_dbg_kms(&dev_priv->drm,
+ "backlight frequency conversion failed\n");
+ return 0;
+ }
+
+ return pwm;
+}
+
+/*
+ * Note: The setup hooks can't assume pipe is set!
+ */
+static u32 get_backlight_min_vbt(struct intel_connector *connector)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ int min;
+
+ drm_WARN_ON(&dev_priv->drm, panel->backlight.pwm_level_max == 0);
+
+ /*
+ * XXX: If the vbt value is 255, it makes min equal to max, which leads
+ * to problems. There are such machines out there. Either our
+ * interpretation is wrong or the vbt has bogus data. Or both. Safeguard
+ * against this by letting the minimum be at most (arbitrarily chosen)
+ * 25% of the max.
+ */
+ min = clamp_t(int, connector->panel.vbt.backlight.min_brightness, 0, 64);
+ if (min != connector->panel.vbt.backlight.min_brightness) {
+ drm_dbg_kms(&dev_priv->drm,
+ "clamping VBT min backlight %d/255 to %d/255\n",
+ connector->panel.vbt.backlight.min_brightness, min);
+ }
+
+ /* vbt value is a coefficient in range [0..255] */
+ return scale(min, 0, 255, 0, panel->backlight.pwm_level_max);
+}
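+
+/*
+ * For example, a VBT minimum of 64 (the clamp ceiling) scales to
+ * 64 / 255 ~= 25% of pwm_level_max; with a hypothetical pwm_level_max
+ * of 7812 that is scale(64, 0, 255, 0, 7812) = 1961.
+ */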
+
+static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unused)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ u32 cpu_ctl2, pch_ctl1, pch_ctl2, val;
+ bool alt, cpu_mode;
+
+ if (HAS_PCH_LPT(dev_priv))
+ alt = intel_de_read(dev_priv, SOUTH_CHICKEN2) & LPT_PWM_GRANULARITY;
+ else
+ alt = intel_de_read(dev_priv, SOUTH_CHICKEN1) & SPT_PWM_GRANULARITY;
+ panel->backlight.alternate_pwm_increment = alt;
+
+ pch_ctl1 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1);
+ panel->backlight.active_low_pwm = pch_ctl1 & BLM_PCH_POLARITY;
+
+ pch_ctl2 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL2);
+ panel->backlight.pwm_level_max = pch_ctl2 >> 16;
+
+ cpu_ctl2 = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2);
+
+ if (!panel->backlight.pwm_level_max)
+ panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
+
+ if (!panel->backlight.pwm_level_max)
+ return -ENODEV;
+
+ panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
+
+ panel->backlight.pwm_enabled = pch_ctl1 & BLM_PCH_PWM_ENABLE;
+
+ cpu_mode = panel->backlight.pwm_enabled && HAS_PCH_LPT(dev_priv) &&
+ !(pch_ctl1 & BLM_PCH_OVERRIDE_ENABLE) &&
+ (cpu_ctl2 & BLM_PWM_ENABLE);
+
+ if (cpu_mode) {
+ val = pch_get_backlight(connector, unused);
+
+ drm_dbg_kms(&dev_priv->drm,
+ "CPU backlight register was enabled, switching to PCH override\n");
+
+ /* Write converted CPU PWM value to PCH override register */
+ lpt_set_backlight(connector->base.state, val);
+ intel_de_write(dev_priv, BLC_PWM_PCH_CTL1,
+ pch_ctl1 | BLM_PCH_OVERRIDE_ENABLE);
+
+ intel_de_write(dev_priv, BLC_PWM_CPU_CTL2,
+ cpu_ctl2 & ~BLM_PWM_ENABLE);
+ }
+
+ return 0;
+}
+
+static int pch_setup_backlight(struct intel_connector *connector, enum pipe unused)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ u32 cpu_ctl2, pch_ctl1, pch_ctl2;
+
+ pch_ctl1 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1);
+ panel->backlight.active_low_pwm = pch_ctl1 & BLM_PCH_POLARITY;
+
+ pch_ctl2 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL2);
+ panel->backlight.pwm_level_max = pch_ctl2 >> 16;
+
+ if (!panel->backlight.pwm_level_max)
+ panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
+
+ if (!panel->backlight.pwm_level_max)
+ return -ENODEV;
+
+ panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
+
+ cpu_ctl2 = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2);
+ panel->backlight.pwm_enabled = (cpu_ctl2 & BLM_PWM_ENABLE) &&
+ (pch_ctl1 & BLM_PCH_PWM_ENABLE);
+
+ return 0;
+}
+
+static int i9xx_setup_backlight(struct intel_connector *connector, enum pipe unused)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ u32 ctl, val;
+
+ ctl = intel_de_read(dev_priv, BLC_PWM_CTL);
+
+ if (DISPLAY_VER(dev_priv) == 2 || IS_I915GM(dev_priv) || IS_I945GM(dev_priv))
+ panel->backlight.combination_mode = ctl & BLM_LEGACY_MODE;
+
+ if (IS_PINEVIEW(dev_priv))
+ panel->backlight.active_low_pwm = ctl & BLM_POLARITY_PNV;
+
+ panel->backlight.pwm_level_max = ctl >> 17;
+
+ if (!panel->backlight.pwm_level_max) {
+ panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
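+		/*
+		 * The frequency field holds only the most significant 15 bits
+		 * of the cycle count (the programmed value is implicitly
+		 * doubled), so halve the VBT-derived count.
+		 */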
+ panel->backlight.pwm_level_max >>= 1;
+ }
+
+ if (!panel->backlight.pwm_level_max)
+ return -ENODEV;
+
+ if (panel->backlight.combination_mode)
+ panel->backlight.pwm_level_max *= 0xff;
+
+ panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
+
+ val = i9xx_get_backlight(connector, unused);
+ val = intel_backlight_invert_pwm_level(connector, val);
+ val = clamp(val, panel->backlight.pwm_level_min, panel->backlight.pwm_level_max);
+
+ panel->backlight.pwm_enabled = val != 0;
+
+ return 0;
+}
+
+static int i965_setup_backlight(struct intel_connector *connector, enum pipe unused)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ u32 ctl, ctl2;
+
+ ctl2 = intel_de_read(dev_priv, BLC_PWM_CTL2);
+ panel->backlight.combination_mode = ctl2 & BLM_COMBINATION_MODE;
+ panel->backlight.active_low_pwm = ctl2 & BLM_POLARITY_I965;
+
+ ctl = intel_de_read(dev_priv, BLC_PWM_CTL);
+ panel->backlight.pwm_level_max = ctl >> 16;
+
+ if (!panel->backlight.pwm_level_max)
+ panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
+
+ if (!panel->backlight.pwm_level_max)
+ return -ENODEV;
+
+ if (panel->backlight.combination_mode)
+ panel->backlight.pwm_level_max *= 0xff;
+
+ panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
+
+ panel->backlight.pwm_enabled = ctl2 & BLM_PWM_ENABLE;
+
+ return 0;
+}
+
+static int vlv_setup_backlight(struct intel_connector *connector, enum pipe pipe)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ u32 ctl, ctl2;
+
+ if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B))
+ return -ENODEV;
+
+ ctl2 = intel_de_read(dev_priv, VLV_BLC_PWM_CTL2(pipe));
+ panel->backlight.active_low_pwm = ctl2 & BLM_POLARITY_I965;
+
+ ctl = intel_de_read(dev_priv, VLV_BLC_PWM_CTL(pipe));
+ panel->backlight.pwm_level_max = ctl >> 16;
+
+ if (!panel->backlight.pwm_level_max)
+ panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
+
+ if (!panel->backlight.pwm_level_max)
+ return -ENODEV;
+
+ panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
+
+ panel->backlight.pwm_enabled = ctl2 & BLM_PWM_ENABLE;
+
+ return 0;
+}
+
+static int
+bxt_setup_backlight(struct intel_connector *connector, enum pipe unused)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ u32 pwm_ctl, val;
+
+ panel->backlight.controller = connector->panel.vbt.backlight.controller;
+
+ pwm_ctl = intel_de_read(dev_priv,
+ BXT_BLC_PWM_CTL(panel->backlight.controller));
+
+ /* Controller 1 uses the utility pin. */
+ if (panel->backlight.controller == 1) {
+ val = intel_de_read(dev_priv, UTIL_PIN_CTL);
+ panel->backlight.util_pin_active_low =
+ val & UTIL_PIN_POLARITY;
+ }
+
+ panel->backlight.active_low_pwm = pwm_ctl & BXT_BLC_PWM_POLARITY;
+ panel->backlight.pwm_level_max =
+ intel_de_read(dev_priv, BXT_BLC_PWM_FREQ(panel->backlight.controller));
+
+ if (!panel->backlight.pwm_level_max)
+ panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
+
+ if (!panel->backlight.pwm_level_max)
+ return -ENODEV;
+
+ panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
+
+ panel->backlight.pwm_enabled = pwm_ctl & BXT_BLC_PWM_ENABLE;
+
+ return 0;
+}
+
+static int
+cnp_setup_backlight(struct intel_connector *connector, enum pipe unused)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ u32 pwm_ctl;
+
+ /*
+ * CNP has the BXT implementation of backlight, but with only one
+ * controller. TODO: ICP has multiple controllers but we only use
+ * controller 0 for now.
+ */
+ panel->backlight.controller = 0;
+
+ pwm_ctl = intel_de_read(dev_priv,
+ BXT_BLC_PWM_CTL(panel->backlight.controller));
+
+ panel->backlight.active_low_pwm = pwm_ctl & BXT_BLC_PWM_POLARITY;
+ panel->backlight.pwm_level_max =
+ intel_de_read(dev_priv, BXT_BLC_PWM_FREQ(panel->backlight.controller));
+
+ if (!panel->backlight.pwm_level_max)
+ panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
+
+ if (!panel->backlight.pwm_level_max)
+ return -ENODEV;
+
+ panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
+
+ panel->backlight.pwm_enabled = pwm_ctl & BXT_BLC_PWM_ENABLE;
+
+ return 0;
+}
+
+static int ext_pwm_setup_backlight(struct intel_connector *connector,
+ enum pipe pipe)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_panel *panel = &connector->panel;
+ const char *desc;
+ u32 level;
+
+ /* Get the right PWM chip for DSI backlight according to VBT */
+ if (connector->panel.vbt.dsi.config->pwm_blc == PPS_BLC_PMIC) {
+ panel->backlight.pwm = pwm_get(dev->dev, "pwm_pmic_backlight");
+ desc = "PMIC";
+ } else {
+ panel->backlight.pwm = pwm_get(dev->dev, "pwm_soc_backlight");
+ desc = "SoC";
+ }
+
+ if (IS_ERR(panel->backlight.pwm)) {
+ drm_err(&dev_priv->drm, "Failed to get the %s PWM chip\n",
+ desc);
+ panel->backlight.pwm = NULL;
+ return -ENODEV;
+ }
+
+ panel->backlight.pwm_level_max = 100; /* 100% */
+ panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
+
+ if (pwm_is_enabled(panel->backlight.pwm)) {
+ /* PWM is already enabled, use existing settings */
+ pwm_get_state(panel->backlight.pwm, &panel->backlight.pwm_state);
+
+ level = pwm_get_relative_duty_cycle(&panel->backlight.pwm_state,
+ 100);
+ level = intel_backlight_invert_pwm_level(connector, level);
+ panel->backlight.pwm_enabled = true;
+
+ drm_dbg_kms(&dev_priv->drm, "PWM already enabled at freq %ld, VBT freq %d, level %d\n",
+ NSEC_PER_SEC / (unsigned long)panel->backlight.pwm_state.period,
+ get_vbt_pwm_freq(connector), level);
+ } else {
+ /* Set period from VBT frequency, leave other settings at 0. */
+ panel->backlight.pwm_state.period =
+ NSEC_PER_SEC / get_vbt_pwm_freq(connector);
+ }
+
+ drm_info(&dev_priv->drm, "Using %s PWM for LCD backlight control\n",
+ desc);
+ return 0;
+}
+
+static void intel_pwm_set_backlight(const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct intel_panel *panel = &connector->panel;
+
+ panel->backlight.pwm_funcs->set(conn_state,
+ intel_backlight_invert_pwm_level(connector, level));
+}
+
+static u32 intel_pwm_get_backlight(struct intel_connector *connector, enum pipe pipe)
+{
+ struct intel_panel *panel = &connector->panel;
+
+ return intel_backlight_invert_pwm_level(connector,
+ panel->backlight.pwm_funcs->get(connector, pipe));
+}
+
+static void intel_pwm_enable_backlight(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct intel_panel *panel = &connector->panel;
+
+ panel->backlight.pwm_funcs->enable(crtc_state, conn_state,
+ intel_backlight_invert_pwm_level(connector, level));
+}
+
+static void intel_pwm_disable_backlight(const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct intel_panel *panel = &connector->panel;
+
+ panel->backlight.pwm_funcs->disable(conn_state,
+ intel_backlight_invert_pwm_level(connector, level));
+}
+
+static int intel_pwm_setup_backlight(struct intel_connector *connector, enum pipe pipe)
+{
+ struct intel_panel *panel = &connector->panel;
+ int ret = panel->backlight.pwm_funcs->setup(connector, pipe);
+
+ if (ret < 0)
+ return ret;
+
+ panel->backlight.min = panel->backlight.pwm_level_min;
+ panel->backlight.max = panel->backlight.pwm_level_max;
+ panel->backlight.level = intel_pwm_get_backlight(connector, pipe);
+ panel->backlight.enabled = panel->backlight.pwm_enabled;
+
+ return 0;
+}
+
+void intel_backlight_update(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+
+ if (!panel->backlight.present)
+ return;
+
+ mutex_lock(&dev_priv->display.backlight.lock);
+ if (!panel->backlight.enabled)
+ __intel_backlight_enable(crtc_state, conn_state);
+
+ mutex_unlock(&dev_priv->display.backlight.lock);
+}
+
+int intel_backlight_setup(struct intel_connector *connector, enum pipe pipe)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ int ret;
+
+ if (!connector->panel.vbt.backlight.present) {
+ if (intel_has_quirk(dev_priv, QUIRK_BACKLIGHT_PRESENT)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "no backlight present per VBT, but present per quirk\n");
+ } else {
+ drm_dbg_kms(&dev_priv->drm,
+ "no backlight present per VBT\n");
+ return 0;
+ }
+ }
+
+ /* ensure intel_panel has been initialized first */
+ if (drm_WARN_ON(&dev_priv->drm, !panel->backlight.funcs))
+ return -ENODEV;
+
+ /* set level and max in panel struct */
+ mutex_lock(&dev_priv->display.backlight.lock);
+ ret = panel->backlight.funcs->setup(connector, pipe);
+ mutex_unlock(&dev_priv->display.backlight.lock);
+
+ if (ret) {
+ drm_dbg_kms(&dev_priv->drm,
+ "failed to setup backlight for connector %s\n",
+ connector->base.name);
+ return ret;
+ }
+
+ panel->backlight.present = true;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "Connector %s backlight initialized, %s, brightness %u/%u\n",
+ connector->base.name,
+ str_enabled_disabled(panel->backlight.enabled),
+ panel->backlight.level, panel->backlight.max);
+
+ return 0;
+}
+
+void intel_backlight_destroy(struct intel_panel *panel)
+{
+ /* dispose of the pwm */
+ if (panel->backlight.pwm)
+ pwm_put(panel->backlight.pwm);
+
+ panel->backlight.present = false;
+}
+
+static const struct intel_panel_bl_funcs bxt_pwm_funcs = {
+ .setup = bxt_setup_backlight,
+ .enable = bxt_enable_backlight,
+ .disable = bxt_disable_backlight,
+ .set = bxt_set_backlight,
+ .get = bxt_get_backlight,
+ .hz_to_pwm = bxt_hz_to_pwm,
+};
+
+static const struct intel_panel_bl_funcs cnp_pwm_funcs = {
+ .setup = cnp_setup_backlight,
+ .enable = cnp_enable_backlight,
+ .disable = cnp_disable_backlight,
+ .set = bxt_set_backlight,
+ .get = bxt_get_backlight,
+ .hz_to_pwm = cnp_hz_to_pwm,
+};
+
+static const struct intel_panel_bl_funcs lpt_pwm_funcs = {
+ .setup = lpt_setup_backlight,
+ .enable = lpt_enable_backlight,
+ .disable = lpt_disable_backlight,
+ .set = lpt_set_backlight,
+ .get = lpt_get_backlight,
+ .hz_to_pwm = lpt_hz_to_pwm,
+};
+
+static const struct intel_panel_bl_funcs spt_pwm_funcs = {
+ .setup = lpt_setup_backlight,
+ .enable = lpt_enable_backlight,
+ .disable = lpt_disable_backlight,
+ .set = lpt_set_backlight,
+ .get = lpt_get_backlight,
+ .hz_to_pwm = spt_hz_to_pwm,
+};
+
+static const struct intel_panel_bl_funcs pch_pwm_funcs = {
+ .setup = pch_setup_backlight,
+ .enable = pch_enable_backlight,
+ .disable = pch_disable_backlight,
+ .set = pch_set_backlight,
+ .get = pch_get_backlight,
+ .hz_to_pwm = pch_hz_to_pwm,
+};
+
+static const struct intel_panel_bl_funcs ext_pwm_funcs = {
+ .setup = ext_pwm_setup_backlight,
+ .enable = ext_pwm_enable_backlight,
+ .disable = ext_pwm_disable_backlight,
+ .set = ext_pwm_set_backlight,
+ .get = ext_pwm_get_backlight,
+};
+
+static const struct intel_panel_bl_funcs vlv_pwm_funcs = {
+ .setup = vlv_setup_backlight,
+ .enable = vlv_enable_backlight,
+ .disable = vlv_disable_backlight,
+ .set = vlv_set_backlight,
+ .get = vlv_get_backlight,
+ .hz_to_pwm = vlv_hz_to_pwm,
+};
+
+static const struct intel_panel_bl_funcs i965_pwm_funcs = {
+ .setup = i965_setup_backlight,
+ .enable = i965_enable_backlight,
+ .disable = i965_disable_backlight,
+ .set = i9xx_set_backlight,
+ .get = i9xx_get_backlight,
+ .hz_to_pwm = i965_hz_to_pwm,
+};
+
+static const struct intel_panel_bl_funcs i9xx_pwm_funcs = {
+ .setup = i9xx_setup_backlight,
+ .enable = i9xx_enable_backlight,
+ .disable = i9xx_disable_backlight,
+ .set = i9xx_set_backlight,
+ .get = i9xx_get_backlight,
+ .hz_to_pwm = i9xx_hz_to_pwm,
+};
+
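+/*
+ * Generic glue used when no connector-specific backlight interface
+ * (e.g. DSI DCS or eDP DPCD) has taken over: these hooks forward to the
+ * chip-specific pwm_funcs, applying PWM polarity inversion both ways.
+ */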
+static const struct intel_panel_bl_funcs pwm_bl_funcs = {
+ .setup = intel_pwm_setup_backlight,
+ .enable = intel_pwm_enable_backlight,
+ .disable = intel_pwm_disable_backlight,
+ .set = intel_pwm_set_backlight,
+ .get = intel_pwm_get_backlight,
+};
+
+/* Set up chip specific backlight functions */
+void intel_backlight_init_funcs(struct intel_panel *panel)
+{
+ struct intel_connector *connector =
+ container_of(panel, struct intel_connector, panel);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+
+ if (connector->base.connector_type == DRM_MODE_CONNECTOR_DSI &&
+ intel_dsi_dcs_init_backlight_funcs(connector) == 0)
+ return;
+
+ if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ panel->backlight.pwm_funcs = &bxt_pwm_funcs;
+ } else if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP) {
+ panel->backlight.pwm_funcs = &cnp_pwm_funcs;
+ } else if (INTEL_PCH_TYPE(dev_priv) >= PCH_LPT) {
+ if (HAS_PCH_LPT(dev_priv))
+ panel->backlight.pwm_funcs = &lpt_pwm_funcs;
+ else
+ panel->backlight.pwm_funcs = &spt_pwm_funcs;
+ } else if (HAS_PCH_SPLIT(dev_priv)) {
+ panel->backlight.pwm_funcs = &pch_pwm_funcs;
+ } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ if (connector->base.connector_type == DRM_MODE_CONNECTOR_DSI) {
+ panel->backlight.pwm_funcs = &ext_pwm_funcs;
+ } else {
+ panel->backlight.pwm_funcs = &vlv_pwm_funcs;
+ }
+ } else if (DISPLAY_VER(dev_priv) == 4) {
+ panel->backlight.pwm_funcs = &i965_pwm_funcs;
+ } else {
+ panel->backlight.pwm_funcs = &i9xx_pwm_funcs;
+ }
+
+ if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) {
+ if (intel_dp_aux_init_backlight_funcs(connector) == 0)
+ return;
+
+ if (!intel_has_quirk(dev_priv, QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK))
+ connector->panel.backlight.power = intel_pps_backlight_power;
+ }
+
+ /* We're using a standard PWM backlight interface */
+ panel->backlight.funcs = &pwm_bl_funcs;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_backlight.h b/drivers/gpu/drm/i915/display/intel_backlight.h
new file mode 100644
index 000000000..339643f63
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_backlight.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef __INTEL_BACKLIGHT_H__
+#define __INTEL_BACKLIGHT_H__
+
+#include <linux/types.h>
+
+struct drm_connector_state;
+struct intel_atomic_state;
+struct intel_connector;
+struct intel_crtc_state;
+struct intel_encoder;
+struct intel_panel;
+enum pipe;
+
+void intel_backlight_init_funcs(struct intel_panel *panel);
+int intel_backlight_setup(struct intel_connector *connector, enum pipe pipe);
+void intel_backlight_destroy(struct intel_panel *panel);
+
+void intel_backlight_enable(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
+void intel_backlight_update(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
+void intel_backlight_disable(const struct drm_connector_state *old_conn_state);
+
+void intel_backlight_set_acpi(const struct drm_connector_state *conn_state,
+ u32 level, u32 max);
+void intel_backlight_set_pwm_level(const struct drm_connector_state *conn_state,
+ u32 level);
+u32 intel_backlight_invert_pwm_level(struct intel_connector *connector, u32 level);
+u32 intel_backlight_level_to_pwm(struct intel_connector *connector, u32 level);
+u32 intel_backlight_level_from_pwm(struct intel_connector *connector, u32 val);
+
+#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
+int intel_backlight_device_register(struct intel_connector *connector);
+void intel_backlight_device_unregister(struct intel_connector *connector);
+#else /* CONFIG_BACKLIGHT_CLASS_DEVICE */
+static inline int intel_backlight_device_register(struct intel_connector *connector)
+{
+ return 0;
+}
+static inline void intel_backlight_device_unregister(struct intel_connector *connector)
+{
+}
+#endif /* CONFIG_BACKLIGHT_CLASS_DEVICE */
+
+#endif /* __INTEL_BACKLIGHT_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_backlight_regs.h b/drivers/gpu/drm/i915/display/intel_backlight_regs.h
new file mode 100644
index 000000000..50c1210f6
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_backlight_regs.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __INTEL_BACKLIGHT_REGS_H__
+#define __INTEL_BACKLIGHT_REGS_H__
+
+#include "i915_reg_defs.h"
+
+#define _VLV_BLC_PWM_CTL2_A (DISPLAY_MMIO_BASE(dev_priv) + 0x61250)
+#define _VLV_BLC_PWM_CTL2_B (DISPLAY_MMIO_BASE(dev_priv) + 0x61350)
+#define VLV_BLC_PWM_CTL2(pipe) _MMIO_PIPE(pipe, _VLV_BLC_PWM_CTL2_A, \
+ _VLV_BLC_PWM_CTL2_B)
+
+#define _VLV_BLC_PWM_CTL_A (DISPLAY_MMIO_BASE(dev_priv) + 0x61254)
+#define _VLV_BLC_PWM_CTL_B (DISPLAY_MMIO_BASE(dev_priv) + 0x61354)
+#define VLV_BLC_PWM_CTL(pipe) _MMIO_PIPE(pipe, _VLV_BLC_PWM_CTL_A, \
+ _VLV_BLC_PWM_CTL_B)
+
+#define _VLV_BLC_HIST_CTL_A (DISPLAY_MMIO_BASE(dev_priv) + 0x61260)
+#define _VLV_BLC_HIST_CTL_B (DISPLAY_MMIO_BASE(dev_priv) + 0x61360)
+#define VLV_BLC_HIST_CTL(pipe) _MMIO_PIPE(pipe, _VLV_BLC_HIST_CTL_A, \
+ _VLV_BLC_HIST_CTL_B)
+
+/* Backlight control */
+#define BLC_PWM_CTL2 _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61250) /* 965+ only */
+#define BLM_PWM_ENABLE (1 << 31)
+#define BLM_COMBINATION_MODE (1 << 30) /* gen4 only */
+#define BLM_PIPE_SELECT (1 << 29)
+#define BLM_PIPE_SELECT_IVB (3 << 29)
+#define BLM_PIPE_A (0 << 29)
+#define BLM_PIPE_B (1 << 29)
+#define BLM_PIPE_C (2 << 29) /* ivb + */
+#define BLM_TRANSCODER_A BLM_PIPE_A /* hsw */
+#define BLM_TRANSCODER_B BLM_PIPE_B
+#define BLM_TRANSCODER_C BLM_PIPE_C
+#define BLM_TRANSCODER_EDP (3 << 29)
+#define BLM_PIPE(pipe) ((pipe) << 29)
+#define BLM_POLARITY_I965 (1 << 28) /* gen4 only */
+#define BLM_PHASE_IN_INTERUPT_STATUS (1 << 26)
+#define BLM_PHASE_IN_ENABLE (1 << 25)
+#define BLM_PHASE_IN_INTERUPT_ENABL (1 << 24)
+#define BLM_PHASE_IN_TIME_BASE_SHIFT (16)
+#define BLM_PHASE_IN_TIME_BASE_MASK (0xff << 16)
+#define BLM_PHASE_IN_COUNT_SHIFT (8)
+#define BLM_PHASE_IN_COUNT_MASK (0xff << 8)
+#define BLM_PHASE_IN_INCR_SHIFT (0)
+#define BLM_PHASE_IN_INCR_MASK (0xff << 0)
+#define BLC_PWM_CTL _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61254)
+/*
+ * This is the most significant 15 bits of the number of backlight cycles in a
+ * complete cycle of the modulated backlight control.
+ *
+ * The actual value is this field multiplied by two.
+ */
+#define BACKLIGHT_MODULATION_FREQ_SHIFT (17)
+#define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17)
+#define BLM_LEGACY_MODE (1 << 16) /* gen2 only */
+/*
+ * This is the number of cycles out of the backlight modulation cycle for which
+ * the backlight is on.
+ *
+ * This field must be no greater than the number of cycles in the complete
+ * backlight modulation cycle.
+ */
+#define BACKLIGHT_DUTY_CYCLE_SHIFT (0)
+#define BACKLIGHT_DUTY_CYCLE_MASK (0xffff)
+#define BACKLIGHT_DUTY_CYCLE_MASK_PNV (0xfffe)
+#define BLM_POLARITY_PNV (1 << 0) /* pnv only */
+
+#define BLC_HIST_CTL _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61260)
+#define BLM_HISTOGRAM_ENABLE (1 << 31)
+
+/* New registers for PCH-split platforms. Safe where new bits show up, the
+ * register layout matches that of gen4 BLC_PWM_CTL[12]. */
+#define BLC_PWM_CPU_CTL2 _MMIO(0x48250)
+#define BLC_PWM_CPU_CTL _MMIO(0x48254)
+
+#define HSW_BLC_PWM2_CTL _MMIO(0x48350)
+
+/* PCH CTL1 is totally different, all but the below bits are reserved. CTL2 is
+ * like the normal CTL from gen4 and earlier. Hooray for confusing naming. */
+#define BLC_PWM_PCH_CTL1 _MMIO(0xc8250)
+#define BLM_PCH_PWM_ENABLE (1 << 31)
+#define BLM_PCH_OVERRIDE_ENABLE (1 << 30)
+#define BLM_PCH_POLARITY (1 << 29)
+#define BLC_PWM_PCH_CTL2 _MMIO(0xc8254)
+
+/* BXT backlight register definition. */
+#define _BXT_BLC_PWM_CTL1 0xC8250
+#define BXT_BLC_PWM_ENABLE (1 << 31)
+#define BXT_BLC_PWM_POLARITY (1 << 29)
+#define _BXT_BLC_PWM_FREQ1 0xC8254
+#define _BXT_BLC_PWM_DUTY1 0xC8258
+
+#define _BXT_BLC_PWM_CTL2 0xC8350
+#define _BXT_BLC_PWM_FREQ2 0xC8354
+#define _BXT_BLC_PWM_DUTY2 0xC8358
+
+#define BXT_BLC_PWM_CTL(controller) _MMIO_PIPE(controller, \
+ _BXT_BLC_PWM_CTL1, _BXT_BLC_PWM_CTL2)
+#define BXT_BLC_PWM_FREQ(controller) _MMIO_PIPE(controller, \
+ _BXT_BLC_PWM_FREQ1, _BXT_BLC_PWM_FREQ2)
+#define BXT_BLC_PWM_DUTY(controller) _MMIO_PIPE(controller, \
+ _BXT_BLC_PWM_DUTY1, _BXT_BLC_PWM_DUTY2)
+
+/* Utility pin */
+#define UTIL_PIN_CTL _MMIO(0x48400)
+#define UTIL_PIN_ENABLE (1 << 31)
+#define UTIL_PIN_PIPE_MASK (3 << 29)
+#define UTIL_PIN_PIPE(x) ((x) << 29)
+#define UTIL_PIN_MODE_MASK (0xf << 24)
+#define UTIL_PIN_MODE_DATA (0 << 24)
+#define UTIL_PIN_MODE_PWM (1 << 24)
+#define UTIL_PIN_MODE_VBLANK (4 << 24)
+#define UTIL_PIN_MODE_VSYNC (5 << 24)
+#define UTIL_PIN_MODE_EYE_LEVEL (8 << 24)
+#define UTIL_PIN_OUTPUT_DATA (1 << 23)
+#define UTIL_PIN_POLARITY (1 << 22)
+#define UTIL_PIN_DIRECTION_INPUT (1 << 19)
+#define UTIL_PIN_INPUT_DATA (1 << 16)
+
+#endif /* __INTEL_BACKLIGHT_REGS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
new file mode 100644
index 000000000..a70b70617
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -0,0 +1,3790 @@
+/*
+ * Copyright © 2006 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#include <drm/drm_edid.h>
+#include <drm/display/drm_dp_helper.h>
+#include <drm/display/drm_dsc_helper.h>
+
+#include "display/intel_display.h"
+#include "display/intel_display_types.h"
+#include "display/intel_gmbus.h"
+
+#include "i915_drv.h"
+#include "i915_reg.h"
+
+#define _INTEL_BIOS_PRIVATE
+#include "intel_vbt_defs.h"
+
+/**
+ * DOC: Video BIOS Table (VBT)
+ *
+ * The Video BIOS Table, or VBT, provides platform and board specific
+ * configuration information to the driver that is not discoverable or available
+ * through other means. The configuration is mostly related to display
+ * hardware. The VBT is available via the ACPI OpRegion or, on older systems, in
+ * the PCI ROM.
+ *
+ * The VBT consists of a VBT Header (defined as &struct vbt_header), a BDB
+ * Header (&struct bdb_header), and a number of BIOS Data Blocks (BDB) that
+ * contain the actual configuration information. The VBT Header, and thus the
+ * VBT, begins with "$VBT" signature. The VBT Header contains the offset of the
+ * BDB Header. The data blocks are concatenated after the BDB Header. The data
+ * blocks have a 1-byte Block ID, 2-byte Block Size, and Block Size bytes of
+ * data. (Block 53, the MIPI Sequence Block is an exception.)
+ *
+ * The driver parses the VBT during load. The relevant information is stored in
+ * driver private data for ease of use, and the actual VBT is not read after
+ * that.
+ */
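+
+/*
+ * Generic BDB block layout implied by the above (the MIPI Sequence
+ * Block v3+ being the exception):
+ *
+ *   byte 0      Block ID
+ *   bytes 1-2   Block Size (u16), excluding these 3 header bytes
+ *   bytes 3+    Block Size bytes of data
+ */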
+
+/* Wrapper for VBT child device config */
+struct intel_bios_encoder_data {
+ struct drm_i915_private *i915;
+
+ struct child_device_config child;
+ struct dsc_compression_parameters_entry *dsc;
+ struct list_head node;
+};
+
+#define SLAVE_ADDR1 0x70
+#define SLAVE_ADDR2 0x72
+
+/* Get BDB block size given a pointer to Block ID. */
+static u32 _get_blocksize(const u8 *block_base)
+{
+ /* The MIPI Sequence Block v3+ has a separate size field. */
+ if (*block_base == BDB_MIPI_SEQUENCE && *(block_base + 3) >= 3)
+ return *((const u32 *)(block_base + 4));
+ else
+ return *((const u16 *)(block_base + 1));
+}
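+
+/*
+ * The special case above relies on the MIPI Sequence Block v3+ layout:
+ * the byte at offset 3 is the sequence version, and the real size is
+ * the u32 at offset 4; the legacy u16 size at offset 1 is unused there.
+ */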
+
+/* Get BDB block size given a pointer to data after Block ID and Block Size. */
+static u32 get_blocksize(const void *block_data)
+{
+ return _get_blocksize(block_data - 3);
+}
+
+static const void *
+find_raw_section(const void *_bdb, enum bdb_block_id section_id)
+{
+ const struct bdb_header *bdb = _bdb;
+ const u8 *base = _bdb;
+ int index = 0;
+ u32 total, current_size;
+ enum bdb_block_id current_id;
+
+ /* skip to first section */
+ index += bdb->header_size;
+ total = bdb->bdb_size;
+
+ /* walk the sections looking for section_id */
+ while (index + 3 < total) {
+ current_id = *(base + index);
+ current_size = _get_blocksize(base + index);
+ index += 3;
+
+ if (index + current_size > total)
+ return NULL;
+
+ if (current_id == section_id)
+ return base + index;
+
+ index += current_size;
+ }
+
+ return NULL;
+}
+
+/*
+ * Offset from the start of BDB to the start of the
+ * block data (just past the block header).
+ */
+static u32 raw_block_offset(const void *bdb, enum bdb_block_id section_id)
+{
+ const void *block;
+
+ block = find_raw_section(bdb, section_id);
+ if (!block)
+ return 0;
+
+ return block - bdb;
+}
+
+struct bdb_block_entry {
+ struct list_head node;
+ enum bdb_block_id section_id;
+ u8 data[];
+};
+
+static const void *
+find_section(struct drm_i915_private *i915,
+ enum bdb_block_id section_id)
+{
+ struct bdb_block_entry *entry;
+
+ list_for_each_entry(entry, &i915->display.vbt.bdb_blocks, node) {
+ if (entry->section_id == section_id)
+ return entry->data + 3;
+ }
+
+ return NULL;
+}
+
+static const struct {
+ enum bdb_block_id section_id;
+ size_t min_size;
+} bdb_blocks[] = {
+ { .section_id = BDB_GENERAL_FEATURES,
+ .min_size = sizeof(struct bdb_general_features), },
+ { .section_id = BDB_GENERAL_DEFINITIONS,
+ .min_size = sizeof(struct bdb_general_definitions), },
+ { .section_id = BDB_PSR,
+ .min_size = sizeof(struct bdb_psr), },
+ { .section_id = BDB_DRIVER_FEATURES,
+ .min_size = sizeof(struct bdb_driver_features), },
+ { .section_id = BDB_SDVO_LVDS_OPTIONS,
+ .min_size = sizeof(struct bdb_sdvo_lvds_options), },
+ { .section_id = BDB_SDVO_PANEL_DTDS,
+ .min_size = sizeof(struct bdb_sdvo_panel_dtds), },
+ { .section_id = BDB_EDP,
+ .min_size = sizeof(struct bdb_edp), },
+ { .section_id = BDB_LVDS_OPTIONS,
+ .min_size = sizeof(struct bdb_lvds_options), },
+ /*
+ * BDB_LVDS_LFP_DATA depends on BDB_LVDS_LFP_DATA_PTRS,
+ * so keep the two ordered.
+ */
+ { .section_id = BDB_LVDS_LFP_DATA_PTRS,
+ .min_size = sizeof(struct bdb_lvds_lfp_data_ptrs), },
+ { .section_id = BDB_LVDS_LFP_DATA,
+ .min_size = 0, /* special case */ },
+ { .section_id = BDB_LVDS_BACKLIGHT,
+ .min_size = sizeof(struct bdb_lfp_backlight_data), },
+ { .section_id = BDB_LFP_POWER,
+ .min_size = sizeof(struct bdb_lfp_power), },
+ { .section_id = BDB_MIPI_CONFIG,
+ .min_size = sizeof(struct bdb_mipi_config), },
+ { .section_id = BDB_MIPI_SEQUENCE,
+ .min_size = sizeof(struct bdb_mipi_sequence) },
+ { .section_id = BDB_COMPRESSION_PARAMETERS,
+ .min_size = sizeof(struct bdb_compression_parameters), },
+ { .section_id = BDB_GENERIC_DTD,
+ .min_size = sizeof(struct bdb_generic_dtd), },
+};
+
+static size_t lfp_data_min_size(struct drm_i915_private *i915)
+{
+ const struct bdb_lvds_lfp_data_ptrs *ptrs;
+ size_t size;
+
+ ptrs = find_section(i915, BDB_LVDS_LFP_DATA_PTRS);
+ if (!ptrs)
+ return 0;
+
+ size = sizeof(struct bdb_lvds_lfp_data);
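+ /*
+ * If the tail (panel_name, etc.) is present, the block must be
+ * large enough to reach it.
+ */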
+ if (ptrs->panel_name.table_size)
+ size = max(size, ptrs->panel_name.offset +
+ sizeof(struct bdb_lvds_lfp_data_tail));
+
+ return size;
+}
+
+static bool validate_lfp_data_ptrs(const void *bdb,
+ const struct bdb_lvds_lfp_data_ptrs *ptrs)
+{
+ int fp_timing_size, dvo_timing_size, panel_pnp_id_size, panel_name_size;
+ int data_block_size, lfp_data_size;
+ const void *data_block;
+ int i;
+
+ data_block = find_raw_section(bdb, BDB_LVDS_LFP_DATA);
+ if (!data_block)
+ return false;
+
+ data_block_size = get_blocksize(data_block);
+ if (data_block_size == 0)
+ return false;
+
+ /* always 3 indicating the presence of fp_timing+dvo_timing+panel_pnp_id */
+ if (ptrs->lvds_entries != 3)
+ return false;
+
+ fp_timing_size = ptrs->ptr[0].fp_timing.table_size;
+ dvo_timing_size = ptrs->ptr[0].dvo_timing.table_size;
+ panel_pnp_id_size = ptrs->ptr[0].panel_pnp_id.table_size;
+ panel_name_size = ptrs->panel_name.table_size;
+
+ /* fp_timing has variable size */
+ if (fp_timing_size < 32 ||
+ dvo_timing_size != sizeof(struct lvds_dvo_timing) ||
+ panel_pnp_id_size != sizeof(struct lvds_pnp_id))
+ return false;
+
+ /* panel_name is not present in old VBTs */
+ if (panel_name_size != 0 &&
+ panel_name_size != sizeof(struct lvds_lfp_panel_name))
+ return false;
+
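+ /*
+ * The per-panel entry size is the distance between consecutive
+ * fp_timing offsets; the block holds 16 such entries.
+ */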
+ lfp_data_size = ptrs->ptr[1].fp_timing.offset - ptrs->ptr[0].fp_timing.offset;
+ if (16 * lfp_data_size > data_block_size)
+ return false;
+
+ /* make sure the table entries have uniform size */
+ for (i = 1; i < 16; i++) {
+ if (ptrs->ptr[i].fp_timing.table_size != fp_timing_size ||
+ ptrs->ptr[i].dvo_timing.table_size != dvo_timing_size ||
+ ptrs->ptr[i].panel_pnp_id.table_size != panel_pnp_id_size)
+ return false;
+
+ if (ptrs->ptr[i].fp_timing.offset - ptrs->ptr[i-1].fp_timing.offset != lfp_data_size ||
+ ptrs->ptr[i].dvo_timing.offset - ptrs->ptr[i-1].dvo_timing.offset != lfp_data_size ||
+ ptrs->ptr[i].panel_pnp_id.offset - ptrs->ptr[i-1].panel_pnp_id.offset != lfp_data_size)
+ return false;
+ }
+
+ /*
+ * Except for vlv/chv machines all real VBTs seem to have 6
+ * unaccounted bytes in the fp_timing table. It doesn't appear
+ * to be an intentional hole, as the fp_timing 0xffff terminator
+ * always falls within those 6 missing bytes.
+ */
+ if (fp_timing_size + 6 + dvo_timing_size + panel_pnp_id_size == lfp_data_size)
+ fp_timing_size += 6;
+
+ if (fp_timing_size + dvo_timing_size + panel_pnp_id_size != lfp_data_size)
+ return false;
+
+ if (ptrs->ptr[0].fp_timing.offset + fp_timing_size != ptrs->ptr[0].dvo_timing.offset ||
+ ptrs->ptr[0].dvo_timing.offset + dvo_timing_size != ptrs->ptr[0].panel_pnp_id.offset ||
+ ptrs->ptr[0].panel_pnp_id.offset + panel_pnp_id_size != lfp_data_size)
+ return false;
+
+ /* make sure the tables fit inside the data block */
+ for (i = 0; i < 16; i++) {
+ if (ptrs->ptr[i].fp_timing.offset + fp_timing_size > data_block_size ||
+ ptrs->ptr[i].dvo_timing.offset + dvo_timing_size > data_block_size ||
+ ptrs->ptr[i].panel_pnp_id.offset + panel_pnp_id_size > data_block_size)
+ return false;
+ }
+
+ if (ptrs->panel_name.offset + 16 * panel_name_size > data_block_size)
+ return false;
+
+ /* make sure fp_timing terminators are present at expected locations */
+ for (i = 0; i < 16; i++) {
+ const u16 *t = data_block + ptrs->ptr[i].fp_timing.offset +
+ fp_timing_size - 2;
+
+ if (*t != 0xffff)
+ return false;
+ }
+
+ return true;
+}
+
+/* make the data table offsets relative to the data block */
+static bool fixup_lfp_data_ptrs(const void *bdb, void *ptrs_block)
+{
+ struct bdb_lvds_lfp_data_ptrs *ptrs = ptrs_block;
+ u32 offset;
+ int i;
+
+ offset = raw_block_offset(bdb, BDB_LVDS_LFP_DATA);
+
+ for (i = 0; i < 16; i++) {
+ if (ptrs->ptr[i].fp_timing.offset < offset ||
+ ptrs->ptr[i].dvo_timing.offset < offset ||
+ ptrs->ptr[i].panel_pnp_id.offset < offset)
+ return false;
+
+ ptrs->ptr[i].fp_timing.offset -= offset;
+ ptrs->ptr[i].dvo_timing.offset -= offset;
+ ptrs->ptr[i].panel_pnp_id.offset -= offset;
+ }
+
+ if (ptrs->panel_name.table_size) {
+ if (ptrs->panel_name.offset < offset)
+ return false;
+
+ ptrs->panel_name.offset -= offset;
+ }
+
+ return validate_lfp_data_ptrs(bdb, ptrs);
+}
+
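+/*
+ * Carve a table of table_size bytes out of the end of the remaining
+ * total_size bytes; used to lay out the first panel's tables back to
+ * front (panel_pnp_id, dvo_timing, fp_timing).
+ */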
+static int make_lfp_data_ptr(struct lvds_lfp_data_ptr_table *table,
+ int table_size, int total_size)
+{
+ if (total_size < table_size)
+ return total_size;
+
+ table->table_size = table_size;
+ table->offset = total_size - table_size;
+
+ return total_size - table_size;
+}
+
+static void next_lfp_data_ptr(struct lvds_lfp_data_ptr_table *next,
+ const struct lvds_lfp_data_ptr_table *prev,
+ int size)
+{
+ next->table_size = prev->table_size;
+ next->offset = prev->offset + size;
+}
+
+static void *generate_lfp_data_ptrs(struct drm_i915_private *i915,
+ const void *bdb)
+{
+ int i, size, table_size, block_size, offset, fp_timing_size;
+ struct bdb_lvds_lfp_data_ptrs *ptrs;
+ const void *block;
+ void *ptrs_block;
+
+ /*
+ * The hardcoded fp_timing_size is only valid for
+ * modernish VBTs. All older VBTs definitely should
+ * include block 41 and thus we don't need to
+ * generate one.
+ */
+ if (i915->display.vbt.version < 155)
+ return NULL;
+
+ fp_timing_size = 38;
+
+ block = find_raw_section(bdb, BDB_LVDS_LFP_DATA);
+ if (!block)
+ return NULL;
+
+ drm_dbg_kms(&i915->drm, "Generating LFP data table pointers\n");
+
+ block_size = get_blocksize(block);
+
+ size = fp_timing_size + sizeof(struct lvds_dvo_timing) +
+ sizeof(struct lvds_pnp_id);
+ if (size * 16 > block_size)
+ return NULL;
+
+ ptrs_block = kzalloc(sizeof(*ptrs) + 3, GFP_KERNEL);
+ if (!ptrs_block)
+ return NULL;
+
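+ /* fake up the 3 byte block header (ID + size) ahead of the data */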
+ *(u8 *)(ptrs_block + 0) = BDB_LVDS_LFP_DATA_PTRS;
+ *(u16 *)(ptrs_block + 1) = sizeof(*ptrs);
+ ptrs = ptrs_block + 3;
+
+ table_size = sizeof(struct lvds_pnp_id);
+ size = make_lfp_data_ptr(&ptrs->ptr[0].panel_pnp_id, table_size, size);
+
+ table_size = sizeof(struct lvds_dvo_timing);
+ size = make_lfp_data_ptr(&ptrs->ptr[0].dvo_timing, table_size, size);
+
+ table_size = fp_timing_size;
+ size = make_lfp_data_ptr(&ptrs->ptr[0].fp_timing, table_size, size);
+
+ if (ptrs->ptr[0].fp_timing.table_size)
+ ptrs->lvds_entries++;
+ if (ptrs->ptr[0].dvo_timing.table_size)
+ ptrs->lvds_entries++;
+ if (ptrs->ptr[0].panel_pnp_id.table_size)
+ ptrs->lvds_entries++;
+
+ if (size != 0 || ptrs->lvds_entries != 3) {
+ kfree(ptrs_block);
+ return NULL;
+ }
+
+ size = fp_timing_size + sizeof(struct lvds_dvo_timing) +
+ sizeof(struct lvds_pnp_id);
+ for (i = 1; i < 16; i++) {
+ next_lfp_data_ptr(&ptrs->ptr[i].fp_timing, &ptrs->ptr[i-1].fp_timing, size);
+ next_lfp_data_ptr(&ptrs->ptr[i].dvo_timing, &ptrs->ptr[i-1].dvo_timing, size);
+ next_lfp_data_ptr(&ptrs->ptr[i].panel_pnp_id, &ptrs->ptr[i-1].panel_pnp_id, size);
+ }
+
+ table_size = sizeof(struct lvds_lfp_panel_name);
+
+ if (16 * (size + table_size) <= block_size) {
+ ptrs->panel_name.table_size = table_size;
+ ptrs->panel_name.offset = size * 16;
+ }
+
+ offset = block - bdb;
+
+ for (i = 0; i < 16; i++) {
+ ptrs->ptr[i].fp_timing.offset += offset;
+ ptrs->ptr[i].dvo_timing.offset += offset;
+ ptrs->ptr[i].panel_pnp_id.offset += offset;
+ }
+
+ if (ptrs->panel_name.table_size)
+ ptrs->panel_name.offset += offset;
+
+ return ptrs_block;
+}
+
+static void
+init_bdb_block(struct drm_i915_private *i915,
+ const void *bdb, enum bdb_block_id section_id,
+ size_t min_size)
+{
+ struct bdb_block_entry *entry;
+ void *temp_block = NULL;
+ const void *block;
+ size_t block_size;
+
+ block = find_raw_section(bdb, section_id);
+
+ /* Modern VBTs lack the LFP data table pointers block, make one up */
+ if (!block && section_id == BDB_LVDS_LFP_DATA_PTRS) {
+ temp_block = generate_lfp_data_ptrs(i915, bdb);
+ if (temp_block)
+ block = temp_block + 3;
+ }
+ if (!block)
+ return;
+
+ drm_WARN(&i915->drm, min_size == 0,
+ "Block %d min_size is zero\n", section_id);
+
+ block_size = get_blocksize(block);
+
+ /*
+ * Version number and new block size are considered
+ * part of the header for MIPI sequence block v3+.
+ */
+ if (section_id == BDB_MIPI_SEQUENCE && *(const u8 *)block >= 3)
+ block_size += 5;
+
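+ /* +3 for the block header (ID + size) that is copied along with the data */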
+ entry = kzalloc(struct_size(entry, data, max(min_size, block_size) + 3),
+ GFP_KERNEL);
+ if (!entry) {
+ kfree(temp_block);
+ return;
+ }
+
+ entry->section_id = section_id;
+ memcpy(entry->data, block - 3, block_size + 3);
+
+ kfree(temp_block);
+
+ drm_dbg_kms(&i915->drm, "Found BDB block %d (size %zu, min size %zu)\n",
+ section_id, block_size, min_size);
+
+ if (section_id == BDB_LVDS_LFP_DATA_PTRS &&
+ !fixup_lfp_data_ptrs(bdb, entry->data + 3)) {
+ drm_err(&i915->drm, "VBT has malformed LFP data table pointers\n");
+ kfree(entry);
+ return;
+ }
+
+ list_add_tail(&entry->node, &i915->display.vbt.bdb_blocks);
+}
+
+static void init_bdb_blocks(struct drm_i915_private *i915,
+ const void *bdb)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(bdb_blocks); i++) {
+ enum bdb_block_id section_id = bdb_blocks[i].section_id;
+ size_t min_size = bdb_blocks[i].min_size;
+
+ if (section_id == BDB_LVDS_LFP_DATA)
+ min_size = lfp_data_min_size(i915);
+
+ init_bdb_block(i915, bdb, section_id, min_size);
+ }
+}
+
+static void
+fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
+ const struct lvds_dvo_timing *dvo_timing)
+{
+ panel_fixed_mode->hdisplay = (dvo_timing->hactive_hi << 8) |
+ dvo_timing->hactive_lo;
+ panel_fixed_mode->hsync_start = panel_fixed_mode->hdisplay +
+ ((dvo_timing->hsync_off_hi << 8) | dvo_timing->hsync_off_lo);
+ panel_fixed_mode->hsync_end = panel_fixed_mode->hsync_start +
+ ((dvo_timing->hsync_pulse_width_hi << 8) |
+ dvo_timing->hsync_pulse_width_lo);
+ panel_fixed_mode->htotal = panel_fixed_mode->hdisplay +
+ ((dvo_timing->hblank_hi << 8) | dvo_timing->hblank_lo);
+
+ panel_fixed_mode->vdisplay = (dvo_timing->vactive_hi << 8) |
+ dvo_timing->vactive_lo;
+ panel_fixed_mode->vsync_start = panel_fixed_mode->vdisplay +
+ ((dvo_timing->vsync_off_hi << 4) | dvo_timing->vsync_off_lo);
+ panel_fixed_mode->vsync_end = panel_fixed_mode->vsync_start +
+ ((dvo_timing->vsync_pulse_width_hi << 4) |
+ dvo_timing->vsync_pulse_width_lo);
+ panel_fixed_mode->vtotal = panel_fixed_mode->vdisplay +
+ ((dvo_timing->vblank_hi << 8) | dvo_timing->vblank_lo);
+ panel_fixed_mode->clock = dvo_timing->clock * 10;
+ panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED;
+
+ if (dvo_timing->hsync_positive)
+ panel_fixed_mode->flags |= DRM_MODE_FLAG_PHSYNC;
+ else
+ panel_fixed_mode->flags |= DRM_MODE_FLAG_NHSYNC;
+
+ if (dvo_timing->vsync_positive)
+ panel_fixed_mode->flags |= DRM_MODE_FLAG_PVSYNC;
+ else
+ panel_fixed_mode->flags |= DRM_MODE_FLAG_NVSYNC;
+
+ panel_fixed_mode->width_mm = (dvo_timing->himage_hi << 8) |
+ dvo_timing->himage_lo;
+ panel_fixed_mode->height_mm = (dvo_timing->vimage_hi << 8) |
+ dvo_timing->vimage_lo;
+
+ /* Some VBTs have bogus h/vtotal values */
+ if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal)
+ panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1;
+ if (panel_fixed_mode->vsync_end > panel_fixed_mode->vtotal)
+ panel_fixed_mode->vtotal = panel_fixed_mode->vsync_end + 1;
+
+ drm_mode_set_name(panel_fixed_mode);
+}
+
+static const struct lvds_dvo_timing *
+get_lvds_dvo_timing(const struct bdb_lvds_lfp_data *data,
+ const struct bdb_lvds_lfp_data_ptrs *ptrs,
+ int index)
+{
+ return (const void *)data + ptrs->ptr[index].dvo_timing.offset;
+}
+
+static const struct lvds_fp_timing *
+get_lvds_fp_timing(const struct bdb_lvds_lfp_data *data,
+ const struct bdb_lvds_lfp_data_ptrs *ptrs,
+ int index)
+{
+ return (const void *)data + ptrs->ptr[index].fp_timing.offset;
+}
+
+static const struct lvds_pnp_id *
+get_lvds_pnp_id(const struct bdb_lvds_lfp_data *data,
+ const struct bdb_lvds_lfp_data_ptrs *ptrs,
+ int index)
+{
+ return (const void *)data + ptrs->ptr[index].panel_pnp_id.offset;
+}
+
+static const struct bdb_lvds_lfp_data_tail *
+get_lfp_data_tail(const struct bdb_lvds_lfp_data *data,
+ const struct bdb_lvds_lfp_data_ptrs *ptrs)
+{
+ if (ptrs->panel_name.table_size)
+ return (const void *)data + ptrs->panel_name.offset;
+ else
+ return NULL;
+}
+
+static void dump_pnp_id(struct drm_i915_private *i915,
+ const struct lvds_pnp_id *pnp_id,
+ const char *name)
+{
+ u16 mfg_name = be16_to_cpu((__force __be16)pnp_id->mfg_name);
+ char vend[4];
+
+ drm_dbg_kms(&i915->drm, "%s PNPID mfg: %s (0x%x), prod: %u, serial: %u, week: %d, year: %d\n",
+ name, drm_edid_decode_mfg_id(mfg_name, vend),
+ pnp_id->mfg_name, pnp_id->product_code, pnp_id->serial,
+ pnp_id->mfg_week, pnp_id->mfg_year + 1990);
+}
+
+static int opregion_get_panel_type(struct drm_i915_private *i915,
+ const struct intel_bios_encoder_data *devdata,
+ const struct edid *edid, bool use_fallback)
+{
+ return intel_opregion_get_panel_type(i915);
+}
+
+static int vbt_get_panel_type(struct drm_i915_private *i915,
+ const struct intel_bios_encoder_data *devdata,
+ const struct edid *edid, bool use_fallback)
+{
+ const struct bdb_lvds_options *lvds_options;
+
+ lvds_options = find_section(i915, BDB_LVDS_OPTIONS);
+ if (!lvds_options)
+ return -1;
+
+ if (lvds_options->panel_type > 0xf &&
+ lvds_options->panel_type != 0xff) {
+ drm_dbg_kms(&i915->drm, "Invalid VBT panel type 0x%x\n",
+ lvds_options->panel_type);
+ return -1;
+ }
+
+ if (devdata && devdata->child.handle == DEVICE_HANDLE_LFP2)
+ return lvds_options->panel_type2;
+
+ drm_WARN_ON(&i915->drm, devdata && devdata->child.handle != DEVICE_HANDLE_LFP1);
+
+ return lvds_options->panel_type;
+}
+
+static int pnpid_get_panel_type(struct drm_i915_private *i915,
+ const struct intel_bios_encoder_data *devdata,
+ const struct edid *edid, bool use_fallback)
+{
+ const struct bdb_lvds_lfp_data *data;
+ const struct bdb_lvds_lfp_data_ptrs *ptrs;
+ const struct lvds_pnp_id *edid_id;
+ struct lvds_pnp_id edid_id_nodate;
+ int i, best = -1;
+
+ if (!edid)
+ return -1;
+
+ edid_id = (const void *)&edid->mfg_id[0];
+
+ edid_id_nodate = *edid_id;
+ edid_id_nodate.mfg_week = 0;
+ edid_id_nodate.mfg_year = 0;
+
+ dump_pnp_id(i915, edid_id, "EDID");
+
+ ptrs = find_section(i915, BDB_LVDS_LFP_DATA_PTRS);
+ if (!ptrs)
+ return -1;
+
+ data = find_section(i915, BDB_LVDS_LFP_DATA);
+ if (!data)
+ return -1;
+
+ for (i = 0; i < 16; i++) {
+ const struct lvds_pnp_id *vbt_id =
+ get_lvds_pnp_id(data, ptrs, i);
+
+ /* full match? */
+ if (!memcmp(vbt_id, edid_id, sizeof(*vbt_id)))
+ return i;
+
+ /*
+ * Accept a match w/o date if no full match is found,
+ * and the VBT entry does not specify a date.
+ */
+ if (best < 0 &&
+ !memcmp(vbt_id, &edid_id_nodate, sizeof(*vbt_id)))
+ best = i;
+ }
+
+ return best;
+}
+
+static int fallback_get_panel_type(struct drm_i915_private *i915,
+ const struct intel_bios_encoder_data *devdata,
+ const struct edid *edid, bool use_fallback)
+{
+ return use_fallback ? 0 : -1;
+}
+
+enum panel_type {
+ PANEL_TYPE_OPREGION,
+ PANEL_TYPE_VBT,
+ PANEL_TYPE_PNPID,
+ PANEL_TYPE_FALLBACK,
+};
+
+static int get_panel_type(struct drm_i915_private *i915,
+ const struct intel_bios_encoder_data *devdata,
+ const struct edid *edid, bool use_fallback)
+{
+ struct {
+ const char *name;
+ int (*get_panel_type)(struct drm_i915_private *i915,
+ const struct intel_bios_encoder_data *devdata,
+ const struct edid *edid, bool use_fallback);
+ int panel_type;
+ } panel_types[] = {
+ [PANEL_TYPE_OPREGION] = {
+ .name = "OpRegion",
+ .get_panel_type = opregion_get_panel_type,
+ },
+ [PANEL_TYPE_VBT] = {
+ .name = "VBT",
+ .get_panel_type = vbt_get_panel_type,
+ },
+ [PANEL_TYPE_PNPID] = {
+ .name = "PNPID",
+ .get_panel_type = pnpid_get_panel_type,
+ },
+ [PANEL_TYPE_FALLBACK] = {
+ .name = "fallback",
+ .get_panel_type = fallback_get_panel_type,
+ },
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(panel_types); i++) {
+ panel_types[i].panel_type = panel_types[i].get_panel_type(i915, devdata,
+ edid, use_fallback);
+
+ drm_WARN_ON(&i915->drm, panel_types[i].panel_type > 0xf &&
+ panel_types[i].panel_type != 0xff);
+
+ if (panel_types[i].panel_type >= 0)
+ drm_dbg_kms(&i915->drm, "Panel type (%s): %d\n",
+ panel_types[i].name, panel_types[i].panel_type);
+ }
+
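+ /*
+ * Precedence: OpRegion first; the PNPID match if the VBT panel
+ * type is 0xff (i.e. defers to PNPID); then the VBT panel type
+ * itself; and the fallback as a last resort.
+ */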
+ if (panel_types[PANEL_TYPE_OPREGION].panel_type >= 0)
+ i = PANEL_TYPE_OPREGION;
+ else if (panel_types[PANEL_TYPE_VBT].panel_type == 0xff &&
+ panel_types[PANEL_TYPE_PNPID].panel_type >= 0)
+ i = PANEL_TYPE_PNPID;
+ else if (panel_types[PANEL_TYPE_VBT].panel_type != 0xff &&
+ panel_types[PANEL_TYPE_VBT].panel_type >= 0)
+ i = PANEL_TYPE_VBT;
+ else
+ i = PANEL_TYPE_FALLBACK;
+
+ drm_dbg_kms(&i915->drm, "Selected panel type (%s): %d\n",
+ panel_types[i].name, panel_types[i].panel_type);
+
+ return panel_types[i].panel_type;
+}
+
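+/*
+ * Many VBT fields pack one value per panel into a single field,
+ * num_bits bits each; extract the bits for the given panel_type.
+ */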
+static unsigned int panel_bits(unsigned int value, int panel_type, int num_bits)
+{
+ return (value >> (panel_type * num_bits)) & (BIT(num_bits) - 1);
+}
+
+static bool panel_bool(unsigned int value, int panel_type)
+{
+ return panel_bits(value, panel_type, 1);
+}
+
+/* Parse general panel options */
+static void
+parse_panel_options(struct drm_i915_private *i915,
+ struct intel_panel *panel)
+{
+ const struct bdb_lvds_options *lvds_options;
+ int panel_type = panel->vbt.panel_type;
+ int drrs_mode;
+
+ lvds_options = find_section(i915, BDB_LVDS_OPTIONS);
+ if (!lvds_options)
+ return;
+
+ panel->vbt.lvds_dither = lvds_options->pixel_dither;
+
+ /*
+ * Empirical evidence indicates the block size can be
+ * 4, 14, 16, or 24+ bytes. For older VBTs there is no clear
+ * relationship between the block size and the BDB version.
+ */
+ if (get_blocksize(lvds_options) < 16)
+ return;
+
+ drrs_mode = panel_bits(lvds_options->dps_panel_type_bits,
+ panel_type, 2);
+ /*
+ * VBT has static DRRS = 0 and seamless DRRS = 2.
+ * The below piece of code is required to adjust vbt.drrs_type
+ * to match the enum drrs_support_type.
+ */
+ switch (drrs_mode) {
+ case 0:
+ panel->vbt.drrs_type = DRRS_TYPE_STATIC;
+ drm_dbg_kms(&i915->drm, "DRRS supported mode is static\n");
+ break;
+ case 2:
+ panel->vbt.drrs_type = DRRS_TYPE_SEAMLESS;
+ drm_dbg_kms(&i915->drm,
+ "DRRS supported mode is seamless\n");
+ break;
+ default:
+ panel->vbt.drrs_type = DRRS_TYPE_NONE;
+ drm_dbg_kms(&i915->drm,
+ "DRRS not supported (VBT input)\n");
+ break;
+ }
+}
+
+static void
+parse_lfp_panel_dtd(struct drm_i915_private *i915,
+ struct intel_panel *panel,
+ const struct bdb_lvds_lfp_data *lvds_lfp_data,
+ const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs)
+{
+ const struct lvds_dvo_timing *panel_dvo_timing;
+ const struct lvds_fp_timing *fp_timing;
+ struct drm_display_mode *panel_fixed_mode;
+ int panel_type = panel->vbt.panel_type;
+
+ panel_dvo_timing = get_lvds_dvo_timing(lvds_lfp_data,
+ lvds_lfp_data_ptrs,
+ panel_type);
+
+ panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
+ if (!panel_fixed_mode)
+ return;
+
+ fill_detail_timing_data(panel_fixed_mode, panel_dvo_timing);
+
+ panel->vbt.lfp_lvds_vbt_mode = panel_fixed_mode;
+
+ drm_dbg_kms(&i915->drm,
+ "Found panel mode in BIOS VBT legacy lfp table: " DRM_MODE_FMT "\n",
+ DRM_MODE_ARG(panel_fixed_mode));
+
+ fp_timing = get_lvds_fp_timing(lvds_lfp_data,
+ lvds_lfp_data_ptrs,
+ panel_type);
+
+ /* check the resolution, just to be sure */
+ if (fp_timing->x_res == panel_fixed_mode->hdisplay &&
+ fp_timing->y_res == panel_fixed_mode->vdisplay) {
+ panel->vbt.bios_lvds_val = fp_timing->lvds_reg_val;
+ drm_dbg_kms(&i915->drm,
+ "VBT initial LVDS value %x\n",
+ panel->vbt.bios_lvds_val);
+ }
+}
+
+static void
+parse_lfp_data(struct drm_i915_private *i915,
+ struct intel_panel *panel)
+{
+ const struct bdb_lvds_lfp_data *data;
+ const struct bdb_lvds_lfp_data_tail *tail;
+ const struct bdb_lvds_lfp_data_ptrs *ptrs;
+ const struct lvds_pnp_id *pnp_id;
+ int panel_type = panel->vbt.panel_type;
+
+ ptrs = find_section(i915, BDB_LVDS_LFP_DATA_PTRS);
+ if (!ptrs)
+ return;
+
+ data = find_section(i915, BDB_LVDS_LFP_DATA);
+ if (!data)
+ return;
+
+ if (!panel->vbt.lfp_lvds_vbt_mode)
+ parse_lfp_panel_dtd(i915, panel, data, ptrs);
+
+ pnp_id = get_lvds_pnp_id(data, ptrs, panel_type);
+ dump_pnp_id(i915, pnp_id, "Panel");
+
+ tail = get_lfp_data_tail(data, ptrs);
+ if (!tail)
+ return;
+
+ drm_dbg_kms(&i915->drm, "Panel name: %.*s\n",
+ (int)sizeof(tail->panel_name[0].name),
+ tail->panel_name[panel_type].name);
+
+ if (i915->display.vbt.version >= 188) {
+ panel->vbt.seamless_drrs_min_refresh_rate =
+ tail->seamless_drrs_min_refresh_rate[panel_type];
+ drm_dbg_kms(&i915->drm,
+ "Seamless DRRS min refresh rate: %d Hz\n",
+ panel->vbt.seamless_drrs_min_refresh_rate);
+ }
+}
+
+static void
+parse_generic_dtd(struct drm_i915_private *i915,
+ struct intel_panel *panel)
+{
+ const struct bdb_generic_dtd *generic_dtd;
+ const struct generic_dtd_entry *dtd;
+ struct drm_display_mode *panel_fixed_mode;
+ int num_dtd;
+
+ /*
+ * Older VBTs provided DTD information for internal displays through
+ * the "LFP panel tables" block (42). As of VBT revision 229 the
+ * DTD information should be provided via a newer "generic DTD"
+ * block (58). Just to be safe, we'll try the new generic DTD block
+ * first on VBT >= 229, but still fall back to trying the old LFP
+ * block if that fails.
+ */
+ if (i915->display.vbt.version < 229)
+ return;
+
+ generic_dtd = find_section(i915, BDB_GENERIC_DTD);
+ if (!generic_dtd)
+ return;
+
+ if (generic_dtd->gdtd_size < sizeof(struct generic_dtd_entry)) {
+ drm_err(&i915->drm, "GDTD size %u is too small.\n",
+ generic_dtd->gdtd_size);
+ return;
+ } else if (generic_dtd->gdtd_size !=
+ sizeof(struct generic_dtd_entry)) {
+ drm_err(&i915->drm, "Unexpected GDTD size %u\n",
+ generic_dtd->gdtd_size);
+ /* DTD has unknown fields, but keep going */
+ }
+
+ num_dtd = (get_blocksize(generic_dtd) -
+ sizeof(struct bdb_generic_dtd)) / generic_dtd->gdtd_size;
+ if (panel->vbt.panel_type >= num_dtd) {
+ drm_err(&i915->drm,
+ "Panel type %d not found in table of %d DTD's\n",
+ panel->vbt.panel_type, num_dtd);
+ return;
+ }
+
+ dtd = &generic_dtd->dtd[panel->vbt.panel_type];
+
+ panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
+ if (!panel_fixed_mode)
+ return;
+
+ panel_fixed_mode->hdisplay = dtd->hactive;
+ panel_fixed_mode->hsync_start =
+ panel_fixed_mode->hdisplay + dtd->hfront_porch;
+ panel_fixed_mode->hsync_end =
+ panel_fixed_mode->hsync_start + dtd->hsync;
+ panel_fixed_mode->htotal =
+ panel_fixed_mode->hdisplay + dtd->hblank;
+
+ panel_fixed_mode->vdisplay = dtd->vactive;
+ panel_fixed_mode->vsync_start =
+ panel_fixed_mode->vdisplay + dtd->vfront_porch;
+ panel_fixed_mode->vsync_end =
+ panel_fixed_mode->vsync_start + dtd->vsync;
+ panel_fixed_mode->vtotal =
+ panel_fixed_mode->vdisplay + dtd->vblank;
+
+ panel_fixed_mode->clock = dtd->pixel_clock;
+ panel_fixed_mode->width_mm = dtd->width_mm;
+ panel_fixed_mode->height_mm = dtd->height_mm;
+
+ panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED;
+ drm_mode_set_name(panel_fixed_mode);
+
+ if (dtd->hsync_positive_polarity)
+ panel_fixed_mode->flags |= DRM_MODE_FLAG_PHSYNC;
+ else
+ panel_fixed_mode->flags |= DRM_MODE_FLAG_NHSYNC;
+
+ if (dtd->vsync_positive_polarity)
+ panel_fixed_mode->flags |= DRM_MODE_FLAG_PVSYNC;
+ else
+ panel_fixed_mode->flags |= DRM_MODE_FLAG_NVSYNC;
+
+ drm_dbg_kms(&i915->drm,
+ "Found panel mode in BIOS VBT generic dtd table: " DRM_MODE_FMT "\n",
+ DRM_MODE_ARG(panel_fixed_mode));
+
+ panel->vbt.lfp_lvds_vbt_mode = panel_fixed_mode;
+}
+
+static void
+parse_lfp_backlight(struct drm_i915_private *i915,
+ struct intel_panel *panel)
+{
+ const struct bdb_lfp_backlight_data *backlight_data;
+ const struct lfp_backlight_data_entry *entry;
+ int panel_type = panel->vbt.panel_type;
+ u16 level;
+
+ backlight_data = find_section(i915, BDB_LVDS_BACKLIGHT);
+ if (!backlight_data)
+ return;
+
+ if (backlight_data->entry_size != sizeof(backlight_data->data[0])) {
+ drm_dbg_kms(&i915->drm,
+ "Unsupported backlight data entry size %u\n",
+ backlight_data->entry_size);
+ return;
+ }
+
+ entry = &backlight_data->data[panel_type];
+
+ panel->vbt.backlight.present = entry->type == BDB_BACKLIGHT_TYPE_PWM;
+ if (!panel->vbt.backlight.present) {
+ drm_dbg_kms(&i915->drm,
+ "PWM backlight not present in VBT (type %u)\n",
+ entry->type);
+ return;
+ }
+
+ panel->vbt.backlight.type = INTEL_BACKLIGHT_DISPLAY_DDI;
+ if (i915->display.vbt.version >= 191) {
+ size_t exp_size;
+
+ if (i915->display.vbt.version >= 236)
+ exp_size = sizeof(struct bdb_lfp_backlight_data);
+ else if (i915->display.vbt.version >= 234)
+ exp_size = EXP_BDB_LFP_BL_DATA_SIZE_REV_234;
+ else
+ exp_size = EXP_BDB_LFP_BL_DATA_SIZE_REV_191;
+
+ if (get_blocksize(backlight_data) >= exp_size) {
+ const struct lfp_backlight_control_method *method;
+
+ method = &backlight_data->backlight_control[panel_type];
+ panel->vbt.backlight.type = method->type;
+ panel->vbt.backlight.controller = method->controller;
+ }
+ }
+
+ panel->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz;
+ panel->vbt.backlight.active_low_pwm = entry->active_low_pwm;
+
+ if (i915->display.vbt.version >= 234) {
+ u16 min_level;
+ bool scale;
+
+ level = backlight_data->brightness_level[panel_type].level;
+ min_level = backlight_data->brightness_min_level[panel_type].level;
+
+ if (i915->display.vbt.version >= 236)
+ scale = backlight_data->brightness_precision_bits[panel_type] == 16;
+ else
+ scale = level > 255;
+
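+ /*
+ * With 16-bit brightness precision the min level is scaled back
+ * down to the usual 0-255 range.
+ */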
+ if (scale)
+ min_level = min_level / 255;
+
+ if (min_level > 255) {
+ drm_warn(&i915->drm, "Brightness min level > 255\n");
+ min_level = 255;
+ }
+ panel->vbt.backlight.min_brightness = min_level;
+
+ panel->vbt.backlight.brightness_precision_bits =
+ backlight_data->brightness_precision_bits[panel_type];
+ } else {
+ level = backlight_data->level[panel_type];
+ panel->vbt.backlight.min_brightness = entry->min_brightness;
+ }
+
+ drm_dbg_kms(&i915->drm,
+ "VBT backlight PWM modulation frequency %u Hz, "
+ "active %s, min brightness %u, level %u, controller %u\n",
+ panel->vbt.backlight.pwm_freq_hz,
+ panel->vbt.backlight.active_low_pwm ? "low" : "high",
+ panel->vbt.backlight.min_brightness,
+ level,
+ panel->vbt.backlight.controller);
+}
+
+/* Try to find SDVO panel data */
+static void
+parse_sdvo_panel_data(struct drm_i915_private *i915,
+ struct intel_panel *panel)
+{
+ const struct bdb_sdvo_panel_dtds *dtds;
+ struct drm_display_mode *panel_fixed_mode;
+ int index;
+
+ index = i915->params.vbt_sdvo_panel_type;
+ if (index == -2) {
+ drm_dbg_kms(&i915->drm,
+ "Ignore SDVO panel mode from BIOS VBT tables.\n");
+ return;
+ }
+
+ if (index == -1) {
+ const struct bdb_sdvo_lvds_options *sdvo_lvds_options;
+
+ sdvo_lvds_options = find_section(i915, BDB_SDVO_LVDS_OPTIONS);
+ if (!sdvo_lvds_options)
+ return;
+
+ index = sdvo_lvds_options->panel_type;
+ }
+
+ dtds = find_section(i915, BDB_SDVO_PANEL_DTDS);
+ if (!dtds)
+ return;
+
+ panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
+ if (!panel_fixed_mode)
+ return;
+
+ fill_detail_timing_data(panel_fixed_mode, &dtds->dtds[index]);
+
+ panel->vbt.sdvo_lvds_vbt_mode = panel_fixed_mode;
+
+ drm_dbg_kms(&i915->drm,
+ "Found SDVO panel mode in BIOS VBT tables: " DRM_MODE_FMT "\n",
+ DRM_MODE_ARG(panel_fixed_mode));
+}
+
+static int intel_bios_ssc_frequency(struct drm_i915_private *i915,
+ bool alternate)
+{
+ switch (DISPLAY_VER(i915)) {
+ case 2:
+ return alternate ? 66667 : 48000;
+ case 3:
+ case 4:
+ return alternate ? 100000 : 96000;
+ default:
+ return alternate ? 100000 : 120000;
+ }
+}
+
+static void
+parse_general_features(struct drm_i915_private *i915)
+{
+ const struct bdb_general_features *general;
+
+ general = find_section(i915, BDB_GENERAL_FEATURES);
+ if (!general)
+ return;
+
+ i915->display.vbt.int_tv_support = general->int_tv_support;
+ /* int_crt_support can't be trusted on earlier platforms */
+ if (i915->display.vbt.version >= 155 &&
+ (HAS_DDI(i915) || IS_VALLEYVIEW(i915)))
+ i915->display.vbt.int_crt_support = general->int_crt_support;
+ i915->display.vbt.lvds_use_ssc = general->enable_ssc;
+ i915->display.vbt.lvds_ssc_freq =
+ intel_bios_ssc_frequency(i915, general->ssc_freq);
+ i915->display.vbt.display_clock_mode = general->display_clock_mode;
+ i915->display.vbt.fdi_rx_polarity_inverted = general->fdi_rx_polarity_inverted;
+ if (i915->display.vbt.version >= 181) {
+ i915->display.vbt.orientation = general->rotate_180 ?
+ DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP :
+ DRM_MODE_PANEL_ORIENTATION_NORMAL;
+ } else {
+ i915->display.vbt.orientation = DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
+ }
+
+ if (i915->display.vbt.version >= 249 && general->afc_startup_config) {
+ i915->display.vbt.override_afc_startup = true;
+ i915->display.vbt.override_afc_startup_val = general->afc_startup_config == 0x1 ? 0x0 : 0x7;
+ }
+
+ drm_dbg_kms(&i915->drm,
+ "BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d fdi_rx_polarity_inverted %d\n",
+ i915->display.vbt.int_tv_support,
+ i915->display.vbt.int_crt_support,
+ i915->display.vbt.lvds_use_ssc,
+ i915->display.vbt.lvds_ssc_freq,
+ i915->display.vbt.display_clock_mode,
+ i915->display.vbt.fdi_rx_polarity_inverted);
+}
+
+static const struct child_device_config *
+child_device_ptr(const struct bdb_general_definitions *defs, int i)
+{
+ return (const void *) &defs->devices[i * defs->child_dev_size];
+}
+
+static void
+parse_sdvo_device_mapping(struct drm_i915_private *i915)
+{
+ struct sdvo_device_mapping *mapping;
+ const struct intel_bios_encoder_data *devdata;
+ const struct child_device_config *child;
+ int count = 0;
+
+ /*
+ * Only parse SDVO mappings on gens that could have SDVO. This isn't
+ * accurate and doesn't have to be, as long as it's not too strict.
+ */
+ if (!IS_DISPLAY_VER(i915, 3, 7)) {
+ drm_dbg_kms(&i915->drm, "Skipping SDVO device mapping\n");
+ return;
+ }
+
+ list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) {
+ child = &devdata->child;
+
+ if (child->slave_addr != SLAVE_ADDR1 &&
+ child->slave_addr != SLAVE_ADDR2) {
+ /*
+ * If the slave address is neither 0x70 nor 0x72,
+ * it is not an SDVO device. Skip it.
+ */
+ continue;
+ }
+ if (child->dvo_port != DEVICE_PORT_DVOB &&
+ child->dvo_port != DEVICE_PORT_DVOC) {
+ /* skip the incorrect SDVO port */
+ drm_dbg_kms(&i915->drm,
+ "Incorrect SDVO port. Skip it\n");
+ continue;
+ }
+ drm_dbg_kms(&i915->drm,
+ "the SDVO device with slave addr %2x is found on"
+ " %s port\n",
+ child->slave_addr,
+ (child->dvo_port == DEVICE_PORT_DVOB) ?
+ "SDVOB" : "SDVOC");
+ mapping = &i915->display.vbt.sdvo_mappings[child->dvo_port - 1];
+ if (!mapping->initialized) {
+ mapping->dvo_port = child->dvo_port;
+ mapping->slave_addr = child->slave_addr;
+ mapping->dvo_wiring = child->dvo_wiring;
+ mapping->ddc_pin = child->ddc_pin;
+ mapping->i2c_pin = child->i2c_pin;
+ mapping->initialized = 1;
+ drm_dbg_kms(&i915->drm,
+ "SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d\n",
+ mapping->dvo_port, mapping->slave_addr,
+ mapping->dvo_wiring, mapping->ddc_pin,
+ mapping->i2c_pin);
+ } else {
+ drm_dbg_kms(&i915->drm,
+ "Maybe one SDVO port is shared by "
+ "two SDVO device.\n");
+ }
+ if (child->slave2_addr) {
+ /*
+ * Maybe this is an SDVO device with multiple inputs,
+ * and the mapping info for the second input is not added.
+ */
+ drm_dbg_kms(&i915->drm,
+ "slave2_addr set, maybe an SDVO device with multiple inputs\n");
+ }
+ count++;
+ }
+
+ if (!count) {
+ drm_dbg_kms(&i915->drm,
+ "No SDVO device info found in VBT\n");
+ }
+}
+
+static void
+parse_driver_features(struct drm_i915_private *i915)
+{
+ const struct bdb_driver_features *driver;
+
+ driver = find_section(i915, BDB_DRIVER_FEATURES);
+ if (!driver)
+ return;
+
+ if (DISPLAY_VER(i915) >= 5) {
+ /*
+ * Note that we consider BDB_DRIVER_FEATURE_INT_SDVO_LVDS
+ * to mean "eDP". The VBT spec doesn't agree with that
+ * interpretation, but real world VBTs seem to.
+ */
+ if (driver->lvds_config != BDB_DRIVER_FEATURE_INT_LVDS)
+ i915->display.vbt.int_lvds_support = 0;
+ } else {
+ /*
+ * FIXME it's not clear which BDB version has the LVDS config
+ * bits defined. Revision history in the VBT spec says:
+ * "0.92 | Add two definitions for VBT value of LVDS Active
+ * Config (00b and 11b values defined) | 06/13/2005"
+ * but does not specify the BDB version.
+ *
+ * So far version 134 (on i945gm) is the oldest VBT observed
+ * in the wild with the bits correctly populated. Version
+ * 108 (on i85x) does not have the bits correctly populated.
+ */
+ if (i915->display.vbt.version >= 134 &&
+ driver->lvds_config != BDB_DRIVER_FEATURE_INT_LVDS &&
+ driver->lvds_config != BDB_DRIVER_FEATURE_INT_SDVO_LVDS)
+ i915->display.vbt.int_lvds_support = 0;
+ }
+}
+
+static void
+parse_panel_driver_features(struct drm_i915_private *i915,
+ struct intel_panel *panel)
+{
+ const struct bdb_driver_features *driver;
+
+ driver = find_section(i915, BDB_DRIVER_FEATURES);
+ if (!driver)
+ return;
+
+ if (i915->display.vbt.version < 228) {
+ drm_dbg_kms(&i915->drm, "DRRS State Enabled:%d\n",
+ driver->drrs_enabled);
+ /*
+ * If DRRS is not supported, drrs_type has to be set to 0.
+ * This is because the VBT uses 0 for static DRRS, and
+ * "DRRS not supported" is instead represented by
+ * driver->drrs_enabled being false.
+ */
+ if (!driver->drrs_enabled && panel->vbt.drrs_type != DRRS_TYPE_NONE) {
+ /*
+ * FIXME Should DMRRS perhaps be treated as seamless
+ * but without the automatic downclocking?
+ */
+ if (driver->dmrrs_enabled)
+ panel->vbt.drrs_type = DRRS_TYPE_STATIC;
+ else
+ panel->vbt.drrs_type = DRRS_TYPE_NONE;
+ }
+
+ panel->vbt.psr.enable = driver->psr_enabled;
+ }
+}
+
+static void
+parse_power_conservation_features(struct drm_i915_private *i915,
+ struct intel_panel *panel)
+{
+ const struct bdb_lfp_power *power;
+ u8 panel_type = panel->vbt.panel_type;
+
+ panel->vbt.vrr = true; /* matches Windows behaviour */
+
+ if (i915->display.vbt.version < 228)
+ return;
+
+ power = find_section(i915, BDB_LFP_POWER);
+ if (!power)
+ return;
+
+ panel->vbt.psr.enable = panel_bool(power->psr, panel_type);
+
+ /*
+ * If DRRS is not supported, drrs_type has to be set to 0.
+ * This is because the VBT uses 0 for static DRRS, and
+ * "DRRS not supported" is instead represented by
+ * power->drrs & BIT(panel_type) being false.
+ */
+ if (!panel_bool(power->drrs, panel_type) && panel->vbt.drrs_type != DRRS_TYPE_NONE) {
+ /*
+ * FIXME Should DMRRS perhaps be treated as seamless
+ * but without the automatic downclocking?
+ */
+ if (panel_bool(power->dmrrs, panel_type))
+ panel->vbt.drrs_type = DRRS_TYPE_STATIC;
+ else
+ panel->vbt.drrs_type = DRRS_TYPE_NONE;
+ }
+
+ if (i915->display.vbt.version >= 232)
+ panel->vbt.edp.hobl = panel_bool(power->hobl, panel_type);
+
+ if (i915->display.vbt.version >= 233)
+ panel->vbt.vrr = panel_bool(power->vrr_feature_enabled,
+ panel_type);
+}
+
+static void
+parse_edp(struct drm_i915_private *i915,
+ struct intel_panel *panel)
+{
+ const struct bdb_edp *edp;
+ const struct edp_power_seq *edp_pps;
+ const struct edp_fast_link_params *edp_link_params;
+ int panel_type = panel->vbt.panel_type;
+
+ edp = find_section(i915, BDB_EDP);
+ if (!edp)
+ return;
+
+ switch (panel_bits(edp->color_depth, panel_type, 2)) {
+ case EDP_18BPP:
+ panel->vbt.edp.bpp = 18;
+ break;
+ case EDP_24BPP:
+ panel->vbt.edp.bpp = 24;
+ break;
+ case EDP_30BPP:
+ panel->vbt.edp.bpp = 30;
+ break;
+ }
+
+ /* Get the eDP sequencing and link info */
+ edp_pps = &edp->power_seqs[panel_type];
+ edp_link_params = &edp->fast_link_params[panel_type];
+
+ panel->vbt.edp.pps = *edp_pps;
+
+ if (i915->display.vbt.version >= 224) {
+ panel->vbt.edp.rate =
+ edp->edp_fast_link_training_rate[panel_type] * 20;
+ } else {
+ switch (edp_link_params->rate) {
+ case EDP_RATE_1_62:
+ panel->vbt.edp.rate = 162000;
+ break;
+ case EDP_RATE_2_7:
+ panel->vbt.edp.rate = 270000;
+ break;
+ case EDP_RATE_5_4:
+ panel->vbt.edp.rate = 540000;
+ break;
+ default:
+ drm_dbg_kms(&i915->drm,
+ "VBT has unknown eDP link rate value %u\n",
+ edp_link_params->rate);
+ break;
+ }
+ }
+
+ switch (edp_link_params->lanes) {
+ case EDP_LANE_1:
+ panel->vbt.edp.lanes = 1;
+ break;
+ case EDP_LANE_2:
+ panel->vbt.edp.lanes = 2;
+ break;
+ case EDP_LANE_4:
+ panel->vbt.edp.lanes = 4;
+ break;
+ default:
+ drm_dbg_kms(&i915->drm,
+ "VBT has unknown eDP lane count value %u\n",
+ edp_link_params->lanes);
+ break;
+ }
+
+ switch (edp_link_params->preemphasis) {
+ case EDP_PREEMPHASIS_NONE:
+ panel->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_0;
+ break;
+ case EDP_PREEMPHASIS_3_5dB:
+ panel->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_1;
+ break;
+ case EDP_PREEMPHASIS_6dB:
+ panel->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_2;
+ break;
+ case EDP_PREEMPHASIS_9_5dB:
+ panel->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_3;
+ break;
+ default:
+ drm_dbg_kms(&i915->drm,
+ "VBT has unknown eDP pre-emphasis value %u\n",
+ edp_link_params->preemphasis);
+ break;
+ }
+
+ switch (edp_link_params->vswing) {
+ case EDP_VSWING_0_4V:
+ panel->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
+ break;
+ case EDP_VSWING_0_6V:
+ panel->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_1;
+ break;
+ case EDP_VSWING_0_8V:
+ panel->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
+ break;
+ case EDP_VSWING_1_2V:
+ panel->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
+ break;
+ default:
+ drm_dbg_kms(&i915->drm,
+ "VBT has unknown eDP voltage swing value %u\n",
+ edp_link_params->vswing);
+ break;
+ }
+
+ if (i915->display.vbt.version >= 173) {
+ u8 vswing;
+
+ /* Don't read from VBT if the module parameter has a valid value */
+ if (i915->params.edp_vswing) {
+ panel->vbt.edp.low_vswing =
+ i915->params.edp_vswing == 1;
+ } else {
+ vswing = (edp->edp_vswing_preemph >> (panel_type * 4)) & 0xF;
+ panel->vbt.edp.low_vswing = vswing == 0;
+ }
+ }
+
+ panel->vbt.edp.drrs_msa_timing_delay =
+ panel_bits(edp->sdrrs_msa_timing_delay, panel_type, 2);
+
+ if (i915->display.vbt.version >= 244)
+ panel->vbt.edp.max_link_rate =
+ edp->edp_max_port_link_rate[panel_type] * 20;
+}
+
+static void
+parse_psr(struct drm_i915_private *i915,
+ struct intel_panel *panel)
+{
+ const struct bdb_psr *psr;
+ const struct psr_table *psr_table;
+ int panel_type = panel->vbt.panel_type;
+
+ psr = find_section(i915, BDB_PSR);
+ if (!psr) {
+ drm_dbg_kms(&i915->drm, "No PSR BDB found.\n");
+ return;
+ }
+
+ psr_table = &psr->psr_table[panel_type];
+
+ panel->vbt.psr.full_link = psr_table->full_link;
+ panel->vbt.psr.require_aux_wakeup = psr_table->require_aux_to_wakeup;
+
+ /* Allowed VBT values range from 0 to 15 */
+ panel->vbt.psr.idle_frames = psr_table->idle_frames < 0 ? 0 :
+ psr_table->idle_frames > 15 ? 15 : psr_table->idle_frames;
+
+ /*
+ * New PSR options: 0=500us, 1=100us, 2=2500us, 3=0us.
+ * The old decimal value is the wake-up time in multiples of 100 us.
+ */
+ if (i915->display.vbt.version >= 205 &&
+ (DISPLAY_VER(i915) >= 9 && !IS_BROXTON(i915))) {
+ switch (psr_table->tp1_wakeup_time) {
+ case 0:
+ panel->vbt.psr.tp1_wakeup_time_us = 500;
+ break;
+ case 1:
+ panel->vbt.psr.tp1_wakeup_time_us = 100;
+ break;
+ case 3:
+ panel->vbt.psr.tp1_wakeup_time_us = 0;
+ break;
+ default:
+ drm_dbg_kms(&i915->drm,
+ "VBT tp1 wakeup time value %d is outside range[0-3], defaulting to max value 2500us\n",
+ psr_table->tp1_wakeup_time);
+ fallthrough;
+ case 2:
+ panel->vbt.psr.tp1_wakeup_time_us = 2500;
+ break;
+ }
+
+ switch (psr_table->tp2_tp3_wakeup_time) {
+ case 0:
+ panel->vbt.psr.tp2_tp3_wakeup_time_us = 500;
+ break;
+ case 1:
+ panel->vbt.psr.tp2_tp3_wakeup_time_us = 100;
+ break;
+ case 3:
+ panel->vbt.psr.tp2_tp3_wakeup_time_us = 0;
+ break;
+ default:
+ drm_dbg_kms(&i915->drm,
+ "VBT tp2_tp3 wakeup time value %d is outside range[0-3], defaulting to max value 2500us\n",
+ psr_table->tp2_tp3_wakeup_time);
+ fallthrough;
+ case 2:
+ panel->vbt.psr.tp2_tp3_wakeup_time_us = 2500;
+ break;
+ }
+ } else {
+ panel->vbt.psr.tp1_wakeup_time_us = psr_table->tp1_wakeup_time * 100;
+ panel->vbt.psr.tp2_tp3_wakeup_time_us = psr_table->tp2_tp3_wakeup_time * 100;
+ }
+
+ if (i915->display.vbt.version >= 226) {
+ u32 wakeup_time = psr->psr2_tp2_tp3_wakeup_time;
+
+ wakeup_time = panel_bits(wakeup_time, panel_type, 2);
+ switch (wakeup_time) {
+ case 0:
+ wakeup_time = 500;
+ break;
+ case 1:
+ wakeup_time = 100;
+ break;
+ case 3:
+ wakeup_time = 50;
+ break;
+ default:
+ case 2:
+ wakeup_time = 2500;
+ break;
+ }
+ panel->vbt.psr.psr2_tp2_tp3_wakeup_time_us = wakeup_time;
+ } else {
+ /* Reusing PSR1 wakeup time for PSR2 in older VBTs */
+ panel->vbt.psr.psr2_tp2_tp3_wakeup_time_us = panel->vbt.psr.tp2_tp3_wakeup_time_us;
+ }
+}
+
+static void parse_dsi_backlight_ports(struct drm_i915_private *i915,
+ struct intel_panel *panel,
+ enum port port)
+{
+ enum port port_bc = DISPLAY_VER(i915) >= 11 ? PORT_B : PORT_C;
+
+ if (!panel->vbt.dsi.config->dual_link || i915->display.vbt.version < 197) {
+ panel->vbt.dsi.bl_ports = BIT(port);
+ if (panel->vbt.dsi.config->cabc_supported)
+ panel->vbt.dsi.cabc_ports = BIT(port);
+
+ return;
+ }
+
+ switch (panel->vbt.dsi.config->dl_dcs_backlight_ports) {
+ case DL_DCS_PORT_A:
+ panel->vbt.dsi.bl_ports = BIT(PORT_A);
+ break;
+ case DL_DCS_PORT_C:
+ panel->vbt.dsi.bl_ports = BIT(port_bc);
+ break;
+ default:
+ case DL_DCS_PORT_A_AND_C:
+ panel->vbt.dsi.bl_ports = BIT(PORT_A) | BIT(port_bc);
+ break;
+ }
+
+ if (!panel->vbt.dsi.config->cabc_supported)
+ return;
+
+ switch (panel->vbt.dsi.config->dl_dcs_cabc_ports) {
+ case DL_DCS_PORT_A:
+ panel->vbt.dsi.cabc_ports = BIT(PORT_A);
+ break;
+ case DL_DCS_PORT_C:
+ panel->vbt.dsi.cabc_ports = BIT(port_bc);
+ break;
+ default:
+ case DL_DCS_PORT_A_AND_C:
+ panel->vbt.dsi.cabc_ports =
+ BIT(PORT_A) | BIT(port_bc);
+ break;
+ }
+}
+
+static void
+parse_mipi_config(struct drm_i915_private *i915,
+ struct intel_panel *panel)
+{
+ const struct bdb_mipi_config *start;
+ const struct mipi_config *config;
+ const struct mipi_pps_data *pps;
+ int panel_type = panel->vbt.panel_type;
+ enum port port;
+
+ /* parse MIPI blocks only if LFP type is MIPI */
+ if (!intel_bios_is_dsi_present(i915, &port))
+ return;
+
+ /* Initialize this to undefined indicating no generic MIPI support */
+ panel->vbt.dsi.panel_id = MIPI_DSI_UNDEFINED_PANEL_ID;
+
+ /*
+ * Block #40 is already parsed and panel_fixed_mode is stored in
+ * panel->vbt.lfp_lvds_vbt_mode; reuse it when needed.
+ */
+
+ /*
+ * Parse block #52 using the already parsed panel_type as the
+ * panel index.
+ */
+ start = find_section(i915, BDB_MIPI_CONFIG);
+ if (!start) {
+ drm_dbg_kms(&i915->drm, "No MIPI config BDB found");
+ return;
+ }
+
+ drm_dbg(&i915->drm, "Found MIPI Config block, panel index = %d\n",
+ panel_type);
+
+ /*
+ * get hold of the correct configuration block and pps data,
+ * using panel_type as the index
+ */
+ config = &start->config[panel_type];
+ pps = &start->pps[panel_type];
+
+ /* Store the full data for now; trim it once we know what's actually needed. */
+ panel->vbt.dsi.config = kmemdup(config, sizeof(struct mipi_config), GFP_KERNEL);
+ if (!panel->vbt.dsi.config)
+ return;
+
+ panel->vbt.dsi.pps = kmemdup(pps, sizeof(struct mipi_pps_data), GFP_KERNEL);
+ if (!panel->vbt.dsi.pps) {
+ kfree(panel->vbt.dsi.config);
+ return;
+ }
+
+ parse_dsi_backlight_ports(i915, panel, port);
+
+ /* FIXME is the 90 vs. 270 correct? */
+ switch (config->rotation) {
+ case ENABLE_ROTATION_0:
+ /*
+ * Most (all?) VBTs claim 0 degrees despite having
+ * an upside down panel, thus we do not trust this.
+ */
+ panel->vbt.dsi.orientation =
+ DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
+ break;
+ case ENABLE_ROTATION_90:
+ panel->vbt.dsi.orientation =
+ DRM_MODE_PANEL_ORIENTATION_RIGHT_UP;
+ break;
+ case ENABLE_ROTATION_180:
+ panel->vbt.dsi.orientation =
+ DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP;
+ break;
+ case ENABLE_ROTATION_270:
+ panel->vbt.dsi.orientation =
+ DRM_MODE_PANEL_ORIENTATION_LEFT_UP;
+ break;
+ }
+
+ /* We have mandatory mipi config blocks. Initialize as generic panel */
+ panel->vbt.dsi.panel_id = MIPI_DSI_GENERIC_PANEL_ID;
+}
+
+/* Find the sequence block and size for the given panel. */
+static const u8 *
+find_panel_sequence_block(const struct bdb_mipi_sequence *sequence,
+ u16 panel_id, u32 *seq_size)
+{
+ u32 total = get_blocksize(sequence);
+ const u8 *data = &sequence->data[0];
+ u8 current_id;
+ u32 current_size;
+ int header_size = sequence->version >= 3 ? 5 : 3;
+ int index = 0;
+ int i;
+
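+ /*
+ * v3+ per-panel sequence blocks have a 5 byte header (u8 ID +
+ * u32 size); older ones use 3 bytes (u8 ID + u16 size).
+ */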
+ /* skip new block size */
+ if (sequence->version >= 3)
+ data += 4;
+
+ for (i = 0; i < MAX_MIPI_CONFIGURATIONS && index < total; i++) {
+ if (index + header_size > total) {
+ DRM_ERROR("Invalid sequence block (header)\n");
+ return NULL;
+ }
+
+ current_id = *(data + index);
+ if (sequence->version >= 3)
+ current_size = *((const u32 *)(data + index + 1));
+ else
+ current_size = *((const u16 *)(data + index + 1));
+
+ index += header_size;
+
+ if (index + current_size > total) {
+ DRM_ERROR("Invalid sequence block\n");
+ return NULL;
+ }
+
+ if (current_id == panel_id) {
+ *seq_size = current_size;
+ return data + index;
+ }
+
+ index += current_size;
+ }
+
+ DRM_ERROR("Sequence block detected but no valid configuration\n");
+
+ return NULL;
+}
+
+static int goto_next_sequence(const u8 *data, int index, int total)
+{
+ u16 len;
+
+ /* Skip Sequence Byte. */
+ for (index = index + 1; index < total; index += len) {
+ u8 operation_byte = *(data + index);
+ index++;
+
+ switch (operation_byte) {
+ case MIPI_SEQ_ELEM_END:
+ return index;
+ case MIPI_SEQ_ELEM_SEND_PKT:
+ if (index + 4 > total)
+ return 0;
+
+ len = *((const u16 *)(data + index + 2)) + 4;
+ break;
+ case MIPI_SEQ_ELEM_DELAY:
+ len = 4;
+ break;
+ case MIPI_SEQ_ELEM_GPIO:
+ len = 2;
+ break;
+ case MIPI_SEQ_ELEM_I2C:
+ if (index + 7 > total)
+ return 0;
+ len = *(data + index + 6) + 7;
+ break;
+ default:
+ DRM_ERROR("Unknown operation byte\n");
+ return 0;
+ }
+ }
+
+ return 0;
+}
+
+static int goto_next_sequence_v3(const u8 *data, int index, int total)
+{
+ int seq_end;
+ u16 len;
+ u32 size_of_sequence;
+
+ /*
+ * Could skip sequence based on Size of Sequence alone, but also do some
+ * checking on the structure.
+ */
+ if (total < 5) {
+ DRM_ERROR("Too small sequence size\n");
+ return 0;
+ }
+
+ /* Skip Sequence Byte. */
+ index++;
+
+ /*
+ * Size of Sequence. Excludes the Sequence Byte and the size itself,
+ * includes MIPI_SEQ_ELEM_END byte, excludes the final MIPI_SEQ_END
+ * byte.
+ */
+ size_of_sequence = *((const u32 *)(data + index));
+ index += 4;
+
+ seq_end = index + size_of_sequence;
+ if (seq_end > total) {
+ DRM_ERROR("Invalid sequence size\n");
+ return 0;
+ }
+
+ for (; index < total; index += len) {
+ u8 operation_byte = *(data + index);
+ index++;
+
+ if (operation_byte == MIPI_SEQ_ELEM_END) {
+ if (index != seq_end) {
+ DRM_ERROR("Invalid element structure\n");
+ return 0;
+ }
+ return index;
+ }
+
+ len = *(data + index);
+ index++;
+
+ /*
+ * FIXME: Would be nice to check elements like for v1/v2 in
+ * goto_next_sequence() above.
+ */
+ switch (operation_byte) {
+ case MIPI_SEQ_ELEM_SEND_PKT:
+ case MIPI_SEQ_ELEM_DELAY:
+ case MIPI_SEQ_ELEM_GPIO:
+ case MIPI_SEQ_ELEM_I2C:
+ case MIPI_SEQ_ELEM_SPI:
+ case MIPI_SEQ_ELEM_PMIC:
+ break;
+ default:
+ DRM_ERROR("Unknown operation byte %u\n",
+ operation_byte);
+ break;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Get the length of the deassert fragment prefixed to a v1 init OTP
+ * sequence: skip all delay + gpio operands and stop at the first DSI
+ * packet op.
+ */
+static int get_init_otp_deassert_fragment_len(struct drm_i915_private *i915,
+ struct intel_panel *panel)
+{
+ const u8 *data = panel->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
+ int index, len;
+
+ if (drm_WARN_ON(&i915->drm,
+ !data || panel->vbt.dsi.seq_version != 1))
+ return 0;
+
+ /* index = 1 to skip sequence byte */
+ for (index = 1; data[index] != MIPI_SEQ_ELEM_END; index += len) {
+ switch (data[index]) {
+ case MIPI_SEQ_ELEM_SEND_PKT:
+ return index == 1 ? 0 : index;
+ case MIPI_SEQ_ELEM_DELAY:
+ len = 5; /* 1 byte for operand + uint32 */
+ break;
+ case MIPI_SEQ_ELEM_GPIO:
+ len = 3; /* 1 byte for op, 1 for gpio_nr, 1 for value */
+ break;
+ default:
+ return 0;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Some v1 VBT MIPI sequences do the deassert in the init OTP sequence.
+ * The deassert must be done before calling intel_dsi_device_ready, so for
+ * these devices we split the init OTP sequence into a deassert sequence and
+ * the actual init OTP part.
+ */
+static void fixup_mipi_sequences(struct drm_i915_private *i915,
+ struct intel_panel *panel)
+{
+ u8 *init_otp;
+ int len;
+
+ /* Limit this to VLV for now. */
+ if (!IS_VALLEYVIEW(i915))
+ return;
+
+ /* Limit this to v1 vid-mode sequences */
+ if (panel->vbt.dsi.config->is_cmd_mode ||
+ panel->vbt.dsi.seq_version != 1)
+ return;
+
+ /* Only do this if there are otp and assert seqs and no deassert seq */
+ if (!panel->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] ||
+ !panel->vbt.dsi.sequence[MIPI_SEQ_ASSERT_RESET] ||
+ panel->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET])
+ return;
+
+ /* The deassert-sequence ends at the first DSI packet */
+ len = get_init_otp_deassert_fragment_len(i915, panel);
+ if (!len)
+ return;
+
+ drm_dbg_kms(&i915->drm,
+ "Using init OTP fragment to deassert reset\n");
+
+ /* Copy the fragment, update seq byte and terminate it */
+ init_otp = (u8 *)panel->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
+ panel->vbt.dsi.deassert_seq = kmemdup(init_otp, len + 1, GFP_KERNEL);
+ if (!panel->vbt.dsi.deassert_seq)
+ return;
+ panel->vbt.dsi.deassert_seq[0] = MIPI_SEQ_DEASSERT_RESET;
+ panel->vbt.dsi.deassert_seq[len] = MIPI_SEQ_ELEM_END;
+ /* Use the copy for deassert */
+ panel->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET] =
+ panel->vbt.dsi.deassert_seq;
+ /* Replace the last byte of the fragment with init OTP seq byte */
+ init_otp[len - 1] = MIPI_SEQ_INIT_OTP;
+ /* And make MIPI_SEQ_INIT_OTP point to it */
+ panel->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] = init_otp + len - 1;
+}
+
+static void
+parse_mipi_sequence(struct drm_i915_private *i915,
+ struct intel_panel *panel)
+{
+ int panel_type = panel->vbt.panel_type;
+ const struct bdb_mipi_sequence *sequence;
+ const u8 *seq_data;
+ u32 seq_size;
+ u8 *data;
+ int index = 0;
+
+ /* Only our generic panel driver uses the sequence block. */
+ if (panel->vbt.dsi.panel_id != MIPI_DSI_GENERIC_PANEL_ID)
+ return;
+
+ sequence = find_section(i915, BDB_MIPI_SEQUENCE);
+ if (!sequence) {
+ drm_dbg_kms(&i915->drm,
+ "No MIPI Sequence found, parsing complete\n");
+ return;
+ }
+
+ /* Fail gracefully for forward incompatible sequence block. */
+ if (sequence->version >= 4) {
+ drm_err(&i915->drm,
+ "Unable to parse MIPI Sequence Block v%u\n",
+ sequence->version);
+ return;
+ }
+
+ drm_dbg(&i915->drm, "Found MIPI sequence block v%u\n",
+ sequence->version);
+
+ seq_data = find_panel_sequence_block(sequence, panel_type, &seq_size);
+ if (!seq_data)
+ return;
+
+ data = kmemdup(seq_data, seq_size, GFP_KERNEL);
+ if (!data)
+ return;
+
+ /* Parse the sequences, store pointers to each sequence. */
+ for (;;) {
+ u8 seq_id = *(data + index);
+ if (seq_id == MIPI_SEQ_END)
+ break;
+
+ if (seq_id >= MIPI_SEQ_MAX) {
+ drm_err(&i915->drm, "Unknown sequence %u\n",
+ seq_id);
+ goto err;
+ }
+
+ /* Log about presence of sequences we won't run. */
+ if (seq_id == MIPI_SEQ_TEAR_ON || seq_id == MIPI_SEQ_TEAR_OFF)
+ drm_dbg_kms(&i915->drm,
+ "Unsupported sequence %u\n", seq_id);
+
+ panel->vbt.dsi.sequence[seq_id] = data + index;
+
+ if (sequence->version >= 3)
+ index = goto_next_sequence_v3(data, index, seq_size);
+ else
+ index = goto_next_sequence(data, index, seq_size);
+ if (!index) {
+ drm_err(&i915->drm, "Invalid sequence %u\n",
+ seq_id);
+ goto err;
+ }
+ }
+
+ panel->vbt.dsi.data = data;
+ panel->vbt.dsi.size = seq_size;
+ panel->vbt.dsi.seq_version = sequence->version;
+
+ fixup_mipi_sequences(i915, panel);
+
+ drm_dbg(&i915->drm, "MIPI related VBT parsing complete\n");
+ return;
+
+err:
+ kfree(data);
+ memset(panel->vbt.dsi.sequence, 0, sizeof(panel->vbt.dsi.sequence));
+}
+
+static void
+parse_compression_parameters(struct drm_i915_private *i915)
+{
+ const struct bdb_compression_parameters *params;
+ struct intel_bios_encoder_data *devdata;
+ const struct child_device_config *child;
+ u16 block_size;
+ int index;
+
+ if (i915->display.vbt.version < 198)
+ return;
+
+ params = find_section(i915, BDB_COMPRESSION_PARAMETERS);
+ if (params) {
+ /* Sanity checks */
+ if (params->entry_size != sizeof(params->data[0])) {
+ drm_dbg_kms(&i915->drm,
+ "VBT: unsupported compression param entry size\n");
+ return;
+ }
+
+ block_size = get_blocksize(params);
+ if (block_size < sizeof(*params)) {
+ drm_dbg_kms(&i915->drm,
+ "VBT: expected 16 compression param entries\n");
+ return;
+ }
+ }
+
+ list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) {
+ child = &devdata->child;
+
+ if (!child->compression_enable)
+ continue;
+
+ if (!params) {
+ drm_dbg_kms(&i915->drm,
+ "VBT: compression params not available\n");
+ continue;
+ }
+
+ if (child->compression_method_cps) {
+ drm_dbg_kms(&i915->drm,
+ "VBT: CPS compression not supported\n");
+ continue;
+ }
+
+ index = child->compression_structure_index;
+
+ devdata->dsc = kmemdup(&params->data[index],
+ sizeof(*devdata->dsc), GFP_KERNEL);
+ }
+}
+
+static u8 translate_iboost(u8 val)
+{
+ static const u8 mapping[] = { 1, 3, 7 }; /* See VBT spec */
+
+ if (val >= ARRAY_SIZE(mapping)) {
+ DRM_DEBUG_KMS("Unsupported I_boost value found in VBT (%d), display may not work properly\n", val);
+ return 0;
+ }
+ return mapping[val];
+}
+
+static const u8 cnp_ddc_pin_map[] = {
+ [0] = 0, /* N/A */
+ [DDC_BUS_DDI_B] = GMBUS_PIN_1_BXT,
+ [DDC_BUS_DDI_C] = GMBUS_PIN_2_BXT,
+ [DDC_BUS_DDI_D] = GMBUS_PIN_4_CNP, /* sic */
+ [DDC_BUS_DDI_F] = GMBUS_PIN_3_BXT, /* sic */
+};
+
+static const u8 icp_ddc_pin_map[] = {
+ [ICL_DDC_BUS_DDI_A] = GMBUS_PIN_1_BXT,
+ [ICL_DDC_BUS_DDI_B] = GMBUS_PIN_2_BXT,
+ [TGL_DDC_BUS_DDI_C] = GMBUS_PIN_3_BXT,
+ [ICL_DDC_BUS_PORT_1] = GMBUS_PIN_9_TC1_ICP,
+ [ICL_DDC_BUS_PORT_2] = GMBUS_PIN_10_TC2_ICP,
+ [ICL_DDC_BUS_PORT_3] = GMBUS_PIN_11_TC3_ICP,
+ [ICL_DDC_BUS_PORT_4] = GMBUS_PIN_12_TC4_ICP,
+ [TGL_DDC_BUS_PORT_5] = GMBUS_PIN_13_TC5_TGP,
+ [TGL_DDC_BUS_PORT_6] = GMBUS_PIN_14_TC6_TGP,
+};
+
+static const u8 rkl_pch_tgp_ddc_pin_map[] = {
+ [ICL_DDC_BUS_DDI_A] = GMBUS_PIN_1_BXT,
+ [ICL_DDC_BUS_DDI_B] = GMBUS_PIN_2_BXT,
+ [RKL_DDC_BUS_DDI_D] = GMBUS_PIN_9_TC1_ICP,
+ [RKL_DDC_BUS_DDI_E] = GMBUS_PIN_10_TC2_ICP,
+};
+
+static const u8 adls_ddc_pin_map[] = {
+ [ICL_DDC_BUS_DDI_A] = GMBUS_PIN_1_BXT,
+ [ADLS_DDC_BUS_PORT_TC1] = GMBUS_PIN_9_TC1_ICP,
+ [ADLS_DDC_BUS_PORT_TC2] = GMBUS_PIN_10_TC2_ICP,
+ [ADLS_DDC_BUS_PORT_TC3] = GMBUS_PIN_11_TC3_ICP,
+ [ADLS_DDC_BUS_PORT_TC4] = GMBUS_PIN_12_TC4_ICP,
+};
+
+static const u8 gen9bc_tgp_ddc_pin_map[] = {
+ [DDC_BUS_DDI_B] = GMBUS_PIN_2_BXT,
+ [DDC_BUS_DDI_C] = GMBUS_PIN_9_TC1_ICP,
+ [DDC_BUS_DDI_D] = GMBUS_PIN_10_TC2_ICP,
+};
+
+static const u8 adlp_ddc_pin_map[] = {
+ [ICL_DDC_BUS_DDI_A] = GMBUS_PIN_1_BXT,
+ [ICL_DDC_BUS_DDI_B] = GMBUS_PIN_2_BXT,
+ [ADLP_DDC_BUS_PORT_TC1] = GMBUS_PIN_9_TC1_ICP,
+ [ADLP_DDC_BUS_PORT_TC2] = GMBUS_PIN_10_TC2_ICP,
+ [ADLP_DDC_BUS_PORT_TC3] = GMBUS_PIN_11_TC3_ICP,
+ [ADLP_DDC_BUS_PORT_TC4] = GMBUS_PIN_12_TC4_ICP,
+};
+
+static u8 map_ddc_pin(struct drm_i915_private *i915, u8 vbt_pin)
+{
+ const u8 *ddc_pin_map;
+ int n_entries;
+
+ if (IS_ALDERLAKE_P(i915)) {
+ ddc_pin_map = adlp_ddc_pin_map;
+ n_entries = ARRAY_SIZE(adlp_ddc_pin_map);
+ } else if (IS_ALDERLAKE_S(i915)) {
+ ddc_pin_map = adls_ddc_pin_map;
+ n_entries = ARRAY_SIZE(adls_ddc_pin_map);
+ } else if (INTEL_PCH_TYPE(i915) >= PCH_DG1) {
+ return vbt_pin;
+ } else if (IS_ROCKETLAKE(i915) && INTEL_PCH_TYPE(i915) == PCH_TGP) {
+ ddc_pin_map = rkl_pch_tgp_ddc_pin_map;
+ n_entries = ARRAY_SIZE(rkl_pch_tgp_ddc_pin_map);
+ } else if (HAS_PCH_TGP(i915) && DISPLAY_VER(i915) == 9) {
+ ddc_pin_map = gen9bc_tgp_ddc_pin_map;
+ n_entries = ARRAY_SIZE(gen9bc_tgp_ddc_pin_map);
+ } else if (INTEL_PCH_TYPE(i915) >= PCH_ICP) {
+ ddc_pin_map = icp_ddc_pin_map;
+ n_entries = ARRAY_SIZE(icp_ddc_pin_map);
+ } else if (HAS_PCH_CNP(i915)) {
+ ddc_pin_map = cnp_ddc_pin_map;
+ n_entries = ARRAY_SIZE(cnp_ddc_pin_map);
+ } else {
+ /* Assuming direct map */
+ return vbt_pin;
+ }
+
+ if (vbt_pin < n_entries && ddc_pin_map[vbt_pin] != 0)
+ return ddc_pin_map[vbt_pin];
+
+ drm_dbg_kms(&i915->drm,
+ "Ignoring alternate pin: VBT claims DDC pin %d, which is not valid for this platform\n",
+ vbt_pin);
+ return 0;
+}
+
+static enum port get_port_by_ddc_pin(struct drm_i915_private *i915, u8 ddc_pin)
+{
+ const struct intel_bios_encoder_data *devdata;
+ enum port port;
+
+ if (!ddc_pin)
+ return PORT_NONE;
+
+ for_each_port(port) {
+ devdata = i915->display.vbt.ports[port];
+
+ if (devdata && ddc_pin == devdata->child.ddc_pin)
+ return port;
+ }
+
+ return PORT_NONE;
+}
+
+static void sanitize_ddc_pin(struct intel_bios_encoder_data *devdata,
+ enum port port)
+{
+ struct drm_i915_private *i915 = devdata->i915;
+ struct child_device_config *child;
+ u8 mapped_ddc_pin;
+ enum port p;
+
+ if (!devdata->child.ddc_pin)
+ return;
+
+ mapped_ddc_pin = map_ddc_pin(i915, devdata->child.ddc_pin);
+ if (!intel_gmbus_is_valid_pin(i915, mapped_ddc_pin)) {
+ drm_dbg_kms(&i915->drm,
+ "Port %c has invalid DDC pin %d, "
+ "sticking to defaults\n",
+ port_name(port), mapped_ddc_pin);
+ devdata->child.ddc_pin = 0;
+ return;
+ }
+
+ p = get_port_by_ddc_pin(i915, devdata->child.ddc_pin);
+ if (p == PORT_NONE)
+ return;
+
+ drm_dbg_kms(&i915->drm,
+ "port %c trying to use the same DDC pin (0x%x) as port %c, "
+ "disabling port %c DVI/HDMI support\n",
+ port_name(port), mapped_ddc_pin,
+ port_name(p), port_name(p));
+
+ /*
+	 * If we have multiple ports supposedly sharing the pin, then DVI/HDMI
+	 * couldn't exist on the shared port. Otherwise they would share the
+	 * same DDC pin and the system couldn't communicate with them
+	 * separately.
+	 *
+	 * Give inverse child device order the priority, last one wins. Yes,
+	 * there are real machines (e.g. Asrock B250M-HDV) where the VBT has
+	 * both port A and port E with the same AUX ch and we must pick port E :(
+ */
+ child = &i915->display.vbt.ports[p]->child;
+
+ child->device_type &= ~DEVICE_TYPE_TMDS_DVI_SIGNALING;
+ child->device_type |= DEVICE_TYPE_NOT_HDMI_OUTPUT;
+
+ child->ddc_pin = 0;
+}
+
+static enum port get_port_by_aux_ch(struct drm_i915_private *i915, u8 aux_ch)
+{
+ const struct intel_bios_encoder_data *devdata;
+ enum port port;
+
+ if (!aux_ch)
+ return PORT_NONE;
+
+ for_each_port(port) {
+ devdata = i915->display.vbt.ports[port];
+
+ if (devdata && aux_ch == devdata->child.aux_channel)
+ return port;
+ }
+
+ return PORT_NONE;
+}
+
+static void sanitize_aux_ch(struct intel_bios_encoder_data *devdata,
+ enum port port)
+{
+ struct drm_i915_private *i915 = devdata->i915;
+ struct child_device_config *child;
+ enum port p;
+
+ p = get_port_by_aux_ch(i915, devdata->child.aux_channel);
+ if (p == PORT_NONE)
+ return;
+
+ drm_dbg_kms(&i915->drm,
+ "port %c trying to use the same AUX CH (0x%x) as port %c, "
+ "disabling port %c DP support\n",
+ port_name(port), devdata->child.aux_channel,
+ port_name(p), port_name(p));
+
+ /*
+	 * If we have multiple ports supposedly sharing the aux channel, then DP
+	 * couldn't exist on the shared port. Otherwise they would share the
+	 * same aux channel and the system couldn't communicate with them
+	 * separately.
+	 *
+	 * Give inverse child device order the priority, last one wins. Yes,
+	 * there are real machines (e.g. Asrock B250M-HDV) where the VBT has
+	 * both port A and port E with the same AUX ch and we must pick port E :(
+ */
+ child = &i915->display.vbt.ports[p]->child;
+
+ child->device_type &= ~DEVICE_TYPE_DISPLAYPORT_OUTPUT;
+ child->aux_channel = 0;
+}
+
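+/* Collapse a specific DVO port into its class (HDMI, DP or MIPI). */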
+static u8 dvo_port_type(u8 dvo_port)
+{
+ switch (dvo_port) {
+ case DVO_PORT_HDMIA:
+ case DVO_PORT_HDMIB:
+ case DVO_PORT_HDMIC:
+ case DVO_PORT_HDMID:
+ case DVO_PORT_HDMIE:
+ case DVO_PORT_HDMIF:
+ case DVO_PORT_HDMIG:
+ case DVO_PORT_HDMIH:
+ case DVO_PORT_HDMII:
+ return DVO_PORT_HDMIA;
+ case DVO_PORT_DPA:
+ case DVO_PORT_DPB:
+ case DVO_PORT_DPC:
+ case DVO_PORT_DPD:
+ case DVO_PORT_DPE:
+ case DVO_PORT_DPF:
+ case DVO_PORT_DPG:
+ case DVO_PORT_DPH:
+ case DVO_PORT_DPI:
+ return DVO_PORT_DPA;
+ case DVO_PORT_MIPIA:
+ case DVO_PORT_MIPIB:
+ case DVO_PORT_MIPIC:
+ case DVO_PORT_MIPID:
+ return DVO_PORT_MIPIA;
+ default:
+ return dvo_port;
+ }
+}
+
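+/*
+ * Reverse-map a VBT DVO port to an i915 port using a per-platform
+ * candidate table; -1 terminates each port's candidate list.
+ */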
+static enum port __dvo_port_to_port(int n_ports, int n_dvo,
+ const int port_mapping[][3], u8 dvo_port)
+{
+ enum port port;
+ int i;
+
+ for (port = PORT_A; port < n_ports; port++) {
+ for (i = 0; i < n_dvo; i++) {
+ if (port_mapping[port][i] == -1)
+ break;
+
+ if (dvo_port == port_mapping[port][i])
+ return port;
+ }
+ }
+
+ return PORT_NONE;
+}
+
+static enum port dvo_port_to_port(struct drm_i915_private *i915,
+ u8 dvo_port)
+{
+ /*
+ * Each DDI port can have more than one value on the "DVO Port" field,
+ * so look for all the possible values for each port.
+ */
+ static const int port_mapping[][3] = {
+ [PORT_A] = { DVO_PORT_HDMIA, DVO_PORT_DPA, -1 },
+ [PORT_B] = { DVO_PORT_HDMIB, DVO_PORT_DPB, -1 },
+ [PORT_C] = { DVO_PORT_HDMIC, DVO_PORT_DPC, -1 },
+ [PORT_D] = { DVO_PORT_HDMID, DVO_PORT_DPD, -1 },
+ [PORT_E] = { DVO_PORT_HDMIE, DVO_PORT_DPE, DVO_PORT_CRT },
+ [PORT_F] = { DVO_PORT_HDMIF, DVO_PORT_DPF, -1 },
+ [PORT_G] = { DVO_PORT_HDMIG, DVO_PORT_DPG, -1 },
+ [PORT_H] = { DVO_PORT_HDMIH, DVO_PORT_DPH, -1 },
+ [PORT_I] = { DVO_PORT_HDMII, DVO_PORT_DPI, -1 },
+ };
+ /*
+ * RKL VBT uses PHY based mapping. Combo PHYs A,B,C,D
+ * map to DDI A,B,TC1,TC2 respectively.
+ */
+ static const int rkl_port_mapping[][3] = {
+ [PORT_A] = { DVO_PORT_HDMIA, DVO_PORT_DPA, -1 },
+ [PORT_B] = { DVO_PORT_HDMIB, DVO_PORT_DPB, -1 },
+ [PORT_C] = { -1 },
+ [PORT_TC1] = { DVO_PORT_HDMIC, DVO_PORT_DPC, -1 },
+ [PORT_TC2] = { DVO_PORT_HDMID, DVO_PORT_DPD, -1 },
+ };
+ /*
+	 * Alderlake S ports used in the driver are PORT_A, PORT_D, PORT_E,
+	 * PORT_F and PORT_G; we need to map those to the correct VBT sections.
+ */
+ static const int adls_port_mapping[][3] = {
+ [PORT_A] = { DVO_PORT_HDMIA, DVO_PORT_DPA, -1 },
+ [PORT_B] = { -1 },
+ [PORT_C] = { -1 },
+ [PORT_TC1] = { DVO_PORT_HDMIB, DVO_PORT_DPB, -1 },
+ [PORT_TC2] = { DVO_PORT_HDMIC, DVO_PORT_DPC, -1 },
+ [PORT_TC3] = { DVO_PORT_HDMID, DVO_PORT_DPD, -1 },
+ [PORT_TC4] = { DVO_PORT_HDMIE, DVO_PORT_DPE, -1 },
+ };
+ static const int xelpd_port_mapping[][3] = {
+ [PORT_A] = { DVO_PORT_HDMIA, DVO_PORT_DPA, -1 },
+ [PORT_B] = { DVO_PORT_HDMIB, DVO_PORT_DPB, -1 },
+ [PORT_C] = { DVO_PORT_HDMIC, DVO_PORT_DPC, -1 },
+ [PORT_D_XELPD] = { DVO_PORT_HDMID, DVO_PORT_DPD, -1 },
+ [PORT_E_XELPD] = { DVO_PORT_HDMIE, DVO_PORT_DPE, -1 },
+ [PORT_TC1] = { DVO_PORT_HDMIF, DVO_PORT_DPF, -1 },
+ [PORT_TC2] = { DVO_PORT_HDMIG, DVO_PORT_DPG, -1 },
+ [PORT_TC3] = { DVO_PORT_HDMIH, DVO_PORT_DPH, -1 },
+ [PORT_TC4] = { DVO_PORT_HDMII, DVO_PORT_DPI, -1 },
+ };
+
+ if (DISPLAY_VER(i915) >= 13)
+ return __dvo_port_to_port(ARRAY_SIZE(xelpd_port_mapping),
+ ARRAY_SIZE(xelpd_port_mapping[0]),
+ xelpd_port_mapping,
+ dvo_port);
+ else if (IS_ALDERLAKE_S(i915))
+ return __dvo_port_to_port(ARRAY_SIZE(adls_port_mapping),
+ ARRAY_SIZE(adls_port_mapping[0]),
+ adls_port_mapping,
+ dvo_port);
+ else if (IS_DG1(i915) || IS_ROCKETLAKE(i915))
+ return __dvo_port_to_port(ARRAY_SIZE(rkl_port_mapping),
+ ARRAY_SIZE(rkl_port_mapping[0]),
+ rkl_port_mapping,
+ dvo_port);
+ else
+ return __dvo_port_to_port(ARRAY_SIZE(port_mapping),
+ ARRAY_SIZE(port_mapping[0]),
+ port_mapping,
+ dvo_port);
+}
+
+static enum port
+dsi_dvo_port_to_port(struct drm_i915_private *i915, u8 dvo_port)
+{
+ switch (dvo_port) {
+ case DVO_PORT_MIPIA:
+ return PORT_A;
+ case DVO_PORT_MIPIC:
+ if (DISPLAY_VER(i915) >= 11)
+ return PORT_B;
+ else
+ return PORT_C;
+ default:
+ return PORT_NONE;
+ }
+}
+
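+/* Translate the BDB 230+ VBT max link rate field to a DP link rate value. */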
+static int parse_bdb_230_dp_max_link_rate(const int vbt_max_link_rate)
+{
+ switch (vbt_max_link_rate) {
+ default:
+ case BDB_230_VBT_DP_MAX_LINK_RATE_DEF:
+ return 0;
+ case BDB_230_VBT_DP_MAX_LINK_RATE_UHBR20:
+ return 2000000;
+ case BDB_230_VBT_DP_MAX_LINK_RATE_UHBR13P5:
+ return 1350000;
+ case BDB_230_VBT_DP_MAX_LINK_RATE_UHBR10:
+ return 1000000;
+ case BDB_230_VBT_DP_MAX_LINK_RATE_HBR3:
+ return 810000;
+ case BDB_230_VBT_DP_MAX_LINK_RATE_HBR2:
+ return 540000;
+ case BDB_230_VBT_DP_MAX_LINK_RATE_HBR:
+ return 270000;
+ case BDB_230_VBT_DP_MAX_LINK_RATE_LBR:
+ return 162000;
+ }
+}
+
+static int parse_bdb_216_dp_max_link_rate(const int vbt_max_link_rate)
+{
+ switch (vbt_max_link_rate) {
+ default:
+ case BDB_216_VBT_DP_MAX_LINK_RATE_HBR3:
+ return 810000;
+ case BDB_216_VBT_DP_MAX_LINK_RATE_HBR2:
+ return 540000;
+ case BDB_216_VBT_DP_MAX_LINK_RATE_HBR:
+ return 270000;
+ case BDB_216_VBT_DP_MAX_LINK_RATE_LBR:
+ return 162000;
+ }
+}
+
+static int _intel_bios_dp_max_link_rate(const struct intel_bios_encoder_data *devdata)
+{
+ if (!devdata || devdata->i915->display.vbt.version < 216)
+ return 0;
+
+ if (devdata->i915->display.vbt.version >= 230)
+ return parse_bdb_230_dp_max_link_rate(devdata->child.dp_max_link_rate);
+ else
+ return parse_bdb_216_dp_max_link_rate(devdata->child.dp_max_link_rate);
+}
+
+static int _intel_bios_dp_max_lane_count(const struct intel_bios_encoder_data *devdata)
+{
+ if (!devdata || devdata->i915->display.vbt.version < 244)
+ return 0;
+
+ return devdata->child.dp_max_lane_count + 1;
+}
+
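+/*
+ * Port A on pre-display-version-12 platforms can't do DVI/HDMI, so
+ * strip those capabilities if a broken VBT claims them.
+ */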
+static void sanitize_device_type(struct intel_bios_encoder_data *devdata,
+ enum port port)
+{
+ struct drm_i915_private *i915 = devdata->i915;
+ bool is_hdmi;
+
+ if (port != PORT_A || DISPLAY_VER(i915) >= 12)
+ return;
+
+ if (!intel_bios_encoder_supports_dvi(devdata))
+ return;
+
+ is_hdmi = intel_bios_encoder_supports_hdmi(devdata);
+
+ drm_dbg_kms(&i915->drm, "VBT claims port A supports DVI%s, ignoring\n",
+ is_hdmi ? "/HDMI" : "");
+
+ devdata->child.device_type &= ~DEVICE_TYPE_TMDS_DVI_SIGNALING;
+ devdata->child.device_type |= DEVICE_TYPE_NOT_HDMI_OUTPUT;
+}
+
+static bool
+intel_bios_encoder_supports_crt(const struct intel_bios_encoder_data *devdata)
+{
+ return devdata->child.device_type & DEVICE_TYPE_ANALOG_OUTPUT;
+}
+
+bool
+intel_bios_encoder_supports_dvi(const struct intel_bios_encoder_data *devdata)
+{
+ return devdata->child.device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING;
+}
+
+bool
+intel_bios_encoder_supports_hdmi(const struct intel_bios_encoder_data *devdata)
+{
+ return intel_bios_encoder_supports_dvi(devdata) &&
+ (devdata->child.device_type & DEVICE_TYPE_NOT_HDMI_OUTPUT) == 0;
+}
+
+bool
+intel_bios_encoder_supports_dp(const struct intel_bios_encoder_data *devdata)
+{
+ return devdata->child.device_type & DEVICE_TYPE_DISPLAYPORT_OUTPUT;
+}
+
+static bool
+intel_bios_encoder_supports_edp(const struct intel_bios_encoder_data *devdata)
+{
+ return intel_bios_encoder_supports_dp(devdata) &&
+ devdata->child.device_type & DEVICE_TYPE_INTERNAL_CONNECTOR;
+}
+
+static bool
+intel_bios_encoder_supports_dsi(const struct intel_bios_encoder_data *devdata)
+{
+ return devdata->child.device_type & DEVICE_TYPE_MIPI_OUTPUT;
+}
+
+static int _intel_bios_hdmi_level_shift(const struct intel_bios_encoder_data *devdata)
+{
+ if (!devdata || devdata->i915->display.vbt.version < 158)
+ return -1;
+
+ return devdata->child.hdmi_level_shifter_value;
+}
+
+static int _intel_bios_max_tmds_clock(const struct intel_bios_encoder_data *devdata)
+{
+ if (!devdata || devdata->i915->display.vbt.version < 204)
+ return 0;
+
+ switch (devdata->child.hdmi_max_data_rate) {
+ default:
+ MISSING_CASE(devdata->child.hdmi_max_data_rate);
+ fallthrough;
+ case HDMI_MAX_DATA_RATE_PLATFORM:
+ return 0;
+ case HDMI_MAX_DATA_RATE_594:
+ return 594000;
+ case HDMI_MAX_DATA_RATE_340:
+ return 340000;
+ case HDMI_MAX_DATA_RATE_300:
+ return 300000;
+ case HDMI_MAX_DATA_RATE_297:
+ return 297000;
+ case HDMI_MAX_DATA_RATE_165:
+ return 165000;
+ }
+}
+
+static bool is_port_valid(struct drm_i915_private *i915, enum port port)
+{
+ /*
+ * On some ICL SKUs port F is not present, but broken VBTs mark
+ * the port as present. Only try to initialize port F for the
+ * SKUs that may actually have it.
+ */
+ if (port == PORT_F && IS_ICELAKE(i915))
+ return IS_ICL_WITH_PORT_F(i915);
+
+ return true;
+}
+
+static void print_ddi_port(const struct intel_bios_encoder_data *devdata,
+ enum port port)
+{
+ struct drm_i915_private *i915 = devdata->i915;
+ const struct child_device_config *child = &devdata->child;
+ bool is_dvi, is_hdmi, is_dp, is_edp, is_dsi, is_crt, supports_typec_usb, supports_tbt;
+ int dp_boost_level, dp_max_link_rate, hdmi_boost_level, hdmi_level_shift, max_tmds_clock;
+
+ is_dvi = intel_bios_encoder_supports_dvi(devdata);
+ is_dp = intel_bios_encoder_supports_dp(devdata);
+ is_crt = intel_bios_encoder_supports_crt(devdata);
+ is_hdmi = intel_bios_encoder_supports_hdmi(devdata);
+ is_edp = intel_bios_encoder_supports_edp(devdata);
+ is_dsi = intel_bios_encoder_supports_dsi(devdata);
+
+ supports_typec_usb = intel_bios_encoder_supports_typec_usb(devdata);
+ supports_tbt = intel_bios_encoder_supports_tbt(devdata);
+
+ drm_dbg_kms(&i915->drm,
+ "Port %c VBT info: CRT:%d DVI:%d HDMI:%d DP:%d eDP:%d DSI:%d LSPCON:%d USB-Type-C:%d TBT:%d DSC:%d\n",
+ port_name(port), is_crt, is_dvi, is_hdmi, is_dp, is_edp, is_dsi,
+ HAS_LSPCON(i915) && child->lspcon,
+ supports_typec_usb, supports_tbt,
+ devdata->dsc != NULL);
+
+ hdmi_level_shift = _intel_bios_hdmi_level_shift(devdata);
+ if (hdmi_level_shift >= 0) {
+ drm_dbg_kms(&i915->drm,
+ "Port %c VBT HDMI level shift: %d\n",
+ port_name(port), hdmi_level_shift);
+ }
+
+ max_tmds_clock = _intel_bios_max_tmds_clock(devdata);
+ if (max_tmds_clock)
+ drm_dbg_kms(&i915->drm,
+ "Port %c VBT HDMI max TMDS clock: %d kHz\n",
+ port_name(port), max_tmds_clock);
+
+ /* I_boost config for SKL and above */
+ dp_boost_level = intel_bios_encoder_dp_boost_level(devdata);
+ if (dp_boost_level)
+ drm_dbg_kms(&i915->drm,
+ "Port %c VBT (e)DP boost level: %d\n",
+ port_name(port), dp_boost_level);
+
+ hdmi_boost_level = intel_bios_encoder_hdmi_boost_level(devdata);
+ if (hdmi_boost_level)
+ drm_dbg_kms(&i915->drm,
+ "Port %c VBT HDMI boost level: %d\n",
+ port_name(port), hdmi_boost_level);
+
+ dp_max_link_rate = _intel_bios_dp_max_link_rate(devdata);
+ if (dp_max_link_rate)
+ drm_dbg_kms(&i915->drm,
+ "Port %c VBT DP max link rate: %d\n",
+ port_name(port), dp_max_link_rate);
+}
+
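+/*
+ * Map the child device to a port, sanitize its DDC pin and AUX channel
+ * assignments, and register it as the canonical device for that port.
+ */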
+static void parse_ddi_port(struct intel_bios_encoder_data *devdata)
+{
+ struct drm_i915_private *i915 = devdata->i915;
+ const struct child_device_config *child = &devdata->child;
+ enum port port;
+
+ port = dvo_port_to_port(i915, child->dvo_port);
+ if (port == PORT_NONE && DISPLAY_VER(i915) >= 11)
+ port = dsi_dvo_port_to_port(i915, child->dvo_port);
+ if (port == PORT_NONE)
+ return;
+
+ if (!is_port_valid(i915, port)) {
+ drm_dbg_kms(&i915->drm,
+ "VBT reports port %c as supported, but that can't be true: skipping\n",
+ port_name(port));
+ return;
+ }
+
+ if (i915->display.vbt.ports[port]) {
+ drm_dbg_kms(&i915->drm,
+ "More than one child device for port %c in VBT, using the first.\n",
+ port_name(port));
+ return;
+ }
+
+ sanitize_device_type(devdata, port);
+
+ if (intel_bios_encoder_supports_dvi(devdata))
+ sanitize_ddc_pin(devdata, port);
+
+ if (intel_bios_encoder_supports_dp(devdata))
+ sanitize_aux_ch(devdata, port);
+
+ i915->display.vbt.ports[port] = devdata;
+}
+
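+/* DDI port info in the VBT is only used on display version 5+ and g4x. */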
+static bool has_ddi_port_info(struct drm_i915_private *i915)
+{
+ return DISPLAY_VER(i915) >= 5 || IS_G4X(i915);
+}
+
+static void parse_ddi_ports(struct drm_i915_private *i915)
+{
+ struct intel_bios_encoder_data *devdata;
+ enum port port;
+
+ if (!has_ddi_port_info(i915))
+ return;
+
+ list_for_each_entry(devdata, &i915->display.vbt.display_devices, node)
+ parse_ddi_port(devdata);
+
+ for_each_port(port) {
+ if (i915->display.vbt.ports[port])
+ print_ddi_port(i915->display.vbt.ports[port], port);
+ }
+}
+
+static void
+parse_general_definitions(struct drm_i915_private *i915)
+{
+ const struct bdb_general_definitions *defs;
+ struct intel_bios_encoder_data *devdata;
+ const struct child_device_config *child;
+ int i, child_device_num;
+ u8 expected_size;
+ u16 block_size;
+ int bus_pin;
+
+ defs = find_section(i915, BDB_GENERAL_DEFINITIONS);
+ if (!defs) {
+ drm_dbg_kms(&i915->drm,
+			    "No general definition block found, no devices defined.\n");
+ return;
+ }
+
+ block_size = get_blocksize(defs);
+ if (block_size < sizeof(*defs)) {
+ drm_dbg_kms(&i915->drm,
+ "General definitions block too small (%u)\n",
+ block_size);
+ return;
+ }
+
+ bus_pin = defs->crt_ddc_gmbus_pin;
+ drm_dbg_kms(&i915->drm, "crt_ddc_bus_pin: %d\n", bus_pin);
+ if (intel_gmbus_is_valid_pin(i915, bus_pin))
+ i915->display.vbt.crt_ddc_pin = bus_pin;
+
+ if (i915->display.vbt.version < 106) {
+ expected_size = 22;
+ } else if (i915->display.vbt.version < 111) {
+ expected_size = 27;
+ } else if (i915->display.vbt.version < 195) {
+ expected_size = LEGACY_CHILD_DEVICE_CONFIG_SIZE;
+ } else if (i915->display.vbt.version == 195) {
+ expected_size = 37;
+ } else if (i915->display.vbt.version <= 215) {
+ expected_size = 38;
+ } else if (i915->display.vbt.version <= 237) {
+ expected_size = 39;
+ } else {
+ expected_size = sizeof(*child);
+ BUILD_BUG_ON(sizeof(*child) < 39);
+ drm_dbg(&i915->drm,
+ "Expected child device config size for VBT version %u not known; assuming %u\n",
+ i915->display.vbt.version, expected_size);
+ }
+
+ /* Flag an error for unexpected size, but continue anyway. */
+ if (defs->child_dev_size != expected_size)
+ drm_err(&i915->drm,
+ "Unexpected child device config size %u (expected %u for VBT version %u)\n",
+ defs->child_dev_size, expected_size, i915->display.vbt.version);
+
+ /* The legacy sized child device config is the minimum we need. */
+ if (defs->child_dev_size < LEGACY_CHILD_DEVICE_CONFIG_SIZE) {
+ drm_dbg_kms(&i915->drm,
+ "Child device config size %u is too small.\n",
+ defs->child_dev_size);
+ return;
+ }
+
+	/* get the number of child devices */
+ child_device_num = (block_size - sizeof(*defs)) / defs->child_dev_size;
+
+ for (i = 0; i < child_device_num; i++) {
+ child = child_device_ptr(defs, i);
+ if (!child->device_type)
+ continue;
+
+ drm_dbg_kms(&i915->drm,
+ "Found VBT child device with type 0x%x\n",
+ child->device_type);
+
+ devdata = kzalloc(sizeof(*devdata), GFP_KERNEL);
+ if (!devdata)
+ break;
+
+ devdata->i915 = i915;
+
+ /*
+		 * Copy as much of the child device config as we know about
+		 * (sizeof) and as is available (child_dev_size). Accesses to
+		 * the data must be gated on the VBT version.
+ */
+ memcpy(&devdata->child, child,
+ min_t(size_t, defs->child_dev_size, sizeof(*child)));
+
+ list_add_tail(&devdata->node, &i915->display.vbt.display_devices);
+ }
+
+ if (list_empty(&i915->display.vbt.display_devices))
+ drm_dbg_kms(&i915->drm,
+			    "no child devices parsed from VBT\n");
+}
+
+/* Common defaults which may be overridden by VBT. */
+static void
+init_vbt_defaults(struct drm_i915_private *i915)
+{
+ i915->display.vbt.crt_ddc_pin = GMBUS_PIN_VGADDC;
+
+ /* general features */
+ i915->display.vbt.int_tv_support = 1;
+ i915->display.vbt.int_crt_support = 1;
+
+ /* driver features */
+ i915->display.vbt.int_lvds_support = 1;
+
+ /* Default to using SSC */
+ i915->display.vbt.lvds_use_ssc = 1;
+ /*
+ * Core/SandyBridge/IvyBridge use alternative (120MHz) reference
+ * clock for LVDS.
+ */
+ i915->display.vbt.lvds_ssc_freq = intel_bios_ssc_frequency(i915,
+ !HAS_PCH_SPLIT(i915));
+ drm_dbg_kms(&i915->drm, "Set default to SSC at %d kHz\n",
+ i915->display.vbt.lvds_ssc_freq);
+}
+
+/* Common defaults which may be overridden by VBT. */
+static void
+init_vbt_panel_defaults(struct intel_panel *panel)
+{
+ /* Default to having backlight */
+ panel->vbt.backlight.present = true;
+
+ /* LFP panel data */
+ panel->vbt.lvds_dither = true;
+}
+
+/* Defaults to initialize only if there is no VBT. */
+static void
+init_vbt_missing_defaults(struct drm_i915_private *i915)
+{
+ enum port port;
+ int ports = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C) |
+ BIT(PORT_D) | BIT(PORT_E) | BIT(PORT_F);
+
+ if (!HAS_DDI(i915) && !IS_CHERRYVIEW(i915))
+ return;
+
+ for_each_port_masked(port, ports) {
+ struct intel_bios_encoder_data *devdata;
+ struct child_device_config *child;
+ enum phy phy = intel_port_to_phy(i915, port);
+
+ /*
+		 * The VBT specifies the TypeC mode (native, TBT/USB) and we
+		 * don't want to guess it here, so skip TypeC ports.
+ */
+ if (intel_phy_is_tc(i915, phy))
+ continue;
+
+ /* Create fake child device config */
+ devdata = kzalloc(sizeof(*devdata), GFP_KERNEL);
+ if (!devdata)
+ break;
+
+ devdata->i915 = i915;
+ child = &devdata->child;
+
+ if (port == PORT_F)
+ child->dvo_port = DVO_PORT_HDMIF;
+ else if (port == PORT_E)
+ child->dvo_port = DVO_PORT_HDMIE;
+ else
+ child->dvo_port = DVO_PORT_HDMIA + port;
+
+ if (port != PORT_A && port != PORT_E)
+ child->device_type |= DEVICE_TYPE_TMDS_DVI_SIGNALING;
+
+ if (port != PORT_E)
+ child->device_type |= DEVICE_TYPE_DISPLAYPORT_OUTPUT;
+
+ if (port == PORT_A)
+ child->device_type |= DEVICE_TYPE_INTERNAL_CONNECTOR;
+
+ list_add_tail(&devdata->node, &i915->display.vbt.display_devices);
+
+ drm_dbg_kms(&i915->drm,
+			    "Generating default VBT child device with type 0x%04x on port %c\n",
+ child->device_type, port_name(port));
+ }
+
+ /* Bypass some minimum baseline VBT version checks */
+ i915->display.vbt.version = 155;
+}
+
+static const struct bdb_header *get_bdb_header(const struct vbt_header *vbt)
+{
+ const void *_vbt = vbt;
+
+ return _vbt + vbt->bdb_offset;
+}
+
+/**
+ * intel_bios_is_valid_vbt - does the given buffer contain a valid VBT
+ * @buf: pointer to a buffer to validate
+ * @size: size of the buffer
+ *
+ * Returns true on valid VBT.
+ */
+bool intel_bios_is_valid_vbt(const void *buf, size_t size)
+{
+ const struct vbt_header *vbt = buf;
+ const struct bdb_header *bdb;
+
+ if (!vbt)
+ return false;
+
+ if (sizeof(struct vbt_header) > size) {
+ DRM_DEBUG_DRIVER("VBT header incomplete\n");
+ return false;
+ }
+
+ if (memcmp(vbt->signature, "$VBT", 4)) {
+ DRM_DEBUG_DRIVER("VBT invalid signature\n");
+ return false;
+ }
+
+ if (vbt->vbt_size > size) {
+ DRM_DEBUG_DRIVER("VBT incomplete (vbt_size overflows)\n");
+ return false;
+ }
+
+ size = vbt->vbt_size;
+
+ if (range_overflows_t(size_t,
+ vbt->bdb_offset,
+ sizeof(struct bdb_header),
+ size)) {
+ DRM_DEBUG_DRIVER("BDB header incomplete\n");
+ return false;
+ }
+
+ bdb = get_bdb_header(vbt);
+ if (range_overflows_t(size_t, vbt->bdb_offset, bdb->bdb_size, size)) {
+ DRM_DEBUG_DRIVER("BDB incomplete\n");
+ return false;
+ }
+
+	return true;
+}
+
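+/*
+ * On discrete GPUs the option ROM lives in SPI flash: scan it 32 bits
+ * at a time through the primary SPI MMIO window for the "$VBT"
+ * signature, then copy out and validate the table.
+ */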
+static struct vbt_header *spi_oprom_get_vbt(struct drm_i915_private *i915)
+{
+ u32 count, data, found, store = 0;
+ u32 static_region, oprom_offset;
+ u32 oprom_size = 0x200000;
+ u16 vbt_size;
+ u32 *vbt;
+
+ static_region = intel_uncore_read(&i915->uncore, SPI_STATIC_REGIONS);
+ static_region &= OPTIONROM_SPI_REGIONID_MASK;
+ intel_uncore_write(&i915->uncore, PRIMARY_SPI_REGIONID, static_region);
+
+ oprom_offset = intel_uncore_read(&i915->uncore, OROM_OFFSET);
+ oprom_offset &= OROM_OFFSET_MASK;
+
+ for (count = 0; count < oprom_size; count += 4) {
+ intel_uncore_write(&i915->uncore, PRIMARY_SPI_ADDRESS, oprom_offset + count);
+ data = intel_uncore_read(&i915->uncore, PRIMARY_SPI_TRIGGER);
+
+ if (data == *((const u32 *)"$VBT")) {
+ found = oprom_offset + count;
+ break;
+ }
+ }
+
+ if (count >= oprom_size)
+ goto err_not_found;
+
+ /* Get VBT size and allocate space for the VBT */
+ intel_uncore_write(&i915->uncore, PRIMARY_SPI_ADDRESS, found +
+ offsetof(struct vbt_header, vbt_size));
+ vbt_size = intel_uncore_read(&i915->uncore, PRIMARY_SPI_TRIGGER);
+ vbt_size &= 0xffff;
+
+ vbt = kzalloc(round_up(vbt_size, 4), GFP_KERNEL);
+ if (!vbt)
+ goto err_not_found;
+
+ for (count = 0; count < vbt_size; count += 4) {
+ intel_uncore_write(&i915->uncore, PRIMARY_SPI_ADDRESS, found + count);
+ data = intel_uncore_read(&i915->uncore, PRIMARY_SPI_TRIGGER);
+ *(vbt + store++) = data;
+ }
+
+ if (!intel_bios_is_valid_vbt(vbt, vbt_size))
+ goto err_free_vbt;
+
+ drm_dbg_kms(&i915->drm, "Found valid VBT in SPI flash\n");
+
+ return (struct vbt_header *)vbt;
+
+err_free_vbt:
+ kfree(vbt);
+err_not_found:
+ return NULL;
+}
+
+static struct vbt_header *oprom_get_vbt(struct drm_i915_private *i915)
+{
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
+ void __iomem *p = NULL, *oprom;
+ struct vbt_header *vbt;
+ u16 vbt_size;
+ size_t i, size;
+
+ oprom = pci_map_rom(pdev, &size);
+ if (!oprom)
+ return NULL;
+
+ /* Scour memory looking for the VBT signature. */
+ for (i = 0; i + 4 < size; i += 4) {
+ if (ioread32(oprom + i) != *((const u32 *)"$VBT"))
+ continue;
+
+ p = oprom + i;
+ size -= i;
+ break;
+ }
+
+ if (!p)
+ goto err_unmap_oprom;
+
+ if (sizeof(struct vbt_header) > size) {
+ drm_dbg(&i915->drm, "VBT header incomplete\n");
+ goto err_unmap_oprom;
+ }
+
+ vbt_size = ioread16(p + offsetof(struct vbt_header, vbt_size));
+ if (vbt_size > size) {
+ drm_dbg(&i915->drm,
+ "VBT incomplete (vbt_size overflows)\n");
+ goto err_unmap_oprom;
+ }
+
+ /* The rest will be validated by intel_bios_is_valid_vbt() */
+ vbt = kmalloc(vbt_size, GFP_KERNEL);
+ if (!vbt)
+ goto err_unmap_oprom;
+
+ memcpy_fromio(vbt, p, vbt_size);
+
+ if (!intel_bios_is_valid_vbt(vbt, vbt_size))
+ goto err_free_vbt;
+
+ pci_unmap_rom(pdev, oprom);
+
+ drm_dbg_kms(&i915->drm, "Found valid VBT in PCI ROM\n");
+
+ return vbt;
+
+err_free_vbt:
+ kfree(vbt);
+err_unmap_oprom:
+ pci_unmap_rom(pdev, oprom);
+
+ return NULL;
+}
+
+/**
+ * intel_bios_init - find VBT and initialize settings from the BIOS
+ * @i915: i915 device instance
+ *
+ * Parse and initialize settings from the Video BIOS Tables (VBT). If the VBT
+ * was not found in the ACPI OpRegion, try to find it in SPI flash (on
+ * discrete GPUs) or in the PCI ROM. Also initialize some defaults if the
+ * VBT is not present at all.
+ */
+void intel_bios_init(struct drm_i915_private *i915)
+{
+ const struct vbt_header *vbt = i915->display.opregion.vbt;
+ struct vbt_header *oprom_vbt = NULL;
+ const struct bdb_header *bdb;
+
+ INIT_LIST_HEAD(&i915->display.vbt.display_devices);
+ INIT_LIST_HEAD(&i915->display.vbt.bdb_blocks);
+
+ if (!HAS_DISPLAY(i915)) {
+ drm_dbg_kms(&i915->drm,
+ "Skipping VBT init due to disabled display.\n");
+ return;
+ }
+
+ init_vbt_defaults(i915);
+
+ /*
+	 * If the OpRegion does not have a VBT, look in SPI flash through MMIO
+	 * or via the PCI ROM mapping.
+ */
+ if (!vbt && IS_DGFX(i915)) {
+ oprom_vbt = spi_oprom_get_vbt(i915);
+ vbt = oprom_vbt;
+ }
+
+ if (!vbt) {
+ oprom_vbt = oprom_get_vbt(i915);
+ vbt = oprom_vbt;
+ }
+
+ if (!vbt)
+ goto out;
+
+ bdb = get_bdb_header(vbt);
+ i915->display.vbt.version = bdb->version;
+
+ drm_dbg_kms(&i915->drm,
+ "VBT signature \"%.*s\", BDB version %d\n",
+ (int)sizeof(vbt->signature), vbt->signature, i915->display.vbt.version);
+
+ init_bdb_blocks(i915, bdb);
+
+ /* Grab useful general definitions */
+ parse_general_features(i915);
+ parse_general_definitions(i915);
+ parse_driver_features(i915);
+
+ /* Depends on child device list */
+ parse_compression_parameters(i915);
+
+out:
+ if (!vbt) {
+ drm_info(&i915->drm,
+ "Failed to find VBIOS tables (VBT)\n");
+ init_vbt_missing_defaults(i915);
+ }
+
+ /* Further processing on pre-parsed or generated child device data */
+ parse_sdvo_device_mapping(i915);
+ parse_ddi_ports(i915);
+
+ kfree(oprom_vbt);
+}
+
+static void intel_bios_init_panel(struct drm_i915_private *i915,
+ struct intel_panel *panel,
+ const struct intel_bios_encoder_data *devdata,
+ const struct edid *edid,
+ bool use_fallback)
+{
+ /* already have it? */
+ if (panel->vbt.panel_type >= 0) {
+ drm_WARN_ON(&i915->drm, !use_fallback);
+ return;
+ }
+
+ panel->vbt.panel_type = get_panel_type(i915, devdata,
+ edid, use_fallback);
+ if (panel->vbt.panel_type < 0) {
+ drm_WARN_ON(&i915->drm, use_fallback);
+ return;
+ }
+
+ init_vbt_panel_defaults(panel);
+
+ parse_panel_options(i915, panel);
+ parse_generic_dtd(i915, panel);
+ parse_lfp_data(i915, panel);
+ parse_lfp_backlight(i915, panel);
+ parse_sdvo_panel_data(i915, panel);
+ parse_panel_driver_features(i915, panel);
+ parse_power_conservation_features(i915, panel);
+ parse_edp(i915, panel);
+ parse_psr(i915, panel);
+ parse_mipi_config(i915, panel);
+ parse_mipi_sequence(i915, panel);
+}
+
+void intel_bios_init_panel_early(struct drm_i915_private *i915,
+ struct intel_panel *panel,
+ const struct intel_bios_encoder_data *devdata)
+{
+ intel_bios_init_panel(i915, panel, devdata, NULL, false);
+}
+
+void intel_bios_init_panel_late(struct drm_i915_private *i915,
+ struct intel_panel *panel,
+ const struct intel_bios_encoder_data *devdata,
+ const struct edid *edid)
+{
+ intel_bios_init_panel(i915, panel, devdata, edid, true);
+}
+
+/**
+ * intel_bios_driver_remove - Free any resources allocated by intel_bios_init()
+ * @i915: i915 device instance
+ */
+void intel_bios_driver_remove(struct drm_i915_private *i915)
+{
+ struct intel_bios_encoder_data *devdata, *nd;
+ struct bdb_block_entry *entry, *ne;
+
+ list_for_each_entry_safe(devdata, nd, &i915->display.vbt.display_devices, node) {
+ list_del(&devdata->node);
+ kfree(devdata->dsc);
+ kfree(devdata);
+ }
+
+ list_for_each_entry_safe(entry, ne, &i915->display.vbt.bdb_blocks, node) {
+ list_del(&entry->node);
+ kfree(entry);
+ }
+}
+
+void intel_bios_fini_panel(struct intel_panel *panel)
+{
+ kfree(panel->vbt.sdvo_lvds_vbt_mode);
+ panel->vbt.sdvo_lvds_vbt_mode = NULL;
+ kfree(panel->vbt.lfp_lvds_vbt_mode);
+ panel->vbt.lfp_lvds_vbt_mode = NULL;
+ kfree(panel->vbt.dsi.data);
+ panel->vbt.dsi.data = NULL;
+ kfree(panel->vbt.dsi.pps);
+ panel->vbt.dsi.pps = NULL;
+ kfree(panel->vbt.dsi.config);
+ panel->vbt.dsi.config = NULL;
+ kfree(panel->vbt.dsi.deassert_seq);
+ panel->vbt.dsi.deassert_seq = NULL;
+}
+
+/**
+ * intel_bios_is_tv_present - is integrated TV present in VBT
+ * @i915: i915 device instance
+ *
+ * Return true if TV is present. If no child devices were parsed from VBT,
+ * assume TV is present.
+ */
+bool intel_bios_is_tv_present(struct drm_i915_private *i915)
+{
+ const struct intel_bios_encoder_data *devdata;
+ const struct child_device_config *child;
+
+ if (!i915->display.vbt.int_tv_support)
+ return false;
+
+ if (list_empty(&i915->display.vbt.display_devices))
+ return true;
+
+ list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) {
+ child = &devdata->child;
+
+ /*
+ * If the device type is not TV, continue.
+ */
+ switch (child->device_type) {
+ case DEVICE_TYPE_INT_TV:
+ case DEVICE_TYPE_TV:
+ case DEVICE_TYPE_TV_SVIDEO_COMPOSITE:
+ break;
+ default:
+ continue;
+ }
+		/*
+		 * Only regard the device as present when the addin_offset is
+		 * non-zero.
+		 */
+ if (child->addin_offset)
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * intel_bios_is_lvds_present - is LVDS present in VBT
+ * @i915: i915 device instance
+ * @i2c_pin: i2c pin for LVDS if present
+ *
+ * Return true if LVDS is present. If no child devices were parsed from VBT,
+ * assume LVDS is present.
+ */
+bool intel_bios_is_lvds_present(struct drm_i915_private *i915, u8 *i2c_pin)
+{
+ const struct intel_bios_encoder_data *devdata;
+ const struct child_device_config *child;
+
+ if (list_empty(&i915->display.vbt.display_devices))
+ return true;
+
+ list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) {
+ child = &devdata->child;
+
+		/*
+		 * If the device type is not LFP, continue. We have to check
+		 * both the new identifier and the old one for compatibility
+		 * with some BIOSes.
+		 */
+ if (child->device_type != DEVICE_TYPE_INT_LFP &&
+ child->device_type != DEVICE_TYPE_LFP)
+ continue;
+
+ if (intel_gmbus_is_valid_pin(i915, child->i2c_pin))
+ *i2c_pin = child->i2c_pin;
+
+ /* However, we cannot trust the BIOS writers to populate
+ * the VBT correctly. Since LVDS requires additional
+ * information from AIM blocks, a non-zero addin offset is
+ * a good indicator that the LVDS is actually present.
+ */
+ if (child->addin_offset)
+ return true;
+
+ /* But even then some BIOS writers perform some black magic
+ * and instantiate the device without reference to any
+ * additional data. Trust that if the VBT was written into
+ * the OpRegion then they have validated the LVDS's existence.
+ */
+ if (i915->display.opregion.vbt)
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * intel_bios_is_port_present - is the specified digital port present
+ * @i915: i915 device instance
+ * @port: port to check
+ *
+ * Return true if the device in %port is present.
+ */
+bool intel_bios_is_port_present(struct drm_i915_private *i915, enum port port)
+{
+ if (WARN_ON(!has_ddi_port_info(i915)))
+ return true;
+
+ return i915->display.vbt.ports[port];
+}
+
+/**
+ * intel_bios_is_port_edp - is the device in given port eDP
+ * @i915: i915 device instance
+ * @port: port to check
+ *
+ * Return true if the device in %port is eDP.
+ */
+bool intel_bios_is_port_edp(struct drm_i915_private *i915, enum port port)
+{
+ const struct intel_bios_encoder_data *devdata =
+ intel_bios_encoder_data_lookup(i915, port);
+
+ return devdata && intel_bios_encoder_supports_edp(devdata);
+}
+
+static bool intel_bios_encoder_supports_dp_dual_mode(const struct intel_bios_encoder_data *devdata)
+{
+ const struct child_device_config *child = &devdata->child;
+
+ if (!intel_bios_encoder_supports_dp(devdata) ||
+ !intel_bios_encoder_supports_hdmi(devdata))
+ return false;
+
+ if (dvo_port_type(child->dvo_port) == DVO_PORT_DPA)
+ return true;
+
+	/* Only accept an HDMI dvo_port as DP++ if it has an AUX channel */
+ if (dvo_port_type(child->dvo_port) == DVO_PORT_HDMIA &&
+ child->aux_channel != 0)
+ return true;
+
+ return false;
+}
+
+bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *i915,
+ enum port port)
+{
+ const struct intel_bios_encoder_data *devdata =
+ intel_bios_encoder_data_lookup(i915, port);
+
+ return devdata && intel_bios_encoder_supports_dp_dual_mode(devdata);
+}
+
+/**
+ * intel_bios_is_dsi_present - is DSI present in VBT
+ * @i915: i915 device instance
+ * @port: port for DSI if present
+ *
+ * Return true if DSI is present, and return the port in %port.
+ */
+bool intel_bios_is_dsi_present(struct drm_i915_private *i915,
+ enum port *port)
+{
+ const struct intel_bios_encoder_data *devdata;
+ const struct child_device_config *child;
+ u8 dvo_port;
+
+ list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) {
+ child = &devdata->child;
+
+ if (!(child->device_type & DEVICE_TYPE_MIPI_OUTPUT))
+ continue;
+
+ dvo_port = child->dvo_port;
+
+ if (dsi_dvo_port_to_port(i915, dvo_port) == PORT_NONE) {
+ drm_dbg_kms(&i915->drm,
+ "VBT has unsupported DSI port %c\n",
+ port_name(dvo_port - DVO_PORT_MIPIA));
+ continue;
+ }
+
+ if (port)
+ *port = dsi_dvo_port_to_port(i915, dvo_port);
+ return true;
+ }
+
+ return false;
+}
+
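+/*
+ * Fill the DSC state from the VBT compression parameters entry,
+ * limiting bpc to what both the panel (VBT) and the caller
+ * (dsc_max_bpc) support.
+ */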
+static void fill_dsc(struct intel_crtc_state *crtc_state,
+ struct dsc_compression_parameters_entry *dsc,
+ int dsc_max_bpc)
+{
+ struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
+ int bpc = 8;
+
+ vdsc_cfg->dsc_version_major = dsc->version_major;
+ vdsc_cfg->dsc_version_minor = dsc->version_minor;
+
+ if (dsc->support_12bpc && dsc_max_bpc >= 12)
+ bpc = 12;
+ else if (dsc->support_10bpc && dsc_max_bpc >= 10)
+ bpc = 10;
+ else if (dsc->support_8bpc && dsc_max_bpc >= 8)
+ bpc = 8;
+ else
+		DRM_DEBUG_KMS("VBT: Unsupported BPC %d for DSC\n",
+ dsc_max_bpc);
+
+ crtc_state->pipe_bpp = bpc * 3;
+
+ crtc_state->dsc.compressed_bpp = min(crtc_state->pipe_bpp,
+ VBT_DSC_MAX_BPP(dsc->max_bpp));
+
+ /*
+ * FIXME: This is ugly, and slice count should take DSC engine
+ * throughput etc. into account.
+ *
+ * Also, per spec DSI supports 1, 2, 3 or 4 horizontal slices.
+ */
+ if (dsc->slices_per_line & BIT(2)) {
+ crtc_state->dsc.slice_count = 4;
+ } else if (dsc->slices_per_line & BIT(1)) {
+ crtc_state->dsc.slice_count = 2;
+ } else {
+ /* FIXME */
+ if (!(dsc->slices_per_line & BIT(0)))
+ DRM_DEBUG_KMS("VBT: Unsupported DSC slice count for DSI\n");
+
+ crtc_state->dsc.slice_count = 1;
+ }
+
+ if (crtc_state->hw.adjusted_mode.crtc_hdisplay %
+ crtc_state->dsc.slice_count != 0)
+ DRM_DEBUG_KMS("VBT: DSC hdisplay %d not divisible by slice count %d\n",
+ crtc_state->hw.adjusted_mode.crtc_hdisplay,
+ crtc_state->dsc.slice_count);
+
+ /*
+ * The VBT rc_buffer_block_size and rc_buffer_size definitions
+ * correspond to DP 1.4 DPCD offsets 0x62 and 0x63.
+ */
+ vdsc_cfg->rc_model_size = drm_dsc_dp_rc_buffer_size(dsc->rc_buffer_block_size,
+ dsc->rc_buffer_size);
+
+ /* FIXME: DSI spec says bpc + 1 for this one */
+ vdsc_cfg->line_buf_depth = VBT_DSC_LINE_BUFFER_DEPTH(dsc->line_buffer_depth);
+
+ vdsc_cfg->block_pred_enable = dsc->block_prediction_enable;
+
+ vdsc_cfg->slice_height = dsc->slice_height;
+}
+
+/* FIXME: initially DSI specific */
+bool intel_bios_get_dsc_params(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ int dsc_max_bpc)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ const struct intel_bios_encoder_data *devdata;
+ const struct child_device_config *child;
+
+ list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) {
+ child = &devdata->child;
+
+ if (!(child->device_type & DEVICE_TYPE_MIPI_OUTPUT))
+ continue;
+
+ if (dsi_dvo_port_to_port(i915, child->dvo_port) == encoder->port) {
+ if (!devdata->dsc)
+ return false;
+
+ if (crtc_state)
+ fill_dsc(crtc_state, devdata->dsc, dsc_max_bpc);
+
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/**
+ * intel_bios_is_port_hpd_inverted - is HPD inverted for %port
+ * @i915: i915 device instance
+ * @port: port to check
+ *
+ * Return true if HPD should be inverted for %port.
+ */
+bool
+intel_bios_is_port_hpd_inverted(const struct drm_i915_private *i915,
+ enum port port)
+{
+ const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[port];
+
+ if (drm_WARN_ON_ONCE(&i915->drm,
+ !IS_GEMINILAKE(i915) && !IS_BROXTON(i915)))
+ return false;
+
+ return devdata && devdata->child.hpd_invert;
+}
+
+/**
+ * intel_bios_is_lspcon_present - if LSPCON is attached on %port
+ * @i915: i915 device instance
+ * @port: port to check
+ *
+ * Return true if LSPCON is present on this port
+ */
+bool
+intel_bios_is_lspcon_present(const struct drm_i915_private *i915,
+ enum port port)
+{
+ const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[port];
+
+ return HAS_LSPCON(i915) && devdata && devdata->child.lspcon;
+}
+
+/**
+ * intel_bios_is_lane_reversal_needed - if lane reversal needed on port
+ * @i915: i915 device instance
+ * @port: port to check
+ *
+ * Return true if port requires lane reversal
+ */
+bool
+intel_bios_is_lane_reversal_needed(const struct drm_i915_private *i915,
+ enum port port)
+{
+ const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[port];
+
+ return devdata && devdata->child.lane_reversal;
+}
+
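+/*
+ * Resolve the AUX channel for a port: prefer the VBT assignment, and
+ * fall back to the platform default (AUX CH x for port x) otherwise.
+ */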
+enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *i915,
+ enum port port)
+{
+ const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[port];
+ enum aux_ch aux_ch;
+
+ if (!devdata || !devdata->child.aux_channel) {
+ aux_ch = (enum aux_ch)port;
+
+ drm_dbg_kms(&i915->drm,
+ "using AUX %c for port %c (platform default)\n",
+ aux_ch_name(aux_ch), port_name(port));
+ return aux_ch;
+ }
+
+ /*
+ * RKL/DG1 VBT uses PHY based mapping. Combo PHYs A,B,C,D
+ * map to DDI A,B,TC1,TC2 respectively.
+ *
+ * ADL-S VBT uses PHY based mapping. Combo PHYs A,B,C,D,E
+ * map to DDI A,TC1,TC2,TC3,TC4 respectively.
+ */
+ switch (devdata->child.aux_channel) {
+ case DP_AUX_A:
+ aux_ch = AUX_CH_A;
+ break;
+ case DP_AUX_B:
+ if (IS_ALDERLAKE_S(i915))
+ aux_ch = AUX_CH_USBC1;
+ else
+ aux_ch = AUX_CH_B;
+ break;
+ case DP_AUX_C:
+ if (IS_ALDERLAKE_S(i915))
+ aux_ch = AUX_CH_USBC2;
+ else if (IS_DG1(i915) || IS_ROCKETLAKE(i915))
+ aux_ch = AUX_CH_USBC1;
+ else
+ aux_ch = AUX_CH_C;
+ break;
+ case DP_AUX_D:
+ if (DISPLAY_VER(i915) >= 13)
+ aux_ch = AUX_CH_D_XELPD;
+ else if (IS_ALDERLAKE_S(i915))
+ aux_ch = AUX_CH_USBC3;
+ else if (IS_DG1(i915) || IS_ROCKETLAKE(i915))
+ aux_ch = AUX_CH_USBC2;
+ else
+ aux_ch = AUX_CH_D;
+ break;
+ case DP_AUX_E:
+ if (DISPLAY_VER(i915) >= 13)
+ aux_ch = AUX_CH_E_XELPD;
+ else if (IS_ALDERLAKE_S(i915))
+ aux_ch = AUX_CH_USBC4;
+ else
+ aux_ch = AUX_CH_E;
+ break;
+ case DP_AUX_F:
+ if (DISPLAY_VER(i915) >= 13)
+ aux_ch = AUX_CH_USBC1;
+ else
+ aux_ch = AUX_CH_F;
+ break;
+ case DP_AUX_G:
+ if (DISPLAY_VER(i915) >= 13)
+ aux_ch = AUX_CH_USBC2;
+ else
+ aux_ch = AUX_CH_G;
+ break;
+ case DP_AUX_H:
+ if (DISPLAY_VER(i915) >= 13)
+ aux_ch = AUX_CH_USBC3;
+ else
+ aux_ch = AUX_CH_H;
+ break;
+ case DP_AUX_I:
+ if (DISPLAY_VER(i915) >= 13)
+ aux_ch = AUX_CH_USBC4;
+ else
+ aux_ch = AUX_CH_I;
+ break;
+ default:
+ MISSING_CASE(devdata->child.aux_channel);
+ aux_ch = AUX_CH_A;
+ break;
+ }
+
+ drm_dbg_kms(&i915->drm, "using AUX %c for port %c (VBT)\n",
+ aux_ch_name(aux_ch), port_name(port));
+
+ return aux_ch;
+}
+
+int intel_bios_max_tmds_clock(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[encoder->port];
+
+ return _intel_bios_max_tmds_clock(devdata);
+}
+
+/* This is an index in the HDMI/DVI DDI buffer translation table, or -1 */
+int intel_bios_hdmi_level_shift(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[encoder->port];
+
+ return _intel_bios_hdmi_level_shift(devdata);
+}
+
+int intel_bios_encoder_dp_boost_level(const struct intel_bios_encoder_data *devdata)
+{
+ if (!devdata || devdata->i915->display.vbt.version < 196 || !devdata->child.iboost)
+ return 0;
+
+ return translate_iboost(devdata->child.dp_iboost_level);
+}
+
+int intel_bios_encoder_hdmi_boost_level(const struct intel_bios_encoder_data *devdata)
+{
+ if (!devdata || devdata->i915->display.vbt.version < 196 || !devdata->child.iboost)
+ return 0;
+
+ return translate_iboost(devdata->child.hdmi_iboost_level);
+}
+
+int intel_bios_dp_max_link_rate(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[encoder->port];
+
+ return _intel_bios_dp_max_link_rate(devdata);
+}
+
+int intel_bios_dp_max_lane_count(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[encoder->port];
+
+ return _intel_bios_dp_max_lane_count(devdata);
+}
+
+int intel_bios_alternate_ddc_pin(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[encoder->port];
+
+ if (!devdata || !devdata->child.ddc_pin)
+ return 0;
+
+ return map_ddc_pin(i915, devdata->child.ddc_pin);
+}
+
+bool intel_bios_encoder_supports_typec_usb(const struct intel_bios_encoder_data *devdata)
+{
+ return devdata->i915->display.vbt.version >= 195 && devdata->child.dp_usb_type_c;
+}
+
+bool intel_bios_encoder_supports_tbt(const struct intel_bios_encoder_data *devdata)
+{
+ return devdata->i915->display.vbt.version >= 209 && devdata->child.tbt;
+}
+
+const struct intel_bios_encoder_data *
+intel_bios_encoder_data_lookup(struct drm_i915_private *i915, enum port port)
+{
+ return i915->display.vbt.ports[port];
+}
diff --git a/drivers/gpu/drm/i915/display/intel_bios.h b/drivers/gpu/drm/i915/display/intel_bios.h
new file mode 100644
index 000000000..ff1fdd2e0
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_bios.h
@@ -0,0 +1,280 @@
+/*
+ * Copyright © 2016-2019 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * Please use intel_vbt_defs.h for VBT private data, to hide and abstract away
+ * the VBT from the rest of the driver. Add the parsed, clean data to struct
+ * intel_vbt_data within struct drm_i915_private.
+ */
+
+#ifndef _INTEL_BIOS_H_
+#define _INTEL_BIOS_H_
+
+#include <linux/types.h>
+
+struct drm_i915_private;
+struct edid;
+struct intel_bios_encoder_data;
+struct intel_crtc_state;
+struct intel_encoder;
+struct intel_panel;
+enum port;
+
+enum intel_backlight_type {
+ INTEL_BACKLIGHT_PMIC,
+ INTEL_BACKLIGHT_LPSS,
+ INTEL_BACKLIGHT_DISPLAY_DDI,
+ INTEL_BACKLIGHT_DSI_DCS,
+ INTEL_BACKLIGHT_PANEL_DRIVER_INTERFACE,
+ INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE,
+};
+
+struct edp_power_seq {
+ u16 t1_t3;
+ u16 t8;
+ u16 t9;
+ u16 t10;
+ u16 t11_t12;
+} __packed;
+
+/*
+ * MIPI Sequence Block definitions
+ *
+ * Note the VBT spec has AssertReset / DeassertReset swapped from their
+ * usual naming; we use the proper names here to avoid confusion when
+ * reading the code.
+ */
+enum mipi_seq {
+ MIPI_SEQ_END = 0,
+ MIPI_SEQ_DEASSERT_RESET, /* Spec says MipiAssertResetPin */
+ MIPI_SEQ_INIT_OTP,
+ MIPI_SEQ_DISPLAY_ON,
+ MIPI_SEQ_DISPLAY_OFF,
+ MIPI_SEQ_ASSERT_RESET, /* Spec says MipiDeassertResetPin */
+ MIPI_SEQ_BACKLIGHT_ON, /* sequence block v2+ */
+ MIPI_SEQ_BACKLIGHT_OFF, /* sequence block v2+ */
+ MIPI_SEQ_TEAR_ON, /* sequence block v2+ */
+ MIPI_SEQ_TEAR_OFF, /* sequence block v3+ */
+ MIPI_SEQ_POWER_ON, /* sequence block v3+ */
+ MIPI_SEQ_POWER_OFF, /* sequence block v3+ */
+ MIPI_SEQ_MAX
+};
+
+enum mipi_seq_element {
+ MIPI_SEQ_ELEM_END = 0,
+ MIPI_SEQ_ELEM_SEND_PKT,
+ MIPI_SEQ_ELEM_DELAY,
+ MIPI_SEQ_ELEM_GPIO,
+ MIPI_SEQ_ELEM_I2C, /* sequence block v2+ */
+ MIPI_SEQ_ELEM_SPI, /* sequence block v3+ */
+ MIPI_SEQ_ELEM_PMIC, /* sequence block v3+ */
+ MIPI_SEQ_ELEM_MAX
+};
+
+#define MIPI_DSI_UNDEFINED_PANEL_ID 0
+#define MIPI_DSI_GENERIC_PANEL_ID 1
+
+struct mipi_config {
+ u16 panel_id;
+
+ /* General Params */
+ u32 enable_dithering:1;
+ u32 rsvd1:1;
+ u32 is_bridge:1;
+
+ u32 panel_arch_type:2;
+ u32 is_cmd_mode:1;
+
+#define NON_BURST_SYNC_PULSE 0x1
+#define NON_BURST_SYNC_EVENTS 0x2
+#define BURST_MODE 0x3
+ u32 video_transfer_mode:2;
+
+ u32 cabc_supported:1;
+#define PPS_BLC_PMIC 0
+#define PPS_BLC_SOC 1
+ u32 pwm_blc:1;
+
+ /* Bit 13:10 */
+#define PIXEL_FORMAT_RGB565 0x1
+#define PIXEL_FORMAT_RGB666 0x2
+#define PIXEL_FORMAT_RGB666_LOOSELY_PACKED 0x3
+#define PIXEL_FORMAT_RGB888 0x4
+ u32 videomode_color_format:4;
+
+ /* Bit 15:14 */
+#define ENABLE_ROTATION_0 0x0
+#define ENABLE_ROTATION_90 0x1
+#define ENABLE_ROTATION_180 0x2
+#define ENABLE_ROTATION_270 0x3
+ u32 rotation:2;
+ u32 bta_enabled:1;
+ u32 rsvd2:15;
+
+ /* 2 byte Port Description */
+#define DUAL_LINK_NOT_SUPPORTED 0
+#define DUAL_LINK_FRONT_BACK 1
+#define DUAL_LINK_PIXEL_ALT 2
+ u16 dual_link:2;
+ u16 lane_cnt:2;
+ u16 pixel_overlap:3;
+ u16 rgb_flip:1;
+#define DL_DCS_PORT_A 0x00
+#define DL_DCS_PORT_C 0x01
+#define DL_DCS_PORT_A_AND_C 0x02
+ u16 dl_dcs_cabc_ports:2;
+ u16 dl_dcs_backlight_ports:2;
+ u16 rsvd3:4;
+
+ u16 rsvd4;
+
+ u8 rsvd5;
+ u32 target_burst_mode_freq;
+ u32 dsi_ddr_clk;
+ u32 bridge_ref_clk;
+
+#define BYTE_CLK_SEL_20MHZ 0
+#define BYTE_CLK_SEL_10MHZ 1
+#define BYTE_CLK_SEL_5MHZ 2
+ u8 byte_clk_sel:2;
+
+ u8 rsvd6:6;
+
+ /* DPHY Flags */
+ u16 dphy_param_valid:1;
+ u16 eot_pkt_disabled:1;
+ u16 enable_clk_stop:1;
+ u16 rsvd7:13;
+
+ u32 hs_tx_timeout;
+ u32 lp_rx_timeout;
+ u32 turn_around_timeout;
+ u32 device_reset_timer;
+ u32 master_init_timer;
+ u32 dbi_bw_timer;
+ u32 lp_byte_clk_val;
+
+ /* 4 byte Dphy Params */
+ u32 prepare_cnt:6;
+ u32 rsvd8:2;
+ u32 clk_zero_cnt:8;
+ u32 trail_cnt:5;
+ u32 rsvd9:3;
+ u32 exit_zero_cnt:6;
+ u32 rsvd10:2;
+
+ u32 clk_lane_switch_cnt;
+ u32 hl_switch_cnt;
+
+ u32 rsvd11[6];
+
+ /* timings based on dphy spec */
+ u8 tclk_miss;
+ u8 tclk_post;
+ u8 rsvd12;
+ u8 tclk_pre;
+ u8 tclk_prepare;
+ u8 tclk_settle;
+ u8 tclk_term_enable;
+ u8 tclk_trail;
+ u16 tclk_prepare_clkzero;
+ u8 rsvd13;
+ u8 td_term_enable;
+ u8 teot;
+ u8 ths_exit;
+ u8 ths_prepare;
+ u16 ths_prepare_hszero;
+ u8 rsvd14;
+ u8 ths_settle;
+ u8 ths_skip;
+ u8 ths_trail;
+ u8 tinit;
+ u8 tlpx;
+ u8 rsvd15[3];
+
+ /* GPIOs */
+ u8 panel_enable;
+ u8 bl_enable;
+ u8 pwm_enable;
+ u8 reset_r_n;
+ u8 pwr_down_r;
+ u8 stdby_r_n;
+
+} __packed;
+
+/* all delays have a unit of 100us */
+struct mipi_pps_data {
+ u16 panel_on_delay;
+ u16 bl_enable_delay;
+ u16 bl_disable_delay;
+ u16 panel_off_delay;
+ u16 panel_power_cycle_delay;
+} __packed;
+
+void intel_bios_init(struct drm_i915_private *dev_priv);
+void intel_bios_init_panel_early(struct drm_i915_private *dev_priv,
+ struct intel_panel *panel,
+ const struct intel_bios_encoder_data *devdata);
+void intel_bios_init_panel_late(struct drm_i915_private *dev_priv,
+ struct intel_panel *panel,
+ const struct intel_bios_encoder_data *devdata,
+ const struct edid *edid);
+void intel_bios_fini_panel(struct intel_panel *panel);
+void intel_bios_driver_remove(struct drm_i915_private *dev_priv);
+bool intel_bios_is_valid_vbt(const void *buf, size_t size);
+bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv);
+bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin);
+bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port);
+bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
+bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port);
+bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port);
+bool intel_bios_is_port_hpd_inverted(const struct drm_i915_private *i915,
+ enum port port);
+bool intel_bios_is_lspcon_present(const struct drm_i915_private *i915,
+ enum port port);
+bool intel_bios_is_lane_reversal_needed(const struct drm_i915_private *i915,
+ enum port port);
+enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv, enum port port);
+bool intel_bios_get_dsc_params(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ int dsc_max_bpc);
+int intel_bios_max_tmds_clock(struct intel_encoder *encoder);
+int intel_bios_hdmi_level_shift(struct intel_encoder *encoder);
+int intel_bios_dp_max_link_rate(struct intel_encoder *encoder);
+int intel_bios_dp_max_lane_count(struct intel_encoder *encoder);
+int intel_bios_alternate_ddc_pin(struct intel_encoder *encoder);
+bool intel_bios_port_supports_typec_usb(struct drm_i915_private *i915, enum port port);
+bool intel_bios_port_supports_tbt(struct drm_i915_private *i915, enum port port);
+
+const struct intel_bios_encoder_data *
+intel_bios_encoder_data_lookup(struct drm_i915_private *i915, enum port port);
+
+bool intel_bios_encoder_supports_dvi(const struct intel_bios_encoder_data *devdata);
+bool intel_bios_encoder_supports_hdmi(const struct intel_bios_encoder_data *devdata);
+bool intel_bios_encoder_supports_dp(const struct intel_bios_encoder_data *devdata);
+bool intel_bios_encoder_supports_typec_usb(const struct intel_bios_encoder_data *devdata);
+bool intel_bios_encoder_supports_tbt(const struct intel_bios_encoder_data *devdata);
+int intel_bios_encoder_dp_boost_level(const struct intel_bios_encoder_data *devdata);
+int intel_bios_encoder_hdmi_boost_level(const struct intel_bios_encoder_data *devdata);
+
+#endif /* _INTEL_BIOS_H_ */
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
new file mode 100644
index 000000000..4ace026b2
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -0,0 +1,1203 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <drm/drm_atomic_state_helper.h>
+
+#include "i915_drv.h"
+#include "i915_reg.h"
+#include "i915_utils.h"
+#include "intel_atomic.h"
+#include "intel_bw.h"
+#include "intel_cdclk.h"
+#include "intel_display_core.h"
+#include "intel_display_types.h"
+#include "skl_watermark.h"
+#include "intel_mchbar_regs.h"
+#include "intel_pcode.h"
+
+/* Parameters for Qclk Geyserville (QGV) */
+struct intel_qgv_point {
+ u16 dclk, t_rp, t_rdpre, t_rc, t_ras, t_rcd;
+};
+
+struct intel_psf_gv_point {
+ u8 clk; /* clock in multiples of 16.6666 MHz */
+};
+
+struct intel_qgv_info {
+ struct intel_qgv_point points[I915_NUM_QGV_POINTS];
+ struct intel_psf_gv_point psf_points[I915_NUM_PSF_GV_POINTS];
+ u8 num_points;
+ u8 num_psf_points;
+ u8 t_bl;
+ u8 max_numchannels;
+ u8 channel_width;
+ u8 deinterleave;
+};
+
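+/*
+ * On DG1 the QGV point parameters are read from MCHBAR registers
+ * rather than via the pcode mailbox.
+ */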
+static int dg1_mchbar_read_qgv_point_info(struct drm_i915_private *dev_priv,
+ struct intel_qgv_point *sp,
+ int point)
+{
+ u32 dclk_ratio, dclk_reference;
+ u32 val;
+
+ val = intel_uncore_read(&dev_priv->uncore, SA_PERF_STATUS_0_0_0_MCHBAR_PC);
+ dclk_ratio = REG_FIELD_GET(DG1_QCLK_RATIO_MASK, val);
+ if (val & DG1_QCLK_REFERENCE)
+ dclk_reference = 6; /* 6 * 16.666 MHz = 100 MHz */
+ else
+ dclk_reference = 8; /* 8 * 16.666 MHz = 133 MHz */
+ sp->dclk = DIV_ROUND_UP((16667 * dclk_ratio * dclk_reference) + 500, 1000);
+
+ val = intel_uncore_read(&dev_priv->uncore, SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU);
+ if (val & DG1_GEAR_TYPE)
+ sp->dclk *= 2;
+
+ if (sp->dclk == 0)
+ return -EINVAL;
+
+ val = intel_uncore_read(&dev_priv->uncore, MCHBAR_CH0_CR_TC_PRE_0_0_0_MCHBAR);
+ sp->t_rp = REG_FIELD_GET(DG1_DRAM_T_RP_MASK, val);
+ sp->t_rdpre = REG_FIELD_GET(DG1_DRAM_T_RDPRE_MASK, val);
+
+ val = intel_uncore_read(&dev_priv->uncore, MCHBAR_CH0_CR_TC_PRE_0_0_0_MCHBAR_HIGH);
+ sp->t_rcd = REG_FIELD_GET(DG1_DRAM_T_RCD_MASK, val);
+ sp->t_ras = REG_FIELD_GET(DG1_DRAM_T_RAS_MASK, val);
+
+ sp->t_rc = sp->t_rp + sp->t_ras;
+
+ return 0;
+}
+
+static int icl_pcode_read_qgv_point_info(struct drm_i915_private *dev_priv,
+ struct intel_qgv_point *sp,
+ int point)
+{
+ u32 val = 0, val2 = 0;
+ u16 dclk;
+ int ret;
+
+ ret = snb_pcode_read(&dev_priv->uncore, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
+ ICL_PCODE_MEM_SS_READ_QGV_POINT_INFO(point),
+ &val, &val2);
+ if (ret)
+ return ret;
+
+ dclk = val & 0xffff;
+ sp->dclk = DIV_ROUND_UP((16667 * dclk) + (DISPLAY_VER(dev_priv) > 11 ? 500 : 0), 1000);
+ sp->t_rp = (val & 0xff0000) >> 16;
+ sp->t_rcd = (val & 0xff000000) >> 24;
+
+ sp->t_rdpre = val2 & 0xff;
+ sp->t_ras = (val2 & 0xff00) >> 8;
+
+ sp->t_rc = sp->t_rp + sp->t_ras;
+
+ return 0;
+}
+
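+/* The pcode response packs one PSF GV point per byte. */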
+static int adls_pcode_read_psf_gv_point_info(struct drm_i915_private *dev_priv,
+ struct intel_psf_gv_point *points)
+{
+ u32 val = 0;
+ int ret;
+ int i;
+
+ ret = snb_pcode_read(&dev_priv->uncore, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
+ ADL_PCODE_MEM_SS_READ_PSF_GV_INFO, &val, NULL);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < I915_NUM_PSF_GV_POINTS; i++) {
+ points[i].clk = val & 0xff;
+ val >>= 8;
+ }
+
+ return 0;
+}
+
+int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv,
+ u32 points_mask)
+{
+ int ret;
+
+ /* bspec says to keep retrying for at least 1 ms */
+ ret = skl_pcode_request(&dev_priv->uncore, ICL_PCODE_SAGV_DE_MEM_SS_CONFIG,
+ points_mask,
+ ICL_PCODE_REP_QGV_MASK | ADLS_PCODE_REP_PSF_MASK,
+ ICL_PCODE_REP_QGV_SAFE | ADLS_PCODE_REP_PSF_SAFE,
+ 1);
+
+ if (ret < 0) {
+ drm_err(&dev_priv->drm, "Failed to disable qgv points (%d) points: 0x%x\n", ret, points_mask);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mtl_read_qgv_point_info(struct drm_i915_private *dev_priv,
+ struct intel_qgv_point *sp, int point)
+{
+ u32 val, val2;
+ u16 dclk;
+
+ val = intel_uncore_read(&dev_priv->uncore,
+ MTL_MEM_SS_INFO_QGV_POINT_LOW(point));
+ val2 = intel_uncore_read(&dev_priv->uncore,
+ MTL_MEM_SS_INFO_QGV_POINT_HIGH(point));
+ dclk = REG_FIELD_GET(MTL_DCLK_MASK, val);
+ sp->dclk = DIV_ROUND_UP((16667 * dclk), 1000);
+ sp->t_rp = REG_FIELD_GET(MTL_TRP_MASK, val);
+ sp->t_rcd = REG_FIELD_GET(MTL_TRCD_MASK, val);
+
+ sp->t_rdpre = REG_FIELD_GET(MTL_TRDPRE_MASK, val2);
+ sp->t_ras = REG_FIELD_GET(MTL_TRAS_MASK, val2);
+
+ sp->t_rc = sp->t_rp + sp->t_ras;
+
+ return 0;
+}
+
+static int
+intel_read_qgv_point_info(struct drm_i915_private *dev_priv,
+ struct intel_qgv_point *sp,
+ int point)
+{
+ if (DISPLAY_VER(dev_priv) >= 14)
+ return mtl_read_qgv_point_info(dev_priv, sp, point);
+ else if (IS_DG1(dev_priv))
+ return dg1_mchbar_read_qgv_point_info(dev_priv, sp, point);
+ else
+ return icl_pcode_read_qgv_point_info(dev_priv, sp, point);
+}
+
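+/*
+ * Read out all QGV (and PSF GV) points. Burst length, channel count,
+ * channel width and deinterleave factor depend on the DRAM type and
+ * platform generation.
+ */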
+static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
+ struct intel_qgv_info *qi,
+ bool is_y_tile)
+{
+ const struct dram_info *dram_info = &dev_priv->dram_info;
+ int i, ret;
+
+ qi->num_points = dram_info->num_qgv_points;
+ qi->num_psf_points = dram_info->num_psf_gv_points;
+
+ if (DISPLAY_VER(dev_priv) >= 14) {
+ switch (dram_info->type) {
+ case INTEL_DRAM_DDR4:
+ qi->t_bl = 4;
+ qi->max_numchannels = 2;
+ qi->channel_width = 64;
+ qi->deinterleave = 2;
+ break;
+ case INTEL_DRAM_DDR5:
+ qi->t_bl = 8;
+ qi->max_numchannels = 4;
+ qi->channel_width = 32;
+ qi->deinterleave = 2;
+ break;
+ case INTEL_DRAM_LPDDR4:
+ case INTEL_DRAM_LPDDR5:
+ qi->t_bl = 16;
+ qi->max_numchannels = 8;
+ qi->channel_width = 16;
+ qi->deinterleave = 4;
+ break;
+ default:
+ MISSING_CASE(dram_info->type);
+ return -EINVAL;
+ }
+ } else if (DISPLAY_VER(dev_priv) >= 12) {
+ switch (dram_info->type) {
+ case INTEL_DRAM_DDR4:
+ qi->t_bl = is_y_tile ? 8 : 4;
+ qi->max_numchannels = 2;
+ qi->channel_width = 64;
+ qi->deinterleave = is_y_tile ? 1 : 2;
+ break;
+ case INTEL_DRAM_DDR5:
+ qi->t_bl = is_y_tile ? 16 : 8;
+ qi->max_numchannels = 4;
+ qi->channel_width = 32;
+ qi->deinterleave = is_y_tile ? 1 : 2;
+ break;
+ case INTEL_DRAM_LPDDR4:
+ if (IS_ROCKETLAKE(dev_priv)) {
+ qi->t_bl = 8;
+ qi->max_numchannels = 4;
+ qi->channel_width = 32;
+ qi->deinterleave = 2;
+ break;
+ }
+ fallthrough;
+ case INTEL_DRAM_LPDDR5:
+ qi->t_bl = 16;
+ qi->max_numchannels = 8;
+ qi->channel_width = 16;
+ qi->deinterleave = is_y_tile ? 2 : 4;
+ break;
+ default:
+ qi->t_bl = 16;
+ qi->max_numchannels = 1;
+ break;
+ }
+ } else if (DISPLAY_VER(dev_priv) == 11) {
+ qi->t_bl = dev_priv->dram_info.type == INTEL_DRAM_DDR4 ? 4 : 8;
+ qi->max_numchannels = 1;
+ }
+
+ if (drm_WARN_ON(&dev_priv->drm,
+ qi->num_points > ARRAY_SIZE(qi->points)))
+ qi->num_points = ARRAY_SIZE(qi->points);
+
+ for (i = 0; i < qi->num_points; i++) {
+ struct intel_qgv_point *sp = &qi->points[i];
+
+ ret = intel_read_qgv_point_info(dev_priv, sp, i);
+ if (ret)
+ return ret;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "QGV %d: DCLK=%d tRP=%d tRDPRE=%d tRAS=%d tRCD=%d tRC=%d\n",
+ i, sp->dclk, sp->t_rp, sp->t_rdpre, sp->t_ras,
+ sp->t_rcd, sp->t_rc);
+ }
+
+ if (qi->num_psf_points > 0) {
+ ret = adls_pcode_read_psf_gv_point_info(dev_priv, qi->psf_points);
+ if (ret) {
+ drm_err(&dev_priv->drm, "Failed to read PSF point data; PSF points will not be considered in bandwidth calculations.\n");
+ qi->num_psf_points = 0;
+ }
+
+ for (i = 0; i < qi->num_psf_points; i++)
+ drm_dbg_kms(&dev_priv->drm,
+ "PSF GV %d: CLK=%d \n",
+ i, qi->psf_points[i].clk);
+ }
+
+ return 0;
+}
+
+static int adl_calc_psf_bw(int clk)
+{
+ /*
+ * clk is in multiples of 16.666 MHz (100/6)
+ * According to BSpec, PSF GV bandwidth is
+ * calculated as BW = 64 * clk * 16.666 MHz
+ */
+ return DIV_ROUND_CLOSEST(64 * clk * 100, 6);
+}
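
/*
 * A worked instance of the formula above, for a hypothetical clk value
 * as returned by PCode; treating the result as MB/s per the BSpec
 * comment, with DIV_ROUND_CLOSEST reimplemented locally.
 */
#include <stdio.h>

#define DIV_ROUND_CLOSEST(n, d) (((n) + (d) / 2) / (d))

int main(void)
{
	int clk = 64;	/* hypothetical: 64 * 16.666 MHz ~= 1067 MHz */

	/* 64 * 64 * 100 / 6 = 68267, i.e. ~68.3 GB/s */
	printf("psf bw = %d\n", DIV_ROUND_CLOSEST(64 * clk * 100, 6));
	return 0;
}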
+
+static int icl_sagv_max_dclk(const struct intel_qgv_info *qi)
+{
+ u16 dclk = 0;
+ int i;
+
+ for (i = 0; i < qi->num_points; i++)
+ dclk = max(dclk, qi->points[i].dclk);
+
+ return dclk;
+}
+
+struct intel_sa_info {
+ u16 displayrtids;
+ u8 deburst, deprogbwlimit, derating;
+};
+
+static const struct intel_sa_info icl_sa_info = {
+ .deburst = 8,
+ .deprogbwlimit = 25, /* GB/s */
+ .displayrtids = 128,
+ .derating = 10,
+};
+
+static const struct intel_sa_info tgl_sa_info = {
+ .deburst = 16,
+ .deprogbwlimit = 34, /* GB/s */
+ .displayrtids = 256,
+ .derating = 10,
+};
+
+static const struct intel_sa_info rkl_sa_info = {
+ .deburst = 8,
+ .deprogbwlimit = 20, /* GB/s */
+ .displayrtids = 128,
+ .derating = 10,
+};
+
+static const struct intel_sa_info adls_sa_info = {
+ .deburst = 16,
+ .deprogbwlimit = 38, /* GB/s */
+ .displayrtids = 256,
+ .derating = 10,
+};
+
+static const struct intel_sa_info adlp_sa_info = {
+ .deburst = 16,
+ .deprogbwlimit = 38, /* GB/s */
+ .displayrtids = 256,
+ .derating = 20,
+};
+
+static const struct intel_sa_info mtl_sa_info = {
+ .deburst = 32,
+ .deprogbwlimit = 38, /* GB/s */
+ .displayrtids = 256,
+ .derating = 20,
+};
+
+static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel_sa_info *sa)
+{
+ struct intel_qgv_info qi = {};
+ bool is_y_tile = true; /* assume y tile may be used */
+ int num_channels = max_t(u8, 1, dev_priv->dram_info.num_channels);
+ int ipqdepth, ipqdepthpch = 16;
+ int dclk_max;
+ int maxdebw;
+ int num_groups = ARRAY_SIZE(dev_priv->display.bw.max);
+ int i, ret;
+
+ ret = icl_get_qgv_points(dev_priv, &qi, is_y_tile);
+ if (ret) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Failed to get memory subsystem information, ignoring bandwidth limits");
+ return ret;
+ }
+
+ dclk_max = icl_sagv_max_dclk(&qi);
+ maxdebw = min(sa->deprogbwlimit * 1000, dclk_max * 16 * 6 / 10);
+ ipqdepth = min(ipqdepthpch, sa->displayrtids / num_channels);
+ qi.deinterleave = DIV_ROUND_UP(num_channels, is_y_tile ? 4 : 2);
+
+ for (i = 0; i < num_groups; i++) {
+ struct intel_bw_info *bi = &dev_priv->display.bw.max[i];
+ int clpchgroup;
+ int j;
+
+ clpchgroup = (sa->deburst * qi.deinterleave / num_channels) << i;
+ bi->num_planes = (ipqdepth - clpchgroup) / clpchgroup + 1;
+
+ bi->num_qgv_points = qi.num_points;
+ bi->num_psf_gv_points = qi.num_psf_points;
+
+ for (j = 0; j < qi.num_points; j++) {
+ const struct intel_qgv_point *sp = &qi.points[j];
+ int ct, bw;
+
+ /*
+ * Max row cycle time
+ *
+ * FIXME what is the logic behind the
+ * assumed burst length?
+ */
+ ct = max_t(int, sp->t_rc, sp->t_rp + sp->t_rcd +
+ (clpchgroup - 1) * qi.t_bl + sp->t_rdpre);
+ bw = DIV_ROUND_UP(sp->dclk * clpchgroup * 32 * num_channels, ct);
+
+ bi->deratedbw[j] = min(maxdebw,
+ bw * (100 - sa->derating) / 100);
+
+ drm_dbg_kms(&dev_priv->drm,
+ "BW%d / QGV %d: num_planes=%d deratedbw=%u\n",
+ i, j, bi->num_planes, bi->deratedbw[j]);
+ }
+ }
+ /*
+ * If SAGV is disabled in the BIOS, we always get just one
+ * SAGV point; we can't send PCode commands to restrict it,
+ * as doing so would fail and be pointless anyway.
+ */
+ if (qi.num_points == 1)
+ dev_priv->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
+ else
+ dev_priv->display.sagv.status = I915_SAGV_ENABLED;
+
+ return 0;
+}
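
/*
 * A worked example of the derated bandwidth math in the QGV loop above,
 * using made-up DRAM timings (all in dclk cycles) and a made-up config;
 * the kernel helpers are reimplemented locally.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define max(a, b) ((a) > (b) ? (a) : (b))
#define min(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	int dclk = 1200, t_rc = 40, t_rp = 16, t_rcd = 16, t_rdpre = 8;
	int t_bl = 16, clpchgroup = 2, num_channels = 4, derating = 10;
	int maxdebw = 25000;
	int ct, bw;

	/* max row cycle time: max(40, 16 + 16 + 16 + 8) = 56 */
	ct = max(t_rc, t_rp + t_rcd + (clpchgroup - 1) * t_bl + t_rdpre);
	/* 1200 * 2 * 32 * 4 / 56, rounded up = 5486 */
	bw = DIV_ROUND_UP(dclk * clpchgroup * 32 * num_channels, ct);

	/* 10% derating: min(25000, 5486 * 90 / 100) = 4937 */
	printf("deratedbw = %d\n", min(maxdebw, bw * (100 - derating) / 100));
	return 0;
}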
+
+static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel_sa_info *sa)
+{
+ struct intel_qgv_info qi = {};
+ const struct dram_info *dram_info = &dev_priv->dram_info;
+ bool is_y_tile = true; /* assume y tile may be used */
+ int num_channels = max_t(u8, 1, dev_priv->dram_info.num_channels);
+ int ipqdepth, ipqdepthpch = 16;
+ int dclk_max;
+ int maxdebw, peakbw;
+ int clperchgroup;
+ int num_groups = ARRAY_SIZE(dev_priv->display.bw.max);
+ int i, ret;
+
+ ret = icl_get_qgv_points(dev_priv, &qi, is_y_tile);
+ if (ret) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Failed to get memory subsystem information, ignoring bandwidth limits");
+ return ret;
+ }
+
+ if (dram_info->type == INTEL_DRAM_LPDDR4 || dram_info->type == INTEL_DRAM_LPDDR5)
+ num_channels *= 2;
+
+ qi.deinterleave = qi.deinterleave ? : DIV_ROUND_UP(num_channels, is_y_tile ? 4 : 2);
+
+ if (num_channels < qi.max_numchannels && DISPLAY_VER(dev_priv) >= 12)
+ qi.deinterleave = max(DIV_ROUND_UP(qi.deinterleave, 2), 1);
+
+ if (DISPLAY_VER(dev_priv) > 11 && num_channels > qi.max_numchannels)
+ drm_warn(&dev_priv->drm, "Number of channels exceeds max number of channels.");
+ if (qi.max_numchannels != 0)
+ num_channels = min_t(u8, num_channels, qi.max_numchannels);
+
+ dclk_max = icl_sagv_max_dclk(&qi);
+
+ peakbw = num_channels * DIV_ROUND_UP(qi.channel_width, 8) * dclk_max;
+ maxdebw = min(sa->deprogbwlimit * 1000, peakbw * 6 / 10); /* 60% */
+
+ ipqdepth = min(ipqdepthpch, sa->displayrtids / num_channels);
+ /*
+ * clperchgroup = 4kpagespermempage * clperchperblock,
+ * clperchperblock = 8 / num_channels * interleave
+ */
+ clperchgroup = 4 * DIV_ROUND_UP(8, num_channels) * qi.deinterleave;
+
+ for (i = 0; i < num_groups; i++) {
+ struct intel_bw_info *bi = &dev_priv->display.bw.max[i];
+ struct intel_bw_info *bi_next;
+ int clpchgroup;
+ int j;
+
+ clpchgroup = (sa->deburst * qi.deinterleave / num_channels) << i;
+
+ if (i < num_groups - 1) {
+ bi_next = &dev_priv->display.bw.max[i + 1];
+
+ if (clpchgroup < clperchgroup)
+ bi_next->num_planes = (ipqdepth - clpchgroup) /
+ clpchgroup + 1;
+ else
+ bi_next->num_planes = 0;
+ }
+
+ bi->num_qgv_points = qi.num_points;
+ bi->num_psf_gv_points = qi.num_psf_points;
+
+ for (j = 0; j < qi.num_points; j++) {
+ const struct intel_qgv_point *sp = &qi.points[j];
+ int ct, bw;
+
+ /*
+ * Max row cycle time
+ *
+ * FIXME what is the logic behind the
+ * assumed burst length?
+ */
+ ct = max_t(int, sp->t_rc, sp->t_rp + sp->t_rcd +
+ (clpchgroup - 1) * qi.t_bl + sp->t_rdpre);
+ bw = DIV_ROUND_UP(sp->dclk * clpchgroup * 32 * num_channels, ct);
+
+ bi->deratedbw[j] = min(maxdebw,
+ bw * (100 - sa->derating) / 100);
+
+ drm_dbg_kms(&dev_priv->drm,
+ "BW%d / QGV %d: num_planes=%d deratedbw=%u\n",
+ i, j, bi->num_planes, bi->deratedbw[j]);
+ }
+
+ for (j = 0; j < qi.num_psf_points; j++) {
+ const struct intel_psf_gv_point *sp = &qi.psf_points[j];
+
+ bi->psf_bw[j] = adl_calc_psf_bw(sp->clk);
+
+ drm_dbg_kms(&dev_priv->drm,
+ "BW%d / PSF GV %d: num_planes=%d bw=%u\n",
+ i, j, bi->num_planes, bi->psf_bw[j]);
+ }
+ }
+
+ /*
+ * If SAGV is disabled in the BIOS, we always get just one
+ * SAGV point; we can't send PCode commands to restrict it,
+ * as doing so would fail and be pointless anyway.
+ */
+ if (qi.num_points == 1)
+ dev_priv->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
+ else
+ dev_priv->display.sagv.status = I915_SAGV_ENABLED;
+
+ return 0;
+}
+
+static void dg2_get_bw_info(struct drm_i915_private *i915)
+{
+ unsigned int deratedbw = IS_DG2_G11(i915) ? 38000 : 50000;
+ int num_groups = ARRAY_SIZE(i915->display.bw.max);
+ int i;
+
+ /*
+ * DG2 doesn't have SAGV or QGV points, just a constant max bandwidth
+ * that doesn't depend on the number of planes enabled. So fill all the
+ * plane groups with constant bw information for uniformity with other
+ * platforms. DG2-G10 platforms have a constant 50 GB/s bandwidth,
+ * whereas DG2-G11 platforms have 38 GB/s.
+ */
+ for (i = 0; i < num_groups; i++) {
+ struct intel_bw_info *bi = &i915->display.bw.max[i];
+
+ bi->num_planes = 1;
+ /* Need only one dummy QGV point per group */
+ bi->num_qgv_points = 1;
+ bi->deratedbw[0] = deratedbw;
+ }
+
+ i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
+}
+
+static unsigned int icl_max_bw(struct drm_i915_private *dev_priv,
+ int num_planes, int qgv_point)
+{
+ int i;
+
+ /*
+ * Let's return max bw for 0 planes
+ */
+ num_planes = max(1, num_planes);
+
+ for (i = 0; i < ARRAY_SIZE(dev_priv->display.bw.max); i++) {
+ const struct intel_bw_info *bi =
+ &dev_priv->display.bw.max[i];
+
+ /*
+ * Pcode will not expose all QGV points when
+ * SAGV is forced to off/min/med/max.
+ */
+ if (qgv_point >= bi->num_qgv_points)
+ return UINT_MAX;
+
+ if (num_planes >= bi->num_planes)
+ return bi->deratedbw[qgv_point];
+ }
+
+ return 0;
+}
+
+static unsigned int tgl_max_bw(struct drm_i915_private *dev_priv,
+ int num_planes, int qgv_point)
+{
+ int i;
+
+ /*
+ * Let's return max bw for 0 planes
+ */
+ num_planes = max(1, num_planes);
+
+ for (i = ARRAY_SIZE(dev_priv->display.bw.max) - 1; i >= 0; i--) {
+ const struct intel_bw_info *bi =
+ &dev_priv->display.bw.max[i];
+
+ /*
+ * Pcode will not expose all QGV points when
+ * SAGV is forced to off/min/med/max.
+ */
+ if (qgv_point >= bi->num_qgv_points)
+ return UINT_MAX;
+
+ if (num_planes <= bi->num_planes)
+ return bi->deratedbw[qgv_point];
+ }
+
+ return dev_priv->display.bw.max[0].deratedbw[qgv_point];
+}
+
+static unsigned int adl_psf_bw(struct drm_i915_private *dev_priv,
+ int psf_gv_point)
+{
+ const struct intel_bw_info *bi =
+ &dev_priv->display.bw.max[0];
+
+ return bi->psf_bw[psf_gv_point];
+}
+
+void intel_bw_init_hw(struct drm_i915_private *dev_priv)
+{
+ if (!HAS_DISPLAY(dev_priv))
+ return;
+
+ if (DISPLAY_VER(dev_priv) >= 14)
+ tgl_get_bw_info(dev_priv, &mtl_sa_info);
+ else if (IS_DG2(dev_priv))
+ dg2_get_bw_info(dev_priv);
+ else if (IS_ALDERLAKE_P(dev_priv))
+ tgl_get_bw_info(dev_priv, &adlp_sa_info);
+ else if (IS_ALDERLAKE_S(dev_priv))
+ tgl_get_bw_info(dev_priv, &adls_sa_info);
+ else if (IS_ROCKETLAKE(dev_priv))
+ tgl_get_bw_info(dev_priv, &rkl_sa_info);
+ else if (DISPLAY_VER(dev_priv) == 12)
+ tgl_get_bw_info(dev_priv, &tgl_sa_info);
+ else if (DISPLAY_VER(dev_priv) == 11)
+ icl_get_bw_info(dev_priv, &icl_sa_info);
+}
+
+static unsigned int intel_bw_crtc_num_active_planes(const struct intel_crtc_state *crtc_state)
+{
+ /*
+ * We assume cursors are small enough
+ * to not cause bandwidth problems.
+ */
+ return hweight8(crtc_state->active_planes & ~BIT(PLANE_CURSOR));
+}
+
+static unsigned int intel_bw_crtc_data_rate(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ unsigned int data_rate = 0;
+ enum plane_id plane_id;
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ /*
+ * We assume cursors are small enough
+ * to not cause bandwidth problems.
+ */
+ if (plane_id == PLANE_CURSOR)
+ continue;
+
+ data_rate += crtc_state->data_rate[plane_id];
+
+ if (DISPLAY_VER(i915) < 11)
+ data_rate += crtc_state->data_rate_y[plane_id];
+ }
+
+ return data_rate;
+}
+
+/* "Maximum Pipe Read Bandwidth" */
+static int intel_bw_crtc_min_cdclk(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+
+ if (DISPLAY_VER(i915) < 12)
+ return 0;
+
+ return DIV_ROUND_UP_ULL(mul_u32_u32(intel_bw_crtc_data_rate(crtc_state), 10), 512);
+}
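
/*
 * Illustrative numbers for the formula above, assuming the data rate is
 * in kB/s and the resulting cdclk floor in kHz, i.e. one CDCLK cycle
 * moves at most 51.2 bytes through the pipe.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long long data_rate = 5120000; /* hypothetical, kB/s */

	/* 5120000 * 10 / 512 = 100000, i.e. a 100 MHz CDCLK floor */
	printf("min cdclk = %llu kHz\n", DIV_ROUND_UP(data_rate * 10, 512));
	return 0;
}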
+
+void intel_bw_crtc_update(struct intel_bw_state *bw_state,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+
+ bw_state->data_rate[crtc->pipe] =
+ intel_bw_crtc_data_rate(crtc_state);
+ bw_state->num_active_planes[crtc->pipe] =
+ intel_bw_crtc_num_active_planes(crtc_state);
+
+ drm_dbg_kms(&i915->drm, "pipe %c data rate %u num active planes %u\n",
+ pipe_name(crtc->pipe),
+ bw_state->data_rate[crtc->pipe],
+ bw_state->num_active_planes[crtc->pipe]);
+}
+
+static unsigned int intel_bw_num_active_planes(struct drm_i915_private *dev_priv,
+ const struct intel_bw_state *bw_state)
+{
+ unsigned int num_active_planes = 0;
+ enum pipe pipe;
+
+ for_each_pipe(dev_priv, pipe)
+ num_active_planes += bw_state->num_active_planes[pipe];
+
+ return num_active_planes;
+}
+
+static unsigned int intel_bw_data_rate(struct drm_i915_private *dev_priv,
+ const struct intel_bw_state *bw_state)
+{
+ unsigned int data_rate = 0;
+ enum pipe pipe;
+
+ for_each_pipe(dev_priv, pipe)
+ data_rate += bw_state->data_rate[pipe];
+
+ if (DISPLAY_VER(dev_priv) >= 13 && i915_vtd_active(dev_priv))
+ data_rate = DIV_ROUND_UP(data_rate * 105, 100);
+
+ return data_rate;
+}
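
/*
 * The VT-d term above is a flat 5% overhead; e.g. a hypothetical
 * aggregate rate of 3600000 becomes 3780000:
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	printf("%d\n", DIV_ROUND_UP(3600000 * 105, 100)); /* 3780000 */
	return 0;
}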
+
+struct intel_bw_state *
+intel_atomic_get_old_bw_state(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_global_state *bw_state;
+
+ bw_state = intel_atomic_get_old_global_obj_state(state, &dev_priv->display.bw.obj);
+
+ return to_intel_bw_state(bw_state);
+}
+
+struct intel_bw_state *
+intel_atomic_get_new_bw_state(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_global_state *bw_state;
+
+ bw_state = intel_atomic_get_new_global_obj_state(state, &dev_priv->display.bw.obj);
+
+ return to_intel_bw_state(bw_state);
+}
+
+struct intel_bw_state *
+intel_atomic_get_bw_state(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_global_state *bw_state;
+
+ bw_state = intel_atomic_get_global_obj_state(state, &dev_priv->display.bw.obj);
+ if (IS_ERR(bw_state))
+ return ERR_CAST(bw_state);
+
+ return to_intel_bw_state(bw_state);
+}
+
+static bool intel_bw_state_changed(struct drm_i915_private *i915,
+ const struct intel_bw_state *old_bw_state,
+ const struct intel_bw_state *new_bw_state)
+{
+ enum pipe pipe;
+
+ for_each_pipe(i915, pipe) {
+ const struct intel_dbuf_bw *old_crtc_bw =
+ &old_bw_state->dbuf_bw[pipe];
+ const struct intel_dbuf_bw *new_crtc_bw =
+ &new_bw_state->dbuf_bw[pipe];
+ enum dbuf_slice slice;
+
+ for_each_dbuf_slice(i915, slice) {
+ if (old_crtc_bw->max_bw[slice] != new_crtc_bw->max_bw[slice] ||
+ old_crtc_bw->active_planes[slice] != new_crtc_bw->active_planes[slice])
+ return true;
+ }
+
+ if (old_bw_state->min_cdclk[pipe] != new_bw_state->min_cdclk[pipe])
+ return true;
+ }
+
+ return false;
+}
+
+static void skl_plane_calc_dbuf_bw(struct intel_bw_state *bw_state,
+ struct intel_crtc *crtc,
+ enum plane_id plane_id,
+ const struct skl_ddb_entry *ddb,
+ unsigned int data_rate)
+{
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_dbuf_bw *crtc_bw = &bw_state->dbuf_bw[crtc->pipe];
+ unsigned int dbuf_mask = skl_ddb_dbuf_slice_mask(i915, ddb);
+ enum dbuf_slice slice;
+
+ /*
+ * The arbiter can only really guarantee an
+ * equal share of the total bw to each plane.
+ */
+ for_each_dbuf_slice_in_mask(i915, slice, dbuf_mask) {
+ crtc_bw->max_bw[slice] = max(crtc_bw->max_bw[slice], data_rate);
+ crtc_bw->active_planes[slice] |= BIT(plane_id);
+ }
+}
+
+static void skl_crtc_calc_dbuf_bw(struct intel_bw_state *bw_state,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_dbuf_bw *crtc_bw = &bw_state->dbuf_bw[crtc->pipe];
+ enum plane_id plane_id;
+
+ memset(crtc_bw, 0, sizeof(*crtc_bw));
+
+ if (!crtc_state->hw.active)
+ return;
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ /*
+ * We assume cursors are small enough
+ * to not cause bandwidth problems.
+ */
+ if (plane_id == PLANE_CURSOR)
+ continue;
+
+ skl_plane_calc_dbuf_bw(bw_state, crtc, plane_id,
+ &crtc_state->wm.skl.plane_ddb[plane_id],
+ crtc_state->data_rate[plane_id]);
+
+ if (DISPLAY_VER(i915) < 11)
+ skl_plane_calc_dbuf_bw(bw_state, crtc, plane_id,
+ &crtc_state->wm.skl.plane_ddb_y[plane_id],
+ crtc_state->data_rate[plane_id]);
+ }
+}
+
+/* "Maximum Data Buffer Bandwidth" */
+static int
+intel_bw_dbuf_min_cdclk(struct drm_i915_private *i915,
+ const struct intel_bw_state *bw_state)
+{
+ unsigned int total_max_bw = 0;
+ enum dbuf_slice slice;
+
+ for_each_dbuf_slice(i915, slice) {
+ int num_active_planes = 0;
+ unsigned int max_bw = 0;
+ enum pipe pipe;
+
+ /*
+ * The arbiter can only really guarantee an
+ * equal share of the total bw to each plane.
+ */
+ for_each_pipe(i915, pipe) {
+ const struct intel_dbuf_bw *crtc_bw = &bw_state->dbuf_bw[pipe];
+
+ max_bw = max(crtc_bw->max_bw[slice], max_bw);
+ num_active_planes += hweight8(crtc_bw->active_planes[slice]);
+ }
+ max_bw *= num_active_planes;
+
+ total_max_bw = max(total_max_bw, max_bw);
+ }
+
+ return DIV_ROUND_UP(total_max_bw, 64);
+}
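
/*
 * A worked example of the dbuf-derived floor above, with hypothetical
 * per-slice numbers: the arbiter only guarantees an equal share, so the
 * busiest plane's rate is scaled by the plane count on the slice before
 * the /64 conversion.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* one slice: busiest pipe bw 2000000, 3 active planes in total */
	unsigned int max_bw = 2000000, num_active_planes = 3;
	unsigned int total_max_bw = max_bw * num_active_planes;

	/* 6000000 / 64 = 93750 kHz minimum cdclk */
	printf("dbuf min cdclk = %u\n", DIV_ROUND_UP(total_max_bw, 64));
	return 0;
}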
+
+int intel_bw_min_cdclk(struct drm_i915_private *i915,
+ const struct intel_bw_state *bw_state)
+{
+ enum pipe pipe;
+ int min_cdclk;
+
+ min_cdclk = intel_bw_dbuf_min_cdclk(i915, bw_state);
+
+ for_each_pipe(i915, pipe)
+ min_cdclk = max(bw_state->min_cdclk[pipe], min_cdclk);
+
+ return min_cdclk;
+}
+
+int intel_bw_calc_min_cdclk(struct intel_atomic_state *state,
+ bool *need_cdclk_calc)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_bw_state *new_bw_state = NULL;
+ const struct intel_bw_state *old_bw_state = NULL;
+ const struct intel_cdclk_state *cdclk_state;
+ const struct intel_crtc_state *crtc_state;
+ int old_min_cdclk, new_min_cdclk;
+ struct intel_crtc *crtc;
+ int i;
+
+ if (DISPLAY_VER(dev_priv) < 9)
+ return 0;
+
+ for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
+ new_bw_state = intel_atomic_get_bw_state(state);
+ if (IS_ERR(new_bw_state))
+ return PTR_ERR(new_bw_state);
+
+ old_bw_state = intel_atomic_get_old_bw_state(state);
+
+ skl_crtc_calc_dbuf_bw(new_bw_state, crtc_state);
+
+ new_bw_state->min_cdclk[crtc->pipe] =
+ intel_bw_crtc_min_cdclk(crtc_state);
+ }
+
+ if (!old_bw_state)
+ return 0;
+
+ if (intel_bw_state_changed(dev_priv, old_bw_state, new_bw_state)) {
+ int ret = intel_atomic_lock_global_state(&new_bw_state->base);
+ if (ret)
+ return ret;
+ }
+
+ old_min_cdclk = intel_bw_min_cdclk(dev_priv, old_bw_state);
+ new_min_cdclk = intel_bw_min_cdclk(dev_priv, new_bw_state);
+
+ /*
+ * No need to check against the cdclk state if
+ * the min cdclk doesn't increase.
+ *
+ * I.e. we only ever increase the cdclk due to bandwidth
+ * requirements. This can reduce back and forth
+ * display blinking due to constant cdclk changes.
+ */
+ if (new_min_cdclk <= old_min_cdclk)
+ return 0;
+
+ cdclk_state = intel_atomic_get_cdclk_state(state);
+ if (IS_ERR(cdclk_state))
+ return PTR_ERR(cdclk_state);
+
+ /*
+ * No need to recalculate the cdclk state if
+ * the min cdclk doesn't increase.
+ *
+ * I.e. we only ever increase the cdclk due to bandwidth
+ * requirements. This can reduce back and forth
+ * display blinking due to constant cdclk changes.
+ */
+ if (new_min_cdclk <= cdclk_state->bw_min_cdclk)
+ return 0;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "new bandwidth min cdclk (%d kHz) > old min cdclk (%d kHz)\n",
+ new_min_cdclk, cdclk_state->bw_min_cdclk);
+ *need_cdclk_calc = true;
+
+ return 0;
+}
+
+static u16 icl_qgv_points_mask(struct drm_i915_private *i915)
+{
+ unsigned int num_psf_gv_points = i915->display.bw.max[0].num_psf_gv_points;
+ unsigned int num_qgv_points = i915->display.bw.max[0].num_qgv_points;
+ u16 qgv_points = 0, psf_points = 0;
+
+ /*
+ * We can _not_ use the whole ADLS_QGV_PT_MASK here, as PCode rejects
+ * the request if we try to mask any unadvertised points.
+ * So we need to operate only with those returned by PCode.
+ */
+ if (num_qgv_points > 0)
+ qgv_points = GENMASK(num_qgv_points - 1, 0);
+
+ if (num_psf_gv_points > 0)
+ psf_points = GENMASK(num_psf_gv_points - 1, 0);
+
+ return ICL_PCODE_REQ_QGV_PT(qgv_points) | ADLS_PCODE_REQ_PSF_PT(psf_points);
+}
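
/*
 * A small sketch of the mask construction above; GENMASK(h, l) sets
 * bits h..l and is reimplemented here so the snippet stands alone. The
 * REQ_* macros then shift these fields to their platform-specific bit
 * offsets, which are elided.
 */
#include <stdio.h>

#define GENMASK(h, l) ((~0u >> (31 - (h))) & (~0u << (l)))

int main(void)
{
	unsigned int num_qgv_points = 3, num_psf_gv_points = 2;

	/* 3 QGV points -> 0x7, 2 PSF GV points -> 0x3 */
	printf("qgv=0x%x psf=0x%x\n",
	       num_qgv_points ? GENMASK(num_qgv_points - 1, 0) : 0,
	       num_psf_gv_points ? GENMASK(num_psf_gv_points - 1, 0) : 0);
	return 0;
}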
+
+static int intel_bw_check_data_rate(struct intel_atomic_state *state, bool *changed)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_crtc_state *new_crtc_state, *old_crtc_state;
+ struct intel_crtc *crtc;
+ int i;
+
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ unsigned int old_data_rate =
+ intel_bw_crtc_data_rate(old_crtc_state);
+ unsigned int new_data_rate =
+ intel_bw_crtc_data_rate(new_crtc_state);
+ unsigned int old_active_planes =
+ intel_bw_crtc_num_active_planes(old_crtc_state);
+ unsigned int new_active_planes =
+ intel_bw_crtc_num_active_planes(new_crtc_state);
+ struct intel_bw_state *new_bw_state;
+
+ /*
+ * Avoid locking the bw state when
+ * nothing significant has changed.
+ */
+ if (old_data_rate == new_data_rate &&
+ old_active_planes == new_active_planes)
+ continue;
+
+ new_bw_state = intel_atomic_get_bw_state(state);
+ if (IS_ERR(new_bw_state))
+ return PTR_ERR(new_bw_state);
+
+ new_bw_state->data_rate[crtc->pipe] = new_data_rate;
+ new_bw_state->num_active_planes[crtc->pipe] = new_active_planes;
+
+ *changed = true;
+
+ drm_dbg_kms(&i915->drm,
+ "[CRTC:%d:%s] data rate %u num active planes %u\n",
+ crtc->base.base.id, crtc->base.name,
+ new_bw_state->data_rate[crtc->pipe],
+ new_bw_state->num_active_planes[crtc->pipe]);
+ }
+
+ return 0;
+}
+
+int intel_bw_atomic_check(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ const struct intel_bw_state *old_bw_state;
+ struct intel_bw_state *new_bw_state;
+ unsigned int data_rate;
+ unsigned int num_active_planes;
+ int i, ret;
+ u16 qgv_points = 0, psf_points = 0;
+ unsigned int max_bw_point = 0, max_bw = 0;
+ unsigned int num_qgv_points = dev_priv->display.bw.max[0].num_qgv_points;
+ unsigned int num_psf_gv_points = dev_priv->display.bw.max[0].num_psf_gv_points;
+ bool changed = false;
+
+ /* FIXME earlier gens need some checks too */
+ if (DISPLAY_VER(dev_priv) < 11)
+ return 0;
+
+ ret = intel_bw_check_data_rate(state, &changed);
+ if (ret)
+ return ret;
+
+ old_bw_state = intel_atomic_get_old_bw_state(state);
+ new_bw_state = intel_atomic_get_new_bw_state(state);
+
+ if (new_bw_state &&
+ intel_can_enable_sagv(dev_priv, old_bw_state) !=
+ intel_can_enable_sagv(dev_priv, new_bw_state))
+ changed = true;
+
+ /*
+ * If none of our inputs (data rates, number of active
+ * planes, SAGV yes/no) changed, there is nothing to do here.
+ */
+ if (!changed)
+ return 0;
+
+ ret = intel_atomic_lock_global_state(&new_bw_state->base);
+ if (ret)
+ return ret;
+
+ data_rate = intel_bw_data_rate(dev_priv, new_bw_state);
+ data_rate = DIV_ROUND_UP(data_rate, 1000);
+
+ num_active_planes = intel_bw_num_active_planes(dev_priv, new_bw_state);
+
+ for (i = 0; i < num_qgv_points; i++) {
+ unsigned int max_data_rate;
+
+ if (DISPLAY_VER(dev_priv) > 11)
+ max_data_rate = tgl_max_bw(dev_priv, num_active_planes, i);
+ else
+ max_data_rate = icl_max_bw(dev_priv, num_active_planes, i);
+ /*
+ * We need to know which QGV point gives us the
+ * maximum bandwidth in order to disable SAGV
+ * if we find that we exceed the SAGV block time
+ * with watermarks. By that moment we already
+ * have the watermarks, as they are calculated
+ * earlier in intel_atomic_check().
+ */
+ if (max_data_rate > max_bw) {
+ max_bw_point = i;
+ max_bw = max_data_rate;
+ }
+ if (max_data_rate >= data_rate)
+ qgv_points |= BIT(i);
+
+ drm_dbg_kms(&dev_priv->drm, "QGV point %d: max bw %d required %d\n",
+ i, max_data_rate, data_rate);
+ }
+
+ for (i = 0; i < num_psf_gv_points; i++) {
+ unsigned int max_data_rate = adl_psf_bw(dev_priv, i);
+
+ if (max_data_rate >= data_rate)
+ psf_points |= BIT(i);
+
+ drm_dbg_kms(&dev_priv->drm, "PSF GV point %d: max bw %d"
+ " required %d\n",
+ i, max_data_rate, data_rate);
+ }
+
+ /*
+ * BSpec states that we should always have at least one allowed
+ * point left, so if we don't, simply reject the configuration.
+ */
+ if (qgv_points == 0) {
+ drm_dbg_kms(&dev_priv->drm, "No QGV points provide sufficient memory"
+ " bandwidth %d for display configuration(%d active planes).\n",
+ data_rate, num_active_planes);
+ return -EINVAL;
+ }
+
+ if (num_psf_gv_points > 0 && psf_points == 0) {
+ drm_dbg_kms(&dev_priv->drm, "No PSF GV points provide sufficient memory"
+ " bandwidth %d for display configuration(%d active planes).\n",
+ data_rate, num_active_planes);
+ return -EINVAL;
+ }
+
+ /*
+ * Leave only the single point with the highest bandwidth if
+ * we can't enable SAGV due to the increased memory latency it
+ * may cause.
+ */
+ if (!intel_can_enable_sagv(dev_priv, new_bw_state)) {
+ qgv_points = BIT(max_bw_point);
+ drm_dbg_kms(&dev_priv->drm, "No SAGV, using single QGV point %d\n",
+ max_bw_point);
+ }
+
+ /*
+ * We store the ones which need to be masked as that is what PCode
+ * actually accepts as a parameter.
+ */
+ new_bw_state->qgv_points_mask =
+ ~(ICL_PCODE_REQ_QGV_PT(qgv_points) |
+ ADLS_PCODE_REQ_PSF_PT(psf_points)) &
+ icl_qgv_points_mask(dev_priv);
+
+ /*
+ * If the actual mask has changed, we need to make sure that the
+ * commits are serialized (in case this is a nomodeset, nonblocking commit).
+ */
+ if (new_bw_state->qgv_points_mask != old_bw_state->qgv_points_mask) {
+ ret = intel_atomic_serialize_global_state(&new_bw_state->base);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
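
/*
 * The stored mask is the complement of the allowed points, clipped to
 * whatever PCode advertised. A sketch with hypothetical flat bitmasks
 * (the REQ_* field placement is elided):
 */
#include <stdio.h>

int main(void)
{
	unsigned int advertised = 0xf;	/* 4 QGV points exposed by PCode */
	unsigned int allowed = 0x3;	/* points with enough bandwidth */

	/* points to restrict: ~0x3 & 0xf = 0xc */
	printf("qgv_points_mask = 0x%x\n", ~allowed & advertised);
	return 0;
}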
+
+static struct intel_global_state *
+intel_bw_duplicate_state(struct intel_global_obj *obj)
+{
+ struct intel_bw_state *state;
+
+ state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
+
+ return &state->base;
+}
+
+static void intel_bw_destroy_state(struct intel_global_obj *obj,
+ struct intel_global_state *state)
+{
+ kfree(state);
+}
+
+static const struct intel_global_state_funcs intel_bw_funcs = {
+ .atomic_duplicate_state = intel_bw_duplicate_state,
+ .atomic_destroy_state = intel_bw_destroy_state,
+};
+
+int intel_bw_init(struct drm_i915_private *dev_priv)
+{
+ struct intel_bw_state *state;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ intel_atomic_global_obj_init(dev_priv, &dev_priv->display.bw.obj,
+ &state->base, &intel_bw_funcs);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_bw.h b/drivers/gpu/drm/i915/display/intel_bw.h
new file mode 100644
index 000000000..cb7ee3a24
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_bw.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_BW_H__
+#define __INTEL_BW_H__
+
+#include <drm/drm_atomic.h>
+
+#include "intel_display.h"
+#include "intel_display_power.h"
+#include "intel_global_state.h"
+
+struct drm_i915_private;
+struct intel_atomic_state;
+struct intel_crtc_state;
+
+struct intel_dbuf_bw {
+ unsigned int max_bw[I915_MAX_DBUF_SLICES];
+ u8 active_planes[I915_MAX_DBUF_SLICES];
+};
+
+struct intel_bw_state {
+ struct intel_global_state base;
+ struct intel_dbuf_bw dbuf_bw[I915_MAX_PIPES];
+
+ /*
+ * Contains a bit mask used to determine whether the
+ * corresponding pipe allows SAGV or not.
+ */
+ u8 pipe_sagv_reject;
+
+ /* bitmask of active pipes */
+ u8 active_pipes;
+
+ /*
+ * Current QGV points mask, which restricts
+ * some particular SAGV states; not to be confused
+ * with pipe_sagv_reject.
+ */
+ u16 qgv_points_mask;
+
+ int min_cdclk[I915_MAX_PIPES];
+ unsigned int data_rate[I915_MAX_PIPES];
+ u8 num_active_planes[I915_MAX_PIPES];
+};
+
+#define to_intel_bw_state(x) container_of((x), struct intel_bw_state, base)
+
+struct intel_bw_state *
+intel_atomic_get_old_bw_state(struct intel_atomic_state *state);
+
+struct intel_bw_state *
+intel_atomic_get_new_bw_state(struct intel_atomic_state *state);
+
+struct intel_bw_state *
+intel_atomic_get_bw_state(struct intel_atomic_state *state);
+
+void intel_bw_init_hw(struct drm_i915_private *dev_priv);
+int intel_bw_init(struct drm_i915_private *dev_priv);
+int intel_bw_atomic_check(struct intel_atomic_state *state);
+void intel_bw_crtc_update(struct intel_bw_state *bw_state,
+ const struct intel_crtc_state *crtc_state);
+int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv,
+ u32 points_mask);
+int intel_bw_calc_min_cdclk(struct intel_atomic_state *state,
+ bool *need_cdclk_calc);
+int intel_bw_min_cdclk(struct drm_i915_private *i915,
+ const struct intel_bw_state *bw_state);
+
+#endif /* __INTEL_BW_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
new file mode 100644
index 000000000..25dcdde5f
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -0,0 +1,3283 @@
+/*
+ * Copyright © 2006-2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/time.h>
+
+#include "hsw_ips.h"
+#include "intel_atomic.h"
+#include "intel_atomic_plane.h"
+#include "intel_audio.h"
+#include "intel_bw.h"
+#include "intel_cdclk.h"
+#include "intel_crtc.h"
+#include "intel_de.h"
+#include "intel_display_types.h"
+#include "intel_mchbar_regs.h"
+#include "intel_pci_config.h"
+#include "intel_pcode.h"
+#include "intel_psr.h"
+#include "vlv_sideband.h"
+
+/**
+ * DOC: CDCLK / RAWCLK
+ *
+ * The display engine uses several different clocks to do its work. There
+ * are two main clocks involved that aren't directly related to the actual
+ * pixel clock or any symbol/bit clock of the actual output port. These
+ * are the core display clock (CDCLK) and RAWCLK.
+ *
+ * CDCLK clocks most of the display pipe logic, and thus its frequency
+ * must be high enough to support the rate at which pixels are flowing
+ * through the pipes. Downscaling must also be accounted for, as that increases
+ * the effective pixel rate.
+ *
+ * On several platforms the CDCLK frequency can be changed dynamically
+ * to minimize power consumption for a given display configuration.
+ * Typically changes to the CDCLK frequency require all the display pipes
+ * to be shut down while the frequency is being changed.
+ *
+ * On SKL+ the DMC will toggle the CDCLK off/on during DC5/6 entry/exit.
+ * DMC will not change the active CDCLK frequency however, so that part
+ * will still be performed by the driver directly.
+ *
+ * RAWCLK is a fixed frequency clock, often used by various auxiliary
+ * blocks such as AUX CH or backlight PWM. Hence the only thing we
+ * really need to know about RAWCLK is its frequency so that various
+ * dividers can be programmed correctly.
+ */
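
/*
 * A purely illustrative sizing computation; the exact pixel-rate-to-
 * CDCLK ratio is platform specific, and this assumes a hypothetical
 * platform that processes two pixels per CDCLK cycle, with a plane
 * downscaled by 1.25x in one direction.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int pixel_rate = 533250;	/* kHz, roughly 4k@60 */
	/* downscaling raises the effective pixel rate: 666562 kHz */
	unsigned int effective = pixel_rate * 5 / 4;

	/* 666562 / 2 rounded up = 333281 kHz, a ~333 MHz CDCLK floor */
	printf("min cdclk = %u kHz\n", DIV_ROUND_UP(effective, 2));
	return 0;
}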
+
+struct intel_cdclk_funcs {
+ void (*get_cdclk)(struct drm_i915_private *i915,
+ struct intel_cdclk_config *cdclk_config);
+ void (*set_cdclk)(struct drm_i915_private *i915,
+ const struct intel_cdclk_config *cdclk_config,
+ enum pipe pipe);
+ int (*modeset_calc_cdclk)(struct intel_cdclk_state *state);
+ u8 (*calc_voltage_level)(int cdclk);
+};
+
+void intel_cdclk_get_cdclk(struct drm_i915_private *dev_priv,
+ struct intel_cdclk_config *cdclk_config)
+{
+ dev_priv->display.funcs.cdclk->get_cdclk(dev_priv, cdclk_config);
+}
+
+static void intel_cdclk_set_cdclk(struct drm_i915_private *dev_priv,
+ const struct intel_cdclk_config *cdclk_config,
+ enum pipe pipe)
+{
+ dev_priv->display.funcs.cdclk->set_cdclk(dev_priv, cdclk_config, pipe);
+}
+
+static int intel_cdclk_modeset_calc_cdclk(struct drm_i915_private *dev_priv,
+ struct intel_cdclk_state *cdclk_config)
+{
+ return dev_priv->display.funcs.cdclk->modeset_calc_cdclk(cdclk_config);
+}
+
+static u8 intel_cdclk_calc_voltage_level(struct drm_i915_private *dev_priv,
+ int cdclk)
+{
+ return dev_priv->display.funcs.cdclk->calc_voltage_level(cdclk);
+}
+
+static void fixed_133mhz_get_cdclk(struct drm_i915_private *dev_priv,
+ struct intel_cdclk_config *cdclk_config)
+{
+ cdclk_config->cdclk = 133333;
+}
+
+static void fixed_200mhz_get_cdclk(struct drm_i915_private *dev_priv,
+ struct intel_cdclk_config *cdclk_config)
+{
+ cdclk_config->cdclk = 200000;
+}
+
+static void fixed_266mhz_get_cdclk(struct drm_i915_private *dev_priv,
+ struct intel_cdclk_config *cdclk_config)
+{
+ cdclk_config->cdclk = 266667;
+}
+
+static void fixed_333mhz_get_cdclk(struct drm_i915_private *dev_priv,
+ struct intel_cdclk_config *cdclk_config)
+{
+ cdclk_config->cdclk = 333333;
+}
+
+static void fixed_400mhz_get_cdclk(struct drm_i915_private *dev_priv,
+ struct intel_cdclk_config *cdclk_config)
+{
+ cdclk_config->cdclk = 400000;
+}
+
+static void fixed_450mhz_get_cdclk(struct drm_i915_private *dev_priv,
+ struct intel_cdclk_config *cdclk_config)
+{
+ cdclk_config->cdclk = 450000;
+}
+
+static void i85x_get_cdclk(struct drm_i915_private *dev_priv,
+ struct intel_cdclk_config *cdclk_config)
+{
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
+ u16 hpllcc = 0;
+
+ /*
+ * 852GM/852GMV only supports 133 MHz and the HPLLCC
+ * encoding is different :(
+ * FIXME is this the right way to detect 852GM/852GMV?
+ */
+ if (pdev->revision == 0x1) {
+ cdclk_config->cdclk = 133333;
+ return;
+ }
+
+ pci_bus_read_config_word(pdev->bus,
+ PCI_DEVFN(0, 3), HPLLCC, &hpllcc);
+
+ /* Assume that the hardware is in the high speed state. This
+ * should be the default.
+ */
+ switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
+ case GC_CLOCK_133_200:
+ case GC_CLOCK_133_200_2:
+ case GC_CLOCK_100_200:
+ cdclk_config->cdclk = 200000;
+ break;
+ case GC_CLOCK_166_250:
+ cdclk_config->cdclk = 250000;
+ break;
+ case GC_CLOCK_100_133:
+ cdclk_config->cdclk = 133333;
+ break;
+ case GC_CLOCK_133_266:
+ case GC_CLOCK_133_266_2:
+ case GC_CLOCK_166_266:
+ cdclk_config->cdclk = 266667;
+ break;
+ }
+}
+
+static void i915gm_get_cdclk(struct drm_i915_private *dev_priv,
+ struct intel_cdclk_config *cdclk_config)
+{
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
+ u16 gcfgc = 0;
+
+ pci_read_config_word(pdev, GCFGC, &gcfgc);
+
+ if (gcfgc & GC_LOW_FREQUENCY_ENABLE) {
+ cdclk_config->cdclk = 133333;
+ return;
+ }
+
+ switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
+ case GC_DISPLAY_CLOCK_333_320_MHZ:
+ cdclk_config->cdclk = 333333;
+ break;
+ default:
+ case GC_DISPLAY_CLOCK_190_200_MHZ:
+ cdclk_config->cdclk = 190000;
+ break;
+ }
+}
+
+static void i945gm_get_cdclk(struct drm_i915_private *dev_priv,
+ struct intel_cdclk_config *cdclk_config)
+{
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
+ u16 gcfgc = 0;
+
+ pci_read_config_word(pdev, GCFGC, &gcfgc);
+
+ if (gcfgc & GC_LOW_FREQUENCY_ENABLE) {
+ cdclk_config->cdclk = 133333;
+ return;
+ }
+
+ switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
+ case GC_DISPLAY_CLOCK_333_320_MHZ:
+ cdclk_config->cdclk = 320000;
+ break;
+ default:
+ case GC_DISPLAY_CLOCK_190_200_MHZ:
+ cdclk_config->cdclk = 200000;
+ break;
+ }
+}
+
+static unsigned int intel_hpll_vco(struct drm_i915_private *dev_priv)
+{
+ static const unsigned int blb_vco[8] = {
+ [0] = 3200000,
+ [1] = 4000000,
+ [2] = 5333333,
+ [3] = 4800000,
+ [4] = 6400000,
+ };
+ static const unsigned int pnv_vco[8] = {
+ [0] = 3200000,
+ [1] = 4000000,
+ [2] = 5333333,
+ [3] = 4800000,
+ [4] = 2666667,
+ };
+ static const unsigned int cl_vco[8] = {
+ [0] = 3200000,
+ [1] = 4000000,
+ [2] = 5333333,
+ [3] = 6400000,
+ [4] = 3333333,
+ [5] = 3566667,
+ [6] = 4266667,
+ };
+ static const unsigned int elk_vco[8] = {
+ [0] = 3200000,
+ [1] = 4000000,
+ [2] = 5333333,
+ [3] = 4800000,
+ };
+ static const unsigned int ctg_vco[8] = {
+ [0] = 3200000,
+ [1] = 4000000,
+ [2] = 5333333,
+ [3] = 6400000,
+ [4] = 2666667,
+ [5] = 4266667,
+ };
+ const unsigned int *vco_table;
+ unsigned int vco;
+ u8 tmp = 0;
+
+ /* FIXME other chipsets? */
+ if (IS_GM45(dev_priv))
+ vco_table = ctg_vco;
+ else if (IS_G45(dev_priv))
+ vco_table = elk_vco;
+ else if (IS_I965GM(dev_priv))
+ vco_table = cl_vco;
+ else if (IS_PINEVIEW(dev_priv))
+ vco_table = pnv_vco;
+ else if (IS_G33(dev_priv))
+ vco_table = blb_vco;
+ else
+ return 0;
+
+ tmp = intel_de_read(dev_priv,
+ IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv) ? HPLLVCO_MOBILE : HPLLVCO);
+
+ vco = vco_table[tmp & 0x7];
+ if (vco == 0)
+ drm_err(&dev_priv->drm, "Bad HPLL VCO (HPLLVCO=0x%02x)\n",
+ tmp);
+ else
+ drm_dbg_kms(&dev_priv->drm, "HPLL VCO %u kHz\n", vco);
+
+ return vco;
+}
+
+static void g33_get_cdclk(struct drm_i915_private *dev_priv,
+ struct intel_cdclk_config *cdclk_config)
+{
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
+ static const u8 div_3200[] = { 12, 10, 8, 7, 5, 16 };
+ static const u8 div_4000[] = { 14, 12, 10, 8, 6, 20 };
+ static const u8 div_4800[] = { 20, 14, 12, 10, 8, 24 };
+ static const u8 div_5333[] = { 20, 16, 12, 12, 8, 28 };
+ const u8 *div_table;
+ unsigned int cdclk_sel;
+ u16 tmp = 0;
+
+ cdclk_config->vco = intel_hpll_vco(dev_priv);
+
+ pci_read_config_word(pdev, GCFGC, &tmp);
+
+ cdclk_sel = (tmp >> 4) & 0x7;
+
+ if (cdclk_sel >= ARRAY_SIZE(div_3200))
+ goto fail;
+
+ switch (cdclk_config->vco) {
+ case 3200000:
+ div_table = div_3200;
+ break;
+ case 4000000:
+ div_table = div_4000;
+ break;
+ case 4800000:
+ div_table = div_4800;
+ break;
+ case 5333333:
+ div_table = div_5333;
+ break;
+ default:
+ goto fail;
+ }
+
+ cdclk_config->cdclk = DIV_ROUND_CLOSEST(cdclk_config->vco,
+ div_table[cdclk_sel]);
+ return;
+
+fail:
+ drm_err(&dev_priv->drm,
+ "Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%08x\n",
+ cdclk_config->vco, tmp);
+ cdclk_config->cdclk = 190476;
+}
+
+static void pnv_get_cdclk(struct drm_i915_private *dev_priv,
+ struct intel_cdclk_config *cdclk_config)
+{
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
+ u16 gcfgc = 0;
+
+ pci_read_config_word(pdev, GCFGC, &gcfgc);
+
+ switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
+ case GC_DISPLAY_CLOCK_267_MHZ_PNV:
+ cdclk_config->cdclk = 266667;
+ break;
+ case GC_DISPLAY_CLOCK_333_MHZ_PNV:
+ cdclk_config->cdclk = 333333;
+ break;
+ case GC_DISPLAY_CLOCK_444_MHZ_PNV:
+ cdclk_config->cdclk = 444444;
+ break;
+ case GC_DISPLAY_CLOCK_200_MHZ_PNV:
+ cdclk_config->cdclk = 200000;
+ break;
+ default:
+ drm_err(&dev_priv->drm,
+ "Unknown pnv display core clock 0x%04x\n", gcfgc);
+ fallthrough;
+ case GC_DISPLAY_CLOCK_133_MHZ_PNV:
+ cdclk_config->cdclk = 133333;
+ break;
+ case GC_DISPLAY_CLOCK_167_MHZ_PNV:
+ cdclk_config->cdclk = 166667;
+ break;
+ }
+}
+
+static void i965gm_get_cdclk(struct drm_i915_private *dev_priv,
+ struct intel_cdclk_config *cdclk_config)
+{
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
+ static const u8 div_3200[] = { 16, 10, 8 };
+ static const u8 div_4000[] = { 20, 12, 10 };
+ static const u8 div_5333[] = { 24, 16, 14 };
+ const u8 *div_table;
+ unsigned int cdclk_sel;
+ u16 tmp = 0;
+
+ cdclk_config->vco = intel_hpll_vco(dev_priv);
+
+ pci_read_config_word(pdev, GCFGC, &tmp);
+
+ cdclk_sel = ((tmp >> 8) & 0x1f) - 1;
+
+ if (cdclk_sel >= ARRAY_SIZE(div_3200))
+ goto fail;
+
+ switch (cdclk_config->vco) {
+ case 3200000:
+ div_table = div_3200;
+ break;
+ case 4000000:
+ div_table = div_4000;
+ break;
+ case 5333333:
+ div_table = div_5333;
+ break;
+ default:
+ goto fail;
+ }
+
+ cdclk_config->cdclk = DIV_ROUND_CLOSEST(cdclk_config->vco,
+ div_table[cdclk_sel]);
+ return;
+
+fail:
+ drm_err(&dev_priv->drm,
+ "Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%04x\n",
+ cdclk_config->vco, tmp);
+ cdclk_config->cdclk = 200000;
+}
+
+static void gm45_get_cdclk(struct drm_i915_private *dev_priv,
+ struct intel_cdclk_config *cdclk_config)
+{
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
+ unsigned int cdclk_sel;
+ u16 tmp = 0;
+
+ cdclk_config->vco = intel_hpll_vco(dev_priv);
+
+ pci_read_config_word(pdev, GCFGC, &tmp);
+
+ cdclk_sel = (tmp >> 12) & 0x1;
+
+ switch (cdclk_config->vco) {
+ case 2666667:
+ case 4000000:
+ case 5333333:
+ cdclk_config->cdclk = cdclk_sel ? 333333 : 222222;
+ break;
+ case 3200000:
+ cdclk_config->cdclk = cdclk_sel ? 320000 : 228571;
+ break;
+ default:
+ drm_err(&dev_priv->drm,
+ "Unable to determine CDCLK. HPLL VCO=%u, CFGC=0x%04x\n",
+ cdclk_config->vco, tmp);
+ cdclk_config->cdclk = 222222;
+ break;
+ }
+}
+
+static void hsw_get_cdclk(struct drm_i915_private *dev_priv,
+ struct intel_cdclk_config *cdclk_config)
+{
+ u32 lcpll = intel_de_read(dev_priv, LCPLL_CTL);
+ u32 freq = lcpll & LCPLL_CLK_FREQ_MASK;
+
+ if (lcpll & LCPLL_CD_SOURCE_FCLK)
+ cdclk_config->cdclk = 800000;
+ else if (intel_de_read(dev_priv, FUSE_STRAP) & HSW_CDCLK_LIMIT)
+ cdclk_config->cdclk = 450000;
+ else if (freq == LCPLL_CLK_FREQ_450)
+ cdclk_config->cdclk = 450000;
+ else if (IS_HSW_ULT(dev_priv))
+ cdclk_config->cdclk = 337500;
+ else
+ cdclk_config->cdclk = 540000;
+}
+
+static int vlv_calc_cdclk(struct drm_i915_private *dev_priv, int min_cdclk)
+{
+ int freq_320 = (dev_priv->hpll_freq << 1) % 320000 != 0 ?
+ 333333 : 320000;
+
+ /*
+ * We seem to get an unstable or solid color picture at 200MHz.
+ * Not sure what's wrong. For now use 200MHz only when all pipes
+ * are off.
+ */
+ if (IS_VALLEYVIEW(dev_priv) && min_cdclk > freq_320)
+ return 400000;
+ else if (min_cdclk > 266667)
+ return freq_320;
+ else if (min_cdclk > 0)
+ return 266667;
+ else
+ return 200000;
+}
+
+static u8 vlv_calc_voltage_level(struct drm_i915_private *dev_priv, int cdclk)
+{
+ if (IS_VALLEYVIEW(dev_priv)) {
+ if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
+ return 2;
+ else if (cdclk >= 266667)
+ return 1;
+ else
+ return 0;
+ } else {
+ /*
+ * Specs are full of misinformation, but testing on actual
+ * hardware has shown that we just need to write the desired
+ * CCK divider into the Punit register.
+ */
+ return DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;
+ }
+}
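
/*
 * A worked example of the CHV branch above, assuming a hypothetical
 * 1600 MHz HPLL: the Punit value is just the CCK divider,
 * 2 * hpll / cdclk - 1.
 */
#include <stdio.h>

#define DIV_ROUND_CLOSEST(n, d) (((n) + (d) / 2) / (d))

int main(void)
{
	int hpll_freq = 1600000;	/* kHz, hypothetical */
	int cdclk = 320000;		/* kHz */

	/* 3200000 / 320000 - 1 = 9 */
	printf("level = %d\n", DIV_ROUND_CLOSEST(hpll_freq << 1, cdclk) - 1);
	return 0;
}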
+
+static void vlv_get_cdclk(struct drm_i915_private *dev_priv,
+ struct intel_cdclk_config *cdclk_config)
+{
+ u32 val;
+
+ vlv_iosf_sb_get(dev_priv,
+ BIT(VLV_IOSF_SB_CCK) | BIT(VLV_IOSF_SB_PUNIT));
+
+ cdclk_config->vco = vlv_get_hpll_vco(dev_priv);
+ cdclk_config->cdclk = vlv_get_cck_clock(dev_priv, "cdclk",
+ CCK_DISPLAY_CLOCK_CONTROL,
+ cdclk_config->vco);
+
+ val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
+
+ vlv_iosf_sb_put(dev_priv,
+ BIT(VLV_IOSF_SB_CCK) | BIT(VLV_IOSF_SB_PUNIT));
+
+ if (IS_VALLEYVIEW(dev_priv))
+ cdclk_config->voltage_level = (val & DSPFREQGUAR_MASK) >>
+ DSPFREQGUAR_SHIFT;
+ else
+ cdclk_config->voltage_level = (val & DSPFREQGUAR_MASK_CHV) >>
+ DSPFREQGUAR_SHIFT_CHV;
+}
+
+static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
+{
+ unsigned int credits, default_credits;
+
+ if (IS_CHERRYVIEW(dev_priv))
+ default_credits = PFI_CREDIT(12);
+ else
+ default_credits = PFI_CREDIT(8);
+
+ if (dev_priv->display.cdclk.hw.cdclk >= dev_priv->czclk_freq) {
+ /* CHV suggested value is 31 or 63 */
+ if (IS_CHERRYVIEW(dev_priv))
+ credits = PFI_CREDIT_63;
+ else
+ credits = PFI_CREDIT(15);
+ } else {
+ credits = default_credits;
+ }
+
+ /*
+ * WA - write default credits before re-programming
+ * FIXME: should we also set the resend bit here?
+ */
+ intel_de_write(dev_priv, GCI_CONTROL,
+ VGA_FAST_MODE_DISABLE | default_credits);
+
+ intel_de_write(dev_priv, GCI_CONTROL,
+ VGA_FAST_MODE_DISABLE | credits | PFI_CREDIT_RESEND);
+
+ /*
+ * FIXME is this guaranteed to clear
+ * immediately or should we poll for it?
+ */
+ drm_WARN_ON(&dev_priv->drm,
+ intel_de_read(dev_priv, GCI_CONTROL) & PFI_CREDIT_RESEND);
+}
+
+static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
+ const struct intel_cdclk_config *cdclk_config,
+ enum pipe pipe)
+{
+ int cdclk = cdclk_config->cdclk;
+ u32 val, cmd = cdclk_config->voltage_level;
+ intel_wakeref_t wakeref;
+
+ switch (cdclk) {
+ case 400000:
+ case 333333:
+ case 320000:
+ case 266667:
+ case 200000:
+ break;
+ default:
+ MISSING_CASE(cdclk);
+ return;
+ }
+
+ /* There are cases where we can end up here with power domains
+ * off and a CDCLK frequency other than the minimum, like when
+ * issuing a modeset without actually changing any display after
+ * a system suspend. So grab the display core domain, which covers
+ * the HW blocks needed for the following programming.
+ */
+ wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DISPLAY_CORE);
+
+ vlv_iosf_sb_get(dev_priv,
+ BIT(VLV_IOSF_SB_CCK) |
+ BIT(VLV_IOSF_SB_BUNIT) |
+ BIT(VLV_IOSF_SB_PUNIT));
+
+ val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
+ val &= ~DSPFREQGUAR_MASK;
+ val |= (cmd << DSPFREQGUAR_SHIFT);
+ vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val);
+ if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) &
+ DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
+ 50)) {
+ drm_err(&dev_priv->drm,
+ "timed out waiting for CDclk change\n");
+ }
+
+ if (cdclk == 400000) {
+ u32 divider;
+
+ divider = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1,
+ cdclk) - 1;
+
+ /* adjust cdclk divider */
+ val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
+ val &= ~CCK_FREQUENCY_VALUES;
+ val |= divider;
+ vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);
+
+ if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) &
+ CCK_FREQUENCY_STATUS) == (divider << CCK_FREQUENCY_STATUS_SHIFT),
+ 50))
+ drm_err(&dev_priv->drm,
+ "timed out waiting for CDclk change\n");
+ }
+
+ /* adjust self-refresh exit latency value */
+ val = vlv_bunit_read(dev_priv, BUNIT_REG_BISOC);
+ val &= ~0x7f;
+
+ /*
+ * For high bandwidth configs, we set a higher latency in the bunit
+ * so that the core display fetch happens in time to avoid underruns.
+ */
+ if (cdclk == 400000)
+ val |= 4500 / 250; /* 4.5 usec */
+ else
+ val |= 3000 / 250; /* 3.0 usec */
+ vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);
+
+ vlv_iosf_sb_put(dev_priv,
+ BIT(VLV_IOSF_SB_CCK) |
+ BIT(VLV_IOSF_SB_BUNIT) |
+ BIT(VLV_IOSF_SB_PUNIT));
+
+ intel_update_cdclk(dev_priv);
+
+ vlv_program_pfi_credits(dev_priv);
+
+ intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
+}
+
+static void chv_set_cdclk(struct drm_i915_private *dev_priv,
+ const struct intel_cdclk_config *cdclk_config,
+ enum pipe pipe)
+{
+ int cdclk = cdclk_config->cdclk;
+ u32 val, cmd = cdclk_config->voltage_level;
+ intel_wakeref_t wakeref;
+
+ switch (cdclk) {
+ case 333333:
+ case 320000:
+ case 266667:
+ case 200000:
+ break;
+ default:
+ MISSING_CASE(cdclk);
+ return;
+ }
+
+ /* There are cases where we can end up here with power domains
+ * off and a CDCLK frequency other than the minimum, like when
+ * issuing a modeset without actually changing any display after
+ * a system suspend. So grab the display core domain, which covers
+ * the HW blocks needed for the following programming.
+ */
+ wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DISPLAY_CORE);
+
+ vlv_punit_get(dev_priv);
+ val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
+ val &= ~DSPFREQGUAR_MASK_CHV;
+ val |= (cmd << DSPFREQGUAR_SHIFT_CHV);
+ vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val);
+ if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) &
+ DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV),
+ 50)) {
+ drm_err(&dev_priv->drm,
+ "timed out waiting for CDclk change\n");
+ }
+
+ vlv_punit_put(dev_priv);
+
+ intel_update_cdclk(dev_priv);
+
+ vlv_program_pfi_credits(dev_priv);
+
+ intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
+}
+
+static int bdw_calc_cdclk(int min_cdclk)
+{
+ if (min_cdclk > 540000)
+ return 675000;
+ else if (min_cdclk > 450000)
+ return 540000;
+ else if (min_cdclk > 337500)
+ return 450000;
+ else
+ return 337500;
+}
+
+static u8 bdw_calc_voltage_level(int cdclk)
+{
+ switch (cdclk) {
+ default:
+ case 337500:
+ return 2;
+ case 450000:
+ return 0;
+ case 540000:
+ return 1;
+ case 675000:
+ return 3;
+ }
+}
+
+static void bdw_get_cdclk(struct drm_i915_private *dev_priv,
+ struct intel_cdclk_config *cdclk_config)
+{
+ u32 lcpll = intel_de_read(dev_priv, LCPLL_CTL);
+ u32 freq = lcpll & LCPLL_CLK_FREQ_MASK;
+
+ if (lcpll & LCPLL_CD_SOURCE_FCLK)
+ cdclk_config->cdclk = 800000;
+ else if (intel_de_read(dev_priv, FUSE_STRAP) & HSW_CDCLK_LIMIT)
+ cdclk_config->cdclk = 450000;
+ else if (freq == LCPLL_CLK_FREQ_450)
+ cdclk_config->cdclk = 450000;
+ else if (freq == LCPLL_CLK_FREQ_54O_BDW)
+ cdclk_config->cdclk = 540000;
+ else if (freq == LCPLL_CLK_FREQ_337_5_BDW)
+ cdclk_config->cdclk = 337500;
+ else
+ cdclk_config->cdclk = 675000;
+
+ /*
+ * Can't read this out :( Let's assume it's
+ * at least what the CDCLK frequency requires.
+ */
+ cdclk_config->voltage_level =
+ bdw_calc_voltage_level(cdclk_config->cdclk);
+}
+
+static u32 bdw_cdclk_freq_sel(int cdclk)
+{
+ switch (cdclk) {
+ default:
+ MISSING_CASE(cdclk);
+ fallthrough;
+ case 337500:
+ return LCPLL_CLK_FREQ_337_5_BDW;
+ case 450000:
+ return LCPLL_CLK_FREQ_450;
+ case 540000:
+ return LCPLL_CLK_FREQ_54O_BDW;
+ case 675000:
+ return LCPLL_CLK_FREQ_675_BDW;
+ }
+}
+
+static void bdw_set_cdclk(struct drm_i915_private *dev_priv,
+ const struct intel_cdclk_config *cdclk_config,
+ enum pipe pipe)
+{
+ int cdclk = cdclk_config->cdclk;
+ int ret;
+
+ if (drm_WARN(&dev_priv->drm,
+ (intel_de_read(dev_priv, LCPLL_CTL) &
+ (LCPLL_PLL_DISABLE | LCPLL_PLL_LOCK |
+ LCPLL_CD_CLOCK_DISABLE | LCPLL_ROOT_CD_CLOCK_DISABLE |
+ LCPLL_CD2X_CLOCK_DISABLE | LCPLL_POWER_DOWN_ALLOW |
+ LCPLL_CD_SOURCE_FCLK)) != LCPLL_PLL_LOCK,
+ "trying to change cdclk frequency with cdclk not enabled\n"))
+ return;
+
+ ret = snb_pcode_write(&dev_priv->uncore, BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
+ if (ret) {
+ drm_err(&dev_priv->drm,
+ "failed to inform pcode about cdclk change\n");
+ return;
+ }
+
+ intel_de_rmw(dev_priv, LCPLL_CTL,
+ 0, LCPLL_CD_SOURCE_FCLK);
+
+ /*
+ * According to the spec, it should be enough to poll for this 1 us.
+ * However, extensive testing shows that this can take longer.
+ */
+ if (wait_for_us(intel_de_read(dev_priv, LCPLL_CTL) &
+ LCPLL_CD_SOURCE_FCLK_DONE, 100))
+ drm_err(&dev_priv->drm, "Switching to FCLK failed\n");
+
+ intel_de_rmw(dev_priv, LCPLL_CTL,
+ LCPLL_CLK_FREQ_MASK, bdw_cdclk_freq_sel(cdclk));
+
+ intel_de_rmw(dev_priv, LCPLL_CTL,
+ LCPLL_CD_SOURCE_FCLK, 0);
+
+ if (wait_for_us((intel_de_read(dev_priv, LCPLL_CTL) &
+ LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
+ drm_err(&dev_priv->drm, "Switching back to LCPLL failed\n");
+
+ snb_pcode_write(&dev_priv->uncore, HSW_PCODE_DE_WRITE_FREQ_REQ,
+ cdclk_config->voltage_level);
+
+ intel_de_write(dev_priv, CDCLK_FREQ,
+ DIV_ROUND_CLOSEST(cdclk, 1000) - 1);
+
+ intel_update_cdclk(dev_priv);
+}
+
+static int skl_calc_cdclk(int min_cdclk, int vco)
+{
+ if (vco == 8640000) {
+ if (min_cdclk > 540000)
+ return 617143;
+ else if (min_cdclk > 432000)
+ return 540000;
+ else if (min_cdclk > 308571)
+ return 432000;
+ else
+ return 308571;
+ } else {
+ if (min_cdclk > 540000)
+ return 675000;
+ else if (min_cdclk > 450000)
+ return 540000;
+ else if (min_cdclk > 337500)
+ return 450000;
+ else
+ return 337500;
+ }
+}
+
+static u8 skl_calc_voltage_level(int cdclk)
+{
+ if (cdclk > 540000)
+ return 3;
+ else if (cdclk > 450000)
+ return 2;
+ else if (cdclk > 337500)
+ return 1;
+ else
+ return 0;
+}
+
+static void skl_dpll0_update(struct drm_i915_private *dev_priv,
+ struct intel_cdclk_config *cdclk_config)
+{
+ u32 val;
+
+ cdclk_config->ref = 24000;
+ cdclk_config->vco = 0;
+
+ val = intel_de_read(dev_priv, LCPLL1_CTL);
+ if ((val & LCPLL_PLL_ENABLE) == 0)
+ return;
+
+ if (drm_WARN_ON(&dev_priv->drm, (val & LCPLL_PLL_LOCK) == 0))
+ return;
+
+ val = intel_de_read(dev_priv, DPLL_CTRL1);
+
+ if (drm_WARN_ON(&dev_priv->drm,
+ (val & (DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) |
+ DPLL_CTRL1_SSC(SKL_DPLL0) |
+ DPLL_CTRL1_OVERRIDE(SKL_DPLL0))) !=
+ DPLL_CTRL1_OVERRIDE(SKL_DPLL0)))
+ return;
+
+ switch (val & DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)) {
+ case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, SKL_DPLL0):
+ case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, SKL_DPLL0):
+ case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, SKL_DPLL0):
+ case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, SKL_DPLL0):
+ cdclk_config->vco = 8100000;
+ break;
+ case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, SKL_DPLL0):
+ case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, SKL_DPLL0):
+ cdclk_config->vco = 8640000;
+ break;
+ default:
+ MISSING_CASE(val & DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0));
+ break;
+ }
+}
+
+static void skl_get_cdclk(struct drm_i915_private *dev_priv,
+ struct intel_cdclk_config *cdclk_config)
+{
+ u32 cdctl;
+
+ skl_dpll0_update(dev_priv, cdclk_config);
+
+ cdclk_config->cdclk = cdclk_config->bypass = cdclk_config->ref;
+
+ if (cdclk_config->vco == 0)
+ goto out;
+
+ cdctl = intel_de_read(dev_priv, CDCLK_CTL);
+
+ if (cdclk_config->vco == 8640000) {
+ switch (cdctl & CDCLK_FREQ_SEL_MASK) {
+ case CDCLK_FREQ_450_432:
+ cdclk_config->cdclk = 432000;
+ break;
+ case CDCLK_FREQ_337_308:
+ cdclk_config->cdclk = 308571;
+ break;
+ case CDCLK_FREQ_540:
+ cdclk_config->cdclk = 540000;
+ break;
+ case CDCLK_FREQ_675_617:
+ cdclk_config->cdclk = 617143;
+ break;
+ default:
+ MISSING_CASE(cdctl & CDCLK_FREQ_SEL_MASK);
+ break;
+ }
+ } else {
+ switch (cdctl & CDCLK_FREQ_SEL_MASK) {
+ case CDCLK_FREQ_450_432:
+ cdclk_config->cdclk = 450000;
+ break;
+ case CDCLK_FREQ_337_308:
+ cdclk_config->cdclk = 337500;
+ break;
+ case CDCLK_FREQ_540:
+ cdclk_config->cdclk = 540000;
+ break;
+ case CDCLK_FREQ_675_617:
+ cdclk_config->cdclk = 675000;
+ break;
+ default:
+ MISSING_CASE(cdctl & CDCLK_FREQ_SEL_MASK);
+ break;
+ }
+ }
+
+ out:
+ /*
+ * Can't read this out :( Let's assume it's
+ * at least what the CDCLK frequency requires.
+ */
+ cdclk_config->voltage_level =
+ skl_calc_voltage_level(cdclk_config->cdclk);
+}
+
+/* convert from kHz to .1 binary fixpoint MHz (0.5 MHz steps) with -1 MHz offset */
+static int skl_cdclk_decimal(int cdclk)
+{
+ return DIV_ROUND_CLOSEST(cdclk - 1000, 500);
+}
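
/*
 * A worked example of the conversion above: the field encodes
 * (cdclk in MHz - 1) in 0.5 MHz steps, i.e. one fractional binary bit.
 */
#include <stdio.h>

#define DIV_ROUND_CLOSEST(n, d) (((n) + (d) / 2) / (d))

int main(void)
{
	int cdclk = 675000;	/* kHz */

	/* (675000 - 1000) / 500 = 1348 = (675 - 1) * 2 */
	printf("decimal = %d\n", DIV_ROUND_CLOSEST(cdclk - 1000, 500));
	return 0;
}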
+
+static void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv,
+ int vco)
+{
+ bool changed = dev_priv->skl_preferred_vco_freq != vco;
+
+ dev_priv->skl_preferred_vco_freq = vco;
+
+ if (changed)
+ intel_update_max_cdclk(dev_priv);
+}
+
+static u32 skl_dpll0_link_rate(struct drm_i915_private *dev_priv, int vco)
+{
+ drm_WARN_ON(&dev_priv->drm, vco != 8100000 && vco != 8640000);
+
+ /*
+ * We always enable DPLL0 with the lowest link rate possible, but still
+ * taking into account the VCO required to operate the eDP panel at the
+ * desired frequency. The usual DP link rates operate with a VCO of
+ * 8100 while the eDP 1.4 alternate link rates need a VCO of 8640.
+ * The modeset code is responsible for the selection of the exact link
+ * rate later on, with the constraint of choosing a frequency that
+ * works with vco.
+ */
+ if (vco == 8640000)
+ return DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, SKL_DPLL0);
+ else
+ return DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, SKL_DPLL0);
+}
+
+static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
+{
+ intel_de_rmw(dev_priv, DPLL_CTRL1,
+ DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) |
+ DPLL_CTRL1_SSC(SKL_DPLL0) |
+ DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0),
+ DPLL_CTRL1_OVERRIDE(SKL_DPLL0) |
+ skl_dpll0_link_rate(dev_priv, vco));
+ intel_de_posting_read(dev_priv, DPLL_CTRL1);
+
+ intel_de_rmw(dev_priv, LCPLL1_CTL,
+ 0, LCPLL_PLL_ENABLE);
+
+ if (intel_de_wait_for_set(dev_priv, LCPLL1_CTL, LCPLL_PLL_LOCK, 5))
+ drm_err(&dev_priv->drm, "DPLL0 not locked\n");
+
+ dev_priv->display.cdclk.hw.vco = vco;
+
+ /* We'll want to keep using the current vco from now on. */
+ skl_set_preferred_cdclk_vco(dev_priv, vco);
+}
+
+static void skl_dpll0_disable(struct drm_i915_private *dev_priv)
+{
+ intel_de_rmw(dev_priv, LCPLL1_CTL,
+ LCPLL_PLL_ENABLE, 0);
+
+ if (intel_de_wait_for_clear(dev_priv, LCPLL1_CTL, LCPLL_PLL_LOCK, 1))
+ drm_err(&dev_priv->drm, "Couldn't disable DPLL0\n");
+
+ dev_priv->display.cdclk.hw.vco = 0;
+}
+
+static u32 skl_cdclk_freq_sel(struct drm_i915_private *dev_priv,
+ int cdclk, int vco)
+{
+ switch (cdclk) {
+ default:
+ drm_WARN_ON(&dev_priv->drm,
+ cdclk != dev_priv->display.cdclk.hw.bypass);
+ drm_WARN_ON(&dev_priv->drm, vco != 0);
+ fallthrough;
+ case 308571:
+ case 337500:
+ return CDCLK_FREQ_337_308;
+ case 450000:
+ case 432000:
+ return CDCLK_FREQ_450_432;
+ case 540000:
+ return CDCLK_FREQ_540;
+ case 617143:
+ case 675000:
+ return CDCLK_FREQ_675_617;
+ }
+}
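+
+/*
+ * Each freq_select value above covers one cdclk for each possible vco:
+ * e.g. CDCLK_FREQ_675_617 is 8100000 / 12 = 675000 kHz with the
+ * 8100000 kHz vco, and 8640000 / 14 = 617143 kHz (rounded) with the
+ * 8640000 kHz vco.
+ */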
+
+static void skl_set_cdclk(struct drm_i915_private *dev_priv,
+ const struct intel_cdclk_config *cdclk_config,
+ enum pipe pipe)
+{
+ int cdclk = cdclk_config->cdclk;
+ int vco = cdclk_config->vco;
+ u32 freq_select, cdclk_ctl;
+ int ret;
+
+ /*
+ * Based on WA#1183, the 308 and 617 MHz CDCLK rates are unsupported
+ * on SKL. In theory this should never happen since only the eDP1.4
+ * 2.16 and 4.32Gbps rates require them, but eDP1.4 is not
+ * supported on SKL either, see the above WA. WARN whenever trying to
+ * use the corresponding VCO freq as that always leads to using the
+ * minimum 308MHz CDCLK.
+ */
+ drm_WARN_ON_ONCE(&dev_priv->drm,
+ IS_SKYLAKE(dev_priv) && vco == 8640000);
+
+ ret = skl_pcode_request(&dev_priv->uncore, SKL_PCODE_CDCLK_CONTROL,
+ SKL_CDCLK_PREPARE_FOR_CHANGE,
+ SKL_CDCLK_READY_FOR_CHANGE,
+ SKL_CDCLK_READY_FOR_CHANGE, 3);
+ if (ret) {
+ drm_err(&dev_priv->drm,
+ "Failed to inform PCU about cdclk change (%d)\n", ret);
+ return;
+ }
+
+ freq_select = skl_cdclk_freq_sel(dev_priv, cdclk, vco);
+
+ if (dev_priv->display.cdclk.hw.vco != 0 &&
+ dev_priv->display.cdclk.hw.vco != vco)
+ skl_dpll0_disable(dev_priv);
+
+ cdclk_ctl = intel_de_read(dev_priv, CDCLK_CTL);
+
+ if (dev_priv->display.cdclk.hw.vco != vco) {
+ /* Wa Display #1183: skl,kbl,cfl */
+ cdclk_ctl &= ~(CDCLK_FREQ_SEL_MASK | CDCLK_FREQ_DECIMAL_MASK);
+ cdclk_ctl |= freq_select | skl_cdclk_decimal(cdclk);
+ intel_de_write(dev_priv, CDCLK_CTL, cdclk_ctl);
+ }
+
+ /* Wa Display #1183: skl,kbl,cfl */
+ cdclk_ctl |= CDCLK_DIVMUX_CD_OVERRIDE;
+ intel_de_write(dev_priv, CDCLK_CTL, cdclk_ctl);
+ intel_de_posting_read(dev_priv, CDCLK_CTL);
+
+ if (dev_priv->display.cdclk.hw.vco != vco)
+ skl_dpll0_enable(dev_priv, vco);
+
+ /* Wa Display #1183: skl,kbl,cfl */
+ cdclk_ctl &= ~(CDCLK_FREQ_SEL_MASK | CDCLK_FREQ_DECIMAL_MASK);
+ intel_de_write(dev_priv, CDCLK_CTL, cdclk_ctl);
+
+ cdclk_ctl |= freq_select | skl_cdclk_decimal(cdclk);
+ intel_de_write(dev_priv, CDCLK_CTL, cdclk_ctl);
+
+ /* Wa Display #1183: skl,kbl,cfl */
+ cdclk_ctl &= ~CDCLK_DIVMUX_CD_OVERRIDE;
+ intel_de_write(dev_priv, CDCLK_CTL, cdclk_ctl);
+ intel_de_posting_read(dev_priv, CDCLK_CTL);
+
+ /* inform PCU of the change */
+ snb_pcode_write(&dev_priv->uncore, SKL_PCODE_CDCLK_CONTROL,
+ cdclk_config->voltage_level);
+
+ intel_update_cdclk(dev_priv);
+}
+
+static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
+{
+ u32 cdctl, expected;
+
+ /*
+ * Check if the pre-os initialized the display.
+ * The SWF18 scratchpad register is set by the pre-os and can be
+ * used by OS drivers to check that status.
+ */
+ if ((intel_de_read(dev_priv, SWF_ILK(0x18)) & 0x00FFFFFF) == 0)
+ goto sanitize;
+
+ intel_update_cdclk(dev_priv);
+ intel_cdclk_dump_config(dev_priv, &dev_priv->display.cdclk.hw, "Current CDCLK");
+
+ /* Is PLL enabled and locked ? */
+ if (dev_priv->display.cdclk.hw.vco == 0 ||
+ dev_priv->display.cdclk.hw.cdclk == dev_priv->display.cdclk.hw.bypass)
+ goto sanitize;
+
+ /* DPLL okay; verify the cdclock
+ *
+ * In some instances the freq selection is correct but the decimal
+ * part is programmed wrong by the BIOS when the pre-os does not
+ * enable the display. Verify that as well.
+ */
+ cdctl = intel_de_read(dev_priv, CDCLK_CTL);
+ expected = (cdctl & CDCLK_FREQ_SEL_MASK) |
+ skl_cdclk_decimal(dev_priv->display.cdclk.hw.cdclk);
+ if (cdctl == expected)
+ /* All well; nothing to sanitize */
+ return;
+
+sanitize:
+ drm_dbg_kms(&dev_priv->drm, "Sanitizing cdclk programmed by pre-os\n");
+
+ /* force cdclk programming */
+ dev_priv->display.cdclk.hw.cdclk = 0;
+ /* force full PLL disable + enable */
+ dev_priv->display.cdclk.hw.vco = -1;
+}
+
+static void skl_cdclk_init_hw(struct drm_i915_private *dev_priv)
+{
+ struct intel_cdclk_config cdclk_config;
+
+ skl_sanitize_cdclk(dev_priv);
+
+ if (dev_priv->display.cdclk.hw.cdclk != 0 &&
+ dev_priv->display.cdclk.hw.vco != 0) {
+ /*
+ * Use the current vco as our initial
+ * guess as to what the preferred vco is.
+ */
+ if (dev_priv->skl_preferred_vco_freq == 0)
+ skl_set_preferred_cdclk_vco(dev_priv,
+ dev_priv->display.cdclk.hw.vco);
+ return;
+ }
+
+ cdclk_config = dev_priv->display.cdclk.hw;
+
+ cdclk_config.vco = dev_priv->skl_preferred_vco_freq;
+ if (cdclk_config.vco == 0)
+ cdclk_config.vco = 8100000;
+ cdclk_config.cdclk = skl_calc_cdclk(0, cdclk_config.vco);
+ cdclk_config.voltage_level = skl_calc_voltage_level(cdclk_config.cdclk);
+
+ skl_set_cdclk(dev_priv, &cdclk_config, INVALID_PIPE);
+}
+
+static void skl_cdclk_uninit_hw(struct drm_i915_private *dev_priv)
+{
+ struct intel_cdclk_config cdclk_config = dev_priv->display.cdclk.hw;
+
+ cdclk_config.cdclk = cdclk_config.bypass;
+ cdclk_config.vco = 0;
+ cdclk_config.voltage_level = skl_calc_voltage_level(cdclk_config.cdclk);
+
+ skl_set_cdclk(dev_priv, &cdclk_config, INVALID_PIPE);
+}
+
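+/*
+ * The cdclk squasher (DG2 only at this point) derives cdclk from the
+ * fixed vco/2 cd2x clock by gating individual clock pulses according
+ * to a 16-bit waveform, so that on average
+ * cdclk = vco/2 * hweight16(waveform) / 16.
+ */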
+static bool has_cdclk_squasher(struct drm_i915_private *i915)
+{
+ return IS_DG2(i915);
+}
+
+struct intel_cdclk_vals {
+ u32 cdclk;
+ u16 refclk;
+ u16 waveform;
+ u8 divider; /* CD2X divider * 2 */
+ u8 ratio;
+};
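+
+/*
+ * In the tables below the PLL runs at vco = refclk * ratio and
+ * cdclk = vco / divider, with .divider storing twice the CD2X divider.
+ * E.g. the first bxt entry: 19200 * 60 = 1152000 kHz vco, divided by
+ * 8 for a 144000 kHz cdclk.
+ */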
+
+static const struct intel_cdclk_vals bxt_cdclk_table[] = {
+ { .refclk = 19200, .cdclk = 144000, .divider = 8, .ratio = 60 },
+ { .refclk = 19200, .cdclk = 288000, .divider = 4, .ratio = 60 },
+ { .refclk = 19200, .cdclk = 384000, .divider = 3, .ratio = 60 },
+ { .refclk = 19200, .cdclk = 576000, .divider = 2, .ratio = 60 },
+ { .refclk = 19200, .cdclk = 624000, .divider = 2, .ratio = 65 },
+ {}
+};
+
+static const struct intel_cdclk_vals glk_cdclk_table[] = {
+ { .refclk = 19200, .cdclk = 79200, .divider = 8, .ratio = 33 },
+ { .refclk = 19200, .cdclk = 158400, .divider = 4, .ratio = 33 },
+ { .refclk = 19200, .cdclk = 316800, .divider = 2, .ratio = 33 },
+ {}
+};
+
+static const struct intel_cdclk_vals icl_cdclk_table[] = {
+ { .refclk = 19200, .cdclk = 172800, .divider = 2, .ratio = 18 },
+ { .refclk = 19200, .cdclk = 192000, .divider = 2, .ratio = 20 },
+ { .refclk = 19200, .cdclk = 307200, .divider = 2, .ratio = 32 },
+ { .refclk = 19200, .cdclk = 326400, .divider = 4, .ratio = 68 },
+ { .refclk = 19200, .cdclk = 556800, .divider = 2, .ratio = 58 },
+ { .refclk = 19200, .cdclk = 652800, .divider = 2, .ratio = 68 },
+
+ { .refclk = 24000, .cdclk = 180000, .divider = 2, .ratio = 15 },
+ { .refclk = 24000, .cdclk = 192000, .divider = 2, .ratio = 16 },
+ { .refclk = 24000, .cdclk = 312000, .divider = 2, .ratio = 26 },
+ { .refclk = 24000, .cdclk = 324000, .divider = 4, .ratio = 54 },
+ { .refclk = 24000, .cdclk = 552000, .divider = 2, .ratio = 46 },
+ { .refclk = 24000, .cdclk = 648000, .divider = 2, .ratio = 54 },
+
+ { .refclk = 38400, .cdclk = 172800, .divider = 2, .ratio = 9 },
+ { .refclk = 38400, .cdclk = 192000, .divider = 2, .ratio = 10 },
+ { .refclk = 38400, .cdclk = 307200, .divider = 2, .ratio = 16 },
+ { .refclk = 38400, .cdclk = 326400, .divider = 4, .ratio = 34 },
+ { .refclk = 38400, .cdclk = 556800, .divider = 2, .ratio = 29 },
+ { .refclk = 38400, .cdclk = 652800, .divider = 2, .ratio = 34 },
+ {}
+};
+
+static const struct intel_cdclk_vals rkl_cdclk_table[] = {
+ { .refclk = 19200, .cdclk = 172800, .divider = 4, .ratio = 36 },
+ { .refclk = 19200, .cdclk = 192000, .divider = 4, .ratio = 40 },
+ { .refclk = 19200, .cdclk = 307200, .divider = 4, .ratio = 64 },
+ { .refclk = 19200, .cdclk = 326400, .divider = 8, .ratio = 136 },
+ { .refclk = 19200, .cdclk = 556800, .divider = 4, .ratio = 116 },
+ { .refclk = 19200, .cdclk = 652800, .divider = 4, .ratio = 136 },
+
+ { .refclk = 24000, .cdclk = 180000, .divider = 4, .ratio = 30 },
+ { .refclk = 24000, .cdclk = 192000, .divider = 4, .ratio = 32 },
+ { .refclk = 24000, .cdclk = 312000, .divider = 4, .ratio = 52 },
+ { .refclk = 24000, .cdclk = 324000, .divider = 8, .ratio = 108 },
+ { .refclk = 24000, .cdclk = 552000, .divider = 4, .ratio = 92 },
+ { .refclk = 24000, .cdclk = 648000, .divider = 4, .ratio = 108 },
+
+ { .refclk = 38400, .cdclk = 172800, .divider = 4, .ratio = 18 },
+ { .refclk = 38400, .cdclk = 192000, .divider = 4, .ratio = 20 },
+ { .refclk = 38400, .cdclk = 307200, .divider = 4, .ratio = 32 },
+ { .refclk = 38400, .cdclk = 326400, .divider = 8, .ratio = 68 },
+ { .refclk = 38400, .cdclk = 556800, .divider = 4, .ratio = 58 },
+ { .refclk = 38400, .cdclk = 652800, .divider = 4, .ratio = 68 },
+ {}
+};
+
+static const struct intel_cdclk_vals adlp_a_step_cdclk_table[] = {
+ { .refclk = 19200, .cdclk = 307200, .divider = 2, .ratio = 32 },
+ { .refclk = 19200, .cdclk = 556800, .divider = 2, .ratio = 58 },
+ { .refclk = 19200, .cdclk = 652800, .divider = 2, .ratio = 68 },
+
+ { .refclk = 24000, .cdclk = 312000, .divider = 2, .ratio = 26 },
+ { .refclk = 24000, .cdclk = 552000, .divider = 2, .ratio = 46 },
+	{ .refclk = 24000, .cdclk = 648000, .divider = 2, .ratio = 54 },
+
+ { .refclk = 38400, .cdclk = 307200, .divider = 2, .ratio = 16 },
+ { .refclk = 38400, .cdclk = 556800, .divider = 2, .ratio = 29 },
+ { .refclk = 38400, .cdclk = 652800, .divider = 2, .ratio = 34 },
+ {}
+};
+
+static const struct intel_cdclk_vals adlp_cdclk_table[] = {
+ { .refclk = 19200, .cdclk = 172800, .divider = 3, .ratio = 27 },
+ { .refclk = 19200, .cdclk = 192000, .divider = 2, .ratio = 20 },
+ { .refclk = 19200, .cdclk = 307200, .divider = 2, .ratio = 32 },
+ { .refclk = 19200, .cdclk = 556800, .divider = 2, .ratio = 58 },
+ { .refclk = 19200, .cdclk = 652800, .divider = 2, .ratio = 68 },
+
+ { .refclk = 24000, .cdclk = 176000, .divider = 3, .ratio = 22 },
+ { .refclk = 24000, .cdclk = 192000, .divider = 2, .ratio = 16 },
+ { .refclk = 24000, .cdclk = 312000, .divider = 2, .ratio = 26 },
+ { .refclk = 24000, .cdclk = 552000, .divider = 2, .ratio = 46 },
+ { .refclk = 24000, .cdclk = 648000, .divider = 2, .ratio = 54 },
+
+ { .refclk = 38400, .cdclk = 179200, .divider = 3, .ratio = 14 },
+ { .refclk = 38400, .cdclk = 192000, .divider = 2, .ratio = 10 },
+ { .refclk = 38400, .cdclk = 307200, .divider = 2, .ratio = 16 },
+ { .refclk = 38400, .cdclk = 556800, .divider = 2, .ratio = 29 },
+ { .refclk = 38400, .cdclk = 652800, .divider = 2, .ratio = 34 },
+ {}
+};
+
+static const struct intel_cdclk_vals dg2_cdclk_table[] = {
+ { .refclk = 38400, .cdclk = 163200, .divider = 2, .ratio = 34, .waveform = 0x8888 },
+ { .refclk = 38400, .cdclk = 204000, .divider = 2, .ratio = 34, .waveform = 0x9248 },
+ { .refclk = 38400, .cdclk = 244800, .divider = 2, .ratio = 34, .waveform = 0xa4a4 },
+ { .refclk = 38400, .cdclk = 285600, .divider = 2, .ratio = 34, .waveform = 0xa54a },
+ { .refclk = 38400, .cdclk = 326400, .divider = 2, .ratio = 34, .waveform = 0xaaaa },
+ { .refclk = 38400, .cdclk = 367200, .divider = 2, .ratio = 34, .waveform = 0xad5a },
+ { .refclk = 38400, .cdclk = 408000, .divider = 2, .ratio = 34, .waveform = 0xb6b6 },
+ { .refclk = 38400, .cdclk = 448800, .divider = 2, .ratio = 34, .waveform = 0xdbb6 },
+ { .refclk = 38400, .cdclk = 489600, .divider = 2, .ratio = 34, .waveform = 0xeeee },
+ { .refclk = 38400, .cdclk = 530400, .divider = 2, .ratio = 34, .waveform = 0xf7de },
+ { .refclk = 38400, .cdclk = 571200, .divider = 2, .ratio = 34, .waveform = 0xfefe },
+ { .refclk = 38400, .cdclk = 612000, .divider = 2, .ratio = 34, .waveform = 0xfffe },
+ { .refclk = 38400, .cdclk = 652800, .divider = 2, .ratio = 34, .waveform = 0xffff },
+ {}
+};
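+
+/*
+ * All the DG2 entries above run the PLL at 38400 * 34 = 1305600 kHz,
+ * i.e. a 652800 kHz cd2x output; the squash waveform then scales that
+ * down, e.g. 0x8888 has 4 of 16 bits set, giving
+ * 652800 * 4 / 16 = 163200 kHz.
+ */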
+
+static int bxt_calc_cdclk(struct drm_i915_private *dev_priv, int min_cdclk)
+{
+ const struct intel_cdclk_vals *table = dev_priv->display.cdclk.table;
+ int i;
+
+ for (i = 0; table[i].refclk; i++)
+ if (table[i].refclk == dev_priv->display.cdclk.hw.ref &&
+ table[i].cdclk >= min_cdclk)
+ return table[i].cdclk;
+
+ drm_WARN(&dev_priv->drm, 1,
+ "Cannot satisfy minimum cdclk %d with refclk %u\n",
+ min_cdclk, dev_priv->display.cdclk.hw.ref);
+ return 0;
+}
+
+static int bxt_calc_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
+{
+ const struct intel_cdclk_vals *table = dev_priv->display.cdclk.table;
+ int i;
+
+ if (cdclk == dev_priv->display.cdclk.hw.bypass)
+ return 0;
+
+ for (i = 0; table[i].refclk; i++)
+ if (table[i].refclk == dev_priv->display.cdclk.hw.ref &&
+ table[i].cdclk == cdclk)
+ return dev_priv->display.cdclk.hw.ref * table[i].ratio;
+
+ drm_WARN(&dev_priv->drm, 1, "cdclk %d not valid for refclk %u\n",
+ cdclk, dev_priv->display.cdclk.hw.ref);
+ return 0;
+}
+
+static u8 bxt_calc_voltage_level(int cdclk)
+{
+ return DIV_ROUND_UP(cdclk, 25000);
+}
+
+static u8 icl_calc_voltage_level(int cdclk)
+{
+ if (cdclk > 556800)
+ return 2;
+ else if (cdclk > 312000)
+ return 1;
+ else
+ return 0;
+}
+
+static u8 ehl_calc_voltage_level(int cdclk)
+{
+ if (cdclk > 326400)
+ return 3;
+ else if (cdclk > 312000)
+ return 2;
+ else if (cdclk > 180000)
+ return 1;
+ else
+ return 0;
+}
+
+static u8 tgl_calc_voltage_level(int cdclk)
+{
+ if (cdclk > 556800)
+ return 3;
+ else if (cdclk > 326400)
+ return 2;
+ else if (cdclk > 312000)
+ return 1;
+ else
+ return 0;
+}
+
+static void icl_readout_refclk(struct drm_i915_private *dev_priv,
+ struct intel_cdclk_config *cdclk_config)
+{
+ u32 dssm = intel_de_read(dev_priv, SKL_DSSM) & ICL_DSSM_CDCLK_PLL_REFCLK_MASK;
+
+ switch (dssm) {
+ default:
+ MISSING_CASE(dssm);
+ fallthrough;
+ case ICL_DSSM_CDCLK_PLL_REFCLK_24MHz:
+ cdclk_config->ref = 24000;
+ break;
+ case ICL_DSSM_CDCLK_PLL_REFCLK_19_2MHz:
+ cdclk_config->ref = 19200;
+ break;
+ case ICL_DSSM_CDCLK_PLL_REFCLK_38_4MHz:
+ cdclk_config->ref = 38400;
+ break;
+ }
+}
+
+static void bxt_de_pll_readout(struct drm_i915_private *dev_priv,
+ struct intel_cdclk_config *cdclk_config)
+{
+ u32 val, ratio;
+
+ if (IS_DG2(dev_priv))
+ cdclk_config->ref = 38400;
+ else if (DISPLAY_VER(dev_priv) >= 11)
+ icl_readout_refclk(dev_priv, cdclk_config);
+ else
+ cdclk_config->ref = 19200;
+
+ val = intel_de_read(dev_priv, BXT_DE_PLL_ENABLE);
+ if ((val & BXT_DE_PLL_PLL_ENABLE) == 0 ||
+ (val & BXT_DE_PLL_LOCK) == 0) {
+ /*
+ * CDCLK PLL is disabled, the VCO/ratio doesn't matter, but
+ * setting it to zero is a way to signal that.
+ */
+ cdclk_config->vco = 0;
+ return;
+ }
+
+ /*
+	 * DISPLAY_VER >= 11 platforms have the ratio directly in the PLL
+	 * enable register, while gen9lp had it in a separate PLL control
+	 * register.
+ */
+ if (DISPLAY_VER(dev_priv) >= 11)
+ ratio = val & ICL_CDCLK_PLL_RATIO_MASK;
+ else
+ ratio = intel_de_read(dev_priv, BXT_DE_PLL_CTL) & BXT_DE_PLL_RATIO_MASK;
+
+ cdclk_config->vco = ratio * cdclk_config->ref;
+}
+
+static void bxt_get_cdclk(struct drm_i915_private *dev_priv,
+ struct intel_cdclk_config *cdclk_config)
+{
+ u32 squash_ctl = 0;
+ u32 divider;
+ int div;
+
+ bxt_de_pll_readout(dev_priv, cdclk_config);
+
+ if (DISPLAY_VER(dev_priv) >= 12)
+ cdclk_config->bypass = cdclk_config->ref / 2;
+ else if (DISPLAY_VER(dev_priv) >= 11)
+ cdclk_config->bypass = 50000;
+ else
+ cdclk_config->bypass = cdclk_config->ref;
+
+ if (cdclk_config->vco == 0) {
+ cdclk_config->cdclk = cdclk_config->bypass;
+ goto out;
+ }
+
+ divider = intel_de_read(dev_priv, CDCLK_CTL) & BXT_CDCLK_CD2X_DIV_SEL_MASK;
+
+ switch (divider) {
+ case BXT_CDCLK_CD2X_DIV_SEL_1:
+ div = 2;
+ break;
+ case BXT_CDCLK_CD2X_DIV_SEL_1_5:
+ div = 3;
+ break;
+ case BXT_CDCLK_CD2X_DIV_SEL_2:
+ div = 4;
+ break;
+ case BXT_CDCLK_CD2X_DIV_SEL_4:
+ div = 8;
+ break;
+ default:
+ MISSING_CASE(divider);
+ return;
+ }
+
+ if (has_cdclk_squasher(dev_priv))
+ squash_ctl = intel_de_read(dev_priv, CDCLK_SQUASH_CTL);
+
+ if (squash_ctl & CDCLK_SQUASH_ENABLE) {
+ u16 waveform;
+ int size;
+
+ size = REG_FIELD_GET(CDCLK_SQUASH_WINDOW_SIZE_MASK, squash_ctl) + 1;
+ waveform = REG_FIELD_GET(CDCLK_SQUASH_WAVEFORM_MASK, squash_ctl) >> (16 - size);
+
+ cdclk_config->cdclk = DIV_ROUND_CLOSEST(hweight16(waveform) *
+ cdclk_config->vco, size * div);
+ } else {
+ cdclk_config->cdclk = DIV_ROUND_CLOSEST(cdclk_config->vco, div);
+ }
+
+ out:
+ /*
+ * Can't read this out :( Let's assume it's
+ * at least what the CDCLK frequency requires.
+ */
+ cdclk_config->voltage_level =
+ intel_cdclk_calc_voltage_level(dev_priv, cdclk_config->cdclk);
+}
+
+static void bxt_de_pll_disable(struct drm_i915_private *dev_priv)
+{
+ intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, 0);
+
+ /* Timeout 200us */
+ if (intel_de_wait_for_clear(dev_priv,
+ BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1))
+ drm_err(&dev_priv->drm, "timeout waiting for DE PLL unlock\n");
+
+ dev_priv->display.cdclk.hw.vco = 0;
+}
+
+static void bxt_de_pll_enable(struct drm_i915_private *dev_priv, int vco)
+{
+ int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->display.cdclk.hw.ref);
+
+ intel_de_rmw(dev_priv, BXT_DE_PLL_CTL,
+ BXT_DE_PLL_RATIO_MASK, BXT_DE_PLL_RATIO(ratio));
+
+ intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);
+
+ /* Timeout 200us */
+ if (intel_de_wait_for_set(dev_priv,
+ BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1))
+ drm_err(&dev_priv->drm, "timeout waiting for DE PLL lock\n");
+
+ dev_priv->display.cdclk.hw.vco = vco;
+}
+
+static void icl_cdclk_pll_disable(struct drm_i915_private *dev_priv)
+{
+ intel_de_rmw(dev_priv, BXT_DE_PLL_ENABLE,
+ BXT_DE_PLL_PLL_ENABLE, 0);
+
+ /* Timeout 200us */
+ if (intel_de_wait_for_clear(dev_priv, BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1))
+ drm_err(&dev_priv->drm, "timeout waiting for CDCLK PLL unlock\n");
+
+ dev_priv->display.cdclk.hw.vco = 0;
+}
+
+static void icl_cdclk_pll_enable(struct drm_i915_private *dev_priv, int vco)
+{
+ int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->display.cdclk.hw.ref);
+ u32 val;
+
+ val = ICL_CDCLK_PLL_RATIO(ratio);
+ intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, val);
+
+ val |= BXT_DE_PLL_PLL_ENABLE;
+ intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, val);
+
+ /* Timeout 200us */
+ if (intel_de_wait_for_set(dev_priv, BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1))
+ drm_err(&dev_priv->drm, "timeout waiting for CDCLK PLL lock\n");
+
+ dev_priv->display.cdclk.hw.vco = vco;
+}
+
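+/*
+ * ADL-P can "crawl" the PLL to a new frequency without disabling it:
+ * the new ratio is written while the PLL stays enabled and locked,
+ * and a frequency change request then commits it.
+ */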
+static void adlp_cdclk_pll_crawl(struct drm_i915_private *dev_priv, int vco)
+{
+ int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->display.cdclk.hw.ref);
+ u32 val;
+
+ /* Write PLL ratio without disabling */
+ val = ICL_CDCLK_PLL_RATIO(ratio) | BXT_DE_PLL_PLL_ENABLE;
+ intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, val);
+
+ /* Submit freq change request */
+ val |= BXT_DE_PLL_FREQ_REQ;
+ intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, val);
+
+ /* Timeout 200us */
+ if (intel_de_wait_for_set(dev_priv, BXT_DE_PLL_ENABLE,
+ BXT_DE_PLL_LOCK | BXT_DE_PLL_FREQ_REQ_ACK, 1))
+ drm_err(&dev_priv->drm, "timeout waiting for FREQ change request ack\n");
+
+ val &= ~BXT_DE_PLL_FREQ_REQ;
+ intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, val);
+
+ dev_priv->display.cdclk.hw.vco = vco;
+}
+
+static u32 bxt_cdclk_cd2x_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+ if (DISPLAY_VER(dev_priv) >= 12) {
+ if (pipe == INVALID_PIPE)
+ return TGL_CDCLK_CD2X_PIPE_NONE;
+ else
+ return TGL_CDCLK_CD2X_PIPE(pipe);
+ } else if (DISPLAY_VER(dev_priv) >= 11) {
+ if (pipe == INVALID_PIPE)
+ return ICL_CDCLK_CD2X_PIPE_NONE;
+ else
+ return ICL_CDCLK_CD2X_PIPE(pipe);
+ } else {
+ if (pipe == INVALID_PIPE)
+ return BXT_CDCLK_CD2X_PIPE_NONE;
+ else
+ return BXT_CDCLK_CD2X_PIPE(pipe);
+ }
+}
+
+static u32 bxt_cdclk_cd2x_div_sel(struct drm_i915_private *dev_priv,
+ int cdclk, int vco)
+{
+ /* cdclk = vco / 2 / div{1,1.5,2,4} */
+ switch (DIV_ROUND_CLOSEST(vco, cdclk)) {
+ default:
+ drm_WARN_ON(&dev_priv->drm,
+ cdclk != dev_priv->display.cdclk.hw.bypass);
+ drm_WARN_ON(&dev_priv->drm, vco != 0);
+ fallthrough;
+ case 2:
+ return BXT_CDCLK_CD2X_DIV_SEL_1;
+ case 3:
+ return BXT_CDCLK_CD2X_DIV_SEL_1_5;
+ case 4:
+ return BXT_CDCLK_CD2X_DIV_SEL_2;
+ case 8:
+ return BXT_CDCLK_CD2X_DIV_SEL_4;
+ }
+}
+
+static u32 cdclk_squash_waveform(struct drm_i915_private *dev_priv,
+ int cdclk)
+{
+ const struct intel_cdclk_vals *table = dev_priv->display.cdclk.table;
+ int i;
+
+ if (cdclk == dev_priv->display.cdclk.hw.bypass)
+ return 0;
+
+ for (i = 0; table[i].refclk; i++)
+ if (table[i].refclk == dev_priv->display.cdclk.hw.ref &&
+ table[i].cdclk == cdclk)
+ return table[i].waveform;
+
+ drm_WARN(&dev_priv->drm, 1, "cdclk %d not valid for refclk %u\n",
+ cdclk, dev_priv->display.cdclk.hw.ref);
+
+ return 0xffff;
+}
+
+static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
+ const struct intel_cdclk_config *cdclk_config,
+ enum pipe pipe)
+{
+ int cdclk = cdclk_config->cdclk;
+ int vco = cdclk_config->vco;
+ u32 val;
+ u16 waveform;
+ int clock;
+ int ret;
+
+ /* Inform power controller of upcoming frequency change. */
+ if (DISPLAY_VER(dev_priv) >= 11)
+ ret = skl_pcode_request(&dev_priv->uncore, SKL_PCODE_CDCLK_CONTROL,
+ SKL_CDCLK_PREPARE_FOR_CHANGE,
+ SKL_CDCLK_READY_FOR_CHANGE,
+ SKL_CDCLK_READY_FOR_CHANGE, 3);
+ else
+ /*
+ * BSpec requires us to wait up to 150usec, but that leads to
+ * timeouts; the 2ms used here is based on experiment.
+ */
+ ret = snb_pcode_write_timeout(&dev_priv->uncore,
+ HSW_PCODE_DE_WRITE_FREQ_REQ,
+ 0x80000000, 150, 2);
+ if (ret) {
+ drm_err(&dev_priv->drm,
+ "Failed to inform PCU about cdclk change (err %d, freq %d)\n",
+ ret, cdclk);
+ return;
+ }
+
+ if (HAS_CDCLK_CRAWL(dev_priv) && dev_priv->display.cdclk.hw.vco > 0 && vco > 0) {
+ if (dev_priv->display.cdclk.hw.vco != vco)
+ adlp_cdclk_pll_crawl(dev_priv, vco);
+ } else if (DISPLAY_VER(dev_priv) >= 11) {
+ if (dev_priv->display.cdclk.hw.vco != 0 &&
+ dev_priv->display.cdclk.hw.vco != vco)
+ icl_cdclk_pll_disable(dev_priv);
+
+ if (dev_priv->display.cdclk.hw.vco != vco)
+ icl_cdclk_pll_enable(dev_priv, vco);
+ } else {
+ if (dev_priv->display.cdclk.hw.vco != 0 &&
+ dev_priv->display.cdclk.hw.vco != vco)
+ bxt_de_pll_disable(dev_priv);
+
+ if (dev_priv->display.cdclk.hw.vco != vco)
+ bxt_de_pll_enable(dev_priv, vco);
+ }
+
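+	/*
+	 * With squashing in use the cd2x logic runs off the full vco/2
+	 * clock (see has_cdclk_squasher() above), so program the cd2x
+	 * divider for vco/2 in that case.
+	 */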
+ waveform = cdclk_squash_waveform(dev_priv, cdclk);
+
+ if (waveform)
+ clock = vco / 2;
+ else
+ clock = cdclk;
+
+ if (has_cdclk_squasher(dev_priv)) {
+ u32 squash_ctl = 0;
+
+ if (waveform)
+ squash_ctl = CDCLK_SQUASH_ENABLE |
+ CDCLK_SQUASH_WINDOW_SIZE(0xf) | waveform;
+
+ intel_de_write(dev_priv, CDCLK_SQUASH_CTL, squash_ctl);
+ }
+
+ val = bxt_cdclk_cd2x_div_sel(dev_priv, clock, vco) |
+ bxt_cdclk_cd2x_pipe(dev_priv, pipe) |
+ skl_cdclk_decimal(cdclk);
+
+ /*
+ * Disable SSA Precharge when CD clock frequency < 500 MHz,
+ * enable otherwise.
+ */
+ if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
+ cdclk >= 500000)
+ val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
+ intel_de_write(dev_priv, CDCLK_CTL, val);
+
+ if (pipe != INVALID_PIPE)
+ intel_crtc_wait_for_next_vblank(intel_crtc_for_pipe(dev_priv, pipe));
+
+ if (DISPLAY_VER(dev_priv) >= 11) {
+ ret = snb_pcode_write(&dev_priv->uncore, SKL_PCODE_CDCLK_CONTROL,
+ cdclk_config->voltage_level);
+ } else {
+ /*
+ * The timeout isn't specified, the 2ms used here is based on
+ * experiment.
+ * FIXME: Waiting for the request completion could be delayed
+ * until the next PCODE request based on BSpec.
+ */
+ ret = snb_pcode_write_timeout(&dev_priv->uncore,
+ HSW_PCODE_DE_WRITE_FREQ_REQ,
+ cdclk_config->voltage_level,
+ 150, 2);
+ }
+
+ if (ret) {
+ drm_err(&dev_priv->drm,
+ "PCode CDCLK freq set failed, (err %d, freq %d)\n",
+ ret, cdclk);
+ return;
+ }
+
+ intel_update_cdclk(dev_priv);
+
+ if (DISPLAY_VER(dev_priv) >= 11)
+ /*
+ * Can't read out the voltage level :(
+ * Let's just assume everything is as expected.
+ */
+ dev_priv->display.cdclk.hw.voltage_level = cdclk_config->voltage_level;
+}
+
+static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
+{
+ u32 cdctl, expected;
+ int cdclk, clock, vco;
+
+ intel_update_cdclk(dev_priv);
+ intel_cdclk_dump_config(dev_priv, &dev_priv->display.cdclk.hw, "Current CDCLK");
+
+ if (dev_priv->display.cdclk.hw.vco == 0 ||
+ dev_priv->display.cdclk.hw.cdclk == dev_priv->display.cdclk.hw.bypass)
+ goto sanitize;
+
+ /* DPLL okay; verify the cdclock
+ *
+ * Some BIOS versions leave an incorrect decimal frequency value and
+ * set reserved MBZ bits in CDCLK_CTL at least during exiting from S4,
+ * so sanitize this register.
+ */
+ cdctl = intel_de_read(dev_priv, CDCLK_CTL);
+ /*
+ * Let's ignore the pipe field, since BIOS could have configured the
+ * dividers both synching to an active pipe, or asynchronously
+ * (PIPE_NONE).
+ */
+ cdctl &= ~bxt_cdclk_cd2x_pipe(dev_priv, INVALID_PIPE);
+
+ /* Make sure this is a legal cdclk value for the platform */
+ cdclk = bxt_calc_cdclk(dev_priv, dev_priv->display.cdclk.hw.cdclk);
+ if (cdclk != dev_priv->display.cdclk.hw.cdclk)
+ goto sanitize;
+
+ /* Make sure the VCO is correct for the cdclk */
+ vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk);
+ if (vco != dev_priv->display.cdclk.hw.vco)
+ goto sanitize;
+
+ expected = skl_cdclk_decimal(cdclk);
+
+ /* Figure out what CD2X divider we should be using for this cdclk */
+ if (has_cdclk_squasher(dev_priv))
+ clock = dev_priv->display.cdclk.hw.vco / 2;
+ else
+ clock = dev_priv->display.cdclk.hw.cdclk;
+
+ expected |= bxt_cdclk_cd2x_div_sel(dev_priv, clock,
+ dev_priv->display.cdclk.hw.vco);
+
+ /*
+ * Disable SSA Precharge when CD clock frequency < 500 MHz,
+ * enable otherwise.
+ */
+ if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
+ dev_priv->display.cdclk.hw.cdclk >= 500000)
+ expected |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
+
+ if (cdctl == expected)
+ /* All well; nothing to sanitize */
+ return;
+
+sanitize:
+ drm_dbg_kms(&dev_priv->drm, "Sanitizing cdclk programmed by pre-os\n");
+
+ /* force cdclk programming */
+ dev_priv->display.cdclk.hw.cdclk = 0;
+
+ /* force full PLL disable + enable */
+ dev_priv->display.cdclk.hw.vco = -1;
+}
+
+static void bxt_cdclk_init_hw(struct drm_i915_private *dev_priv)
+{
+ struct intel_cdclk_config cdclk_config;
+
+ bxt_sanitize_cdclk(dev_priv);
+
+ if (dev_priv->display.cdclk.hw.cdclk != 0 &&
+ dev_priv->display.cdclk.hw.vco != 0)
+ return;
+
+ cdclk_config = dev_priv->display.cdclk.hw;
+
+ /*
+ * FIXME:
+ * - The initial CDCLK needs to be read from VBT.
+ * Need to make this change after VBT has changes for BXT.
+ */
+ cdclk_config.cdclk = bxt_calc_cdclk(dev_priv, 0);
+ cdclk_config.vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk_config.cdclk);
+ cdclk_config.voltage_level =
+ intel_cdclk_calc_voltage_level(dev_priv, cdclk_config.cdclk);
+
+ bxt_set_cdclk(dev_priv, &cdclk_config, INVALID_PIPE);
+}
+
+static void bxt_cdclk_uninit_hw(struct drm_i915_private *dev_priv)
+{
+ struct intel_cdclk_config cdclk_config = dev_priv->display.cdclk.hw;
+
+ cdclk_config.cdclk = cdclk_config.bypass;
+ cdclk_config.vco = 0;
+ cdclk_config.voltage_level =
+ intel_cdclk_calc_voltage_level(dev_priv, cdclk_config.cdclk);
+
+ bxt_set_cdclk(dev_priv, &cdclk_config, INVALID_PIPE);
+}
+
+/**
+ * intel_cdclk_init_hw - Initialize CDCLK hardware
+ * @i915: i915 device
+ *
+ * Initialize CDCLK. This consists mainly of initializing dev_priv->display.cdclk.hw and
+ * sanitizing the state of the hardware if needed. This is generally done only
+ * during the display core initialization sequence, after which the DMC will
+ * take care of turning CDCLK off/on as needed.
+ */
+void intel_cdclk_init_hw(struct drm_i915_private *i915)
+{
+ if (DISPLAY_VER(i915) >= 10 || IS_BROXTON(i915))
+ bxt_cdclk_init_hw(i915);
+ else if (DISPLAY_VER(i915) == 9)
+ skl_cdclk_init_hw(i915);
+}
+
+/**
+ * intel_cdclk_uninit_hw - Uninitialize CDCLK hardware
+ * @i915: i915 device
+ *
+ * Uninitialize CDCLK. This is done only during the display core
+ * uninitialization sequence.
+ */
+void intel_cdclk_uninit_hw(struct drm_i915_private *i915)
+{
+ if (DISPLAY_VER(i915) >= 10 || IS_BROXTON(i915))
+ bxt_cdclk_uninit_hw(i915);
+ else if (DISPLAY_VER(i915) == 9)
+ skl_cdclk_uninit_hw(i915);
+}
+
+static bool intel_cdclk_can_crawl(struct drm_i915_private *dev_priv,
+ const struct intel_cdclk_config *a,
+ const struct intel_cdclk_config *b)
+{
+ int a_div, b_div;
+
+ if (!HAS_CDCLK_CRAWL(dev_priv))
+ return false;
+
+ /*
+ * The vco and cd2x divider will change independently
+	 * from each other, so we disallow cd2x change when crawling.
+ */
+ a_div = DIV_ROUND_CLOSEST(a->vco, a->cdclk);
+ b_div = DIV_ROUND_CLOSEST(b->vco, b->cdclk);
+
+ return a->vco != 0 && b->vco != 0 &&
+ a->vco != b->vco &&
+ a_div == b_div &&
+ a->ref == b->ref;
+}
+
+static bool intel_cdclk_can_squash(struct drm_i915_private *dev_priv,
+ const struct intel_cdclk_config *a,
+ const struct intel_cdclk_config *b)
+{
+ /*
+ * FIXME should store a bit more state in intel_cdclk_config
+ * to differentiate squasher vs. cd2x divider properly. For
+ * the moment all platforms with squasher use a fixed cd2x
+ * divider.
+ */
+ if (!has_cdclk_squasher(dev_priv))
+ return false;
+
+ return a->cdclk != b->cdclk &&
+ a->vco != 0 &&
+ a->vco == b->vco &&
+ a->ref == b->ref;
+}
+
+/**
+ * intel_cdclk_needs_modeset - Determine if changing between the CDCLK
+ * configurations requires a modeset on all pipes
+ * @a: first CDCLK configuration
+ * @b: second CDCLK configuration
+ *
+ * Returns:
+ * True if changing between the two CDCLK configurations
+ * requires all pipes to be off, false if not.
+ */
+bool intel_cdclk_needs_modeset(const struct intel_cdclk_config *a,
+ const struct intel_cdclk_config *b)
+{
+ return a->cdclk != b->cdclk ||
+ a->vco != b->vco ||
+ a->ref != b->ref;
+}
+
+/**
+ * intel_cdclk_can_cd2x_update - Determine if changing between the two CDCLK
+ * configurations requires only a cd2x divider update
+ * @dev_priv: i915 device
+ * @a: first CDCLK configuration
+ * @b: second CDCLK configuration
+ *
+ * Returns:
+ * True if changing between the two CDCLK configurations
+ * can be done with just a cd2x divider update, false if not.
+ */
+static bool intel_cdclk_can_cd2x_update(struct drm_i915_private *dev_priv,
+ const struct intel_cdclk_config *a,
+ const struct intel_cdclk_config *b)
+{
+ /* Older hw doesn't have the capability */
+ if (DISPLAY_VER(dev_priv) < 10 && !IS_BROXTON(dev_priv))
+ return false;
+
+ /*
+ * FIXME should store a bit more state in intel_cdclk_config
+ * to differentiate squasher vs. cd2x divider properly. For
+ * the moment all platforms with squasher use a fixed cd2x
+ * divider.
+ */
+ if (has_cdclk_squasher(dev_priv))
+ return false;
+
+ return a->cdclk != b->cdclk &&
+ a->vco != 0 &&
+ a->vco == b->vco &&
+ a->ref == b->ref;
+}
+
+/**
+ * intel_cdclk_changed - Determine if two CDCLK configurations are different
+ * @a: first CDCLK configuration
+ * @b: second CDCLK configuration
+ *
+ * Returns:
+ * True if the CDCLK configurations don't match, false if they do.
+ */
+static bool intel_cdclk_changed(const struct intel_cdclk_config *a,
+ const struct intel_cdclk_config *b)
+{
+ return intel_cdclk_needs_modeset(a, b) ||
+ a->voltage_level != b->voltage_level;
+}
+
+void intel_cdclk_dump_config(struct drm_i915_private *i915,
+ const struct intel_cdclk_config *cdclk_config,
+ const char *context)
+{
+ drm_dbg_kms(&i915->drm, "%s %d kHz, VCO %d kHz, ref %d kHz, bypass %d kHz, voltage level %d\n",
+ context, cdclk_config->cdclk, cdclk_config->vco,
+ cdclk_config->ref, cdclk_config->bypass,
+ cdclk_config->voltage_level);
+}
+
+/**
+ * intel_set_cdclk - Push the CDCLK configuration to the hardware
+ * @dev_priv: i915 device
+ * @cdclk_config: new CDCLK configuration
+ * @pipe: pipe with which to synchronize the update
+ *
+ * Program the hardware based on the passed in CDCLK state,
+ * if necessary.
+ */
+static void intel_set_cdclk(struct drm_i915_private *dev_priv,
+ const struct intel_cdclk_config *cdclk_config,
+ enum pipe pipe)
+{
+ struct intel_encoder *encoder;
+
+ if (!intel_cdclk_changed(&dev_priv->display.cdclk.hw, cdclk_config))
+ return;
+
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, !dev_priv->display.funcs.cdclk->set_cdclk))
+ return;
+
+ intel_cdclk_dump_config(dev_priv, cdclk_config, "Changing CDCLK to");
+
+ for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ intel_psr_pause(intel_dp);
+ }
+
+ intel_audio_cdclk_change_pre(dev_priv);
+
+ /*
+ * Lock aux/gmbus while we change cdclk in case those
+ * functions use cdclk. Not all platforms/ports do,
+ * but we'll lock them all for simplicity.
+ */
+ mutex_lock(&dev_priv->display.gmbus.mutex);
+ for_each_intel_dp(&dev_priv->drm, encoder) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ mutex_lock_nest_lock(&intel_dp->aux.hw_mutex,
+ &dev_priv->display.gmbus.mutex);
+ }
+
+ intel_cdclk_set_cdclk(dev_priv, cdclk_config, pipe);
+
+ for_each_intel_dp(&dev_priv->drm, encoder) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ mutex_unlock(&intel_dp->aux.hw_mutex);
+ }
+ mutex_unlock(&dev_priv->display.gmbus.mutex);
+
+ for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ intel_psr_resume(intel_dp);
+ }
+
+ intel_audio_cdclk_change_post(dev_priv);
+
+ if (drm_WARN(&dev_priv->drm,
+ intel_cdclk_changed(&dev_priv->display.cdclk.hw, cdclk_config),
+ "cdclk state doesn't match!\n")) {
+ intel_cdclk_dump_config(dev_priv, &dev_priv->display.cdclk.hw, "[hw state]");
+ intel_cdclk_dump_config(dev_priv, cdclk_config, "[sw state]");
+ }
+}
+
+/**
+ * intel_set_cdclk_pre_plane_update - Push the CDCLK state to the hardware
+ * @state: intel atomic state
+ *
+ * Program the hardware before updating the HW plane state based on the
+ * new CDCLK state, if necessary.
+ */
+void
+intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ const struct intel_cdclk_state *old_cdclk_state =
+ intel_atomic_get_old_cdclk_state(state);
+ const struct intel_cdclk_state *new_cdclk_state =
+ intel_atomic_get_new_cdclk_state(state);
+ enum pipe pipe = new_cdclk_state->pipe;
+
+ if (!intel_cdclk_changed(&old_cdclk_state->actual,
+ &new_cdclk_state->actual))
+ return;
+
+ if (pipe == INVALID_PIPE ||
+ old_cdclk_state->actual.cdclk <= new_cdclk_state->actual.cdclk) {
+ drm_WARN_ON(&dev_priv->drm, !new_cdclk_state->base.changed);
+
+ intel_set_cdclk(dev_priv, &new_cdclk_state->actual, pipe);
+ }
+}
+
+/**
+ * intel_set_cdclk_post_plane_update - Push the CDCLK state to the hardware
+ * @state: intel atomic state
+ *
+ * Program the hardware after updating the HW plane state based on the
+ * new CDCLK state, if necessary.
+ */
+void
+intel_set_cdclk_post_plane_update(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ const struct intel_cdclk_state *old_cdclk_state =
+ intel_atomic_get_old_cdclk_state(state);
+ const struct intel_cdclk_state *new_cdclk_state =
+ intel_atomic_get_new_cdclk_state(state);
+ enum pipe pipe = new_cdclk_state->pipe;
+
+ if (!intel_cdclk_changed(&old_cdclk_state->actual,
+ &new_cdclk_state->actual))
+ return;
+
+ if (pipe != INVALID_PIPE &&
+ old_cdclk_state->actual.cdclk > new_cdclk_state->actual.cdclk) {
+ drm_WARN_ON(&dev_priv->drm, !new_cdclk_state->base.changed);
+
+ intel_set_cdclk(dev_priv, &new_cdclk_state->actual, pipe);
+ }
+}
+
+static int intel_pixel_rate_to_cdclk(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ int pixel_rate = crtc_state->pixel_rate;
+
+ if (DISPLAY_VER(dev_priv) >= 10)
+ return DIV_ROUND_UP(pixel_rate, 2);
+ else if (DISPLAY_VER(dev_priv) == 9 ||
+ IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ return pixel_rate;
+ else if (IS_CHERRYVIEW(dev_priv))
+ return DIV_ROUND_UP(pixel_rate * 100, 95);
+ else if (crtc_state->double_wide)
+ return DIV_ROUND_UP(pixel_rate * 100, 90 * 2);
+ else
+ return DIV_ROUND_UP(pixel_rate * 100, 90);
+}
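+
+/*
+ * E.g. with the CHV 95% guardband above, a 148500 kHz pixel rate
+ * needs a cdclk of at least DIV_ROUND_UP(148500 * 100, 95) =
+ * 156316 kHz.
+ */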
+
+static int intel_planes_min_cdclk(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_plane *plane;
+ int min_cdclk = 0;
+
+ for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
+ min_cdclk = max(crtc_state->min_cdclk[plane->id], min_cdclk);
+
+ return min_cdclk;
+}
+
+int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(crtc_state->uapi.crtc->dev);
+ int min_cdclk;
+
+ if (!crtc_state->hw.enable)
+ return 0;
+
+ min_cdclk = intel_pixel_rate_to_cdclk(crtc_state);
+
+ /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
+ if (IS_BROADWELL(dev_priv) && hsw_crtc_state_ips_capable(crtc_state))
+ min_cdclk = DIV_ROUND_UP(min_cdclk * 100, 95);
+
+ /* BSpec says "Do not use DisplayPort with CDCLK less than 432 MHz,
+ * audio enabled, port width x4, and link rate HBR2 (5.4 GHz), or else
+ * there may be audio corruption or screen corruption." This cdclk
+ * restriction for GLK is 316.8 MHz.
+ */
+ if (intel_crtc_has_dp_encoder(crtc_state) &&
+ crtc_state->has_audio &&
+ crtc_state->port_clock >= 540000 &&
+ crtc_state->lane_count == 4) {
+ if (DISPLAY_VER(dev_priv) == 10) {
+ /* Display WA #1145: glk */
+ min_cdclk = max(316800, min_cdclk);
+ } else if (DISPLAY_VER(dev_priv) == 9 || IS_BROADWELL(dev_priv)) {
+ /* Display WA #1144: skl,bxt */
+ min_cdclk = max(432000, min_cdclk);
+ }
+ }
+
+ /*
+ * According to BSpec, "The CD clock frequency must be at least twice
+ * the frequency of the Azalia BCLK." and BCLK is 96 MHz by default.
+ */
+ if (crtc_state->has_audio && DISPLAY_VER(dev_priv) >= 9)
+ min_cdclk = max(2 * 96000, min_cdclk);
+
+ /*
+ * "For DP audio configuration, cdclk frequency shall be set to
+ * meet the following requirements:
+ * DP Link Frequency(MHz) | Cdclk frequency(MHz)
+ * 270 | 320 or higher
+ * 162 | 200 or higher"
+ */
+ if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ intel_crtc_has_dp_encoder(crtc_state) && crtc_state->has_audio)
+ min_cdclk = max(crtc_state->port_clock, min_cdclk);
+
+ /*
+ * On Valleyview some DSI panels lose (v|h)sync when the clock is lower
+	 * than 320000 kHz.
+ */
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) &&
+ IS_VALLEYVIEW(dev_priv))
+ min_cdclk = max(320000, min_cdclk);
+
+ /*
+	 * On Geminilake once the CDCLK gets as low as 79200 kHz
+	 * the picture gets unstable, even though the values are
+	 * correct for both the DSI PLL and the DE PLL.
+ */
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) &&
+ IS_GEMINILAKE(dev_priv))
+ min_cdclk = max(158400, min_cdclk);
+
+ /* Account for additional needs from the planes */
+ min_cdclk = max(intel_planes_min_cdclk(crtc_state), min_cdclk);
+
+ /*
+ * When we decide to use only one VDSC engine, since
+	 * each VDSC operates with 1 ppc throughput, the pixel clock
+	 * cannot be higher than the VDSC clock (cdclk).
+ */
+ if (crtc_state->dsc.compression_enable && !crtc_state->dsc.dsc_split)
+ min_cdclk = max(min_cdclk, (int)crtc_state->pixel_rate);
+
+ /*
+ * HACK. Currently for TGL/DG2 platforms we calculate
+ * min_cdclk initially based on pixel_rate divided
+	 * by 2, also accounting for plane requirements.
+	 * However, in some cases the lowest possible CDCLK
+	 * doesn't work and causes underruns.
+	 * To be explicit: this is currently a hack rather
+	 * than a final solution.
+ */
+ if (IS_TIGERLAKE(dev_priv) || IS_DG2(dev_priv)) {
+ /*
+ * Clamp to max_cdclk_freq in case pixel rate is higher,
+		 * in order not to break 8K modes, but still leave the W/A in place.
+ */
+ min_cdclk = max_t(int, min_cdclk,
+ min_t(int, crtc_state->pixel_rate,
+ dev_priv->display.cdclk.max_cdclk_freq));
+ }
+
+ return min_cdclk;
+}
+
+static int intel_compute_min_cdclk(struct intel_cdclk_state *cdclk_state)
+{
+ struct intel_atomic_state *state = cdclk_state->base.state;
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ const struct intel_bw_state *bw_state;
+ struct intel_crtc *crtc;
+ struct intel_crtc_state *crtc_state;
+ int min_cdclk, i;
+ enum pipe pipe;
+
+ for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
+ int ret;
+
+ min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
+ if (min_cdclk < 0)
+ return min_cdclk;
+
+ if (cdclk_state->min_cdclk[crtc->pipe] == min_cdclk)
+ continue;
+
+ cdclk_state->min_cdclk[crtc->pipe] = min_cdclk;
+
+ ret = intel_atomic_lock_global_state(&cdclk_state->base);
+ if (ret)
+ return ret;
+ }
+
+ bw_state = intel_atomic_get_new_bw_state(state);
+ if (bw_state) {
+ min_cdclk = intel_bw_min_cdclk(dev_priv, bw_state);
+
+ if (cdclk_state->bw_min_cdclk != min_cdclk) {
+ int ret;
+
+ cdclk_state->bw_min_cdclk = min_cdclk;
+
+ ret = intel_atomic_lock_global_state(&cdclk_state->base);
+ if (ret)
+ return ret;
+ }
+ }
+
+ min_cdclk = max(cdclk_state->force_min_cdclk,
+ cdclk_state->bw_min_cdclk);
+ for_each_pipe(dev_priv, pipe)
+ min_cdclk = max(cdclk_state->min_cdclk[pipe], min_cdclk);
+
+ /*
+ * Avoid glk_force_audio_cdclk() causing excessive screen
+ * blinking when multiple pipes are active by making sure
+ * CDCLK frequency is always high enough for audio. With a
+ * single active pipe we can always change CDCLK frequency
+ * by changing the cd2x divider (see glk_cdclk_table[]) and
+ * thus a full modeset won't be needed then.
+ */
+ if (IS_GEMINILAKE(dev_priv) && cdclk_state->active_pipes &&
+ !is_power_of_2(cdclk_state->active_pipes))
+ min_cdclk = max(2 * 96000, min_cdclk);
+
+ if (min_cdclk > dev_priv->display.cdclk.max_cdclk_freq) {
+ drm_dbg_kms(&dev_priv->drm,
+ "required cdclk (%d kHz) exceeds max (%d kHz)\n",
+ min_cdclk, dev_priv->display.cdclk.max_cdclk_freq);
+ return -EINVAL;
+ }
+
+ return min_cdclk;
+}
+
+/*
+ * Account for port clock min voltage level requirements.
+ * This only really does something on DISPLAY_VER >= 11 but can be
+ * called on earlier platforms as well.
+ *
+ * Note that this function assumes that 0 is
+ * the lowest voltage value, and higher values
+ * correspond to increasingly higher voltages.
+ *
+ * Should that relationship no longer hold on
+ * future platforms this code will need to be
+ * adjusted.
+ */
+static int bxt_compute_min_voltage_level(struct intel_cdclk_state *cdclk_state)
+{
+ struct intel_atomic_state *state = cdclk_state->base.state;
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc *crtc;
+ struct intel_crtc_state *crtc_state;
+ u8 min_voltage_level;
+ int i;
+ enum pipe pipe;
+
+ for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
+ int ret;
+
+ if (crtc_state->hw.enable)
+ min_voltage_level = crtc_state->min_voltage_level;
+ else
+ min_voltage_level = 0;
+
+ if (cdclk_state->min_voltage_level[crtc->pipe] == min_voltage_level)
+ continue;
+
+ cdclk_state->min_voltage_level[crtc->pipe] = min_voltage_level;
+
+ ret = intel_atomic_lock_global_state(&cdclk_state->base);
+ if (ret)
+ return ret;
+ }
+
+ min_voltage_level = 0;
+ for_each_pipe(dev_priv, pipe)
+ min_voltage_level = max(cdclk_state->min_voltage_level[pipe],
+ min_voltage_level);
+
+ return min_voltage_level;
+}
+
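+/*
+ * The *_modeset_calc_cdclk() functions below each fill in two configs:
+ * "logical", covering what the active pipes require, and "actual", what
+ * actually gets programmed; with no active pipes "actual" can drop all
+ * the way down to whatever force_min_cdclk demands.
+ */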
+static int vlv_modeset_calc_cdclk(struct intel_cdclk_state *cdclk_state)
+{
+ struct intel_atomic_state *state = cdclk_state->base.state;
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ int min_cdclk, cdclk;
+
+ min_cdclk = intel_compute_min_cdclk(cdclk_state);
+ if (min_cdclk < 0)
+ return min_cdclk;
+
+ cdclk = vlv_calc_cdclk(dev_priv, min_cdclk);
+
+ cdclk_state->logical.cdclk = cdclk;
+ cdclk_state->logical.voltage_level =
+ vlv_calc_voltage_level(dev_priv, cdclk);
+
+ if (!cdclk_state->active_pipes) {
+ cdclk = vlv_calc_cdclk(dev_priv, cdclk_state->force_min_cdclk);
+
+ cdclk_state->actual.cdclk = cdclk;
+ cdclk_state->actual.voltage_level =
+ vlv_calc_voltage_level(dev_priv, cdclk);
+ } else {
+ cdclk_state->actual = cdclk_state->logical;
+ }
+
+ return 0;
+}
+
+static int bdw_modeset_calc_cdclk(struct intel_cdclk_state *cdclk_state)
+{
+ int min_cdclk, cdclk;
+
+ min_cdclk = intel_compute_min_cdclk(cdclk_state);
+ if (min_cdclk < 0)
+ return min_cdclk;
+
+ /*
+ * FIXME should also account for plane ratio
+ * once 64bpp pixel formats are supported.
+ */
+ cdclk = bdw_calc_cdclk(min_cdclk);
+
+ cdclk_state->logical.cdclk = cdclk;
+ cdclk_state->logical.voltage_level =
+ bdw_calc_voltage_level(cdclk);
+
+ if (!cdclk_state->active_pipes) {
+ cdclk = bdw_calc_cdclk(cdclk_state->force_min_cdclk);
+
+ cdclk_state->actual.cdclk = cdclk;
+ cdclk_state->actual.voltage_level =
+ bdw_calc_voltage_level(cdclk);
+ } else {
+ cdclk_state->actual = cdclk_state->logical;
+ }
+
+ return 0;
+}
+
+static int skl_dpll0_vco(struct intel_cdclk_state *cdclk_state)
+{
+ struct intel_atomic_state *state = cdclk_state->base.state;
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc *crtc;
+ struct intel_crtc_state *crtc_state;
+ int vco, i;
+
+ vco = cdclk_state->logical.vco;
+ if (!vco)
+ vco = dev_priv->skl_preferred_vco_freq;
+
+ for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
+ if (!crtc_state->hw.enable)
+ continue;
+
+ if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
+ continue;
+
+ /*
+ * DPLL0 VCO may need to be adjusted to get the correct
+ * clock for eDP. This will affect cdclk as well.
+ */
+ switch (crtc_state->port_clock / 2) {
+ case 108000:
+ case 216000:
+ vco = 8640000;
+ break;
+ default:
+ vco = 8100000;
+ break;
+ }
+ }
+
+ return vco;
+}
+
+static int skl_modeset_calc_cdclk(struct intel_cdclk_state *cdclk_state)
+{
+ int min_cdclk, cdclk, vco;
+
+ min_cdclk = intel_compute_min_cdclk(cdclk_state);
+ if (min_cdclk < 0)
+ return min_cdclk;
+
+ vco = skl_dpll0_vco(cdclk_state);
+
+ /*
+ * FIXME should also account for plane ratio
+ * once 64bpp pixel formats are supported.
+ */
+ cdclk = skl_calc_cdclk(min_cdclk, vco);
+
+ cdclk_state->logical.vco = vco;
+ cdclk_state->logical.cdclk = cdclk;
+ cdclk_state->logical.voltage_level =
+ skl_calc_voltage_level(cdclk);
+
+ if (!cdclk_state->active_pipes) {
+ cdclk = skl_calc_cdclk(cdclk_state->force_min_cdclk, vco);
+
+ cdclk_state->actual.vco = vco;
+ cdclk_state->actual.cdclk = cdclk;
+ cdclk_state->actual.voltage_level =
+ skl_calc_voltage_level(cdclk);
+ } else {
+ cdclk_state->actual = cdclk_state->logical;
+ }
+
+ return 0;
+}
+
+static int bxt_modeset_calc_cdclk(struct intel_cdclk_state *cdclk_state)
+{
+ struct intel_atomic_state *state = cdclk_state->base.state;
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ int min_cdclk, min_voltage_level, cdclk, vco;
+
+ min_cdclk = intel_compute_min_cdclk(cdclk_state);
+ if (min_cdclk < 0)
+ return min_cdclk;
+
+ min_voltage_level = bxt_compute_min_voltage_level(cdclk_state);
+ if (min_voltage_level < 0)
+ return min_voltage_level;
+
+ cdclk = bxt_calc_cdclk(dev_priv, min_cdclk);
+ vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk);
+
+ cdclk_state->logical.vco = vco;
+ cdclk_state->logical.cdclk = cdclk;
+ cdclk_state->logical.voltage_level =
+ max_t(int, min_voltage_level,
+ intel_cdclk_calc_voltage_level(dev_priv, cdclk));
+
+ if (!cdclk_state->active_pipes) {
+ cdclk = bxt_calc_cdclk(dev_priv, cdclk_state->force_min_cdclk);
+ vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk);
+
+ cdclk_state->actual.vco = vco;
+ cdclk_state->actual.cdclk = cdclk;
+ cdclk_state->actual.voltage_level =
+ intel_cdclk_calc_voltage_level(dev_priv, cdclk);
+ } else {
+ cdclk_state->actual = cdclk_state->logical;
+ }
+
+ return 0;
+}
+
+static int fixed_modeset_calc_cdclk(struct intel_cdclk_state *cdclk_state)
+{
+ int min_cdclk;
+
+ /*
+ * We can't change the cdclk frequency, but we still want to
+ * check that the required minimum frequency doesn't exceed
+ * the actual cdclk frequency.
+ */
+ min_cdclk = intel_compute_min_cdclk(cdclk_state);
+ if (min_cdclk < 0)
+ return min_cdclk;
+
+ return 0;
+}
+
+static struct intel_global_state *intel_cdclk_duplicate_state(struct intel_global_obj *obj)
+{
+ struct intel_cdclk_state *cdclk_state;
+
+ cdclk_state = kmemdup(obj->state, sizeof(*cdclk_state), GFP_KERNEL);
+ if (!cdclk_state)
+ return NULL;
+
+ cdclk_state->pipe = INVALID_PIPE;
+
+ return &cdclk_state->base;
+}
+
+static void intel_cdclk_destroy_state(struct intel_global_obj *obj,
+ struct intel_global_state *state)
+{
+ kfree(state);
+}
+
+static const struct intel_global_state_funcs intel_cdclk_funcs = {
+ .atomic_duplicate_state = intel_cdclk_duplicate_state,
+ .atomic_destroy_state = intel_cdclk_destroy_state,
+};
+
+struct intel_cdclk_state *
+intel_atomic_get_cdclk_state(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_global_state *cdclk_state;
+
+ cdclk_state = intel_atomic_get_global_obj_state(state, &dev_priv->display.cdclk.obj);
+ if (IS_ERR(cdclk_state))
+ return ERR_CAST(cdclk_state);
+
+ return to_intel_cdclk_state(cdclk_state);
+}
+
+int intel_cdclk_atomic_check(struct intel_atomic_state *state,
+ bool *need_cdclk_calc)
+{
+ const struct intel_cdclk_state *old_cdclk_state;
+ const struct intel_cdclk_state *new_cdclk_state;
+ struct intel_plane_state *plane_state;
+ struct intel_plane *plane;
+ int ret;
+ int i;
+
+ /*
+ * active_planes bitmask has been updated, and potentially affected
+ * planes are part of the state. We can now compute the minimum cdclk
+ * for each plane.
+ */
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+ ret = intel_plane_calc_min_cdclk(state, plane, need_cdclk_calc);
+ if (ret)
+ return ret;
+ }
+
+ ret = intel_bw_calc_min_cdclk(state, need_cdclk_calc);
+ if (ret)
+ return ret;
+
+ old_cdclk_state = intel_atomic_get_old_cdclk_state(state);
+ new_cdclk_state = intel_atomic_get_new_cdclk_state(state);
+
+ if (new_cdclk_state &&
+ old_cdclk_state->force_min_cdclk != new_cdclk_state->force_min_cdclk)
+ *need_cdclk_calc = true;
+
+ return 0;
+}
+
+int intel_cdclk_init(struct drm_i915_private *dev_priv)
+{
+ struct intel_cdclk_state *cdclk_state;
+
+ cdclk_state = kzalloc(sizeof(*cdclk_state), GFP_KERNEL);
+ if (!cdclk_state)
+ return -ENOMEM;
+
+ intel_atomic_global_obj_init(dev_priv, &dev_priv->display.cdclk.obj,
+ &cdclk_state->base, &intel_cdclk_funcs);
+
+ return 0;
+}
+
+int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ const struct intel_cdclk_state *old_cdclk_state;
+ struct intel_cdclk_state *new_cdclk_state;
+ enum pipe pipe = INVALID_PIPE;
+ int ret;
+
+ new_cdclk_state = intel_atomic_get_cdclk_state(state);
+ if (IS_ERR(new_cdclk_state))
+ return PTR_ERR(new_cdclk_state);
+
+ old_cdclk_state = intel_atomic_get_old_cdclk_state(state);
+
+ new_cdclk_state->active_pipes =
+ intel_calc_active_pipes(state, old_cdclk_state->active_pipes);
+
+ ret = intel_cdclk_modeset_calc_cdclk(dev_priv, new_cdclk_state);
+ if (ret)
+ return ret;
+
+ if (intel_cdclk_changed(&old_cdclk_state->actual,
+ &new_cdclk_state->actual)) {
+ /*
+ * Also serialize commits across all crtcs
+ * if the actual hw needs to be poked.
+ */
+ ret = intel_atomic_serialize_global_state(&new_cdclk_state->base);
+ if (ret)
+ return ret;
+ } else if (old_cdclk_state->active_pipes != new_cdclk_state->active_pipes ||
+ old_cdclk_state->force_min_cdclk != new_cdclk_state->force_min_cdclk ||
+ intel_cdclk_changed(&old_cdclk_state->logical,
+ &new_cdclk_state->logical)) {
+ ret = intel_atomic_lock_global_state(&new_cdclk_state->base);
+ if (ret)
+ return ret;
+ } else {
+ return 0;
+ }
+
+ if (is_power_of_2(new_cdclk_state->active_pipes) &&
+ intel_cdclk_can_cd2x_update(dev_priv,
+ &old_cdclk_state->actual,
+ &new_cdclk_state->actual)) {
+ struct intel_crtc *crtc;
+ struct intel_crtc_state *crtc_state;
+
+ pipe = ilog2(new_cdclk_state->active_pipes);
+ crtc = intel_crtc_for_pipe(dev_priv, pipe);
+
+ crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ if (drm_atomic_crtc_needs_modeset(&crtc_state->uapi))
+ pipe = INVALID_PIPE;
+ }
+
+ if (intel_cdclk_can_squash(dev_priv,
+ &old_cdclk_state->actual,
+ &new_cdclk_state->actual)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Can change cdclk via squasher\n");
+ } else if (intel_cdclk_can_crawl(dev_priv,
+ &old_cdclk_state->actual,
+ &new_cdclk_state->actual)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Can change cdclk via crawl\n");
+ } else if (pipe != INVALID_PIPE) {
+ new_cdclk_state->pipe = pipe;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "Can change cdclk cd2x divider with pipe %c active\n",
+ pipe_name(pipe));
+ } else if (intel_cdclk_needs_modeset(&old_cdclk_state->actual,
+ &new_cdclk_state->actual)) {
+ /* All pipes must be switched off while we change the cdclk. */
+ ret = intel_modeset_all_pipes(state);
+ if (ret)
+ return ret;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "Modeset required for cdclk change\n");
+ }
+
+ drm_dbg_kms(&dev_priv->drm,
+ "New cdclk calculated to be logical %u kHz, actual %u kHz\n",
+ new_cdclk_state->logical.cdclk,
+ new_cdclk_state->actual.cdclk);
+ drm_dbg_kms(&dev_priv->drm,
+ "New voltage level calculated to be logical %u, actual %u\n",
+ new_cdclk_state->logical.voltage_level,
+ new_cdclk_state->actual.voltage_level);
+
+ return 0;
+}
+
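+/*
+ * The reverse of intel_pixel_rate_to_cdclk(): given the max cdclk, apply
+ * the same per-platform dotclock/cdclk ratios and guardbands to arrive
+ * at the max supported dot clock.
+ */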
+static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
+{
+ int max_cdclk_freq = dev_priv->display.cdclk.max_cdclk_freq;
+
+ if (DISPLAY_VER(dev_priv) >= 10)
+ return 2 * max_cdclk_freq;
+ else if (DISPLAY_VER(dev_priv) == 9 ||
+ IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ return max_cdclk_freq;
+	else if (IS_CHERRYVIEW(dev_priv))
+		return max_cdclk_freq * 95 / 100;
+	else if (DISPLAY_VER(dev_priv) < 4)
+		return 2 * max_cdclk_freq * 90 / 100;
+	else
+		return max_cdclk_freq * 90 / 100;
+}
+
+/**
+ * intel_update_max_cdclk - Determine the maximum supported CDCLK frequency
+ * @dev_priv: i915 device
+ *
+ * Determine the maximum CDCLK frequency the platform supports, and also
+ * derive the maximum dot clock frequency the maximum CDCLK frequency
+ * allows.
+ */
+void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
+{
+ if (IS_JSL_EHL(dev_priv)) {
+ if (dev_priv->display.cdclk.hw.ref == 24000)
+ dev_priv->display.cdclk.max_cdclk_freq = 552000;
+ else
+ dev_priv->display.cdclk.max_cdclk_freq = 556800;
+ } else if (DISPLAY_VER(dev_priv) >= 11) {
+ if (dev_priv->display.cdclk.hw.ref == 24000)
+ dev_priv->display.cdclk.max_cdclk_freq = 648000;
+ else
+ dev_priv->display.cdclk.max_cdclk_freq = 652800;
+ } else if (IS_GEMINILAKE(dev_priv)) {
+ dev_priv->display.cdclk.max_cdclk_freq = 316800;
+ } else if (IS_BROXTON(dev_priv)) {
+ dev_priv->display.cdclk.max_cdclk_freq = 624000;
+ } else if (DISPLAY_VER(dev_priv) == 9) {
+ u32 limit = intel_de_read(dev_priv, SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
+ int max_cdclk, vco;
+
+ vco = dev_priv->skl_preferred_vco_freq;
+ drm_WARN_ON(&dev_priv->drm, vco != 8100000 && vco != 8640000);
+
+ /*
+ * Use the lower (vco 8640) cdclk values as a
+ * first guess. skl_calc_cdclk() will correct it
+ * if the preferred vco is 8100 instead.
+ */
+ if (limit == SKL_DFSM_CDCLK_LIMIT_675)
+ max_cdclk = 617143;
+ else if (limit == SKL_DFSM_CDCLK_LIMIT_540)
+ max_cdclk = 540000;
+ else if (limit == SKL_DFSM_CDCLK_LIMIT_450)
+ max_cdclk = 432000;
+ else
+ max_cdclk = 308571;
+
+ dev_priv->display.cdclk.max_cdclk_freq = skl_calc_cdclk(max_cdclk, vco);
+ } else if (IS_BROADWELL(dev_priv)) {
+ /*
+ * FIXME with extra cooling we can allow
+		 * 540 MHz for ULX and 675 MHz for ULT.
+ * How can we know if extra cooling is
+ * available? PCI ID, VTB, something else?
+ */
+ if (intel_de_read(dev_priv, FUSE_STRAP) & HSW_CDCLK_LIMIT)
+ dev_priv->display.cdclk.max_cdclk_freq = 450000;
+ else if (IS_BDW_ULX(dev_priv))
+ dev_priv->display.cdclk.max_cdclk_freq = 450000;
+ else if (IS_BDW_ULT(dev_priv))
+ dev_priv->display.cdclk.max_cdclk_freq = 540000;
+ else
+ dev_priv->display.cdclk.max_cdclk_freq = 675000;
+ } else if (IS_CHERRYVIEW(dev_priv)) {
+ dev_priv->display.cdclk.max_cdclk_freq = 320000;
+ } else if (IS_VALLEYVIEW(dev_priv)) {
+ dev_priv->display.cdclk.max_cdclk_freq = 400000;
+ } else {
+ /* otherwise assume cdclk is fixed */
+ dev_priv->display.cdclk.max_cdclk_freq = dev_priv->display.cdclk.hw.cdclk;
+ }
+
+ dev_priv->max_dotclk_freq = intel_compute_max_dotclk(dev_priv);
+
+ drm_dbg(&dev_priv->drm, "Max CD clock rate: %d kHz\n",
+ dev_priv->display.cdclk.max_cdclk_freq);
+
+ drm_dbg(&dev_priv->drm, "Max dotclock rate: %d kHz\n",
+ dev_priv->max_dotclk_freq);
+}
+
+/**
+ * intel_update_cdclk - Determine the current CDCLK frequency
+ * @dev_priv: i915 device
+ *
+ * Determine the current CDCLK frequency.
+ */
+void intel_update_cdclk(struct drm_i915_private *dev_priv)
+{
+ intel_cdclk_get_cdclk(dev_priv, &dev_priv->display.cdclk.hw);
+
+ /*
+ * 9:0 CMBUS [sic] CDCLK frequency (cdfreq):
+ * Programmng [sic] note: bit[9:2] should be programmed to the number
+ * of cdclk that generates 4MHz reference clock freq which is used to
+ * generate GMBus clock. This will vary with the cdclk freq.
+ */
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ intel_de_write(dev_priv, GMBUSFREQ_VLV,
+ DIV_ROUND_UP(dev_priv->display.cdclk.hw.cdclk, 1000));
+}
+
+static int dg1_rawclk(struct drm_i915_private *dev_priv)
+{
+ /*
+ * DG1 always uses a 38.4 MHz rawclk. The bspec tells us
+ * "Program Numerator=2, Denominator=4, Divider=37 decimal."
+ */
+ intel_de_write(dev_priv, PCH_RAWCLK_FREQ,
+ CNP_RAWCLK_DEN(4) | CNP_RAWCLK_DIV(37) | ICP_RAWCLK_NUM(2));
+
+ return 38400;
+}
+
+static int cnp_rawclk(struct drm_i915_private *dev_priv)
+{
+ u32 rawclk;
+ int divider, fraction;
+
+ if (intel_de_read(dev_priv, SFUSE_STRAP) & SFUSE_STRAP_RAW_FREQUENCY) {
+ /* 24 MHz */
+ divider = 24000;
+ fraction = 0;
+ } else {
+ /* 19.2 MHz */
+ divider = 19000;
+ fraction = 200;
+ }
+
+ rawclk = CNP_RAWCLK_DIV(divider / 1000);
+ if (fraction) {
+ int numerator = 1;
+
+ rawclk |= CNP_RAWCLK_DEN(DIV_ROUND_CLOSEST(numerator * 1000,
+ fraction) - 1);
+ if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
+ rawclk |= ICP_RAWCLK_NUM(numerator);
+ }
+
+ intel_de_write(dev_priv, PCH_RAWCLK_FREQ, rawclk);
+ return divider + fraction;
+}
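+
+/*
+ * Worked example (illustrative): for the 19.2 MHz strap, divider = 19000
+ * and fraction = 200, so the register gets CNP_RAWCLK_DIV(19) and
+ * CNP_RAWCLK_DEN(DIV_ROUND_CLOSEST(1000, 200) - 1) = CNP_RAWCLK_DEN(4),
+ * i.e. 19 + 1/5 = 19.2 MHz, and the function returns 19000 + 200 =
+ * 19200 kHz.
+ */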
+
+static int pch_rawclk(struct drm_i915_private *dev_priv)
+{
+ return (intel_de_read(dev_priv, PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK) * 1000;
+}
+
+static int vlv_hrawclk(struct drm_i915_private *dev_priv)
+{
+ /* RAWCLK_FREQ_VLV register updated from power well code */
+ return vlv_get_cck_clock_hpll(dev_priv, "hrawclk",
+ CCK_DISPLAY_REF_CLOCK_CONTROL);
+}
+
+static int i9xx_hrawclk(struct drm_i915_private *dev_priv)
+{
+ u32 clkcfg;
+
+ /*
+ * hrawclock is 1/4 the FSB frequency
+ *
+ * Note that this only reads the state of the FSB
+ * straps, not the actual FSB frequency. Some BIOSen
+ * let you configure each independently. Ideally we'd
+ * read out the actual FSB frequency but sadly we
+ * don't know which registers have that information,
+ * and all the relevant docs have gone to bit heaven :(
+ */
+ clkcfg = intel_de_read(dev_priv, CLKCFG) & CLKCFG_FSB_MASK;
+
+ if (IS_MOBILE(dev_priv)) {
+ switch (clkcfg) {
+ case CLKCFG_FSB_400:
+ return 100000;
+ case CLKCFG_FSB_533:
+ return 133333;
+ case CLKCFG_FSB_667:
+ return 166667;
+ case CLKCFG_FSB_800:
+ return 200000;
+ case CLKCFG_FSB_1067:
+ return 266667;
+ case CLKCFG_FSB_1333:
+ return 333333;
+ default:
+ MISSING_CASE(clkcfg);
+ return 133333;
+ }
+ } else {
+ switch (clkcfg) {
+ case CLKCFG_FSB_400_ALT:
+ return 100000;
+ case CLKCFG_FSB_533:
+ return 133333;
+ case CLKCFG_FSB_667:
+ return 166667;
+ case CLKCFG_FSB_800:
+ return 200000;
+ case CLKCFG_FSB_1067_ALT:
+ return 266667;
+ case CLKCFG_FSB_1333_ALT:
+ return 333333;
+ case CLKCFG_FSB_1600_ALT:
+ return 400000;
+ default:
+ return 133333;
+ }
+ }
+}
+
+/**
+ * intel_read_rawclk - Determine the current RAWCLK frequency
+ * @dev_priv: i915 device
+ *
+ * Determine the current RAWCLK frequency. RAWCLK is a fixed
+ * frequency clock, so this needs to be done only once.
+ */
+u32 intel_read_rawclk(struct drm_i915_private *dev_priv)
+{
+ u32 freq;
+
+ if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1)
+ freq = dg1_rawclk(dev_priv);
+ else if (INTEL_PCH_TYPE(dev_priv) >= PCH_MTP)
+ /*
+ * MTL always uses a 38.4 MHz rawclk. The bspec tells us
+ * "RAWCLK_FREQ defaults to the values for 38.4 and does
+ * not need to be programmed."
+ */
+ freq = 38400;
+ else if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
+ freq = cnp_rawclk(dev_priv);
+ else if (HAS_PCH_SPLIT(dev_priv))
+ freq = pch_rawclk(dev_priv);
+ else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ freq = vlv_hrawclk(dev_priv);
+ else if (DISPLAY_VER(dev_priv) >= 3)
+ freq = i9xx_hrawclk(dev_priv);
+ else
+ /* no rawclk on other platforms, or no need to know it */
+ return 0;
+
+ return freq;
+}
+
+static const struct intel_cdclk_funcs tgl_cdclk_funcs = {
+ .get_cdclk = bxt_get_cdclk,
+ .set_cdclk = bxt_set_cdclk,
+ .modeset_calc_cdclk = bxt_modeset_calc_cdclk,
+ .calc_voltage_level = tgl_calc_voltage_level,
+};
+
+static const struct intel_cdclk_funcs ehl_cdclk_funcs = {
+ .get_cdclk = bxt_get_cdclk,
+ .set_cdclk = bxt_set_cdclk,
+ .modeset_calc_cdclk = bxt_modeset_calc_cdclk,
+ .calc_voltage_level = ehl_calc_voltage_level,
+};
+
+static const struct intel_cdclk_funcs icl_cdclk_funcs = {
+ .get_cdclk = bxt_get_cdclk,
+ .set_cdclk = bxt_set_cdclk,
+ .modeset_calc_cdclk = bxt_modeset_calc_cdclk,
+ .calc_voltage_level = icl_calc_voltage_level,
+};
+
+static const struct intel_cdclk_funcs bxt_cdclk_funcs = {
+ .get_cdclk = bxt_get_cdclk,
+ .set_cdclk = bxt_set_cdclk,
+ .modeset_calc_cdclk = bxt_modeset_calc_cdclk,
+ .calc_voltage_level = bxt_calc_voltage_level,
+};
+
+static const struct intel_cdclk_funcs skl_cdclk_funcs = {
+ .get_cdclk = skl_get_cdclk,
+ .set_cdclk = skl_set_cdclk,
+ .modeset_calc_cdclk = skl_modeset_calc_cdclk,
+};
+
+static const struct intel_cdclk_funcs bdw_cdclk_funcs = {
+ .get_cdclk = bdw_get_cdclk,
+ .set_cdclk = bdw_set_cdclk,
+ .modeset_calc_cdclk = bdw_modeset_calc_cdclk,
+};
+
+static const struct intel_cdclk_funcs chv_cdclk_funcs = {
+ .get_cdclk = vlv_get_cdclk,
+ .set_cdclk = chv_set_cdclk,
+ .modeset_calc_cdclk = vlv_modeset_calc_cdclk,
+};
+
+static const struct intel_cdclk_funcs vlv_cdclk_funcs = {
+ .get_cdclk = vlv_get_cdclk,
+ .set_cdclk = vlv_set_cdclk,
+ .modeset_calc_cdclk = vlv_modeset_calc_cdclk,
+};
+
+static const struct intel_cdclk_funcs hsw_cdclk_funcs = {
+ .get_cdclk = hsw_get_cdclk,
+ .modeset_calc_cdclk = fixed_modeset_calc_cdclk,
+};
+
+/* SNB, IVB, 965G, 945G */
+static const struct intel_cdclk_funcs fixed_400mhz_cdclk_funcs = {
+ .get_cdclk = fixed_400mhz_get_cdclk,
+ .modeset_calc_cdclk = fixed_modeset_calc_cdclk,
+};
+
+static const struct intel_cdclk_funcs ilk_cdclk_funcs = {
+ .get_cdclk = fixed_450mhz_get_cdclk,
+ .modeset_calc_cdclk = fixed_modeset_calc_cdclk,
+};
+
+static const struct intel_cdclk_funcs gm45_cdclk_funcs = {
+ .get_cdclk = gm45_get_cdclk,
+ .modeset_calc_cdclk = fixed_modeset_calc_cdclk,
+};
+
+/* G45 uses G33 */
+
+static const struct intel_cdclk_funcs i965gm_cdclk_funcs = {
+ .get_cdclk = i965gm_get_cdclk,
+ .modeset_calc_cdclk = fixed_modeset_calc_cdclk,
+};
+
+/* i965G uses fixed 400 */
+
+static const struct intel_cdclk_funcs pnv_cdclk_funcs = {
+ .get_cdclk = pnv_get_cdclk,
+ .modeset_calc_cdclk = fixed_modeset_calc_cdclk,
+};
+
+static const struct intel_cdclk_funcs g33_cdclk_funcs = {
+ .get_cdclk = g33_get_cdclk,
+ .modeset_calc_cdclk = fixed_modeset_calc_cdclk,
+};
+
+static const struct intel_cdclk_funcs i945gm_cdclk_funcs = {
+ .get_cdclk = i945gm_get_cdclk,
+ .modeset_calc_cdclk = fixed_modeset_calc_cdclk,
+};
+
+/* i945G uses fixed 400 */
+
+static const struct intel_cdclk_funcs i915gm_cdclk_funcs = {
+ .get_cdclk = i915gm_get_cdclk,
+ .modeset_calc_cdclk = fixed_modeset_calc_cdclk,
+};
+
+static const struct intel_cdclk_funcs i915g_cdclk_funcs = {
+ .get_cdclk = fixed_333mhz_get_cdclk,
+ .modeset_calc_cdclk = fixed_modeset_calc_cdclk,
+};
+
+static const struct intel_cdclk_funcs i865g_cdclk_funcs = {
+ .get_cdclk = fixed_266mhz_get_cdclk,
+ .modeset_calc_cdclk = fixed_modeset_calc_cdclk,
+};
+
+static const struct intel_cdclk_funcs i85x_cdclk_funcs = {
+ .get_cdclk = i85x_get_cdclk,
+ .modeset_calc_cdclk = fixed_modeset_calc_cdclk,
+};
+
+static const struct intel_cdclk_funcs i845g_cdclk_funcs = {
+ .get_cdclk = fixed_200mhz_get_cdclk,
+ .modeset_calc_cdclk = fixed_modeset_calc_cdclk,
+};
+
+static const struct intel_cdclk_funcs i830_cdclk_funcs = {
+ .get_cdclk = fixed_133mhz_get_cdclk,
+ .modeset_calc_cdclk = fixed_modeset_calc_cdclk,
+};
+
+/**
+ * intel_init_cdclk_hooks - Initialize CDCLK related modesetting hooks
+ * @dev_priv: i915 device
+ */
+void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
+{
+ if (IS_DG2(dev_priv)) {
+ dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs;
+ dev_priv->display.cdclk.table = dg2_cdclk_table;
+ } else if (IS_ALDERLAKE_P(dev_priv)) {
+ dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs;
+ /* Wa_22011320316:adl-p[a0] */
+ if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
+ dev_priv->display.cdclk.table = adlp_a_step_cdclk_table;
+ else
+ dev_priv->display.cdclk.table = adlp_cdclk_table;
+ } else if (IS_ROCKETLAKE(dev_priv)) {
+ dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs;
+ dev_priv->display.cdclk.table = rkl_cdclk_table;
+ } else if (DISPLAY_VER(dev_priv) >= 12) {
+ dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs;
+ dev_priv->display.cdclk.table = icl_cdclk_table;
+ } else if (IS_JSL_EHL(dev_priv)) {
+ dev_priv->display.funcs.cdclk = &ehl_cdclk_funcs;
+ dev_priv->display.cdclk.table = icl_cdclk_table;
+ } else if (DISPLAY_VER(dev_priv) >= 11) {
+ dev_priv->display.funcs.cdclk = &icl_cdclk_funcs;
+ dev_priv->display.cdclk.table = icl_cdclk_table;
+ } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ dev_priv->display.funcs.cdclk = &bxt_cdclk_funcs;
+ if (IS_GEMINILAKE(dev_priv))
+ dev_priv->display.cdclk.table = glk_cdclk_table;
+ else
+ dev_priv->display.cdclk.table = bxt_cdclk_table;
+ } else if (DISPLAY_VER(dev_priv) == 9) {
+ dev_priv->display.funcs.cdclk = &skl_cdclk_funcs;
+ } else if (IS_BROADWELL(dev_priv)) {
+ dev_priv->display.funcs.cdclk = &bdw_cdclk_funcs;
+ } else if (IS_HASWELL(dev_priv)) {
+ dev_priv->display.funcs.cdclk = &hsw_cdclk_funcs;
+ } else if (IS_CHERRYVIEW(dev_priv)) {
+ dev_priv->display.funcs.cdclk = &chv_cdclk_funcs;
+ } else if (IS_VALLEYVIEW(dev_priv)) {
+ dev_priv->display.funcs.cdclk = &vlv_cdclk_funcs;
+ } else if (IS_SANDYBRIDGE(dev_priv) || IS_IVYBRIDGE(dev_priv)) {
+ dev_priv->display.funcs.cdclk = &fixed_400mhz_cdclk_funcs;
+ } else if (IS_IRONLAKE(dev_priv)) {
+ dev_priv->display.funcs.cdclk = &ilk_cdclk_funcs;
+ } else if (IS_GM45(dev_priv)) {
+ dev_priv->display.funcs.cdclk = &gm45_cdclk_funcs;
+ } else if (IS_G45(dev_priv)) {
+ dev_priv->display.funcs.cdclk = &g33_cdclk_funcs;
+ } else if (IS_I965GM(dev_priv)) {
+ dev_priv->display.funcs.cdclk = &i965gm_cdclk_funcs;
+ } else if (IS_I965G(dev_priv)) {
+ dev_priv->display.funcs.cdclk = &fixed_400mhz_cdclk_funcs;
+ } else if (IS_PINEVIEW(dev_priv)) {
+ dev_priv->display.funcs.cdclk = &pnv_cdclk_funcs;
+ } else if (IS_G33(dev_priv)) {
+ dev_priv->display.funcs.cdclk = &g33_cdclk_funcs;
+ } else if (IS_I945GM(dev_priv)) {
+ dev_priv->display.funcs.cdclk = &i945gm_cdclk_funcs;
+ } else if (IS_I945G(dev_priv)) {
+ dev_priv->display.funcs.cdclk = &fixed_400mhz_cdclk_funcs;
+ } else if (IS_I915GM(dev_priv)) {
+ dev_priv->display.funcs.cdclk = &i915gm_cdclk_funcs;
+ } else if (IS_I915G(dev_priv)) {
+ dev_priv->display.funcs.cdclk = &i915g_cdclk_funcs;
+ } else if (IS_I865G(dev_priv)) {
+ dev_priv->display.funcs.cdclk = &i865g_cdclk_funcs;
+ } else if (IS_I85X(dev_priv)) {
+ dev_priv->display.funcs.cdclk = &i85x_cdclk_funcs;
+ } else if (IS_I845G(dev_priv)) {
+ dev_priv->display.funcs.cdclk = &i845g_cdclk_funcs;
+ } else if (IS_I830(dev_priv)) {
+ dev_priv->display.funcs.cdclk = &i830_cdclk_funcs;
+ }
+
+ if (drm_WARN(&dev_priv->drm, !dev_priv->display.funcs.cdclk,
+ "Unknown platform. Assuming i830\n"))
+ dev_priv->display.funcs.cdclk = &i830_cdclk_funcs;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.h b/drivers/gpu/drm/i915/display/intel_cdclk.h
new file mode 100644
index 000000000..c674879a8
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_CDCLK_H__
+#define __INTEL_CDCLK_H__
+
+#include <linux/types.h>
+
+#include "intel_display.h"
+#include "intel_global_state.h"
+
+struct drm_i915_private;
+struct intel_atomic_state;
+struct intel_crtc_state;
+
+struct intel_cdclk_config {
+ unsigned int cdclk, vco, ref, bypass;
+ u8 voltage_level;
+};
+
+struct intel_cdclk_state {
+ struct intel_global_state base;
+
+ /*
+ * Logical configuration of cdclk (used for all scaling,
+ * watermark, etc. calculations and checks). This is
+ * computed as if all enabled crtcs were active.
+ */
+ struct intel_cdclk_config logical;
+
+ /*
+ * Actual configuration of cdclk, can be different from the
+ * logical configuration only when all crtcs are DPMS off.
+ */
+ struct intel_cdclk_config actual;
+
+ /* minimum acceptable cdclk to satisfy bandwidth requirements */
+ int bw_min_cdclk;
+ /* minimum acceptable cdclk for each pipe */
+ int min_cdclk[I915_MAX_PIPES];
+ /* minimum acceptable voltage level for each pipe */
+ u8 min_voltage_level[I915_MAX_PIPES];
+
+ /* pipe to which cd2x update is synchronized */
+ enum pipe pipe;
+
+ /* forced minimum cdclk for glk+ audio w/a */
+ int force_min_cdclk;
+
+ /* bitmask of active pipes */
+ u8 active_pipes;
+};
+
+int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state);
+void intel_cdclk_init_hw(struct drm_i915_private *i915);
+void intel_cdclk_uninit_hw(struct drm_i915_private *i915);
+void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv);
+void intel_update_max_cdclk(struct drm_i915_private *dev_priv);
+void intel_update_cdclk(struct drm_i915_private *dev_priv);
+u32 intel_read_rawclk(struct drm_i915_private *dev_priv);
+bool intel_cdclk_needs_modeset(const struct intel_cdclk_config *a,
+ const struct intel_cdclk_config *b);
+void intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state);
+void intel_set_cdclk_post_plane_update(struct intel_atomic_state *state);
+void intel_cdclk_dump_config(struct drm_i915_private *i915,
+ const struct intel_cdclk_config *cdclk_config,
+ const char *context);
+int intel_modeset_calc_cdclk(struct intel_atomic_state *state);
+void intel_cdclk_get_cdclk(struct drm_i915_private *dev_priv,
+ struct intel_cdclk_config *cdclk_config);
+int intel_cdclk_atomic_check(struct intel_atomic_state *state,
+ bool *need_cdclk_calc);
+struct intel_cdclk_state *
+intel_atomic_get_cdclk_state(struct intel_atomic_state *state);
+
+#define to_intel_cdclk_state(x) container_of((x), struct intel_cdclk_state, base)
+#define intel_atomic_get_old_cdclk_state(state) \
+ to_intel_cdclk_state(intel_atomic_get_old_global_obj_state(state, &to_i915(state->base.dev)->display.cdclk.obj))
+#define intel_atomic_get_new_cdclk_state(state) \
+ to_intel_cdclk_state(intel_atomic_get_new_global_obj_state(state, &to_i915(state->base.dev)->display.cdclk.obj))
+
+int intel_cdclk_init(struct drm_i915_private *dev_priv);
+
+#endif /* __INTEL_CDCLK_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
new file mode 100644
index 000000000..78211e583
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_color.c
@@ -0,0 +1,2295 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "intel_color.h"
+#include "intel_de.h"
+#include "intel_display_types.h"
+#include "intel_dpll.h"
+#include "intel_dsb.h"
+#include "vlv_dsi_pll.h"
+
+struct intel_color_funcs {
+ int (*color_check)(struct intel_crtc_state *crtc_state);
+ /*
+ * Program non-arming double buffered color management registers
+ * before vblank evasion. The registers should then latch after
+ * the arming register is written (by color_commit_arm()) during
+ * the next vblank start, alongside any other double buffered
+ * registers involved with the same commit. This hook is optional.
+ */
+ void (*color_commit_noarm)(const struct intel_crtc_state *crtc_state);
+ /*
+ * Program arming double buffered color management registers
+ * during vblank evasion. The registers (and whatever other registers
+ * they arm that were written by color_commit_noarm) should then latch
+ * during the next vblank start, alongside any other double buffered
+ * registers involved with the same commit.
+ */
+ void (*color_commit_arm)(const struct intel_crtc_state *crtc_state);
+ /*
+ * Load LUTs (and other single buffered color management
+ * registers). Will (hopefully) be called during the vblank
+ * following the latching of any double buffered registers
+ * involved with the same commit.
+ */
+ void (*load_luts)(const struct intel_crtc_state *crtc_state);
+ void (*read_luts)(struct intel_crtc_state *crtc_state);
+};
+
+#define CTM_COEFF_SIGN (1ULL << 63)
+
+#define CTM_COEFF_1_0 (1ULL << 32)
+#define CTM_COEFF_2_0 (CTM_COEFF_1_0 << 1)
+#define CTM_COEFF_4_0 (CTM_COEFF_2_0 << 1)
+#define CTM_COEFF_8_0 (CTM_COEFF_4_0 << 1)
+#define CTM_COEFF_0_5 (CTM_COEFF_1_0 >> 1)
+#define CTM_COEFF_0_25 (CTM_COEFF_0_5 >> 1)
+#define CTM_COEFF_0_125 (CTM_COEFF_0_25 >> 1)
+
+#define CTM_COEFF_LIMITED_RANGE ((235ULL - 16ULL) * CTM_COEFF_1_0 / 255)
+
+#define CTM_COEFF_NEGATIVE(coeff) (((coeff) & CTM_COEFF_SIGN) != 0)
+#define CTM_COEFF_ABS(coeff) ((coeff) & (CTM_COEFF_SIGN - 1))
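+
+/*
+ * Worked example (illustrative): in this U32.32 sign-magnitude format,
+ * CTM_COEFF_LIMITED_RANGE = 219 * 2^32 / 255, i.e. roughly 0.8588,
+ * which is the 16..235 vs 0..255 range compression factor.
+ */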
+
+#define LEGACY_LUT_LENGTH 256
+
+/*
+ * ILK+ csc matrix:
+ *
+ * |R/Cr| | c0 c1 c2 | ( |R/Cr| |preoff0| ) |postoff0|
+ * |G/Y | = | c3 c4 c5 | x ( |G/Y | + |preoff1| ) + |postoff1|
+ * |B/Cb| | c6 c7 c8 | ( |B/Cb| |preoff2| ) |postoff2|
+ *
+ * ILK/SNB don't have explicit post offsets, and instead
+ * CSC_MODE_YUV_TO_RGB and CSC_BLACK_SCREEN_OFFSET are used:
+ * CSC_MODE_YUV_TO_RGB=0 + CSC_BLACK_SCREEN_OFFSET=0 -> 1/2, 0, 1/2
+ * CSC_MODE_YUV_TO_RGB=0 + CSC_BLACK_SCREEN_OFFSET=1 -> 1/2, 1/16, 1/2
+ * CSC_MODE_YUV_TO_RGB=1 + CSC_BLACK_SCREEN_OFFSET=0 -> 0, 0, 0
+ * CSC_MODE_YUV_TO_RGB=1 + CSC_BLACK_SCREEN_OFFSET=1 -> 1/16, 1/16, 1/16
+ */
+
+/*
+ * Extract the CSC coefficient from a CTM coefficient (in U32.32 fixed point
+ * format). This macro takes the coefficient we want transformed and the
+ * number of fractional bits.
+ *
+ * We only have a 9-bit precision window which slides depending on the
+ * value of the CTM coefficient, and the value is written starting from
+ * bit 3. We also round the value.
+ */
+#define ILK_CSC_COEFF_FP(coeff, fbits) \
+ (clamp_val(((coeff) >> (32 - (fbits) - 3)) + 4, 0, 0xfff) & 0xff8)
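+
+/*
+ * Worked example (illustrative): a coefficient of 0.5 is 1ULL << 31 in
+ * U32.32. With fbits == 9 the shift is 32 - 9 - 3 = 20, giving 0x800;
+ * rounding adds 4 (0x804) and the final mask clears the low 3 bits
+ * again, so the stored mantissa is 0x800.
+ */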
+
+#define ILK_CSC_COEFF_LIMITED_RANGE 0x0dc0
+#define ILK_CSC_COEFF_1_0 0x7800
+
+#define ILK_CSC_POSTOFF_LIMITED_RANGE (16 * (1 << 12) / 255)
+
+/* Nop pre/post offsets */
+static const u16 ilk_csc_off_zero[3] = {};
+
+/* Identity matrix */
+static const u16 ilk_csc_coeff_identity[9] = {
+ ILK_CSC_COEFF_1_0, 0, 0,
+ 0, ILK_CSC_COEFF_1_0, 0,
+ 0, 0, ILK_CSC_COEFF_1_0,
+};
+
+/* Limited range RGB post offsets */
+static const u16 ilk_csc_postoff_limited_range[3] = {
+ ILK_CSC_POSTOFF_LIMITED_RANGE,
+ ILK_CSC_POSTOFF_LIMITED_RANGE,
+ ILK_CSC_POSTOFF_LIMITED_RANGE,
+};
+
+/* Full range RGB -> limited range RGB matrix */
+static const u16 ilk_csc_coeff_limited_range[9] = {
+ ILK_CSC_COEFF_LIMITED_RANGE, 0, 0,
+ 0, ILK_CSC_COEFF_LIMITED_RANGE, 0,
+ 0, 0, ILK_CSC_COEFF_LIMITED_RANGE,
+};
+
+/* BT.709 full range RGB -> limited range YCbCr matrix */
+static const u16 ilk_csc_coeff_rgb_to_ycbcr[9] = {
+ 0x1e08, 0x9cc0, 0xb528,
+ 0x2ba8, 0x09d8, 0x37e8,
+ 0xbce8, 0x9ad8, 0x1e08,
+};
+
+/* Limited range YCbCr post offsets */
+static const u16 ilk_csc_postoff_rgb_to_ycbcr[3] = {
+ 0x0800, 0x0100, 0x0800,
+};
+
+static bool lut_is_legacy(const struct drm_property_blob *lut)
+{
+ return drm_color_lut_size(lut) == LEGACY_LUT_LENGTH;
+}
+
+static bool crtc_state_is_legacy_gamma(const struct intel_crtc_state *crtc_state)
+{
+ return !crtc_state->hw.degamma_lut &&
+ !crtc_state->hw.ctm &&
+ crtc_state->hw.gamma_lut &&
+ lut_is_legacy(crtc_state->hw.gamma_lut);
+}
+
+/*
+ * When using limited range, multiply the matrix given by userspace by
+ * the matrix that we would use for the limited range.
+ */
+static u64 *ctm_mult_by_limited(u64 *result, const u64 *input)
+{
+ int i;
+
+ for (i = 0; i < 9; i++) {
+ u64 user_coeff = input[i];
+ u32 limited_coeff = CTM_COEFF_LIMITED_RANGE;
+ u32 abs_coeff = clamp_val(CTM_COEFF_ABS(user_coeff), 0,
+ CTM_COEFF_4_0 - 1) >> 2;
+
+ /*
+ * By scaling every coefficient with the limited range (16-235)
+ * vs the full range (0-255), the final output will be scaled down to
+ * fit in the limited range supported by the panel.
+ */
+ result[i] = mul_u32_u32(limited_coeff, abs_coeff) >> 30;
+ result[i] |= user_coeff & CTM_COEFF_SIGN;
+ }
+
+ return result;
+}
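+
+/*
+ * Worked example (illustrative): a user coefficient of 1.0 (1ULL << 32)
+ * stays below the 4.0 clamp and is shifted down to 1 << 30, so the
+ * result is CTM_COEFF_LIMITED_RANGE * (1 << 30) >> 30 =
+ * CTM_COEFF_LIMITED_RANGE, i.e. 219/255 (~0.8588) in U32.32.
+ */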
+
+static void ilk_update_pipe_csc(struct intel_crtc *crtc,
+ const u16 preoff[3],
+ const u16 coeff[9],
+ const u16 postoff[3])
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+
+ intel_de_write_fw(dev_priv, PIPE_CSC_PREOFF_HI(pipe), preoff[0]);
+ intel_de_write_fw(dev_priv, PIPE_CSC_PREOFF_ME(pipe), preoff[1]);
+ intel_de_write_fw(dev_priv, PIPE_CSC_PREOFF_LO(pipe), preoff[2]);
+
+ intel_de_write_fw(dev_priv, PIPE_CSC_COEFF_RY_GY(pipe),
+ coeff[0] << 16 | coeff[1]);
+ intel_de_write_fw(dev_priv, PIPE_CSC_COEFF_BY(pipe), coeff[2] << 16);
+
+ intel_de_write_fw(dev_priv, PIPE_CSC_COEFF_RU_GU(pipe),
+ coeff[3] << 16 | coeff[4]);
+ intel_de_write_fw(dev_priv, PIPE_CSC_COEFF_BU(pipe), coeff[5] << 16);
+
+ intel_de_write_fw(dev_priv, PIPE_CSC_COEFF_RV_GV(pipe),
+ coeff[6] << 16 | coeff[7]);
+ intel_de_write_fw(dev_priv, PIPE_CSC_COEFF_BV(pipe), coeff[8] << 16);
+
+ if (DISPLAY_VER(dev_priv) >= 7) {
+ intel_de_write_fw(dev_priv, PIPE_CSC_POSTOFF_HI(pipe),
+ postoff[0]);
+ intel_de_write_fw(dev_priv, PIPE_CSC_POSTOFF_ME(pipe),
+ postoff[1]);
+ intel_de_write_fw(dev_priv, PIPE_CSC_POSTOFF_LO(pipe),
+ postoff[2]);
+ }
+}
+
+static void icl_update_output_csc(struct intel_crtc *crtc,
+ const u16 preoff[3],
+ const u16 coeff[9],
+ const u16 postoff[3])
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+
+ intel_de_write_fw(dev_priv, PIPE_CSC_OUTPUT_PREOFF_HI(pipe), preoff[0]);
+ intel_de_write_fw(dev_priv, PIPE_CSC_OUTPUT_PREOFF_ME(pipe), preoff[1]);
+ intel_de_write_fw(dev_priv, PIPE_CSC_OUTPUT_PREOFF_LO(pipe), preoff[2]);
+
+ intel_de_write_fw(dev_priv, PIPE_CSC_OUTPUT_COEFF_RY_GY(pipe),
+ coeff[0] << 16 | coeff[1]);
+ intel_de_write_fw(dev_priv, PIPE_CSC_OUTPUT_COEFF_BY(pipe),
+ coeff[2] << 16);
+
+ intel_de_write_fw(dev_priv, PIPE_CSC_OUTPUT_COEFF_RU_GU(pipe),
+ coeff[3] << 16 | coeff[4]);
+ intel_de_write_fw(dev_priv, PIPE_CSC_OUTPUT_COEFF_BU(pipe),
+ coeff[5] << 16);
+
+ intel_de_write_fw(dev_priv, PIPE_CSC_OUTPUT_COEFF_RV_GV(pipe),
+ coeff[6] << 16 | coeff[7]);
+ intel_de_write_fw(dev_priv, PIPE_CSC_OUTPUT_COEFF_BV(pipe),
+ coeff[8] << 16);
+
+ intel_de_write_fw(dev_priv, PIPE_CSC_OUTPUT_POSTOFF_HI(pipe), postoff[0]);
+ intel_de_write_fw(dev_priv, PIPE_CSC_OUTPUT_POSTOFF_ME(pipe), postoff[1]);
+ intel_de_write_fw(dev_priv, PIPE_CSC_OUTPUT_POSTOFF_LO(pipe), postoff[2]);
+}
+
+static bool ilk_csc_limited_range(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+
+ /*
+ * FIXME if there's a gamma LUT after the CSC, we should
+ * do the range compression using the gamma LUT instead.
+ */
+ return crtc_state->limited_color_range &&
+ (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) ||
+ IS_DISPLAY_VER(dev_priv, 9, 10));
+}
+
+static void ilk_csc_convert_ctm(const struct intel_crtc_state *crtc_state,
+ u16 coeffs[9])
+{
+ const struct drm_color_ctm *ctm = crtc_state->hw.ctm->data;
+ const u64 *input;
+ u64 temp[9];
+ int i;
+
+ if (ilk_csc_limited_range(crtc_state))
+ input = ctm_mult_by_limited(temp, ctm->matrix);
+ else
+ input = ctm->matrix;
+
+ /*
+ * Convert fixed point S31.32 input to format supported by the
+ * hardware.
+ */
+ for (i = 0; i < 9; i++) {
+ u64 abs_coeff = ((1ULL << 63) - 1) & input[i];
+
+ /*
+ * Clamp input value to min/max supported by
+ * hardware.
+ */
+ abs_coeff = clamp_val(abs_coeff, 0, CTM_COEFF_4_0 - 1);
+
+ coeffs[i] = 0;
+
+ /* sign bit */
+ if (CTM_COEFF_NEGATIVE(input[i]))
+ coeffs[i] |= 1 << 15;
+
+ if (abs_coeff < CTM_COEFF_0_125)
+ coeffs[i] |= (3 << 12) |
+ ILK_CSC_COEFF_FP(abs_coeff, 12);
+ else if (abs_coeff < CTM_COEFF_0_25)
+ coeffs[i] |= (2 << 12) |
+ ILK_CSC_COEFF_FP(abs_coeff, 11);
+ else if (abs_coeff < CTM_COEFF_0_5)
+ coeffs[i] |= (1 << 12) |
+ ILK_CSC_COEFF_FP(abs_coeff, 10);
+ else if (abs_coeff < CTM_COEFF_1_0)
+ coeffs[i] |= ILK_CSC_COEFF_FP(abs_coeff, 9);
+ else if (abs_coeff < CTM_COEFF_2_0)
+ coeffs[i] |= (7 << 12) |
+ ILK_CSC_COEFF_FP(abs_coeff, 8);
+ else
+ coeffs[i] |= (6 << 12) |
+ ILK_CSC_COEFF_FP(abs_coeff, 7);
+ }
+}
+
+static void ilk_load_csc_matrix(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ bool limited_color_range = ilk_csc_limited_range(crtc_state);
+
+ if (crtc_state->hw.ctm) {
+ u16 coeff[9];
+
+ ilk_csc_convert_ctm(crtc_state, coeff);
+ ilk_update_pipe_csc(crtc, ilk_csc_off_zero, coeff,
+ limited_color_range ?
+ ilk_csc_postoff_limited_range :
+ ilk_csc_off_zero);
+ } else if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) {
+ ilk_update_pipe_csc(crtc, ilk_csc_off_zero,
+ ilk_csc_coeff_rgb_to_ycbcr,
+ ilk_csc_postoff_rgb_to_ycbcr);
+ } else if (limited_color_range) {
+ ilk_update_pipe_csc(crtc, ilk_csc_off_zero,
+ ilk_csc_coeff_limited_range,
+ ilk_csc_postoff_limited_range);
+ } else if (crtc_state->csc_enable) {
+ /*
+ * On GLK both pipe CSC and degamma LUT are controlled
+ * by csc_enable. Hence for the cases where the degamma
+ * LUT is needed but the CSC is not, we need to load an
+ * identity matrix.
+ */
+ drm_WARN_ON(&dev_priv->drm, !IS_GEMINILAKE(dev_priv));
+
+ ilk_update_pipe_csc(crtc, ilk_csc_off_zero,
+ ilk_csc_coeff_identity,
+ ilk_csc_off_zero);
+ }
+}
+
+static void icl_load_csc_matrix(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ if (crtc_state->hw.ctm) {
+ u16 coeff[9];
+
+ ilk_csc_convert_ctm(crtc_state, coeff);
+ ilk_update_pipe_csc(crtc, ilk_csc_off_zero,
+ coeff, ilk_csc_off_zero);
+ }
+
+ if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) {
+ icl_update_output_csc(crtc, ilk_csc_off_zero,
+ ilk_csc_coeff_rgb_to_ycbcr,
+ ilk_csc_postoff_rgb_to_ycbcr);
+ } else if (crtc_state->limited_color_range) {
+ icl_update_output_csc(crtc, ilk_csc_off_zero,
+ ilk_csc_coeff_limited_range,
+ ilk_csc_postoff_limited_range);
+ }
+}
+
+static void chv_load_cgm_csc(struct intel_crtc *crtc,
+ const struct drm_property_blob *blob)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct drm_color_ctm *ctm = blob->data;
+ enum pipe pipe = crtc->pipe;
+ u16 coeffs[9];
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(coeffs); i++) {
+ u64 abs_coeff = ((1ULL << 63) - 1) & ctm->matrix[i];
+
+ /* Round coefficient. */
+ abs_coeff += 1 << (32 - 13);
+ /* Clamp to hardware limits. */
+ abs_coeff = clamp_val(abs_coeff, 0, CTM_COEFF_8_0 - 1);
+
+ coeffs[i] = 0;
+
+ /* Write coefficients in S3.12 format. */
+ if (ctm->matrix[i] & (1ULL << 63))
+ coeffs[i] |= 1 << 15;
+
+ coeffs[i] |= ((abs_coeff >> 32) & 7) << 12;
+ coeffs[i] |= (abs_coeff >> 20) & 0xfff;
+ }
+
+ intel_de_write_fw(dev_priv, CGM_PIPE_CSC_COEFF01(pipe),
+ coeffs[1] << 16 | coeffs[0]);
+ intel_de_write_fw(dev_priv, CGM_PIPE_CSC_COEFF23(pipe),
+ coeffs[3] << 16 | coeffs[2]);
+ intel_de_write_fw(dev_priv, CGM_PIPE_CSC_COEFF45(pipe),
+ coeffs[5] << 16 | coeffs[4]);
+ intel_de_write_fw(dev_priv, CGM_PIPE_CSC_COEFF67(pipe),
+ coeffs[7] << 16 | coeffs[6]);
+ intel_de_write_fw(dev_priv, CGM_PIPE_CSC_COEFF8(pipe),
+ coeffs[8]);
+}
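+
+/*
+ * Worked example (illustrative): a CTM coefficient of 1.0 (1ULL << 32)
+ * rounds to 1ULL << 32 plus 1 << 19; (abs_coeff >> 32) & 7 = 1 fills
+ * the integer bits and (abs_coeff >> 20) & 0xfff = 0 the fractional
+ * bits, giving 0x1000 in S3.12, which is exactly 1.0.
+ */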
+
+/* convert hw value with given bit_precision to lut property val */
+static u32 intel_color_lut_pack(u32 val, int bit_precision)
+{
+ u32 max = 0xffff >> (16 - bit_precision);
+
+ val = clamp_val(val, 0, max);
+
+ if (bit_precision < 16)
+ val <<= 16 - bit_precision;
+
+ return val;
+}
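+
+/*
+ * Worked example (illustrative): a 10 bit hardware value of 0x3ff is
+ * clamped to 0x3ff and shifted left by 6, yielding the 16 bit property
+ * value 0xffc0 (the low bits are not replicated).
+ */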
+
+static u32 i9xx_lut_8(const struct drm_color_lut *color)
+{
+ return drm_color_lut_extract(color->red, 8) << 16 |
+ drm_color_lut_extract(color->green, 8) << 8 |
+ drm_color_lut_extract(color->blue, 8);
+}
+
+static void i9xx_lut_8_pack(struct drm_color_lut *entry, u32 val)
+{
+ entry->red = intel_color_lut_pack(REG_FIELD_GET(LGC_PALETTE_RED_MASK, val), 8);
+ entry->green = intel_color_lut_pack(REG_FIELD_GET(LGC_PALETTE_GREEN_MASK, val), 8);
+ entry->blue = intel_color_lut_pack(REG_FIELD_GET(LGC_PALETTE_BLUE_MASK, val), 8);
+}
+
+/* i965+ "10.6" bit interpolated format "even DW" (low 8 bits) */
+static u32 i965_lut_10p6_ldw(const struct drm_color_lut *color)
+{
+ return (color->red & 0xff) << 16 |
+ (color->green & 0xff) << 8 |
+ (color->blue & 0xff);
+}
+
+/* i965+ "10.6" interpolated format "odd DW" (high 8 bits) */
+static u32 i965_lut_10p6_udw(const struct drm_color_lut *color)
+{
+ return (color->red >> 8) << 16 |
+ (color->green >> 8) << 8 |
+ (color->blue >> 8);
+}
+
+static void i965_lut_10p6_pack(struct drm_color_lut *entry, u32 ldw, u32 udw)
+{
+ entry->red = REG_FIELD_GET(PALETTE_RED_MASK, udw) << 8 |
+ REG_FIELD_GET(PALETTE_RED_MASK, ldw);
+ entry->green = REG_FIELD_GET(PALETTE_GREEN_MASK, udw) << 8 |
+ REG_FIELD_GET(PALETTE_GREEN_MASK, ldw);
+ entry->blue = REG_FIELD_GET(PALETTE_BLUE_MASK, udw) << 8 |
+ REG_FIELD_GET(PALETTE_BLUE_MASK, ldw);
+}
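+
+/*
+ * Worked example (illustrative): a red value of 0x1234 stores 0x34 in
+ * the even DW and 0x12 in the odd DW, both at bits 23:16; the pack
+ * helper above reassembles them as 0x12 << 8 | 0x34.
+ */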
+
+static u16 i965_lut_11p6_max_pack(u32 val)
+{
+ /* PIPEGCMAX is 11.6, clamp to 10.6 */
+ return clamp_val(val, 0, 0xffff);
+}
+
+static u32 ilk_lut_10(const struct drm_color_lut *color)
+{
+ return drm_color_lut_extract(color->red, 10) << 20 |
+ drm_color_lut_extract(color->green, 10) << 10 |
+ drm_color_lut_extract(color->blue, 10);
+}
+
+static void ilk_lut_10_pack(struct drm_color_lut *entry, u32 val)
+{
+ entry->red = intel_color_lut_pack(REG_FIELD_GET(PREC_PALETTE_RED_MASK, val), 10);
+ entry->green = intel_color_lut_pack(REG_FIELD_GET(PREC_PALETTE_GREEN_MASK, val), 10);
+ entry->blue = intel_color_lut_pack(REG_FIELD_GET(PREC_PALETTE_BLUE_MASK, val), 10);
+}
+
+static void icl_lut_multi_seg_pack(struct drm_color_lut *entry, u32 ldw, u32 udw)
+{
+ entry->red = REG_FIELD_GET(PAL_PREC_MULTI_SEG_RED_UDW_MASK, udw) << 6 |
+ REG_FIELD_GET(PAL_PREC_MULTI_SEG_RED_LDW_MASK, ldw);
+ entry->green = REG_FIELD_GET(PAL_PREC_MULTI_SEG_GREEN_UDW_MASK, udw) << 6 |
+ REG_FIELD_GET(PAL_PREC_MULTI_SEG_GREEN_LDW_MASK, ldw);
+ entry->blue = REG_FIELD_GET(PAL_PREC_MULTI_SEG_BLUE_UDW_MASK, udw) << 6 |
+ REG_FIELD_GET(PAL_PREC_MULTI_SEG_BLUE_LDW_MASK, ldw);
+}
+
+static void icl_color_commit_noarm(const struct intel_crtc_state *crtc_state)
+{
+ icl_load_csc_matrix(crtc_state);
+}
+
+static void skl_color_commit_noarm(const struct intel_crtc_state *crtc_state)
+{
+ /*
+ * Possibly related to display WA #1184, SKL CSC loses the latched
+ * CSC coeff/offset register values if the CSC registers are disarmed
+ * between DC5 exit and PSR exit. This will cause the plane(s) to
+ * output all black (until CSC_MODE is rearmed and properly latched).
+ * Once PSR exit (and proper register latching) has occurred the
+ * danger is over. Thus when PSR is enabled the CSC coeff/offset
+ * register programming will be performed from skl_color_commit_arm()
+ * which is called after PSR exit.
+ */
+ if (!crtc_state->has_psr)
+ ilk_load_csc_matrix(crtc_state);
+}
+
+static void ilk_color_commit_noarm(const struct intel_crtc_state *crtc_state)
+{
+ ilk_load_csc_matrix(crtc_state);
+}
+
+static void i9xx_color_commit_arm(const struct intel_crtc_state *crtc_state)
+{
+ /* update PIPECONF GAMMA_MODE */
+ i9xx_set_pipeconf(crtc_state);
+}
+
+static void ilk_color_commit_arm(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+ /* update PIPECONF GAMMA_MODE */
+ ilk_set_pipeconf(crtc_state);
+
+ intel_de_write_fw(dev_priv, PIPE_CSC_MODE(crtc->pipe),
+ crtc_state->csc_mode);
+}
+
+static void hsw_color_commit_arm(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+ intel_de_write(dev_priv, GAMMA_MODE(crtc->pipe),
+ crtc_state->gamma_mode);
+
+ intel_de_write_fw(dev_priv, PIPE_CSC_MODE(crtc->pipe),
+ crtc_state->csc_mode);
+}
+
+static void skl_color_commit_arm(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ u32 val = 0;
+
+ if (crtc_state->has_psr)
+ ilk_load_csc_matrix(crtc_state);
+
+ /*
+ * We don't (yet) allow userspace to control the pipe background color,
+ * so force it to black, but apply pipe gamma and CSC appropriately
+ * so that its handling will match how we program our planes.
+ */
+ if (crtc_state->gamma_enable)
+ val |= SKL_BOTTOM_COLOR_GAMMA_ENABLE;
+ if (crtc_state->csc_enable)
+ val |= SKL_BOTTOM_COLOR_CSC_ENABLE;
+ intel_de_write(dev_priv, SKL_BOTTOM_COLOR(pipe), val);
+
+ intel_de_write(dev_priv, GAMMA_MODE(crtc->pipe),
+ crtc_state->gamma_mode);
+
+ intel_de_write_fw(dev_priv, PIPE_CSC_MODE(crtc->pipe),
+ crtc_state->csc_mode);
+}
+
+static void i9xx_load_lut_8(struct intel_crtc *crtc,
+ const struct drm_property_blob *blob)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct drm_color_lut *lut;
+ enum pipe pipe = crtc->pipe;
+ int i;
+
+ if (!blob)
+ return;
+
+ lut = blob->data;
+
+ for (i = 0; i < 256; i++)
+ intel_de_write_fw(dev_priv, PALETTE(pipe, i),
+ i9xx_lut_8(&lut[i]));
+}
+
+static void i9xx_load_luts(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
+
+ assert_pll_enabled(dev_priv, crtc->pipe);
+
+ i9xx_load_lut_8(crtc, gamma_lut);
+}
+
+static void i965_load_lut_10p6(struct intel_crtc *crtc,
+ const struct drm_property_blob *blob)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct drm_color_lut *lut = blob->data;
+ int i, lut_size = drm_color_lut_size(blob);
+ enum pipe pipe = crtc->pipe;
+
+ for (i = 0; i < lut_size - 1; i++) {
+ intel_de_write_fw(dev_priv, PALETTE(pipe, 2 * i + 0),
+ i965_lut_10p6_ldw(&lut[i]));
+ intel_de_write_fw(dev_priv, PALETTE(pipe, 2 * i + 1),
+ i965_lut_10p6_udw(&lut[i]));
+ }
+
+ intel_de_write_fw(dev_priv, PIPEGCMAX(pipe, 0), lut[i].red);
+ intel_de_write_fw(dev_priv, PIPEGCMAX(pipe, 1), lut[i].green);
+ intel_de_write_fw(dev_priv, PIPEGCMAX(pipe, 2), lut[i].blue);
+}
+
+static void i965_load_luts(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
+ assert_dsi_pll_enabled(dev_priv);
+ else
+ assert_pll_enabled(dev_priv, crtc->pipe);
+
+ if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
+ i9xx_load_lut_8(crtc, gamma_lut);
+ else
+ i965_load_lut_10p6(crtc, gamma_lut);
+}
+
+static void ilk_load_lut_8(struct intel_crtc *crtc,
+ const struct drm_property_blob *blob)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct drm_color_lut *lut;
+ enum pipe pipe = crtc->pipe;
+ int i;
+
+ if (!blob)
+ return;
+
+ lut = blob->data;
+
+ for (i = 0; i < 256; i++)
+ intel_de_write_fw(dev_priv, LGC_PALETTE(pipe, i),
+ i9xx_lut_8(&lut[i]));
+}
+
+static void ilk_load_lut_10(struct intel_crtc *crtc,
+ const struct drm_property_blob *blob)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct drm_color_lut *lut = blob->data;
+ int i, lut_size = drm_color_lut_size(blob);
+ enum pipe pipe = crtc->pipe;
+
+ for (i = 0; i < lut_size; i++)
+ intel_de_write_fw(dev_priv, PREC_PALETTE(pipe, i),
+ ilk_lut_10(&lut[i]));
+}
+
+static void ilk_load_luts(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
+
+ switch (crtc_state->gamma_mode) {
+ case GAMMA_MODE_MODE_8BIT:
+ ilk_load_lut_8(crtc, gamma_lut);
+ break;
+ case GAMMA_MODE_MODE_10BIT:
+ ilk_load_lut_10(crtc, gamma_lut);
+ break;
+ default:
+ MISSING_CASE(crtc_state->gamma_mode);
+ break;
+ }
+}
+
+static int ivb_lut_10_size(u32 prec_index)
+{
+ if (prec_index & PAL_PREC_SPLIT_MODE)
+ return 512;
+ else
+ return 1024;
+}
+
+/*
+ * IVB/HSW Bspec / PAL_PREC_INDEX:
+ * "Restriction : Index auto increment mode is not
+ * supported and must not be enabled."
+ */
+static void ivb_load_lut_10(struct intel_crtc *crtc,
+ const struct drm_property_blob *blob,
+ u32 prec_index)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ int hw_lut_size = ivb_lut_10_size(prec_index);
+ const struct drm_color_lut *lut = blob->data;
+ int i, lut_size = drm_color_lut_size(blob);
+ enum pipe pipe = crtc->pipe;
+
+ for (i = 0; i < hw_lut_size; i++) {
+ /* We discard half the user entries in split gamma mode */
+ const struct drm_color_lut *entry =
+ &lut[i * (lut_size - 1) / (hw_lut_size - 1)];
+
+ intel_de_write_fw(dev_priv, PREC_PAL_INDEX(pipe), prec_index++);
+ intel_de_write_fw(dev_priv, PREC_PAL_DATA(pipe),
+ ilk_lut_10(entry));
+ }
+
+ /*
+ * Reset the index, otherwise it prevents the legacy palette from being
+ * written properly.
+ */
+ intel_de_write_fw(dev_priv, PREC_PAL_INDEX(pipe), 0);
+}
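+
+/*
+ * Worked example (illustrative): in split gamma mode hw_lut_size is 512;
+ * with a 1024 entry user LUT the entry picked for hardware index i is
+ * lut[i * 1023 / 511], i.e. roughly every other user entry.
+ */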
+
+/* On BDW+ the index auto increment mode actually works */
+static void bdw_load_lut_10(struct intel_crtc *crtc,
+ const struct drm_property_blob *blob,
+ u32 prec_index)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ int hw_lut_size = ivb_lut_10_size(prec_index);
+ const struct drm_color_lut *lut = blob->data;
+ int i, lut_size = drm_color_lut_size(blob);
+ enum pipe pipe = crtc->pipe;
+
+ intel_de_write_fw(dev_priv, PREC_PAL_INDEX(pipe),
+ prec_index | PAL_PREC_AUTO_INCREMENT);
+
+ for (i = 0; i < hw_lut_size; i++) {
+ /* We discard half the user entries in split gamma mode */
+ const struct drm_color_lut *entry =
+ &lut[i * (lut_size - 1) / (hw_lut_size - 1)];
+
+ intel_de_write_fw(dev_priv, PREC_PAL_DATA(pipe),
+ ilk_lut_10(entry));
+ }
+
+ /*
+ * Reset the index, otherwise it prevents the legacy palette from being
+ * written properly.
+ */
+ intel_de_write_fw(dev_priv, PREC_PAL_INDEX(pipe), 0);
+}
+
+static void ivb_load_lut_ext_max(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+
+ /* Program the max register to clamp values > 1.0. */
+ intel_dsb_reg_write(crtc_state, PREC_PAL_EXT_GC_MAX(pipe, 0), 1 << 16);
+ intel_dsb_reg_write(crtc_state, PREC_PAL_EXT_GC_MAX(pipe, 1), 1 << 16);
+ intel_dsb_reg_write(crtc_state, PREC_PAL_EXT_GC_MAX(pipe, 2), 1 << 16);
+
+ /*
+ * Program the gc max 2 register to clamp values > 1.0.
+ * ToDo: Extend the ABI to be able to program values
+ * from 3.0 to 7.0
+ */
+ if (DISPLAY_VER(dev_priv) >= 10) {
+ intel_dsb_reg_write(crtc_state, PREC_PAL_EXT2_GC_MAX(pipe, 0),
+ 1 << 16);
+ intel_dsb_reg_write(crtc_state, PREC_PAL_EXT2_GC_MAX(pipe, 1),
+ 1 << 16);
+ intel_dsb_reg_write(crtc_state, PREC_PAL_EXT2_GC_MAX(pipe, 2),
+ 1 << 16);
+ }
+}
+
+static void ivb_load_luts(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
+ const struct drm_property_blob *degamma_lut = crtc_state->hw.degamma_lut;
+ const struct drm_property_blob *blob = gamma_lut ?: degamma_lut;
+
+ switch (crtc_state->gamma_mode) {
+ case GAMMA_MODE_MODE_8BIT:
+ ilk_load_lut_8(crtc, blob);
+ break;
+ case GAMMA_MODE_MODE_SPLIT:
+ ivb_load_lut_10(crtc, degamma_lut, PAL_PREC_SPLIT_MODE |
+ PAL_PREC_INDEX_VALUE(0));
+ ivb_load_lut_ext_max(crtc_state);
+ ivb_load_lut_10(crtc, gamma_lut, PAL_PREC_SPLIT_MODE |
+ PAL_PREC_INDEX_VALUE(512));
+ break;
+ case GAMMA_MODE_MODE_10BIT:
+ ivb_load_lut_10(crtc, blob,
+ PAL_PREC_INDEX_VALUE(0));
+ ivb_load_lut_ext_max(crtc_state);
+ break;
+ default:
+ MISSING_CASE(crtc_state->gamma_mode);
+ break;
+ }
+}
+
+static void bdw_load_luts(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
+ const struct drm_property_blob *degamma_lut = crtc_state->hw.degamma_lut;
+ const struct drm_property_blob *blob = gamma_lut ?: degamma_lut;
+
+ switch (crtc_state->gamma_mode) {
+ case GAMMA_MODE_MODE_8BIT:
+ ilk_load_lut_8(crtc, blob);
+ break;
+ case GAMMA_MODE_MODE_SPLIT:
+ bdw_load_lut_10(crtc, degamma_lut, PAL_PREC_SPLIT_MODE |
+ PAL_PREC_INDEX_VALUE(0));
+ ivb_load_lut_ext_max(crtc_state);
+ bdw_load_lut_10(crtc, gamma_lut, PAL_PREC_SPLIT_MODE |
+ PAL_PREC_INDEX_VALUE(512));
+ break;
+ case GAMMA_MODE_MODE_10BIT:
+ bdw_load_lut_10(crtc, blob,
+ PAL_PREC_INDEX_VALUE(0));
+ ivb_load_lut_ext_max(crtc_state);
+ break;
+ default:
+ MISSING_CASE(crtc_state->gamma_mode);
+ break;
+ }
+}
+
+static int glk_degamma_lut_size(struct drm_i915_private *i915)
+{
+ if (DISPLAY_VER(i915) >= 13)
+ return 131;
+ else
+ return 35;
+}
+
+static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ int i, lut_size = INTEL_INFO(dev_priv)->display.color.degamma_lut_size;
+ const struct drm_color_lut *lut = crtc_state->hw.degamma_lut->data;
+
+ /*
+ * When setting the auto-increment bit, the hardware seems to
+ * ignore the index bits, so we need to reset it to index 0
+ * separately.
+ */
+ intel_de_write_fw(dev_priv, PRE_CSC_GAMC_INDEX(pipe), 0);
+ intel_de_write_fw(dev_priv, PRE_CSC_GAMC_INDEX(pipe),
+ PRE_CSC_GAMC_AUTO_INCREMENT);
+
+ for (i = 0; i < lut_size; i++) {
+ /*
+ * The first lut_size entries represent the range from 0 to 1.0.
+ * 3 additional lut entries will represent the extended range
+ * inputs 3.0 and 7.0 respectively, currently clamped
+ * at 1.0. Since the precision is 16 bit, the user
+ * value can be written directly to the register.
+ * The pipe degamma table in GLK+ onwards doesn't
+ * support different values per channel, so this just
+ * programs the green value, which will be equal to red and
+ * blue, into the lut registers.
+ * ToDo: Extend to max 7.0. Enable 32 bit input values
+ * as compared to just 16 to achieve this.
+ */
+ intel_de_write_fw(dev_priv, PRE_CSC_GAMC_DATA(pipe),
+ lut[i].green);
+ }
+
+ /* Clamp values > 1.0. */
+ while (i++ < glk_degamma_lut_size(dev_priv))
+ intel_de_write_fw(dev_priv, PRE_CSC_GAMC_DATA(pipe), 1 << 16);
+
+ intel_de_write_fw(dev_priv, PRE_CSC_GAMC_INDEX(pipe), 0);
+}
+
+static void glk_load_degamma_lut_linear(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ int i, lut_size = INTEL_INFO(dev_priv)->display.color.degamma_lut_size;
+
+ /*
+ * When setting the auto-increment bit, the hardware seems to
+ * ignore the index bits, so we need to reset it to index 0
+ * separately.
+ */
+ intel_de_write_fw(dev_priv, PRE_CSC_GAMC_INDEX(pipe), 0);
+ intel_de_write_fw(dev_priv, PRE_CSC_GAMC_INDEX(pipe),
+ PRE_CSC_GAMC_AUTO_INCREMENT);
+
+ for (i = 0; i < lut_size; i++) {
+ u32 v = (i << 16) / (lut_size - 1);
+
+ intel_de_write_fw(dev_priv, PRE_CSC_GAMC_DATA(pipe), v);
+ }
+
+ /* Clamp values > 1.0. */
+ while (i++ < 35)
+ intel_de_write_fw(dev_priv, PRE_CSC_GAMC_DATA(pipe), 1 << 16);
+
+ intel_de_write_fw(dev_priv, PRE_CSC_GAMC_INDEX(pipe), 0);
+}
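+
+/*
+ * Worked example (illustrative, assuming a 33 entry degamma table): the
+ * linear ramp above programs v = (i << 16) / 32, so entry 16 gets 32768
+ * (0.5 in .16 fixed point) and entry 32 gets 65536 (1.0).
+ */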
+
+static void glk_load_luts(const struct intel_crtc_state *crtc_state)
+{
+ const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ /*
+ * On GLK+ both pipe CSC and degamma LUT are controlled
+ * by csc_enable. Hence for the cases where the CSC is
+ * needed but the degamma LUT is not, we need to load a
+ * linear degamma LUT. In fact we'll just always load
+ * the degamma LUT so that we don't have to reload
+ * it every time the pipe CSC is enabled.
+ */
+ if (crtc_state->hw.degamma_lut)
+ glk_load_degamma_lut(crtc_state);
+ else
+ glk_load_degamma_lut_linear(crtc_state);
+
+ switch (crtc_state->gamma_mode) {
+ case GAMMA_MODE_MODE_8BIT:
+ ilk_load_lut_8(crtc, gamma_lut);
+ break;
+ case GAMMA_MODE_MODE_10BIT:
+ bdw_load_lut_10(crtc, gamma_lut, PAL_PREC_INDEX_VALUE(0));
+ ivb_load_lut_ext_max(crtc_state);
+ break;
+ default:
+ MISSING_CASE(crtc_state->gamma_mode);
+ break;
+ }
+}
+
+/* ilk+ "12.4" interpolated format (high 10 bits) */
+static u32 ilk_lut_12p4_udw(const struct drm_color_lut *color)
+{
+ return (color->red >> 6) << 20 | (color->green >> 6) << 10 |
+ (color->blue >> 6);
+}
+
+/* ilk+ "12.4" interpolated format (low 6 bits) */
+static u32 ilk_lut_12p4_ldw(const struct drm_color_lut *color)
+{
+ return (color->red & 0x3f) << 24 | (color->green & 0x3f) << 14 |
+ (color->blue & 0x3f) << 4;
+}
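+
+/*
+ * Worked example (illustrative): a 16 bit red value of 0xabcd splits
+ * into a high part of 0xabcd >> 6 = 0x2af in the udw and a low part of
+ * 0xabcd & 0x3f = 0x0d in the ldw.
+ */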
+
+static void
+icl_load_gcmax(const struct intel_crtc_state *crtc_state,
+ const struct drm_color_lut *color)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ enum pipe pipe = crtc->pipe;
+
+ /* FIXME LUT entries are 16 bit only, so we can prog 0xFFFF max */
+ intel_dsb_reg_write(crtc_state, PREC_PAL_GC_MAX(pipe, 0), color->red);
+ intel_dsb_reg_write(crtc_state, PREC_PAL_GC_MAX(pipe, 1), color->green);
+ intel_dsb_reg_write(crtc_state, PREC_PAL_GC_MAX(pipe, 2), color->blue);
+}
+
+static void
+icl_program_gamma_superfine_segment(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ const struct drm_property_blob *blob = crtc_state->hw.gamma_lut;
+ const struct drm_color_lut *lut = blob->data;
+ enum pipe pipe = crtc->pipe;
+ int i;
+
+ /*
+ * Program Super Fine segment (let's call it seg1)...
+ *
+ * Super Fine segment's step is 1/(8 * 128 * 256) and it has
+ * 9 entries, corresponding to values 0, 1/(8 * 128 * 256),
+ * 2/(8 * 128 * 256) ... 8/(8 * 128 * 256).
+ */
+ intel_dsb_reg_write(crtc_state, PREC_PAL_MULTI_SEG_INDEX(pipe),
+ PAL_PREC_AUTO_INCREMENT);
+
+ for (i = 0; i < 9; i++) {
+ const struct drm_color_lut *entry = &lut[i];
+
+ intel_dsb_indexed_reg_write(crtc_state, PREC_PAL_MULTI_SEG_DATA(pipe),
+ ilk_lut_12p4_ldw(entry));
+ intel_dsb_indexed_reg_write(crtc_state, PREC_PAL_MULTI_SEG_DATA(pipe),
+ ilk_lut_12p4_udw(entry));
+ }
+}
+
+static void
+icl_program_gamma_multi_segment(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ const struct drm_property_blob *blob = crtc_state->hw.gamma_lut;
+ const struct drm_color_lut *lut = blob->data;
+ const struct drm_color_lut *entry;
+ enum pipe pipe = crtc->pipe;
+ int i;
+
+ /*
+ * Program Fine segment (let's call it seg2)...
+ *
+ * Fine segment's step is 1/(128 * 256), i.e. the entries are
+ * 1/(128 * 256), 2/(128 * 256) ... 256/(128 * 256). So in order to
+ * program the fine segment of the LUT we need to pick every 8th entry
+ * of the LUT, and program 256 indexes.
+ *
+ * PAL_PREC_INDEX[0] and PAL_PREC_INDEX[1] map to seg2[1],
+ * seg2[0] being unused by the hardware.
+ */
+ intel_dsb_reg_write(crtc_state, PREC_PAL_INDEX(pipe),
+ PAL_PREC_AUTO_INCREMENT);
+ for (i = 1; i < 257; i++) {
+ entry = &lut[i * 8];
+ intel_dsb_indexed_reg_write(crtc_state, PREC_PAL_DATA(pipe),
+ ilk_lut_12p4_ldw(entry));
+ intel_dsb_indexed_reg_write(crtc_state, PREC_PAL_DATA(pipe),
+ ilk_lut_12p4_udw(entry));
+ }
+
+ /*
+ * Program Coarse segment (let's call it seg3)...
+ *
+ * Coarse segment starts from index 0 and its step is 1/256, i.e. 0,
+ * 1/256, 2/256 ... 256/256. As per the description of each entry in the
+ * LUT above, we need to pick every (8 * 128)th entry of the LUT, and
+ * program 256 of those.
+ *
+ * The spec is not very clear about whether entries seg3[0] and seg3[1]
+ * are used or not, but we still need to program them to advance
+ * the index.
+ */
+ for (i = 0; i < 256; i++) {
+ entry = &lut[i * 8 * 128];
+ intel_dsb_indexed_reg_write(crtc_state, PREC_PAL_DATA(pipe),
+ ilk_lut_12p4_ldw(entry));
+ intel_dsb_indexed_reg_write(crtc_state, PREC_PAL_DATA(pipe),
+ ilk_lut_12p4_udw(entry));
+ }
+
+ /* The last entry in the LUT is to be programmed in GCMAX */
+ entry = &lut[256 * 8 * 128];
+ icl_load_gcmax(crtc_state, entry);
+ ivb_load_lut_ext_max(crtc_state);
+}
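+
+/*
+ * Worked example (illustrative): the fine segment reads lut[8], lut[16],
+ * ... lut[2048], the coarse segment reads lut[0], lut[1024], ...
+ * lut[261120], and GCMAX takes lut[262144], so the multi segment scheme
+ * expects a 262145 entry user LUT.
+ */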
+
+static void icl_load_luts(const struct intel_crtc_state *crtc_state)
+{
+ const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ if (crtc_state->hw.degamma_lut)
+ glk_load_degamma_lut(crtc_state);
+
+ switch (crtc_state->gamma_mode & GAMMA_MODE_MODE_MASK) {
+ case GAMMA_MODE_MODE_8BIT:
+ ilk_load_lut_8(crtc, gamma_lut);
+ break;
+ case GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED:
+ icl_program_gamma_superfine_segment(crtc_state);
+ icl_program_gamma_multi_segment(crtc_state);
+ break;
+ case GAMMA_MODE_MODE_10BIT:
+ bdw_load_lut_10(crtc, gamma_lut, PAL_PREC_INDEX_VALUE(0));
+ ivb_load_lut_ext_max(crtc_state);
+ break;
+ default:
+ MISSING_CASE(crtc_state->gamma_mode);
+ break;
+ }
+
+ intel_dsb_commit(crtc_state);
+}
+
+static u32 chv_cgm_degamma_ldw(const struct drm_color_lut *color)
+{
+ return drm_color_lut_extract(color->green, 14) << 16 |
+ drm_color_lut_extract(color->blue, 14);
+}
+
+static u32 chv_cgm_degamma_udw(const struct drm_color_lut *color)
+{
+ return drm_color_lut_extract(color->red, 14);
+}
+
+static void chv_load_cgm_degamma(struct intel_crtc *crtc,
+ const struct drm_property_blob *blob)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct drm_color_lut *lut = blob->data;
+ int i, lut_size = drm_color_lut_size(blob);
+ enum pipe pipe = crtc->pipe;
+
+ for (i = 0; i < lut_size; i++) {
+ intel_de_write_fw(dev_priv, CGM_PIPE_DEGAMMA(pipe, i, 0),
+ chv_cgm_degamma_ldw(&lut[i]));
+ intel_de_write_fw(dev_priv, CGM_PIPE_DEGAMMA(pipe, i, 1),
+ chv_cgm_degamma_udw(&lut[i]));
+ }
+}
+
+static u32 chv_cgm_gamma_ldw(const struct drm_color_lut *color)
+{
+ return drm_color_lut_extract(color->green, 10) << 16 |
+ drm_color_lut_extract(color->blue, 10);
+}
+
+static u32 chv_cgm_gamma_udw(const struct drm_color_lut *color)
+{
+ return drm_color_lut_extract(color->red, 10);
+}
+
+static void chv_cgm_gamma_pack(struct drm_color_lut *entry, u32 ldw, u32 udw)
+{
+ entry->green = intel_color_lut_pack(REG_FIELD_GET(CGM_PIPE_GAMMA_GREEN_MASK, ldw), 10);
+ entry->blue = intel_color_lut_pack(REG_FIELD_GET(CGM_PIPE_GAMMA_BLUE_MASK, ldw), 10);
+ entry->red = intel_color_lut_pack(REG_FIELD_GET(CGM_PIPE_GAMMA_RED_MASK, udw), 10);
+}
+
+static void chv_load_cgm_gamma(struct intel_crtc *crtc,
+ const struct drm_property_blob *blob)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct drm_color_lut *lut = blob->data;
+ int i, lut_size = drm_color_lut_size(blob);
+ enum pipe pipe = crtc->pipe;
+
+ for (i = 0; i < lut_size; i++) {
+ intel_de_write_fw(dev_priv, CGM_PIPE_GAMMA(pipe, i, 0),
+ chv_cgm_gamma_ldw(&lut[i]));
+ intel_de_write_fw(dev_priv, CGM_PIPE_GAMMA(pipe, i, 1),
+ chv_cgm_gamma_udw(&lut[i]));
+ }
+}
+
+static void chv_load_luts(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct drm_property_blob *degamma_lut = crtc_state->hw.degamma_lut;
+ const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
+ const struct drm_property_blob *ctm = crtc_state->hw.ctm;
+
+ if (crtc_state->cgm_mode & CGM_PIPE_MODE_CSC)
+ chv_load_cgm_csc(crtc, ctm);
+
+ if (crtc_state->cgm_mode & CGM_PIPE_MODE_DEGAMMA)
+ chv_load_cgm_degamma(crtc, degamma_lut);
+
+ if (crtc_state->cgm_mode & CGM_PIPE_MODE_GAMMA)
+ chv_load_cgm_gamma(crtc, gamma_lut);
+ else
+ i965_load_luts(crtc_state);
+
+ intel_de_write_fw(dev_priv, CGM_PIPE_MODE(crtc->pipe),
+ crtc_state->cgm_mode);
+}
+
+void intel_color_load_luts(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+
+ dev_priv->display.funcs.color->load_luts(crtc_state);
+}
+
+void intel_color_commit_noarm(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+
+ if (dev_priv->display.funcs.color->color_commit_noarm)
+ dev_priv->display.funcs.color->color_commit_noarm(crtc_state);
+}
+
+void intel_color_commit_arm(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+
+ dev_priv->display.funcs.color->color_commit_arm(crtc_state);
+}
+
+static bool intel_can_preload_luts(const struct intel_crtc_state *new_crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
+ struct intel_atomic_state *state =
+ to_intel_atomic_state(new_crtc_state->uapi.state);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+
+ return !old_crtc_state->hw.gamma_lut &&
+ !old_crtc_state->hw.degamma_lut;
+}
+
+static bool chv_can_preload_luts(const struct intel_crtc_state *new_crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
+ struct intel_atomic_state *state =
+ to_intel_atomic_state(new_crtc_state->uapi.state);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+
+ /*
+ * CGM_PIPE_MODE is itself single buffered. We'd have to
+ * somehow split it out from chv_load_luts() if we wanted
+ * the ability to preload the CGM LUTs/CSC without tearing.
+ */
+ if (old_crtc_state->cgm_mode || new_crtc_state->cgm_mode)
+ return false;
+
+ return !old_crtc_state->hw.gamma_lut;
+}
+
+static bool glk_can_preload_luts(const struct intel_crtc_state *new_crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
+ struct intel_atomic_state *state =
+ to_intel_atomic_state(new_crtc_state->uapi.state);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+
+ /*
+ * The hardware degamma is active whenever the pipe
+ * CSC is active. Thus even if the old state has no
+ * software degamma we need to avoid clobbering the
+ * linear hardware degamma mid scanout.
+ */
+ return !old_crtc_state->csc_enable &&
+ !old_crtc_state->hw.gamma_lut;
+}
+
+int intel_color_check(struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+
+ return dev_priv->display.funcs.color->color_check(crtc_state);
+}
+
+void intel_color_get_config(struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+
+ if (dev_priv->display.funcs.color->read_luts)
+ dev_priv->display.funcs.color->read_luts(crtc_state);
+}
+
+static bool need_plane_update(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+
+ /*
+ * On pre-SKL the pipe gamma enable and pipe csc enable for
+ * the pipe bottom color are configured via the primary plane.
+ * We have to reconfigure that even if the plane is inactive.
+ */
+ return crtc_state->active_planes & BIT(plane->id) ||
+ (DISPLAY_VER(dev_priv) < 9 &&
+ plane->id == PLANE_PRIMARY);
+}
+
+static int
+intel_color_add_affected_planes(struct intel_crtc_state *new_crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_atomic_state *state =
+ to_intel_atomic_state(new_crtc_state->uapi.state);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ struct intel_plane *plane;
+
+ if (!new_crtc_state->hw.active ||
+ drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi))
+ return 0;
+
+ if (new_crtc_state->gamma_enable == old_crtc_state->gamma_enable &&
+ new_crtc_state->csc_enable == old_crtc_state->csc_enable)
+ return 0;
+
+ for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
+ struct intel_plane_state *plane_state;
+
+ if (!need_plane_update(plane, new_crtc_state))
+ continue;
+
+ plane_state = intel_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state))
+ return PTR_ERR(plane_state);
+
+ new_crtc_state->update_planes |= BIT(plane->id);
+ }
+
+ return 0;
+}
+
+static int check_lut_size(const struct drm_property_blob *lut, int expected)
+{
+ int len;
+
+ if (!lut)
+ return 0;
+
+ len = drm_color_lut_size(lut);
+ if (len != expected) {
+ DRM_DEBUG_KMS("Invalid LUT size; got %d, expected %d\n",
+ len, expected);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
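+/*
+ * Common validation for the degamma/gamma LUT blobs: the sizes must
+ * match the platform's LUT lengths and every entry must pass the
+ * platform's drm_color_lut_check() tests. The legacy gamma LUT is
+ * always accepted as-is.
+ */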
+static int check_luts(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
+ const struct drm_property_blob *degamma_lut = crtc_state->hw.degamma_lut;
+ int gamma_length, degamma_length;
+ u32 gamma_tests, degamma_tests;
+
+ /* Always allow legacy gamma LUT with no further checking. */
+ if (crtc_state_is_legacy_gamma(crtc_state))
+ return 0;
+
+ /* C8 relies on its palette being stored in the legacy LUT */
+ if (crtc_state->c8_planes) {
+ drm_dbg_kms(&dev_priv->drm,
+ "C8 pixelformat requires the legacy LUT\n");
+ return -EINVAL;
+ }
+
+ degamma_length = INTEL_INFO(dev_priv)->display.color.degamma_lut_size;
+ gamma_length = INTEL_INFO(dev_priv)->display.color.gamma_lut_size;
+ degamma_tests = INTEL_INFO(dev_priv)->display.color.degamma_lut_tests;
+ gamma_tests = INTEL_INFO(dev_priv)->display.color.gamma_lut_tests;
+
+ if (check_lut_size(degamma_lut, degamma_length) ||
+ check_lut_size(gamma_lut, gamma_length))
+ return -EINVAL;
+
+ if (drm_color_lut_check(degamma_lut, degamma_tests) ||
+ drm_color_lut_check(gamma_lut, gamma_tests))
+ return -EINVAL;
+
+ return 0;
+}
+
+static u32 i9xx_gamma_mode(struct intel_crtc_state *crtc_state)
+{
+ if (!crtc_state->gamma_enable ||
+ crtc_state_is_legacy_gamma(crtc_state))
+ return GAMMA_MODE_MODE_8BIT;
+ else
+ return GAMMA_MODE_MODE_10BIT; /* i965+ only */
+}
+
+static int i9xx_color_check(struct intel_crtc_state *crtc_state)
+{
+ int ret;
+
+ ret = check_luts(crtc_state);
+ if (ret)
+ return ret;
+
+ crtc_state->gamma_enable =
+ crtc_state->hw.gamma_lut &&
+ !crtc_state->c8_planes;
+
+ crtc_state->gamma_mode = i9xx_gamma_mode(crtc_state);
+
+ ret = intel_color_add_affected_planes(crtc_state);
+ if (ret)
+ return ret;
+
+ crtc_state->preload_luts = intel_can_preload_luts(crtc_state);
+
+ return 0;
+}
+
+static u32 chv_cgm_mode(const struct intel_crtc_state *crtc_state)
+{
+ u32 cgm_mode = 0;
+
+ if (crtc_state_is_legacy_gamma(crtc_state))
+ return 0;
+
+ if (crtc_state->hw.degamma_lut)
+ cgm_mode |= CGM_PIPE_MODE_DEGAMMA;
+ if (crtc_state->hw.ctm)
+ cgm_mode |= CGM_PIPE_MODE_CSC;
+ if (crtc_state->hw.gamma_lut)
+ cgm_mode |= CGM_PIPE_MODE_GAMMA;
+
+ return cgm_mode;
+}
+
+/*
+ * CHV color pipeline:
+ * u0.10 -> CGM degamma -> u0.14 -> CGM csc -> u0.14 -> CGM gamma ->
+ * u0.10 -> WGC csc -> u0.10 -> pipe gamma -> u0.10
+ *
+ * We always bypass the WGC csc and use the CGM csc
+ * instead since it has degamma and better precision.
+ */
+static int chv_color_check(struct intel_crtc_state *crtc_state)
+{
+ int ret;
+
+ ret = check_luts(crtc_state);
+ if (ret)
+ return ret;
+
+ /*
+ * Pipe gamma will be used only for the legacy LUT.
+ * Otherwise we bypass it and use the CGM gamma instead.
+ */
+ crtc_state->gamma_enable =
+ crtc_state_is_legacy_gamma(crtc_state) &&
+ !crtc_state->c8_planes;
+
+ crtc_state->gamma_mode = GAMMA_MODE_MODE_8BIT;
+
+ crtc_state->cgm_mode = chv_cgm_mode(crtc_state);
+
+ ret = intel_color_add_affected_planes(crtc_state);
+ if (ret)
+ return ret;
+
+ crtc_state->preload_luts = chv_can_preload_luts(crtc_state);
+
+ return 0;
+}
+
+static u32 ilk_gamma_mode(const struct intel_crtc_state *crtc_state)
+{
+ if (!crtc_state->gamma_enable ||
+ crtc_state_is_legacy_gamma(crtc_state))
+ return GAMMA_MODE_MODE_8BIT;
+ else
+ return GAMMA_MODE_MODE_10BIT;
+}
+
+static u32 ilk_csc_mode(const struct intel_crtc_state *crtc_state)
+{
+ /*
+ * CSC comes after the LUT in RGB->YCbCr mode.
+ * RGB->YCbCr needs the limited range offsets added to
+ * the output. RGB limited range output is handled by
+ * the hw automagically elsewhere.
+ */
+ if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
+ return CSC_BLACK_SCREEN_OFFSET;
+
+ return CSC_MODE_YUV_TO_RGB |
+ CSC_POSITION_BEFORE_GAMMA;
+}
+
+static int ilk_color_check(struct intel_crtc_state *crtc_state)
+{
+ int ret;
+
+ ret = check_luts(crtc_state);
+ if (ret)
+ return ret;
+
+ crtc_state->gamma_enable =
+ crtc_state->hw.gamma_lut &&
+ !crtc_state->c8_planes;
+
+ /*
+ * We don't currently expose the ctm on ilk/snb; RGB limited
+ * range output is handled by the hw automagically.
+ */
+ crtc_state->csc_enable =
+ crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB;
+
+ crtc_state->gamma_mode = ilk_gamma_mode(crtc_state);
+
+ crtc_state->csc_mode = ilk_csc_mode(crtc_state);
+
+ ret = intel_color_add_affected_planes(crtc_state);
+ if (ret)
+ return ret;
+
+ crtc_state->preload_luts = intel_can_preload_luts(crtc_state);
+
+ return 0;
+}
+
+static u32 ivb_gamma_mode(const struct intel_crtc_state *crtc_state)
+{
+ if (!crtc_state->gamma_enable ||
+ crtc_state_is_legacy_gamma(crtc_state))
+ return GAMMA_MODE_MODE_8BIT;
+ else if (crtc_state->hw.gamma_lut &&
+ crtc_state->hw.degamma_lut)
+ return GAMMA_MODE_MODE_SPLIT;
+ else
+ return GAMMA_MODE_MODE_10BIT;
+}
+
+static u32 ivb_csc_mode(const struct intel_crtc_state *crtc_state)
+{
+ bool limited_color_range = ilk_csc_limited_range(crtc_state);
+
+ /*
+ * CSC comes after the LUT in degamma, RGB->YCbCr,
+ * and RGB full->limited range mode.
+ */
+ if (crtc_state->hw.degamma_lut ||
+ crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB ||
+ limited_color_range)
+ return 0;
+
+ return CSC_POSITION_BEFORE_GAMMA;
+}
+
+static int ivb_color_check(struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ bool limited_color_range = ilk_csc_limited_range(crtc_state);
+ int ret;
+
+ ret = check_luts(crtc_state);
+ if (ret)
+ return ret;
+
+ if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB &&
+ crtc_state->hw.ctm) {
+ drm_dbg_kms(&dev_priv->drm,
+ "YCBCR and CTM together are not possible\n");
+ return -EINVAL;
+ }
+
+ crtc_state->gamma_enable =
+ (crtc_state->hw.gamma_lut ||
+ crtc_state->hw.degamma_lut) &&
+ !crtc_state->c8_planes;
+
+ crtc_state->csc_enable =
+ crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB ||
+ crtc_state->hw.ctm || limited_color_range;
+
+ crtc_state->gamma_mode = ivb_gamma_mode(crtc_state);
+
+ crtc_state->csc_mode = ivb_csc_mode(crtc_state);
+
+ ret = intel_color_add_affected_planes(crtc_state);
+ if (ret)
+ return ret;
+
+ crtc_state->preload_luts = intel_can_preload_luts(crtc_state);
+
+ return 0;
+}
+
+static u32 glk_gamma_mode(const struct intel_crtc_state *crtc_state)
+{
+ if (!crtc_state->gamma_enable ||
+ crtc_state_is_legacy_gamma(crtc_state))
+ return GAMMA_MODE_MODE_8BIT;
+ else
+ return GAMMA_MODE_MODE_10BIT;
+}
+
+static int glk_color_check(struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ int ret;
+
+ ret = check_luts(crtc_state);
+ if (ret)
+ return ret;
+
+ if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB &&
+ crtc_state->hw.ctm) {
+ drm_dbg_kms(&dev_priv->drm,
+ "YCBCR and CTM together are not possible\n");
+ return -EINVAL;
+ }
+
+ crtc_state->gamma_enable =
+ crtc_state->hw.gamma_lut &&
+ !crtc_state->c8_planes;
+
+ /* On GLK+ degamma LUT is controlled by csc_enable */
+ crtc_state->csc_enable =
+ crtc_state->hw.degamma_lut ||
+ crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB ||
+ crtc_state->hw.ctm || crtc_state->limited_color_range;
+
+ crtc_state->gamma_mode = glk_gamma_mode(crtc_state);
+
+ crtc_state->csc_mode = 0;
+
+ ret = intel_color_add_affected_planes(crtc_state);
+ if (ret)
+ return ret;
+
+ crtc_state->preload_luts = glk_can_preload_luts(crtc_state);
+
+ return 0;
+}
+
+static u32 icl_gamma_mode(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ u32 gamma_mode = 0;
+
+ if (crtc_state->hw.degamma_lut)
+ gamma_mode |= PRE_CSC_GAMMA_ENABLE;
+
+ if (crtc_state->hw.gamma_lut &&
+ !crtc_state->c8_planes)
+ gamma_mode |= POST_CSC_GAMMA_ENABLE;
+
+ if (!crtc_state->hw.gamma_lut ||
+ crtc_state_is_legacy_gamma(crtc_state))
+ gamma_mode |= GAMMA_MODE_MODE_8BIT;
+ /*
+ * Enable 10bit gamma on display ver 13+.
+ * TODO: Extend to logarithmic gamma once the new UAPI
+ * is accepted and implemented by a userspace consumer.
+ */
+ else if (DISPLAY_VER(i915) >= 13)
+ gamma_mode |= GAMMA_MODE_MODE_10BIT;
+ else
+ gamma_mode |= GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED;
+
+ return gamma_mode;
+}
+
+static u32 icl_csc_mode(const struct intel_crtc_state *crtc_state)
+{
+ u32 csc_mode = 0;
+
+ if (crtc_state->hw.ctm)
+ csc_mode |= ICL_CSC_ENABLE;
+
+ if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB ||
+ crtc_state->limited_color_range)
+ csc_mode |= ICL_OUTPUT_CSC_ENABLE;
+
+ return csc_mode;
+}
+
+static int icl_color_check(struct intel_crtc_state *crtc_state)
+{
+ int ret;
+
+ ret = check_luts(crtc_state);
+ if (ret)
+ return ret;
+
+ crtc_state->gamma_mode = icl_gamma_mode(crtc_state);
+
+ crtc_state->csc_mode = icl_csc_mode(crtc_state);
+
+ crtc_state->preload_luts = intel_can_preload_luts(crtc_state);
+
+ return 0;
+}
+
+static int i9xx_gamma_precision(const struct intel_crtc_state *crtc_state)
+{
+ if (!crtc_state->gamma_enable)
+ return 0;
+
+ switch (crtc_state->gamma_mode) {
+ case GAMMA_MODE_MODE_8BIT:
+ return 8;
+ case GAMMA_MODE_MODE_10BIT:
+ return 16;
+ default:
+ MISSING_CASE(crtc_state->gamma_mode);
+ return 0;
+ }
+}
+
+static int ilk_gamma_precision(const struct intel_crtc_state *crtc_state)
+{
+ if (!crtc_state->gamma_enable)
+ return 0;
+
+ if ((crtc_state->csc_mode & CSC_POSITION_BEFORE_GAMMA) == 0)
+ return 0;
+
+ switch (crtc_state->gamma_mode) {
+ case GAMMA_MODE_MODE_8BIT:
+ return 8;
+ case GAMMA_MODE_MODE_10BIT:
+ return 10;
+ default:
+ MISSING_CASE(crtc_state->gamma_mode);
+ return 0;
+ }
+}
+
+static int chv_gamma_precision(const struct intel_crtc_state *crtc_state)
+{
+ if (crtc_state->cgm_mode & CGM_PIPE_MODE_GAMMA)
+ return 10;
+ else
+ return i9xx_gamma_precision(crtc_state);
+}
+
+static int glk_gamma_precision(const struct intel_crtc_state *crtc_state)
+{
+ if (!crtc_state->gamma_enable)
+ return 0;
+
+ switch (crtc_state->gamma_mode) {
+ case GAMMA_MODE_MODE_8BIT:
+ return 8;
+ case GAMMA_MODE_MODE_10BIT:
+ return 10;
+ default:
+ MISSING_CASE(crtc_state->gamma_mode);
+ return 0;
+ }
+}
+
+static int icl_gamma_precision(const struct intel_crtc_state *crtc_state)
+{
+ if ((crtc_state->gamma_mode & POST_CSC_GAMMA_ENABLE) == 0)
+ return 0;
+
+ switch (crtc_state->gamma_mode & GAMMA_MODE_MODE_MASK) {
+ case GAMMA_MODE_MODE_8BIT:
+ return 8;
+ case GAMMA_MODE_MODE_10BIT:
+ return 10;
+ case GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED:
+ return 16;
+ default:
+ MISSING_CASE(crtc_state->gamma_mode);
+ return 0;
+ }
+}
+
+int intel_color_get_gamma_bit_precision(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+ if (HAS_GMCH(dev_priv)) {
+ if (IS_CHERRYVIEW(dev_priv))
+ return chv_gamma_precision(crtc_state);
+ else
+ return i9xx_gamma_precision(crtc_state);
+ } else {
+ if (DISPLAY_VER(dev_priv) >= 11)
+ return icl_gamma_precision(crtc_state);
+ else if (DISPLAY_VER(dev_priv) == 10)
+ return glk_gamma_precision(crtc_state);
+ else if (IS_IRONLAKE(dev_priv))
+ return ilk_gamma_precision(crtc_state);
+ }
+
+ return 0;
+}
+
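+/*
+ * Compare two LUT entries channel by channel, tolerating the precision
+ * the hardware throws away: with N bits of precision the low (16 - N)
+ * bits of each 16 bit channel value are allowed to differ.
+ */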
+static bool err_check(struct drm_color_lut *lut1,
+ struct drm_color_lut *lut2, u32 err)
+{
+ return ((abs((long)lut2->red - lut1->red)) <= err) &&
+ ((abs((long)lut2->blue - lut1->blue)) <= err) &&
+ ((abs((long)lut2->green - lut1->green)) <= err);
+}
+
+static bool intel_color_lut_entries_equal(struct drm_color_lut *lut1,
+ struct drm_color_lut *lut2,
+ int lut_size, u32 err)
+{
+ int i;
+
+ for (i = 0; i < lut_size; i++) {
+ if (!err_check(&lut1[i], &lut2[i], err))
+ return false;
+ }
+
+ return true;
+}
+
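+/*
+ * Compare a software LUT blob against a hardware readout blob. For the
+ * multi segmented gamma mode only the first 9 entries (the super fine
+ * segment) are compared, matching what icl_read_luts() reads back.
+ */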
+bool intel_color_lut_equal(struct drm_property_blob *blob1,
+ struct drm_property_blob *blob2,
+ u32 gamma_mode, u32 bit_precision)
+{
+ struct drm_color_lut *lut1, *lut2;
+ int lut_size1, lut_size2;
+ u32 err;
+
+ if (!blob1 != !blob2)
+ return false;
+
+ if (!blob1)
+ return true;
+
+ lut_size1 = drm_color_lut_size(blob1);
+ lut_size2 = drm_color_lut_size(blob2);
+
+ /* the sw and hw lut sizes must match */
+ if (lut_size1 != lut_size2)
+ return false;
+
+ lut1 = blob1->data;
+ lut2 = blob2->data;
+
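+ /* e.g. 10 bits of precision lets the low 6 bits differ (err = 0x3f) */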
+ err = 0xffff >> bit_precision;
+
+ /* check that the sw and hw lut entries are equal */
+ switch (gamma_mode & GAMMA_MODE_MODE_MASK) {
+ case GAMMA_MODE_MODE_8BIT:
+ case GAMMA_MODE_MODE_10BIT:
+ if (!intel_color_lut_entries_equal(lut1, lut2,
+ lut_size2, err))
+ return false;
+ break;
+ case GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED:
+ if (!intel_color_lut_entries_equal(lut1, lut2,
+ 9, err))
+ return false;
+ break;
+ default:
+ MISSING_CASE(gamma_mode);
+ return false;
+ }
+
+ return true;
+}
+
+static struct drm_property_blob *i9xx_read_lut_8(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ struct drm_property_blob *blob;
+ struct drm_color_lut *lut;
+ int i;
+
+ blob = drm_property_create_blob(&dev_priv->drm,
+ sizeof(struct drm_color_lut) * LEGACY_LUT_LENGTH,
+ NULL);
+ if (IS_ERR(blob))
+ return NULL;
+
+ lut = blob->data;
+
+ for (i = 0; i < LEGACY_LUT_LENGTH; i++) {
+ u32 val = intel_de_read_fw(dev_priv, PALETTE(pipe, i));
+
+ i9xx_lut_8_pack(&lut[i], val);
+ }
+
+ return blob;
+}
+
+static void i9xx_read_luts(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ if (!crtc_state->gamma_enable)
+ return;
+
+ crtc_state->hw.gamma_lut = i9xx_read_lut_8(crtc);
+}
+
+static struct drm_property_blob *i965_read_lut_10p6(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ int i, lut_size = INTEL_INFO(dev_priv)->display.color.gamma_lut_size;
+ enum pipe pipe = crtc->pipe;
+ struct drm_property_blob *blob;
+ struct drm_color_lut *lut;
+
+ blob = drm_property_create_blob(&dev_priv->drm,
+ sizeof(struct drm_color_lut) * lut_size,
+ NULL);
+ if (IS_ERR(blob))
+ return NULL;
+
+ lut = blob->data;
+
+ for (i = 0; i < lut_size - 1; i++) {
+ u32 ldw = intel_de_read_fw(dev_priv, PALETTE(pipe, 2 * i + 0));
+ u32 udw = intel_de_read_fw(dev_priv, PALETTE(pipe, 2 * i + 1));
+
+ i965_lut_10p6_pack(&lut[i], ldw, udw);
+ }
+
+ lut[i].red = i965_lut_11p6_max_pack(intel_de_read_fw(dev_priv, PIPEGCMAX(pipe, 0)));
+ lut[i].green = i965_lut_11p6_max_pack(intel_de_read_fw(dev_priv, PIPEGCMAX(pipe, 1)));
+ lut[i].blue = i965_lut_11p6_max_pack(intel_de_read_fw(dev_priv, PIPEGCMAX(pipe, 2)));
+
+ return blob;
+}
+
+static void i965_read_luts(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ if (!crtc_state->gamma_enable)
+ return;
+
+ if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
+ crtc_state->hw.gamma_lut = i9xx_read_lut_8(crtc);
+ else
+ crtc_state->hw.gamma_lut = i965_read_lut_10p6(crtc);
+}
+
+static struct drm_property_blob *chv_read_cgm_gamma(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ int i, lut_size = INTEL_INFO(dev_priv)->display.color.gamma_lut_size;
+ enum pipe pipe = crtc->pipe;
+ struct drm_property_blob *blob;
+ struct drm_color_lut *lut;
+
+ blob = drm_property_create_blob(&dev_priv->drm,
+ sizeof(struct drm_color_lut) * lut_size,
+ NULL);
+ if (IS_ERR(blob))
+ return NULL;
+
+ lut = blob->data;
+
+ for (i = 0; i < lut_size; i++) {
+ u32 ldw = intel_de_read_fw(dev_priv, CGM_PIPE_GAMMA(pipe, i, 0));
+ u32 udw = intel_de_read_fw(dev_priv, CGM_PIPE_GAMMA(pipe, i, 1));
+
+ chv_cgm_gamma_pack(&lut[i], ldw, udw);
+ }
+
+ return blob;
+}
+
+static void chv_read_luts(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ if (crtc_state->cgm_mode & CGM_PIPE_MODE_GAMMA)
+ crtc_state->hw.gamma_lut = chv_read_cgm_gamma(crtc);
+ else
+ i965_read_luts(crtc_state);
+}
+
+static struct drm_property_blob *ilk_read_lut_8(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ struct drm_property_blob *blob;
+ struct drm_color_lut *lut;
+ int i;
+
+ blob = drm_property_create_blob(&dev_priv->drm,
+ sizeof(struct drm_color_lut) * LEGACY_LUT_LENGTH,
+ NULL);
+ if (IS_ERR(blob))
+ return NULL;
+
+ lut = blob->data;
+
+ for (i = 0; i < LEGACY_LUT_LENGTH; i++) {
+ u32 val = intel_de_read_fw(dev_priv, LGC_PALETTE(pipe, i));
+
+ i9xx_lut_8_pack(&lut[i], val);
+ }
+
+ return blob;
+}
+
+static struct drm_property_blob *ilk_read_lut_10(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ int i, lut_size = INTEL_INFO(dev_priv)->display.color.gamma_lut_size;
+ enum pipe pipe = crtc->pipe;
+ struct drm_property_blob *blob;
+ struct drm_color_lut *lut;
+
+ blob = drm_property_create_blob(&dev_priv->drm,
+ sizeof(struct drm_color_lut) * lut_size,
+ NULL);
+ if (IS_ERR(blob))
+ return NULL;
+
+ lut = blob->data;
+
+ for (i = 0; i < lut_size; i++) {
+ u32 val = intel_de_read_fw(dev_priv, PREC_PALETTE(pipe, i));
+
+ ilk_lut_10_pack(&lut[i], val);
+ }
+
+ return blob;
+}
+
+static void ilk_read_luts(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ if (!crtc_state->gamma_enable)
+ return;
+
+ if ((crtc_state->csc_mode & CSC_POSITION_BEFORE_GAMMA) == 0)
+ return;
+
+ switch (crtc_state->gamma_mode) {
+ case GAMMA_MODE_MODE_8BIT:
+ crtc_state->hw.gamma_lut = ilk_read_lut_8(crtc);
+ break;
+ case GAMMA_MODE_MODE_10BIT:
+ crtc_state->hw.gamma_lut = ilk_read_lut_10(crtc);
+ break;
+ default:
+ MISSING_CASE(crtc_state->gamma_mode);
+ break;
+ }
+}
+
+/* On BDW+ the index auto increment mode actually works */
+static struct drm_property_blob *bdw_read_lut_10(struct intel_crtc *crtc,
+ u32 prec_index)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ int i, hw_lut_size = ivb_lut_10_size(prec_index);
+ int lut_size = INTEL_INFO(dev_priv)->display.color.gamma_lut_size;
+ enum pipe pipe = crtc->pipe;
+ struct drm_property_blob *blob;
+ struct drm_color_lut *lut;
+
+ drm_WARN_ON(&dev_priv->drm, lut_size != hw_lut_size);
+
+ blob = drm_property_create_blob(&dev_priv->drm,
+ sizeof(struct drm_color_lut) * lut_size,
+ NULL);
+ if (IS_ERR(blob))
+ return NULL;
+
+ lut = blob->data;
+
+ intel_de_write_fw(dev_priv, PREC_PAL_INDEX(pipe),
+ prec_index | PAL_PREC_AUTO_INCREMENT);
+
+ for (i = 0; i < lut_size; i++) {
+ u32 val = intel_de_read_fw(dev_priv, PREC_PAL_DATA(pipe));
+
+ ilk_lut_10_pack(&lut[i], val);
+ }
+
+ intel_de_write_fw(dev_priv, PREC_PAL_INDEX(pipe), 0);
+
+ return blob;
+}
+
+static void glk_read_luts(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ if (!crtc_state->gamma_enable)
+ return;
+
+ switch (crtc_state->gamma_mode) {
+ case GAMMA_MODE_MODE_8BIT:
+ crtc_state->hw.gamma_lut = ilk_read_lut_8(crtc);
+ break;
+ case GAMMA_MODE_MODE_10BIT:
+ crtc_state->hw.gamma_lut = bdw_read_lut_10(crtc, PAL_PREC_INDEX_VALUE(0));
+ break;
+ default:
+ MISSING_CASE(crtc_state->gamma_mode);
+ break;
+ }
+}
+
+static void icl_color_commit_arm(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+
+ /*
+ * We don't (yet) allow userspace to control the pipe background color,
+ * so force it to black.
+ */
+ intel_de_write(i915, SKL_BOTTOM_COLOR(pipe), 0);
+
+ intel_de_write(i915, GAMMA_MODE(crtc->pipe),
+ crtc_state->gamma_mode);
+
+ intel_de_write_fw(i915, PIPE_CSC_MODE(crtc->pipe),
+ crtc_state->csc_mode);
+}
+
+static struct drm_property_blob *
+icl_read_lut_multi_segment(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ int i, lut_size = INTEL_INFO(dev_priv)->display.color.gamma_lut_size;
+ enum pipe pipe = crtc->pipe;
+ struct drm_property_blob *blob;
+ struct drm_color_lut *lut;
+
+ blob = drm_property_create_blob(&dev_priv->drm,
+ sizeof(struct drm_color_lut) * lut_size,
+ NULL);
+ if (IS_ERR(blob))
+ return NULL;
+
+ lut = blob->data;
+
+ intel_de_write_fw(dev_priv, PREC_PAL_MULTI_SEG_INDEX(pipe),
+ PAL_PREC_AUTO_INCREMENT);
+
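+ /* each entry takes two consecutive DATA reads (ldw then udw) */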
+ for (i = 0; i < 9; i++) {
+ u32 ldw = intel_de_read_fw(dev_priv, PREC_PAL_MULTI_SEG_DATA(pipe));
+ u32 udw = intel_de_read_fw(dev_priv, PREC_PAL_MULTI_SEG_DATA(pipe));
+
+ icl_lut_multi_seg_pack(&lut[i], ldw, udw);
+ }
+
+ intel_de_write_fw(dev_priv, PREC_PAL_MULTI_SEG_INDEX(pipe), 0);
+
+ /*
+ * FIXME: readouts from the PAL_PREC_DATA register aren't giving
+ * correct values for the fine and coarse segments.
+ * Restrict readout to the super fine segment for now.
+ */
+
+ return blob;
+}
+
+static void icl_read_luts(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ if ((crtc_state->gamma_mode & POST_CSC_GAMMA_ENABLE) == 0)
+ return;
+
+ switch (crtc_state->gamma_mode & GAMMA_MODE_MODE_MASK) {
+ case GAMMA_MODE_MODE_8BIT:
+ crtc_state->hw.gamma_lut = ilk_read_lut_8(crtc);
+ break;
+ case GAMMA_MODE_MODE_10BIT:
+ crtc_state->hw.gamma_lut = bdw_read_lut_10(crtc, PAL_PREC_INDEX_VALUE(0));
+ break;
+ case GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED:
+ crtc_state->hw.gamma_lut = icl_read_lut_multi_segment(crtc);
+ break;
+ default:
+ MISSING_CASE(crtc_state->gamma_mode);
+ break;
+ }
+}
+
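+/*
+ * Per-platform color management vtables. The ivb/hsw/bdw/skl entries
+ * have no .read_luts hook yet, so LUT readout is skipped on those
+ * platforms (see intel_color_get_config()).
+ */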
+static const struct intel_color_funcs chv_color_funcs = {
+ .color_check = chv_color_check,
+ .color_commit_arm = i9xx_color_commit_arm,
+ .load_luts = chv_load_luts,
+ .read_luts = chv_read_luts,
+};
+
+static const struct intel_color_funcs i965_color_funcs = {
+ .color_check = i9xx_color_check,
+ .color_commit_arm = i9xx_color_commit_arm,
+ .load_luts = i965_load_luts,
+ .read_luts = i965_read_luts,
+};
+
+static const struct intel_color_funcs i9xx_color_funcs = {
+ .color_check = i9xx_color_check,
+ .color_commit_arm = i9xx_color_commit_arm,
+ .load_luts = i9xx_load_luts,
+ .read_luts = i9xx_read_luts,
+};
+
+static const struct intel_color_funcs icl_color_funcs = {
+ .color_check = icl_color_check,
+ .color_commit_noarm = icl_color_commit_noarm,
+ .color_commit_arm = icl_color_commit_arm,
+ .load_luts = icl_load_luts,
+ .read_luts = icl_read_luts,
+};
+
+static const struct intel_color_funcs glk_color_funcs = {
+ .color_check = glk_color_check,
+ .color_commit_noarm = skl_color_commit_noarm,
+ .color_commit_arm = skl_color_commit_arm,
+ .load_luts = glk_load_luts,
+ .read_luts = glk_read_luts,
+};
+
+static const struct intel_color_funcs skl_color_funcs = {
+ .color_check = ivb_color_check,
+ .color_commit_noarm = skl_color_commit_noarm,
+ .color_commit_arm = skl_color_commit_arm,
+ .load_luts = bdw_load_luts,
+ .read_luts = NULL,
+};
+
+static const struct intel_color_funcs bdw_color_funcs = {
+ .color_check = ivb_color_check,
+ .color_commit_noarm = ilk_color_commit_noarm,
+ .color_commit_arm = hsw_color_commit_arm,
+ .load_luts = bdw_load_luts,
+ .read_luts = NULL,
+};
+
+static const struct intel_color_funcs hsw_color_funcs = {
+ .color_check = ivb_color_check,
+ .color_commit_noarm = ilk_color_commit_noarm,
+ .color_commit_arm = hsw_color_commit_arm,
+ .load_luts = ivb_load_luts,
+ .read_luts = NULL,
+};
+
+static const struct intel_color_funcs ivb_color_funcs = {
+ .color_check = ivb_color_check,
+ .color_commit_noarm = ilk_color_commit_noarm,
+ .color_commit_arm = ilk_color_commit_arm,
+ .load_luts = ivb_load_luts,
+ .read_luts = NULL,
+};
+
+static const struct intel_color_funcs ilk_color_funcs = {
+ .color_check = ilk_color_check,
+ .color_commit_noarm = ilk_color_commit_noarm,
+ .color_commit_arm = ilk_color_commit_arm,
+ .load_luts = ilk_load_luts,
+ .read_luts = ilk_read_luts,
+};
+
+void intel_color_init(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ bool has_ctm = INTEL_INFO(dev_priv)->display.color.degamma_lut_size != 0;
+
+ drm_mode_crtc_set_gamma_size(&crtc->base, 256);
+
+ if (HAS_GMCH(dev_priv)) {
+ if (IS_CHERRYVIEW(dev_priv)) {
+ dev_priv->display.funcs.color = &chv_color_funcs;
+ } else if (DISPLAY_VER(dev_priv) >= 4) {
+ dev_priv->display.funcs.color = &i965_color_funcs;
+ } else {
+ dev_priv->display.funcs.color = &i9xx_color_funcs;
+ }
+ } else {
+ if (DISPLAY_VER(dev_priv) >= 11)
+ dev_priv->display.funcs.color = &icl_color_funcs;
+ else if (DISPLAY_VER(dev_priv) == 10)
+ dev_priv->display.funcs.color = &glk_color_funcs;
+ else if (DISPLAY_VER(dev_priv) == 9)
+ dev_priv->display.funcs.color = &skl_color_funcs;
+ else if (DISPLAY_VER(dev_priv) == 8)
+ dev_priv->display.funcs.color = &bdw_color_funcs;
+ else if (DISPLAY_VER(dev_priv) == 7) {
+ if (IS_HASWELL(dev_priv))
+ dev_priv->display.funcs.color = &hsw_color_funcs;
+ else
+ dev_priv->display.funcs.color = &ivb_color_funcs;
+ } else
+ dev_priv->display.funcs.color = &ilk_color_funcs;
+ }
+
+ drm_crtc_enable_color_mgmt(&crtc->base,
+ INTEL_INFO(dev_priv)->display.color.degamma_lut_size,
+ has_ctm,
+ INTEL_INFO(dev_priv)->display.color.gamma_lut_size);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_color.h b/drivers/gpu/drm/i915/display/intel_color.h
new file mode 100644
index 000000000..fd873425e
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_color.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_COLOR_H__
+#define __INTEL_COLOR_H__
+
+#include <linux/types.h>
+
+struct intel_crtc_state;
+struct intel_crtc;
+struct drm_property_blob;
+
+void intel_color_init(struct intel_crtc *crtc);
+int intel_color_check(struct intel_crtc_state *crtc_state);
+void intel_color_commit_noarm(const struct intel_crtc_state *crtc_state);
+void intel_color_commit_arm(const struct intel_crtc_state *crtc_state);
+void intel_color_load_luts(const struct intel_crtc_state *crtc_state);
+void intel_color_get_config(struct intel_crtc_state *crtc_state);
+int intel_color_get_gamma_bit_precision(const struct intel_crtc_state *crtc_state);
+bool intel_color_lut_equal(struct drm_property_blob *blob1,
+ struct drm_property_blob *blob2,
+ u32 gamma_mode, u32 bit_precision);
+
+#endif /* __INTEL_COLOR_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.c b/drivers/gpu/drm/i915/display/intel_combo_phy.c
new file mode 100644
index 000000000..64890f39c
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_combo_phy.c
@@ -0,0 +1,439 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include "intel_combo_phy.h"
+#include "intel_combo_phy_regs.h"
+#include "intel_de.h"
+#include "intel_display_types.h"
+
+#define for_each_combo_phy(__dev_priv, __phy) \
+ for ((__phy) = PHY_A; (__phy) < I915_MAX_PHYS; (__phy)++) \
+ for_each_if(intel_phy_is_combo(__dev_priv, __phy))
+
+#define for_each_combo_phy_reverse(__dev_priv, __phy) \
+ for ((__phy) = I915_MAX_PHYS; (__phy)-- > PHY_A;) \
+ for_each_if(intel_phy_is_combo(__dev_priv, __phy))
+
+enum {
+ PROCMON_0_85V_DOT_0,
+ PROCMON_0_95V_DOT_0,
+ PROCMON_0_95V_DOT_1,
+ PROCMON_1_05V_DOT_0,
+ PROCMON_1_05V_DOT_1,
+};
+
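+/*
+ * Reference values for the PORT_COMP_DW1/9/10 registers, one set per
+ * process/voltage combination as reported by PORT_COMP_DW3.
+ */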
+static const struct icl_procmon {
+ const char *name;
+ u32 dw1, dw9, dw10;
+} icl_procmon_values[] = {
+ [PROCMON_0_85V_DOT_0] = {
+ .name = "0.85V dot0 (low-voltage)",
+ .dw1 = 0x00000000, .dw9 = 0x62AB67BB, .dw10 = 0x51914F96,
+ },
+ [PROCMON_0_95V_DOT_0] = {
+ .name = "0.95V dot0",
+ .dw1 = 0x00000000, .dw9 = 0x86E172C7, .dw10 = 0x77CA5EAB,
+ },
+ [PROCMON_0_95V_DOT_1] = {
+ .name = "0.95V dot1",
+ .dw1 = 0x00000000, .dw9 = 0x93F87FE1, .dw10 = 0x8AE871C5,
+ },
+ [PROCMON_1_05V_DOT_0] = {
+ .name = "1.05V dot0",
+ .dw1 = 0x00000000, .dw9 = 0x98FA82DD, .dw10 = 0x89E46DC1,
+ },
+ [PROCMON_1_05V_DOT_1] = {
+ .name = "1.05V dot1",
+ .dw1 = 0x00440000, .dw9 = 0x9A00AB25, .dw10 = 0x8AE38FF1,
+ },
+};
+
+static const struct icl_procmon *
+icl_get_procmon_ref_values(struct drm_i915_private *dev_priv, enum phy phy)
+{
+ const struct icl_procmon *procmon;
+ u32 val;
+
+ val = intel_de_read(dev_priv, ICL_PORT_COMP_DW3(phy));
+ switch (val & (PROCESS_INFO_MASK | VOLTAGE_INFO_MASK)) {
+ default:
+ MISSING_CASE(val);
+ fallthrough;
+ case VOLTAGE_INFO_0_85V | PROCESS_INFO_DOT_0:
+ procmon = &icl_procmon_values[PROCMON_0_85V_DOT_0];
+ break;
+ case VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_0:
+ procmon = &icl_procmon_values[PROCMON_0_95V_DOT_0];
+ break;
+ case VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_1:
+ procmon = &icl_procmon_values[PROCMON_0_95V_DOT_1];
+ break;
+ case VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_0:
+ procmon = &icl_procmon_values[PROCMON_1_05V_DOT_0];
+ break;
+ case VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_1:
+ procmon = &icl_procmon_values[PROCMON_1_05V_DOT_1];
+ break;
+ }
+
+ return procmon;
+}
+
+static void icl_set_procmon_ref_values(struct drm_i915_private *dev_priv,
+ enum phy phy)
+{
+ const struct icl_procmon *procmon;
+ u32 val;
+
+ procmon = icl_get_procmon_ref_values(dev_priv, phy);
+
+ val = intel_de_read(dev_priv, ICL_PORT_COMP_DW1(phy));
+ val &= ~((0xff << 16) | 0xff);
+ val |= procmon->dw1;
+ intel_de_write(dev_priv, ICL_PORT_COMP_DW1(phy), val);
+
+ intel_de_write(dev_priv, ICL_PORT_COMP_DW9(phy), procmon->dw9);
+ intel_de_write(dev_priv, ICL_PORT_COMP_DW10(phy), procmon->dw10);
+}
+
+static bool check_phy_reg(struct drm_i915_private *dev_priv,
+ enum phy phy, i915_reg_t reg, u32 mask,
+ u32 expected_val)
+{
+ u32 val = intel_de_read(dev_priv, reg);
+
+ if ((val & mask) != expected_val) {
+ drm_dbg(&dev_priv->drm,
+ "Combo PHY %c reg %08x state mismatch: "
+ "current %08x mask %08x expected %08x\n",
+ phy_name(phy),
+ reg.reg, val, mask, expected_val);
+ return false;
+ }
+
+ return true;
+}
+
+static bool icl_verify_procmon_ref_values(struct drm_i915_private *dev_priv,
+ enum phy phy)
+{
+ const struct icl_procmon *procmon;
+ bool ret;
+
+ procmon = icl_get_procmon_ref_values(dev_priv, phy);
+
+ drm_dbg_kms(&dev_priv->drm,
+ "Combo PHY %c Voltage/Process Info : %s\n",
+ phy_name(phy), procmon->name);
+
+ ret = check_phy_reg(dev_priv, phy, ICL_PORT_COMP_DW1(phy),
+ (0xff << 16) | 0xff, procmon->dw1);
+ ret &= check_phy_reg(dev_priv, phy, ICL_PORT_COMP_DW9(phy),
+ -1U, procmon->dw9);
+ ret &= check_phy_reg(dev_priv, phy, ICL_PORT_COMP_DW10(phy),
+ -1U, procmon->dw10);
+
+ return ret;
+}
+
+static bool has_phy_misc(struct drm_i915_private *i915, enum phy phy)
+{
+ /*
+ * Some platforms only expect PHY_MISC to be programmed for PHY-A and
+ * PHY-B and may not even have instances of the register for the
+ * other combo PHYs.
+ *
+ * ADL-S technically has three instances of PHY_MISC, but only requires
+ * that we program it for PHY A.
+ */
+
+ if (IS_ALDERLAKE_S(i915))
+ return phy == PHY_A;
+ else if (IS_JSL_EHL(i915) ||
+ IS_ROCKETLAKE(i915) ||
+ IS_DG1(i915))
+ return phy < PHY_C;
+
+ return true;
+}
+
+static bool icl_combo_phy_enabled(struct drm_i915_private *dev_priv,
+ enum phy phy)
+{
+ /* The PHY C added by EHL has no PHY_MISC register */
+ if (!has_phy_misc(dev_priv, phy))
+ return intel_de_read(dev_priv, ICL_PORT_COMP_DW0(phy)) & COMP_INIT;
+ else
+ return !(intel_de_read(dev_priv, ICL_PHY_MISC(phy)) &
+ ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN) &&
+ (intel_de_read(dev_priv, ICL_PORT_COMP_DW0(phy)) & COMP_INIT);
+}
+
+static bool ehl_vbt_ddi_d_present(struct drm_i915_private *i915)
+{
+ bool ddi_a_present = intel_bios_is_port_present(i915, PORT_A);
+ bool ddi_d_present = intel_bios_is_port_present(i915, PORT_D);
+ bool dsi_present = intel_bios_is_dsi_present(i915, NULL);
+
+ /*
+ * VBT's 'dvo port' field for child devices references the DDI, not
+ * the PHY. So if combo PHY A is wired up to drive an external
+ * display, we should see a child device present on PORT_D and
+ * nothing on PORT_A and no DSI.
+ */
+ if (ddi_d_present && !ddi_a_present && !dsi_present)
+ return true;
+
+ /*
+ * If we encounter a VBT that claims to have an external display on
+ * DDI-D _and_ an internal display on DDI-A/DSI leave an error message
+ * in the log and let the internal display win.
+ */
+ if (ddi_d_present)
+ drm_err(&i915->drm,
+ "VBT claims to have both internal and external displays on PHY A. Configuring for internal.\n");
+
+ return false;
+}
+
+static bool phy_is_master(struct drm_i915_private *dev_priv, enum phy phy)
+{
+ /*
+ * Certain PHYs are connected to compensation resistors and act
+ * as masters to other PHYs.
+ *
+ * ICL,TGL:
+ * A(master) -> B(slave), C(slave)
+ * RKL,DG1:
+ * A(master) -> B(slave)
+ * C(master) -> D(slave)
+ * ADL-S:
+ * A(master) -> B(slave), C(slave)
+ * D(master) -> E(slave)
+ *
+ * We must set the IREFGEN bit for any PHY acting as a master
+ * to another PHY.
+ */
+ if (phy == PHY_A)
+ return true;
+ else if (IS_ALDERLAKE_S(dev_priv))
+ return phy == PHY_D;
+ else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
+ return phy == PHY_C;
+
+ return false;
+}
+
+static bool icl_combo_phy_verify_state(struct drm_i915_private *dev_priv,
+ enum phy phy)
+{
+ bool ret = true;
+ u32 expected_val = 0;
+
+ if (!icl_combo_phy_enabled(dev_priv, phy))
+ return false;
+
+ if (DISPLAY_VER(dev_priv) >= 12) {
+ ret &= check_phy_reg(dev_priv, phy, ICL_PORT_TX_DW8_LN(0, phy),
+ ICL_PORT_TX_DW8_ODCC_CLK_SEL |
+ ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_MASK,
+ ICL_PORT_TX_DW8_ODCC_CLK_SEL |
+ ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_DIV2);
+
+ ret &= check_phy_reg(dev_priv, phy, ICL_PORT_PCS_DW1_LN(0, phy),
+ DCC_MODE_SELECT_MASK,
+ DCC_MODE_SELECT_CONTINUOSLY);
+ }
+
+ ret &= icl_verify_procmon_ref_values(dev_priv, phy);
+
+ if (phy_is_master(dev_priv, phy)) {
+ ret &= check_phy_reg(dev_priv, phy, ICL_PORT_COMP_DW8(phy),
+ IREFGEN, IREFGEN);
+
+ if (IS_JSL_EHL(dev_priv)) {
+ if (ehl_vbt_ddi_d_present(dev_priv))
+ expected_val = ICL_PHY_MISC_MUX_DDID;
+
+ ret &= check_phy_reg(dev_priv, phy, ICL_PHY_MISC(phy),
+ ICL_PHY_MISC_MUX_DDID,
+ expected_val);
+ }
+ }
+
+ ret &= check_phy_reg(dev_priv, phy, ICL_PORT_CL_DW5(phy),
+ CL_POWER_DOWN_ENABLE, CL_POWER_DOWN_ENABLE);
+
+ return ret;
+}
+
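+/*
+ * PWR_DOWN_LN in PORT_CL_DW10 is a mask of lanes to power down: e.g. a
+ * 2 lane non-reversed setup powers down lanes 3 and 2, while a
+ * reversed one powers down lanes 1 and 0 instead.
+ */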
+void intel_combo_phy_power_up_lanes(struct drm_i915_private *dev_priv,
+ enum phy phy, bool is_dsi,
+ int lane_count, bool lane_reversal)
+{
+ u8 lane_mask;
+ u32 val;
+
+ if (is_dsi) {
+ drm_WARN_ON(&dev_priv->drm, lane_reversal);
+
+ switch (lane_count) {
+ case 1:
+ lane_mask = PWR_DOWN_LN_3_1_0;
+ break;
+ case 2:
+ lane_mask = PWR_DOWN_LN_3_1;
+ break;
+ case 3:
+ lane_mask = PWR_DOWN_LN_3;
+ break;
+ default:
+ MISSING_CASE(lane_count);
+ fallthrough;
+ case 4:
+ lane_mask = PWR_UP_ALL_LANES;
+ break;
+ }
+ } else {
+ switch (lane_count) {
+ case 1:
+ lane_mask = lane_reversal ? PWR_DOWN_LN_2_1_0 :
+ PWR_DOWN_LN_3_2_1;
+ break;
+ case 2:
+ lane_mask = lane_reversal ? PWR_DOWN_LN_1_0 :
+ PWR_DOWN_LN_3_2;
+ break;
+ default:
+ MISSING_CASE(lane_count);
+ fallthrough;
+ case 4:
+ lane_mask = PWR_UP_ALL_LANES;
+ break;
+ }
+ }
+
+ val = intel_de_read(dev_priv, ICL_PORT_CL_DW10(phy));
+ val &= ~PWR_DOWN_LN_MASK;
+ val |= lane_mask;
+ intel_de_write(dev_priv, ICL_PORT_CL_DW10(phy), val);
+}
+
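+/*
+ * Main combo PHY init sequence: skip PHYs that already verify clean,
+ * then program the PHY_MISC mux/power bits where present, the ODCC/DCC
+ * settings on display ver 12+, the procmon references, IREFGEN on
+ * master PHYs, and finally kick off compensation init and lane power
+ * down sequencing.
+ */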
+static void icl_combo_phys_init(struct drm_i915_private *dev_priv)
+{
+ enum phy phy;
+
+ for_each_combo_phy(dev_priv, phy) {
+ u32 val;
+
+ if (icl_combo_phy_verify_state(dev_priv, phy)) {
+ drm_dbg(&dev_priv->drm,
+ "Combo PHY %c already enabled, won't reprogram it.\n",
+ phy_name(phy));
+ continue;
+ }
+
+ if (!has_phy_misc(dev_priv, phy))
+ goto skip_phy_misc;
+
+ /*
+ * EHL's combo PHY A can be hooked up to either an external
+ * display (via DDI-D) or an internal display (via DDI-A or
+ * the DSI DPHY). This is a motherboard design decision that
+ * can't be changed on the fly, so initialize the PHY's mux
+ * based on whether our VBT indicates the presence of any
+ * "internal" child devices.
+ */
+ val = intel_de_read(dev_priv, ICL_PHY_MISC(phy));
+ if (IS_JSL_EHL(dev_priv) && phy == PHY_A) {
+ val &= ~ICL_PHY_MISC_MUX_DDID;
+
+ if (ehl_vbt_ddi_d_present(dev_priv))
+ val |= ICL_PHY_MISC_MUX_DDID;
+ }
+
+ val &= ~ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
+ intel_de_write(dev_priv, ICL_PHY_MISC(phy), val);
+
+skip_phy_misc:
+ if (DISPLAY_VER(dev_priv) >= 12) {
+ val = intel_de_read(dev_priv, ICL_PORT_TX_DW8_LN(0, phy));
+ val &= ~ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_MASK;
+ val |= ICL_PORT_TX_DW8_ODCC_CLK_SEL;
+ val |= ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_DIV2;
+ intel_de_write(dev_priv, ICL_PORT_TX_DW8_GRP(phy), val);
+
+ val = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_LN(0, phy));
+ val &= ~DCC_MODE_SELECT_MASK;
+ val |= DCC_MODE_SELECT_CONTINUOSLY;
+ intel_de_write(dev_priv, ICL_PORT_PCS_DW1_GRP(phy), val);
+ }
+
+ icl_set_procmon_ref_values(dev_priv, phy);
+
+ if (phy_is_master(dev_priv, phy)) {
+ val = intel_de_read(dev_priv, ICL_PORT_COMP_DW8(phy));
+ val |= IREFGEN;
+ intel_de_write(dev_priv, ICL_PORT_COMP_DW8(phy), val);
+ }
+
+ val = intel_de_read(dev_priv, ICL_PORT_COMP_DW0(phy));
+ val |= COMP_INIT;
+ intel_de_write(dev_priv, ICL_PORT_COMP_DW0(phy), val);
+
+ val = intel_de_read(dev_priv, ICL_PORT_CL_DW5(phy));
+ val |= CL_POWER_DOWN_ENABLE;
+ intel_de_write(dev_priv, ICL_PORT_CL_DW5(phy), val);
+ }
+}
+
+static void icl_combo_phys_uninit(struct drm_i915_private *dev_priv)
+{
+ enum phy phy;
+
+ for_each_combo_phy_reverse(dev_priv, phy) {
+ u32 val;
+
+ if (phy == PHY_A &&
+ !icl_combo_phy_verify_state(dev_priv, phy)) {
+ if (IS_TIGERLAKE(dev_priv) || IS_DG1(dev_priv)) {
+ /*
+ * A known problem with old ifwi:
+ * https://gitlab.freedesktop.org/drm/intel/-/issues/2411
+ * Suppress the warning for CI. Remove ASAP!
+ */
+ drm_dbg_kms(&dev_priv->drm,
+ "Combo PHY %c HW state changed unexpectedly\n",
+ phy_name(phy));
+ } else {
+ drm_warn(&dev_priv->drm,
+ "Combo PHY %c HW state changed unexpectedly\n",
+ phy_name(phy));
+ }
+ }
+
+ if (!has_phy_misc(dev_priv, phy))
+ goto skip_phy_misc;
+
+ val = intel_de_read(dev_priv, ICL_PHY_MISC(phy));
+ val |= ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
+ intel_de_write(dev_priv, ICL_PHY_MISC(phy), val);
+
+skip_phy_misc:
+ val = intel_de_read(dev_priv, ICL_PORT_COMP_DW0(phy));
+ val &= ~COMP_INIT;
+ intel_de_write(dev_priv, ICL_PORT_COMP_DW0(phy), val);
+ }
+}
+
+void intel_combo_phy_init(struct drm_i915_private *i915)
+{
+ icl_combo_phys_init(i915);
+}
+
+void intel_combo_phy_uninit(struct drm_i915_private *i915)
+{
+ icl_combo_phys_uninit(i915);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.h b/drivers/gpu/drm/i915/display/intel_combo_phy.h
new file mode 100644
index 000000000..660886f86
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_combo_phy.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_COMBO_PHY_H__
+#define __INTEL_COMBO_PHY_H__
+
+#include <linux/types.h>
+
+struct drm_i915_private;
+enum phy;
+
+void intel_combo_phy_init(struct drm_i915_private *dev_priv);
+void intel_combo_phy_uninit(struct drm_i915_private *dev_priv);
+void intel_combo_phy_power_up_lanes(struct drm_i915_private *dev_priv,
+ enum phy phy, bool is_dsi,
+ int lane_count, bool lane_reversal);
+
+#endif /* __INTEL_COMBO_PHY_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy_regs.h b/drivers/gpu/drm/i915/display/intel_combo_phy_regs.h
new file mode 100644
index 000000000..2ed65193c
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_combo_phy_regs.h
@@ -0,0 +1,162 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __INTEL_COMBO_PHY_REGS__
+#define __INTEL_COMBO_PHY_REGS__
+
+#include "i915_reg_defs.h"
+
+#define _ICL_COMBOPHY_A 0x162000
+#define _ICL_COMBOPHY_B 0x6C000
+#define _EHL_COMBOPHY_C 0x160000
+#define _RKL_COMBOPHY_D 0x161000
+#define _ADL_COMBOPHY_E 0x16B000
+
+#define _ICL_COMBOPHY(phy) _PICK(phy, _ICL_COMBOPHY_A, \
+ _ICL_COMBOPHY_B, \
+ _EHL_COMBOPHY_C, \
+ _RKL_COMBOPHY_D, \
+ _ADL_COMBOPHY_E)
+
+/* ICL Port CL_DW registers */
+#define _ICL_PORT_CL_DW(dw, phy) (_ICL_COMBOPHY(phy) + \
+ 4 * (dw))
+
+#define ICL_PORT_CL_DW5(phy) _MMIO(_ICL_PORT_CL_DW(5, phy))
+#define CL_POWER_DOWN_ENABLE (1 << 4)
+#define SUS_CLOCK_CONFIG (3 << 0)
+
+#define ICL_PORT_CL_DW10(phy) _MMIO(_ICL_PORT_CL_DW(10, phy))
+#define PG_SEQ_DELAY_OVERRIDE_MASK (3 << 25)
+#define PG_SEQ_DELAY_OVERRIDE_SHIFT 25
+#define PG_SEQ_DELAY_OVERRIDE_ENABLE (1 << 24)
+#define PWR_UP_ALL_LANES (0x0 << 4)
+#define PWR_DOWN_LN_3_2_1 (0xe << 4)
+#define PWR_DOWN_LN_3_2 (0xc << 4)
+#define PWR_DOWN_LN_3 (0x8 << 4)
+#define PWR_DOWN_LN_2_1_0 (0x7 << 4)
+#define PWR_DOWN_LN_1_0 (0x3 << 4)
+#define PWR_DOWN_LN_3_1 (0xa << 4)
+#define PWR_DOWN_LN_3_1_0 (0xb << 4)
+#define PWR_DOWN_LN_MASK (0xf << 4)
+#define PWR_DOWN_LN_SHIFT 4
+#define EDP4K2K_MODE_OVRD_EN (1 << 3)
+#define EDP4K2K_MODE_OVRD_OPTIMIZED (1 << 2)
+
+#define ICL_PORT_CL_DW12(phy) _MMIO(_ICL_PORT_CL_DW(12, phy))
+#define ICL_LANE_ENABLE_AUX (1 << 0)
+
+/* ICL Port COMP_DW registers */
+#define _ICL_PORT_COMP 0x100
+#define _ICL_PORT_COMP_DW(dw, phy) (_ICL_COMBOPHY(phy) + \
+ _ICL_PORT_COMP + 4 * (dw))
+
+#define ICL_PORT_COMP_DW0(phy) _MMIO(_ICL_PORT_COMP_DW(0, phy))
+#define COMP_INIT (1 << 31)
+
+#define ICL_PORT_COMP_DW1(phy) _MMIO(_ICL_PORT_COMP_DW(1, phy))
+
+#define ICL_PORT_COMP_DW3(phy) _MMIO(_ICL_PORT_COMP_DW(3, phy))
+#define PROCESS_INFO_DOT_0 (0 << 26)
+#define PROCESS_INFO_DOT_1 (1 << 26)
+#define PROCESS_INFO_DOT_4 (2 << 26)
+#define PROCESS_INFO_MASK (7 << 26)
+#define PROCESS_INFO_SHIFT 26
+#define VOLTAGE_INFO_0_85V (0 << 24)
+#define VOLTAGE_INFO_0_95V (1 << 24)
+#define VOLTAGE_INFO_1_05V (2 << 24)
+#define VOLTAGE_INFO_MASK (3 << 24)
+#define VOLTAGE_INFO_SHIFT 24
+
+#define ICL_PORT_COMP_DW8(phy) _MMIO(_ICL_PORT_COMP_DW(8, phy))
+#define IREFGEN (1 << 24)
+
+#define ICL_PORT_COMP_DW9(phy) _MMIO(_ICL_PORT_COMP_DW(9, phy))
+
+#define ICL_PORT_COMP_DW10(phy) _MMIO(_ICL_PORT_COMP_DW(10, phy))
+
+/* ICL Port PCS registers */
+#define _ICL_PORT_PCS_AUX 0x300
+#define _ICL_PORT_PCS_GRP 0x600
+#define _ICL_PORT_PCS_LN(ln) (0x800 + (ln) * 0x100)
+#define _ICL_PORT_PCS_DW_AUX(dw, phy) (_ICL_COMBOPHY(phy) + \
+ _ICL_PORT_PCS_AUX + 4 * (dw))
+#define _ICL_PORT_PCS_DW_GRP(dw, phy) (_ICL_COMBOPHY(phy) + \
+ _ICL_PORT_PCS_GRP + 4 * (dw))
+#define _ICL_PORT_PCS_DW_LN(dw, ln, phy) (_ICL_COMBOPHY(phy) + \
+ _ICL_PORT_PCS_LN(ln) + 4 * (dw))
+#define ICL_PORT_PCS_DW1_AUX(phy) _MMIO(_ICL_PORT_PCS_DW_AUX(1, phy))
+#define ICL_PORT_PCS_DW1_GRP(phy) _MMIO(_ICL_PORT_PCS_DW_GRP(1, phy))
+#define ICL_PORT_PCS_DW1_LN(ln, phy) _MMIO(_ICL_PORT_PCS_DW_LN(1, ln, phy))
+#define DCC_MODE_SELECT_MASK (0x3 << 20)
+#define DCC_MODE_SELECT_CONTINUOSLY (0x3 << 20)
+#define COMMON_KEEPER_EN (1 << 26)
+#define LATENCY_OPTIM_MASK (0x3 << 2)
+#define LATENCY_OPTIM_VAL(x) ((x) << 2)
+
+/* ICL Port TX registers */
+#define _ICL_PORT_TX_AUX 0x380
+#define _ICL_PORT_TX_GRP 0x680
+#define _ICL_PORT_TX_LN(ln) (0x880 + (ln) * 0x100)
+
+#define _ICL_PORT_TX_DW_AUX(dw, phy) (_ICL_COMBOPHY(phy) + \
+ _ICL_PORT_TX_AUX + 4 * (dw))
+#define _ICL_PORT_TX_DW_GRP(dw, phy) (_ICL_COMBOPHY(phy) + \
+ _ICL_PORT_TX_GRP + 4 * (dw))
+#define _ICL_PORT_TX_DW_LN(dw, ln, phy) (_ICL_COMBOPHY(phy) + \
+ _ICL_PORT_TX_LN(ln) + 4 * (dw))
+
+#define ICL_PORT_TX_DW2_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(2, phy))
+#define ICL_PORT_TX_DW2_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(2, phy))
+#define ICL_PORT_TX_DW2_LN(ln, phy) _MMIO(_ICL_PORT_TX_DW_LN(2, ln, phy))
+#define SWING_SEL_UPPER(x) (((x) >> 3) << 15)
+#define SWING_SEL_UPPER_MASK (1 << 15)
+#define SWING_SEL_LOWER(x) (((x) & 0x7) << 11)
+#define SWING_SEL_LOWER_MASK (0x7 << 11)
+#define FRC_LATENCY_OPTIM_MASK (0x7 << 8)
+#define FRC_LATENCY_OPTIM_VAL(x) ((x) << 8)
+#define RCOMP_SCALAR(x) ((x) << 0)
+#define RCOMP_SCALAR_MASK (0xFF << 0)
+
+#define ICL_PORT_TX_DW4_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(4, phy))
+#define ICL_PORT_TX_DW4_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(4, phy))
+#define ICL_PORT_TX_DW4_LN(ln, phy) _MMIO(_ICL_PORT_TX_DW_LN(4, ln, phy))
+#define LOADGEN_SELECT (1 << 31)
+#define POST_CURSOR_1(x) ((x) << 12)
+#define POST_CURSOR_1_MASK (0x3F << 12)
+#define POST_CURSOR_2(x) ((x) << 6)
+#define POST_CURSOR_2_MASK (0x3F << 6)
+#define CURSOR_COEFF(x) ((x) << 0)
+#define CURSOR_COEFF_MASK (0x3F << 0)
+
+#define ICL_PORT_TX_DW5_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(5, phy))
+#define ICL_PORT_TX_DW5_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(5, phy))
+#define ICL_PORT_TX_DW5_LN(ln, phy) _MMIO(_ICL_PORT_TX_DW_LN(5, ln, phy))
+#define TX_TRAINING_EN (1 << 31)
+#define TAP2_DISABLE (1 << 30)
+#define TAP3_DISABLE (1 << 29)
+#define SCALING_MODE_SEL(x) ((x) << 18)
+#define SCALING_MODE_SEL_MASK (0x7 << 18)
+#define RTERM_SELECT(x) ((x) << 3)
+#define RTERM_SELECT_MASK (0x7 << 3)
+
+#define ICL_PORT_TX_DW7_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(7, phy))
+#define ICL_PORT_TX_DW7_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(7, phy))
+#define ICL_PORT_TX_DW7_LN(ln, phy) _MMIO(_ICL_PORT_TX_DW_LN(7, ln, phy))
+#define N_SCALAR(x) ((x) << 24)
+#define N_SCALAR_MASK (0x7F << 24)
+
+#define ICL_PORT_TX_DW8_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(8, phy))
+#define ICL_PORT_TX_DW8_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(8, phy))
+#define ICL_PORT_TX_DW8_LN(ln, phy) _MMIO(_ICL_PORT_TX_DW_LN(8, ln, phy))
+#define ICL_PORT_TX_DW8_ODCC_CLK_SEL REG_BIT(31)
+#define ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_MASK REG_GENMASK(30, 29)
+#define ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_DIV2 REG_FIELD_PREP(ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_MASK, 0x1)
+
+#define _ICL_DPHY_CHKN_REG 0x194
+#define ICL_DPHY_CHKN(port) _MMIO(_ICL_COMBOPHY(port) + _ICL_DPHY_CHKN_REG)
+#define ICL_DPHY_CHKN_AFE_OVER_PPI_STRAP REG_BIT(7)
+
+#endif /* __INTEL_COMBO_PHY_REGS__ */
diff --git a/drivers/gpu/drm/i915/display/intel_connector.c b/drivers/gpu/drm/i915/display/intel_connector.c
new file mode 100644
index 000000000..8bb296f3d
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_connector.c
@@ -0,0 +1,295 @@
+/*
+ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
+ * Copyright (c) 2007, 2010 Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/i2c.h>
+#include <linux/slab.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_edid.h>
+
+#include "i915_drv.h"
+#include "intel_backlight.h"
+#include "intel_connector.h"
+#include "intel_display_debugfs.h"
+#include "intel_display_types.h"
+#include "intel_hdcp.h"
+#include "intel_panel.h"
+
+int intel_connector_init(struct intel_connector *connector)
+{
+ struct intel_digital_connector_state *conn_state;
+
+ /*
+ * Allocate enough memory to hold intel_digital_connector_state.
+ * This might be a few bytes too many, but for connectors that don't
+ * need it we'll free the state and allocate a smaller one on the first
+ * successful commit anyway.
+ */
+ conn_state = kzalloc(sizeof(*conn_state), GFP_KERNEL);
+ if (!conn_state)
+ return -ENOMEM;
+
+ __drm_atomic_helper_connector_reset(&connector->base,
+ &conn_state->base);
+
+ intel_panel_init_alloc(connector);
+
+ return 0;
+}
+
+struct intel_connector *intel_connector_alloc(void)
+{
+ struct intel_connector *connector;
+
+ connector = kzalloc(sizeof(*connector), GFP_KERNEL);
+ if (!connector)
+ return NULL;
+
+ if (intel_connector_init(connector) < 0) {
+ kfree(connector);
+ return NULL;
+ }
+
+ return connector;
+}
+
+/*
+ * Free the bits allocated by intel_connector_alloc.
+ * This should only be used after intel_connector_alloc has returned
+ * successfully, and before drm_connector_init returns successfully.
+ * Otherwise the destroy callbacks for the connector and the state should
+ * take care of proper cleanup/free (see intel_connector_destroy).
+ */
+void intel_connector_free(struct intel_connector *connector)
+{
+ kfree(to_intel_digital_connector_state(connector->base.state));
+ kfree(connector);
+}
+
+/*
+ * Connector type independent destroy hook for drm_connector_funcs.
+ */
+void intel_connector_destroy(struct drm_connector *connector)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+
+ kfree(intel_connector->detect_edid);
+
+ intel_hdcp_cleanup(intel_connector);
+
+ if (!IS_ERR_OR_NULL(intel_connector->edid))
+ kfree(intel_connector->edid);
+
+ intel_panel_fini(intel_connector);
+
+ drm_connector_cleanup(connector);
+
+ if (intel_connector->port)
+ drm_dp_mst_put_port_malloc(intel_connector->port);
+
+ kfree(connector);
+}
+
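+/*
+ * Note the i915_inject_probe_failure() call below: connector
+ * registration doubles as a fault injection point for exercising
+ * the error unwind paths during driver probe.
+ */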
+int intel_connector_register(struct drm_connector *connector)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ int ret;
+
+ ret = intel_backlight_device_register(intel_connector);
+ if (ret)
+ goto err;
+
+ if (i915_inject_probe_failure(to_i915(connector->dev))) {
+ ret = -EFAULT;
+ goto err_backlight;
+ }
+
+ intel_connector_debugfs_add(intel_connector);
+
+ return 0;
+
+err_backlight:
+ intel_backlight_device_unregister(intel_connector);
+err:
+ return ret;
+}
+
+void intel_connector_unregister(struct drm_connector *connector)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+
+ intel_backlight_device_unregister(intel_connector);
+}
+
+void intel_connector_attach_encoder(struct intel_connector *connector,
+ struct intel_encoder *encoder)
+{
+ connector->encoder = encoder;
+ drm_connector_attach_encoder(&connector->base, &encoder->base);
+}
+
+/*
+ * Simple connector->get_hw_state implementation for encoders that support only
+ * one connector and no cloning, and hence the encoder state determines the state
+ * of the connector.
+ */
+bool intel_connector_get_hw_state(struct intel_connector *connector)
+{
+ enum pipe pipe = 0;
+ struct intel_encoder *encoder = intel_attached_encoder(connector);
+
+ return encoder->get_hw_state(encoder, &pipe);
+}
+
+enum pipe intel_connector_get_pipe(struct intel_connector *connector)
+{
+ struct drm_device *dev = connector->base.dev;
+
+ drm_WARN_ON(dev,
+ !drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+
+ if (!connector->base.state->crtc)
+ return INVALID_PIPE;
+
+ return to_intel_crtc(connector->base.state->crtc)->pipe;
+}
+
+/**
+ * intel_connector_update_modes - update connector from edid
+ * @connector: DRM connector device to use
+ * @edid: previously read EDID information
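+ *
+ * Returns: the number of modes added to the connector.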
+ */
+int intel_connector_update_modes(struct drm_connector *connector,
+ struct edid *edid)
+{
+ int ret;
+
+ drm_connector_update_edid_property(connector, edid);
+ ret = drm_add_edid_modes(connector, edid);
+
+ return ret;
+}
+
+/**
+ * intel_ddc_get_modes - get modelist from monitor
+ * @connector: DRM connector device to use
+ * @adapter: i2c adapter
+ *
+ * Fetch the EDID information from @connector using the DDC bus.
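+ *
+ * Returns: the number of modes added, or 0 if no EDID could be read.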
+ */
+int intel_ddc_get_modes(struct drm_connector *connector,
+ struct i2c_adapter *adapter)
+{
+ struct edid *edid;
+ int ret;
+
+ edid = drm_get_edid(connector, adapter);
+ if (!edid)
+ return 0;
+
+ ret = intel_connector_update_modes(connector, edid);
+ kfree(edid);
+
+ return ret;
+}
+
+static const struct drm_prop_enum_list force_audio_names[] = {
+ { HDMI_AUDIO_OFF_DVI, "force-dvi" },
+ { HDMI_AUDIO_OFF, "off" },
+ { HDMI_AUDIO_AUTO, "auto" },
+ { HDMI_AUDIO_ON, "on" },
+};
+
+void
+intel_attach_force_audio_property(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_property *prop;
+
+ prop = dev_priv->display.properties.force_audio;
+ if (prop == NULL) {
+ prop = drm_property_create_enum(dev, 0,
+ "audio",
+ force_audio_names,
+ ARRAY_SIZE(force_audio_names));
+ if (prop == NULL)
+ return;
+
+ dev_priv->display.properties.force_audio = prop;
+ }
+ drm_object_attach_property(&connector->base, prop, 0);
+}
+
+static const struct drm_prop_enum_list broadcast_rgb_names[] = {
+ { INTEL_BROADCAST_RGB_AUTO, "Automatic" },
+ { INTEL_BROADCAST_RGB_FULL, "Full" },
+ { INTEL_BROADCAST_RGB_LIMITED, "Limited 16:235" },
+};
+
+void
+intel_attach_broadcast_rgb_property(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_property *prop;
+
+ prop = dev_priv->display.properties.broadcast_rgb;
+ if (prop == NULL) {
+ prop = drm_property_create_enum(dev, DRM_MODE_PROP_ENUM,
+ "Broadcast RGB",
+ broadcast_rgb_names,
+ ARRAY_SIZE(broadcast_rgb_names));
+ if (prop == NULL)
+ return;
+
+ dev_priv->display.properties.broadcast_rgb = prop;
+ }
+
+ drm_object_attach_property(&connector->base, prop, 0);
+}
+
+void
+intel_attach_aspect_ratio_property(struct drm_connector *connector)
+{
+ if (!drm_mode_create_aspect_ratio_property(connector->dev))
+ drm_object_attach_property(&connector->base,
+ connector->dev->mode_config.aspect_ratio_property,
+ DRM_MODE_PICTURE_ASPECT_NONE);
+}
+
+void
+intel_attach_hdmi_colorspace_property(struct drm_connector *connector)
+{
+ if (!drm_mode_create_hdmi_colorspace_property(connector))
+ drm_connector_attach_colorspace_property(connector);
+}
+
+void
+intel_attach_dp_colorspace_property(struct drm_connector *connector)
+{
+ if (!drm_mode_create_dp_colorspace_property(connector))
+ drm_connector_attach_colorspace_property(connector);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_connector.h b/drivers/gpu/drm/i915/display/intel_connector.h
new file mode 100644
index 000000000..661a37a3c
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_connector.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_CONNECTOR_H__
+#define __INTEL_CONNECTOR_H__
+
+#include "intel_display.h"
+
+struct drm_connector;
+struct edid;
+struct i2c_adapter;
+struct intel_connector;
+struct intel_encoder;
+
+int intel_connector_init(struct intel_connector *connector);
+struct intel_connector *intel_connector_alloc(void);
+void intel_connector_free(struct intel_connector *connector);
+void intel_connector_destroy(struct drm_connector *connector);
+int intel_connector_register(struct drm_connector *connector);
+void intel_connector_unregister(struct drm_connector *connector);
+void intel_connector_attach_encoder(struct intel_connector *connector,
+ struct intel_encoder *encoder);
+bool intel_connector_get_hw_state(struct intel_connector *connector);
+enum pipe intel_connector_get_pipe(struct intel_connector *connector);
+int intel_connector_update_modes(struct drm_connector *connector,
+ struct edid *edid);
+int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
+void intel_attach_force_audio_property(struct drm_connector *connector);
+void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
+void intel_attach_aspect_ratio_property(struct drm_connector *connector);
+void intel_attach_hdmi_colorspace_property(struct drm_connector *connector);
+void intel_attach_dp_colorspace_property(struct drm_connector *connector);
+
+#endif /* __INTEL_CONNECTOR_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
new file mode 100644
index 000000000..e60b2cf84
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_crt.c
@@ -0,0 +1,1124 @@
+/*
+ * Copyright © 2006-2007 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ */
+
+#include <linux/dmi.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_probe_helper.h>
+
+#include "i915_drv.h"
+#include "intel_connector.h"
+#include "intel_crt.h"
+#include "intel_crtc.h"
+#include "intel_ddi.h"
+#include "intel_ddi_buf_trans.h"
+#include "intel_de.h"
+#include "intel_display_types.h"
+#include "intel_fdi.h"
+#include "intel_fifo_underrun.h"
+#include "intel_gmbus.h"
+#include "intel_hotplug.h"
+#include "intel_pch_display.h"
+#include "intel_pch_refclk.h"
+
+/* Here's the desired hotplug mode */
+#define ADPA_HOTPLUG_BITS (ADPA_CRT_HOTPLUG_PERIOD_128 | \
+ ADPA_CRT_HOTPLUG_WARMUP_10MS | \
+ ADPA_CRT_HOTPLUG_SAMPLE_4S | \
+ ADPA_CRT_HOTPLUG_VOLTAGE_50 | \
+ ADPA_CRT_HOTPLUG_VOLREF_325MV | \
+ ADPA_CRT_HOTPLUG_ENABLE)
+
+struct intel_crt {
+ struct intel_encoder base;
+ /* DPMS state is stored in the connector, which we need in the
+ * encoder's enable/disable callbacks */
+ struct intel_connector *connector;
+ bool force_hotplug_required;
+ i915_reg_t adpa_reg;
+};
+
+static struct intel_crt *intel_encoder_to_crt(struct intel_encoder *encoder)
+{
+ return container_of(encoder, struct intel_crt, base);
+}
+
+static struct intel_crt *intel_attached_crt(struct intel_connector *connector)
+{
+ return intel_encoder_to_crt(intel_attached_encoder(connector));
+}
+
+bool intel_crt_port_enabled(struct drm_i915_private *dev_priv,
+ i915_reg_t adpa_reg, enum pipe *pipe)
+{
+ u32 val;
+
+ val = intel_de_read(dev_priv, adpa_reg);
+
+ /* asserts want to know the pipe even if the port is disabled */
+ if (HAS_PCH_CPT(dev_priv))
+ *pipe = (val & ADPA_PIPE_SEL_MASK_CPT) >> ADPA_PIPE_SEL_SHIFT_CPT;
+ else
+ *pipe = (val & ADPA_PIPE_SEL_MASK) >> ADPA_PIPE_SEL_SHIFT;
+
+ return val & ADPA_DAC_ENABLE;
+}
+
+static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_crt *crt = intel_encoder_to_crt(encoder);
+ intel_wakeref_t wakeref;
+ bool ret;
+
+ wakeref = intel_display_power_get_if_enabled(dev_priv,
+ encoder->power_domain);
+ if (!wakeref)
+ return false;
+
+ ret = intel_crt_port_enabled(dev_priv, crt->adpa_reg, pipe);
+
+ intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
+
+ return ret;
+}
+
+static unsigned int intel_crt_get_flags(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_crt *crt = intel_encoder_to_crt(encoder);
+ u32 tmp, flags = 0;
+
+ tmp = intel_de_read(dev_priv, crt->adpa_reg);
+
+ if (tmp & ADPA_HSYNC_ACTIVE_HIGH)
+ flags |= DRM_MODE_FLAG_PHSYNC;
+ else
+ flags |= DRM_MODE_FLAG_NHSYNC;
+
+ if (tmp & ADPA_VSYNC_ACTIVE_HIGH)
+ flags |= DRM_MODE_FLAG_PVSYNC;
+ else
+ flags |= DRM_MODE_FLAG_NVSYNC;
+
+ return flags;
+}
+
+static void intel_crt_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config)
+{
+ pipe_config->output_types |= BIT(INTEL_OUTPUT_ANALOG);
+
+ pipe_config->hw.adjusted_mode.flags |= intel_crt_get_flags(encoder);
+
+ pipe_config->hw.adjusted_mode.crtc_clock = pipe_config->port_clock;
+}
+
+static void hsw_crt_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config)
+{
+ lpt_pch_get_config(pipe_config);
+
+ hsw_ddi_get_config(encoder, pipe_config);
+
+ pipe_config->hw.adjusted_mode.flags &= ~(DRM_MODE_FLAG_PHSYNC |
+ DRM_MODE_FLAG_NHSYNC |
+ DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_NVSYNC);
+ pipe_config->hw.adjusted_mode.flags |= intel_crt_get_flags(encoder);
+}
+
+/* Note: The caller is required to filter out dpms modes not supported by the
+ * platform. */
+static void intel_crt_set_dpms(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int mode)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_crt *crt = intel_encoder_to_crt(encoder);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
+ u32 adpa;
+
+ if (DISPLAY_VER(dev_priv) >= 5)
+ adpa = ADPA_HOTPLUG_BITS;
+ else
+ adpa = 0;
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ adpa |= ADPA_HSYNC_ACTIVE_HIGH;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ adpa |= ADPA_VSYNC_ACTIVE_HIGH;
+
+ /* For CPT allow 3 pipe config, for others just use A or B */
+ if (HAS_PCH_LPT(dev_priv))
+ ; /* Those bits don't exist here */
+ else if (HAS_PCH_CPT(dev_priv))
+ adpa |= ADPA_PIPE_SEL_CPT(crtc->pipe);
+ else
+ adpa |= ADPA_PIPE_SEL(crtc->pipe);
+
+ if (!HAS_PCH_SPLIT(dev_priv))
+ intel_de_write(dev_priv, BCLRPAT(crtc->pipe), 0);
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ adpa |= ADPA_DAC_ENABLE;
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ adpa |= ADPA_DAC_ENABLE | ADPA_HSYNC_CNTL_DISABLE;
+ break;
+ case DRM_MODE_DPMS_SUSPEND:
+ adpa |= ADPA_DAC_ENABLE | ADPA_VSYNC_CNTL_DISABLE;
+ break;
+ case DRM_MODE_DPMS_OFF:
+ adpa |= ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE;
+ break;
+ }
+
+ intel_de_write(dev_priv, crt->adpa_reg, adpa);
+}
+
+static void intel_disable_crt(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ intel_crt_set_dpms(encoder, old_crtc_state, DRM_MODE_DPMS_OFF);
+}
+
+static void pch_disable_crt(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+}
+
+static void pch_post_disable_crt(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ intel_disable_crt(state, encoder, old_crtc_state, old_conn_state);
+}
+
+static void hsw_disable_crt(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
+ drm_WARN_ON(&dev_priv->drm, !old_crtc_state->has_pch_encoder);
+
+ intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
+}
+
+static void hsw_post_disable_crt(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
+ intel_crtc_vblank_off(old_crtc_state);
+
+ intel_disable_transcoder(old_crtc_state);
+
+ intel_ddi_disable_transcoder_func(old_crtc_state);
+
+ ilk_pfit_disable(old_crtc_state);
+
+ intel_ddi_disable_pipe_clock(old_crtc_state);
+
+ pch_post_disable_crt(state, encoder, old_crtc_state, old_conn_state);
+
+ lpt_pch_disable(state, crtc);
+
+ hsw_fdi_disable(encoder);
+
+ drm_WARN_ON(&dev_priv->drm, !old_crtc_state->has_pch_encoder);
+
+ intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
+}
+
+static void hsw_pre_pll_enable_crt(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
+ drm_WARN_ON(&dev_priv->drm, !crtc_state->has_pch_encoder);
+
+ intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
+}
+
+static void hsw_pre_enable_crt(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ enum pipe pipe = crtc->pipe;
+
+ drm_WARN_ON(&dev_priv->drm, !crtc_state->has_pch_encoder);
+
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
+
+ hsw_fdi_link_train(encoder, crtc_state);
+
+ intel_ddi_enable_pipe_clock(encoder, crtc_state);
+}
+
+static void hsw_enable_crt(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ enum pipe pipe = crtc->pipe;
+
+ drm_WARN_ON(&dev_priv->drm, !crtc_state->has_pch_encoder);
+
+ intel_ddi_enable_transcoder_func(encoder, crtc_state);
+
+ intel_enable_transcoder(crtc_state);
+
+ lpt_pch_enable(state, crtc);
+
+ intel_crtc_vblank_on(crtc_state);
+
+ intel_crt_set_dpms(encoder, crtc_state, DRM_MODE_DPMS_ON);
+
+ intel_crtc_wait_for_next_vblank(crtc);
+ intel_crtc_wait_for_next_vblank(crtc);
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
+ intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
+}
+
+static void intel_enable_crt(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ intel_crt_set_dpms(encoder, crtc_state, DRM_MODE_DPMS_ON);
+}
+
+static enum drm_mode_status
+intel_crt_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ int max_dotclk = dev_priv->max_dotclk_freq;
+ enum drm_mode_status status;
+ int max_clock;
+
+ status = intel_cpu_transcoder_mode_valid(dev_priv, mode);
+ if (status != MODE_OK)
+ return status;
+
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
+ if (mode->clock < 25000)
+ return MODE_CLOCK_LOW;
+
+ if (HAS_PCH_LPT(dev_priv))
+ max_clock = 180000;
+ else if (IS_VALLEYVIEW(dev_priv))
+ /*
+ * 270 MHz due to current DPLL limits,
+ * DAC limit supposedly 355 MHz.
+ */
+ max_clock = 270000;
+ else if (IS_DISPLAY_VER(dev_priv, 3, 4))
+ max_clock = 400000;
+ else
+ max_clock = 350000;
+ if (mode->clock > max_clock)
+ return MODE_CLOCK_HIGH;
+
+ if (mode->clock > max_dotclk)
+ return MODE_CLOCK_HIGH;
+
+ /* The FDI receiver on LPT only supports 8bpc and only has 2 lanes. */
+ if (HAS_PCH_LPT(dev_priv) &&
+ ilk_get_lanes_required(mode->clock, 270000, 24) > 2)
+ return MODE_CLOCK_HIGH;
+
+ /* HSW/BDW FDI limited to 4k */
+ if (mode->hdisplay > 4096)
+ return MODE_H_ILLEGAL;
+
+ return MODE_OK;
+}
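+
+/*
+ * To summarize the limits checked above: a 25 MHz dotclock floor
+ * everywhere, then 180 MHz on LPT (2-lane FDI), 270 MHz on VLV,
+ * 400 MHz on display ver 3/4 and 350 MHz otherwise, all further
+ * capped by the platform max dotclock.
+ */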
+
+static int intel_crt_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_display_mode *adjusted_mode =
+ &pipe_config->hw.adjusted_mode;
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return -EINVAL;
+
+ pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
+
+ return 0;
+}
+
+static int pch_crt_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_display_mode *adjusted_mode =
+ &pipe_config->hw.adjusted_mode;
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return -EINVAL;
+
+ pipe_config->has_pch_encoder = true;
+ pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
+
+ return 0;
+}
+
+static int hsw_crt_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct drm_display_mode *adjusted_mode =
+ &pipe_config->hw.adjusted_mode;
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return -EINVAL;
+
+ /* HSW/BDW FDI limited to 4k */
+ if (adjusted_mode->crtc_hdisplay > 4096 ||
+ adjusted_mode->crtc_hblank_start > 4096)
+ return -EINVAL;
+
+ pipe_config->has_pch_encoder = true;
+ pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
+
+ /* LPT FDI RX only supports 8bpc. */
+ if (HAS_PCH_LPT(dev_priv)) {
+ if (pipe_config->bw_constrained && pipe_config->pipe_bpp < 24) {
+ drm_dbg_kms(&dev_priv->drm,
+ "LPT only supports 24bpp\n");
+ return -EINVAL;
+ }
+
+ pipe_config->pipe_bpp = 24;
+ }
+
+ /* FDI must always be 2.7 GHz */
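+	/*
+	 * (port_clock is the symbol clock in kHz: 270000 kHz x 10 bits
+	 * per symbol with 8b/10b encoding gives the 2.7 Gbps bit rate.)
+	 */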
+ pipe_config->port_clock = 135000 * 2;
+
+ adjusted_mode->crtc_clock = lpt_iclkip(pipe_config);
+
+ return 0;
+}
+
+static bool ilk_crt_detect_hotplug(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector));
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ u32 adpa;
+ bool ret;
+
+ /* The first time through, trigger an explicit detection cycle */
+ if (crt->force_hotplug_required) {
+ bool turn_off_dac = HAS_PCH_SPLIT(dev_priv);
+ u32 save_adpa;
+
+ crt->force_hotplug_required = false;
+
+ save_adpa = adpa = intel_de_read(dev_priv, crt->adpa_reg);
+ drm_dbg_kms(&dev_priv->drm,
+ "trigger hotplug detect cycle: adpa=0x%x\n", adpa);
+
+ adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER;
+ if (turn_off_dac)
+ adpa &= ~ADPA_DAC_ENABLE;
+
+ intel_de_write(dev_priv, crt->adpa_reg, adpa);
+
+ if (intel_de_wait_for_clear(dev_priv,
+ crt->adpa_reg,
+ ADPA_CRT_HOTPLUG_FORCE_TRIGGER,
+ 1000))
+ drm_dbg_kms(&dev_priv->drm,
+ "timed out waiting for FORCE_TRIGGER");
+
+ if (turn_off_dac) {
+ intel_de_write(dev_priv, crt->adpa_reg, save_adpa);
+ intel_de_posting_read(dev_priv, crt->adpa_reg);
+ }
+ }
+
+ /* Check the status to see if both blue and green are on now */
+ adpa = intel_de_read(dev_priv, crt->adpa_reg);
+ if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0)
+ ret = true;
+ else
+ ret = false;
+ drm_dbg_kms(&dev_priv->drm, "ironlake hotplug adpa=0x%x, result %d\n",
+ adpa, ret);
+
+ return ret;
+}
+
+static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector));
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ bool reenable_hpd;
+ u32 adpa;
+ bool ret;
+ u32 save_adpa;
+
+ /*
+ * Doing a force trigger causes a hpd interrupt to get sent, which can
+ * get us stuck in a loop if we're polling:
+ * - We enable power wells and reset the ADPA
+ * - output_poll_exec does force probe on VGA, triggering a hpd
+ * - HPD handler waits for poll to unlock dev->mode_config.mutex
+ * - output_poll_exec shuts off the ADPA, unlocks
+ * dev->mode_config.mutex
+ * - HPD handler runs, resets ADPA and brings us back to the start
+ *
+ * Just disable HPD interrupts here to prevent this
+ */
+ reenable_hpd = intel_hpd_disable(dev_priv, crt->base.hpd_pin);
+
+ save_adpa = adpa = intel_de_read(dev_priv, crt->adpa_reg);
+ drm_dbg_kms(&dev_priv->drm,
+ "trigger hotplug detect cycle: adpa=0x%x\n", adpa);
+
+ adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER;
+
+ intel_de_write(dev_priv, crt->adpa_reg, adpa);
+
+ if (intel_de_wait_for_clear(dev_priv, crt->adpa_reg,
+ ADPA_CRT_HOTPLUG_FORCE_TRIGGER, 1000)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "timed out waiting for FORCE_TRIGGER");
+ intel_de_write(dev_priv, crt->adpa_reg, save_adpa);
+ }
+
+ /* Check the status to see if both blue and green are on now */
+ adpa = intel_de_read(dev_priv, crt->adpa_reg);
+ if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0)
+ ret = true;
+ else
+ ret = false;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "valleyview hotplug adpa=0x%x, result %d\n", adpa, ret);
+
+ if (reenable_hpd)
+ intel_hpd_enable(dev_priv, crt->base.hpd_pin);
+
+ return ret;
+}
+
+static bool intel_crt_detect_hotplug(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ u32 stat;
+ bool ret = false;
+ int i, tries = 0;
+
+ if (HAS_PCH_SPLIT(dev_priv))
+ return ilk_crt_detect_hotplug(connector);
+
+ if (IS_VALLEYVIEW(dev_priv))
+ return valleyview_crt_detect_hotplug(connector);
+
+ /*
+	 * On 4 series desktop, the CRT detect sequence needs to be done twice
+ * to get a reliable result.
+ */
+
+ if (IS_G45(dev_priv))
+ tries = 2;
+ else
+ tries = 1;
+
+	for (i = 0; i < tries; i++) {
+ /* turn on the FORCE_DETECT */
+ i915_hotplug_interrupt_update(dev_priv,
+ CRT_HOTPLUG_FORCE_DETECT,
+ CRT_HOTPLUG_FORCE_DETECT);
+ /* wait for FORCE_DETECT to go off */
+ if (intel_de_wait_for_clear(dev_priv, PORT_HOTPLUG_EN,
+ CRT_HOTPLUG_FORCE_DETECT, 1000))
+ drm_dbg_kms(&dev_priv->drm,
+ "timed out waiting for FORCE_DETECT to go off");
+ }
+
+ stat = intel_de_read(dev_priv, PORT_HOTPLUG_STAT);
+ if ((stat & CRT_HOTPLUG_MONITOR_MASK) != CRT_HOTPLUG_MONITOR_NONE)
+ ret = true;
+
+ /* clear the interrupt we just generated, if any */
+ intel_de_write(dev_priv, PORT_HOTPLUG_STAT, CRT_HOTPLUG_INT_STATUS);
+
+ i915_hotplug_interrupt_update(dev_priv, CRT_HOTPLUG_FORCE_DETECT, 0);
+
+ return ret;
+}
+
+static struct edid *intel_crt_get_edid(struct drm_connector *connector,
+ struct i2c_adapter *i2c)
+{
+ struct edid *edid;
+
+ edid = drm_get_edid(connector, i2c);
+
+ if (!edid && !intel_gmbus_is_forced_bit(i2c)) {
+ drm_dbg_kms(connector->dev,
+ "CRT GMBUS EDID read failed, retry using GPIO bit-banging\n");
+ intel_gmbus_force_bit(i2c, true);
+ edid = drm_get_edid(connector, i2c);
+ intel_gmbus_force_bit(i2c, false);
+ }
+
+ return edid;
+}
+
+/* local version of intel_ddc_get_modes() to use intel_crt_get_edid() */
+static int intel_crt_ddc_get_modes(struct drm_connector *connector,
+ struct i2c_adapter *adapter)
+{
+ struct edid *edid;
+ int ret;
+
+ edid = intel_crt_get_edid(connector, adapter);
+ if (!edid)
+ return 0;
+
+ ret = intel_connector_update_modes(connector, edid);
+ kfree(edid);
+
+ return ret;
+}
+
+static bool intel_crt_detect_ddc(struct drm_connector *connector)
+{
+ struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector));
+ struct drm_i915_private *dev_priv = to_i915(crt->base.base.dev);
+ struct edid *edid;
+ struct i2c_adapter *i2c;
+ bool ret = false;
+
+ i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->display.vbt.crt_ddc_pin);
+ edid = intel_crt_get_edid(connector, i2c);
+
+ if (edid) {
+ bool is_digital = edid->input & DRM_EDID_INPUT_DIGITAL;
+
+ /*
+ * This may be a DVI-I connector with a shared DDC
+ * link between analog and digital outputs, so we
+ * have to check the EDID input spec of the attached device.
+ */
+ if (!is_digital) {
+ drm_dbg_kms(&dev_priv->drm,
+ "CRT detected via DDC:0x50 [EDID]\n");
+ ret = true;
+ } else {
+ drm_dbg_kms(&dev_priv->drm,
+ "CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
+ }
+ } else {
+ drm_dbg_kms(&dev_priv->drm,
+ "CRT not detected via DDC:0x50 [no valid EDID found]\n");
+ }
+
+ kfree(edid);
+
+ return ret;
+}
+
+static enum drm_connector_status
+intel_crt_load_detect(struct intel_crt *crt, u32 pipe)
+{
+ struct drm_device *dev = crt->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_uncore *uncore = &dev_priv->uncore;
+ u32 save_bclrpat;
+ u32 save_vtotal;
+ u32 vtotal, vactive;
+ u32 vsample;
+ u32 vblank, vblank_start, vblank_end;
+ u32 dsl;
+ i915_reg_t bclrpat_reg, vtotal_reg,
+ vblank_reg, vsync_reg, pipeconf_reg, pipe_dsl_reg;
+ u8 st00;
+ enum drm_connector_status status;
+
+ drm_dbg_kms(&dev_priv->drm, "starting load-detect on CRT\n");
+
+ bclrpat_reg = BCLRPAT(pipe);
+ vtotal_reg = VTOTAL(pipe);
+ vblank_reg = VBLANK(pipe);
+ vsync_reg = VSYNC(pipe);
+ pipeconf_reg = PIPECONF(pipe);
+ pipe_dsl_reg = PIPEDSL(pipe);
+
+ save_bclrpat = intel_uncore_read(uncore, bclrpat_reg);
+ save_vtotal = intel_uncore_read(uncore, vtotal_reg);
+ vblank = intel_uncore_read(uncore, vblank_reg);
+
+ vtotal = ((save_vtotal >> 16) & 0xfff) + 1;
+ vactive = (save_vtotal & 0x7ff) + 1;
+
+ vblank_start = (vblank & 0xfff) + 1;
+ vblank_end = ((vblank >> 16) & 0xfff) + 1;
+
+ /* Set the border color to purple. */
+ intel_uncore_write(uncore, bclrpat_reg, 0x500050);
+
+ if (DISPLAY_VER(dev_priv) != 2) {
+ u32 pipeconf = intel_uncore_read(uncore, pipeconf_reg);
+ intel_uncore_write(uncore,
+ pipeconf_reg,
+ pipeconf | PIPECONF_FORCE_BORDER);
+ intel_uncore_posting_read(uncore, pipeconf_reg);
+		/* Wait for the next vblank to substitute
+		 * the border color for the color info */
+ intel_crtc_wait_for_next_vblank(intel_crtc_for_pipe(dev_priv, pipe));
+ st00 = intel_uncore_read8(uncore, _VGA_MSR_WRITE);
+ status = ((st00 & (1 << 4)) != 0) ?
+ connector_status_connected :
+ connector_status_disconnected;
+
+ intel_uncore_write(uncore, pipeconf_reg, pipeconf);
+ } else {
+ bool restore_vblank = false;
+ int count, detect;
+
+ /*
+ * If there isn't any border, add some.
+ * Yes, this will flicker
+ */
+ if (vblank_start <= vactive && vblank_end >= vtotal) {
+ u32 vsync = intel_de_read(dev_priv, vsync_reg);
+ u32 vsync_start = (vsync & 0xffff) + 1;
+
+ vblank_start = vsync_start;
+ intel_uncore_write(uncore,
+ vblank_reg,
+ (vblank_start - 1) |
+ ((vblank_end - 1) << 16));
+ restore_vblank = true;
+ }
+ /* sample in the vertical border, selecting the larger one */
+ if (vblank_start - vactive >= vtotal - vblank_end)
+ vsample = (vblank_start + vactive) >> 1;
+ else
+ vsample = (vtotal + vblank_end) >> 1;
+
+ /*
+ * Wait for the border to be displayed
+ */
+ while (intel_uncore_read(uncore, pipe_dsl_reg) >= vactive)
+ ;
+ while ((dsl = intel_uncore_read(uncore, pipe_dsl_reg)) <=
+ vsample)
+ ;
+ /*
+ * Watch ST00 for an entire scanline
+ */
+ detect = 0;
+ count = 0;
+ do {
+ count++;
+ /* Read the ST00 VGA status register */
+ st00 = intel_uncore_read8(uncore, _VGA_MSR_WRITE);
+ if (st00 & (1 << 4))
+ detect++;
+ } while ((intel_uncore_read(uncore, pipe_dsl_reg) == dsl));
+
+ /* restore vblank if necessary */
+ if (restore_vblank)
+ intel_uncore_write(uncore, vblank_reg, vblank);
+ /*
+ * If more than 3/4 of the scanline detected a monitor,
+ * then it is assumed to be present. This works even on i830,
+ * where there isn't any way to force the border color across
+ * the screen
+ */
+ status = detect * 4 > count * 3 ?
+ connector_status_connected :
+ connector_status_disconnected;
+ }
+
+ /* Restore previous settings */
+ intel_uncore_write(uncore, bclrpat_reg, save_bclrpat);
+
+ return status;
+}
+
+static int intel_spurious_crt_detect_dmi_callback(const struct dmi_system_id *id)
+{
+ DRM_DEBUG_DRIVER("Skipping CRT detection for %s\n", id->ident);
+ return 1;
+}
+
+static const struct dmi_system_id intel_spurious_crt_detect[] = {
+ {
+ .callback = intel_spurious_crt_detect_dmi_callback,
+ .ident = "ACER ZGB",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ACER"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ZGB"),
+ },
+ },
+ {
+ .callback = intel_spurious_crt_detect_dmi_callback,
+ .ident = "Intel DZ77BH-55K",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Intel Corporation"),
+ DMI_MATCH(DMI_BOARD_NAME, "DZ77BH-55K"),
+ },
+ },
+ { }
+};
+
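+/*
+ * Detection strategy, in order: hotplug sense where available, a DDC/EDID
+ * probe, and finally analog load detection on pre-HPD hardware. HPD-capable
+ * machines bail out as disconnected instead of attempting load detection.
+ */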
+static int
+intel_crt_detect(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx,
+ bool force)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
+ struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector));
+ struct intel_encoder *intel_encoder = &crt->base;
+ intel_wakeref_t wakeref;
+ int status, ret;
+ struct intel_load_detect_pipe tmp;
+
+ drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s] force=%d\n",
+ connector->base.id, connector->name,
+ force);
+
+ if (!INTEL_DISPLAY_ENABLED(dev_priv))
+ return connector_status_disconnected;
+
+ if (dev_priv->params.load_detect_test) {
+ wakeref = intel_display_power_get(dev_priv,
+ intel_encoder->power_domain);
+ goto load_detect;
+ }
+
+ /* Skip machines without VGA that falsely report hotplug events */
+ if (dmi_check_system(intel_spurious_crt_detect))
+ return connector_status_disconnected;
+
+ wakeref = intel_display_power_get(dev_priv,
+ intel_encoder->power_domain);
+
+ if (I915_HAS_HOTPLUG(dev_priv)) {
+		/* We cannot rely on the HPD pin always being correctly wired
+		 * up, for example many KVMs do not pass it through, and so
+ * only trust an assertion that the monitor is connected.
+ */
+ if (intel_crt_detect_hotplug(connector)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "CRT detected via hotplug\n");
+ status = connector_status_connected;
+ goto out;
+ } else
+ drm_dbg_kms(&dev_priv->drm,
+ "CRT not detected via hotplug\n");
+ }
+
+ if (intel_crt_detect_ddc(connector)) {
+ status = connector_status_connected;
+ goto out;
+ }
+
+ /* Load detection is broken on HPD capable machines. Whoever wants a
+ * broken monitor (without edid) to work behind a broken kvm (that fails
+ * to have the right resistors for HP detection) needs to fix this up.
+ * For now just bail out. */
+ if (I915_HAS_HOTPLUG(dev_priv)) {
+ status = connector_status_disconnected;
+ goto out;
+ }
+
+load_detect:
+ if (!force) {
+ status = connector->status;
+ goto out;
+ }
+
+ /* for pre-945g platforms use load detect */
+ ret = intel_get_load_detect_pipe(connector, &tmp, ctx);
+ if (ret > 0) {
+ if (intel_crt_detect_ddc(connector))
+ status = connector_status_connected;
+ else if (DISPLAY_VER(dev_priv) < 4)
+ status = intel_crt_load_detect(crt,
+ to_intel_crtc(connector->state->crtc)->pipe);
+ else if (dev_priv->params.load_detect_test)
+ status = connector_status_disconnected;
+ else
+ status = connector_status_unknown;
+ intel_release_load_detect_pipe(connector, &tmp, ctx);
+ } else if (ret == 0) {
+ status = connector_status_unknown;
+ } else {
+ status = ret;
+ }
+
+out:
+ intel_display_power_put(dev_priv, intel_encoder->power_domain, wakeref);
+
+ /*
+ * Make sure the refs for power wells enabled during detect are
+ * dropped to avoid a new detect cycle triggered by HPD polling.
+ */
+ intel_display_power_flush_work(dev_priv);
+
+ return status;
+}
+
+static int intel_crt_get_modes(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector));
+ struct intel_encoder *intel_encoder = &crt->base;
+ intel_wakeref_t wakeref;
+ struct i2c_adapter *i2c;
+ int ret;
+
+ wakeref = intel_display_power_get(dev_priv,
+ intel_encoder->power_domain);
+
+ i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->display.vbt.crt_ddc_pin);
+ ret = intel_crt_ddc_get_modes(connector, i2c);
+ if (ret || !IS_G4X(dev_priv))
+ goto out;
+
+ /* Try to probe digital port for output in DVI-I -> VGA mode. */
+ i2c = intel_gmbus_get_adapter(dev_priv, GMBUS_PIN_DPB);
+ ret = intel_crt_ddc_get_modes(connector, i2c);
+
+out:
+ intel_display_power_put(dev_priv, intel_encoder->power_domain, wakeref);
+
+ return ret;
+}
+
+void intel_crt_reset(struct drm_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->dev);
+ struct intel_crt *crt = intel_encoder_to_crt(to_intel_encoder(encoder));
+
+ if (DISPLAY_VER(dev_priv) >= 5) {
+ u32 adpa;
+
+ adpa = intel_de_read(dev_priv, crt->adpa_reg);
+ adpa &= ~ADPA_CRT_HOTPLUG_MASK;
+ adpa |= ADPA_HOTPLUG_BITS;
+ intel_de_write(dev_priv, crt->adpa_reg, adpa);
+ intel_de_posting_read(dev_priv, crt->adpa_reg);
+
+ drm_dbg_kms(&dev_priv->drm, "crt adpa set to 0x%x\n", adpa);
+ crt->force_hotplug_required = true;
+ }
+}
+
+/*
+ * Routines for controlling the analog (VGA) port
+ */
+
+static const struct drm_connector_funcs intel_crt_connector_funcs = {
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .late_register = intel_connector_register,
+ .early_unregister = intel_connector_unregister,
+ .destroy = intel_connector_destroy,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+};
+
+static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs = {
+ .detect_ctx = intel_crt_detect,
+ .mode_valid = intel_crt_mode_valid,
+ .get_modes = intel_crt_get_modes,
+};
+
+static const struct drm_encoder_funcs intel_crt_enc_funcs = {
+ .reset = intel_crt_reset,
+ .destroy = intel_encoder_destroy,
+};
+
+void intel_crt_init(struct drm_i915_private *dev_priv)
+{
+ struct drm_connector *connector;
+ struct intel_crt *crt;
+ struct intel_connector *intel_connector;
+ i915_reg_t adpa_reg;
+ u32 adpa;
+
+ if (HAS_PCH_SPLIT(dev_priv))
+ adpa_reg = PCH_ADPA;
+ else if (IS_VALLEYVIEW(dev_priv))
+ adpa_reg = VLV_ADPA;
+ else
+ adpa_reg = ADPA;
+
+ adpa = intel_de_read(dev_priv, adpa_reg);
+ if ((adpa & ADPA_DAC_ENABLE) == 0) {
+ /*
+ * On some machines (some IVB at least) CRT can be
+ * fused off, but there's no known fuse bit to
+ * indicate that. On these machine the ADPA register
+ * works normally, except the DAC enable bit won't
+ * take. So the only way to tell is attempt to enable
+ * it and see what happens.
+ */
+ intel_de_write(dev_priv, adpa_reg,
+ adpa | ADPA_DAC_ENABLE | ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
+ if ((intel_de_read(dev_priv, adpa_reg) & ADPA_DAC_ENABLE) == 0)
+ return;
+ intel_de_write(dev_priv, adpa_reg, adpa);
+ }
+
+ crt = kzalloc(sizeof(struct intel_crt), GFP_KERNEL);
+ if (!crt)
+ return;
+
+ intel_connector = intel_connector_alloc();
+ if (!intel_connector) {
+ kfree(crt);
+ return;
+ }
+
+ connector = &intel_connector->base;
+ crt->connector = intel_connector;
+ drm_connector_init(&dev_priv->drm, &intel_connector->base,
+ &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
+
+ drm_encoder_init(&dev_priv->drm, &crt->base.base, &intel_crt_enc_funcs,
+ DRM_MODE_ENCODER_DAC, "CRT");
+
+ intel_connector_attach_encoder(intel_connector, &crt->base);
+
+ crt->base.type = INTEL_OUTPUT_ANALOG;
+ crt->base.cloneable = (1 << INTEL_OUTPUT_DVO) | (1 << INTEL_OUTPUT_HDMI);
+ if (IS_I830(dev_priv))
+ crt->base.pipe_mask = BIT(PIPE_A);
+ else
+ crt->base.pipe_mask = ~0;
+
+ if (DISPLAY_VER(dev_priv) == 2)
+ connector->interlace_allowed = 0;
+ else
+ connector->interlace_allowed = 1;
+ connector->doublescan_allowed = 0;
+
+ crt->adpa_reg = adpa_reg;
+
+ crt->base.power_domain = POWER_DOMAIN_PORT_CRT;
+
+ if (I915_HAS_HOTPLUG(dev_priv) &&
+ !dmi_check_system(intel_spurious_crt_detect)) {
+ crt->base.hpd_pin = HPD_CRT;
+ crt->base.hotplug = intel_encoder_hotplug;
+ intel_connector->polled = DRM_CONNECTOR_POLL_HPD;
+ } else {
+ intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ }
+
+ if (HAS_DDI(dev_priv)) {
+ crt->base.port = PORT_E;
+ crt->base.get_config = hsw_crt_get_config;
+ crt->base.get_hw_state = intel_ddi_get_hw_state;
+ crt->base.compute_config = hsw_crt_compute_config;
+ crt->base.pre_pll_enable = hsw_pre_pll_enable_crt;
+ crt->base.pre_enable = hsw_pre_enable_crt;
+ crt->base.enable = hsw_enable_crt;
+ crt->base.disable = hsw_disable_crt;
+ crt->base.post_disable = hsw_post_disable_crt;
+ crt->base.enable_clock = hsw_ddi_enable_clock;
+ crt->base.disable_clock = hsw_ddi_disable_clock;
+ crt->base.is_clock_enabled = hsw_ddi_is_clock_enabled;
+
+ intel_ddi_buf_trans_init(&crt->base);
+ } else {
+ if (HAS_PCH_SPLIT(dev_priv)) {
+ crt->base.compute_config = pch_crt_compute_config;
+ crt->base.disable = pch_disable_crt;
+ crt->base.post_disable = pch_post_disable_crt;
+ } else {
+ crt->base.compute_config = intel_crt_compute_config;
+ crt->base.disable = intel_disable_crt;
+ }
+ crt->base.port = PORT_NONE;
+ crt->base.get_config = intel_crt_get_config;
+ crt->base.get_hw_state = intel_crt_get_hw_state;
+ crt->base.enable = intel_enable_crt;
+ }
+ intel_connector->get_hw_state = intel_connector_get_hw_state;
+
+ drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
+
+ /*
+	 * TODO: find a proper way to discover whether we need to set the
+ * polarity and link reversal bits or not, instead of relying on the
+ * BIOS.
+ */
+ if (HAS_PCH_LPT(dev_priv)) {
+ u32 fdi_config = FDI_RX_POLARITY_REVERSED_LPT |
+ FDI_RX_LINK_REVERSAL_OVERRIDE;
+
+ dev_priv->display.fdi.rx_config = intel_de_read(dev_priv,
+ FDI_RX_CTL(PIPE_A)) & fdi_config;
+ }
+
+ intel_crt_reset(&crt->base.base);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_crt.h b/drivers/gpu/drm/i915/display/intel_crt.h
new file mode 100644
index 000000000..c6071efd9
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_crt.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_CRT_H__
+#define __INTEL_CRT_H__
+
+#include "i915_reg_defs.h"
+
+enum pipe;
+struct drm_encoder;
+struct drm_i915_private;
+
+bool intel_crt_port_enabled(struct drm_i915_private *dev_priv,
+ i915_reg_t adpa_reg, enum pipe *pipe);
+void intel_crt_init(struct drm_i915_private *dev_priv);
+void intel_crt_reset(struct drm_encoder *encoder);
+
+#endif /* __INTEL_CRT_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_crtc.c b/drivers/gpu/drm/i915/display/intel_crtc.c
new file mode 100644
index 000000000..6792a9056
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_crtc.c
@@ -0,0 +1,705 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+#include <linux/kernel.h>
+#include <linux/pm_qos.h>
+#include <linux/slab.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_plane.h>
+#include <drm/drm_vblank_work.h>
+
+#include "i915_irq.h"
+#include "i915_vgpu.h"
+#include "i9xx_plane.h"
+#include "icl_dsi.h"
+#include "intel_atomic.h"
+#include "intel_atomic_plane.h"
+#include "intel_color.h"
+#include "intel_crtc.h"
+#include "intel_cursor.h"
+#include "intel_display_debugfs.h"
+#include "intel_display_trace.h"
+#include "intel_display_types.h"
+#include "intel_drrs.h"
+#include "intel_dsi.h"
+#include "intel_pipe_crc.h"
+#include "intel_psr.h"
+#include "intel_sprite.h"
+#include "intel_vrr.h"
+#include "skl_universal_plane.h"
+
+static void assert_vblank_disabled(struct drm_crtc *crtc)
+{
+ if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
+ drm_crtc_vblank_put(crtc);
+}
+
+struct intel_crtc *intel_first_crtc(struct drm_i915_private *i915)
+{
+ return to_intel_crtc(drm_crtc_from_index(&i915->drm, 0));
+}
+
+struct intel_crtc *intel_crtc_for_pipe(struct drm_i915_private *i915,
+ enum pipe pipe)
+{
+ struct intel_crtc *crtc;
+
+ for_each_intel_crtc(&i915->drm, crtc) {
+ if (crtc->pipe == pipe)
+ return crtc;
+ }
+
+ return NULL;
+}
+
+void intel_crtc_wait_for_next_vblank(struct intel_crtc *crtc)
+{
+ drm_crtc_wait_one_vblank(&crtc->base);
+}
+
+void intel_wait_for_vblank_if_active(struct drm_i915_private *i915,
+ enum pipe pipe)
+{
+ struct intel_crtc *crtc = intel_crtc_for_pipe(i915, pipe);
+
+ if (crtc->active)
+ intel_crtc_wait_for_next_vblank(crtc);
+}
+
+u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_vblank_crtc *vblank = &dev->vblank[drm_crtc_index(&crtc->base)];
+
+ if (!crtc->active)
+ return 0;
+
+ if (!vblank->max_vblank_count)
+ return (u32)drm_crtc_accurate_vblank_count(&crtc->base);
+
+ return crtc->base.funcs->get_vblank_counter(&crtc->base);
+}
+
+u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+
+ /*
+	 * From gen 11, in DSI command mode the frame counter won't have
+	 * updated at the beginning of TE; if we used the hw counter we
+	 * would only see it update on the next TE, hence switch to the
+	 * sw counter.
+ */
+ if (crtc_state->mode_flags & (I915_MODE_FLAG_DSI_USE_TE0 |
+ I915_MODE_FLAG_DSI_USE_TE1))
+ return 0;
+
+ /*
+ * On i965gm the hardware frame counter reads
+ * zero when the TV encoder is enabled :(
+ */
+ if (IS_I965GM(dev_priv) &&
+ (crtc_state->output_types & BIT(INTEL_OUTPUT_TVOUT)))
+ return 0;
+
+ if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv))
+ return 0xffffffff; /* full 32 bit counter */
+ else if (DISPLAY_VER(dev_priv) >= 3)
+ return 0xffffff; /* only 24 bits of frame count */
+ else
+ return 0; /* Gen2 doesn't have a hardware frame counter */
+}
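+
+/*
+ * The value returned above is fed to drm_crtc_set_max_vblank_count() in
+ * intel_crtc_vblank_on() below; 0 means "no usable hw counter", making
+ * intel_crtc_get_vblank_counter() above and the drm vblank core fall
+ * back to software counting.
+ */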
+
+void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ assert_vblank_disabled(&crtc->base);
+ drm_crtc_set_max_vblank_count(&crtc->base,
+ intel_crtc_max_vblank_count(crtc_state));
+ drm_crtc_vblank_on(&crtc->base);
+
+ /*
+ * Should really happen exactly when we enable the pipe
+ * but we want the frame counters in the trace, and that
+ * requires vblank support on some platforms/outputs.
+ */
+ trace_intel_pipe_enable(crtc);
+}
+
+void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ /*
+ * Should really happen exactly when we disable the pipe
+ * but we want the frame counters in the trace, and that
+ * requires vblank support on some platforms/outputs.
+ */
+ trace_intel_pipe_disable(crtc);
+
+ drm_crtc_vblank_off(&crtc->base);
+ assert_vblank_disabled(&crtc->base);
+}
+
+struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc)
+{
+ struct intel_crtc_state *crtc_state;
+
+ crtc_state = kmalloc(sizeof(*crtc_state), GFP_KERNEL);
+
+ if (crtc_state)
+ intel_crtc_state_reset(crtc_state, crtc);
+
+ return crtc_state;
+}
+
+void intel_crtc_state_reset(struct intel_crtc_state *crtc_state,
+ struct intel_crtc *crtc)
+{
+ memset(crtc_state, 0, sizeof(*crtc_state));
+
+ __drm_atomic_helper_crtc_state_reset(&crtc_state->uapi, &crtc->base);
+
+ crtc_state->cpu_transcoder = INVALID_TRANSCODER;
+ crtc_state->master_transcoder = INVALID_TRANSCODER;
+ crtc_state->hsw_workaround_pipe = INVALID_PIPE;
+ crtc_state->scaler_state.scaler_id = -1;
+ crtc_state->mst_master_transcoder = INVALID_TRANSCODER;
+}
+
+static struct intel_crtc *intel_crtc_alloc(void)
+{
+ struct intel_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
+
+ crtc = kzalloc(sizeof(*crtc), GFP_KERNEL);
+ if (!crtc)
+ return ERR_PTR(-ENOMEM);
+
+ crtc_state = intel_crtc_state_alloc(crtc);
+ if (!crtc_state) {
+ kfree(crtc);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ crtc->base.state = &crtc_state->uapi;
+ crtc->config = crtc_state;
+
+ return crtc;
+}
+
+static void intel_crtc_free(struct intel_crtc *crtc)
+{
+ intel_crtc_destroy_state(&crtc->base, crtc->base.state);
+ kfree(crtc);
+}
+
+static void intel_crtc_destroy(struct drm_crtc *_crtc)
+{
+ struct intel_crtc *crtc = to_intel_crtc(_crtc);
+
+ cpu_latency_qos_remove_request(&crtc->vblank_pm_qos);
+
+ drm_crtc_cleanup(&crtc->base);
+ kfree(crtc);
+}
+
+static int intel_crtc_late_register(struct drm_crtc *crtc)
+{
+ intel_crtc_debugfs_add(crtc);
+ return 0;
+}
+
+#define INTEL_CRTC_FUNCS \
+ .set_config = drm_atomic_helper_set_config, \
+ .destroy = intel_crtc_destroy, \
+ .page_flip = drm_atomic_helper_page_flip, \
+ .atomic_duplicate_state = intel_crtc_duplicate_state, \
+ .atomic_destroy_state = intel_crtc_destroy_state, \
+ .set_crc_source = intel_crtc_set_crc_source, \
+ .verify_crc_source = intel_crtc_verify_crc_source, \
+ .get_crc_sources = intel_crtc_get_crc_sources, \
+ .late_register = intel_crtc_late_register
+
+static const struct drm_crtc_funcs bdw_crtc_funcs = {
+ INTEL_CRTC_FUNCS,
+
+ .get_vblank_counter = g4x_get_vblank_counter,
+ .enable_vblank = bdw_enable_vblank,
+ .disable_vblank = bdw_disable_vblank,
+ .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
+};
+
+static const struct drm_crtc_funcs ilk_crtc_funcs = {
+ INTEL_CRTC_FUNCS,
+
+ .get_vblank_counter = g4x_get_vblank_counter,
+ .enable_vblank = ilk_enable_vblank,
+ .disable_vblank = ilk_disable_vblank,
+ .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
+};
+
+static const struct drm_crtc_funcs g4x_crtc_funcs = {
+ INTEL_CRTC_FUNCS,
+
+ .get_vblank_counter = g4x_get_vblank_counter,
+ .enable_vblank = i965_enable_vblank,
+ .disable_vblank = i965_disable_vblank,
+ .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
+};
+
+static const struct drm_crtc_funcs i965_crtc_funcs = {
+ INTEL_CRTC_FUNCS,
+
+ .get_vblank_counter = i915_get_vblank_counter,
+ .enable_vblank = i965_enable_vblank,
+ .disable_vblank = i965_disable_vblank,
+ .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
+};
+
+static const struct drm_crtc_funcs i915gm_crtc_funcs = {
+ INTEL_CRTC_FUNCS,
+
+ .get_vblank_counter = i915_get_vblank_counter,
+ .enable_vblank = i915gm_enable_vblank,
+ .disable_vblank = i915gm_disable_vblank,
+ .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
+};
+
+static const struct drm_crtc_funcs i915_crtc_funcs = {
+ INTEL_CRTC_FUNCS,
+
+ .get_vblank_counter = i915_get_vblank_counter,
+ .enable_vblank = i8xx_enable_vblank,
+ .disable_vblank = i8xx_disable_vblank,
+ .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
+};
+
+static const struct drm_crtc_funcs i8xx_crtc_funcs = {
+ INTEL_CRTC_FUNCS,
+
+ /* no hw vblank counter */
+ .enable_vblank = i8xx_enable_vblank,
+ .disable_vblank = i8xx_disable_vblank,
+ .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
+};
+
+int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+ struct intel_plane *primary, *cursor;
+ const struct drm_crtc_funcs *funcs;
+ struct intel_crtc *crtc;
+ int sprite, ret;
+
+ crtc = intel_crtc_alloc();
+ if (IS_ERR(crtc))
+ return PTR_ERR(crtc);
+
+ crtc->pipe = pipe;
+ crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[pipe];
+
+ if (DISPLAY_VER(dev_priv) >= 9)
+ primary = skl_universal_plane_create(dev_priv, pipe,
+ PLANE_PRIMARY);
+ else
+ primary = intel_primary_plane_create(dev_priv, pipe);
+ if (IS_ERR(primary)) {
+ ret = PTR_ERR(primary);
+ goto fail;
+ }
+ crtc->plane_ids_mask |= BIT(primary->id);
+
+ for_each_sprite(dev_priv, pipe, sprite) {
+ struct intel_plane *plane;
+
+ if (DISPLAY_VER(dev_priv) >= 9)
+ plane = skl_universal_plane_create(dev_priv, pipe,
+ PLANE_SPRITE0 + sprite);
+ else
+ plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
+ if (IS_ERR(plane)) {
+ ret = PTR_ERR(plane);
+ goto fail;
+ }
+ crtc->plane_ids_mask |= BIT(plane->id);
+ }
+
+ cursor = intel_cursor_plane_create(dev_priv, pipe);
+ if (IS_ERR(cursor)) {
+ ret = PTR_ERR(cursor);
+ goto fail;
+ }
+ crtc->plane_ids_mask |= BIT(cursor->id);
+
+ if (HAS_GMCH(dev_priv)) {
+ if (IS_CHERRYVIEW(dev_priv) ||
+ IS_VALLEYVIEW(dev_priv) || IS_G4X(dev_priv))
+ funcs = &g4x_crtc_funcs;
+ else if (DISPLAY_VER(dev_priv) == 4)
+ funcs = &i965_crtc_funcs;
+ else if (IS_I945GM(dev_priv) || IS_I915GM(dev_priv))
+ funcs = &i915gm_crtc_funcs;
+ else if (DISPLAY_VER(dev_priv) == 3)
+ funcs = &i915_crtc_funcs;
+ else
+ funcs = &i8xx_crtc_funcs;
+ } else {
+ if (DISPLAY_VER(dev_priv) >= 8)
+ funcs = &bdw_crtc_funcs;
+ else
+ funcs = &ilk_crtc_funcs;
+ }
+
+ ret = drm_crtc_init_with_planes(&dev_priv->drm, &crtc->base,
+ &primary->base, &cursor->base,
+ funcs, "pipe %c", pipe_name(pipe));
+ if (ret)
+ goto fail;
+
+ if (DISPLAY_VER(dev_priv) >= 11)
+ drm_crtc_create_scaling_filter_property(&crtc->base,
+ BIT(DRM_SCALING_FILTER_DEFAULT) |
+ BIT(DRM_SCALING_FILTER_NEAREST_NEIGHBOR));
+
+ intel_color_init(crtc);
+
+ intel_crtc_drrs_init(crtc);
+ intel_crtc_crc_init(crtc);
+
+ cpu_latency_qos_add_request(&crtc->vblank_pm_qos, PM_QOS_DEFAULT_VALUE);
+
+ drm_WARN_ON(&dev_priv->drm, drm_crtc_index(&crtc->base) != crtc->pipe);
+
+ return 0;
+
+fail:
+ intel_crtc_free(crtc);
+
+ return ret;
+}
+
+static bool intel_crtc_needs_vblank_work(const struct intel_crtc_state *crtc_state)
+{
+ return crtc_state->hw.active &&
+ !intel_crtc_needs_modeset(crtc_state) &&
+ !crtc_state->preload_luts &&
+ (crtc_state->uapi.color_mgmt_changed ||
+ crtc_state->update_pipe);
+}
+
+static void intel_crtc_vblank_work(struct kthread_work *base)
+{
+ struct drm_vblank_work *work = to_drm_vblank_work(base);
+ struct intel_crtc_state *crtc_state =
+ container_of(work, typeof(*crtc_state), vblank_work);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ trace_intel_crtc_vblank_work_start(crtc);
+
+ intel_color_load_luts(crtc_state);
+
+ if (crtc_state->uapi.event) {
+ spin_lock_irq(&crtc->base.dev->event_lock);
+ drm_crtc_send_vblank_event(&crtc->base, crtc_state->uapi.event);
+ crtc_state->uapi.event = NULL;
+ spin_unlock_irq(&crtc->base.dev->event_lock);
+ }
+
+ trace_intel_crtc_vblank_work_end(crtc);
+}
+
+static void intel_crtc_vblank_work_init(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ drm_vblank_work_init(&crtc_state->vblank_work, &crtc->base,
+ intel_crtc_vblank_work);
+ /*
+ * Interrupt latency is critical for getting the vblank
+ * work executed as early as possible during the vblank.
+ */
+ cpu_latency_qos_update_request(&crtc->vblank_pm_qos, 0);
+}
+
+void intel_wait_for_vblank_workers(struct intel_atomic_state *state)
+{
+ struct intel_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
+ int i;
+
+ for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
+ if (!intel_crtc_needs_vblank_work(crtc_state))
+ continue;
+
+ drm_vblank_work_flush(&crtc_state->vblank_work);
+ cpu_latency_qos_update_request(&crtc->vblank_pm_qos,
+ PM_QOS_DEFAULT_VALUE);
+ }
+}
+
+int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
+ int usecs)
+{
+ /* paranoia */
+ if (!adjusted_mode->crtc_htotal)
+ return 1;
+
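+	/*
+	 * Worked example: a 1080p60 mode with crtc_clock 148500 (kHz) and
+	 * crtc_htotal 2200 gives DIV_ROUND_UP(100 * 148500, 1000 * 2200)
+	 * = 7 scanlines for usecs = 100.
+	 */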
+ return DIV_ROUND_UP(usecs * adjusted_mode->crtc_clock,
+ 1000 * adjusted_mode->crtc_htotal);
+}
+
+static int intel_mode_vblank_start(const struct drm_display_mode *mode)
+{
+ int vblank_start = mode->crtc_vblank_start;
+
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ vblank_start = DIV_ROUND_UP(vblank_start, 2);
+
+ return vblank_start;
+}
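+
+/*
+ * For interlaced modes the crtc vblank start is specified in frame lines
+ * while the hardware scanline counter advances once per field line, hence
+ * the halving above (e.g. DIV_ROUND_UP(1085, 2) = 543).
+ */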
+
+/**
+ * intel_pipe_update_start() - start update of a set of display registers
+ * @new_crtc_state: the new crtc state
+ *
+ * Mark the start of an update to pipe registers that should be updated
+ * atomically with respect to vblank. If the next vblank will happen within
+ * the next 100 us, this function waits until the vblank passes.
+ *
+ * After a successful call to this function, interrupts will be disabled
+ * until a subsequent call to intel_pipe_update_end(). That is done to
+ * avoid random delays.
+ */
+void intel_pipe_update_start(struct intel_crtc_state *new_crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct drm_display_mode *adjusted_mode = &new_crtc_state->hw.adjusted_mode;
+ long timeout = msecs_to_jiffies_timeout(1);
+ int scanline, min, max, vblank_start;
+ wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base);
+ bool need_vlv_dsi_wa = (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI);
+ DEFINE_WAIT(wait);
+
+ intel_psr_lock(new_crtc_state);
+
+ if (new_crtc_state->do_async_flip)
+ return;
+
+ if (intel_crtc_needs_vblank_work(new_crtc_state))
+ intel_crtc_vblank_work_init(new_crtc_state);
+
+ if (new_crtc_state->vrr.enable) {
+ if (intel_vrr_is_push_sent(new_crtc_state))
+ vblank_start = intel_vrr_vmin_vblank_start(new_crtc_state);
+ else
+ vblank_start = intel_vrr_vmax_vblank_start(new_crtc_state);
+ } else {
+ vblank_start = intel_mode_vblank_start(adjusted_mode);
+ }
+
+ /* FIXME needs to be calibrated sensibly */
+ min = vblank_start - intel_usecs_to_scanlines(adjusted_mode,
+ VBLANK_EVASION_TIME_US);
+ max = vblank_start - 1;
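+	/*
+	 * The evasion window is thus the last ~VBLANK_EVASION_TIME_US worth
+	 * of scanlines before vblank start; roughly 7 lines for a 1080p60
+	 * mode, per the intel_usecs_to_scanlines() example above.
+	 */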
+
+ if (min <= 0 || max <= 0)
+ goto irq_disable;
+
+ if (drm_WARN_ON(&dev_priv->drm, drm_crtc_vblank_get(&crtc->base)))
+ goto irq_disable;
+
+ /*
+	 * Wait for PSR to idle out after enabling the VBL interrupts.
+ * VBL interrupts will start the PSR exit and prevent a PSR
+ * re-entry as well.
+ */
+ intel_psr_wait_for_idle_locked(new_crtc_state);
+
+ local_irq_disable();
+
+ crtc->debug.min_vbl = min;
+ crtc->debug.max_vbl = max;
+ trace_intel_pipe_update_start(crtc);
+
+ for (;;) {
+ /*
+ * prepare_to_wait() has a memory barrier, which guarantees
+ * other CPUs can see the task state update by the time we
+ * read the scanline.
+ */
+ prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
+
+ scanline = intel_get_crtc_scanline(crtc);
+ if (scanline < min || scanline > max)
+ break;
+
+ if (!timeout) {
+ drm_err(&dev_priv->drm,
+ "Potential atomic update failure on pipe %c\n",
+ pipe_name(crtc->pipe));
+ break;
+ }
+
+ local_irq_enable();
+
+ timeout = schedule_timeout(timeout);
+
+ local_irq_disable();
+ }
+
+ finish_wait(wq, &wait);
+
+ drm_crtc_vblank_put(&crtc->base);
+
+ /*
+ * On VLV/CHV DSI the scanline counter would appear to
+ * increment approx. 1/3 of a scanline before start of vblank.
+ * The registers still get latched at start of vblank however.
+ * This means we must not write any registers on the first
+ * line of vblank (since not the whole line is actually in
+ * vblank). And unfortunately we can't use the interrupt to
+ * wait here since it will fire too soon. We could use the
+ * frame start interrupt instead since it will fire after the
+ * critical scanline, but that would require more changes
+ * in the interrupt code. So for now we'll just do the nasty
+ * thing and poll for the bad scanline to pass us by.
+ *
+ * FIXME figure out if BXT+ DSI suffers from this as well
+ */
+ while (need_vlv_dsi_wa && scanline == vblank_start)
+ scanline = intel_get_crtc_scanline(crtc);
+
+ crtc->debug.scanline_start = scanline;
+ crtc->debug.start_vbl_time = ktime_get();
+ crtc->debug.start_vbl_count = intel_crtc_get_vblank_counter(crtc);
+
+ trace_intel_pipe_update_vblank_evaded(crtc);
+ return;
+
+irq_disable:
+ local_irq_disable();
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_VBLANK_EVADE)
+static void dbg_vblank_evade(struct intel_crtc *crtc, ktime_t end)
+{
+ u64 delta = ktime_to_ns(ktime_sub(end, crtc->debug.start_vbl_time));
+ unsigned int h;
+
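+	/*
+	 * Bucket the measured duration into a power-of-two histogram:
+	 * delta is in ns, so delta >> 9 is roughly half-microsecond units.
+	 */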
+ h = ilog2(delta >> 9);
+ if (h >= ARRAY_SIZE(crtc->debug.vbl.times))
+ h = ARRAY_SIZE(crtc->debug.vbl.times) - 1;
+ crtc->debug.vbl.times[h]++;
+
+ crtc->debug.vbl.sum += delta;
+ if (!crtc->debug.vbl.min || delta < crtc->debug.vbl.min)
+ crtc->debug.vbl.min = delta;
+ if (delta > crtc->debug.vbl.max)
+ crtc->debug.vbl.max = delta;
+
+ if (delta > 1000 * VBLANK_EVASION_TIME_US) {
+ drm_dbg_kms(crtc->base.dev,
+ "Atomic update on pipe (%c) took %lld us, max time under evasion is %u us\n",
+ pipe_name(crtc->pipe),
+ div_u64(delta, 1000),
+ VBLANK_EVASION_TIME_US);
+ crtc->debug.vbl.over++;
+ }
+}
+#else
+static void dbg_vblank_evade(struct intel_crtc *crtc, ktime_t end) {}
+#endif
+
+/**
+ * intel_pipe_update_end() - end update of a set of display registers
+ * @new_crtc_state: the new crtc state
+ *
+ * Mark the end of an update started with intel_pipe_update_start(). This
+ * re-enables interrupts and verifies the update was actually completed
+ * before a vblank.
+ */
+void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
+ enum pipe pipe = crtc->pipe;
+ int scanline_end = intel_get_crtc_scanline(crtc);
+ u32 end_vbl_count = intel_crtc_get_vblank_counter(crtc);
+ ktime_t end_vbl_time = ktime_get();
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+ intel_psr_unlock(new_crtc_state);
+
+ if (new_crtc_state->do_async_flip)
+ return;
+
+ trace_intel_pipe_update_end(crtc, end_vbl_count, scanline_end);
+
+ /*
+	 * In case of MIPI DSI command mode, we need to set a frame update
+	 * request for every commit.
+ */
+ if (DISPLAY_VER(dev_priv) >= 11 &&
+ intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
+ icl_dsi_frame_update(new_crtc_state);
+
+ /* We're still in the vblank-evade critical section, this can't race.
+ * Would be slightly nice to just grab the vblank count and arm the
+ * event outside of the critical section - the spinlock might spin for a
+ * while ... */
+ if (intel_crtc_needs_vblank_work(new_crtc_state)) {
+ drm_vblank_work_schedule(&new_crtc_state->vblank_work,
+ drm_crtc_accurate_vblank_count(&crtc->base) + 1,
+ false);
+ } else if (new_crtc_state->uapi.event) {
+ drm_WARN_ON(&dev_priv->drm,
+ drm_crtc_vblank_get(&crtc->base) != 0);
+
+ spin_lock(&crtc->base.dev->event_lock);
+ drm_crtc_arm_vblank_event(&crtc->base,
+ new_crtc_state->uapi.event);
+ spin_unlock(&crtc->base.dev->event_lock);
+
+ new_crtc_state->uapi.event = NULL;
+ }
+
+ /*
+ * Send VRR Push to terminate Vblank. If we are already in vblank
+ * this has to be done _after_ sampling the frame counter, as
+ * otherwise the push would immediately terminate the vblank and
+ * the sampled frame counter would correspond to the next frame
+ * instead of the current frame.
+ *
+ * There is a tiny race here (iff vblank evasion failed us) where
+ * we might sample the frame counter just before vmax vblank start
+ * but the push would be sent just after it. That would cause the
+ * push to affect the next frame instead of the current frame,
+ * which would cause the next frame to terminate already at vmin
+ * vblank start instead of vmax vblank start.
+ */
+ intel_vrr_send_push(new_crtc_state);
+
+ local_irq_enable();
+
+ if (intel_vgpu_active(dev_priv))
+ return;
+
+ if (crtc->debug.start_vbl_count &&
+ crtc->debug.start_vbl_count != end_vbl_count) {
+ drm_err(&dev_priv->drm,
+ "Atomic update failure on pipe %c (start=%u end=%u) time %lld us, min %d, max %d, scanline start %d, end %d\n",
+ pipe_name(pipe), crtc->debug.start_vbl_count,
+ end_vbl_count,
+ ktime_us_delta(end_vbl_time,
+ crtc->debug.start_vbl_time),
+ crtc->debug.min_vbl, crtc->debug.max_vbl,
+ crtc->debug.scanline_start, scanline_end);
+ }
+
+ dbg_vblank_evade(crtc, end_vbl_time);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_crtc.h b/drivers/gpu/drm/i915/display/intel_crtc.h
new file mode 100644
index 000000000..73077137f
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_crtc.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef _INTEL_CRTC_H_
+#define _INTEL_CRTC_H_
+
+#include <linux/types.h>
+
+enum i9xx_plane_id;
+enum pipe;
+struct drm_display_mode;
+struct drm_i915_private;
+struct intel_atomic_state;
+struct intel_crtc;
+struct intel_crtc_state;
+
+int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
+ int usecs);
+u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state);
+int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe);
+struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc);
+void intel_crtc_state_reset(struct intel_crtc_state *crtc_state,
+ struct intel_crtc *crtc);
+u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc);
+void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state);
+void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state);
+void intel_pipe_update_start(struct intel_crtc_state *new_crtc_state);
+void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state);
+void intel_wait_for_vblank_workers(struct intel_atomic_state *state);
+struct intel_crtc *intel_first_crtc(struct drm_i915_private *i915);
+struct intel_crtc *intel_crtc_for_pipe(struct drm_i915_private *i915,
+ enum pipe pipe);
+void intel_wait_for_vblank_if_active(struct drm_i915_private *i915,
+ enum pipe pipe);
+void intel_crtc_wait_for_next_vblank(struct intel_crtc *crtc);
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
new file mode 100644
index 000000000..e9212f69c
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
@@ -0,0 +1,315 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "intel_crtc_state_dump.h"
+#include "intel_display_types.h"
+#include "intel_hdmi.h"
+#include "intel_vrr.h"
+
+static void intel_dump_crtc_timings(struct drm_i915_private *i915,
+ const struct drm_display_mode *mode)
+{
+ drm_dbg_kms(&i915->drm, "crtc timings: %d %d %d %d %d %d %d %d %d, "
+ "type: 0x%x flags: 0x%x\n",
+ mode->crtc_clock,
+ mode->crtc_hdisplay, mode->crtc_hsync_start,
+ mode->crtc_hsync_end, mode->crtc_htotal,
+ mode->crtc_vdisplay, mode->crtc_vsync_start,
+ mode->crtc_vsync_end, mode->crtc_vtotal,
+ mode->type, mode->flags);
+}
+
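+/* Dump one set of data/link M/N values. */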
+static void
+intel_dump_m_n_config(const struct intel_crtc_state *pipe_config,
+ const char *id, unsigned int lane_count,
+ const struct intel_link_m_n *m_n)
+{
+ struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
+
+ drm_dbg_kms(&i915->drm,
+ "%s: lanes: %i; data_m: %u, data_n: %u, link_m: %u, link_n: %u, tu: %u\n",
+ id, lane_count,
+ m_n->data_m, m_n->data_n,
+ m_n->link_m, m_n->link_n, m_n->tu);
+}
+
+static void
+intel_dump_infoframe(struct drm_i915_private *i915,
+ const union hdmi_infoframe *frame)
+{
+ if (!drm_debug_enabled(DRM_UT_KMS))
+ return;
+
+ hdmi_infoframe_log(KERN_DEBUG, i915->drm.dev, frame);
+}
+
+static void
+intel_dump_dp_vsc_sdp(struct drm_i915_private *i915,
+ const struct drm_dp_vsc_sdp *vsc)
+{
+ if (!drm_debug_enabled(DRM_UT_KMS))
+ return;
+
+ drm_dp_vsc_sdp_log(KERN_DEBUG, i915->drm.dev, vsc);
+}
+
+#define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x
+
+static const char * const output_type_str[] = {
+ OUTPUT_TYPE(UNUSED),
+ OUTPUT_TYPE(ANALOG),
+ OUTPUT_TYPE(DVO),
+ OUTPUT_TYPE(SDVO),
+ OUTPUT_TYPE(LVDS),
+ OUTPUT_TYPE(TVOUT),
+ OUTPUT_TYPE(HDMI),
+ OUTPUT_TYPE(DP),
+ OUTPUT_TYPE(EDP),
+ OUTPUT_TYPE(DSI),
+ OUTPUT_TYPE(DDI),
+ OUTPUT_TYPE(DP_MST),
+};
+
+#undef OUTPUT_TYPE
+
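+/*
+ * Format the output_types bitmask as a comma-separated list of names,
+ * truncating if the buffer runs out of space.
+ */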
+static void snprintf_output_types(char *buf, size_t len,
+ unsigned int output_types)
+{
+ char *str = buf;
+ int i;
+
+ str[0] = '\0';
+
+ for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
+ int r;
+
+ if ((output_types & BIT(i)) == 0)
+ continue;
+
+ r = snprintf(str, len, "%s%s",
+ str != buf ? "," : "", output_type_str[i]);
+ if (r >= len)
+ break;
+ str += r;
+ len -= r;
+
+ output_types &= ~BIT(i);
+ }
+
+ WARN_ON_ONCE(output_types != 0);
+}
+
+static const char * const output_format_str[] = {
+ [INTEL_OUTPUT_FORMAT_RGB] = "RGB",
+ [INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
+ [INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
+};
+
+static const char *output_formats(enum intel_output_format format)
+{
+ if (format >= ARRAY_SIZE(output_format_str))
+ return "invalid";
+ return output_format_str[format];
+}
+
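+/* Dump a plane's framebuffer, rotation, scaler and src/dst rectangles. */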
+static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+
+ if (!fb) {
+ drm_dbg_kms(&i915->drm,
+ "[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
+ plane->base.base.id, plane->base.name,
+ str_yes_no(plane_state->uapi.visible));
+ return;
+ }
+
+ drm_dbg_kms(&i915->drm,
+ "[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %p4cc modifier = 0x%llx, visible: %s\n",
+ plane->base.base.id, plane->base.name,
+ fb->base.id, fb->width, fb->height, &fb->format->format,
+ fb->modifier, str_yes_no(plane_state->uapi.visible));
+ drm_dbg_kms(&i915->drm, "\trotation: 0x%x, scaler: %d, scaling_filter: %d\n",
+ plane_state->hw.rotation, plane_state->scaler_id, plane_state->hw.scaling_filter);
+ if (plane_state->uapi.visible)
+ drm_dbg_kms(&i915->drm,
+ "\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
+ DRM_RECT_FP_ARG(&plane_state->uapi.src),
+ DRM_RECT_ARG(&plane_state->uapi.dst));
+}
+
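+/*
+ * Dump the full crtc state and, when an atomic state is provided, the
+ * state of each plane on this pipe.
+ */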
+void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
+ struct intel_atomic_state *state,
+ const char *context)
+{
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ const struct intel_plane_state *plane_state;
+ struct intel_plane *plane;
+ char buf[64];
+ int i;
+
+ drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] enable: %s [%s]\n",
+ crtc->base.base.id, crtc->base.name,
+ str_yes_no(pipe_config->hw.enable), context);
+
+ if (!pipe_config->hw.enable)
+ goto dump_planes;
+
+ snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
+ drm_dbg_kms(&i915->drm,
+ "active: %s, output_types: %s (0x%x), output format: %s\n",
+ str_yes_no(pipe_config->hw.active),
+ buf, pipe_config->output_types,
+ output_formats(pipe_config->output_format));
+
+ drm_dbg_kms(&i915->drm,
+ "cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
+ transcoder_name(pipe_config->cpu_transcoder),
+ pipe_config->pipe_bpp, pipe_config->dither);
+
+ drm_dbg_kms(&i915->drm, "MST master transcoder: %s\n",
+ transcoder_name(pipe_config->mst_master_transcoder));
+
+ drm_dbg_kms(&i915->drm,
+ "port sync: master transcoder: %s, slave transcoder bitmask = 0x%x\n",
+ transcoder_name(pipe_config->master_transcoder),
+ pipe_config->sync_mode_slaves_mask);
+
+ drm_dbg_kms(&i915->drm, "bigjoiner: %s, pipes: 0x%x\n",
+ intel_crtc_is_bigjoiner_slave(pipe_config) ? "slave" :
+ intel_crtc_is_bigjoiner_master(pipe_config) ? "master" : "no",
+ pipe_config->bigjoiner_pipes);
+
+ drm_dbg_kms(&i915->drm, "splitter: %s, link count %d, overlap %d\n",
+ str_enabled_disabled(pipe_config->splitter.enable),
+ pipe_config->splitter.link_count,
+ pipe_config->splitter.pixel_overlap);
+
+ if (pipe_config->has_pch_encoder)
+ intel_dump_m_n_config(pipe_config, "fdi",
+ pipe_config->fdi_lanes,
+ &pipe_config->fdi_m_n);
+
+ if (intel_crtc_has_dp_encoder(pipe_config)) {
+ intel_dump_m_n_config(pipe_config, "dp m_n",
+ pipe_config->lane_count,
+ &pipe_config->dp_m_n);
+ intel_dump_m_n_config(pipe_config, "dp m2_n2",
+ pipe_config->lane_count,
+ &pipe_config->dp_m2_n2);
+ }
+
+ drm_dbg_kms(&i915->drm, "framestart delay: %d, MSA timing delay: %d\n",
+ pipe_config->framestart_delay, pipe_config->msa_timing_delay);
+
+ drm_dbg_kms(&i915->drm,
+ "audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
+ pipe_config->has_audio, pipe_config->has_infoframe,
+ pipe_config->infoframes.enable);
+
+ if (pipe_config->infoframes.enable &
+ intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL))
+ drm_dbg_kms(&i915->drm, "GCP: 0x%x\n",
+ pipe_config->infoframes.gcp);
+ if (pipe_config->infoframes.enable &
+ intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
+ intel_dump_infoframe(i915, &pipe_config->infoframes.avi);
+ if (pipe_config->infoframes.enable &
+ intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD))
+ intel_dump_infoframe(i915, &pipe_config->infoframes.spd);
+ if (pipe_config->infoframes.enable &
+ intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
+ intel_dump_infoframe(i915, &pipe_config->infoframes.hdmi);
+ if (pipe_config->infoframes.enable &
+ intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_DRM))
+ intel_dump_infoframe(i915, &pipe_config->infoframes.drm);
+ if (pipe_config->infoframes.enable &
+ intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA))
+ intel_dump_infoframe(i915, &pipe_config->infoframes.drm);
+ if (pipe_config->infoframes.enable &
+ intel_hdmi_infoframe_enable(DP_SDP_VSC))
+ intel_dump_dp_vsc_sdp(i915, &pipe_config->infoframes.vsc);
+
+ drm_dbg_kms(&i915->drm, "vrr: %s, vmin: %d, vmax: %d, pipeline full: %d, guardband: %d flipline: %d, vmin vblank: %d, vmax vblank: %d\n",
+ str_yes_no(pipe_config->vrr.enable),
+ pipe_config->vrr.vmin, pipe_config->vrr.vmax,
+ pipe_config->vrr.pipeline_full, pipe_config->vrr.guardband,
+ pipe_config->vrr.flipline,
+ intel_vrr_vmin_vblank_start(pipe_config),
+ intel_vrr_vmax_vblank_start(pipe_config));
+
+ drm_dbg_kms(&i915->drm, "requested mode: " DRM_MODE_FMT "\n",
+ DRM_MODE_ARG(&pipe_config->hw.mode));
+ drm_dbg_kms(&i915->drm, "adjusted mode: " DRM_MODE_FMT "\n",
+ DRM_MODE_ARG(&pipe_config->hw.adjusted_mode));
+ intel_dump_crtc_timings(i915, &pipe_config->hw.adjusted_mode);
+ drm_dbg_kms(&i915->drm, "pipe mode: " DRM_MODE_FMT "\n",
+ DRM_MODE_ARG(&pipe_config->hw.pipe_mode));
+ intel_dump_crtc_timings(i915, &pipe_config->hw.pipe_mode);
+ drm_dbg_kms(&i915->drm,
+ "port clock: %d, pipe src: " DRM_RECT_FMT ", pixel rate %d\n",
+ pipe_config->port_clock, DRM_RECT_ARG(&pipe_config->pipe_src),
+ pipe_config->pixel_rate);
+
+ drm_dbg_kms(&i915->drm, "linetime: %d, ips linetime: %d\n",
+ pipe_config->linetime, pipe_config->ips_linetime);
+
+ if (DISPLAY_VER(i915) >= 9)
+ drm_dbg_kms(&i915->drm,
+ "num_scalers: %d, scaler_users: 0x%x, scaler_id: %d, scaling_filter: %d\n",
+ crtc->num_scalers,
+ pipe_config->scaler_state.scaler_users,
+ pipe_config->scaler_state.scaler_id,
+ pipe_config->hw.scaling_filter);
+
+ if (HAS_GMCH(i915))
+ drm_dbg_kms(&i915->drm,
+ "gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
+ pipe_config->gmch_pfit.control,
+ pipe_config->gmch_pfit.pgm_ratios,
+ pipe_config->gmch_pfit.lvds_border_bits);
+ else
+ drm_dbg_kms(&i915->drm,
+ "pch pfit: " DRM_RECT_FMT ", %s, force thru: %s\n",
+ DRM_RECT_ARG(&pipe_config->pch_pfit.dst),
+ str_enabled_disabled(pipe_config->pch_pfit.enabled),
+ str_yes_no(pipe_config->pch_pfit.force_thru));
+
+ drm_dbg_kms(&i915->drm, "ips: %i, double wide: %i, drrs: %i\n",
+ pipe_config->ips_enabled, pipe_config->double_wide,
+ pipe_config->has_drrs);
+
+ intel_dpll_dump_hw_state(i915, &pipe_config->dpll_hw_state);
+
+ if (IS_CHERRYVIEW(i915))
+ drm_dbg_kms(&i915->drm,
+ "cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
+ pipe_config->cgm_mode, pipe_config->gamma_mode,
+ pipe_config->gamma_enable, pipe_config->csc_enable);
+ else
+ drm_dbg_kms(&i915->drm,
+ "csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
+ pipe_config->csc_mode, pipe_config->gamma_mode,
+ pipe_config->gamma_enable, pipe_config->csc_enable);
+
+ drm_dbg_kms(&i915->drm, "degamma lut: %d entries, gamma lut: %d entries\n",
+ pipe_config->hw.degamma_lut ?
+ drm_color_lut_size(pipe_config->hw.degamma_lut) : 0,
+ pipe_config->hw.gamma_lut ?
+ drm_color_lut_size(pipe_config->hw.gamma_lut) : 0);
+
+dump_planes:
+ if (!state)
+ return;
+
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+ if (plane->pipe == crtc->pipe)
+ intel_dump_plane_state(plane_state);
+ }
+}
diff --git a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.h b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.h
new file mode 100644
index 000000000..9399c35b7
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __INTEL_CRTC_STATE_DUMP_H__
+#define __INTEL_CRTC_STATE_DUMP_H__
+
+struct intel_crtc_state;
+struct intel_atomic_state;
+
+void intel_crtc_state_dump(const struct intel_crtc_state *crtc_state,
+ struct intel_atomic_state *state,
+ const char *context);
+
+#endif /* __INTEL_CRTC_STATE_DUMP_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_cursor.c b/drivers/gpu/drm/i915/display/intel_cursor.c
new file mode 100644
index 000000000..87899e89b
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_cursor.c
@@ -0,0 +1,828 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+#include <linux/kernel.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_blend.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_fourcc.h>
+
+#include "intel_atomic.h"
+#include "intel_atomic_plane.h"
+#include "intel_cursor.h"
+#include "intel_de.h"
+#include "intel_display_types.h"
+#include "intel_display.h"
+#include "intel_fb.h"
+#include "intel_fb_pin.h"
+#include "intel_frontbuffer.h"
+#include "intel_psr.h"
+#include "intel_sprite.h"
+#include "skl_watermark.h"
+
+/* Cursor formats */
+static const u32 intel_cursor_formats[] = {
+ DRM_FORMAT_ARGB8888,
+};
+
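+/*
+ * Return the address to program into the cursor registers: the DMA
+ * address of the backing pages on platforms that need a physical
+ * cursor, otherwise the plane's GGTT offset.
+ */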
+static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(plane_state->uapi.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ const struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ u32 base;
+
+ if (INTEL_INFO(dev_priv)->display.cursor_needs_physical)
+ base = sg_dma_address(obj->mm.pages->sgl);
+ else
+ base = intel_plane_ggtt_offset(plane_state);
+
+ return base + plane_state->view.color_plane[0].offset;
+}
+
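+/*
+ * Pack the cursor position into the CURPOS register layout; negative
+ * coordinates use a sign+magnitude encoding.
+ */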
+static u32 intel_cursor_position(const struct intel_plane_state *plane_state)
+{
+ int x = plane_state->uapi.dst.x1;
+ int y = plane_state->uapi.dst.y1;
+ u32 pos = 0;
+
+ if (x < 0) {
+ pos |= CURSOR_POS_X_SIGN;
+ x = -x;
+ }
+ pos |= CURSOR_POS_X(x);
+
+ if (y < 0) {
+ pos |= CURSOR_POS_Y_SIGN;
+ y = -y;
+ }
+ pos |= CURSOR_POS_Y(y);
+
+ return pos;
+}
+
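+/* Check the cursor size against the limits advertised in mode_config. */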
+static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state)
+{
+ const struct drm_mode_config *config =
+ &plane_state->uapi.plane->dev->mode_config;
+ int width = drm_rect_width(&plane_state->uapi.dst);
+ int height = drm_rect_height(&plane_state->uapi.dst);
+
+ return width > 0 && width <= config->cursor_width &&
+ height > 0 && height <= config->cursor_height;
+}
+
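+/*
+ * Compute the cursor surface offset, rejecting configurations that
+ * would require panning within the cursor surface.
+ */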
+static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(plane_state->uapi.plane->dev);
+ unsigned int rotation = plane_state->hw.rotation;
+ int src_x, src_y;
+ u32 offset;
+ int ret;
+
+ ret = intel_plane_compute_gtt(plane_state);
+ if (ret)
+ return ret;
+
+ if (!plane_state->uapi.visible)
+ return 0;
+
+ src_x = plane_state->uapi.src.x1 >> 16;
+ src_y = plane_state->uapi.src.y1 >> 16;
+
+ intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
+ offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
+ plane_state, 0);
+
+ if (src_x != 0 || src_y != 0) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Arbitrary cursor panning not supported\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Put the final coordinates back so that the src
+ * coordinate checks will see the right values.
+ */
+ drm_rect_translate_to(&plane_state->uapi.src,
+ src_x << 16, src_y << 16);
+
+ /* ILK+ do this automagically in hardware */
+ if (HAS_GMCH(dev_priv) && rotation & DRM_MODE_ROTATE_180) {
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+ int src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
+
+ offset += (src_h * src_w - 1) * fb->format->cpp[0];
+ }
+
+ plane_state->view.color_plane[0].offset = offset;
+ plane_state->view.color_plane[0].x = src_x;
+ plane_state->view.color_plane[0].y = src_y;
+
+ return 0;
+}
+
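+/*
+ * Checks common to all cursor planes: linear framebuffers only, no
+ * scaling, and dst coordinates made relative to the plane's pipe.
+ */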
+static int intel_check_cursor(struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state)
+{
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
+ const struct drm_rect src = plane_state->uapi.src;
+ const struct drm_rect dst = plane_state->uapi.dst;
+ int ret;
+
+ if (fb && fb->modifier != DRM_FORMAT_MOD_LINEAR) {
+ drm_dbg_kms(&i915->drm, "cursor cannot be tiled\n");
+ return -EINVAL;
+ }
+
+ ret = intel_atomic_plane_check_clipping(plane_state, crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ true);
+ if (ret)
+ return ret;
+
+ /* Use the unclipped src/dst rectangles, which we program to hw */
+ plane_state->uapi.src = src;
+ plane_state->uapi.dst = dst;
+
+ /* final plane coordinates will be relative to the plane's pipe */
+ drm_rect_translate(&plane_state->uapi.dst,
+ -crtc_state->pipe_src.x1,
+ -crtc_state->pipe_src.y1);
+
+ ret = intel_cursor_check_surface(plane_state);
+ if (ret)
+ return ret;
+
+ if (!plane_state->uapi.visible)
+ return 0;
+
+ ret = intel_plane_check_src_coordinates(plane_state);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static unsigned int
+i845_cursor_max_stride(struct intel_plane *plane,
+ u32 pixel_format, u64 modifier,
+ unsigned int rotation)
+{
+ return 2048;
+}
+
+static u32 i845_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
+{
+ u32 cntl = 0;
+
+ if (crtc_state->gamma_enable)
+ cntl |= CURSOR_PIPE_GAMMA_ENABLE;
+
+ return cntl;
+}
+
+static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ return CURSOR_ENABLE |
+ CURSOR_FORMAT_ARGB |
+ CURSOR_STRIDE(plane_state->view.color_plane[0].mapping_stride);
+}
+
+static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state)
+{
+ int width = drm_rect_width(&plane_state->uapi.dst);
+
+ /*
+ * 845g/865g are only limited by the width of their cursors;
+ * the height is arbitrary up to the precision of the register.
+ */
+ return intel_cursor_size_ok(plane_state) && IS_ALIGNED(width, 64);
+}
+
+static int i845_check_cursor(struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state)
+{
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
+ int ret;
+
+ ret = intel_check_cursor(crtc_state, plane_state);
+ if (ret)
+ return ret;
+
+ /* if we want to turn off the cursor, ignore width and height */
+ if (!fb)
+ return 0;
+
+ /* Check for which cursor types we support */
+ if (!i845_cursor_size_ok(plane_state)) {
+ drm_dbg_kms(&i915->drm,
+ "Cursor dimension %dx%d not supported\n",
+ drm_rect_width(&plane_state->uapi.dst),
+ drm_rect_height(&plane_state->uapi.dst));
+ return -EINVAL;
+ }
+
+ drm_WARN_ON(&i915->drm, plane_state->uapi.visible &&
+ plane_state->view.color_plane[0].mapping_stride != fb->pitches[0]);
+
+ switch (fb->pitches[0]) {
+ case 256:
+ case 512:
+ case 1024:
+ case 2048:
+ break;
+ default:
+ drm_dbg_kms(&i915->drm, "Invalid cursor stride (%u)\n",
+ fb->pitches[0]);
+ return -EINVAL;
+ }
+
+ plane_state->ctl = i845_cursor_ctl(crtc_state, plane_state);
+
+ return 0;
+}
+
+/* TODO: split into noarm+arm pair */
+static void i845_cursor_update_arm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ u32 cntl = 0, base = 0, pos = 0, size = 0;
+
+ if (plane_state && plane_state->uapi.visible) {
+ unsigned int width = drm_rect_width(&plane_state->uapi.dst);
+ unsigned int height = drm_rect_height(&plane_state->uapi.dst);
+
+ cntl = plane_state->ctl |
+ i845_cursor_ctl_crtc(crtc_state);
+
+ size = CURSOR_HEIGHT(height) | CURSOR_WIDTH(width);
+
+ base = intel_cursor_base(plane_state);
+ pos = intel_cursor_position(plane_state);
+ }
+
+ /*
+ * On these chipsets we can only modify the base/size/stride
+ * whilst the cursor is disabled.
+ */
+ if (plane->cursor.base != base ||
+ plane->cursor.size != size ||
+ plane->cursor.cntl != cntl) {
+ intel_de_write_fw(dev_priv, CURCNTR(PIPE_A), 0);
+ intel_de_write_fw(dev_priv, CURBASE(PIPE_A), base);
+ intel_de_write_fw(dev_priv, CURSIZE(PIPE_A), size);
+ intel_de_write_fw(dev_priv, CURPOS(PIPE_A), pos);
+ intel_de_write_fw(dev_priv, CURCNTR(PIPE_A), cntl);
+
+ plane->cursor.base = base;
+ plane->cursor.size = size;
+ plane->cursor.cntl = cntl;
+ } else {
+ intel_de_write_fw(dev_priv, CURPOS(PIPE_A), pos);
+ }
+}
+
+static void i845_cursor_disable_arm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
+{
+ i845_cursor_update_arm(plane, crtc_state, NULL);
+}
+
+static bool i845_cursor_get_hw_state(struct intel_plane *plane,
+ enum pipe *pipe)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum intel_display_power_domain power_domain;
+ intel_wakeref_t wakeref;
+ bool ret;
+
+ power_domain = POWER_DOMAIN_PIPE(PIPE_A);
+ wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ if (!wakeref)
+ return false;
+
+ ret = intel_de_read(dev_priv, CURCNTR(PIPE_A)) & CURSOR_ENABLE;
+
+ *pipe = PIPE_A;
+
+ intel_display_power_put(dev_priv, power_domain, wakeref);
+
+ return ret;
+}
+
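+/* Cursor pixels are always 4 bytes, so the max stride follows the max width. */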
+static unsigned int
+i9xx_cursor_max_stride(struct intel_plane *plane,
+ u32 pixel_format, u64 modifier,
+ unsigned int rotation)
+{
+ return plane->base.dev->mode_config.cursor_width * 4;
+}
+
+static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 cntl = 0;
+
+ if (DISPLAY_VER(dev_priv) >= 11)
+ return cntl;
+
+ if (crtc_state->gamma_enable)
+ cntl |= MCURSOR_PIPE_GAMMA_ENABLE;
+
+ if (crtc_state->csc_enable)
+ cntl |= MCURSOR_PIPE_CSC_ENABLE;
+
+ if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
+ cntl |= MCURSOR_PIPE_SEL(crtc->pipe);
+
+ return cntl;
+}
+
+static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(plane_state->uapi.plane->dev);
+ u32 cntl = 0;
+
+ if (IS_SANDYBRIDGE(dev_priv) || IS_IVYBRIDGE(dev_priv))
+ cntl |= MCURSOR_TRICKLE_FEED_DISABLE;
+
+ switch (drm_rect_width(&plane_state->uapi.dst)) {
+ case 64:
+ cntl |= MCURSOR_MODE_64_ARGB_AX;
+ break;
+ case 128:
+ cntl |= MCURSOR_MODE_128_ARGB_AX;
+ break;
+ case 256:
+ cntl |= MCURSOR_MODE_256_ARGB_AX;
+ break;
+ default:
+ MISSING_CASE(drm_rect_width(&plane_state->uapi.dst));
+ return 0;
+ }
+
+ if (plane_state->hw.rotation & DRM_MODE_ROTATE_180)
+ cntl |= MCURSOR_ROTATE_180;
+
+ /* Wa_22012358565:adl-p */
+ if (DISPLAY_VER(dev_priv) == 13)
+ cntl |= MCURSOR_ARB_SLOTS(1);
+
+ return cntl;
+}
+
+static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(plane_state->uapi.plane->dev);
+ int width = drm_rect_width(&plane_state->uapi.dst);
+ int height = drm_rect_height(&plane_state->uapi.dst);
+
+ if (!intel_cursor_size_ok(plane_state))
+ return false;
+
+ /* Cursor width is limited to a few power-of-two sizes */
+ switch (width) {
+ case 256:
+ case 128:
+ case 64:
+ break;
+ default:
+ return false;
+ }
+
+ /*
+ * IVB+ have CUR_FBC_CTL which allows an arbitrary cursor
+ * height from 8 lines up to the cursor width, when the
+ * cursor is not rotated. Everything else requires square
+ * cursors.
+ */
+ if (HAS_CUR_FBC(dev_priv) &&
+ plane_state->hw.rotation & DRM_MODE_ROTATE_0) {
+ if (height < 8 || height > width)
+ return false;
+ } else {
+ if (height != width)
+ return false;
+ }
+
+ return true;
+}
+
+static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ enum pipe pipe = plane->pipe;
+ int ret;
+
+ ret = intel_check_cursor(crtc_state, plane_state);
+ if (ret)
+ return ret;
+
+ /* if we want to turn off the cursor, ignore width and height */
+ if (!fb)
+ return 0;
+
+ /* Check for which cursor types we support */
+ if (!i9xx_cursor_size_ok(plane_state)) {
+ drm_dbg(&dev_priv->drm,
+ "Cursor dimension %dx%d not supported\n",
+ drm_rect_width(&plane_state->uapi.dst),
+ drm_rect_height(&plane_state->uapi.dst));
+ return -EINVAL;
+ }
+
+ drm_WARN_ON(&dev_priv->drm, plane_state->uapi.visible &&
+ plane_state->view.color_plane[0].mapping_stride != fb->pitches[0]);
+
+ if (fb->pitches[0] !=
+ drm_rect_width(&plane_state->uapi.dst) * fb->format->cpp[0]) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Invalid cursor stride (%u) (cursor width %d)\n",
+ fb->pitches[0],
+ drm_rect_width(&plane_state->uapi.dst));
+ return -EINVAL;
+ }
+
+ /*
+ * There's something wrong with the cursor on CHV pipe C.
+ * If it straddles the left edge of the screen then
+ * moving it away from the edge or disabling it often
+ * results in a pipe underrun, and often that can lead to
+ * dead pipe (constant underrun reported, and it scans
+ * out just a solid color). To recover from that, the
+ * display power well must be turned off and on again.
+ * Refuse to put the cursor into that compromised position.
+ */
+ if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
+ plane_state->uapi.visible && plane_state->uapi.dst.x1 < 0) {
+ drm_dbg_kms(&dev_priv->drm,
+ "CHV cursor C not allowed to straddle the left screen edge\n");
+ return -EINVAL;
+ }
+
+ plane_state->ctl = i9xx_cursor_ctl(crtc_state, plane_state);
+
+ return 0;
+}
+
+/* TODO: split into noarm+arm pair */
+static void i9xx_cursor_update_arm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+ u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0;
+
+ if (plane_state && plane_state->uapi.visible) {
+ int width = drm_rect_width(&plane_state->uapi.dst);
+ int height = drm_rect_height(&plane_state->uapi.dst);
+
+ cntl = plane_state->ctl |
+ i9xx_cursor_ctl_crtc(crtc_state);
+
+ if (width != height)
+ fbc_ctl = CUR_FBC_EN | CUR_FBC_HEIGHT(height - 1);
+
+ base = intel_cursor_base(plane_state);
+ pos = intel_cursor_position(plane_state);
+ }
+
+ /*
+ * On some platforms writing CURCNTR first will also
+ * cause CURPOS to be armed by the CURBASE write.
+ * Without the CURCNTR write the CURPOS write would
+ * arm itself. Thus we always update CURCNTR before
+ * CURPOS.
+ *
+ * On other platforms CURPOS always requires the
+ * CURBASE write to arm the update. Additionally
+ * a write to any of the cursor registers will cancel
+ * an already armed cursor update. Thus leaving out
+ * the CURBASE write after CURPOS could lead to a
+ * cursor that doesn't appear to move, or even change
+ * shape. Thus we always write CURBASE.
+ *
+ * The other registers are armed by the CURBASE write
+ * except when the plane is getting enabled at which time
+ * the CURCNTR write arms the update.
+ */
+
+ if (DISPLAY_VER(dev_priv) >= 9)
+ skl_write_cursor_wm(plane, crtc_state);
+
+ if (plane_state)
+ intel_psr2_program_plane_sel_fetch(plane, crtc_state, plane_state, 0);
+ else
+ intel_psr2_disable_plane_sel_fetch(plane, crtc_state);
+
+ if (plane->cursor.base != base ||
+ plane->cursor.size != fbc_ctl ||
+ plane->cursor.cntl != cntl) {
+ if (HAS_CUR_FBC(dev_priv))
+ intel_de_write_fw(dev_priv, CUR_FBC_CTL(pipe),
+ fbc_ctl);
+ intel_de_write_fw(dev_priv, CURCNTR(pipe), cntl);
+ intel_de_write_fw(dev_priv, CURPOS(pipe), pos);
+ intel_de_write_fw(dev_priv, CURBASE(pipe), base);
+
+ plane->cursor.base = base;
+ plane->cursor.size = fbc_ctl;
+ plane->cursor.cntl = cntl;
+ } else {
+ intel_de_write_fw(dev_priv, CURPOS(pipe), pos);
+ intel_de_write_fw(dev_priv, CURBASE(pipe), base);
+ }
+}
+
+static void i9xx_cursor_disable_arm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
+{
+ i9xx_cursor_update_arm(plane, crtc_state, NULL);
+}
+
+static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
+ enum pipe *pipe)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum intel_display_power_domain power_domain;
+ intel_wakeref_t wakeref;
+ bool ret;
+ u32 val;
+
+ /*
+ * Not 100% correct for planes that can move between pipes,
+ * but that's only the case for gen2-3 which don't have any
+ * display power wells.
+ */
+ power_domain = POWER_DOMAIN_PIPE(plane->pipe);
+ wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ if (!wakeref)
+ return false;
+
+ val = intel_de_read(dev_priv, CURCNTR(plane->pipe));
+
+ ret = val & MCURSOR_MODE_MASK;
+
+ if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv))
+ *pipe = plane->pipe;
+ else
+ *pipe = REG_FIELD_GET(MCURSOR_PIPE_SEL_MASK, val);
+
+ intel_display_power_put(dev_priv, power_domain, wakeref);
+
+ return ret;
+}
+
+static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
+ u32 format, u64 modifier)
+{
+ if (!intel_fb_plane_supports_modifier(to_intel_plane(_plane), modifier))
+ return false;
+
+ return format == DRM_FORMAT_ARGB8888;
+}
+
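+/*
+ * Legacy cursor ioctl fastpath: update just the cursor plane without a
+ * full atomic commit, falling back to the atomic slowpath whenever
+ * anything besides the fb or position changes.
+ */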
+static int
+intel_legacy_cursor_update(struct drm_plane *_plane,
+ struct drm_crtc *_crtc,
+ struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ u32 src_x, u32 src_y,
+ u32 src_w, u32 src_h,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct intel_plane *plane = to_intel_plane(_plane);
+ struct intel_crtc *crtc = to_intel_crtc(_crtc);
+ struct intel_plane_state *old_plane_state =
+ to_intel_plane_state(plane->base.state);
+ struct intel_plane_state *new_plane_state;
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ struct intel_crtc_state *new_crtc_state;
+ int ret;
+
+ /*
+ * When crtc is inactive or there is a modeset pending,
+ * wait for it to complete in the slowpath.
+ * PSR2 selective fetch also requires the slow path as
+ * PSR2 plane and transcoder registers can only be updated during
+ * vblank.
+ *
+ * FIXME bigjoiner fastpath would be good
+ */
+ if (!crtc_state->hw.active || intel_crtc_needs_modeset(crtc_state) ||
+ crtc_state->update_pipe || crtc_state->bigjoiner_pipes)
+ goto slow;
+
+ /*
+ * Don't do an async update if there is an outstanding commit modifying
+ * the plane. This prevents our async update's changes from getting
+ * overridden by a previous synchronous update's state.
+ */
+ if (old_plane_state->uapi.commit &&
+ !try_wait_for_completion(&old_plane_state->uapi.commit->hw_done))
+ goto slow;
+
+ /*
+ * If any parameters change that may affect watermarks,
+ * take the slowpath. Only changing fb or position should be
+ * in the fastpath.
+ */
+ if (old_plane_state->uapi.crtc != &crtc->base ||
+ old_plane_state->uapi.src_w != src_w ||
+ old_plane_state->uapi.src_h != src_h ||
+ old_plane_state->uapi.crtc_w != crtc_w ||
+ old_plane_state->uapi.crtc_h != crtc_h ||
+ !old_plane_state->uapi.fb != !fb)
+ goto slow;
+
+ new_plane_state = to_intel_plane_state(intel_plane_duplicate_state(&plane->base));
+ if (!new_plane_state)
+ return -ENOMEM;
+
+ new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(&crtc->base));
+ if (!new_crtc_state) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+
+ drm_atomic_set_fb_for_plane(&new_plane_state->uapi, fb);
+
+ new_plane_state->uapi.src_x = src_x;
+ new_plane_state->uapi.src_y = src_y;
+ new_plane_state->uapi.src_w = src_w;
+ new_plane_state->uapi.src_h = src_h;
+ new_plane_state->uapi.crtc_x = crtc_x;
+ new_plane_state->uapi.crtc_y = crtc_y;
+ new_plane_state->uapi.crtc_w = crtc_w;
+ new_plane_state->uapi.crtc_h = crtc_h;
+
+ intel_plane_copy_uapi_to_hw_state(new_plane_state, new_plane_state, crtc);
+
+ ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state,
+ old_plane_state, new_plane_state);
+ if (ret)
+ goto out_free;
+
+ ret = intel_plane_pin_fb(new_plane_state);
+ if (ret)
+ goto out_free;
+
+ intel_frontbuffer_flush(to_intel_frontbuffer(new_plane_state->hw.fb),
+ ORIGIN_CURSOR_UPDATE);
+ intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
+ to_intel_frontbuffer(new_plane_state->hw.fb),
+ plane->frontbuffer_bit);
+
+ /* Swap plane state */
+ plane->base.state = &new_plane_state->uapi;
+
+ /*
+ * We cannot swap crtc_state as it may be in use by an atomic commit or
+ * page flip that's running simultaneously. If we swap crtc_state and
+ * destroy the old state, we will cause a use-after-free there.
+ *
+ * Only update active_planes, which is needed for our internal
+ * bookkeeping. Either value will do the right thing when updating
+ * planes atomically. If the cursor was part of the atomic update then
+ * we would have taken the slowpath.
+ */
+ crtc_state->active_planes = new_crtc_state->active_planes;
+
+ /*
+ * Technically we should do a vblank evasion here to make
+ * sure all the cursor registers update on the same frame.
+ * For now just make sure the register writes happen as
+ * quickly as possible to minimize the race window.
+ */
+ local_irq_disable();
+
+ if (new_plane_state->uapi.visible) {
+ intel_plane_update_noarm(plane, crtc_state, new_plane_state);
+ intel_plane_update_arm(plane, crtc_state, new_plane_state);
+ } else {
+ intel_plane_disable_arm(plane, crtc_state);
+ }
+
+ local_irq_enable();
+
+ intel_plane_unpin_fb(old_plane_state);
+
+out_free:
+ if (new_crtc_state)
+ intel_crtc_destroy_state(&crtc->base, &new_crtc_state->uapi);
+ if (ret)
+ intel_plane_destroy_state(&plane->base, &new_plane_state->uapi);
+ else
+ intel_plane_destroy_state(&plane->base, &old_plane_state->uapi);
+ return ret;
+
+slow:
+ return drm_atomic_helper_update_plane(&plane->base, &crtc->base, fb,
+ crtc_x, crtc_y, crtc_w, crtc_h,
+ src_x, src_y, src_w, src_h, ctx);
+}
+
+static const struct drm_plane_funcs intel_cursor_plane_funcs = {
+ .update_plane = intel_legacy_cursor_update,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = intel_plane_destroy,
+ .atomic_duplicate_state = intel_plane_duplicate_state,
+ .atomic_destroy_state = intel_plane_destroy_state,
+ .format_mod_supported = intel_cursor_format_mod_supported,
+};
+
+struct intel_plane *
+intel_cursor_plane_create(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ struct intel_plane *cursor;
+ int ret, zpos;
+ u64 *modifiers;
+
+ cursor = intel_plane_alloc();
+ if (IS_ERR(cursor))
+ return cursor;
+
+ cursor->pipe = pipe;
+ cursor->i9xx_plane = (enum i9xx_plane_id) pipe;
+ cursor->id = PLANE_CURSOR;
+ cursor->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, cursor->id);
+
+ if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
+ cursor->max_stride = i845_cursor_max_stride;
+ cursor->update_arm = i845_cursor_update_arm;
+ cursor->disable_arm = i845_cursor_disable_arm;
+ cursor->get_hw_state = i845_cursor_get_hw_state;
+ cursor->check_plane = i845_check_cursor;
+ } else {
+ cursor->max_stride = i9xx_cursor_max_stride;
+ cursor->update_arm = i9xx_cursor_update_arm;
+ cursor->disable_arm = i9xx_cursor_disable_arm;
+ cursor->get_hw_state = i9xx_cursor_get_hw_state;
+ cursor->check_plane = i9xx_check_cursor;
+ }
+
+ cursor->cursor.base = ~0;
+ cursor->cursor.cntl = ~0;
+
+ if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv))
+ cursor->cursor.size = ~0;
+
+ modifiers = intel_fb_plane_get_modifiers(dev_priv, INTEL_PLANE_CAP_NONE);
+
+ ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
+ 0, &intel_cursor_plane_funcs,
+ intel_cursor_formats,
+ ARRAY_SIZE(intel_cursor_formats),
+ modifiers,
+ DRM_PLANE_TYPE_CURSOR,
+ "cursor %c", pipe_name(pipe));
+
+ kfree(modifiers);
+
+ if (ret)
+ goto fail;
+
+ if (DISPLAY_VER(dev_priv) >= 4)
+ drm_plane_create_rotation_property(&cursor->base,
+ DRM_MODE_ROTATE_0,
+ DRM_MODE_ROTATE_0 |
+ DRM_MODE_ROTATE_180);
+
+ zpos = RUNTIME_INFO(dev_priv)->num_sprites[pipe] + 1;
+ drm_plane_create_zpos_immutable_property(&cursor->base, zpos);
+
+ if (DISPLAY_VER(dev_priv) >= 12)
+ drm_plane_enable_fb_damage_clips(&cursor->base);
+
+ intel_plane_helper_add(cursor);
+
+ return cursor;
+
+fail:
+ intel_plane_free(cursor);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_cursor.h b/drivers/gpu/drm/i915/display/intel_cursor.h
new file mode 100644
index 000000000..ce333bf4c
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_cursor.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef _INTEL_CURSOR_H_
+#define _INTEL_CURSOR_H_
+
+enum pipe;
+struct drm_i915_private;
+struct intel_plane;
+
+struct intel_plane *
+intel_cursor_plane_create(struct drm_i915_private *dev_priv,
+ enum pipe pipe);
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
new file mode 100644
index 000000000..706e2d956
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -0,0 +1,4525 @@
+/*
+ * Copyright © 2012 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eugeni Dodonov <eugeni.dodonov@intel.com>
+ *
+ */
+
+#include <linux/string_helpers.h>
+
+#include <drm/display/drm_scdc_helper.h>
+#include <drm/drm_privacy_screen_consumer.h>
+
+#include "i915_drv.h"
+#include "intel_audio.h"
+#include "intel_audio_regs.h"
+#include "intel_backlight.h"
+#include "intel_combo_phy.h"
+#include "intel_combo_phy_regs.h"
+#include "intel_connector.h"
+#include "intel_crtc.h"
+#include "intel_ddi.h"
+#include "intel_ddi_buf_trans.h"
+#include "intel_de.h"
+#include "intel_display_power.h"
+#include "intel_display_types.h"
+#include "intel_dkl_phy.h"
+#include "intel_dp.h"
+#include "intel_dp_link_training.h"
+#include "intel_dp_mst.h"
+#include "intel_dpio_phy.h"
+#include "intel_dsi.h"
+#include "intel_fdi.h"
+#include "intel_fifo_underrun.h"
+#include "intel_gmbus.h"
+#include "intel_hdcp.h"
+#include "intel_hdmi.h"
+#include "intel_hotplug.h"
+#include "intel_lspcon.h"
+#include "intel_pps.h"
+#include "intel_psr.h"
+#include "intel_quirks.h"
+#include "intel_snps_phy.h"
+#include "intel_sprite.h"
+#include "intel_tc.h"
+#include "intel_tc_phy_regs.h"
+#include "intel_vdsc.h"
+#include "intel_vrr.h"
+#include "skl_scaler.h"
+#include "skl_universal_plane.h"
+
+static const u8 index_to_dp_signal_levels[] = {
+ [0] = DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0,
+ [1] = DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1,
+ [2] = DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2,
+ [3] = DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3,
+ [4] = DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0,
+ [5] = DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1,
+ [6] = DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2,
+ [7] = DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0,
+ [8] = DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1,
+ [9] = DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0,
+};
+
+static int intel_ddi_hdmi_level(struct intel_encoder *encoder,
+ const struct intel_ddi_buf_trans *trans)
+{
+ int level;
+
+ level = intel_bios_hdmi_level_shift(encoder);
+ if (level < 0)
+ level = trans->hdmi_default_entry;
+
+ return level;
+}
+
+static bool has_buf_trans_select(struct drm_i915_private *i915)
+{
+ return DISPLAY_VER(i915) < 10 && !IS_BROXTON(i915);
+}
+
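+/* I_boost is only present on display version 9, excluding Broxton. */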
+static bool has_iboost(struct drm_i915_private *i915)
+{
+ return DISPLAY_VER(i915) == 9 && !IS_BROXTON(i915);
+}
+
+/*
+ * Starting with Haswell, DDI port buffers must be programmed with correct
+ * values in advance. This function programs the correct values for
+ * DP/eDP/FDI use cases.
+ */
+void hsw_prepare_dp_ddi_buffers(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ u32 iboost_bit = 0;
+ int i, n_entries;
+ enum port port = encoder->port;
+ const struct intel_ddi_buf_trans *trans;
+
+ trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans))
+ return;
+
+ /* If we're boosting the current, set bit 31 of trans1 */
+ if (has_iboost(dev_priv) &&
+ intel_bios_encoder_dp_boost_level(encoder->devdata))
+ iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE;
+
+ for (i = 0; i < n_entries; i++) {
+ intel_de_write(dev_priv, DDI_BUF_TRANS_LO(port, i),
+ trans->entries[i].hsw.trans1 | iboost_bit);
+ intel_de_write(dev_priv, DDI_BUF_TRANS_HI(port, i),
+ trans->entries[i].hsw.trans2);
+ }
+}
+
+/*
+ * Starting with Haswell, DDI port buffers must be programmed with correct
+ * values in advance. This function programs the correct values for
+ * HDMI/DVI use cases.
+ */
+static void hsw_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ int level = intel_ddi_level(encoder, crtc_state, 0);
+ u32 iboost_bit = 0;
+ int n_entries;
+ enum port port = encoder->port;
+ const struct intel_ddi_buf_trans *trans;
+
+ trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans))
+ return;
+
+ /* If we're boosting the current, set bit 31 of trans1 */
+ if (has_iboost(dev_priv) &&
+ intel_bios_encoder_hdmi_boost_level(encoder->devdata))
+ iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE;
+
+ /* Entry 9 is for HDMI: */
+ intel_de_write(dev_priv, DDI_BUF_TRANS_LO(port, 9),
+ trans->entries[level].hsw.trans1 | iboost_bit);
+ intel_de_write(dev_priv, DDI_BUF_TRANS_HI(port, 9),
+ trans->entries[level].hsw.trans2);
+}
+
+void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ if (IS_BROXTON(dev_priv)) {
+ udelay(16);
+ return;
+ }
+
+ if (wait_for_us((intel_de_read(dev_priv, DDI_BUF_CTL(port)) &
+ DDI_BUF_IS_IDLE), 8))
+ drm_err(&dev_priv->drm, "Timeout waiting for DDI BUF %c to get idle\n",
+ port_name(port));
+}
+
+static void intel_wait_ddi_buf_active(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ int ret;
+
+ /* Wait > 518 usecs for DDI_BUF_CTL to be non-idle */
+ if (DISPLAY_VER(dev_priv) < 10) {
+ usleep_range(518, 1000);
+ return;
+ }
+
+ ret = _wait_for(!(intel_de_read(dev_priv, DDI_BUF_CTL(port)) &
+ DDI_BUF_IS_IDLE), IS_DG2(dev_priv) ? 1200 : 500, 10, 10);
+
+ if (ret)
+ drm_err(&dev_priv->drm, "Timeout waiting for DDI BUF %c to get active\n",
+ port_name(port));
+}
+
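+/* Translate a shared DPLL id into a HSW PORT_CLK_SEL value. */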
+static u32 hsw_pll_to_ddi_pll_sel(const struct intel_shared_dpll *pll)
+{
+ switch (pll->info->id) {
+ case DPLL_ID_WRPLL1:
+ return PORT_CLK_SEL_WRPLL1;
+ case DPLL_ID_WRPLL2:
+ return PORT_CLK_SEL_WRPLL2;
+ case DPLL_ID_SPLL:
+ return PORT_CLK_SEL_SPLL;
+ case DPLL_ID_LCPLL_810:
+ return PORT_CLK_SEL_LCPLL_810;
+ case DPLL_ID_LCPLL_1350:
+ return PORT_CLK_SEL_LCPLL_1350;
+ case DPLL_ID_LCPLL_2700:
+ return PORT_CLK_SEL_LCPLL_2700;
+ default:
+ MISSING_CASE(pll->info->id);
+ return PORT_CLK_SEL_NONE;
+ }
+}
+
+static u32 icl_pll_to_ddi_clk_sel(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ int clock = crtc_state->port_clock;
+ const enum intel_dpll_id id = pll->info->id;
+
+ switch (id) {
+ default:
+ /*
+ * DPLL_ID_ICL_DPLL0 and DPLL_ID_ICL_DPLL1 should not be used
+ * here, so do warn if one gets passed in
+ */
+ MISSING_CASE(id);
+ return DDI_CLK_SEL_NONE;
+ case DPLL_ID_ICL_TBTPLL:
+ switch (clock) {
+ case 162000:
+ return DDI_CLK_SEL_TBT_162;
+ case 270000:
+ return DDI_CLK_SEL_TBT_270;
+ case 540000:
+ return DDI_CLK_SEL_TBT_540;
+ case 810000:
+ return DDI_CLK_SEL_TBT_810;
+ default:
+ MISSING_CASE(clock);
+ return DDI_CLK_SEL_NONE;
+ }
+ case DPLL_ID_ICL_MGPLL1:
+ case DPLL_ID_ICL_MGPLL2:
+ case DPLL_ID_ICL_MGPLL3:
+ case DPLL_ID_ICL_MGPLL4:
+ case DPLL_ID_TGL_MGPLL5:
+ case DPLL_ID_TGL_MGPLL6:
+ return DDI_CLK_SEL_MG;
+ }
+}
+
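+/* Map a port clock in kHz to the DDI_BUF_CTL PHY link rate select field. */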
+static u32 ddi_buf_phy_link_rate(int port_clock)
+{
+ switch (port_clock) {
+ case 162000:
+ return DDI_BUF_PHY_LINK_RATE(0);
+ case 216000:
+ return DDI_BUF_PHY_LINK_RATE(4);
+ case 243000:
+ return DDI_BUF_PHY_LINK_RATE(5);
+ case 270000:
+ return DDI_BUF_PHY_LINK_RATE(1);
+ case 324000:
+ return DDI_BUF_PHY_LINK_RATE(6);
+ case 432000:
+ return DDI_BUF_PHY_LINK_RATE(7);
+ case 540000:
+ return DDI_BUF_PHY_LINK_RATE(2);
+ case 810000:
+ return DDI_BUF_PHY_LINK_RATE(3);
+ default:
+ MISSING_CASE(port_clock);
+ return DDI_BUF_PHY_LINK_RATE(0);
+ }
+}
+
+static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+
+ /* DDI_BUF_CTL_ENABLE will be set by intel_ddi_prepare_link_retrain() later */
+ intel_dp->DP = dig_port->saved_port_bits |
+ DDI_PORT_WIDTH(crtc_state->lane_count) |
+ DDI_BUF_TRANS_SELECT(0);
+
+ if (IS_ALDERLAKE_P(i915) && intel_phy_is_tc(i915, phy)) {
+ intel_dp->DP |= ddi_buf_phy_link_rate(crtc_state->port_clock);
+ if (!intel_tc_port_in_tbt_alt_mode(dig_port))
+ intel_dp->DP |= DDI_BUF_CTL_TC_PHY_OWNERSHIP;
+ }
+}
+
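+/* Read back the TBT PLL link clock in kHz from DDI_CLK_SEL. */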
+static int icl_calc_tbt_pll_link(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ u32 val = intel_de_read(dev_priv, DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK;
+
+ switch (val) {
+ case DDI_CLK_SEL_NONE:
+ return 0;
+ case DDI_CLK_SEL_TBT_162:
+ return 162000;
+ case DDI_CLK_SEL_TBT_270:
+ return 270000;
+ case DDI_CLK_SEL_TBT_540:
+ return 540000;
+ case DDI_CLK_SEL_TBT_810:
+ return 810000;
+ default:
+ MISSING_CASE(val);
+ return 0;
+ }
+}
+
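+/* Fill in the adjusted mode's dotclock during state readout. */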
+static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
+{
+ /* CRT dotclock is determined via other means */
+ if (pipe_config->has_pch_encoder)
+ return;
+
+ pipe_config->hw.adjusted_mode.crtc_clock =
+ intel_crtc_dotclock(pipe_config);
+}
+
+void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ u32 temp;
+
+ if (!intel_crtc_has_dp_encoder(crtc_state))
+ return;
+
+ drm_WARN_ON(&dev_priv->drm, transcoder_is_dsi(cpu_transcoder));
+
+ temp = DP_MSA_MISC_SYNC_CLOCK;
+
+ switch (crtc_state->pipe_bpp) {
+ case 18:
+ temp |= DP_MSA_MISC_6_BPC;
+ break;
+ case 24:
+ temp |= DP_MSA_MISC_8_BPC;
+ break;
+ case 30:
+ temp |= DP_MSA_MISC_10_BPC;
+ break;
+ case 36:
+ temp |= DP_MSA_MISC_12_BPC;
+ break;
+ default:
+ MISSING_CASE(crtc_state->pipe_bpp);
+ break;
+ }
+
+ /* nonsense combination */
+ drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range &&
+ crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
+
+ if (crtc_state->limited_color_range)
+ temp |= DP_MSA_MISC_COLOR_CEA_RGB;
+
+ /*
+ * As per DP 1.2 spec section 2.3.4.3 while sending
+ * YCBCR 444 signals we should program MSA MISC1/0 fields with
+ * colorspace information.
+ */
+ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
+ temp |= DP_MSA_MISC_COLOR_YCBCR_444_BT709;
+
+ /*
+ * As per DP 1.4a spec section 2.2.4.3 [MSA Field for Indication
+ * of Color Encoding Format and Content Color Gamut] while sending
+ * YCBCR 420, HDR BT.2020 signals we should program MSA MISC1 fields
+ * which indicate VSC SDP for the Pixel Encoding/Colorimetry Format.
+ */
+ if (intel_dp_needs_vsc_sdp(crtc_state, conn_state))
+ temp |= DP_MSA_MISC_COLOR_VSC_SDP;
+
+ intel_de_write(dev_priv, TRANS_MSA_MISC(cpu_transcoder), temp);
+}
+
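+/*
+ * Port sync master select encoding: the EDP transcoder is 0, all other
+ * transcoders are offset by one.
+ */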
+static u32 bdw_trans_port_sync_master_select(enum transcoder master_transcoder)
+{
+ if (master_transcoder == TRANSCODER_EDP)
+ return 0;
+ else
+ return master_transcoder + 1;
+}
+
+static void
+intel_ddi_config_transcoder_dp2(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ u32 val = 0;
+
+ if (intel_dp_is_uhbr(crtc_state))
+ val = TRANS_DP2_128B132B_CHANNEL_CODING;
+
+ intel_de_write(i915, TRANS_DP2_CTL(cpu_transcoder), val);
+}
+
+/*
+ * Returns the TRANS_DDI_FUNC_CTL value based on CRTC state.
+ *
+ * Only intended to be used by intel_ddi_enable_transcoder_func() and
+ * intel_ddi_config_transcoder_func().
+ */
+static u32
+intel_ddi_transcoder_func_reg_val_get(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ enum port port = encoder->port;
+ u32 temp;
+
+ /* Enable TRANS_DDI_FUNC_CTL for the pipe to work in HDMI mode */
+ temp = TRANS_DDI_FUNC_ENABLE;
+ if (DISPLAY_VER(dev_priv) >= 12)
+ temp |= TGL_TRANS_DDI_SELECT_PORT(port);
+ else
+ temp |= TRANS_DDI_SELECT_PORT(port);
+
+ switch (crtc_state->pipe_bpp) {
+ default:
+ MISSING_CASE(crtc_state->pipe_bpp);
+ fallthrough;
+ case 18:
+ temp |= TRANS_DDI_BPC_6;
+ break;
+ case 24:
+ temp |= TRANS_DDI_BPC_8;
+ break;
+ case 30:
+ temp |= TRANS_DDI_BPC_10;
+ break;
+ case 36:
+ temp |= TRANS_DDI_BPC_12;
+ break;
+ }
+
+ if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_PVSYNC)
+ temp |= TRANS_DDI_PVSYNC;
+ if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_PHSYNC)
+ temp |= TRANS_DDI_PHSYNC;
+
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ switch (pipe) {
+ default:
+ MISSING_CASE(pipe);
+ fallthrough;
+ case PIPE_A:
+ /*
+ * On Haswell, we can only use the always-on power well for
+ * eDP when not using the panel fitter, and when not using
+ * motion blur mitigation (which we don't support).
+ */
+ if (crtc_state->pch_pfit.force_thru)
+ temp |= TRANS_DDI_EDP_INPUT_A_ONOFF;
+ else
+ temp |= TRANS_DDI_EDP_INPUT_A_ON;
+ break;
+ case PIPE_B:
+ temp |= TRANS_DDI_EDP_INPUT_B_ONOFF;
+ break;
+ case PIPE_C:
+ temp |= TRANS_DDI_EDP_INPUT_C_ONOFF;
+ break;
+ }
+ }
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
+ if (crtc_state->has_hdmi_sink)
+ temp |= TRANS_DDI_MODE_SELECT_HDMI;
+ else
+ temp |= TRANS_DDI_MODE_SELECT_DVI;
+
+ if (crtc_state->hdmi_scrambling)
+ temp |= TRANS_DDI_HDMI_SCRAMBLING;
+ if (crtc_state->hdmi_high_tmds_clock_ratio)
+ temp |= TRANS_DDI_HIGH_TMDS_CHAR_RATE;
+ } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
+ temp |= TRANS_DDI_MODE_SELECT_FDI_OR_128B132B;
+ temp |= (crtc_state->fdi_lanes - 1) << 1;
+ } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)) {
+ if (intel_dp_is_uhbr(crtc_state))
+ temp |= TRANS_DDI_MODE_SELECT_FDI_OR_128B132B;
+ else
+ temp |= TRANS_DDI_MODE_SELECT_DP_MST;
+ temp |= DDI_PORT_WIDTH(crtc_state->lane_count);
+
+ if (DISPLAY_VER(dev_priv) >= 12) {
+ enum transcoder master;
+
+ master = crtc_state->mst_master_transcoder;
+ drm_WARN_ON(&dev_priv->drm,
+ master == INVALID_TRANSCODER);
+ temp |= TRANS_DDI_MST_TRANSPORT_SELECT(master);
+ }
+ } else {
+ temp |= TRANS_DDI_MODE_SELECT_DP_SST;
+ temp |= DDI_PORT_WIDTH(crtc_state->lane_count);
+ }
+
+ if (IS_DISPLAY_VER(dev_priv, 8, 10) &&
+ crtc_state->master_transcoder != INVALID_TRANSCODER) {
+ u8 master_select =
+ bdw_trans_port_sync_master_select(crtc_state->master_transcoder);
+
+ temp |= TRANS_DDI_PORT_SYNC_ENABLE |
+ TRANS_DDI_PORT_SYNC_MASTER_SELECT(master_select);
+ }
+
+ return temp;
+}
+
+void intel_ddi_enable_transcoder_func(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+
+ if (DISPLAY_VER(dev_priv) >= 11) {
+ enum transcoder master_transcoder = crtc_state->master_transcoder;
+ u32 ctl2 = 0;
+
+ if (master_transcoder != INVALID_TRANSCODER) {
+ u8 master_select =
+ bdw_trans_port_sync_master_select(master_transcoder);
+
+ ctl2 |= PORT_SYNC_MODE_ENABLE |
+ PORT_SYNC_MODE_MASTER_SELECT(master_select);
+ }
+
+ intel_de_write(dev_priv,
+ TRANS_DDI_FUNC_CTL2(cpu_transcoder), ctl2);
+ }
+
+ intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder),
+ intel_ddi_transcoder_func_reg_val_get(encoder,
+ crtc_state));
+}
+
+/*
+ * Same as intel_ddi_enable_transcoder_func(), but it does not set the enable
+ * bit.
+ */
+static void
+intel_ddi_config_transcoder_func(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ u32 ctl;
+
+ ctl = intel_ddi_transcoder_func_reg_val_get(encoder, crtc_state);
+ ctl &= ~TRANS_DDI_FUNC_ENABLE;
+ intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), ctl);
+}
+
+void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ u32 ctl;
+
+ if (DISPLAY_VER(dev_priv) >= 11)
+ intel_de_write(dev_priv,
+ TRANS_DDI_FUNC_CTL2(cpu_transcoder), 0);
+
+ ctl = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));
+
+ drm_WARN_ON(crtc->base.dev, ctl & TRANS_DDI_HDCP_SIGNALLING);
+
+ ctl &= ~TRANS_DDI_FUNC_ENABLE;
+
+ if (IS_DISPLAY_VER(dev_priv, 8, 10))
+ ctl &= ~(TRANS_DDI_PORT_SYNC_ENABLE |
+ TRANS_DDI_PORT_SYNC_MASTER_SELECT_MASK);
+
+ if (DISPLAY_VER(dev_priv) >= 12) {
+ if (!intel_dp_mst_is_master_trans(crtc_state)) {
+ ctl &= ~(TGL_TRANS_DDI_PORT_MASK |
+ TRANS_DDI_MODE_SELECT_MASK);
+ }
+ } else {
+ ctl &= ~(TRANS_DDI_PORT_MASK | TRANS_DDI_MODE_SELECT_MASK);
+ }
+
+ intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), ctl);
+
+ if (intel_has_quirk(dev_priv, QUIRK_INCREASE_DDI_DISABLED_TIME) &&
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Quirk Increase DDI disabled time\n");
+ /* Quirk time at 100ms for reliable operation */
+ msleep(100);
+ }
+}
+
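+/*
+ * Set or clear the HDCP signalling bits in TRANS_DDI_FUNC_CTL, holding
+ * a display power reference across the register update.
+ */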
+int intel_ddi_toggle_hdcp_bits(struct intel_encoder *intel_encoder,
+ enum transcoder cpu_transcoder,
+ bool enable, u32 hdcp_mask)
+{
+ struct drm_device *dev = intel_encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ intel_wakeref_t wakeref;
+ int ret = 0;
+ u32 tmp;
+
+ wakeref = intel_display_power_get_if_enabled(dev_priv,
+ intel_encoder->power_domain);
+ if (drm_WARN_ON(dev, !wakeref))
+ return -ENXIO;
+
+ tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));
+ if (enable)
+ tmp |= hdcp_mask;
+ else
+ tmp &= ~hdcp_mask;
+ intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), tmp);
+ intel_display_power_put(dev_priv, intel_encoder->power_domain, wakeref);
+ return ret;
+}
+
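+/*
+ * Determine whether the connector is actually driven by its encoder by
+ * comparing the transcoder's DDI mode against the connector type.
+ */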
+bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
+{
+ struct drm_device *dev = intel_connector->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_encoder *encoder = intel_attached_encoder(intel_connector);
+ int type = intel_connector->base.connector_type;
+ enum port port = encoder->port;
+ enum transcoder cpu_transcoder;
+ intel_wakeref_t wakeref;
+ enum pipe pipe = 0;
+ u32 tmp;
+ bool ret;
+
+ wakeref = intel_display_power_get_if_enabled(dev_priv,
+ encoder->power_domain);
+ if (!wakeref)
+ return false;
+
+ if (!encoder->get_hw_state(encoder, &pipe)) {
+ ret = false;
+ goto out;
+ }
+
+ if (HAS_TRANSCODER(dev_priv, TRANSCODER_EDP) && port == PORT_A)
+ cpu_transcoder = TRANSCODER_EDP;
+ else
+ cpu_transcoder = (enum transcoder) pipe;
+
+ tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));
+
+ switch (tmp & TRANS_DDI_MODE_SELECT_MASK) {
+ case TRANS_DDI_MODE_SELECT_HDMI:
+ case TRANS_DDI_MODE_SELECT_DVI:
+ ret = type == DRM_MODE_CONNECTOR_HDMIA;
+ break;
+
+ case TRANS_DDI_MODE_SELECT_DP_SST:
+ ret = type == DRM_MODE_CONNECTOR_eDP ||
+ type == DRM_MODE_CONNECTOR_DisplayPort;
+ break;
+
+ case TRANS_DDI_MODE_SELECT_DP_MST:
+ /*
+ * If the transcoder is in MST state then the
+ * connector isn't connected.
+ */
+ ret = false;
+ break;
+
+ case TRANS_DDI_MODE_SELECT_FDI_OR_128B132B:
+ if (HAS_DP20(dev_priv))
+ /* 128b/132b */
+ ret = false;
+ else
+ /* FDI */
+ ret = type == DRM_MODE_CONNECTOR_VGA;
+ break;
+
+ default:
+ ret = false;
+ break;
+ }
+
+out:
+ intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
+
+ return ret;
+}
+
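+/*
+ * Work out which pipes this encoder is driving, and whether it is in
+ * DP MST mode, from the DDI and transcoder state.
+ */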
+static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
+ u8 *pipe_mask, bool *is_dp_mst)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ enum port port = encoder->port;
+ intel_wakeref_t wakeref;
+ enum pipe p;
+ u32 tmp;
+ u8 mst_pipe_mask;
+
+ *pipe_mask = 0;
+ *is_dp_mst = false;
+
+ wakeref = intel_display_power_get_if_enabled(dev_priv,
+ encoder->power_domain);
+ if (!wakeref)
+ return;
+
+ tmp = intel_de_read(dev_priv, DDI_BUF_CTL(port));
+ if (!(tmp & DDI_BUF_CTL_ENABLE))
+ goto out;
+
+ if (HAS_TRANSCODER(dev_priv, TRANSCODER_EDP) && port == PORT_A) {
+ tmp = intel_de_read(dev_priv,
+ TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
+
+ switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
+ default:
+ MISSING_CASE(tmp & TRANS_DDI_EDP_INPUT_MASK);
+ fallthrough;
+ case TRANS_DDI_EDP_INPUT_A_ON:
+ case TRANS_DDI_EDP_INPUT_A_ONOFF:
+ *pipe_mask = BIT(PIPE_A);
+ break;
+ case TRANS_DDI_EDP_INPUT_B_ONOFF:
+ *pipe_mask = BIT(PIPE_B);
+ break;
+ case TRANS_DDI_EDP_INPUT_C_ONOFF:
+ *pipe_mask = BIT(PIPE_C);
+ break;
+ }
+
+ goto out;
+ }
+
+ mst_pipe_mask = 0;
+ for_each_pipe(dev_priv, p) {
+ enum transcoder cpu_transcoder = (enum transcoder)p;
+ unsigned int port_mask, ddi_select;
+ intel_wakeref_t trans_wakeref;
+
+ trans_wakeref = intel_display_power_get_if_enabled(dev_priv,
+ POWER_DOMAIN_TRANSCODER(cpu_transcoder));
+ if (!trans_wakeref)
+ continue;
+
+ if (DISPLAY_VER(dev_priv) >= 12) {
+ port_mask = TGL_TRANS_DDI_PORT_MASK;
+ ddi_select = TGL_TRANS_DDI_SELECT_PORT(port);
+ } else {
+ port_mask = TRANS_DDI_PORT_MASK;
+ ddi_select = TRANS_DDI_SELECT_PORT(port);
+ }
+
+ tmp = intel_de_read(dev_priv,
+ TRANS_DDI_FUNC_CTL(cpu_transcoder));
+ intel_display_power_put(dev_priv, POWER_DOMAIN_TRANSCODER(cpu_transcoder),
+ trans_wakeref);
+
+ if ((tmp & port_mask) != ddi_select)
+ continue;
+
+ if ((tmp & TRANS_DDI_MODE_SELECT_MASK) == TRANS_DDI_MODE_SELECT_DP_MST ||
+ (HAS_DP20(dev_priv) &&
+ (tmp & TRANS_DDI_MODE_SELECT_MASK) == TRANS_DDI_MODE_SELECT_FDI_OR_128B132B))
+ mst_pipe_mask |= BIT(p);
+
+ *pipe_mask |= BIT(p);
+ }
+
+ if (!*pipe_mask)
+ drm_dbg_kms(&dev_priv->drm,
+ "No pipe for [ENCODER:%d:%s] found\n",
+ encoder->base.base.id, encoder->base.name);
+
+ if (!mst_pipe_mask && hweight8(*pipe_mask) > 1) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Multiple pipes for [ENCODER:%d:%s] (pipe_mask %02x)\n",
+ encoder->base.base.id, encoder->base.name,
+ *pipe_mask);
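+		/* Recover by keeping only the lowest numbered pipe (ffs() is 1-based) */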
+ *pipe_mask = BIT(ffs(*pipe_mask) - 1);
+ }
+
+ if (mst_pipe_mask && mst_pipe_mask != *pipe_mask)
+ drm_dbg_kms(&dev_priv->drm,
+ "Conflicting MST and non-MST state for [ENCODER:%d:%s] (pipe_mask %02x mst_pipe_mask %02x)\n",
+ encoder->base.base.id, encoder->base.name,
+ *pipe_mask, mst_pipe_mask);
+ else
+ *is_dp_mst = mst_pipe_mask;
+
+out:
+ if (*pipe_mask && (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))) {
+ tmp = intel_de_read(dev_priv, BXT_PHY_CTL(port));
+ if ((tmp & (BXT_PHY_CMNLANE_POWERDOWN_ACK |
+ BXT_PHY_LANE_POWERDOWN_ACK |
+ BXT_PHY_LANE_ENABLED)) != BXT_PHY_LANE_ENABLED)
+ drm_err(&dev_priv->drm,
+ "[ENCODER:%d:%s] enabled but PHY powered down? (PHY_CTL %08x)\n",
+ encoder->base.base.id, encoder->base.name, tmp);
+ }
+
+ intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
+}
+
+bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe)
+{
+ u8 pipe_mask;
+ bool is_mst;
+
+ intel_ddi_get_encoder_pipes(encoder, &pipe_mask, &is_mst);
+
+ if (is_mst || !pipe_mask)
+ return false;
+
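+	/* Exactly one bit is set here; ffs() returns its 1-based position */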
+ *pipe = ffs(pipe_mask) - 1;
+
+ return true;
+}
+
+static enum intel_display_power_domain
+intel_ddi_main_link_aux_domain(struct intel_digital_port *dig_port)
+{
+ /* ICL+ HW requires corresponding AUX IOs to be powered up for PSR with
+ * DC states enabled at the same time, while for driver initiated AUX
+ * transfers we need the same AUX IOs to be powered but with DC states
+ * disabled. Accordingly use the AUX power domain here which leaves DC
+ * states enabled.
+ * However, for non-A AUX ports the corresponding non-EDP transcoders
+ * would have already enabled power well 2 and DC_OFF. This means we can
+ * acquire a wider POWER_DOMAIN_AUX_{B,C,D,F} reference instead of a
+ * specific AUX_IO reference without powering up any extra wells.
+ * Note that PSR is enabled only on Port A even though this function
+ * returns the correct domain for other ports too.
+ */
+ return dig_port->aux_ch == AUX_CH_A ? POWER_DOMAIN_AUX_IO_A :
+ intel_aux_power_domain(dig_port);
+}
+
+static void intel_ddi_get_power_domains(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_digital_port *dig_port;
+ enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
+
+ /*
+	 * TODO: Add support for MST encoders. At the moment, the following
+	 * should never happen, since fake-MST encoders don't set their
+	 * get_power_domains() hook.
+ */
+ if (drm_WARN_ON(&dev_priv->drm,
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)))
+ return;
+
+ dig_port = enc_to_dig_port(encoder);
+
+ if (!intel_tc_port_in_tbt_alt_mode(dig_port)) {
+ drm_WARN_ON(&dev_priv->drm, dig_port->ddi_io_wakeref);
+ dig_port->ddi_io_wakeref = intel_display_power_get(dev_priv,
+ dig_port->ddi_io_power_domain);
+ }
+
+ /*
+ * AUX power is only needed for (e)DP mode, and for HDMI mode on TC
+ * ports.
+ */
+ if (intel_crtc_has_dp_encoder(crtc_state) ||
+ intel_phy_is_tc(dev_priv, phy)) {
+ drm_WARN_ON(&dev_priv->drm, dig_port->aux_wakeref);
+ dig_port->aux_wakeref =
+ intel_display_power_get(dev_priv,
+ intel_ddi_main_link_aux_domain(dig_port));
+ }
+}
+
+void intel_ddi_enable_pipe_clock(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
+ u32 val;
+
+ if (cpu_transcoder != TRANSCODER_EDP) {
+ if (DISPLAY_VER(dev_priv) >= 13)
+ val = TGL_TRANS_CLK_SEL_PORT(phy);
+ else if (DISPLAY_VER(dev_priv) >= 12)
+ val = TGL_TRANS_CLK_SEL_PORT(encoder->port);
+ else
+ val = TRANS_CLK_SEL_PORT(encoder->port);
+
+ intel_de_write(dev_priv, TRANS_CLK_SEL(cpu_transcoder), val);
+ }
+}
+
+void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+
+ if (cpu_transcoder != TRANSCODER_EDP) {
+ if (DISPLAY_VER(dev_priv) >= 12)
+ intel_de_write(dev_priv,
+ TRANS_CLK_SEL(cpu_transcoder),
+ TGL_TRANS_CLK_SEL_DISABLED);
+ else
+ intel_de_write(dev_priv,
+ TRANS_CLK_SEL(cpu_transcoder),
+ TRANS_CLK_SEL_DISABLED);
+ }
+}
+
+static void _skl_ddi_set_iboost(struct drm_i915_private *dev_priv,
+ enum port port, u8 iboost)
+{
+ u32 tmp;
+
+ tmp = intel_de_read(dev_priv, DISPIO_CR_TX_BMU_CR0);
+ tmp &= ~(BALANCE_LEG_MASK(port) | BALANCE_LEG_DISABLE(port));
+ if (iboost)
+ tmp |= iboost << BALANCE_LEG_SHIFT(port);
+ else
+ tmp |= BALANCE_LEG_DISABLE(port);
+ intel_de_write(dev_priv, DISPIO_CR_TX_BMU_CR0, tmp);
+}
+
+static void skl_ddi_set_iboost(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int level)
+{
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ u8 iboost;
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ iboost = intel_bios_encoder_hdmi_boost_level(encoder->devdata);
+ else
+ iboost = intel_bios_encoder_dp_boost_level(encoder->devdata);
+
+ if (iboost == 0) {
+ const struct intel_ddi_buf_trans *trans;
+ int n_entries;
+
+ trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans))
+ return;
+
+ iboost = trans->entries[level].hsw.i_boost;
+ }
+
+ /* Make sure that the requested I_boost is valid */
+ if (iboost && iboost != 0x1 && iboost != 0x3 && iboost != 0x7) {
+ drm_err(&dev_priv->drm, "Invalid I_boost value %u\n", iboost);
+ return;
+ }
+
+ _skl_ddi_set_iboost(dev_priv, encoder->port, iboost);
+
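+	/* DDI A and E share buffer lanes; a 4-lane port A drives port E's lanes too */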
+ if (encoder->port == PORT_A && dig_port->max_lanes == 4)
+ _skl_ddi_set_iboost(dev_priv, PORT_E, iboost);
+}
+
+static u8 intel_ddi_dp_voltage_max(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ int n_entries;
+
+ encoder->get_buf_trans(encoder, crtc_state, &n_entries);
+
+ if (drm_WARN_ON(&dev_priv->drm, n_entries < 1))
+ n_entries = 1;
+ if (drm_WARN_ON(&dev_priv->drm,
+ n_entries > ARRAY_SIZE(index_to_dp_signal_levels)))
+ n_entries = ARRAY_SIZE(index_to_dp_signal_levels);
+
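+	/* The last buf trans table entry determines the maximum supported vswing */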
+ return index_to_dp_signal_levels[n_entries - 1] &
+ DP_TRAIN_VOLTAGE_SWING_MASK;
+}
+
+/*
+ * We assume that the full set of pre-emphasis values can be
+ * used on all DDI platforms. Should that change we need to
+ * rethink this code.
+ */
+static u8 intel_ddi_dp_preemph_max(struct intel_dp *intel_dp)
+{
+ return DP_TRAIN_PRE_EMPH_LEVEL_3;
+}
+
+static u32 icl_combo_phy_loadgen_select(const struct intel_crtc_state *crtc_state,
+ int lane)
+{
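+	/* Loadgen select mapping per bspec; see the table in icl_combo_phy_set_signal_levels() */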
+ if (crtc_state->port_clock > 600000)
+ return 0;
+
+ if (crtc_state->lane_count == 4)
+ return lane >= 1 ? LOADGEN_SELECT : 0;
+ else
+ return lane == 1 || lane == 2 ? LOADGEN_SELECT : 0;
+}
+
+static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ const struct intel_ddi_buf_trans *trans;
+ enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
+ int n_entries, ln;
+ u32 val;
+
+ trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans))
+ return;
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ val = EDP4K2K_MODE_OVRD_EN | EDP4K2K_MODE_OVRD_OPTIMIZED;
+ intel_dp->hobl_active = is_hobl_buf_trans(trans);
+ intel_de_rmw(dev_priv, ICL_PORT_CL_DW10(phy), val,
+ intel_dp->hobl_active ? val : 0);
+ }
+
+ /* Set PORT_TX_DW5 */
+ val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy));
+ val &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK |
+ TAP2_DISABLE | TAP3_DISABLE);
+ val |= SCALING_MODE_SEL(0x2);
+ val |= RTERM_SELECT(0x6);
+ val |= TAP3_DISABLE;
+ intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), val);
+
+ /* Program PORT_TX_DW2 */
+ for (ln = 0; ln < 4; ln++) {
+ int level = intel_ddi_level(encoder, crtc_state, ln);
+
+ intel_de_rmw(dev_priv, ICL_PORT_TX_DW2_LN(ln, phy),
+ SWING_SEL_UPPER_MASK | SWING_SEL_LOWER_MASK | RCOMP_SCALAR_MASK,
+ SWING_SEL_UPPER(trans->entries[level].icl.dw2_swing_sel) |
+ SWING_SEL_LOWER(trans->entries[level].icl.dw2_swing_sel) |
+ RCOMP_SCALAR(0x98));
+ }
+
+ /* Program PORT_TX_DW4 */
+ /* We cannot write to GRP. It would overwrite individual loadgen. */
+ for (ln = 0; ln < 4; ln++) {
+ int level = intel_ddi_level(encoder, crtc_state, ln);
+
+ intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy),
+ POST_CURSOR_1_MASK | POST_CURSOR_2_MASK | CURSOR_COEFF_MASK,
+ POST_CURSOR_1(trans->entries[level].icl.dw4_post_cursor_1) |
+ POST_CURSOR_2(trans->entries[level].icl.dw4_post_cursor_2) |
+ CURSOR_COEFF(trans->entries[level].icl.dw4_cursor_coeff));
+ }
+
+ /* Program PORT_TX_DW7 */
+ for (ln = 0; ln < 4; ln++) {
+ int level = intel_ddi_level(encoder, crtc_state, ln);
+
+ intel_de_rmw(dev_priv, ICL_PORT_TX_DW7_LN(ln, phy),
+ N_SCALAR_MASK,
+ N_SCALAR(trans->entries[level].icl.dw7_n_scalar));
+ }
+}
+
+static void icl_combo_phy_set_signal_levels(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
+ u32 val;
+ int ln;
+
+ /*
+ * 1. If port type is eDP or DP,
+ * set PORT_PCS_DW1 cmnkeeper_enable to 1b,
+ * else clear to 0b.
+ */
+ val = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_LN(0, phy));
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ val &= ~COMMON_KEEPER_EN;
+ else
+ val |= COMMON_KEEPER_EN;
+ intel_de_write(dev_priv, ICL_PORT_PCS_DW1_GRP(phy), val);
+
+ /* 2. Program loadgen select */
+ /*
+ * Program PORT_TX_DW4 depending on Bit rate and used lanes
+ * <= 6 GHz and 4 lanes (LN0=0, LN1=1, LN2=1, LN3=1)
+ * <= 6 GHz and 1,2 lanes (LN0=0, LN1=1, LN2=1, LN3=0)
+ * > 6 GHz (LN0=0, LN1=0, LN2=0, LN3=0)
+ */
+ for (ln = 0; ln < 4; ln++) {
+ intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy),
+ LOADGEN_SELECT,
+ icl_combo_phy_loadgen_select(crtc_state, ln));
+ }
+
+ /* 3. Set PORT_CL_DW5 SUS Clock Config to 11b */
+ intel_de_rmw(dev_priv, ICL_PORT_CL_DW5(phy),
+ 0, SUS_CLOCK_CONFIG);
+
+ /* 4. Clear training enable to change swing values */
+ val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy));
+ val &= ~TX_TRAINING_EN;
+ intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), val);
+
+ /* 5. Program swing and de-emphasis */
+ icl_ddi_combo_vswing_program(encoder, crtc_state);
+
+ /* 6. Set training enable to trigger update */
+ val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy));
+ val |= TX_TRAINING_EN;
+ intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), val);
+}
+
+static void icl_mg_phy_set_signal_levels(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum tc_port tc_port = intel_port_to_tc(dev_priv, encoder->port);
+ const struct intel_ddi_buf_trans *trans;
+ int n_entries, ln;
+
+ if (intel_tc_port_in_tbt_alt_mode(enc_to_dig_port(encoder)))
+ return;
+
+ trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans))
+ return;
+
+ for (ln = 0; ln < 2; ln++) {
+ intel_de_rmw(dev_priv, MG_TX1_LINK_PARAMS(ln, tc_port),
+ CRI_USE_FS32, 0);
+ intel_de_rmw(dev_priv, MG_TX2_LINK_PARAMS(ln, tc_port),
+ CRI_USE_FS32, 0);
+ }
+
+ /* Program MG_TX_SWINGCTRL with values from vswing table */
+ for (ln = 0; ln < 2; ln++) {
+ int level;
+
+ level = intel_ddi_level(encoder, crtc_state, 2*ln+0);
+
+ intel_de_rmw(dev_priv, MG_TX1_SWINGCTRL(ln, tc_port),
+ CRI_TXDEEMPH_OVERRIDE_17_12_MASK,
+ CRI_TXDEEMPH_OVERRIDE_17_12(trans->entries[level].mg.cri_txdeemph_override_17_12));
+
+ level = intel_ddi_level(encoder, crtc_state, 2*ln+1);
+
+ intel_de_rmw(dev_priv, MG_TX2_SWINGCTRL(ln, tc_port),
+ CRI_TXDEEMPH_OVERRIDE_17_12_MASK,
+ CRI_TXDEEMPH_OVERRIDE_17_12(trans->entries[level].mg.cri_txdeemph_override_17_12));
+ }
+
+ /* Program MG_TX_DRVCTRL with values from vswing table */
+ for (ln = 0; ln < 2; ln++) {
+ int level;
+
+ level = intel_ddi_level(encoder, crtc_state, 2*ln+0);
+
+ intel_de_rmw(dev_priv, MG_TX1_DRVCTRL(ln, tc_port),
+ CRI_TXDEEMPH_OVERRIDE_11_6_MASK |
+ CRI_TXDEEMPH_OVERRIDE_5_0_MASK,
+ CRI_TXDEEMPH_OVERRIDE_11_6(trans->entries[level].mg.cri_txdeemph_override_11_6) |
+ CRI_TXDEEMPH_OVERRIDE_5_0(trans->entries[level].mg.cri_txdeemph_override_5_0) |
+ CRI_TXDEEMPH_OVERRIDE_EN);
+
+ level = intel_ddi_level(encoder, crtc_state, 2*ln+1);
+
+ intel_de_rmw(dev_priv, MG_TX2_DRVCTRL(ln, tc_port),
+ CRI_TXDEEMPH_OVERRIDE_11_6_MASK |
+ CRI_TXDEEMPH_OVERRIDE_5_0_MASK,
+ CRI_TXDEEMPH_OVERRIDE_11_6(trans->entries[level].mg.cri_txdeemph_override_11_6) |
+ CRI_TXDEEMPH_OVERRIDE_5_0(trans->entries[level].mg.cri_txdeemph_override_5_0) |
+ CRI_TXDEEMPH_OVERRIDE_EN);
+
+ /* FIXME: Program CRI_LOADGEN_SEL after the spec is updated */
+ }
+
+ /*
+	 * Program MG_CLKHUB<LN, port being used> with value from frequency table.
+	 * In case of Legacy mode on MG PHY, both TX1 and TX2 are enabled, so use
+	 * the values from the table for which both TX1 and TX2 are enabled.
+ */
+ for (ln = 0; ln < 2; ln++) {
+ intel_de_rmw(dev_priv, MG_CLKHUB(ln, tc_port),
+ CFG_LOW_RATE_LKREN_EN,
+ crtc_state->port_clock < 300000 ? CFG_LOW_RATE_LKREN_EN : 0);
+ }
+
+ /* Program the MG_TX_DCC<LN, port being used> based on the link frequency */
+ for (ln = 0; ln < 2; ln++) {
+ intel_de_rmw(dev_priv, MG_TX1_DCC(ln, tc_port),
+ CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK |
+ CFG_AMI_CK_DIV_OVERRIDE_EN,
+ crtc_state->port_clock > 500000 ?
+ CFG_AMI_CK_DIV_OVERRIDE_VAL(1) |
+ CFG_AMI_CK_DIV_OVERRIDE_EN : 0);
+
+ intel_de_rmw(dev_priv, MG_TX2_DCC(ln, tc_port),
+ CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK |
+ CFG_AMI_CK_DIV_OVERRIDE_EN,
+ crtc_state->port_clock > 500000 ?
+ CFG_AMI_CK_DIV_OVERRIDE_VAL(1) |
+ CFG_AMI_CK_DIV_OVERRIDE_EN : 0);
+ }
+
+ /* Program MG_TX_PISO_READLOAD with values from vswing table */
+ for (ln = 0; ln < 2; ln++) {
+ intel_de_rmw(dev_priv, MG_TX1_PISO_READLOAD(ln, tc_port),
+ 0, CRI_CALCINIT);
+ intel_de_rmw(dev_priv, MG_TX2_PISO_READLOAD(ln, tc_port),
+ 0, CRI_CALCINIT);
+ }
+}
+
+static void tgl_dkl_phy_set_signal_levels(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum tc_port tc_port = intel_port_to_tc(dev_priv, encoder->port);
+ const struct intel_ddi_buf_trans *trans;
+ int n_entries, ln;
+
+ if (intel_tc_port_in_tbt_alt_mode(enc_to_dig_port(encoder)))
+ return;
+
+ trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans))
+ return;
+
+ for (ln = 0; ln < 2; ln++) {
+ int level;
+
+ intel_dkl_phy_write(dev_priv, DKL_TX_PMD_LANE_SUS(tc_port), ln, 0);
+
+ level = intel_ddi_level(encoder, crtc_state, 2*ln+0);
+
+ intel_dkl_phy_rmw(dev_priv, DKL_TX_DPCNTL0(tc_port), ln,
+ DKL_TX_PRESHOOT_COEFF_MASK |
+ DKL_TX_DE_EMPAHSIS_COEFF_MASK |
+ DKL_TX_VSWING_CONTROL_MASK,
+ DKL_TX_PRESHOOT_COEFF(trans->entries[level].dkl.preshoot) |
+ DKL_TX_DE_EMPHASIS_COEFF(trans->entries[level].dkl.de_emphasis) |
+ DKL_TX_VSWING_CONTROL(trans->entries[level].dkl.vswing));
+
+ level = intel_ddi_level(encoder, crtc_state, 2*ln+1);
+
+ intel_dkl_phy_rmw(dev_priv, DKL_TX_DPCNTL1(tc_port), ln,
+ DKL_TX_PRESHOOT_COEFF_MASK |
+ DKL_TX_DE_EMPAHSIS_COEFF_MASK |
+ DKL_TX_VSWING_CONTROL_MASK,
+ DKL_TX_PRESHOOT_COEFF(trans->entries[level].dkl.preshoot) |
+ DKL_TX_DE_EMPHASIS_COEFF(trans->entries[level].dkl.de_emphasis) |
+ DKL_TX_VSWING_CONTROL(trans->entries[level].dkl.vswing));
+
+ intel_dkl_phy_rmw(dev_priv, DKL_TX_DPCNTL2(tc_port), ln,
+ DKL_TX_DP20BITMODE, 0);
+
+ if (IS_ALDERLAKE_P(dev_priv)) {
+ u32 val;
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
+ if (ln == 0) {
+ val = DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX1(0);
+ val |= DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2(2);
+ } else {
+ val = DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX1(3);
+ val |= DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2(3);
+ }
+ } else {
+ val = DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX1(0);
+ val |= DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2(0);
+ }
+
+ intel_dkl_phy_rmw(dev_priv, DKL_TX_DPCNTL2(tc_port), ln,
+ DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX1_MASK |
+ DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2_MASK,
+ val);
+ }
+ }
+}
+
+static int translate_signal_level(struct intel_dp *intel_dp,
+ u8 signal_levels)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(index_to_dp_signal_levels); i++) {
+ if (index_to_dp_signal_levels[i] == signal_levels)
+ return i;
+ }
+
+ drm_WARN(&i915->drm, 1,
+ "Unsupported voltage swing/pre-emphasis level: 0x%x\n",
+ signal_levels);
+
+ return 0;
+}
+
+static int intel_ddi_dp_level(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ int lane)
+{
+ u8 train_set = intel_dp->train_set[lane];
+
+ if (intel_dp_is_uhbr(crtc_state)) {
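+		/* 128b/132b link: the train set carries a TX FFE preset, not vswing/pre-emphasis */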
+ return train_set & DP_TX_FFE_PRESET_VALUE_MASK;
+ } else {
+ u8 signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
+ DP_TRAIN_PRE_EMPHASIS_MASK);
+
+ return translate_signal_level(intel_dp, signal_levels);
+ }
+}
+
+int intel_ddi_level(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int lane)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ const struct intel_ddi_buf_trans *trans;
+ int level, n_entries;
+
+ trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
+ if (drm_WARN_ON_ONCE(&i915->drm, !trans))
+ return 0;
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ level = intel_ddi_hdmi_level(encoder, trans);
+ else
+ level = intel_ddi_dp_level(enc_to_intel_dp(encoder), crtc_state,
+ lane);
+
+ if (drm_WARN_ON_ONCE(&i915->drm, level >= n_entries))
+ level = n_entries - 1;
+
+ return level;
+}
+
+static void
+hsw_set_signal_levels(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ int level = intel_ddi_level(encoder, crtc_state, 0);
+ enum port port = encoder->port;
+ u32 signal_levels;
+
+ if (has_iboost(dev_priv))
+ skl_ddi_set_iboost(encoder, crtc_state, level);
+
+ /* HDMI ignores the rest */
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ return;
+
+ signal_levels = DDI_BUF_TRANS_SELECT(level);
+
+ drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
+ signal_levels);
+
+ intel_dp->DP &= ~DDI_BUF_EMP_MASK;
+ intel_dp->DP |= signal_levels;
+
+ intel_de_write(dev_priv, DDI_BUF_CTL(port), intel_dp->DP);
+ intel_de_posting_read(dev_priv, DDI_BUF_CTL(port));
+}
+
+static void _icl_ddi_enable_clock(struct drm_i915_private *i915, i915_reg_t reg,
+ u32 clk_sel_mask, u32 clk_sel, u32 clk_off)
+{
+ mutex_lock(&i915->display.dpll.lock);
+
+ intel_de_rmw(i915, reg, clk_sel_mask, clk_sel);
+
+ /*
+ * "This step and the step before must be
+ * done with separate register writes."
+ */
+ intel_de_rmw(i915, reg, clk_off, 0);
+
+ mutex_unlock(&i915->display.dpll.lock);
+}
+
+static void _icl_ddi_disable_clock(struct drm_i915_private *i915, i915_reg_t reg,
+ u32 clk_off)
+{
+ mutex_lock(&i915->display.dpll.lock);
+
+ intel_de_rmw(i915, reg, 0, clk_off);
+
+ mutex_unlock(&i915->display.dpll.lock);
+}
+
+static bool _icl_ddi_is_clock_enabled(struct drm_i915_private *i915, i915_reg_t reg,
+ u32 clk_off)
+{
+ return !(intel_de_read(i915, reg) & clk_off);
+}
+
+static struct intel_shared_dpll *
+_icl_ddi_get_pll(struct drm_i915_private *i915, i915_reg_t reg,
+ u32 clk_sel_mask, u32 clk_sel_shift)
+{
+ enum intel_dpll_id id;
+
+ id = (intel_de_read(i915, reg) & clk_sel_mask) >> clk_sel_shift;
+
+ return intel_get_shared_dpll_by_id(i915, id);
+}
+
+static void adls_ddi_enable_clock(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+
+ if (drm_WARN_ON(&i915->drm, !pll))
+ return;
+
+ _icl_ddi_enable_clock(i915, ADLS_DPCLKA_CFGCR(phy),
+ ADLS_DPCLKA_CFGCR_DDI_CLK_SEL_MASK(phy),
+ pll->info->id << ADLS_DPCLKA_CFGCR_DDI_SHIFT(phy),
+ ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
+}
+
+static void adls_ddi_disable_clock(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+
+ _icl_ddi_disable_clock(i915, ADLS_DPCLKA_CFGCR(phy),
+ ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
+}
+
+static bool adls_ddi_is_clock_enabled(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+
+ return _icl_ddi_is_clock_enabled(i915, ADLS_DPCLKA_CFGCR(phy),
+ ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
+}
+
+static struct intel_shared_dpll *adls_ddi_get_pll(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+
+ return _icl_ddi_get_pll(i915, ADLS_DPCLKA_CFGCR(phy),
+ ADLS_DPCLKA_CFGCR_DDI_CLK_SEL_MASK(phy),
+ ADLS_DPCLKA_CFGCR_DDI_SHIFT(phy));
+}
+
+static void rkl_ddi_enable_clock(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+
+ if (drm_WARN_ON(&i915->drm, !pll))
+ return;
+
+ _icl_ddi_enable_clock(i915, ICL_DPCLKA_CFGCR0,
+ RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy),
+ RKL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy),
+ RKL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
+}
+
+static void rkl_ddi_disable_clock(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+
+ _icl_ddi_disable_clock(i915, ICL_DPCLKA_CFGCR0,
+ RKL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
+}
+
+static bool rkl_ddi_is_clock_enabled(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+
+ return _icl_ddi_is_clock_enabled(i915, ICL_DPCLKA_CFGCR0,
+ RKL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
+}
+
+static struct intel_shared_dpll *rkl_ddi_get_pll(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+
+ return _icl_ddi_get_pll(i915, ICL_DPCLKA_CFGCR0,
+ RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy),
+ RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy));
+}
+
+static void dg1_ddi_enable_clock(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+
+ if (drm_WARN_ON(&i915->drm, !pll))
+ return;
+
+ /*
+	 * If we fail this, something went very wrong: the first 2 PLLs should
+	 * be used by the first 2 phys and the last 2 PLLs by the last 2 phys.
+ */
+ if (drm_WARN_ON(&i915->drm,
+ (pll->info->id < DPLL_ID_DG1_DPLL2 && phy >= PHY_C) ||
+ (pll->info->id >= DPLL_ID_DG1_DPLL2 && phy < PHY_C)))
+ return;
+
+ _icl_ddi_enable_clock(i915, DG1_DPCLKA_CFGCR0(phy),
+ DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy),
+ DG1_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy),
+ DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
+}
+
+static void dg1_ddi_disable_clock(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+
+ _icl_ddi_disable_clock(i915, DG1_DPCLKA_CFGCR0(phy),
+ DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
+}
+
+static bool dg1_ddi_is_clock_enabled(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+
+ return _icl_ddi_is_clock_enabled(i915, DG1_DPCLKA_CFGCR0(phy),
+ DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
+}
+
+static struct intel_shared_dpll *dg1_ddi_get_pll(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+ enum intel_dpll_id id;
+ u32 val;
+
+ val = intel_de_read(i915, DG1_DPCLKA_CFGCR0(phy));
+ val &= DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
+ val >>= DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy);
+ id = val;
+
+ /*
+ * _DG1_DPCLKA0_CFGCR0 maps between DPLL 0 and 1 with one bit for phy A
+ * and B while _DG1_DPCLKA1_CFGCR0 maps between DPLL 2 and 3 with one
+ * bit for phy C and D.
+ */
+ if (phy >= PHY_C)
+ id += DPLL_ID_DG1_DPLL2;
+
+ return intel_get_shared_dpll_by_id(i915, id);
+}
+
+static void icl_ddi_combo_enable_clock(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+
+ if (drm_WARN_ON(&i915->drm, !pll))
+ return;
+
+ _icl_ddi_enable_clock(i915, ICL_DPCLKA_CFGCR0,
+ ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy),
+ ICL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy),
+ ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
+}
+
+static void icl_ddi_combo_disable_clock(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+
+ _icl_ddi_disable_clock(i915, ICL_DPCLKA_CFGCR0,
+ ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
+}
+
+static bool icl_ddi_combo_is_clock_enabled(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+
+ return _icl_ddi_is_clock_enabled(i915, ICL_DPCLKA_CFGCR0,
+ ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
+}
+
+struct intel_shared_dpll *icl_ddi_combo_get_pll(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+
+ return _icl_ddi_get_pll(i915, ICL_DPCLKA_CFGCR0,
+ ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy),
+ ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy));
+}
+
+static void jsl_ddi_tc_enable_clock(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ enum port port = encoder->port;
+
+ if (drm_WARN_ON(&i915->drm, !pll))
+ return;
+
+ /*
+ * "For DDIC and DDID, program DDI_CLK_SEL to map the MG clock to the port.
+ * MG does not exist, but the programming is required to ungate DDIC and DDID."
+ */
+ intel_de_write(i915, DDI_CLK_SEL(port), DDI_CLK_SEL_MG);
+
+ icl_ddi_combo_enable_clock(encoder, crtc_state);
+}
+
+static void jsl_ddi_tc_disable_clock(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+
+ icl_ddi_combo_disable_clock(encoder);
+
+ intel_de_write(i915, DDI_CLK_SEL(port), DDI_CLK_SEL_NONE);
+}
+
+static bool jsl_ddi_tc_is_clock_enabled(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+ u32 tmp;
+
+ tmp = intel_de_read(i915, DDI_CLK_SEL(port));
+
+ if ((tmp & DDI_CLK_SEL_MASK) == DDI_CLK_SEL_NONE)
+ return false;
+
+ return icl_ddi_combo_is_clock_enabled(encoder);
+}
+
+static void icl_ddi_tc_enable_clock(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ enum tc_port tc_port = intel_port_to_tc(i915, encoder->port);
+ enum port port = encoder->port;
+
+ if (drm_WARN_ON(&i915->drm, !pll))
+ return;
+
+ intel_de_write(i915, DDI_CLK_SEL(port),
+ icl_pll_to_ddi_clk_sel(encoder, crtc_state));
+
+ mutex_lock(&i915->display.dpll.lock);
+
+ intel_de_rmw(i915, ICL_DPCLKA_CFGCR0,
+ ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port), 0);
+
+ mutex_unlock(&i915->display.dpll.lock);
+}
+
+static void icl_ddi_tc_disable_clock(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum tc_port tc_port = intel_port_to_tc(i915, encoder->port);
+ enum port port = encoder->port;
+
+ mutex_lock(&i915->display.dpll.lock);
+
+ intel_de_rmw(i915, ICL_DPCLKA_CFGCR0,
+ 0, ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port));
+
+ mutex_unlock(&i915->display.dpll.lock);
+
+ intel_de_write(i915, DDI_CLK_SEL(port), DDI_CLK_SEL_NONE);
+}
+
+static bool icl_ddi_tc_is_clock_enabled(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum tc_port tc_port = intel_port_to_tc(i915, encoder->port);
+ enum port port = encoder->port;
+ u32 tmp;
+
+ tmp = intel_de_read(i915, DDI_CLK_SEL(port));
+
+ if ((tmp & DDI_CLK_SEL_MASK) == DDI_CLK_SEL_NONE)
+ return false;
+
+ tmp = intel_de_read(i915, ICL_DPCLKA_CFGCR0);
+
+ return !(tmp & ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port));
+}
+
+static struct intel_shared_dpll *icl_ddi_tc_get_pll(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum tc_port tc_port = intel_port_to_tc(i915, encoder->port);
+ enum port port = encoder->port;
+ enum intel_dpll_id id;
+ u32 tmp;
+
+ tmp = intel_de_read(i915, DDI_CLK_SEL(port));
+
+ switch (tmp & DDI_CLK_SEL_MASK) {
+ case DDI_CLK_SEL_TBT_162:
+ case DDI_CLK_SEL_TBT_270:
+ case DDI_CLK_SEL_TBT_540:
+ case DDI_CLK_SEL_TBT_810:
+ id = DPLL_ID_ICL_TBTPLL;
+ break;
+ case DDI_CLK_SEL_MG:
+ id = icl_tc_port_to_pll_id(tc_port);
+ break;
+ default:
+ MISSING_CASE(tmp);
+ fallthrough;
+ case DDI_CLK_SEL_NONE:
+ return NULL;
+ }
+
+ return intel_get_shared_dpll_by_id(i915, id);
+}
+
+static struct intel_shared_dpll *bxt_ddi_get_pll(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum intel_dpll_id id;
+
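+	/* BXT/GLK use a fixed port to PLL mapping */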
+ switch (encoder->port) {
+ case PORT_A:
+ id = DPLL_ID_SKL_DPLL0;
+ break;
+ case PORT_B:
+ id = DPLL_ID_SKL_DPLL1;
+ break;
+ case PORT_C:
+ id = DPLL_ID_SKL_DPLL2;
+ break;
+ default:
+ MISSING_CASE(encoder->port);
+ return NULL;
+ }
+
+ return intel_get_shared_dpll_by_id(i915, id);
+}
+
+static void skl_ddi_enable_clock(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ enum port port = encoder->port;
+
+ if (drm_WARN_ON(&i915->drm, !pll))
+ return;
+
+ mutex_lock(&i915->display.dpll.lock);
+
+ intel_de_rmw(i915, DPLL_CTRL2,
+ DPLL_CTRL2_DDI_CLK_OFF(port) |
+ DPLL_CTRL2_DDI_CLK_SEL_MASK(port),
+ DPLL_CTRL2_DDI_CLK_SEL(pll->info->id, port) |
+ DPLL_CTRL2_DDI_SEL_OVERRIDE(port));
+
+ mutex_unlock(&i915->display.dpll.lock);
+}
+
+static void skl_ddi_disable_clock(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+
+ mutex_lock(&i915->display.dpll.lock);
+
+ intel_de_rmw(i915, DPLL_CTRL2,
+ 0, DPLL_CTRL2_DDI_CLK_OFF(port));
+
+ mutex_unlock(&i915->display.dpll.lock);
+}
+
+static bool skl_ddi_is_clock_enabled(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+
+ /*
+ * FIXME Not sure if the override affects both
+ * the PLL selection and the CLK_OFF bit.
+ */
+ return !(intel_de_read(i915, DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_OFF(port));
+}
+
+static struct intel_shared_dpll *skl_ddi_get_pll(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+ enum intel_dpll_id id;
+ u32 tmp;
+
+ tmp = intel_de_read(i915, DPLL_CTRL2);
+
+ /*
+ * FIXME Not sure if the override affects both
+ * the PLL selection and the CLK_OFF bit.
+ */
+ if ((tmp & DPLL_CTRL2_DDI_SEL_OVERRIDE(port)) == 0)
+ return NULL;
+
+ id = (tmp & DPLL_CTRL2_DDI_CLK_SEL_MASK(port)) >>
+ DPLL_CTRL2_DDI_CLK_SEL_SHIFT(port);
+
+ return intel_get_shared_dpll_by_id(i915, id);
+}
+
+void hsw_ddi_enable_clock(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ enum port port = encoder->port;
+
+ if (drm_WARN_ON(&i915->drm, !pll))
+ return;
+
+ intel_de_write(i915, PORT_CLK_SEL(port), hsw_pll_to_ddi_pll_sel(pll));
+}
+
+void hsw_ddi_disable_clock(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+
+ intel_de_write(i915, PORT_CLK_SEL(port), PORT_CLK_SEL_NONE);
+}
+
+bool hsw_ddi_is_clock_enabled(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+
+ return intel_de_read(i915, PORT_CLK_SEL(port)) != PORT_CLK_SEL_NONE;
+}
+
+static struct intel_shared_dpll *hsw_ddi_get_pll(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+ enum intel_dpll_id id;
+ u32 tmp;
+
+ tmp = intel_de_read(i915, PORT_CLK_SEL(port));
+
+ switch (tmp & PORT_CLK_SEL_MASK) {
+ case PORT_CLK_SEL_WRPLL1:
+ id = DPLL_ID_WRPLL1;
+ break;
+ case PORT_CLK_SEL_WRPLL2:
+ id = DPLL_ID_WRPLL2;
+ break;
+ case PORT_CLK_SEL_SPLL:
+ id = DPLL_ID_SPLL;
+ break;
+ case PORT_CLK_SEL_LCPLL_810:
+ id = DPLL_ID_LCPLL_810;
+ break;
+ case PORT_CLK_SEL_LCPLL_1350:
+ id = DPLL_ID_LCPLL_1350;
+ break;
+ case PORT_CLK_SEL_LCPLL_2700:
+ id = DPLL_ID_LCPLL_2700;
+ break;
+ default:
+ MISSING_CASE(tmp);
+ fallthrough;
+ case PORT_CLK_SEL_NONE:
+ return NULL;
+ }
+
+ return intel_get_shared_dpll_by_id(i915, id);
+}
+
+void intel_ddi_enable_clock(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ if (encoder->enable_clock)
+ encoder->enable_clock(encoder, crtc_state);
+}
+
+void intel_ddi_disable_clock(struct intel_encoder *encoder)
+{
+ if (encoder->disable_clock)
+ encoder->disable_clock(encoder);
+}
+
+void intel_ddi_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ u32 port_mask;
+ bool ddi_clk_needed;
+
+ /*
+ * In case of DP MST, we sanitize the primary encoder only, not the
+ * virtual ones.
+ */
+ if (encoder->type == INTEL_OUTPUT_DP_MST)
+ return;
+
+ if (!encoder->base.crtc && intel_encoder_is_dp(encoder)) {
+ u8 pipe_mask;
+ bool is_mst;
+
+ intel_ddi_get_encoder_pipes(encoder, &pipe_mask, &is_mst);
+ /*
+ * In the unlikely case that BIOS enables DP in MST mode, just
+ * warn since our MST HW readout is incomplete.
+ */
+ if (drm_WARN_ON(&i915->drm, is_mst))
+ return;
+ }
+
+ port_mask = BIT(encoder->port);
+ ddi_clk_needed = encoder->base.crtc;
+
+ if (encoder->type == INTEL_OUTPUT_DSI) {
+ struct intel_encoder *other_encoder;
+
+ port_mask = intel_dsi_encoder_ports(encoder);
+ /*
+ * Sanity check that we haven't incorrectly registered another
+ * encoder using any of the ports of this DSI encoder.
+ */
+ for_each_intel_encoder(&i915->drm, other_encoder) {
+ if (other_encoder == encoder)
+ continue;
+
+ if (drm_WARN_ON(&i915->drm,
+ port_mask & BIT(other_encoder->port)))
+ return;
+ }
+ /*
+		 * For DSI we keep the DDI clocks gated
+		 * except during the enable/disable sequence.
+ */
+ ddi_clk_needed = false;
+ }
+
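+	/* Nothing to do if the clock is still needed, already gated, or can't be checked */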
+ if (ddi_clk_needed || !encoder->is_clock_enabled ||
+ !encoder->is_clock_enabled(encoder))
+ return;
+
+ drm_notice(&i915->drm,
+ "[ENCODER:%d:%s] is disabled/in DSI mode with an ungated DDI clock, gate it\n",
+ encoder->base.base.id, encoder->base.name);
+
+ encoder->disable_clock(encoder);
+}
+
+static void
+icl_program_mg_dp_mode(struct intel_digital_port *dig_port,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+ enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
+ enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
+ u32 ln0, ln1, pin_assignment;
+ u8 width;
+
+ if (!intel_phy_is_tc(dev_priv, phy) ||
+ intel_tc_port_in_tbt_alt_mode(dig_port))
+ return;
+
+ if (DISPLAY_VER(dev_priv) >= 12) {
+ ln0 = intel_dkl_phy_read(dev_priv, DKL_DP_MODE(tc_port), 0);
+ ln1 = intel_dkl_phy_read(dev_priv, DKL_DP_MODE(tc_port), 1);
+ } else {
+ ln0 = intel_de_read(dev_priv, MG_DP_MODE(0, tc_port));
+ ln1 = intel_de_read(dev_priv, MG_DP_MODE(1, tc_port));
+ }
+
+ ln0 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
+ ln1 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
+
+ /* DPPATC */
+ pin_assignment = intel_tc_port_get_pin_assignment_mask(dig_port);
+ width = crtc_state->lane_count;
+
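+	/* Pin assignment 0 means a fixed/legacy connection (no USB-C Alt Mode pin assignment) */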
+ switch (pin_assignment) {
+ case 0x0:
+ drm_WARN_ON(&dev_priv->drm,
+ !intel_tc_port_in_legacy_mode(dig_port));
+ if (width == 1) {
+ ln1 |= MG_DP_MODE_CFG_DP_X1_MODE;
+ } else {
+ ln0 |= MG_DP_MODE_CFG_DP_X2_MODE;
+ ln1 |= MG_DP_MODE_CFG_DP_X2_MODE;
+ }
+ break;
+ case 0x1:
+ if (width == 4) {
+ ln0 |= MG_DP_MODE_CFG_DP_X2_MODE;
+ ln1 |= MG_DP_MODE_CFG_DP_X2_MODE;
+ }
+ break;
+ case 0x2:
+ if (width == 2) {
+ ln0 |= MG_DP_MODE_CFG_DP_X2_MODE;
+ ln1 |= MG_DP_MODE_CFG_DP_X2_MODE;
+ }
+ break;
+ case 0x3:
+ case 0x5:
+ if (width == 1) {
+ ln0 |= MG_DP_MODE_CFG_DP_X1_MODE;
+ ln1 |= MG_DP_MODE_CFG_DP_X1_MODE;
+ } else {
+ ln0 |= MG_DP_MODE_CFG_DP_X2_MODE;
+ ln1 |= MG_DP_MODE_CFG_DP_X2_MODE;
+ }
+ break;
+ case 0x4:
+ case 0x6:
+ if (width == 1) {
+ ln0 |= MG_DP_MODE_CFG_DP_X1_MODE;
+ ln1 |= MG_DP_MODE_CFG_DP_X1_MODE;
+ } else {
+ ln0 |= MG_DP_MODE_CFG_DP_X2_MODE;
+ ln1 |= MG_DP_MODE_CFG_DP_X2_MODE;
+ }
+ break;
+ default:
+ MISSING_CASE(pin_assignment);
+ }
+
+ if (DISPLAY_VER(dev_priv) >= 12) {
+ intel_dkl_phy_write(dev_priv, DKL_DP_MODE(tc_port), 0, ln0);
+ intel_dkl_phy_write(dev_priv, DKL_DP_MODE(tc_port), 1, ln1);
+ } else {
+ intel_de_write(dev_priv, MG_DP_MODE(0, tc_port), ln0);
+ intel_de_write(dev_priv, MG_DP_MODE(1, tc_port), ln1);
+ }
+}
+
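+/*
+ * On TGL+ the DP Transport (DP_TP_*) registers are per-transcoder; for MST
+ * every stream uses the master transcoder's instance.
+ */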
+static enum transcoder
+tgl_dp_tp_transcoder(const struct intel_crtc_state *crtc_state)
+{
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST))
+ return crtc_state->mst_master_transcoder;
+ else
+ return crtc_state->cpu_transcoder;
+}
+
+i915_reg_t dp_tp_ctl_reg(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
+ if (DISPLAY_VER(dev_priv) >= 12)
+ return TGL_DP_TP_CTL(tgl_dp_tp_transcoder(crtc_state));
+ else
+ return DP_TP_CTL(encoder->port);
+}
+
+i915_reg_t dp_tp_status_reg(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
+ if (DISPLAY_VER(dev_priv) >= 12)
+ return TGL_DP_TP_STATUS(tgl_dp_tp_transcoder(crtc_state));
+ else
+ return DP_TP_STATUS(encoder->port);
+}
+
+static void intel_dp_sink_set_msa_timing_par_ignore_state(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ bool enable)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+ if (!crtc_state->vrr.enable)
+ return;
+
+ if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_DOWNSPREAD_CTRL,
+ enable ? DP_MSA_TIMING_PAR_IGNORE_EN : 0) <= 0)
+ drm_dbg_kms(&i915->drm,
+ "Failed to %s MSA_TIMING_PAR_IGNORE in the sink\n",
+ str_enable_disable(enable));
+}
+
+static void intel_dp_sink_set_fec_ready(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+ if (!crtc_state->fec_enable)
+ return;
+
+ if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_FEC_CONFIGURATION, DP_FEC_READY) <= 0)
+ drm_dbg_kms(&i915->drm,
+ "Failed to set FEC_READY in the sink\n");
+}
+
+static void intel_ddi_enable_fec(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp;
+ u32 val;
+
+ if (!crtc_state->fec_enable)
+ return;
+
+ intel_dp = enc_to_intel_dp(encoder);
+ val = intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
+ val |= DP_TP_CTL_FEC_ENABLE;
+ intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), val);
+}
+
+static void intel_ddi_disable_fec_state(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp;
+ u32 val;
+
+ if (!crtc_state->fec_enable)
+ return;
+
+ intel_dp = enc_to_intel_dp(encoder);
+ val = intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
+ val &= ~DP_TP_CTL_FEC_ENABLE;
+ intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), val);
+ intel_de_posting_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
+}
+
+static void intel_ddi_power_up_lanes(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+
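+	/* Only combo PHYs need explicit lane power-up handling here */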
+ if (intel_phy_is_combo(i915, phy)) {
+ bool lane_reversal =
+ dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
+
+ intel_combo_phy_power_up_lanes(i915, phy, false,
+ crtc_state->lane_count,
+ lane_reversal);
+ }
+}
+
+/* Splitter enable for eDP MSO is limited to certain pipes. */
+static u8 intel_ddi_splitter_pipe_mask(struct drm_i915_private *i915)
+{
+ if (IS_ALDERLAKE_P(i915))
+ return BIT(PIPE_A) | BIT(PIPE_B);
+ else
+ return BIT(PIPE_A);
+}
+
+static void intel_ddi_mso_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config)
+{
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ u32 dss1;
+
+ if (!HAS_MSO(i915))
+ return;
+
+ dss1 = intel_de_read(i915, ICL_PIPE_DSS_CTL1(pipe));
+
+ pipe_config->splitter.enable = dss1 & SPLITTER_ENABLE;
+ if (!pipe_config->splitter.enable)
+ return;
+
+ if (drm_WARN_ON(&i915->drm, !(intel_ddi_splitter_pipe_mask(i915) & BIT(pipe)))) {
+ pipe_config->splitter.enable = false;
+ return;
+ }
+
+ switch (dss1 & SPLITTER_CONFIGURATION_MASK) {
+ default:
+ drm_WARN(&i915->drm, true,
+ "Invalid splitter configuration, dss1=0x%08x\n", dss1);
+ fallthrough;
+ case SPLITTER_CONFIGURATION_2_SEGMENT:
+ pipe_config->splitter.link_count = 2;
+ break;
+ case SPLITTER_CONFIGURATION_4_SEGMENT:
+ pipe_config->splitter.link_count = 4;
+ break;
+ }
+
+ pipe_config->splitter.pixel_overlap = REG_FIELD_GET(OVERLAP_PIXELS_MASK, dss1);
+}
+
+static void intel_ddi_mso_configure(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ u32 dss1 = 0;
+
+ if (!HAS_MSO(i915))
+ return;
+
+ if (crtc_state->splitter.enable) {
+ dss1 |= SPLITTER_ENABLE;
+ dss1 |= OVERLAP_PIXELS(crtc_state->splitter.pixel_overlap);
+ if (crtc_state->splitter.link_count == 2)
+ dss1 |= SPLITTER_CONFIGURATION_2_SEGMENT;
+ else
+ dss1 |= SPLITTER_CONFIGURATION_4_SEGMENT;
+ }
+
+ intel_de_rmw(i915, ICL_PIPE_DSS_CTL1(pipe),
+ SPLITTER_ENABLE | SPLITTER_CONFIGURATION_MASK |
+ OVERLAP_PIXELS_MASK, dss1);
+}
+
+static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
+
+ intel_dp_set_link_params(intel_dp,
+ crtc_state->port_clock,
+ crtc_state->lane_count);
+
+ /*
+ * We only configure what the register value will be here. Actual
+ * enabling happens during link training farther down.
+ */
+ intel_ddi_init_dp_buf_reg(encoder, crtc_state);
+
+ /*
+ * 1. Enable Power Wells
+ *
+ * This was handled at the beginning of intel_atomic_commit_tail(),
+ * before we called down into this function.
+ */
+
+ /* 2. Enable Panel Power if PPS is required */
+ intel_pps_on(intel_dp);
+
+ /*
+ * 3. For non-TBT Type-C ports, set FIA lane count
+ * (DFLEXDPSP.DPX4TXLATC)
+ *
+ * This was done before tgl_ddi_pre_enable_dp by
+ * hsw_crtc_enable()->intel_encoders_pre_pll_enable().
+ */
+
+ /*
+ * 4. Enable the port PLL.
+ *
+ * The PLL enabling itself was already done before this function by
+ * hsw_crtc_enable()->intel_enable_shared_dpll(). We need only
+ * configure the PLL to port mapping here.
+ */
+ intel_ddi_enable_clock(encoder, crtc_state);
+
+ /* 5. If IO power is controlled through PWR_WELL_CTL, Enable IO Power */
+ if (!intel_tc_port_in_tbt_alt_mode(dig_port)) {
+ drm_WARN_ON(&dev_priv->drm, dig_port->ddi_io_wakeref);
+ dig_port->ddi_io_wakeref = intel_display_power_get(dev_priv,
+ dig_port->ddi_io_power_domain);
+ }
+
+ /* 6. Program DP_MODE */
+ icl_program_mg_dp_mode(dig_port, crtc_state);
+
+ /*
+	 * 7. The remaining steps below are substeps of the bspec's "Enable and
+ * Train Display Port" step. Note that steps that are specific to
+ * MST will be handled by intel_mst_pre_enable_dp() before/after it
+ * calls into this function. Also intel_mst_pre_enable_dp() only calls
+ * us when active_mst_links==0, so any steps designated for "single
+ * stream or multi-stream master transcoder" can just be performed
+ * unconditionally here.
+ */
+
+ /*
+ * 7.a Configure Transcoder Clock Select to direct the Port clock to the
+ * Transcoder.
+ */
+ intel_ddi_enable_pipe_clock(encoder, crtc_state);
+
+ if (HAS_DP20(dev_priv))
+ intel_ddi_config_transcoder_dp2(encoder, crtc_state);
+
+ /*
+ * 7.b Configure TRANS_DDI_FUNC_CTL DDI Select, DDI Mode Select & MST
+ * Transport Select
+ */
+ intel_ddi_config_transcoder_func(encoder, crtc_state);
+
+ /*
+ * 7.c Configure & enable DP_TP_CTL with link training pattern 1
+ * selected
+ *
+ * This will be handled by the intel_dp_start_link_train() farther
+ * down this function.
+ */
+
+ /* 7.e Configure voltage swing and related IO settings */
+ encoder->set_signal_levels(encoder, crtc_state);
+
+ /*
+ * 7.f Combo PHY: Configure PORT_CL_DW10 Static Power Down to power up
+ * the used lanes of the DDI.
+ */
+ intel_ddi_power_up_lanes(encoder, crtc_state);
+
+ /*
+ * 7.g Program CoG/MSO configuration bits in DSS_CTL1 if selected.
+ */
+ intel_ddi_mso_configure(crtc_state);
+
+ if (!is_mst)
+ intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
+
+ intel_dp_configure_protocol_converter(intel_dp, crtc_state);
+ intel_dp_sink_set_decompression_state(intel_dp, crtc_state, true);
+ /*
+ * DDI FEC: "anticipates enabling FEC encoding sets the FEC_READY bit
+ * in the FEC_CONFIGURATION register to 1 before initiating link
+	 * training"
+ */
+ intel_dp_sink_set_fec_ready(intel_dp, crtc_state);
+
+ intel_dp_check_frl_training(intel_dp);
+ intel_dp_pcon_dsc_configure(intel_dp, crtc_state);
+
+ /*
+ * 7.i Follow DisplayPort specification training sequence (see notes for
+ * failure handling)
+ * 7.j If DisplayPort multi-stream - Set DP_TP_CTL link training to Idle
+ * Pattern, wait for 5 idle patterns (DP_TP_STATUS Min_Idles_Sent)
+ * (timeout after 800 us)
+ */
+ intel_dp_start_link_train(intel_dp, crtc_state);
+
+ /* 7.k Set DP_TP_CTL link training to Normal */
+ if (!is_trans_port_sync_mode(crtc_state))
+ intel_dp_stop_link_train(intel_dp, crtc_state);
+
+ /* 7.l Configure and enable FEC if needed */
+ intel_ddi_enable_fec(encoder, crtc_state);
+
+ intel_dsc_dp_pps_write(encoder, crtc_state);
+}
+
+static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
+
+ if (DISPLAY_VER(dev_priv) < 11)
+ drm_WARN_ON(&dev_priv->drm,
+ is_mst && (port == PORT_A || port == PORT_E));
+ else
+ drm_WARN_ON(&dev_priv->drm, is_mst && port == PORT_A);
+
+ intel_dp_set_link_params(intel_dp,
+ crtc_state->port_clock,
+ crtc_state->lane_count);
+
+ /*
+ * We only configure what the register value will be here. Actual
+ * enabling happens during link training farther down.
+ */
+ intel_ddi_init_dp_buf_reg(encoder, crtc_state);
+
+ intel_pps_on(intel_dp);
+
+ intel_ddi_enable_clock(encoder, crtc_state);
+
+ if (!intel_tc_port_in_tbt_alt_mode(dig_port)) {
+ drm_WARN_ON(&dev_priv->drm, dig_port->ddi_io_wakeref);
+ dig_port->ddi_io_wakeref = intel_display_power_get(dev_priv,
+ dig_port->ddi_io_power_domain);
+ }
+
+ icl_program_mg_dp_mode(dig_port, crtc_state);
+
+ if (has_buf_trans_select(dev_priv))
+ hsw_prepare_dp_ddi_buffers(encoder, crtc_state);
+
+ encoder->set_signal_levels(encoder, crtc_state);
+
+ intel_ddi_power_up_lanes(encoder, crtc_state);
+
+ if (!is_mst)
+ intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
+ intel_dp_configure_protocol_converter(intel_dp, crtc_state);
+ intel_dp_sink_set_decompression_state(intel_dp, crtc_state,
+ true);
+ intel_dp_sink_set_fec_ready(intel_dp, crtc_state);
+ intel_dp_start_link_train(intel_dp, crtc_state);
+ if ((port != PORT_A || DISPLAY_VER(dev_priv) >= 9) &&
+ !is_trans_port_sync_mode(crtc_state))
+ intel_dp_stop_link_train(intel_dp, crtc_state);
+
+ intel_ddi_enable_fec(encoder, crtc_state);
+
+ if (!is_mst)
+ intel_ddi_enable_pipe_clock(encoder, crtc_state);
+
+ intel_dsc_dp_pps_write(encoder, crtc_state);
+}
+
+static void intel_ddi_pre_enable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
+ if (DISPLAY_VER(dev_priv) >= 12)
+ tgl_ddi_pre_enable_dp(state, encoder, crtc_state, conn_state);
+ else
+ hsw_ddi_pre_enable_dp(state, encoder, crtc_state, conn_state);
+
+	/*
+	 * For MST, the MSA is set after the Virtual Channel has been allocated,
+	 * from the MST encoder's pre_enable callback.
+	 */
+ if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST))
+ intel_ddi_set_dp_msa(crtc_state, conn_state);
+}
+
+static void intel_ddi_pre_enable_hdmi(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ struct intel_hdmi *intel_hdmi = &dig_port->hdmi;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
+ intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
+ intel_ddi_enable_clock(encoder, crtc_state);
+
+ drm_WARN_ON(&dev_priv->drm, dig_port->ddi_io_wakeref);
+ dig_port->ddi_io_wakeref = intel_display_power_get(dev_priv,
+ dig_port->ddi_io_power_domain);
+
+ icl_program_mg_dp_mode(dig_port, crtc_state);
+
+ intel_ddi_enable_pipe_clock(encoder, crtc_state);
+
+ dig_port->set_infoframes(encoder,
+ crtc_state->has_infoframe,
+ crtc_state, conn_state);
+}
+
+static void intel_ddi_pre_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+
+ /*
+ * When called from DP MST code:
+ * - conn_state will be NULL
+	 * - encoder will be the main encoder (i.e. mst->primary)
+ * - the main connector associated with this port
+ * won't be active or linked to a crtc
+ * - crtc_state will be the state of the first stream to
+ * be activated on this port, and it may not be the same
+ * stream that will be deactivated last, but each stream
+ * should have a state that is identical when it comes to
+	 *   the DP link parameters
+ */
+
+ drm_WARN_ON(&dev_priv->drm, crtc_state->has_pch_encoder);
+
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
+ intel_ddi_pre_enable_hdmi(state, encoder, crtc_state,
+ conn_state);
+ } else {
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+
+ intel_ddi_pre_enable_dp(state, encoder, crtc_state,
+ conn_state);
+
+ /* FIXME precompute everything properly */
+ /* FIXME how do we turn infoframes off again? */
+ if (dig_port->lspcon.active && dig_port->dp.has_hdmi_sink)
+ dig_port->set_infoframes(encoder,
+ crtc_state->has_infoframe,
+ crtc_state, conn_state);
+ }
+}
+
+static void intel_disable_ddi_buf(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+ bool wait = false;
+ u32 val;
+
+ val = intel_de_read(dev_priv, DDI_BUF_CTL(port));
+ if (val & DDI_BUF_CTL_ENABLE) {
+ val &= ~DDI_BUF_CTL_ENABLE;
+ intel_de_write(dev_priv, DDI_BUF_CTL(port), val);
+ wait = true;
+ }
+
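+	/* Disable the DP transport and reset link training back to pattern 1 */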
+ if (intel_crtc_has_dp_encoder(crtc_state)) {
+ val = intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
+ val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
+ val |= DP_TP_CTL_LINK_TRAIN_PAT1;
+ intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), val);
+ }
+
+ /* Disable FEC in DP Sink */
+ intel_ddi_disable_fec_state(encoder, crtc_state);
+
+ if (wait)
+ intel_wait_ddi_buf_idle(dev_priv, port);
+}
+
+static void intel_ddi_post_disable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ struct intel_dp *intel_dp = &dig_port->dp;
+ bool is_mst = intel_crtc_has_type(old_crtc_state,
+ INTEL_OUTPUT_DP_MST);
+
+ if (!is_mst)
+ intel_dp_set_infoframes(encoder, false,
+ old_crtc_state, old_conn_state);
+
+ /*
+ * Power down sink before disabling the port, otherwise we end
+ * up getting interrupts from the sink on detecting link loss.
+ */
+ intel_dp_set_power(intel_dp, DP_SET_POWER_D3);
+
+ if (DISPLAY_VER(dev_priv) >= 12) {
+ if (is_mst) {
+ enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
+ u32 val;
+
+ val = intel_de_read(dev_priv,
+ TRANS_DDI_FUNC_CTL(cpu_transcoder));
+ val &= ~(TGL_TRANS_DDI_PORT_MASK |
+ TRANS_DDI_MODE_SELECT_MASK);
+ intel_de_write(dev_priv,
+ TRANS_DDI_FUNC_CTL(cpu_transcoder),
+ val);
+ }
+ } else {
+ if (!is_mst)
+ intel_ddi_disable_pipe_clock(old_crtc_state);
+ }
+
+ intel_disable_ddi_buf(encoder, old_crtc_state);
+
+ /*
+ * From TGL spec: "If single stream or multi-stream master transcoder:
+ * Configure Transcoder Clock select to direct no clock to the
+ * transcoder"
+ */
+ if (DISPLAY_VER(dev_priv) >= 12)
+ intel_ddi_disable_pipe_clock(old_crtc_state);
+
+ intel_pps_vdd_on(intel_dp);
+ intel_pps_off(intel_dp);
+
+ if (!intel_tc_port_in_tbt_alt_mode(dig_port))
+ intel_display_power_put(dev_priv,
+ dig_port->ddi_io_power_domain,
+ fetch_and_zero(&dig_port->ddi_io_wakeref));
+
+ intel_ddi_disable_clock(encoder);
+}
+
+static void intel_ddi_post_disable_hdmi(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ struct intel_hdmi *intel_hdmi = &dig_port->hdmi;
+
+ dig_port->set_infoframes(encoder, false,
+ old_crtc_state, old_conn_state);
+
+ if (DISPLAY_VER(dev_priv) < 12)
+ intel_ddi_disable_pipe_clock(old_crtc_state);
+
+ intel_disable_ddi_buf(encoder, old_crtc_state);
+
+ if (DISPLAY_VER(dev_priv) >= 12)
+ intel_ddi_disable_pipe_clock(old_crtc_state);
+
+ intel_display_power_put(dev_priv,
+ dig_port->ddi_io_power_domain,
+ fetch_and_zero(&dig_port->ddi_io_wakeref));
+
+ intel_ddi_disable_clock(encoder);
+
+ intel_dp_dual_mode_set_tmds_output(intel_hdmi, false);
+}
+
+static void intel_ddi_post_disable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
+ bool is_tc_port = intel_phy_is_tc(dev_priv, phy);
+ struct intel_crtc *slave_crtc;
+
+ if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST)) {
+ intel_crtc_vblank_off(old_crtc_state);
+
+ intel_disable_transcoder(old_crtc_state);
+
+ intel_vrr_disable(old_crtc_state);
+
+ intel_ddi_disable_transcoder_func(old_crtc_state);
+
+ intel_dsc_disable(old_crtc_state);
+
+ if (DISPLAY_VER(dev_priv) >= 9)
+ skl_scaler_disable(old_crtc_state);
+ else
+ ilk_pfit_disable(old_crtc_state);
+ }
+
+ for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, slave_crtc,
+ intel_crtc_bigjoiner_slave_pipes(old_crtc_state)) {
+ const struct intel_crtc_state *old_slave_crtc_state =
+ intel_atomic_get_old_crtc_state(state, slave_crtc);
+
+ intel_crtc_vblank_off(old_slave_crtc_state);
+
+ intel_dsc_disable(old_slave_crtc_state);
+ skl_scaler_disable(old_slave_crtc_state);
+ }
+
+ /*
+ * When called from DP MST code:
+ * - old_conn_state will be NULL
+ * - encoder will be the main encoder (ie. mst->primary)
+ * - the main connector associated with this port
+ * won't be active or linked to a crtc
+ * - old_crtc_state will be the state of the last stream to
+ * be deactivated on this port, and it may not be the same
+ * stream that was activated last, but each stream
+ * should have a state that is identical when it comes to
+	 * the DP link parameters
+ */
+
+ if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_HDMI))
+ intel_ddi_post_disable_hdmi(state, encoder, old_crtc_state,
+ old_conn_state);
+ else
+ intel_ddi_post_disable_dp(state, encoder, old_crtc_state,
+ old_conn_state);
+
+ if (intel_crtc_has_dp_encoder(old_crtc_state) || is_tc_port)
+ intel_display_power_put(dev_priv,
+ intel_ddi_main_link_aux_domain(dig_port),
+ fetch_and_zero(&dig_port->aux_wakeref));
+
+ if (is_tc_port)
+ intel_tc_port_put_link(dig_port);
+}
+
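+/*
+ * With transcoder port sync, stop link training on all the slave
+ * encoders synced to this master transcoder first, then on the
+ * master itself after a short settling delay.
+ */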
+static void trans_port_sync_stop_link_train(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ const struct drm_connector_state *conn_state;
+ struct drm_connector *conn;
+ int i;
+
+ if (!crtc_state->sync_mode_slaves_mask)
+ return;
+
+ for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
+ struct intel_encoder *slave_encoder =
+ to_intel_encoder(conn_state->best_encoder);
+ struct intel_crtc *slave_crtc = to_intel_crtc(conn_state->crtc);
+ const struct intel_crtc_state *slave_crtc_state;
+
+ if (!slave_crtc)
+ continue;
+
+ slave_crtc_state =
+ intel_atomic_get_new_crtc_state(state, slave_crtc);
+
+ if (slave_crtc_state->master_transcoder !=
+ crtc_state->cpu_transcoder)
+ continue;
+
+ intel_dp_stop_link_train(enc_to_intel_dp(slave_encoder),
+ slave_crtc_state);
+ }
+
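+	/*
+	 * Short settling delay between stopping link training on the
+	 * slaves and on the master (a heuristic delay; not tied to a
+	 * documented spec requirement here).
+	 */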
+ usleep_range(200, 400);
+
+ intel_dp_stop_link_train(enc_to_intel_dp(encoder),
+ crtc_state);
+}
+
+static void intel_enable_ddi_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ enum port port = encoder->port;
+
+ if (port == PORT_A && DISPLAY_VER(dev_priv) < 9)
+ intel_dp_stop_link_train(intel_dp, crtc_state);
+
+ drm_connector_update_privacy_screen(conn_state);
+ intel_edp_backlight_on(crtc_state, conn_state);
+
+ if (!dig_port->lspcon.active || dig_port->dp.has_hdmi_sink)
+ intel_dp_set_infoframes(encoder, true, crtc_state, conn_state);
+
+ intel_audio_codec_enable(encoder, crtc_state, conn_state);
+
+ trans_port_sync_stop_link_train(state, encoder, crtc_state);
+}
+
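+/*
+ * The Display WA #1143 chicken bits live in a per-transcoder register
+ * even though they affect a specific DDI port; map the port to the
+ * transcoder register that carries its bits.
+ */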
+static i915_reg_t
+gen9_chicken_trans_reg_by_port(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ static const enum transcoder trans[] = {
+ [PORT_A] = TRANSCODER_EDP,
+ [PORT_B] = TRANSCODER_A,
+ [PORT_C] = TRANSCODER_B,
+ [PORT_D] = TRANSCODER_C,
+ [PORT_E] = TRANSCODER_A,
+ };
+
+ drm_WARN_ON(&dev_priv->drm, DISPLAY_VER(dev_priv) < 9);
+
+ if (drm_WARN_ON(&dev_priv->drm, port < PORT_A || port > PORT_E))
+ port = PORT_A;
+
+ return CHICKEN_TRANS(trans[port]);
+}
+
+static void intel_enable_ddi_hdmi(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ struct drm_connector *connector = conn_state->connector;
+ enum port port = encoder->port;
+ enum phy phy = intel_port_to_phy(dev_priv, port);
+ u32 buf_ctl;
+
+ if (!intel_hdmi_handle_sink_scrambling(encoder, connector,
+ crtc_state->hdmi_high_tmds_clock_ratio,
+ crtc_state->hdmi_scrambling))
+ drm_dbg_kms(&dev_priv->drm,
+ "[CONNECTOR:%d:%s] Failed to configure sink scrambling/TMDS bit clock ratio\n",
+ connector->base.id, connector->name);
+
+ if (has_buf_trans_select(dev_priv))
+ hsw_prepare_hdmi_ddi_buffers(encoder, crtc_state);
+
+ encoder->set_signal_levels(encoder, crtc_state);
+
+ /* Display WA #1143: skl,kbl,cfl */
+ if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv)) {
+ /*
+ * For some reason these chicken bits have been
+		 * stuffed into a transcoder register, even though
+ * the bits affect a specific DDI port rather than
+ * a specific transcoder.
+ */
+ i915_reg_t reg = gen9_chicken_trans_reg_by_port(dev_priv, port);
+ u32 val;
+
+ val = intel_de_read(dev_priv, reg);
+
+ if (port == PORT_E)
+ val |= DDIE_TRAINING_OVERRIDE_ENABLE |
+ DDIE_TRAINING_OVERRIDE_VALUE;
+ else
+ val |= DDI_TRAINING_OVERRIDE_ENABLE |
+ DDI_TRAINING_OVERRIDE_VALUE;
+
+ intel_de_write(dev_priv, reg, val);
+ intel_de_posting_read(dev_priv, reg);
+
+ udelay(1);
+
+ if (port == PORT_E)
+ val &= ~(DDIE_TRAINING_OVERRIDE_ENABLE |
+ DDIE_TRAINING_OVERRIDE_VALUE);
+ else
+ val &= ~(DDI_TRAINING_OVERRIDE_ENABLE |
+ DDI_TRAINING_OVERRIDE_VALUE);
+
+ intel_de_write(dev_priv, reg, val);
+ }
+
+ intel_ddi_power_up_lanes(encoder, crtc_state);
+
+	/* In HDMI/DVI mode, the port width and swing/emphasis values
+ * are ignored so nothing special needs to be done besides
+ * enabling the port.
+ *
+ * On ADL_P the PHY link rate and lane count must be programmed but
+ * these are both 0 for HDMI.
+ */
+ buf_ctl = dig_port->saved_port_bits | DDI_BUF_CTL_ENABLE;
+ if (IS_ALDERLAKE_P(dev_priv) && intel_phy_is_tc(dev_priv, phy)) {
+ drm_WARN_ON(&dev_priv->drm, !intel_tc_port_in_legacy_mode(dig_port));
+ buf_ctl |= DDI_BUF_CTL_TC_PHY_OWNERSHIP;
+ }
+ intel_de_write(dev_priv, DDI_BUF_CTL(port), buf_ctl);
+
+ intel_audio_codec_enable(encoder, crtc_state, conn_state);
+}
+
+static void intel_enable_ddi(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ drm_WARN_ON(state->base.dev, crtc_state->has_pch_encoder);
+
+ if (!intel_crtc_is_bigjoiner_slave(crtc_state))
+ intel_ddi_enable_transcoder_func(encoder, crtc_state);
+
+ intel_vrr_enable(encoder, crtc_state);
+
+ intel_enable_transcoder(crtc_state);
+
+ intel_crtc_vblank_on(crtc_state);
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ intel_enable_ddi_hdmi(state, encoder, crtc_state, conn_state);
+ else
+ intel_enable_ddi_dp(state, encoder, crtc_state, conn_state);
+
+ /* Enable hdcp if it's desired */
+ if (conn_state->content_protection ==
+ DRM_MODE_CONTENT_PROTECTION_DESIRED)
+ intel_hdcp_enable(to_intel_connector(conn_state->connector),
+ crtc_state,
+ (u8)conn_state->hdcp_content_type);
+}
+
+static void intel_disable_ddi_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ intel_dp->link_trained = false;
+
+ intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state);
+
+ intel_psr_disable(intel_dp, old_crtc_state);
+ intel_edp_backlight_off(old_conn_state);
+ /* Disable the decompression in DP Sink */
+ intel_dp_sink_set_decompression_state(intel_dp, old_crtc_state,
+ false);
+ /* Disable Ignore_MSA bit in DP Sink */
+ intel_dp_sink_set_msa_timing_par_ignore_state(intel_dp, old_crtc_state,
+ false);
+}
+
+static void intel_disable_ddi_hdmi(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct drm_connector *connector = old_conn_state->connector;
+
+ intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state);
+
+ if (!intel_hdmi_handle_sink_scrambling(encoder, connector,
+ false, false))
+ drm_dbg_kms(&i915->drm,
+ "[CONNECTOR:%d:%s] Failed to reset sink scrambling/TMDS bit clock ratio\n",
+ connector->base.id, connector->name);
+}
+
+static void intel_disable_ddi(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ intel_hdcp_disable(to_intel_connector(old_conn_state->connector));
+
+ if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_HDMI))
+ intel_disable_ddi_hdmi(state, encoder, old_crtc_state,
+ old_conn_state);
+ else
+ intel_disable_ddi_dp(state, encoder, old_crtc_state,
+ old_conn_state);
+}
+
+static void intel_ddi_update_pipe_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ intel_ddi_set_dp_msa(crtc_state, conn_state);
+
+ intel_dp_set_infoframes(encoder, true, crtc_state, conn_state);
+
+ intel_backlight_update(state, encoder, crtc_state, conn_state);
+ drm_connector_update_privacy_screen(conn_state);
+}
+
+void intel_ddi_update_pipe(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) &&
+ !intel_encoder_is_mst(encoder))
+ intel_ddi_update_pipe_dp(state, encoder, crtc_state,
+ conn_state);
+
+ intel_hdcp_update_pipe(state, encoder, crtc_state, conn_state);
+}
+
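+/*
+ * Hold a TC port link reference across an encoder update that skips
+ * the full enable/disable sequence, refreshing the active DPLL for
+ * the crtc (and any bigjoiner slaves); the reference is dropped again
+ * in intel_ddi_update_complete().
+ */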
+static void
+intel_ddi_update_prepare(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_crtc_state *crtc_state =
+ crtc ? intel_atomic_get_new_crtc_state(state, crtc) : NULL;
+ int required_lanes = crtc_state ? crtc_state->lane_count : 1;
+
+ drm_WARN_ON(state->base.dev, crtc && crtc->active);
+
+ intel_tc_port_get_link(enc_to_dig_port(encoder),
+ required_lanes);
+ if (crtc_state && crtc_state->hw.active) {
+ struct intel_crtc *slave_crtc;
+
+ intel_update_active_dpll(state, crtc, encoder);
+
+ for_each_intel_crtc_in_pipe_mask(&i915->drm, slave_crtc,
+ intel_crtc_bigjoiner_slave_pipes(crtc_state))
+ intel_update_active_dpll(state, slave_crtc, encoder);
+ }
+}
+
+static void
+intel_ddi_update_complete(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ struct intel_crtc *crtc)
+{
+ intel_tc_port_put_link(enc_to_dig_port(encoder));
+}
+
+static void
+intel_ddi_pre_pll_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
+ bool is_tc_port = intel_phy_is_tc(dev_priv, phy);
+
+ if (is_tc_port)
+ intel_tc_port_get_link(dig_port, crtc_state->lane_count);
+
+ if (intel_crtc_has_dp_encoder(crtc_state) || is_tc_port) {
+ drm_WARN_ON(&dev_priv->drm, dig_port->aux_wakeref);
+ dig_port->aux_wakeref =
+ intel_display_power_get(dev_priv,
+ intel_ddi_main_link_aux_domain(dig_port));
+ }
+
+ if (is_tc_port && !intel_tc_port_in_tbt_alt_mode(dig_port))
+ /*
+ * Program the lane count for static/dynamic connections on
+ * Type-C ports. Skip this step for TBT.
+ */
+ intel_tc_port_set_fia_lane_count(dig_port, crtc_state->lane_count);
+ else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ bxt_ddi_phy_set_lane_optim_mask(encoder,
+ crtc_state->lane_lat_optim_mask);
+}
+
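+/*
+ * ADL-P workaround for switching from TBT-alt to DP-alt mode: clear
+ * the PCS soft reset bit on both lanes of the Dekel PHY.
+ */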
+static void adlp_tbt_to_dp_alt_switch_wa(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum tc_port tc_port = intel_port_to_tc(i915, encoder->port);
+ int ln;
+
+ for (ln = 0; ln < 2; ln++)
+ intel_dkl_phy_rmw(i915, DKL_PCS_DW5(tc_port), ln, DKL_PCS_DW5_CORE_SOFTRESET, 0);
+}
+
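+/*
+ * Before retraining, cleanly disable DP_TP_CTL and the DDI buffer if
+ * they are enabled, then bring them back up with training pattern 1
+ * selected and wait for the buffer to become active.
+ */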
+static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct intel_encoder *encoder = &dig_port->base;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+ u32 dp_tp_ctl, ddi_buf_ctl;
+ bool wait = false;
+
+ dp_tp_ctl = intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
+
+ if (dp_tp_ctl & DP_TP_CTL_ENABLE) {
+ ddi_buf_ctl = intel_de_read(dev_priv, DDI_BUF_CTL(port));
+ if (ddi_buf_ctl & DDI_BUF_CTL_ENABLE) {
+ intel_de_write(dev_priv, DDI_BUF_CTL(port),
+ ddi_buf_ctl & ~DDI_BUF_CTL_ENABLE);
+ wait = true;
+ }
+
+ dp_tp_ctl &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
+ dp_tp_ctl |= DP_TP_CTL_LINK_TRAIN_PAT1;
+ intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), dp_tp_ctl);
+ intel_de_posting_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
+
+ if (wait)
+ intel_wait_ddi_buf_idle(dev_priv, port);
+ }
+
+ dp_tp_ctl = DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_PAT1;
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)) {
+ dp_tp_ctl |= DP_TP_CTL_MODE_MST;
+ } else {
+ dp_tp_ctl |= DP_TP_CTL_MODE_SST;
+ if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
+ dp_tp_ctl |= DP_TP_CTL_ENHANCED_FRAME_ENABLE;
+ }
+ intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), dp_tp_ctl);
+ intel_de_posting_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
+
+ if (IS_ALDERLAKE_P(dev_priv) &&
+ (intel_tc_port_in_dp_alt_mode(dig_port) || intel_tc_port_in_legacy_mode(dig_port)))
+ adlp_tbt_to_dp_alt_switch_wa(encoder);
+
+ intel_dp->DP |= DDI_BUF_CTL_ENABLE;
+ intel_de_write(dev_priv, DDI_BUF_CTL(port), intel_dp->DP);
+ intel_de_posting_read(dev_priv, DDI_BUF_CTL(port));
+
+ intel_wait_ddi_buf_active(dev_priv, port);
+}
+
+static void intel_ddi_set_link_train(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ u8 dp_train_pat)
+{
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ u32 temp;
+
+ temp = intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
+
+ temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
+ switch (intel_dp_training_pattern_symbol(dp_train_pat)) {
+ case DP_TRAINING_PATTERN_DISABLE:
+ temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
+ break;
+ case DP_TRAINING_PATTERN_1:
+ temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
+ break;
+ case DP_TRAINING_PATTERN_2:
+ temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
+ break;
+ case DP_TRAINING_PATTERN_3:
+ temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
+ break;
+ case DP_TRAINING_PATTERN_4:
+ temp |= DP_TP_CTL_LINK_TRAIN_PAT4;
+ break;
+ }
+
+ intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), temp);
+}
+
+static void intel_ddi_set_idle_link_train(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+ u32 val;
+
+ val = intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
+ val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
+ val |= DP_TP_CTL_LINK_TRAIN_IDLE;
+ intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), val);
+
+ /*
+	 * Until TGL, PORT_A can only have eDP in SST mode. There the only
+	 * reason we need to set idle transmission mode is to work around a HW
+	 * issue where we enable the pipe while not in idle link-training mode.
+	 * In this case there is a requirement to wait for a minimum number of
+ * idle patterns to be sent.
+ */
+ if (port == PORT_A && DISPLAY_VER(dev_priv) < 12)
+ return;
+
+ if (intel_de_wait_for_set(dev_priv,
+ dp_tp_status_reg(encoder, crtc_state),
+ DP_TP_STATUS_IDLE_DONE, 1))
+ drm_err(&dev_priv->drm,
+ "Timed out waiting for DP idle patterns\n");
+}
+
+static bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
+ enum transcoder cpu_transcoder)
+{
+ if (cpu_transcoder == TRANSCODER_EDP)
+ return false;
+
+ if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_AUDIO_MMIO))
+ return false;
+
+ return intel_de_read(dev_priv, HSW_AUD_PIN_ELD_CP_VLD) &
+ AUDIO_OUTPUT_ENABLE(cpu_transcoder);
+}
+
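+/*
+ * Port clocks above 594 MHz need a raised minimum voltage level; how
+ * high depends on the platform. E.g. an HBR2 DP link (port_clock ==
+ * 540000) keeps the default level 0, while HBR3 (810000) bumps the
+ * level on ICL and newer.
+ */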
+void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
+ struct intel_crtc_state *crtc_state)
+{
+ if (DISPLAY_VER(dev_priv) >= 12 && crtc_state->port_clock > 594000)
+ crtc_state->min_voltage_level = 2;
+ else if (IS_JSL_EHL(dev_priv) && crtc_state->port_clock > 594000)
+ crtc_state->min_voltage_level = 3;
+ else if (DISPLAY_VER(dev_priv) >= 11 && crtc_state->port_clock > 594000)
+ crtc_state->min_voltage_level = 1;
+}
+
+static enum transcoder bdw_transcoder_master_readout(struct drm_i915_private *dev_priv,
+ enum transcoder cpu_transcoder)
+{
+ u32 master_select;
+
+ if (DISPLAY_VER(dev_priv) >= 11) {
+ u32 ctl2 = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL2(cpu_transcoder));
+
+ if ((ctl2 & PORT_SYNC_MODE_ENABLE) == 0)
+ return INVALID_TRANSCODER;
+
+ master_select = REG_FIELD_GET(PORT_SYNC_MODE_MASTER_SELECT_MASK, ctl2);
+ } else {
+ u32 ctl = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));
+
+ if ((ctl & TRANS_DDI_PORT_SYNC_ENABLE) == 0)
+ return INVALID_TRANSCODER;
+
+ master_select = REG_FIELD_GET(TRANS_DDI_PORT_SYNC_MASTER_SELECT_MASK, ctl);
+ }
+
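+	/*
+	 * A master select of 0 means the EDP transcoder; otherwise it
+	 * is a 1-based index (1 = transcoder A, 2 = B, ...).
+	 */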
+ if (master_select == 0)
+ return TRANSCODER_EDP;
+ else
+ return master_select - 1;
+}
+
+static void bdw_get_trans_port_sync_config(struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ u32 transcoders = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
+ BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
+ enum transcoder cpu_transcoder;
+
+ crtc_state->master_transcoder =
+ bdw_transcoder_master_readout(dev_priv, crtc_state->cpu_transcoder);
+
+ for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
+ enum intel_display_power_domain power_domain;
+ intel_wakeref_t trans_wakeref;
+
+ power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
+ trans_wakeref = intel_display_power_get_if_enabled(dev_priv,
+ power_domain);
+
+ if (!trans_wakeref)
+ continue;
+
+ if (bdw_transcoder_master_readout(dev_priv, cpu_transcoder) ==
+ crtc_state->cpu_transcoder)
+ crtc_state->sync_mode_slaves_mask |= BIT(cpu_transcoder);
+
+ intel_display_power_put(dev_priv, power_domain, trans_wakeref);
+ }
+
+ drm_WARN_ON(&dev_priv->drm,
+ crtc_state->master_transcoder != INVALID_TRANSCODER &&
+ crtc_state->sync_mode_slaves_mask);
+}
+
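+/*
+ * Decode TRANS_DDI_FUNC_CTL back into crtc state: sync polarities,
+ * pipe bpp, output type (HDMI/DVI/DP SST/MST/FDI) and lane count,
+ * plus the mode-specific extras read out below.
+ */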
+static void intel_ddi_read_func_ctl(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+ enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ u32 temp, flags = 0;
+
+ temp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));
+ if (temp & TRANS_DDI_PHSYNC)
+ flags |= DRM_MODE_FLAG_PHSYNC;
+ else
+ flags |= DRM_MODE_FLAG_NHSYNC;
+ if (temp & TRANS_DDI_PVSYNC)
+ flags |= DRM_MODE_FLAG_PVSYNC;
+ else
+ flags |= DRM_MODE_FLAG_NVSYNC;
+
+ pipe_config->hw.adjusted_mode.flags |= flags;
+
+ switch (temp & TRANS_DDI_BPC_MASK) {
+ case TRANS_DDI_BPC_6:
+ pipe_config->pipe_bpp = 18;
+ break;
+ case TRANS_DDI_BPC_8:
+ pipe_config->pipe_bpp = 24;
+ break;
+ case TRANS_DDI_BPC_10:
+ pipe_config->pipe_bpp = 30;
+ break;
+ case TRANS_DDI_BPC_12:
+ pipe_config->pipe_bpp = 36;
+ break;
+ default:
+ break;
+ }
+
+ switch (temp & TRANS_DDI_MODE_SELECT_MASK) {
+ case TRANS_DDI_MODE_SELECT_HDMI:
+ pipe_config->has_hdmi_sink = true;
+
+ pipe_config->infoframes.enable |=
+ intel_hdmi_infoframes_enabled(encoder, pipe_config);
+
+ if (pipe_config->infoframes.enable)
+ pipe_config->has_infoframe = true;
+
+ if (temp & TRANS_DDI_HDMI_SCRAMBLING)
+ pipe_config->hdmi_scrambling = true;
+ if (temp & TRANS_DDI_HIGH_TMDS_CHAR_RATE)
+ pipe_config->hdmi_high_tmds_clock_ratio = true;
+ fallthrough;
+ case TRANS_DDI_MODE_SELECT_DVI:
+ pipe_config->output_types |= BIT(INTEL_OUTPUT_HDMI);
+ pipe_config->lane_count = 4;
+ break;
+ case TRANS_DDI_MODE_SELECT_DP_SST:
+ if (encoder->type == INTEL_OUTPUT_EDP)
+ pipe_config->output_types |= BIT(INTEL_OUTPUT_EDP);
+ else
+ pipe_config->output_types |= BIT(INTEL_OUTPUT_DP);
+ pipe_config->lane_count =
+ ((temp & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1;
+
+ intel_cpu_transcoder_get_m1_n1(crtc, cpu_transcoder,
+ &pipe_config->dp_m_n);
+ intel_cpu_transcoder_get_m2_n2(crtc, cpu_transcoder,
+ &pipe_config->dp_m2_n2);
+
+ if (DISPLAY_VER(dev_priv) >= 11) {
+ i915_reg_t dp_tp_ctl = dp_tp_ctl_reg(encoder, pipe_config);
+
+ pipe_config->fec_enable =
+ intel_de_read(dev_priv, dp_tp_ctl) & DP_TP_CTL_FEC_ENABLE;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "[ENCODER:%d:%s] Fec status: %u\n",
+ encoder->base.base.id, encoder->base.name,
+ pipe_config->fec_enable);
+ }
+
+ if (dig_port->lspcon.active && dig_port->dp.has_hdmi_sink)
+ pipe_config->infoframes.enable |=
+ intel_lspcon_infoframes_enabled(encoder, pipe_config);
+ else
+ pipe_config->infoframes.enable |=
+ intel_hdmi_infoframes_enabled(encoder, pipe_config);
+ break;
+ case TRANS_DDI_MODE_SELECT_FDI_OR_128B132B:
+ if (!HAS_DP20(dev_priv)) {
+ /* FDI */
+ pipe_config->output_types |= BIT(INTEL_OUTPUT_ANALOG);
+ break;
+ }
+ fallthrough; /* 128b/132b */
+ case TRANS_DDI_MODE_SELECT_DP_MST:
+ pipe_config->output_types |= BIT(INTEL_OUTPUT_DP_MST);
+ pipe_config->lane_count =
+ ((temp & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1;
+
+ if (DISPLAY_VER(dev_priv) >= 12)
+ pipe_config->mst_master_transcoder =
+ REG_FIELD_GET(TRANS_DDI_MST_TRANSPORT_SELECT_MASK, temp);
+
+ intel_cpu_transcoder_get_m1_n1(crtc, cpu_transcoder,
+ &pipe_config->dp_m_n);
+
+ pipe_config->infoframes.enable |=
+ intel_hdmi_infoframes_enabled(encoder, pipe_config);
+ break;
+ default:
+ break;
+ }
+}
+
+static void intel_ddi_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
+
+ /* XXX: DSI transcoder paranoia */
+ if (drm_WARN_ON(&dev_priv->drm, transcoder_is_dsi(cpu_transcoder)))
+ return;
+
+ intel_ddi_read_func_ctl(encoder, pipe_config);
+
+ intel_ddi_mso_get_config(encoder, pipe_config);
+
+ pipe_config->has_audio =
+ intel_ddi_is_audio_enabled(dev_priv, cpu_transcoder);
+
+ if (encoder->type == INTEL_OUTPUT_EDP)
+ intel_edp_fixup_vbt_bpp(encoder, pipe_config->pipe_bpp);
+
+ ddi_dotclock_get(pipe_config);
+
+ if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ pipe_config->lane_lat_optim_mask =
+ bxt_ddi_phy_get_lane_lat_optim_mask(encoder);
+
+ intel_ddi_compute_min_voltage_level(dev_priv, pipe_config);
+
+ intel_hdmi_read_gcp_infoframe(encoder, pipe_config);
+
+ intel_read_infoframe(encoder, pipe_config,
+ HDMI_INFOFRAME_TYPE_AVI,
+ &pipe_config->infoframes.avi);
+ intel_read_infoframe(encoder, pipe_config,
+ HDMI_INFOFRAME_TYPE_SPD,
+ &pipe_config->infoframes.spd);
+ intel_read_infoframe(encoder, pipe_config,
+ HDMI_INFOFRAME_TYPE_VENDOR,
+ &pipe_config->infoframes.hdmi);
+ intel_read_infoframe(encoder, pipe_config,
+ HDMI_INFOFRAME_TYPE_DRM,
+ &pipe_config->infoframes.drm);
+
+ if (DISPLAY_VER(dev_priv) >= 8)
+ bdw_get_trans_port_sync_config(pipe_config);
+
+ intel_read_dp_sdp(encoder, pipe_config, HDMI_PACKET_TYPE_GAMUT_METADATA);
+ intel_read_dp_sdp(encoder, pipe_config, DP_SDP_VSC);
+
+ intel_psr_get_config(encoder, pipe_config);
+}
+
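+/*
+ * Read out the given PLL's hw state into the crtc state and derive
+ * port_clock from it; used by the per-platform get_config() hooks
+ * below.
+ */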
+void intel_ddi_get_clock(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ struct intel_shared_dpll *pll)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
+ struct icl_port_dpll *port_dpll = &crtc_state->icl_port_dplls[port_dpll_id];
+ bool pll_active;
+
+ if (drm_WARN_ON(&i915->drm, !pll))
+ return;
+
+ port_dpll->pll = pll;
+ pll_active = intel_dpll_get_hw_state(i915, pll, &port_dpll->hw_state);
+ drm_WARN_ON(&i915->drm, !pll_active);
+
+ icl_set_active_port_dpll(crtc_state, port_dpll_id);
+
+ crtc_state->port_clock = intel_dpll_get_freq(i915, crtc_state->shared_dpll,
+ &crtc_state->dpll_hw_state);
+}
+
+static void dg2_ddi_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
+{
+ intel_mpllb_readout_hw_state(encoder, &crtc_state->mpllb_state);
+ crtc_state->port_clock = intel_mpllb_calc_port_clock(encoder, &crtc_state->mpllb_state);
+
+ intel_ddi_get_config(encoder, crtc_state);
+}
+
+static void adls_ddi_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
+{
+ intel_ddi_get_clock(encoder, crtc_state, adls_ddi_get_pll(encoder));
+ intel_ddi_get_config(encoder, crtc_state);
+}
+
+static void rkl_ddi_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
+{
+ intel_ddi_get_clock(encoder, crtc_state, rkl_ddi_get_pll(encoder));
+ intel_ddi_get_config(encoder, crtc_state);
+}
+
+static void dg1_ddi_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
+{
+ intel_ddi_get_clock(encoder, crtc_state, dg1_ddi_get_pll(encoder));
+ intel_ddi_get_config(encoder, crtc_state);
+}
+
+static void icl_ddi_combo_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
+{
+ intel_ddi_get_clock(encoder, crtc_state, icl_ddi_combo_get_pll(encoder));
+ intel_ddi_get_config(encoder, crtc_state);
+}
+
+static void icl_ddi_tc_get_clock(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ struct intel_shared_dpll *pll)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum icl_port_dpll_id port_dpll_id;
+ struct icl_port_dpll *port_dpll;
+ bool pll_active;
+
+ if (drm_WARN_ON(&i915->drm, !pll))
+ return;
+
+ if (intel_get_shared_dpll_id(i915, pll) == DPLL_ID_ICL_TBTPLL)
+ port_dpll_id = ICL_PORT_DPLL_DEFAULT;
+ else
+ port_dpll_id = ICL_PORT_DPLL_MG_PHY;
+
+ port_dpll = &crtc_state->icl_port_dplls[port_dpll_id];
+
+ port_dpll->pll = pll;
+ pll_active = intel_dpll_get_hw_state(i915, pll, &port_dpll->hw_state);
+ drm_WARN_ON(&i915->drm, !pll_active);
+
+ icl_set_active_port_dpll(crtc_state, port_dpll_id);
+
+ if (intel_get_shared_dpll_id(i915, crtc_state->shared_dpll) == DPLL_ID_ICL_TBTPLL)
+ crtc_state->port_clock = icl_calc_tbt_pll_link(i915, encoder->port);
+ else
+ crtc_state->port_clock = intel_dpll_get_freq(i915, crtc_state->shared_dpll,
+ &crtc_state->dpll_hw_state);
+}
+
+static void icl_ddi_tc_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
+{
+ icl_ddi_tc_get_clock(encoder, crtc_state, icl_ddi_tc_get_pll(encoder));
+ intel_ddi_get_config(encoder, crtc_state);
+}
+
+static void bxt_ddi_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
+{
+ intel_ddi_get_clock(encoder, crtc_state, bxt_ddi_get_pll(encoder));
+ intel_ddi_get_config(encoder, crtc_state);
+}
+
+static void skl_ddi_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
+{
+ intel_ddi_get_clock(encoder, crtc_state, skl_ddi_get_pll(encoder));
+ intel_ddi_get_config(encoder, crtc_state);
+}
+
+void hsw_ddi_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
+{
+ intel_ddi_get_clock(encoder, crtc_state, hsw_ddi_get_pll(encoder));
+ intel_ddi_get_config(encoder, crtc_state);
+}
+
+static void intel_ddi_sync_state(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+
+ if (intel_phy_is_tc(i915, phy))
+ intel_tc_port_sanitize_mode(enc_to_dig_port(encoder));
+
+ if (crtc_state && intel_crtc_has_dp_encoder(crtc_state))
+ intel_dp_sync_state(encoder, crtc_state);
+}
+
+static bool intel_ddi_initial_fastset_check(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+ bool fastset = true;
+
+ if (intel_phy_is_tc(i915, phy)) {
+ drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Forcing full modeset to compute TC port DPLLs\n",
+ encoder->base.base.id, encoder->base.name);
+ crtc_state->uapi.mode_changed = true;
+ fastset = false;
+ }
+
+ if (intel_crtc_has_dp_encoder(crtc_state) &&
+ !intel_dp_initial_fastset_check(encoder, crtc_state))
+ fastset = false;
+
+ return fastset;
+}
+
+static enum intel_output_type
+intel_ddi_compute_output_type(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ switch (conn_state->connector->connector_type) {
+ case DRM_MODE_CONNECTOR_HDMIA:
+ return INTEL_OUTPUT_HDMI;
+ case DRM_MODE_CONNECTOR_eDP:
+ return INTEL_OUTPUT_EDP;
+ case DRM_MODE_CONNECTOR_DisplayPort:
+ return INTEL_OUTPUT_DP;
+ default:
+ MISSING_CASE(conn_state->connector->connector_type);
+ return INTEL_OUTPUT_UNUSED;
+ }
+}
+
+static int intel_ddi_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+ int ret;
+
+ if (HAS_TRANSCODER(dev_priv, TRANSCODER_EDP) && port == PORT_A)
+ pipe_config->cpu_transcoder = TRANSCODER_EDP;
+
+ if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI)) {
+ ret = intel_hdmi_compute_config(encoder, pipe_config, conn_state);
+ } else {
+ ret = intel_dp_compute_config(encoder, pipe_config, conn_state);
+ }
+
+ if (ret)
+ return ret;
+
+ if (IS_HASWELL(dev_priv) && crtc->pipe == PIPE_A &&
+ pipe_config->cpu_transcoder == TRANSCODER_EDP)
+ pipe_config->pch_pfit.force_thru =
+ pipe_config->pch_pfit.enabled ||
+ pipe_config->crc_enabled;
+
+ if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ pipe_config->lane_lat_optim_mask =
+ bxt_ddi_phy_calc_lane_lat_optim_mask(pipe_config->lane_count);
+
+ intel_ddi_compute_min_voltage_level(dev_priv, pipe_config);
+
+ return 0;
+}
+
+static bool mode_equal(const struct drm_display_mode *mode1,
+ const struct drm_display_mode *mode2)
+{
+ return drm_mode_match(mode1, mode2,
+ DRM_MODE_MATCH_TIMINGS |
+ DRM_MODE_MATCH_FLAGS |
+ DRM_MODE_MATCH_3D_FLAGS) &&
+ mode1->clock == mode2->clock; /* we want an exact match */
+}
+
+static bool m_n_equal(const struct intel_link_m_n *m_n_1,
+ const struct intel_link_m_n *m_n_2)
+{
+ return m_n_1->tu == m_n_2->tu &&
+ m_n_1->data_m == m_n_2->data_m &&
+ m_n_1->data_n == m_n_2->data_n &&
+ m_n_1->link_m == m_n_2->link_m &&
+ m_n_1->link_n == m_n_2->link_n;
+}
+
+static bool crtcs_port_sync_compatible(const struct intel_crtc_state *crtc_state1,
+ const struct intel_crtc_state *crtc_state2)
+{
+ return crtc_state1->hw.active && crtc_state2->hw.active &&
+ crtc_state1->output_types == crtc_state2->output_types &&
+ crtc_state1->output_format == crtc_state2->output_format &&
+ crtc_state1->lane_count == crtc_state2->lane_count &&
+ crtc_state1->port_clock == crtc_state2->port_clock &&
+ mode_equal(&crtc_state1->hw.adjusted_mode,
+ &crtc_state2->hw.adjusted_mode) &&
+ m_n_equal(&crtc_state1->dp_m_n, &crtc_state2->dp_m_n);
+}
+
+static u8
+intel_ddi_port_sync_transcoders(const struct intel_crtc_state *ref_crtc_state,
+ int tile_group_id)
+{
+ struct drm_connector *connector;
+ const struct drm_connector_state *conn_state;
+ struct drm_i915_private *dev_priv = to_i915(ref_crtc_state->uapi.crtc->dev);
+ struct intel_atomic_state *state =
+ to_intel_atomic_state(ref_crtc_state->uapi.state);
+ u8 transcoders = 0;
+ int i;
+
+ /*
+ * We don't enable port sync on BDW due to missing w/as and
+ * due to not having adjusted the modeset sequence appropriately.
+ */
+ if (DISPLAY_VER(dev_priv) < 9)
+ return 0;
+
+ if (!intel_crtc_has_type(ref_crtc_state, INTEL_OUTPUT_DP))
+ return 0;
+
+ for_each_new_connector_in_state(&state->base, connector, conn_state, i) {
+ struct intel_crtc *crtc = to_intel_crtc(conn_state->crtc);
+ const struct intel_crtc_state *crtc_state;
+
+ if (!crtc)
+ continue;
+
+ if (!connector->has_tile ||
+ connector->tile_group->id !=
+ tile_group_id)
+ continue;
+ crtc_state = intel_atomic_get_new_crtc_state(state,
+ crtc);
+ if (!crtcs_port_sync_compatible(ref_crtc_state,
+ crtc_state))
+ continue;
+ transcoders |= BIT(crtc_state->cpu_transcoder);
+ }
+
+ return transcoders;
+}
+
+static int intel_ddi_compute_config_late(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct drm_connector *connector = conn_state->connector;
+ u8 port_sync_transcoders = 0;
+
+	drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] [CRTC:%d:%s]\n",
+ encoder->base.base.id, encoder->base.name,
+ crtc_state->uapi.crtc->base.id, crtc_state->uapi.crtc->name);
+
+ if (connector->has_tile)
+ port_sync_transcoders = intel_ddi_port_sync_transcoders(crtc_state,
+ connector->tile_group->id);
+
+ /*
+	 * EDP transcoders cannot be enslaved,
+	 * so always make them a master when present.
+ */
+ if (port_sync_transcoders & BIT(TRANSCODER_EDP))
+ crtc_state->master_transcoder = TRANSCODER_EDP;
+ else
+ crtc_state->master_transcoder = ffs(port_sync_transcoders) - 1;
+
+ if (crtc_state->master_transcoder == crtc_state->cpu_transcoder) {
+ crtc_state->master_transcoder = INVALID_TRANSCODER;
+ crtc_state->sync_mode_slaves_mask =
+ port_sync_transcoders & ~BIT(crtc_state->cpu_transcoder);
+ }
+
+ return 0;
+}
+
+static void intel_ddi_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->dev);
+ struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder));
+ enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
+
+ intel_dp_encoder_flush_work(encoder);
+ if (intel_phy_is_tc(i915, phy))
+ intel_tc_port_flush_work(dig_port);
+ intel_display_power_flush_work(i915);
+
+ drm_encoder_cleanup(encoder);
+ kfree(dig_port->hdcp_port_data.streams);
+ kfree(dig_port);
+}
+
+static void intel_ddi_encoder_reset(struct drm_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(encoder));
+ struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder));
+ enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
+
+ intel_dp->reset_link_params = true;
+
+ intel_pps_encoder_reset(intel_dp);
+
+ if (intel_phy_is_tc(i915, phy))
+ intel_tc_port_init_mode(dig_port);
+}
+
+static const struct drm_encoder_funcs intel_ddi_funcs = {
+ .reset = intel_ddi_encoder_reset,
+ .destroy = intel_ddi_encoder_destroy,
+};
+
+static struct intel_connector *
+intel_ddi_init_dp_connector(struct intel_digital_port *dig_port)
+{
+ struct intel_connector *connector;
+ enum port port = dig_port->base.port;
+
+ connector = intel_connector_alloc();
+ if (!connector)
+ return NULL;
+
+ dig_port->dp.output_reg = DDI_BUF_CTL(port);
+ dig_port->dp.prepare_link_retrain = intel_ddi_prepare_link_retrain;
+ dig_port->dp.set_link_train = intel_ddi_set_link_train;
+ dig_port->dp.set_idle_link_train = intel_ddi_set_idle_link_train;
+
+ dig_port->dp.voltage_max = intel_ddi_dp_voltage_max;
+ dig_port->dp.preemph_max = intel_ddi_dp_preemph_max;
+
+ if (!intel_dp_init_connector(dig_port, connector)) {
+ kfree(connector);
+ return NULL;
+ }
+
+ if (dig_port->base.type == INTEL_OUTPUT_EDP) {
+ struct drm_device *dev = dig_port->base.base.dev;
+ struct drm_privacy_screen *privacy_screen;
+
+ privacy_screen = drm_privacy_screen_get(dev->dev, NULL);
+ if (!IS_ERR(privacy_screen)) {
+ drm_connector_attach_privacy_screen_provider(&connector->base,
+ privacy_screen);
+ } else if (PTR_ERR(privacy_screen) != -ENODEV) {
+ drm_warn(dev, "Error getting privacy-screen\n");
+ }
+ }
+
+ return connector;
+}
+
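+/*
+ * Force a modeset on the given crtc by committing an atomic state
+ * with connectors_changed set.
+ */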
+static int modeset_pipe(struct drm_crtc *crtc,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct drm_atomic_state *state;
+ struct drm_crtc_state *crtc_state;
+ int ret;
+
+ state = drm_atomic_state_alloc(crtc->dev);
+ if (!state)
+ return -ENOMEM;
+
+ state->acquire_ctx = ctx;
+
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state)) {
+ ret = PTR_ERR(crtc_state);
+ goto out;
+ }
+
+ crtc_state->connectors_changed = true;
+
+ ret = drm_atomic_commit(state);
+out:
+ drm_atomic_state_put(state);
+
+ return ret;
+}
+
+static int intel_hdmi_reset_link(struct intel_encoder *encoder,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_hdmi *hdmi = enc_to_intel_hdmi(encoder);
+ struct intel_connector *connector = hdmi->attached_connector;
+ struct i2c_adapter *adapter =
+ intel_gmbus_get_adapter(dev_priv, hdmi->ddc_bus);
+ struct drm_connector_state *conn_state;
+ struct intel_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
+ u8 config;
+ int ret;
+
+ if (!connector || connector->base.status != connector_status_connected)
+ return 0;
+
+ ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
+ ctx);
+ if (ret)
+ return ret;
+
+ conn_state = connector->base.state;
+
+ crtc = to_intel_crtc(conn_state->crtc);
+ if (!crtc)
+ return 0;
+
+ ret = drm_modeset_lock(&crtc->base.mutex, ctx);
+ if (ret)
+ return ret;
+
+ crtc_state = to_intel_crtc_state(crtc->base.state);
+
+ drm_WARN_ON(&dev_priv->drm,
+ !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI));
+
+ if (!crtc_state->hw.active)
+ return 0;
+
+ if (!crtc_state->hdmi_high_tmds_clock_ratio &&
+ !crtc_state->hdmi_scrambling)
+ return 0;
+
+ if (conn_state->commit &&
+ !try_wait_for_completion(&conn_state->commit->hw_done))
+ return 0;
+
+ ret = drm_scdc_readb(adapter, SCDC_TMDS_CONFIG, &config);
+ if (ret < 0) {
+ drm_err(&dev_priv->drm, "Failed to read TMDS config: %d\n",
+ ret);
+ return 0;
+ }
+
+ if (!!(config & SCDC_TMDS_BIT_CLOCK_RATIO_BY_40) ==
+ crtc_state->hdmi_high_tmds_clock_ratio &&
+ !!(config & SCDC_SCRAMBLING_ENABLE) ==
+ crtc_state->hdmi_scrambling)
+ return 0;
+
+ /*
+ * HDMI 2.0 says that one should not send scrambled data
+ * prior to configuring the sink scrambling, and that
+ * TMDS clock/data transmission should be suspended when
+ * changing the TMDS clock rate in the sink. So let's
+ * just do a full modeset here, even though some sinks
+	 * would be perfectly happy if we were to just reconfigure
+ * the SCDC settings on the fly.
+ */
+ return modeset_pipe(&crtc->base, ctx);
+}
+
+static enum intel_hotplug_state
+intel_ddi_hotplug(struct intel_encoder *encoder,
+ struct intel_connector *connector)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ struct intel_dp *intel_dp = &dig_port->dp;
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+ bool is_tc = intel_phy_is_tc(i915, phy);
+ struct drm_modeset_acquire_ctx ctx;
+ enum intel_hotplug_state state;
+ int ret;
+
+ if (intel_dp->compliance.test_active &&
+ intel_dp->compliance.test_type == DP_TEST_LINK_PHY_TEST_PATTERN) {
+ intel_dp_phy_test(encoder);
+ /* just do the PHY test and nothing else */
+ return INTEL_HOTPLUG_UNCHANGED;
+ }
+
+ state = intel_encoder_hotplug(encoder, connector);
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+ for (;;) {
+ if (connector->base.connector_type == DRM_MODE_CONNECTOR_HDMIA)
+ ret = intel_hdmi_reset_link(encoder, &ctx);
+ else
+ ret = intel_dp_retrain_link(encoder, &ctx);
+
+ if (ret == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ continue;
+ }
+
+ break;
+ }
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+ drm_WARN(encoder->base.dev, ret,
+ "Acquiring modeset locks failed with %i\n", ret);
+
+ /*
+	 * Unpowered type-c dongles can take some time to boot and become
+	 * responsive, so give those dongles some time to power up and then
+	 * retry the probe.
+ *
+ * On many platforms the HDMI live state signal is known to be
+ * unreliable, so we can't use it to detect if a sink is connected or
+ * not. Instead we detect if it's connected based on whether we can
+ * read the EDID or not. That in turn has a problem during disconnect,
+ * since the HPD interrupt may be raised before the DDC lines get
+ * disconnected (due to how the required length of DDC vs. HPD
+ * connector pins are specified) and so we'll still be able to get a
+ * valid EDID. To solve this schedule another detection cycle if this
+ * time around we didn't detect any change in the sink's connection
+ * status.
+ *
+ * Type-c connectors which get their HPD signal deasserted then
+ * reasserted, without unplugging/replugging the sink from the
+ * connector, introduce a delay until the AUX channel communication
+ * becomes functional. Retry the detection for 5 seconds on type-c
+ * connectors to account for this delay.
+ */
+ if (state == INTEL_HOTPLUG_UNCHANGED &&
+ connector->hotplug_retries < (is_tc ? 5 : 1) &&
+ !dig_port->dp.is_mst)
+ state = INTEL_HOTPLUG_RETRY;
+
+ return state;
+}
+
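+/*
+ * Live HPD status checks: PCH managed ports read SDEISR, while CPU
+ * managed ports read DEISR (HSW) or GEN8_DE_PORT_ISR (BDW+).
+ */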
+static bool lpt_digital_port_connected(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ u32 bit = dev_priv->display.hotplug.pch_hpd[encoder->hpd_pin];
+
+ return intel_de_read(dev_priv, SDEISR) & bit;
+}
+
+static bool hsw_digital_port_connected(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ u32 bit = dev_priv->display.hotplug.hpd[encoder->hpd_pin];
+
+ return intel_de_read(dev_priv, DEISR) & bit;
+}
+
+static bool bdw_digital_port_connected(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ u32 bit = dev_priv->display.hotplug.hpd[encoder->hpd_pin];
+
+ return intel_de_read(dev_priv, GEN8_DE_PORT_ISR) & bit;
+}
+
+static struct intel_connector *
+intel_ddi_init_hdmi_connector(struct intel_digital_port *dig_port)
+{
+ struct intel_connector *connector;
+ enum port port = dig_port->base.port;
+
+ connector = intel_connector_alloc();
+ if (!connector)
+ return NULL;
+
+ dig_port->hdmi.hdmi_reg = DDI_BUF_CTL(port);
+ intel_hdmi_init_connector(dig_port, connector);
+
+ return connector;
+}
+
+static bool intel_ddi_a_force_4_lanes(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+
+ if (dig_port->base.port != PORT_A)
+ return false;
+
+ if (dig_port->saved_port_bits & DDI_A_4_LANES)
+ return false;
+
+ /* Broxton/Geminilake: Bspec says that DDI_A_4_LANES is the only
+ * supported configuration
+ */
+ if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ return true;
+
+ return false;
+}
+
+static int
+intel_ddi_max_lanes(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+ enum port port = dig_port->base.port;
+ int max_lanes = 4;
+
+ if (DISPLAY_VER(dev_priv) >= 11)
+ return max_lanes;
+
+ if (port == PORT_A || port == PORT_E) {
+ if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
+ max_lanes = port == PORT_A ? 4 : 0;
+ else
+ /* Both A and E share 2 lanes */
+ max_lanes = 2;
+ }
+
+ /*
+ * Some BIOS might fail to set this bit on port A if eDP
+ * wasn't lit up at boot. Force this bit set when needed
+ * so we use the proper lane count for our calculations.
+ */
+ if (intel_ddi_a_force_4_lanes(dig_port)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Forcing DDI_A_4_LANES for port A\n");
+ dig_port->saved_port_bits |= DDI_A_4_LANES;
+ max_lanes = 4;
+ }
+
+ return max_lanes;
+}
+
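+/*
+ * Check whether the HTI (aka HDPORT) boot firmware has claimed this
+ * PHY; if so, the corresponding output must not be initialized.
+ */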
+static bool hti_uses_phy(struct drm_i915_private *i915, enum phy phy)
+{
+ return i915->hti_state & HDPORT_ENABLED &&
+ i915->hti_state & HDPORT_DDI_USED(phy);
+}
+
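+/* Per-platform mappings from DDI port to HPD pin. */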
+static enum hpd_pin xelpd_hpd_pin(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ if (port >= PORT_D_XELPD)
+ return HPD_PORT_D + port - PORT_D_XELPD;
+ else if (port >= PORT_TC1)
+ return HPD_PORT_TC1 + port - PORT_TC1;
+ else
+ return HPD_PORT_A + port - PORT_A;
+}
+
+static enum hpd_pin dg1_hpd_pin(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ if (port >= PORT_TC1)
+ return HPD_PORT_C + port - PORT_TC1;
+ else
+ return HPD_PORT_A + port - PORT_A;
+}
+
+static enum hpd_pin tgl_hpd_pin(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ if (port >= PORT_TC1)
+ return HPD_PORT_TC1 + port - PORT_TC1;
+ else
+ return HPD_PORT_A + port - PORT_A;
+}
+
+static enum hpd_pin rkl_hpd_pin(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ if (HAS_PCH_TGP(dev_priv))
+ return tgl_hpd_pin(dev_priv, port);
+
+ if (port >= PORT_TC1)
+ return HPD_PORT_C + port - PORT_TC1;
+ else
+ return HPD_PORT_A + port - PORT_A;
+}
+
+static enum hpd_pin icl_hpd_pin(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ if (port >= PORT_C)
+ return HPD_PORT_TC1 + port - PORT_C;
+ else
+ return HPD_PORT_A + port - PORT_A;
+}
+
+static enum hpd_pin ehl_hpd_pin(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ if (port == PORT_D)
+ return HPD_PORT_A;
+
+ if (HAS_PCH_TGP(dev_priv))
+ return icl_hpd_pin(dev_priv, port);
+
+ return HPD_PORT_A + port - PORT_A;
+}
+
+static enum hpd_pin skl_hpd_pin(struct drm_i915_private *dev_priv, enum port port)
+{
+ if (HAS_PCH_TGP(dev_priv))
+ return icl_hpd_pin(dev_priv, port);
+
+ return HPD_PORT_A + port - PORT_A;
+}
+
+static bool intel_ddi_is_tc(struct drm_i915_private *i915, enum port port)
+{
+ if (DISPLAY_VER(i915) >= 12)
+ return port >= PORT_TC1;
+ else if (DISPLAY_VER(i915) >= 11)
+ return port >= PORT_C;
+ else
+ return false;
+}
+
+static void intel_ddi_encoder_suspend(struct intel_encoder *encoder)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+
+ intel_dp_encoder_suspend(encoder);
+
+ if (!intel_phy_is_tc(i915, phy))
+ return;
+
+ intel_tc_port_flush_work(dig_port);
+}
+
+static void intel_ddi_encoder_shutdown(struct intel_encoder *encoder)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+
+ intel_dp_encoder_shutdown(encoder);
+ intel_hdmi_encoder_shutdown(encoder);
+
+ if (!intel_phy_is_tc(i915, phy))
+ return;
+
+ intel_tc_port_flush_work(dig_port);
+}
+
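+/* Map a TC port to its ASCII digit for encoder names ('1', '2', ...). */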
+#define port_tc_name(port) ((port) - PORT_TC1 + '1')
+#define tc_port_name(tc_port) ((tc_port) - TC_PORT_1 + '1')
+
+void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
+{
+ struct intel_digital_port *dig_port;
+ struct intel_encoder *encoder;
+ const struct intel_bios_encoder_data *devdata;
+ bool init_hdmi, init_dp;
+ enum phy phy = intel_port_to_phy(dev_priv, port);
+
+ /*
+ * On platforms with HTI (aka HDPORT), if it's enabled at boot it may
+ * have taken over some of the PHYs and made them unavailable to the
+ * driver. In that case we should skip initializing the corresponding
+ * outputs.
+ */
+ if (hti_uses_phy(dev_priv, phy)) {
+ drm_dbg_kms(&dev_priv->drm, "PORT %c / PHY %c reserved by HTI\n",
+ port_name(port), phy_name(phy));
+ return;
+ }
+
+ devdata = intel_bios_encoder_data_lookup(dev_priv, port);
+ if (!devdata) {
+ drm_dbg_kms(&dev_priv->drm,
+ "VBT says port %c is not present\n",
+ port_name(port));
+ return;
+ }
+
+ init_hdmi = intel_bios_encoder_supports_dvi(devdata) ||
+ intel_bios_encoder_supports_hdmi(devdata);
+ init_dp = intel_bios_encoder_supports_dp(devdata);
+
+ if (intel_bios_is_lspcon_present(dev_priv, port)) {
+ /*
+		 * An LSPCON device needs to be driven via a DP connector
+		 * with a special detection sequence, so make sure DP is
+		 * initialized before lspcon.
+ */
+ init_dp = true;
+ init_hdmi = false;
+ drm_dbg_kms(&dev_priv->drm, "VBT says port %c has lspcon\n",
+ port_name(port));
+ }
+
+ if (!init_dp && !init_hdmi) {
+ drm_dbg_kms(&dev_priv->drm,
+ "VBT says port %c is not DVI/HDMI/DP compatible, respect it\n",
+ port_name(port));
+ return;
+ }
+
+ if (intel_phy_is_snps(dev_priv, phy) &&
+ dev_priv->snps_phy_failed_calibration & BIT(phy)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "SNPS PHY %c failed to calibrate, proceeding anyway\n",
+ phy_name(phy));
+ }
+
+ dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL);
+ if (!dig_port)
+ return;
+
+ encoder = &dig_port->base;
+ encoder->devdata = devdata;
+
+ if (DISPLAY_VER(dev_priv) >= 13 && port >= PORT_D_XELPD) {
+ drm_encoder_init(&dev_priv->drm, &encoder->base, &intel_ddi_funcs,
+ DRM_MODE_ENCODER_TMDS,
+ "DDI %c/PHY %c",
+ port_name(port - PORT_D_XELPD + PORT_D),
+ phy_name(phy));
+ } else if (DISPLAY_VER(dev_priv) >= 12) {
+ enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
+
+ drm_encoder_init(&dev_priv->drm, &encoder->base, &intel_ddi_funcs,
+ DRM_MODE_ENCODER_TMDS,
+ "DDI %s%c/PHY %s%c",
+ port >= PORT_TC1 ? "TC" : "",
+ port >= PORT_TC1 ? port_tc_name(port) : port_name(port),
+ tc_port != TC_PORT_NONE ? "TC" : "",
+ tc_port != TC_PORT_NONE ? tc_port_name(tc_port) : phy_name(phy));
+ } else if (DISPLAY_VER(dev_priv) >= 11) {
+ enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
+
+ drm_encoder_init(&dev_priv->drm, &encoder->base, &intel_ddi_funcs,
+ DRM_MODE_ENCODER_TMDS,
+ "DDI %c%s/PHY %s%c",
+ port_name(port),
+ port >= PORT_C ? " (TC)" : "",
+ tc_port != TC_PORT_NONE ? "TC" : "",
+ tc_port != TC_PORT_NONE ? tc_port_name(tc_port) : phy_name(phy));
+ } else {
+ drm_encoder_init(&dev_priv->drm, &encoder->base, &intel_ddi_funcs,
+ DRM_MODE_ENCODER_TMDS,
+ "DDI %c/PHY %c", port_name(port), phy_name(phy));
+ }
+
+ mutex_init(&dig_port->hdcp_mutex);
+ dig_port->num_hdcp_streams = 0;
+
+ encoder->hotplug = intel_ddi_hotplug;
+ encoder->compute_output_type = intel_ddi_compute_output_type;
+ encoder->compute_config = intel_ddi_compute_config;
+ encoder->compute_config_late = intel_ddi_compute_config_late;
+ encoder->enable = intel_enable_ddi;
+ encoder->pre_pll_enable = intel_ddi_pre_pll_enable;
+ encoder->pre_enable = intel_ddi_pre_enable;
+ encoder->disable = intel_disable_ddi;
+ encoder->post_disable = intel_ddi_post_disable;
+ encoder->update_pipe = intel_ddi_update_pipe;
+ encoder->get_hw_state = intel_ddi_get_hw_state;
+ encoder->sync_state = intel_ddi_sync_state;
+ encoder->initial_fastset_check = intel_ddi_initial_fastset_check;
+ encoder->suspend = intel_ddi_encoder_suspend;
+ encoder->shutdown = intel_ddi_encoder_shutdown;
+ encoder->get_power_domains = intel_ddi_get_power_domains;
+
+ encoder->type = INTEL_OUTPUT_DDI;
+ encoder->power_domain = intel_display_power_ddi_lanes_domain(dev_priv, port);
+ encoder->port = port;
+ encoder->cloneable = 0;
+ encoder->pipe_mask = ~0;
+
+ if (IS_DG2(dev_priv)) {
+ encoder->enable_clock = intel_mpllb_enable;
+ encoder->disable_clock = intel_mpllb_disable;
+ encoder->get_config = dg2_ddi_get_config;
+ } else if (IS_ALDERLAKE_S(dev_priv)) {
+ encoder->enable_clock = adls_ddi_enable_clock;
+ encoder->disable_clock = adls_ddi_disable_clock;
+ encoder->is_clock_enabled = adls_ddi_is_clock_enabled;
+ encoder->get_config = adls_ddi_get_config;
+ } else if (IS_ROCKETLAKE(dev_priv)) {
+ encoder->enable_clock = rkl_ddi_enable_clock;
+ encoder->disable_clock = rkl_ddi_disable_clock;
+ encoder->is_clock_enabled = rkl_ddi_is_clock_enabled;
+ encoder->get_config = rkl_ddi_get_config;
+ } else if (IS_DG1(dev_priv)) {
+ encoder->enable_clock = dg1_ddi_enable_clock;
+ encoder->disable_clock = dg1_ddi_disable_clock;
+ encoder->is_clock_enabled = dg1_ddi_is_clock_enabled;
+ encoder->get_config = dg1_ddi_get_config;
+ } else if (IS_JSL_EHL(dev_priv)) {
+ if (intel_ddi_is_tc(dev_priv, port)) {
+ encoder->enable_clock = jsl_ddi_tc_enable_clock;
+ encoder->disable_clock = jsl_ddi_tc_disable_clock;
+ encoder->is_clock_enabled = jsl_ddi_tc_is_clock_enabled;
+ encoder->get_config = icl_ddi_combo_get_config;
+ } else {
+ encoder->enable_clock = icl_ddi_combo_enable_clock;
+ encoder->disable_clock = icl_ddi_combo_disable_clock;
+ encoder->is_clock_enabled = icl_ddi_combo_is_clock_enabled;
+ encoder->get_config = icl_ddi_combo_get_config;
+ }
+ } else if (DISPLAY_VER(dev_priv) >= 11) {
+ if (intel_ddi_is_tc(dev_priv, port)) {
+ encoder->enable_clock = icl_ddi_tc_enable_clock;
+ encoder->disable_clock = icl_ddi_tc_disable_clock;
+ encoder->is_clock_enabled = icl_ddi_tc_is_clock_enabled;
+ encoder->get_config = icl_ddi_tc_get_config;
+ } else {
+ encoder->enable_clock = icl_ddi_combo_enable_clock;
+ encoder->disable_clock = icl_ddi_combo_disable_clock;
+ encoder->is_clock_enabled = icl_ddi_combo_is_clock_enabled;
+ encoder->get_config = icl_ddi_combo_get_config;
+ }
+ } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ /* BXT/GLK have fixed PLL->port mapping */
+ encoder->get_config = bxt_ddi_get_config;
+ } else if (DISPLAY_VER(dev_priv) == 9) {
+ encoder->enable_clock = skl_ddi_enable_clock;
+ encoder->disable_clock = skl_ddi_disable_clock;
+ encoder->is_clock_enabled = skl_ddi_is_clock_enabled;
+ encoder->get_config = skl_ddi_get_config;
+ } else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
+ encoder->enable_clock = hsw_ddi_enable_clock;
+ encoder->disable_clock = hsw_ddi_disable_clock;
+ encoder->is_clock_enabled = hsw_ddi_is_clock_enabled;
+ encoder->get_config = hsw_ddi_get_config;
+ }
+
+ if (IS_DG2(dev_priv)) {
+ encoder->set_signal_levels = intel_snps_phy_set_signal_levels;
+ } else if (DISPLAY_VER(dev_priv) >= 12) {
+ if (intel_phy_is_combo(dev_priv, phy))
+ encoder->set_signal_levels = icl_combo_phy_set_signal_levels;
+ else
+ encoder->set_signal_levels = tgl_dkl_phy_set_signal_levels;
+ } else if (DISPLAY_VER(dev_priv) >= 11) {
+ if (intel_phy_is_combo(dev_priv, phy))
+ encoder->set_signal_levels = icl_combo_phy_set_signal_levels;
+ else
+ encoder->set_signal_levels = icl_mg_phy_set_signal_levels;
+ } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ encoder->set_signal_levels = bxt_ddi_phy_set_signal_levels;
+ } else {
+ encoder->set_signal_levels = hsw_set_signal_levels;
+ }
+
+ intel_ddi_buf_trans_init(encoder);
+
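+	/* The HPD pin mapping depends on the platform and the port. */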
+ if (DISPLAY_VER(dev_priv) >= 13)
+ encoder->hpd_pin = xelpd_hpd_pin(dev_priv, port);
+ else if (IS_DG1(dev_priv))
+ encoder->hpd_pin = dg1_hpd_pin(dev_priv, port);
+ else if (IS_ROCKETLAKE(dev_priv))
+ encoder->hpd_pin = rkl_hpd_pin(dev_priv, port);
+ else if (DISPLAY_VER(dev_priv) >= 12)
+ encoder->hpd_pin = tgl_hpd_pin(dev_priv, port);
+ else if (IS_JSL_EHL(dev_priv))
+ encoder->hpd_pin = ehl_hpd_pin(dev_priv, port);
+ else if (DISPLAY_VER(dev_priv) == 11)
+ encoder->hpd_pin = icl_hpd_pin(dev_priv, port);
+ else if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv))
+ encoder->hpd_pin = skl_hpd_pin(dev_priv, port);
+ else
+ encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);
+
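+	/*
+	 * Preserve the hardware strap bits: lane reversal always,
+	 * DDI_A_4_LANES only on pre-gen11 platforms.
+	 */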
+ if (DISPLAY_VER(dev_priv) >= 11)
+ dig_port->saved_port_bits =
+ intel_de_read(dev_priv, DDI_BUF_CTL(port))
+ & DDI_BUF_PORT_REVERSAL;
+ else
+ dig_port->saved_port_bits =
+ intel_de_read(dev_priv, DDI_BUF_CTL(port))
+ & (DDI_BUF_PORT_REVERSAL | DDI_A_4_LANES);
+
+ if (intel_bios_is_lane_reversal_needed(dev_priv, port))
+ dig_port->saved_port_bits |= DDI_BUF_PORT_REVERSAL;
+
+ dig_port->dp.output_reg = INVALID_MMIO_REG;
+ dig_port->max_lanes = intel_ddi_max_lanes(dig_port);
+ dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
+
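+	/*
+	 * A Type-C PHY without TBT or USB Type-C support in the VBT is
+	 * wired up as a legacy (native DP/HDMI) port.
+	 */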
+ if (intel_phy_is_tc(dev_priv, phy)) {
+ bool is_legacy =
+ !intel_bios_encoder_supports_typec_usb(devdata) &&
+ !intel_bios_encoder_supports_tbt(devdata);
+
+ intel_tc_port_init(dig_port, is_legacy);
+
+ encoder->update_prepare = intel_ddi_update_prepare;
+ encoder->update_complete = intel_ddi_update_complete;
+ }
+
+ drm_WARN_ON(&dev_priv->drm, port > PORT_I);
+ dig_port->ddi_io_power_domain = intel_display_power_ddi_io_domain(dev_priv, port);
+
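+	/* Create the DP and/or HDMI connectors requested by the VBT. */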
+ if (init_dp) {
+ if (!intel_ddi_init_dp_connector(dig_port))
+ goto err;
+
+ dig_port->hpd_pulse = intel_dp_hpd_pulse;
+
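+		/* eDP MSO (multi-SST operation) panels restrict the usable pipes. */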
+ if (dig_port->dp.mso_link_count)
+ encoder->pipe_mask = intel_ddi_splitter_pipe_mask(dev_priv);
+ }
+
+	/*
+	 * In theory we don't need the encoder->type check, but leave it just
+	 * in case we have some really bad VBTs...
+	 */
+ if (encoder->type != INTEL_OUTPUT_EDP && init_hdmi) {
+ if (!intel_ddi_init_hdmi_connector(dig_port))
+ goto err;
+ }
+
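+	/* Pick the hotplug live status hook for this platform and port. */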
+ if (DISPLAY_VER(dev_priv) >= 11) {
+ if (intel_phy_is_tc(dev_priv, phy))
+ dig_port->connected = intel_tc_port_connected;
+ else
+ dig_port->connected = lpt_digital_port_connected;
+ } else if (DISPLAY_VER(dev_priv) >= 8) {
+ if (port == PORT_A || IS_GEMINILAKE(dev_priv) ||
+ IS_BROXTON(dev_priv))
+ dig_port->connected = bdw_digital_port_connected;
+ else
+ dig_port->connected = lpt_digital_port_connected;
+ } else {
+ if (port == PORT_A)
+ dig_port->connected = hsw_digital_port_connected;
+ else
+ dig_port->connected = lpt_digital_port_connected;
+ }
+
+ intel_infoframe_init(dig_port);
+
+ return;
+
+err:
+ drm_encoder_cleanup(&encoder->base);
+ kfree(dig_port);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.h b/drivers/gpu/drm/i915/display/intel_ddi.h
new file mode 100644
index 000000000..d39076fac
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_ddi.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_DDI_H__
+#define __INTEL_DDI_H__
+
+#include "i915_reg_defs.h"
+
+struct drm_connector_state;
+struct drm_i915_private;
+struct intel_atomic_state;
+struct intel_connector;
+struct intel_crtc;
+struct intel_crtc_state;
+struct intel_dp;
+struct intel_dpll_hw_state;
+struct intel_encoder;
+struct intel_shared_dpll;
+enum pipe;
+enum port;
+enum transcoder;
+
+i915_reg_t dp_tp_ctl_reg(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+i915_reg_t dp_tp_status_reg(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+void intel_ddi_fdi_post_disable(struct intel_atomic_state *state,
+ struct intel_encoder *intel_encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state);
+void intel_ddi_enable_clock(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+void intel_ddi_disable_clock(struct intel_encoder *encoder);
+void intel_ddi_get_clock(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ struct intel_shared_dpll *pll);
+void hsw_ddi_enable_clock(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+void hsw_ddi_disable_clock(struct intel_encoder *encoder);
+bool hsw_ddi_is_clock_enabled(struct intel_encoder *encoder);
+void hsw_ddi_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state);
+struct intel_shared_dpll *icl_ddi_combo_get_pll(struct intel_encoder *encoder);
+void hsw_prepare_dp_ddi_buffers(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
+ enum port port);
+void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port);
+bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
+void intel_ddi_enable_transcoder_func(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state);
+void intel_ddi_enable_pipe_clock(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state);
+void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
+bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
+void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state,
+ bool state);
+void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
+ struct intel_crtc_state *crtc_state);
+int intel_ddi_toggle_hdcp_bits(struct intel_encoder *intel_encoder,
+ enum transcoder cpu_transcoder,
+ bool enable, u32 hdcp_mask);
+void intel_ddi_sanitize_encoder_pll_mapping(struct intel_encoder *encoder);
+int intel_ddi_level(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int lane);
+
+#endif /* __INTEL_DDI_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
new file mode 100644
index 000000000..006a2e979
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
@@ -0,0 +1,1662 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "intel_ddi.h"
+#include "intel_ddi_buf_trans.h"
+#include "intel_de.h"
+#include "intel_display_types.h"
+#include "intel_dp.h"
+
+/*
+ * HDMI/DVI modes ignore everything but the last 2 items. So we share
+ * them for both DP and FDI transports, allowing those ports to
+ * automatically adapt to HDMI connections as well.
+ */
+static const union intel_ddi_buf_trans_entry _hsw_trans_dp[] = {
+ { .hsw = { 0x00FFFFFF, 0x0006000E, 0x0 } },
+ { .hsw = { 0x00D75FFF, 0x0005000A, 0x0 } },
+ { .hsw = { 0x00C30FFF, 0x00040006, 0x0 } },
+ { .hsw = { 0x80AAAFFF, 0x000B0000, 0x0 } },
+ { .hsw = { 0x00FFFFFF, 0x0005000A, 0x0 } },
+ { .hsw = { 0x00D75FFF, 0x000C0004, 0x0 } },
+ { .hsw = { 0x80C30FFF, 0x000B0000, 0x0 } },
+ { .hsw = { 0x00FFFFFF, 0x00040006, 0x0 } },
+ { .hsw = { 0x80D75FFF, 0x000B0000, 0x0 } },
+};
+
+static const struct intel_ddi_buf_trans hsw_trans_dp = {
+ .entries = _hsw_trans_dp,
+ .num_entries = ARRAY_SIZE(_hsw_trans_dp),
+};
+
+static const union intel_ddi_buf_trans_entry _hsw_trans_fdi[] = {
+ { .hsw = { 0x00FFFFFF, 0x0007000E, 0x0 } },
+ { .hsw = { 0x00D75FFF, 0x000F000A, 0x0 } },
+ { .hsw = { 0x00C30FFF, 0x00060006, 0x0 } },
+ { .hsw = { 0x00AAAFFF, 0x001E0000, 0x0 } },
+ { .hsw = { 0x00FFFFFF, 0x000F000A, 0x0 } },
+ { .hsw = { 0x00D75FFF, 0x00160004, 0x0 } },
+ { .hsw = { 0x00C30FFF, 0x001E0000, 0x0 } },
+ { .hsw = { 0x00FFFFFF, 0x00060006, 0x0 } },
+ { .hsw = { 0x00D75FFF, 0x001E0000, 0x0 } },
+};
+
+static const struct intel_ddi_buf_trans hsw_trans_fdi = {
+ .entries = _hsw_trans_fdi,
+ .num_entries = ARRAY_SIZE(_hsw_trans_fdi),
+};
+
+static const union intel_ddi_buf_trans_entry _hsw_trans_hdmi[] = {
+ /* Idx NT mV d T mV d db */
+ { .hsw = { 0x00FFFFFF, 0x0006000E, 0x0 } }, /* 0: 400 400 0 */
+ { .hsw = { 0x00E79FFF, 0x000E000C, 0x0 } }, /* 1: 400 500 2 */
+ { .hsw = { 0x00D75FFF, 0x0005000A, 0x0 } }, /* 2: 400 600 3.5 */
+ { .hsw = { 0x00FFFFFF, 0x0005000A, 0x0 } }, /* 3: 600 600 0 */
+ { .hsw = { 0x00E79FFF, 0x001D0007, 0x0 } }, /* 4: 600 750 2 */
+ { .hsw = { 0x00D75FFF, 0x000C0004, 0x0 } }, /* 5: 600 900 3.5 */
+ { .hsw = { 0x00FFFFFF, 0x00040006, 0x0 } }, /* 6: 800 800 0 */
+ { .hsw = { 0x80E79FFF, 0x00030002, 0x0 } }, /* 7: 800 1000 2 */
+ { .hsw = { 0x00FFFFFF, 0x00140005, 0x0 } }, /* 8: 850 850 0 */
+ { .hsw = { 0x00FFFFFF, 0x000C0004, 0x0 } }, /* 9: 900 900 0 */
+ { .hsw = { 0x00FFFFFF, 0x001C0003, 0x0 } }, /* 10: 950 950 0 */
+ { .hsw = { 0x80FFFFFF, 0x00030002, 0x0 } }, /* 11: 1000 1000 0 */
+};
+
+static const struct intel_ddi_buf_trans hsw_trans_hdmi = {
+ .entries = _hsw_trans_hdmi,
+ .num_entries = ARRAY_SIZE(_hsw_trans_hdmi),
+ .hdmi_default_entry = 6,
+};
+
+static const union intel_ddi_buf_trans_entry _bdw_trans_edp[] = {
+ { .hsw = { 0x00FFFFFF, 0x00000012, 0x0 } },
+ { .hsw = { 0x00EBAFFF, 0x00020011, 0x0 } },
+ { .hsw = { 0x00C71FFF, 0x0006000F, 0x0 } },
+ { .hsw = { 0x00AAAFFF, 0x000E000A, 0x0 } },
+ { .hsw = { 0x00FFFFFF, 0x00020011, 0x0 } },
+ { .hsw = { 0x00DB6FFF, 0x0005000F, 0x0 } },
+ { .hsw = { 0x00BEEFFF, 0x000A000C, 0x0 } },
+ { .hsw = { 0x00FFFFFF, 0x0005000F, 0x0 } },
+ { .hsw = { 0x00DB6FFF, 0x000A000C, 0x0 } },
+};
+
+static const struct intel_ddi_buf_trans bdw_trans_edp = {
+ .entries = _bdw_trans_edp,
+ .num_entries = ARRAY_SIZE(_bdw_trans_edp),
+};
+
+static const union intel_ddi_buf_trans_entry _bdw_trans_dp[] = {
+ { .hsw = { 0x00FFFFFF, 0x0007000E, 0x0 } },
+ { .hsw = { 0x00D75FFF, 0x000E000A, 0x0 } },
+ { .hsw = { 0x00BEFFFF, 0x00140006, 0x0 } },
+ { .hsw = { 0x80B2CFFF, 0x001B0002, 0x0 } },
+ { .hsw = { 0x00FFFFFF, 0x000E000A, 0x0 } },
+ { .hsw = { 0x00DB6FFF, 0x00160005, 0x0 } },
+ { .hsw = { 0x80C71FFF, 0x001A0002, 0x0 } },
+ { .hsw = { 0x00F7DFFF, 0x00180004, 0x0 } },
+ { .hsw = { 0x80D75FFF, 0x001B0002, 0x0 } },
+};
+
+static const struct intel_ddi_buf_trans bdw_trans_dp = {
+ .entries = _bdw_trans_dp,
+ .num_entries = ARRAY_SIZE(_bdw_trans_dp),
+};
+
+static const union intel_ddi_buf_trans_entry _bdw_trans_fdi[] = {
+ { .hsw = { 0x00FFFFFF, 0x0001000E, 0x0 } },
+ { .hsw = { 0x00D75FFF, 0x0004000A, 0x0 } },
+ { .hsw = { 0x00C30FFF, 0x00070006, 0x0 } },
+ { .hsw = { 0x00AAAFFF, 0x000C0000, 0x0 } },
+ { .hsw = { 0x00FFFFFF, 0x0004000A, 0x0 } },
+ { .hsw = { 0x00D75FFF, 0x00090004, 0x0 } },
+ { .hsw = { 0x00C30FFF, 0x000C0000, 0x0 } },
+ { .hsw = { 0x00FFFFFF, 0x00070006, 0x0 } },
+ { .hsw = { 0x00D75FFF, 0x000C0000, 0x0 } },
+};
+
+static const struct intel_ddi_buf_trans bdw_trans_fdi = {
+ .entries = _bdw_trans_fdi,
+ .num_entries = ARRAY_SIZE(_bdw_trans_fdi),
+};
+
+static const union intel_ddi_buf_trans_entry _bdw_trans_hdmi[] = {
+ /* Idx NT mV d T mV df db */
+ { .hsw = { 0x00FFFFFF, 0x0007000E, 0x0 } }, /* 0: 400 400 0 */
+ { .hsw = { 0x00D75FFF, 0x000E000A, 0x0 } }, /* 1: 400 600 3.5 */
+ { .hsw = { 0x00BEFFFF, 0x00140006, 0x0 } }, /* 2: 400 800 6 */
+ { .hsw = { 0x00FFFFFF, 0x0009000D, 0x0 } }, /* 3: 450 450 0 */
+ { .hsw = { 0x00FFFFFF, 0x000E000A, 0x0 } }, /* 4: 600 600 0 */
+ { .hsw = { 0x00D7FFFF, 0x00140006, 0x0 } }, /* 5: 600 800 2.5 */
+ { .hsw = { 0x80CB2FFF, 0x001B0002, 0x0 } }, /* 6: 600 1000 4.5 */
+ { .hsw = { 0x00FFFFFF, 0x00140006, 0x0 } }, /* 7: 800 800 0 */
+ { .hsw = { 0x80E79FFF, 0x001B0002, 0x0 } }, /* 8: 800 1000 2 */
+ { .hsw = { 0x80FFFFFF, 0x001B0002, 0x0 } }, /* 9: 1000 1000 0 */
+};
+
+static const struct intel_ddi_buf_trans bdw_trans_hdmi = {
+ .entries = _bdw_trans_hdmi,
+ .num_entries = ARRAY_SIZE(_bdw_trans_hdmi),
+ .hdmi_default_entry = 7,
+};
+
+/* Skylake H and S */
+static const union intel_ddi_buf_trans_entry _skl_trans_dp[] = {
+ { .hsw = { 0x00002016, 0x000000A0, 0x0 } },
+ { .hsw = { 0x00005012, 0x0000009B, 0x0 } },
+ { .hsw = { 0x00007011, 0x00000088, 0x0 } },
+ { .hsw = { 0x80009010, 0x000000C0, 0x1 } },
+ { .hsw = { 0x00002016, 0x0000009B, 0x0 } },
+ { .hsw = { 0x00005012, 0x00000088, 0x0 } },
+ { .hsw = { 0x80007011, 0x000000C0, 0x1 } },
+ { .hsw = { 0x00002016, 0x000000DF, 0x0 } },
+ { .hsw = { 0x80005012, 0x000000C0, 0x1 } },
+};
+
+static const struct intel_ddi_buf_trans skl_trans_dp = {
+ .entries = _skl_trans_dp,
+ .num_entries = ARRAY_SIZE(_skl_trans_dp),
+};
+
+/* Skylake U */
+static const union intel_ddi_buf_trans_entry _skl_u_trans_dp[] = {
+ { .hsw = { 0x0000201B, 0x000000A2, 0x0 } },
+ { .hsw = { 0x00005012, 0x00000088, 0x0 } },
+ { .hsw = { 0x80007011, 0x000000CD, 0x1 } },
+ { .hsw = { 0x80009010, 0x000000C0, 0x1 } },
+ { .hsw = { 0x0000201B, 0x0000009D, 0x0 } },
+ { .hsw = { 0x80005012, 0x000000C0, 0x1 } },
+ { .hsw = { 0x80007011, 0x000000C0, 0x1 } },
+ { .hsw = { 0x00002016, 0x00000088, 0x0 } },
+ { .hsw = { 0x80005012, 0x000000C0, 0x1 } },
+};
+
+static const struct intel_ddi_buf_trans skl_u_trans_dp = {
+ .entries = _skl_u_trans_dp,
+ .num_entries = ARRAY_SIZE(_skl_u_trans_dp),
+};
+
+/* Skylake Y */
+static const union intel_ddi_buf_trans_entry _skl_y_trans_dp[] = {
+ { .hsw = { 0x00000018, 0x000000A2, 0x0 } },
+ { .hsw = { 0x00005012, 0x00000088, 0x0 } },
+ { .hsw = { 0x80007011, 0x000000CD, 0x3 } },
+ { .hsw = { 0x80009010, 0x000000C0, 0x3 } },
+ { .hsw = { 0x00000018, 0x0000009D, 0x0 } },
+ { .hsw = { 0x80005012, 0x000000C0, 0x3 } },
+ { .hsw = { 0x80007011, 0x000000C0, 0x3 } },
+ { .hsw = { 0x00000018, 0x00000088, 0x0 } },
+ { .hsw = { 0x80005012, 0x000000C0, 0x3 } },
+};
+
+static const struct intel_ddi_buf_trans skl_y_trans_dp = {
+ .entries = _skl_y_trans_dp,
+ .num_entries = ARRAY_SIZE(_skl_y_trans_dp),
+};
+
+/* Kabylake H and S */
+static const union intel_ddi_buf_trans_entry _kbl_trans_dp[] = {
+ { .hsw = { 0x00002016, 0x000000A0, 0x0 } },
+ { .hsw = { 0x00005012, 0x0000009B, 0x0 } },
+ { .hsw = { 0x00007011, 0x00000088, 0x0 } },
+ { .hsw = { 0x80009010, 0x000000C0, 0x1 } },
+ { .hsw = { 0x00002016, 0x0000009B, 0x0 } },
+ { .hsw = { 0x00005012, 0x00000088, 0x0 } },
+ { .hsw = { 0x80007011, 0x000000C0, 0x1 } },
+ { .hsw = { 0x00002016, 0x00000097, 0x0 } },
+ { .hsw = { 0x80005012, 0x000000C0, 0x1 } },
+};
+
+static const struct intel_ddi_buf_trans kbl_trans_dp = {
+ .entries = _kbl_trans_dp,
+ .num_entries = ARRAY_SIZE(_kbl_trans_dp),
+};
+
+/* Kabylake U */
+static const union intel_ddi_buf_trans_entry _kbl_u_trans_dp[] = {
+ { .hsw = { 0x0000201B, 0x000000A1, 0x0 } },
+ { .hsw = { 0x00005012, 0x00000088, 0x0 } },
+ { .hsw = { 0x80007011, 0x000000CD, 0x3 } },
+ { .hsw = { 0x80009010, 0x000000C0, 0x3 } },
+ { .hsw = { 0x0000201B, 0x0000009D, 0x0 } },
+ { .hsw = { 0x80005012, 0x000000C0, 0x3 } },
+ { .hsw = { 0x80007011, 0x000000C0, 0x3 } },
+ { .hsw = { 0x00002016, 0x0000004F, 0x0 } },
+ { .hsw = { 0x80005012, 0x000000C0, 0x3 } },
+};
+
+static const struct intel_ddi_buf_trans kbl_u_trans_dp = {
+ .entries = _kbl_u_trans_dp,
+ .num_entries = ARRAY_SIZE(_kbl_u_trans_dp),
+};
+
+/* Kabylake Y */
+static const union intel_ddi_buf_trans_entry _kbl_y_trans_dp[] = {
+ { .hsw = { 0x00001017, 0x000000A1, 0x0 } },
+ { .hsw = { 0x00005012, 0x00000088, 0x0 } },
+ { .hsw = { 0x80007011, 0x000000CD, 0x3 } },
+ { .hsw = { 0x8000800F, 0x000000C0, 0x3 } },
+ { .hsw = { 0x00001017, 0x0000009D, 0x0 } },
+ { .hsw = { 0x80005012, 0x000000C0, 0x3 } },
+ { .hsw = { 0x80007011, 0x000000C0, 0x3 } },
+ { .hsw = { 0x00001017, 0x0000004C, 0x0 } },
+ { .hsw = { 0x80005012, 0x000000C0, 0x3 } },
+};
+
+static const struct intel_ddi_buf_trans kbl_y_trans_dp = {
+ .entries = _kbl_y_trans_dp,
+ .num_entries = ARRAY_SIZE(_kbl_y_trans_dp),
+};
+
+/*
+ * Skylake/Kabylake H and S
+ * eDP 1.4 low vswing translation parameters
+ */
+static const union intel_ddi_buf_trans_entry _skl_trans_edp[] = {
+ { .hsw = { 0x00000018, 0x000000A8, 0x0 } },
+ { .hsw = { 0x00004013, 0x000000A9, 0x0 } },
+ { .hsw = { 0x00007011, 0x000000A2, 0x0 } },
+ { .hsw = { 0x00009010, 0x0000009C, 0x0 } },
+ { .hsw = { 0x00000018, 0x000000A9, 0x0 } },
+ { .hsw = { 0x00006013, 0x000000A2, 0x0 } },
+ { .hsw = { 0x00007011, 0x000000A6, 0x0 } },
+ { .hsw = { 0x00000018, 0x000000AB, 0x0 } },
+ { .hsw = { 0x00007013, 0x0000009F, 0x0 } },
+ { .hsw = { 0x00000018, 0x000000DF, 0x0 } },
+};
+
+static const struct intel_ddi_buf_trans skl_trans_edp = {
+ .entries = _skl_trans_edp,
+ .num_entries = ARRAY_SIZE(_skl_trans_edp),
+};
+
+/*
+ * Skylake/Kabylake U
+ * eDP 1.4 low vswing translation parameters
+ */
+static const union intel_ddi_buf_trans_entry _skl_u_trans_edp[] = {
+ { .hsw = { 0x00000018, 0x000000A8, 0x0 } },
+ { .hsw = { 0x00004013, 0x000000A9, 0x0 } },
+ { .hsw = { 0x00007011, 0x000000A2, 0x0 } },
+ { .hsw = { 0x00009010, 0x0000009C, 0x0 } },
+ { .hsw = { 0x00000018, 0x000000A9, 0x0 } },
+ { .hsw = { 0x00006013, 0x000000A2, 0x0 } },
+ { .hsw = { 0x00007011, 0x000000A6, 0x0 } },
+ { .hsw = { 0x00002016, 0x000000AB, 0x0 } },
+ { .hsw = { 0x00005013, 0x0000009F, 0x0 } },
+ { .hsw = { 0x00000018, 0x000000DF, 0x0 } },
+};
+
+static const struct intel_ddi_buf_trans skl_u_trans_edp = {
+ .entries = _skl_u_trans_edp,
+ .num_entries = ARRAY_SIZE(_skl_u_trans_edp),
+};
+
+/*
+ * Skylake/Kabylake Y
+ * eDP 1.4 low vswing translation parameters
+ */
+static const union intel_ddi_buf_trans_entry _skl_y_trans_edp[] = {
+ { .hsw = { 0x00000018, 0x000000A8, 0x0 } },
+ { .hsw = { 0x00004013, 0x000000AB, 0x0 } },
+ { .hsw = { 0x00007011, 0x000000A4, 0x0 } },
+ { .hsw = { 0x00009010, 0x000000DF, 0x0 } },
+ { .hsw = { 0x00000018, 0x000000AA, 0x0 } },
+ { .hsw = { 0x00006013, 0x000000A4, 0x0 } },
+ { .hsw = { 0x00007011, 0x0000009D, 0x0 } },
+ { .hsw = { 0x00000018, 0x000000A0, 0x0 } },
+ { .hsw = { 0x00006012, 0x000000DF, 0x0 } },
+ { .hsw = { 0x00000018, 0x0000008A, 0x0 } },
+};
+
+static const struct intel_ddi_buf_trans skl_y_trans_edp = {
+ .entries = _skl_y_trans_edp,
+ .num_entries = ARRAY_SIZE(_skl_y_trans_edp),
+};
+
+/* Skylake/Kabylake U, H and S */
+static const union intel_ddi_buf_trans_entry _skl_trans_hdmi[] = {
+ { .hsw = { 0x00000018, 0x000000AC, 0x0 } },
+ { .hsw = { 0x00005012, 0x0000009D, 0x0 } },
+ { .hsw = { 0x00007011, 0x00000088, 0x0 } },
+ { .hsw = { 0x00000018, 0x000000A1, 0x0 } },
+ { .hsw = { 0x00000018, 0x00000098, 0x0 } },
+ { .hsw = { 0x00004013, 0x00000088, 0x0 } },
+ { .hsw = { 0x80006012, 0x000000CD, 0x1 } },
+ { .hsw = { 0x00000018, 0x000000DF, 0x0 } },
+ { .hsw = { 0x80003015, 0x000000CD, 0x1 } }, /* Default */
+ { .hsw = { 0x80003015, 0x000000C0, 0x1 } },
+ { .hsw = { 0x80000018, 0x000000C0, 0x1 } },
+};
+
+static const struct intel_ddi_buf_trans skl_trans_hdmi = {
+ .entries = _skl_trans_hdmi,
+ .num_entries = ARRAY_SIZE(_skl_trans_hdmi),
+ .hdmi_default_entry = 8,
+};
+
+/* Skylake/Kabylake Y */
+static const union intel_ddi_buf_trans_entry _skl_y_trans_hdmi[] = {
+ { .hsw = { 0x00000018, 0x000000A1, 0x0 } },
+ { .hsw = { 0x00005012, 0x000000DF, 0x0 } },
+ { .hsw = { 0x80007011, 0x000000CB, 0x3 } },
+ { .hsw = { 0x00000018, 0x000000A4, 0x0 } },
+ { .hsw = { 0x00000018, 0x0000009D, 0x0 } },
+ { .hsw = { 0x00004013, 0x00000080, 0x0 } },
+ { .hsw = { 0x80006013, 0x000000C0, 0x3 } },
+ { .hsw = { 0x00000018, 0x0000008A, 0x0 } },
+ { .hsw = { 0x80003015, 0x000000C0, 0x3 } }, /* Default */
+ { .hsw = { 0x80003015, 0x000000C0, 0x3 } },
+ { .hsw = { 0x80000018, 0x000000C0, 0x3 } },
+};
+
+static const struct intel_ddi_buf_trans skl_y_trans_hdmi = {
+ .entries = _skl_y_trans_hdmi,
+ .num_entries = ARRAY_SIZE(_skl_y_trans_hdmi),
+ .hdmi_default_entry = 8,
+};
+
+static const union intel_ddi_buf_trans_entry _bxt_trans_dp[] = {
+ /* Idx NT mV diff db */
+ { .bxt = { 52, 0x9A, 0, 128, } }, /* 0: 400 0 */
+ { .bxt = { 78, 0x9A, 0, 85, } }, /* 1: 400 3.5 */
+ { .bxt = { 104, 0x9A, 0, 64, } }, /* 2: 400 6 */
+ { .bxt = { 154, 0x9A, 0, 43, } }, /* 3: 400 9.5 */
+ { .bxt = { 77, 0x9A, 0, 128, } }, /* 4: 600 0 */
+ { .bxt = { 116, 0x9A, 0, 85, } }, /* 5: 600 3.5 */
+ { .bxt = { 154, 0x9A, 0, 64, } }, /* 6: 600 6 */
+ { .bxt = { 102, 0x9A, 0, 128, } }, /* 7: 800 0 */
+ { .bxt = { 154, 0x9A, 0, 85, } }, /* 8: 800 3.5 */
+ { .bxt = { 154, 0x9A, 1, 128, } }, /* 9: 1200 0 */
+};
+
+static const struct intel_ddi_buf_trans bxt_trans_dp = {
+ .entries = _bxt_trans_dp,
+ .num_entries = ARRAY_SIZE(_bxt_trans_dp),
+};
+
+static const union intel_ddi_buf_trans_entry _bxt_trans_edp[] = {
+ /* Idx NT mV diff db */
+ { .bxt = { 26, 0, 0, 128, } }, /* 0: 200 0 */
+ { .bxt = { 38, 0, 0, 112, } }, /* 1: 200 1.5 */
+ { .bxt = { 48, 0, 0, 96, } }, /* 2: 200 4 */
+ { .bxt = { 54, 0, 0, 69, } }, /* 3: 200 6 */
+ { .bxt = { 32, 0, 0, 128, } }, /* 4: 250 0 */
+ { .bxt = { 48, 0, 0, 104, } }, /* 5: 250 1.5 */
+ { .bxt = { 54, 0, 0, 85, } }, /* 6: 250 4 */
+ { .bxt = { 43, 0, 0, 128, } }, /* 7: 300 0 */
+ { .bxt = { 54, 0, 0, 101, } }, /* 8: 300 1.5 */
+ { .bxt = { 48, 0, 0, 128, } }, /* 9: 300 0 */
+};
+
+static const struct intel_ddi_buf_trans bxt_trans_edp = {
+ .entries = _bxt_trans_edp,
+ .num_entries = ARRAY_SIZE(_bxt_trans_edp),
+};
+
+/*
+ * BSpec has 2 recommended values - entries 0 and 8.
+ * Using the entry with the higher vswing.
+ */
+static const union intel_ddi_buf_trans_entry _bxt_trans_hdmi[] = {
+ /* Idx NT mV diff db */
+ { .bxt = { 52, 0x9A, 0, 128, } }, /* 0: 400 0 */
+ { .bxt = { 52, 0x9A, 0, 85, } }, /* 1: 400 3.5 */
+ { .bxt = { 52, 0x9A, 0, 64, } }, /* 2: 400 6 */
+ { .bxt = { 42, 0x9A, 0, 43, } }, /* 3: 400 9.5 */
+ { .bxt = { 77, 0x9A, 0, 128, } }, /* 4: 600 0 */
+ { .bxt = { 77, 0x9A, 0, 85, } }, /* 5: 600 3.5 */
+ { .bxt = { 77, 0x9A, 0, 64, } }, /* 6: 600 6 */
+ { .bxt = { 102, 0x9A, 0, 128, } }, /* 7: 800 0 */
+ { .bxt = { 102, 0x9A, 0, 85, } }, /* 8: 800 3.5 */
+ { .bxt = { 154, 0x9A, 1, 128, } }, /* 9: 1200 0 */
+};
+
+static const struct intel_ddi_buf_trans bxt_trans_hdmi = {
+ .entries = _bxt_trans_hdmi,
+ .num_entries = ARRAY_SIZE(_bxt_trans_hdmi),
+ .hdmi_default_entry = ARRAY_SIZE(_bxt_trans_hdmi) - 1,
+};
+
+/* icl_combo_phy_trans */
+static const union intel_ddi_buf_trans_entry _icl_combo_phy_trans_dp_hbr2_edp_hbr3[] = {
+ /* NT mV Trans mV db */
+ { .icl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */
+ { .icl = { 0xA, 0x4F, 0x37, 0x00, 0x08 } }, /* 350 500 3.1 */
+ { .icl = { 0xC, 0x71, 0x2F, 0x00, 0x10 } }, /* 350 700 6.0 */
+ { .icl = { 0x6, 0x7F, 0x2B, 0x00, 0x14 } }, /* 350 900 8.2 */
+ { .icl = { 0xA, 0x4C, 0x3F, 0x00, 0x00 } }, /* 500 500 0.0 */
+ { .icl = { 0xC, 0x73, 0x34, 0x00, 0x0B } }, /* 500 700 2.9 */
+ { .icl = { 0x6, 0x7F, 0x2F, 0x00, 0x10 } }, /* 500 900 5.1 */
+ { .icl = { 0xC, 0x6C, 0x3C, 0x00, 0x03 } }, /* 650 700 0.6 */
+ { .icl = { 0x6, 0x7F, 0x35, 0x00, 0x0A } }, /* 600 900 3.5 */
+ { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */
+};
+
+static const struct intel_ddi_buf_trans icl_combo_phy_trans_dp_hbr2_edp_hbr3 = {
+ .entries = _icl_combo_phy_trans_dp_hbr2_edp_hbr3,
+ .num_entries = ARRAY_SIZE(_icl_combo_phy_trans_dp_hbr2_edp_hbr3),
+};
+
+static const union intel_ddi_buf_trans_entry _icl_combo_phy_trans_edp_hbr2[] = {
+ /* NT mV Trans mV db */
+ { .icl = { 0x0, 0x7F, 0x3F, 0x00, 0x00 } }, /* 200 200 0.0 */
+ { .icl = { 0x8, 0x7F, 0x38, 0x00, 0x07 } }, /* 200 250 1.9 */
+ { .icl = { 0x1, 0x7F, 0x33, 0x00, 0x0C } }, /* 200 300 3.5 */
+ { .icl = { 0x9, 0x7F, 0x31, 0x00, 0x0E } }, /* 200 350 4.9 */
+ { .icl = { 0x8, 0x7F, 0x3F, 0x00, 0x00 } }, /* 250 250 0.0 */
+ { .icl = { 0x1, 0x7F, 0x38, 0x00, 0x07 } }, /* 250 300 1.6 */
+ { .icl = { 0x9, 0x7F, 0x35, 0x00, 0x0A } }, /* 250 350 2.9 */
+ { .icl = { 0x1, 0x7F, 0x3F, 0x00, 0x00 } }, /* 300 300 0.0 */
+ { .icl = { 0x9, 0x7F, 0x38, 0x00, 0x07 } }, /* 300 350 1.3 */
+ { .icl = { 0x9, 0x7F, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */
+};
+
+static const struct intel_ddi_buf_trans icl_combo_phy_trans_edp_hbr2 = {
+ .entries = _icl_combo_phy_trans_edp_hbr2,
+ .num_entries = ARRAY_SIZE(_icl_combo_phy_trans_edp_hbr2),
+};
+
+static const union intel_ddi_buf_trans_entry _icl_combo_phy_trans_hdmi[] = {
+ /* NT mV Trans mV db */
+ { .icl = { 0xA, 0x60, 0x3F, 0x00, 0x00 } }, /* 450 450 0.0 */
+ { .icl = { 0xB, 0x73, 0x36, 0x00, 0x09 } }, /* 450 650 3.2 */
+ { .icl = { 0x6, 0x7F, 0x31, 0x00, 0x0E } }, /* 450 850 5.5 */
+ { .icl = { 0xB, 0x73, 0x3F, 0x00, 0x00 } }, /* 650 650 0.0 ALS */
+ { .icl = { 0x6, 0x7F, 0x37, 0x00, 0x08 } }, /* 650 850 2.3 */
+ { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 850 850 0.0 */
+ { .icl = { 0x6, 0x7F, 0x35, 0x00, 0x0A } }, /* 600 850 3.0 */
+};
+
+static const struct intel_ddi_buf_trans icl_combo_phy_trans_hdmi = {
+ .entries = _icl_combo_phy_trans_hdmi,
+ .num_entries = ARRAY_SIZE(_icl_combo_phy_trans_hdmi),
+ .hdmi_default_entry = ARRAY_SIZE(_icl_combo_phy_trans_hdmi) - 1,
+};
+
+static const union intel_ddi_buf_trans_entry _ehl_combo_phy_trans_dp[] = {
+ /* NT mV Trans mV db */
+ { .icl = { 0xA, 0x33, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */
+ { .icl = { 0xA, 0x47, 0x38, 0x00, 0x07 } }, /* 350 500 3.1 */
+ { .icl = { 0xC, 0x64, 0x33, 0x00, 0x0C } }, /* 350 700 6.0 */
+ { .icl = { 0x6, 0x7F, 0x2F, 0x00, 0x10 } }, /* 350 900 8.2 */
+ { .icl = { 0xA, 0x46, 0x3F, 0x00, 0x00 } }, /* 500 500 0.0 */
+ { .icl = { 0xC, 0x64, 0x37, 0x00, 0x08 } }, /* 500 700 2.9 */
+ { .icl = { 0x6, 0x7F, 0x32, 0x00, 0x0D } }, /* 500 900 5.1 */
+ { .icl = { 0xC, 0x61, 0x3F, 0x00, 0x00 } }, /* 650 700 0.6 */
+ { .icl = { 0x6, 0x7F, 0x37, 0x00, 0x08 } }, /* 600 900 3.5 */
+ { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */
+};
+
+static const struct intel_ddi_buf_trans ehl_combo_phy_trans_dp = {
+ .entries = _ehl_combo_phy_trans_dp,
+ .num_entries = ARRAY_SIZE(_ehl_combo_phy_trans_dp),
+};
+
+static const union intel_ddi_buf_trans_entry _ehl_combo_phy_trans_edp_hbr2[] = {
+ /* NT mV Trans mV db */
+ { .icl = { 0x8, 0x7F, 0x3F, 0x00, 0x00 } }, /* 200 200 0.0 */
+ { .icl = { 0x8, 0x7F, 0x3F, 0x00, 0x00 } }, /* 200 250 1.9 */
+ { .icl = { 0x1, 0x7F, 0x3D, 0x00, 0x02 } }, /* 200 300 3.5 */
+ { .icl = { 0xA, 0x35, 0x39, 0x00, 0x06 } }, /* 200 350 4.9 */
+ { .icl = { 0x8, 0x7F, 0x3F, 0x00, 0x00 } }, /* 250 250 0.0 */
+ { .icl = { 0x1, 0x7F, 0x3C, 0x00, 0x03 } }, /* 250 300 1.6 */
+ { .icl = { 0xA, 0x35, 0x39, 0x00, 0x06 } }, /* 250 350 2.9 */
+ { .icl = { 0x1, 0x7F, 0x3F, 0x00, 0x00 } }, /* 300 300 0.0 */
+ { .icl = { 0xA, 0x35, 0x38, 0x00, 0x07 } }, /* 300 350 1.3 */
+ { .icl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */
+};
+
+static const struct intel_ddi_buf_trans ehl_combo_phy_trans_edp_hbr2 = {
+ .entries = _ehl_combo_phy_trans_edp_hbr2,
+ .num_entries = ARRAY_SIZE(_ehl_combo_phy_trans_edp_hbr2),
+};
+
+static const union intel_ddi_buf_trans_entry _jsl_combo_phy_trans_edp_hbr[] = {
+ /* NT mV Trans mV db */
+ { .icl = { 0x8, 0x7F, 0x3F, 0x00, 0x00 } }, /* 200 200 0.0 */
+ { .icl = { 0x8, 0x7F, 0x38, 0x00, 0x07 } }, /* 200 250 1.9 */
+ { .icl = { 0x1, 0x7F, 0x33, 0x00, 0x0C } }, /* 200 300 3.5 */
+ { .icl = { 0xA, 0x35, 0x36, 0x00, 0x09 } }, /* 200 350 4.9 */
+ { .icl = { 0x8, 0x7F, 0x3F, 0x00, 0x00 } }, /* 250 250 0.0 */
+ { .icl = { 0x1, 0x7F, 0x38, 0x00, 0x07 } }, /* 250 300 1.6 */
+ { .icl = { 0xA, 0x35, 0x35, 0x00, 0x0A } }, /* 250 350 2.9 */
+ { .icl = { 0x1, 0x7F, 0x3F, 0x00, 0x00 } }, /* 300 300 0.0 */
+ { .icl = { 0xA, 0x35, 0x38, 0x00, 0x07 } }, /* 300 350 1.3 */
+ { .icl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */
+};
+
+static const struct intel_ddi_buf_trans jsl_combo_phy_trans_edp_hbr = {
+ .entries = _jsl_combo_phy_trans_edp_hbr,
+ .num_entries = ARRAY_SIZE(_jsl_combo_phy_trans_edp_hbr),
+};
+
+static const union intel_ddi_buf_trans_entry _jsl_combo_phy_trans_edp_hbr2[] = {
+ /* NT mV Trans mV db */
+ { .icl = { 0x8, 0x7F, 0x3F, 0x00, 0x00 } }, /* 200 200 0.0 */
+ { .icl = { 0x8, 0x7F, 0x3F, 0x00, 0x00 } }, /* 200 250 1.9 */
+ { .icl = { 0x1, 0x7F, 0x3D, 0x00, 0x02 } }, /* 200 300 3.5 */
+ { .icl = { 0xA, 0x35, 0x38, 0x00, 0x07 } }, /* 200 350 4.9 */
+ { .icl = { 0x8, 0x7F, 0x3F, 0x00, 0x00 } }, /* 250 250 0.0 */
+ { .icl = { 0x1, 0x7F, 0x3F, 0x00, 0x00 } }, /* 250 300 1.6 */
+ { .icl = { 0xA, 0x35, 0x3A, 0x00, 0x05 } }, /* 250 350 2.9 */
+ { .icl = { 0x1, 0x7F, 0x3F, 0x00, 0x00 } }, /* 300 300 0.0 */
+ { .icl = { 0xA, 0x35, 0x38, 0x00, 0x07 } }, /* 300 350 1.3 */
+ { .icl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */
+};
+
+static const struct intel_ddi_buf_trans jsl_combo_phy_trans_edp_hbr2 = {
+ .entries = _jsl_combo_phy_trans_edp_hbr2,
+ .num_entries = ARRAY_SIZE(_jsl_combo_phy_trans_edp_hbr2),
+};
+
+static const union intel_ddi_buf_trans_entry _dg1_combo_phy_trans_dp_rbr_hbr[] = {
+ /* NT mV Trans mV db */
+ { .icl = { 0xA, 0x32, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */
+ { .icl = { 0xA, 0x48, 0x35, 0x00, 0x0A } }, /* 350 500 3.1 */
+ { .icl = { 0xC, 0x63, 0x2F, 0x00, 0x10 } }, /* 350 700 6.0 */
+ { .icl = { 0x6, 0x7F, 0x2C, 0x00, 0x13 } }, /* 350 900 8.2 */
+ { .icl = { 0xA, 0x43, 0x3F, 0x00, 0x00 } }, /* 500 500 0.0 */
+ { .icl = { 0xC, 0x60, 0x36, 0x00, 0x09 } }, /* 500 700 2.9 */
+ { .icl = { 0x6, 0x7F, 0x30, 0x00, 0x0F } }, /* 500 900 5.1 */
+ { .icl = { 0xC, 0x60, 0x3F, 0x00, 0x00 } }, /* 650 700 0.6 */
+ { .icl = { 0x6, 0x7F, 0x37, 0x00, 0x08 } }, /* 600 900 3.5 */
+ { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */
+};
+
+static const struct intel_ddi_buf_trans dg1_combo_phy_trans_dp_rbr_hbr = {
+ .entries = _dg1_combo_phy_trans_dp_rbr_hbr,
+ .num_entries = ARRAY_SIZE(_dg1_combo_phy_trans_dp_rbr_hbr),
+};
+
+static const union intel_ddi_buf_trans_entry _dg1_combo_phy_trans_dp_hbr2_hbr3[] = {
+ /* NT mV Trans mV db */
+ { .icl = { 0xA, 0x32, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */
+ { .icl = { 0xA, 0x48, 0x35, 0x00, 0x0A } }, /* 350 500 3.1 */
+ { .icl = { 0xC, 0x63, 0x2F, 0x00, 0x10 } }, /* 350 700 6.0 */
+ { .icl = { 0x6, 0x7F, 0x2C, 0x00, 0x13 } }, /* 350 900 8.2 */
+ { .icl = { 0xA, 0x43, 0x3F, 0x00, 0x00 } }, /* 500 500 0.0 */
+ { .icl = { 0xC, 0x60, 0x36, 0x00, 0x09 } }, /* 500 700 2.9 */
+ { .icl = { 0x6, 0x7F, 0x30, 0x00, 0x0F } }, /* 500 900 5.1 */
+ { .icl = { 0xC, 0x58, 0x3F, 0x00, 0x00 } }, /* 650 700 0.6 */
+ { .icl = { 0x6, 0x7F, 0x35, 0x00, 0x0A } }, /* 600 900 3.5 */
+ { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */
+};
+
+static const struct intel_ddi_buf_trans dg1_combo_phy_trans_dp_hbr2_hbr3 = {
+ .entries = _dg1_combo_phy_trans_dp_hbr2_hbr3,
+ .num_entries = ARRAY_SIZE(_dg1_combo_phy_trans_dp_hbr2_hbr3),
+};
+
+static const union intel_ddi_buf_trans_entry _icl_mg_phy_trans_rbr_hbr[] = {
+ /* Voltage swing pre-emphasis */
+ { .mg = { 0x18, 0x00, 0x00 } }, /* 0 0 */
+ { .mg = { 0x1D, 0x00, 0x05 } }, /* 0 1 */
+ { .mg = { 0x24, 0x00, 0x0C } }, /* 0 2 */
+ { .mg = { 0x2B, 0x00, 0x14 } }, /* 0 3 */
+ { .mg = { 0x21, 0x00, 0x00 } }, /* 1 0 */
+ { .mg = { 0x2B, 0x00, 0x08 } }, /* 1 1 */
+ { .mg = { 0x30, 0x00, 0x0F } }, /* 1 2 */
+ { .mg = { 0x31, 0x00, 0x03 } }, /* 2 0 */
+ { .mg = { 0x34, 0x00, 0x0B } }, /* 2 1 */
+ { .mg = { 0x3F, 0x00, 0x00 } }, /* 3 0 */
+};
+
+static const struct intel_ddi_buf_trans icl_mg_phy_trans_rbr_hbr = {
+ .entries = _icl_mg_phy_trans_rbr_hbr,
+ .num_entries = ARRAY_SIZE(_icl_mg_phy_trans_rbr_hbr),
+};
+
+static const union intel_ddi_buf_trans_entry _icl_mg_phy_trans_hbr2_hbr3[] = {
+ /* Voltage swing pre-emphasis */
+ { .mg = { 0x18, 0x00, 0x00 } }, /* 0 0 */
+ { .mg = { 0x1D, 0x00, 0x05 } }, /* 0 1 */
+ { .mg = { 0x24, 0x00, 0x0C } }, /* 0 2 */
+ { .mg = { 0x2B, 0x00, 0x14 } }, /* 0 3 */
+ { .mg = { 0x26, 0x00, 0x00 } }, /* 1 0 */
+ { .mg = { 0x2C, 0x00, 0x07 } }, /* 1 1 */
+ { .mg = { 0x33, 0x00, 0x0C } }, /* 1 2 */
+ { .mg = { 0x2E, 0x00, 0x00 } }, /* 2 0 */
+ { .mg = { 0x36, 0x00, 0x09 } }, /* 2 1 */
+ { .mg = { 0x3F, 0x00, 0x00 } }, /* 3 0 */
+};
+
+static const struct intel_ddi_buf_trans icl_mg_phy_trans_hbr2_hbr3 = {
+ .entries = _icl_mg_phy_trans_hbr2_hbr3,
+ .num_entries = ARRAY_SIZE(_icl_mg_phy_trans_hbr2_hbr3),
+};
+
+static const union intel_ddi_buf_trans_entry _icl_mg_phy_trans_hdmi[] = {
+ /* HDMI Preset VS Pre-emph */
+ { .mg = { 0x1A, 0x0, 0x0 } }, /* 1 400mV 0dB */
+ { .mg = { 0x20, 0x0, 0x0 } }, /* 2 500mV 0dB */
+ { .mg = { 0x29, 0x0, 0x0 } }, /* 3 650mV 0dB */
+ { .mg = { 0x32, 0x0, 0x0 } }, /* 4 800mV 0dB */
+ { .mg = { 0x3F, 0x0, 0x0 } }, /* 5 1000mV 0dB */
+ { .mg = { 0x3A, 0x0, 0x5 } }, /* 6 Full -1.5 dB */
+ { .mg = { 0x39, 0x0, 0x6 } }, /* 7 Full -1.8 dB */
+ { .mg = { 0x38, 0x0, 0x7 } }, /* 8 Full -2 dB */
+ { .mg = { 0x37, 0x0, 0x8 } }, /* 9 Full -2.5 dB */
+ { .mg = { 0x36, 0x0, 0x9 } }, /* 10 Full -3 dB */
+};
+
+static const struct intel_ddi_buf_trans icl_mg_phy_trans_hdmi = {
+ .entries = _icl_mg_phy_trans_hdmi,
+ .num_entries = ARRAY_SIZE(_icl_mg_phy_trans_hdmi),
+ .hdmi_default_entry = ARRAY_SIZE(_icl_mg_phy_trans_hdmi) - 1,
+};
+
+static const union intel_ddi_buf_trans_entry _tgl_dkl_phy_trans_dp_hbr[] = {
+ /* VS pre-emp Non-trans mV Pre-emph dB */
+ { .dkl = { 0x7, 0x0, 0x00 } }, /* 0 0 400mV 0 dB */
+ { .dkl = { 0x5, 0x0, 0x05 } }, /* 0 1 400mV 3.5 dB */
+ { .dkl = { 0x2, 0x0, 0x0B } }, /* 0 2 400mV 6 dB */
+ { .dkl = { 0x0, 0x0, 0x18 } }, /* 0 3 400mV 9.5 dB */
+ { .dkl = { 0x5, 0x0, 0x00 } }, /* 1 0 600mV 0 dB */
+ { .dkl = { 0x2, 0x0, 0x08 } }, /* 1 1 600mV 3.5 dB */
+ { .dkl = { 0x0, 0x0, 0x14 } }, /* 1 2 600mV 6 dB */
+ { .dkl = { 0x2, 0x0, 0x00 } }, /* 2 0 800mV 0 dB */
+ { .dkl = { 0x0, 0x0, 0x0B } }, /* 2 1 800mV 3.5 dB */
+ { .dkl = { 0x0, 0x0, 0x00 } }, /* 3 0 1200mV 0 dB HDMI default */
+};
+
+static const struct intel_ddi_buf_trans tgl_dkl_phy_trans_dp_hbr = {
+ .entries = _tgl_dkl_phy_trans_dp_hbr,
+ .num_entries = ARRAY_SIZE(_tgl_dkl_phy_trans_dp_hbr),
+};
+
+static const union intel_ddi_buf_trans_entry _tgl_dkl_phy_trans_dp_hbr2[] = {
+ /* VS pre-emp Non-trans mV Pre-emph dB */
+ { .dkl = { 0x7, 0x0, 0x00 } }, /* 0 0 400mV 0 dB */
+ { .dkl = { 0x5, 0x0, 0x05 } }, /* 0 1 400mV 3.5 dB */
+ { .dkl = { 0x2, 0x0, 0x0B } }, /* 0 2 400mV 6 dB */
+ { .dkl = { 0x0, 0x0, 0x19 } }, /* 0 3 400mV 9.5 dB */
+ { .dkl = { 0x5, 0x0, 0x00 } }, /* 1 0 600mV 0 dB */
+ { .dkl = { 0x2, 0x0, 0x08 } }, /* 1 1 600mV 3.5 dB */
+ { .dkl = { 0x0, 0x0, 0x14 } }, /* 1 2 600mV 6 dB */
+ { .dkl = { 0x2, 0x0, 0x00 } }, /* 2 0 800mV 0 dB */
+ { .dkl = { 0x0, 0x0, 0x0B } }, /* 2 1 800mV 3.5 dB */
+ { .dkl = { 0x0, 0x0, 0x00 } }, /* 3 0 1200mV 0 dB HDMI default */
+};
+
+static const struct intel_ddi_buf_trans tgl_dkl_phy_trans_dp_hbr2 = {
+ .entries = _tgl_dkl_phy_trans_dp_hbr2,
+ .num_entries = ARRAY_SIZE(_tgl_dkl_phy_trans_dp_hbr2),
+};
+
+static const union intel_ddi_buf_trans_entry _tgl_dkl_phy_trans_hdmi[] = {
+ /* HDMI Preset VS Pre-emph */
+ { .dkl = { 0x7, 0x0, 0x0 } }, /* 1 400mV 0dB */
+ { .dkl = { 0x6, 0x0, 0x0 } }, /* 2 500mV 0dB */
+ { .dkl = { 0x4, 0x0, 0x0 } }, /* 3 650mV 0dB */
+ { .dkl = { 0x2, 0x0, 0x0 } }, /* 4 800mV 0dB */
+ { .dkl = { 0x0, 0x0, 0x0 } }, /* 5 1000mV 0dB */
+ { .dkl = { 0x0, 0x0, 0x5 } }, /* 6 Full -1.5 dB */
+ { .dkl = { 0x0, 0x0, 0x6 } }, /* 7 Full -1.8 dB */
+ { .dkl = { 0x0, 0x0, 0x7 } }, /* 8 Full -2 dB */
+ { .dkl = { 0x0, 0x0, 0x8 } }, /* 9 Full -2.5 dB */
+ { .dkl = { 0x0, 0x0, 0xA } }, /* 10 Full -3 dB */
+};
+
+static const struct intel_ddi_buf_trans tgl_dkl_phy_trans_hdmi = {
+ .entries = _tgl_dkl_phy_trans_hdmi,
+ .num_entries = ARRAY_SIZE(_tgl_dkl_phy_trans_hdmi),
+ .hdmi_default_entry = ARRAY_SIZE(_tgl_dkl_phy_trans_hdmi) - 1,
+};
+
+static const union intel_ddi_buf_trans_entry _tgl_combo_phy_trans_dp_hbr[] = {
+ /* NT mV Trans mV db */
+ { .icl = { 0xA, 0x32, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */
+ { .icl = { 0xA, 0x4F, 0x37, 0x00, 0x08 } }, /* 350 500 3.1 */
+ { .icl = { 0xC, 0x71, 0x2F, 0x00, 0x10 } }, /* 350 700 6.0 */
+ { .icl = { 0x6, 0x7D, 0x2B, 0x00, 0x14 } }, /* 350 900 8.2 */
+ { .icl = { 0xA, 0x4C, 0x3F, 0x00, 0x00 } }, /* 500 500 0.0 */
+ { .icl = { 0xC, 0x73, 0x34, 0x00, 0x0B } }, /* 500 700 2.9 */
+ { .icl = { 0x6, 0x7F, 0x2F, 0x00, 0x10 } }, /* 500 900 5.1 */
+ { .icl = { 0xC, 0x6C, 0x3C, 0x00, 0x03 } }, /* 650 700 0.6 */
+ { .icl = { 0x6, 0x7F, 0x35, 0x00, 0x0A } }, /* 600 900 3.5 */
+ { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */
+};
+
+static const struct intel_ddi_buf_trans tgl_combo_phy_trans_dp_hbr = {
+ .entries = _tgl_combo_phy_trans_dp_hbr,
+ .num_entries = ARRAY_SIZE(_tgl_combo_phy_trans_dp_hbr),
+};
+
+static const union intel_ddi_buf_trans_entry _tgl_combo_phy_trans_dp_hbr2[] = {
+ /* NT mV Trans mV db */
+ { .icl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */
+ { .icl = { 0xA, 0x4F, 0x37, 0x00, 0x08 } }, /* 350 500 3.1 */
+ { .icl = { 0xC, 0x63, 0x2F, 0x00, 0x10 } }, /* 350 700 6.0 */
+ { .icl = { 0x6, 0x7F, 0x2B, 0x00, 0x14 } }, /* 350 900 8.2 */
+ { .icl = { 0xA, 0x47, 0x3F, 0x00, 0x00 } }, /* 500 500 0.0 */
+ { .icl = { 0xC, 0x63, 0x34, 0x00, 0x0B } }, /* 500 700 2.9 */
+ { .icl = { 0x6, 0x7F, 0x2F, 0x00, 0x10 } }, /* 500 900 5.1 */
+ { .icl = { 0xC, 0x61, 0x3C, 0x00, 0x03 } }, /* 650 700 0.6 */
+ { .icl = { 0x6, 0x7B, 0x35, 0x00, 0x0A } }, /* 600 900 3.5 */
+ { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */
+};
+
+static const struct intel_ddi_buf_trans tgl_combo_phy_trans_dp_hbr2 = {
+ .entries = _tgl_combo_phy_trans_dp_hbr2,
+ .num_entries = ARRAY_SIZE(_tgl_combo_phy_trans_dp_hbr2),
+};
+
+static const union intel_ddi_buf_trans_entry _tgl_uy_combo_phy_trans_dp_hbr2[] = {
+ /* NT mV Trans mV db */
+ { .icl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */
+ { .icl = { 0xA, 0x4F, 0x36, 0x00, 0x09 } }, /* 350 500 3.1 */
+ { .icl = { 0xC, 0x60, 0x32, 0x00, 0x0D } }, /* 350 700 6.0 */
+ { .icl = { 0xC, 0x7F, 0x2D, 0x00, 0x12 } }, /* 350 900 8.2 */
+ { .icl = { 0xC, 0x47, 0x3F, 0x00, 0x00 } }, /* 500 500 0.0 */
+ { .icl = { 0xC, 0x6F, 0x36, 0x00, 0x09 } }, /* 500 700 2.9 */
+ { .icl = { 0x6, 0x7D, 0x32, 0x00, 0x0D } }, /* 500 900 5.1 */
+ { .icl = { 0x6, 0x60, 0x3C, 0x00, 0x03 } }, /* 650 700 0.6 */
+ { .icl = { 0x6, 0x7F, 0x34, 0x00, 0x0B } }, /* 600 900 3.5 */
+ { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */
+};
+
+static const struct intel_ddi_buf_trans tgl_uy_combo_phy_trans_dp_hbr2 = {
+ .entries = _tgl_uy_combo_phy_trans_dp_hbr2,
+ .num_entries = ARRAY_SIZE(_tgl_uy_combo_phy_trans_dp_hbr2),
+};
+
+/*
+ * The HOBL entry is cloned across all the voltage swing / pre-emphasis
+ * combinations that the DisplayPort specification requires.
+ */
+static const union intel_ddi_buf_trans_entry _tgl_combo_phy_trans_edp_hbr2_hobl[] = {
+ /* VS pre-emp */
+ { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 0 0 */
+ { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 0 1 */
+ { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 0 2 */
+ { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 0 3 */
+ { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 1 0 */
+ { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 1 1 */
+ { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 1 2 */
+ { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 2 0 */
+ { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 2 1 */
+};
+
+static const struct intel_ddi_buf_trans tgl_combo_phy_trans_edp_hbr2_hobl = {
+ .entries = _tgl_combo_phy_trans_edp_hbr2_hobl,
+ .num_entries = ARRAY_SIZE(_tgl_combo_phy_trans_edp_hbr2_hobl),
+};
+
+static const union intel_ddi_buf_trans_entry _rkl_combo_phy_trans_dp_hbr[] = {
+ /* NT mV Trans mV db */
+ { .icl = { 0xA, 0x2F, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */
+ { .icl = { 0xA, 0x4F, 0x37, 0x00, 0x08 } }, /* 350 500 3.1 */
+ { .icl = { 0xC, 0x63, 0x2F, 0x00, 0x10 } }, /* 350 700 6.0 */
+ { .icl = { 0x6, 0x7D, 0x2A, 0x00, 0x15 } }, /* 350 900 8.2 */
+ { .icl = { 0xA, 0x4C, 0x3F, 0x00, 0x00 } }, /* 500 500 0.0 */
+ { .icl = { 0xC, 0x73, 0x34, 0x00, 0x0B } }, /* 500 700 2.9 */
+ { .icl = { 0x6, 0x7F, 0x2F, 0x00, 0x10 } }, /* 500 900 5.1 */
+ { .icl = { 0xC, 0x6E, 0x3E, 0x00, 0x01 } }, /* 650 700 0.6 */
+ { .icl = { 0x6, 0x7F, 0x35, 0x00, 0x0A } }, /* 600 900 3.5 */
+ { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */
+};
+
+static const struct intel_ddi_buf_trans rkl_combo_phy_trans_dp_hbr = {
+ .entries = _rkl_combo_phy_trans_dp_hbr,
+ .num_entries = ARRAY_SIZE(_rkl_combo_phy_trans_dp_hbr),
+};
+
+static const union intel_ddi_buf_trans_entry _rkl_combo_phy_trans_dp_hbr2_hbr3[] = {
+ /* NT mV Trans mV db */
+ { .icl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */
+ { .icl = { 0xA, 0x50, 0x38, 0x00, 0x07 } }, /* 350 500 3.1 */
+ { .icl = { 0xC, 0x61, 0x33, 0x00, 0x0C } }, /* 350 700 6.0 */
+ { .icl = { 0x6, 0x7F, 0x2E, 0x00, 0x11 } }, /* 350 900 8.2 */
+ { .icl = { 0xA, 0x47, 0x3F, 0x00, 0x00 } }, /* 500 500 0.0 */
+ { .icl = { 0xC, 0x5F, 0x38, 0x00, 0x07 } }, /* 500 700 2.9 */
+ { .icl = { 0x6, 0x7F, 0x2F, 0x00, 0x10 } }, /* 500 900 5.1 */
+ { .icl = { 0xC, 0x5F, 0x3F, 0x00, 0x00 } }, /* 650 700 0.6 */
+ { .icl = { 0x6, 0x7E, 0x36, 0x00, 0x09 } }, /* 600 900 3.5 */
+ { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */
+};
+
+static const struct intel_ddi_buf_trans rkl_combo_phy_trans_dp_hbr2_hbr3 = {
+ .entries = _rkl_combo_phy_trans_dp_hbr2_hbr3,
+ .num_entries = ARRAY_SIZE(_rkl_combo_phy_trans_dp_hbr2_hbr3),
+};
+
+static const union intel_ddi_buf_trans_entry _adls_combo_phy_trans_dp_hbr2_hbr3[] = {
+ /* NT mV Trans mV db */
+ { .icl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */
+ { .icl = { 0xA, 0x4F, 0x37, 0x00, 0x08 } }, /* 350 500 3.1 */
+ { .icl = { 0xC, 0x63, 0x31, 0x00, 0x0E } }, /* 350 700 6.0 */
+ { .icl = { 0x6, 0x7F, 0x2C, 0x00, 0x13 } }, /* 350 900 8.2 */
+ { .icl = { 0xA, 0x47, 0x3F, 0x00, 0x00 } }, /* 500 500 0.0 */
+ { .icl = { 0xC, 0x63, 0x37, 0x00, 0x08 } }, /* 500 700 2.9 */
+ { .icl = { 0x6, 0x73, 0x32, 0x00, 0x0D } }, /* 500 900 5.1 */
+ { .icl = { 0xC, 0x58, 0x3F, 0x00, 0x00 } }, /* 650 700 0.6 */
+ { .icl = { 0x6, 0x7F, 0x35, 0x00, 0x0A } }, /* 600 900 3.5 */
+ { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */
+};
+
+static const struct intel_ddi_buf_trans adls_combo_phy_trans_dp_hbr2_hbr3 = {
+ .entries = _adls_combo_phy_trans_dp_hbr2_hbr3,
+ .num_entries = ARRAY_SIZE(_adls_combo_phy_trans_dp_hbr2_hbr3),
+};
+
+static const union intel_ddi_buf_trans_entry _adls_combo_phy_trans_edp_hbr2[] = {
+ /* NT mV Trans mV db */
+ { .icl = { 0x9, 0x73, 0x3D, 0x00, 0x02 } }, /* 200 200 0.0 */
+ { .icl = { 0x9, 0x7A, 0x3C, 0x00, 0x03 } }, /* 200 250 1.9 */
+ { .icl = { 0x9, 0x7F, 0x3B, 0x00, 0x04 } }, /* 200 300 3.5 */
+ { .icl = { 0x4, 0x6C, 0x33, 0x00, 0x0C } }, /* 200 350 4.9 */
+ { .icl = { 0x2, 0x73, 0x3A, 0x00, 0x05 } }, /* 250 250 0.0 */
+ { .icl = { 0x2, 0x7C, 0x38, 0x00, 0x07 } }, /* 250 300 1.6 */
+ { .icl = { 0x4, 0x5A, 0x36, 0x00, 0x09 } }, /* 250 350 2.9 */
+ { .icl = { 0x4, 0x57, 0x3D, 0x00, 0x02 } }, /* 300 300 0.0 */
+ { .icl = { 0x4, 0x65, 0x38, 0x00, 0x07 } }, /* 300 350 1.3 */
+ { .icl = { 0x4, 0x6C, 0x3A, 0x00, 0x05 } }, /* 350 350 0.0 */
+};
+
+static const struct intel_ddi_buf_trans adls_combo_phy_trans_edp_hbr2 = {
+ .entries = _adls_combo_phy_trans_edp_hbr2,
+ .num_entries = ARRAY_SIZE(_adls_combo_phy_trans_edp_hbr2),
+};
+
+static const union intel_ddi_buf_trans_entry _adls_combo_phy_trans_edp_hbr3[] = {
+ /* NT mV Trans mV db */
+ { .icl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */
+ { .icl = { 0xA, 0x4F, 0x37, 0x00, 0x08 } }, /* 350 500 3.1 */
+ { .icl = { 0xC, 0x63, 0x31, 0x00, 0x0E } }, /* 350 700 6.0 */
+ { .icl = { 0x6, 0x7F, 0x2C, 0x00, 0x13 } }, /* 350 900 8.2 */
+ { .icl = { 0xA, 0x47, 0x3F, 0x00, 0x00 } }, /* 500 500 0.0 */
+ { .icl = { 0xC, 0x63, 0x37, 0x00, 0x08 } }, /* 500 700 2.9 */
+ { .icl = { 0x6, 0x73, 0x32, 0x00, 0x0D } }, /* 500 900 5.1 */
+ { .icl = { 0xC, 0x58, 0x3F, 0x00, 0x00 } }, /* 650 700 0.6 */
+ { .icl = { 0x6, 0x7F, 0x35, 0x00, 0x0A } }, /* 600 900 3.5 */
+ { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */
+};
+
+static const struct intel_ddi_buf_trans adls_combo_phy_trans_edp_hbr3 = {
+ .entries = _adls_combo_phy_trans_edp_hbr3,
+ .num_entries = ARRAY_SIZE(_adls_combo_phy_trans_edp_hbr3),
+};
+
+static const union intel_ddi_buf_trans_entry _adlp_combo_phy_trans_dp_hbr[] = {
+ /* NT mV Trans mV db */
+ { .icl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */
+ { .icl = { 0xA, 0x4F, 0x37, 0x00, 0x08 } }, /* 350 500 3.1 */
+ { .icl = { 0xC, 0x71, 0x31, 0x00, 0x0E } }, /* 350 700 6.0 */
+ { .icl = { 0x6, 0x7F, 0x2C, 0x00, 0x13 } }, /* 350 900 8.2 */
+ { .icl = { 0xA, 0x4C, 0x3F, 0x00, 0x00 } }, /* 500 500 0.0 */
+ { .icl = { 0xC, 0x73, 0x34, 0x00, 0x0B } }, /* 500 700 2.9 */
+ { .icl = { 0x6, 0x7F, 0x2F, 0x00, 0x10 } }, /* 500 900 5.1 */
+ { .icl = { 0xC, 0x7C, 0x3C, 0x00, 0x03 } }, /* 650 700 0.6 */
+ { .icl = { 0x6, 0x7F, 0x35, 0x00, 0x0A } }, /* 600 900 3.5 */
+ { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */
+};
+
+static const struct intel_ddi_buf_trans adlp_combo_phy_trans_dp_hbr = {
+ .entries = _adlp_combo_phy_trans_dp_hbr,
+ .num_entries = ARRAY_SIZE(_adlp_combo_phy_trans_dp_hbr),
+};
+
+static const union intel_ddi_buf_trans_entry _adlp_combo_phy_trans_dp_hbr2_hbr3[] = {
+ /* NT mV Trans mV db */
+ { .icl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */
+ { .icl = { 0xA, 0x4F, 0x37, 0x00, 0x08 } }, /* 350 500 3.1 */
+ { .icl = { 0xC, 0x71, 0x30, 0x00, 0x0F } }, /* 350 700 6.0 */
+ { .icl = { 0x6, 0x7F, 0x2B, 0x00, 0x14 } }, /* 350 900 8.2 */
+ { .icl = { 0xA, 0x4C, 0x3F, 0x00, 0x00 } }, /* 500 500 0.0 */
+ { .icl = { 0xC, 0x73, 0x34, 0x00, 0x0B } }, /* 500 700 2.9 */
+ { .icl = { 0x6, 0x7F, 0x30, 0x00, 0x0F } }, /* 500 900 5.1 */
+ { .icl = { 0xC, 0x63, 0x3F, 0x00, 0x00 } }, /* 650 700 0.6 */
+ { .icl = { 0x6, 0x7F, 0x38, 0x00, 0x07 } }, /* 600 900 3.5 */
+ { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */
+};
+
+static const union intel_ddi_buf_trans_entry _adlp_combo_phy_trans_edp_hbr2[] = {
+ /* NT mV Trans mV db */
+ { .icl = { 0x4, 0x50, 0x38, 0x00, 0x07 } }, /* 200 200 0.0 */
+ { .icl = { 0x4, 0x58, 0x35, 0x00, 0x0A } }, /* 200 250 1.9 */
+ { .icl = { 0x4, 0x60, 0x34, 0x00, 0x0B } }, /* 200 300 3.5 */
+ { .icl = { 0x4, 0x6A, 0x32, 0x00, 0x0D } }, /* 200 350 4.9 */
+ { .icl = { 0x4, 0x5E, 0x38, 0x00, 0x07 } }, /* 250 250 0.0 */
+ { .icl = { 0x4, 0x61, 0x36, 0x00, 0x09 } }, /* 250 300 1.6 */
+ { .icl = { 0x4, 0x6B, 0x34, 0x00, 0x0B } }, /* 250 350 2.9 */
+ { .icl = { 0x4, 0x69, 0x39, 0x00, 0x06 } }, /* 300 300 0.0 */
+ { .icl = { 0x4, 0x73, 0x37, 0x00, 0x08 } }, /* 300 350 1.3 */
+ { .icl = { 0x4, 0x7A, 0x38, 0x00, 0x07 } }, /* 350 350 0.0 */
+};
+
+static const union intel_ddi_buf_trans_entry _adlp_combo_phy_trans_dp_hbr2_edp_hbr3[] = {
+ /* NT mV Trans mV db */
+ { .icl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */
+ { .icl = { 0xA, 0x4F, 0x37, 0x00, 0x08 } }, /* 350 500 3.1 */
+ { .icl = { 0xC, 0x71, 0x30, 0x00, 0x0f } }, /* 350 700 6.0 */
+ { .icl = { 0x6, 0x7F, 0x2B, 0x00, 0x14 } }, /* 350 900 8.2 */
+ { .icl = { 0xA, 0x4C, 0x3F, 0x00, 0x00 } }, /* 500 500 0.0 */
+ { .icl = { 0xC, 0x73, 0x34, 0x00, 0x0B } }, /* 500 700 2.9 */
+ { .icl = { 0x6, 0x7F, 0x30, 0x00, 0x0F } }, /* 500 900 5.1 */
+ { .icl = { 0xC, 0x63, 0x3F, 0x00, 0x00 } }, /* 650 700 0.6 */
+ { .icl = { 0x6, 0x7F, 0x38, 0x00, 0x07 } }, /* 600 900 3.5 */
+ { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */
+};
+
+static const struct intel_ddi_buf_trans adlp_combo_phy_trans_dp_hbr2_hbr3 = {
+ .entries = _adlp_combo_phy_trans_dp_hbr2_hbr3,
+ .num_entries = ARRAY_SIZE(_adlp_combo_phy_trans_dp_hbr2_hbr3),
+};
+
+static const struct intel_ddi_buf_trans adlp_combo_phy_trans_edp_hbr3 = {
+ .entries = _adlp_combo_phy_trans_dp_hbr2_edp_hbr3,
+ .num_entries = ARRAY_SIZE(_adlp_combo_phy_trans_dp_hbr2_edp_hbr3),
+};
+
+static const struct intel_ddi_buf_trans adlp_combo_phy_trans_edp_up_to_hbr2 = {
+ .entries = _adlp_combo_phy_trans_edp_hbr2,
+ .num_entries = ARRAY_SIZE(_adlp_combo_phy_trans_edp_hbr2),
+};
+
+static const union intel_ddi_buf_trans_entry _adlp_dkl_phy_trans_dp_hbr[] = {
+ /* VS pre-emp Non-trans mV Pre-emph dB */
+ { .dkl = { 0x7, 0x0, 0x01 } }, /* 0 0 400mV 0 dB */
+ { .dkl = { 0x5, 0x0, 0x06 } }, /* 0 1 400mV 3.5 dB */
+ { .dkl = { 0x2, 0x0, 0x0B } }, /* 0 2 400mV 6 dB */
+ { .dkl = { 0x0, 0x0, 0x17 } }, /* 0 3 400mV 9.5 dB */
+ { .dkl = { 0x5, 0x0, 0x00 } }, /* 1 0 600mV 0 dB */
+ { .dkl = { 0x2, 0x0, 0x08 } }, /* 1 1 600mV 3.5 dB */
+ { .dkl = { 0x0, 0x0, 0x14 } }, /* 1 2 600mV 6 dB */
+ { .dkl = { 0x2, 0x0, 0x00 } }, /* 2 0 800mV 0 dB */
+ { .dkl = { 0x0, 0x0, 0x0B } }, /* 2 1 800mV 3.5 dB */
+ { .dkl = { 0x0, 0x0, 0x00 } }, /* 3 0 1200mV 0 dB */
+};
+
+static const struct intel_ddi_buf_trans adlp_dkl_phy_trans_dp_hbr = {
+ .entries = _adlp_dkl_phy_trans_dp_hbr,
+ .num_entries = ARRAY_SIZE(_adlp_dkl_phy_trans_dp_hbr),
+};
+
+static const union intel_ddi_buf_trans_entry _adlp_dkl_phy_trans_dp_hbr2_hbr3[] = {
+ /* VS pre-emp Non-trans mV Pre-emph dB */
+ { .dkl = { 0x7, 0x0, 0x00 } }, /* 0 0 400mV 0 dB */
+ { .dkl = { 0x5, 0x0, 0x04 } }, /* 0 1 400mV 3.5 dB */
+ { .dkl = { 0x2, 0x0, 0x0A } }, /* 0 2 400mV 6 dB */
+ { .dkl = { 0x0, 0x0, 0x18 } }, /* 0 3 400mV 9.5 dB */
+ { .dkl = { 0x5, 0x0, 0x00 } }, /* 1 0 600mV 0 dB */
+ { .dkl = { 0x2, 0x0, 0x06 } }, /* 1 1 600mV 3.5 dB */
+ { .dkl = { 0x0, 0x0, 0x14 } }, /* 1 2 600mV 6 dB */
+ { .dkl = { 0x2, 0x0, 0x00 } }, /* 2 0 800mV 0 dB */
+ { .dkl = { 0x0, 0x0, 0x09 } }, /* 2 1 800mV 3.5 dB */
+ { .dkl = { 0x0, 0x0, 0x00 } }, /* 3 0 1200mV 0 dB */
+};
+
+static const struct intel_ddi_buf_trans adlp_dkl_phy_trans_dp_hbr2_hbr3 = {
+ .entries = _adlp_dkl_phy_trans_dp_hbr2_hbr3,
+ .num_entries = ARRAY_SIZE(_adlp_dkl_phy_trans_dp_hbr2_hbr3),
+};
+
+static const union intel_ddi_buf_trans_entry _dg2_snps_trans[] = {
+ { .snps = { 25, 0, 0 } }, /* VS 0, pre-emph 0 */
+ { .snps = { 32, 0, 6 } }, /* VS 0, pre-emph 1 */
+ { .snps = { 35, 0, 10 } }, /* VS 0, pre-emph 2 */
+ { .snps = { 43, 0, 17 } }, /* VS 0, pre-emph 3 */
+ { .snps = { 35, 0, 0 } }, /* VS 1, pre-emph 0 */
+ { .snps = { 45, 0, 8 } }, /* VS 1, pre-emph 1 */
+ { .snps = { 48, 0, 14 } }, /* VS 1, pre-emph 2 */
+ { .snps = { 47, 0, 0 } }, /* VS 2, pre-emph 0 */
+ { .snps = { 55, 0, 7 } }, /* VS 2, pre-emph 1 */
+ { .snps = { 62, 0, 0 } }, /* VS 3, pre-emph 0 */
+};
+
+static const struct intel_ddi_buf_trans dg2_snps_trans = {
+ .entries = _dg2_snps_trans,
+ .num_entries = ARRAY_SIZE(_dg2_snps_trans),
+ .hdmi_default_entry = ARRAY_SIZE(_dg2_snps_trans) - 1,
+};
+
+static const union intel_ddi_buf_trans_entry _dg2_snps_trans_uhbr[] = {
+ { .snps = { 62, 0, 0 } }, /* preset 0 */
+ { .snps = { 55, 0, 7 } }, /* preset 1 */
+ { .snps = { 50, 0, 12 } }, /* preset 2 */
+ { .snps = { 44, 0, 18 } }, /* preset 3 */
+ { .snps = { 35, 0, 21 } }, /* preset 4 */
+ { .snps = { 59, 3, 0 } }, /* preset 5 */
+ { .snps = { 53, 3, 6 } }, /* preset 6 */
+ { .snps = { 48, 3, 11 } }, /* preset 7 */
+ { .snps = { 42, 5, 15 } }, /* preset 8 */
+ { .snps = { 37, 5, 20 } }, /* preset 9 */
+ { .snps = { 56, 6, 0 } }, /* preset 10 */
+ { .snps = { 48, 7, 7 } }, /* preset 11 */
+ { .snps = { 45, 7, 10 } }, /* preset 12 */
+ { .snps = { 39, 8, 15 } }, /* preset 13 */
+ { .snps = { 48, 14, 0 } }, /* preset 14 */
+ { .snps = { 45, 4, 4 } }, /* preset 15 */
+};
+
+static const struct intel_ddi_buf_trans dg2_snps_trans_uhbr = {
+ .entries = _dg2_snps_trans_uhbr,
+ .num_entries = ARRAY_SIZE(_dg2_snps_trans_uhbr),
+};
+
+bool is_hobl_buf_trans(const struct intel_ddi_buf_trans *table)
+{
+ return table == &tgl_combo_phy_trans_edp_hbr2_hobl;
+}
+
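+/*
+ * HOBL ("hours of battery life") is an eDP PHY power saving feature;
+ * stop using it once link training with it has failed.
+ */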
+static bool use_edp_hobl(struct intel_encoder *encoder)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_connector *connector = intel_dp->attached_connector;
+
+ return connector->panel.vbt.edp.hobl && !intel_dp->hobl_failed;
+}
+
+static bool use_edp_low_vswing(struct intel_encoder *encoder)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_connector *connector = intel_dp->attached_connector;
+
+ return connector->panel.vbt.edp.low_vswing;
+}
+
+static const struct intel_ddi_buf_trans *
+intel_get_buf_trans(const struct intel_ddi_buf_trans *trans, int *num_entries)
+{
+ *num_entries = trans->num_entries;
+ return trans;
+}
+
+static const struct intel_ddi_buf_trans *
+hsw_get_buf_trans(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
+ return intel_get_buf_trans(&hsw_trans_fdi, n_entries);
+ else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ return intel_get_buf_trans(&hsw_trans_hdmi, n_entries);
+ else
+ return intel_get_buf_trans(&hsw_trans_dp, n_entries);
+}
+
+static const struct intel_ddi_buf_trans *
+bdw_get_buf_trans(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
+ return intel_get_buf_trans(&bdw_trans_fdi, n_entries);
+ else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ return intel_get_buf_trans(&bdw_trans_hdmi, n_entries);
+ else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) &&
+ use_edp_low_vswing(encoder))
+ return intel_get_buf_trans(&bdw_trans_edp, n_entries);
+ else
+ return intel_get_buf_trans(&bdw_trans_dp, n_entries);
+}
+
+static int skl_buf_trans_num_entries(enum port port, int n_entries)
+{
+ /* Only DDIA and DDIE can select the 10th register with DP */
+ if (port == PORT_A || port == PORT_E)
+ return min(n_entries, 10);
+ else
+ return min(n_entries, 9);
+}
+
+static const struct intel_ddi_buf_trans *
+_skl_get_buf_trans_dp(struct intel_encoder *encoder,
+ const struct intel_ddi_buf_trans *trans,
+ int *n_entries)
+{
+ trans = intel_get_buf_trans(trans, n_entries);
+ *n_entries = skl_buf_trans_num_entries(encoder->port, *n_entries);
+ return trans;
+}
+
+static const struct intel_ddi_buf_trans *
+skl_y_get_buf_trans(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ return intel_get_buf_trans(&skl_y_trans_hdmi, n_entries);
+ else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) &&
+ use_edp_low_vswing(encoder))
+ return _skl_get_buf_trans_dp(encoder, &skl_y_trans_edp, n_entries);
+ else
+ return _skl_get_buf_trans_dp(encoder, &skl_y_trans_dp, n_entries);
+}
+
+static const struct intel_ddi_buf_trans *
+skl_u_get_buf_trans(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ return intel_get_buf_trans(&skl_trans_hdmi, n_entries);
+ else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) &&
+ use_edp_low_vswing(encoder))
+ return _skl_get_buf_trans_dp(encoder, &skl_u_trans_edp, n_entries);
+ else
+ return _skl_get_buf_trans_dp(encoder, &skl_u_trans_dp, n_entries);
+}
+
+static const struct intel_ddi_buf_trans *
+skl_get_buf_trans(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ return intel_get_buf_trans(&skl_trans_hdmi, n_entries);
+ else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) &&
+ use_edp_low_vswing(encoder))
+ return _skl_get_buf_trans_dp(encoder, &skl_trans_edp, n_entries);
+ else
+ return _skl_get_buf_trans_dp(encoder, &skl_trans_dp, n_entries);
+}
+
+static const struct intel_ddi_buf_trans *
+kbl_y_get_buf_trans(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ return intel_get_buf_trans(&skl_y_trans_hdmi, n_entries);
+ else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) &&
+ use_edp_low_vswing(encoder))
+ return _skl_get_buf_trans_dp(encoder, &skl_y_trans_edp, n_entries);
+ else
+ return _skl_get_buf_trans_dp(encoder, &kbl_y_trans_dp, n_entries);
+}
+
+static const struct intel_ddi_buf_trans *
+kbl_u_get_buf_trans(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ return intel_get_buf_trans(&skl_trans_hdmi, n_entries);
+ else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) &&
+ use_edp_low_vswing(encoder))
+ return _skl_get_buf_trans_dp(encoder, &skl_u_trans_edp, n_entries);
+ else
+ return _skl_get_buf_trans_dp(encoder, &kbl_u_trans_dp, n_entries);
+}
+
+static const struct intel_ddi_buf_trans *
+kbl_get_buf_trans(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ return intel_get_buf_trans(&skl_trans_hdmi, n_entries);
+ else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) &&
+ use_edp_low_vswing(encoder))
+ return _skl_get_buf_trans_dp(encoder, &skl_trans_edp, n_entries);
+ else
+ return _skl_get_buf_trans_dp(encoder, &kbl_trans_dp, n_entries);
+}
+
+static const struct intel_ddi_buf_trans *
+bxt_get_buf_trans(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ return intel_get_buf_trans(&bxt_trans_hdmi, n_entries);
+ else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) &&
+ use_edp_low_vswing(encoder))
+ return intel_get_buf_trans(&bxt_trans_edp, n_entries);
+ else
+ return intel_get_buf_trans(&bxt_trans_dp, n_entries);
+}
+
+static const struct intel_ddi_buf_trans *
+icl_get_combo_buf_trans_dp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ return intel_get_buf_trans(&icl_combo_phy_trans_dp_hbr2_edp_hbr3,
+ n_entries);
+}
+
+static const struct intel_ddi_buf_trans *
+icl_get_combo_buf_trans_edp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ if (crtc_state->port_clock > 540000) {
+ return intel_get_buf_trans(&icl_combo_phy_trans_dp_hbr2_edp_hbr3,
+ n_entries);
+ } else if (use_edp_low_vswing(encoder)) {
+ return intel_get_buf_trans(&icl_combo_phy_trans_edp_hbr2,
+ n_entries);
+ }
+
+ return icl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries);
+}
+
+static const struct intel_ddi_buf_trans *
+icl_get_combo_buf_trans(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ return intel_get_buf_trans(&icl_combo_phy_trans_hdmi, n_entries);
+ else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
+ return icl_get_combo_buf_trans_edp(encoder, crtc_state, n_entries);
+ else
+ return icl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries);
+}
+
+static const struct intel_ddi_buf_trans *
+icl_get_mg_buf_trans_dp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ if (crtc_state->port_clock > 270000) {
+ return intel_get_buf_trans(&icl_mg_phy_trans_hbr2_hbr3,
+ n_entries);
+ } else {
+ return intel_get_buf_trans(&icl_mg_phy_trans_rbr_hbr,
+ n_entries);
+ }
+}
+
+static const struct intel_ddi_buf_trans *
+icl_get_mg_buf_trans(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ return intel_get_buf_trans(&icl_mg_phy_trans_hdmi, n_entries);
+ else
+ return icl_get_mg_buf_trans_dp(encoder, crtc_state, n_entries);
+}
+
+static const struct intel_ddi_buf_trans *
+ehl_get_combo_buf_trans_edp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ if (crtc_state->port_clock > 270000)
+ return intel_get_buf_trans(&ehl_combo_phy_trans_edp_hbr2, n_entries);
+ else
+ return intel_get_buf_trans(&icl_combo_phy_trans_edp_hbr2, n_entries);
+}
+
+static const struct intel_ddi_buf_trans *
+ehl_get_combo_buf_trans(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ return intel_get_buf_trans(&icl_combo_phy_trans_hdmi, n_entries);
+ else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) &&
+ use_edp_low_vswing(encoder))
+ return ehl_get_combo_buf_trans_edp(encoder, crtc_state, n_entries);
+ else
+ return intel_get_buf_trans(&ehl_combo_phy_trans_dp, n_entries);
+}
+
+static const struct intel_ddi_buf_trans *
+jsl_get_combo_buf_trans_edp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ if (crtc_state->port_clock > 270000)
+ return intel_get_buf_trans(&jsl_combo_phy_trans_edp_hbr2, n_entries);
+ else
+ return intel_get_buf_trans(&jsl_combo_phy_trans_edp_hbr, n_entries);
+}
+
+static const struct intel_ddi_buf_trans *
+jsl_get_combo_buf_trans(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ return intel_get_buf_trans(&icl_combo_phy_trans_hdmi, n_entries);
+ else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) &&
+ use_edp_low_vswing(encoder))
+ return jsl_get_combo_buf_trans_edp(encoder, crtc_state, n_entries);
+ else
+ return intel_get_buf_trans(&icl_combo_phy_trans_dp_hbr2_edp_hbr3, n_entries);
+}
+
+static const struct intel_ddi_buf_trans *
+tgl_get_combo_buf_trans_dp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
+ if (crtc_state->port_clock > 270000) {
+ if (IS_TGL_UY(dev_priv)) {
+ return intel_get_buf_trans(&tgl_uy_combo_phy_trans_dp_hbr2,
+ n_entries);
+ } else {
+ return intel_get_buf_trans(&tgl_combo_phy_trans_dp_hbr2,
+ n_entries);
+ }
+ } else {
+ return intel_get_buf_trans(&tgl_combo_phy_trans_dp_hbr,
+ n_entries);
+ }
+}
+
+static const struct intel_ddi_buf_trans *
+tgl_get_combo_buf_trans_edp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ if (crtc_state->port_clock > 540000) {
+ return intel_get_buf_trans(&icl_combo_phy_trans_dp_hbr2_edp_hbr3,
+ n_entries);
+ } else if (use_edp_hobl(encoder)) {
+ return intel_get_buf_trans(&tgl_combo_phy_trans_edp_hbr2_hobl,
+ n_entries);
+ } else if (use_edp_low_vswing(encoder)) {
+ return intel_get_buf_trans(&icl_combo_phy_trans_edp_hbr2,
+ n_entries);
+ }
+
+ return tgl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries);
+}
+
+static const struct intel_ddi_buf_trans *
+tgl_get_combo_buf_trans(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ return intel_get_buf_trans(&icl_combo_phy_trans_hdmi, n_entries);
+ else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
+ return tgl_get_combo_buf_trans_edp(encoder, crtc_state, n_entries);
+ else
+ return tgl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries);
+}
+
+static const struct intel_ddi_buf_trans *
+dg1_get_combo_buf_trans_dp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ if (crtc_state->port_clock > 270000)
+ return intel_get_buf_trans(&dg1_combo_phy_trans_dp_hbr2_hbr3,
+ n_entries);
+ else
+ return intel_get_buf_trans(&dg1_combo_phy_trans_dp_rbr_hbr,
+ n_entries);
+}
+
+static const struct intel_ddi_buf_trans *
+dg1_get_combo_buf_trans_edp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ if (crtc_state->port_clock > 540000)
+ return intel_get_buf_trans(&icl_combo_phy_trans_dp_hbr2_edp_hbr3,
+ n_entries);
+ else if (use_edp_hobl(encoder))
+ return intel_get_buf_trans(&tgl_combo_phy_trans_edp_hbr2_hobl,
+ n_entries);
+ else if (use_edp_low_vswing(encoder))
+ return intel_get_buf_trans(&icl_combo_phy_trans_edp_hbr2,
+ n_entries);
+ else
+ return dg1_get_combo_buf_trans_dp(encoder, crtc_state, n_entries);
+}
+
+static const struct intel_ddi_buf_trans *
+dg1_get_combo_buf_trans(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ return intel_get_buf_trans(&icl_combo_phy_trans_hdmi, n_entries);
+ else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
+ return dg1_get_combo_buf_trans_edp(encoder, crtc_state, n_entries);
+ else
+ return dg1_get_combo_buf_trans_dp(encoder, crtc_state, n_entries);
+}
+
+static const struct intel_ddi_buf_trans *
+rkl_get_combo_buf_trans_dp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ if (crtc_state->port_clock > 270000)
+ return intel_get_buf_trans(&rkl_combo_phy_trans_dp_hbr2_hbr3, n_entries);
+ else
+ return intel_get_buf_trans(&rkl_combo_phy_trans_dp_hbr, n_entries);
+}
+
+static const struct intel_ddi_buf_trans *
+rkl_get_combo_buf_trans_edp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ if (crtc_state->port_clock > 540000) {
+ return intel_get_buf_trans(&icl_combo_phy_trans_dp_hbr2_edp_hbr3,
+ n_entries);
+ } else if (use_edp_hobl(encoder)) {
+ return intel_get_buf_trans(&tgl_combo_phy_trans_edp_hbr2_hobl,
+ n_entries);
+ } else if (use_edp_low_vswing(encoder)) {
+ return intel_get_buf_trans(&icl_combo_phy_trans_edp_hbr2,
+ n_entries);
+ }
+
+ return rkl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries);
+}
+
+static const struct intel_ddi_buf_trans *
+rkl_get_combo_buf_trans(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ return intel_get_buf_trans(&icl_combo_phy_trans_hdmi, n_entries);
+ else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
+ return rkl_get_combo_buf_trans_edp(encoder, crtc_state, n_entries);
+ else
+ return rkl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries);
+}
+
+static const struct intel_ddi_buf_trans *
+adls_get_combo_buf_trans_dp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ if (crtc_state->port_clock > 270000)
+ return intel_get_buf_trans(&adls_combo_phy_trans_dp_hbr2_hbr3, n_entries);
+ else
+ return intel_get_buf_trans(&tgl_combo_phy_trans_dp_hbr, n_entries);
+}
+
+static const struct intel_ddi_buf_trans *
+adls_get_combo_buf_trans_edp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ if (crtc_state->port_clock > 540000)
+ return intel_get_buf_trans(&adls_combo_phy_trans_edp_hbr3, n_entries);
+ else if (use_edp_hobl(encoder))
+ return intel_get_buf_trans(&tgl_combo_phy_trans_edp_hbr2_hobl, n_entries);
+ else if (use_edp_low_vswing(encoder))
+ return intel_get_buf_trans(&adls_combo_phy_trans_edp_hbr2, n_entries);
+ else
+ return adls_get_combo_buf_trans_dp(encoder, crtc_state, n_entries);
+}
+
+static const struct intel_ddi_buf_trans *
+adls_get_combo_buf_trans(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ return intel_get_buf_trans(&icl_combo_phy_trans_hdmi, n_entries);
+ else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
+ return adls_get_combo_buf_trans_edp(encoder, crtc_state, n_entries);
+ else
+ return adls_get_combo_buf_trans_dp(encoder, crtc_state, n_entries);
+}
+
+static const struct intel_ddi_buf_trans *
+adlp_get_combo_buf_trans_dp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ if (crtc_state->port_clock > 270000)
+ return intel_get_buf_trans(&adlp_combo_phy_trans_dp_hbr2_hbr3, n_entries);
+ else
+ return intel_get_buf_trans(&adlp_combo_phy_trans_dp_hbr, n_entries);
+}
+
+static const struct intel_ddi_buf_trans *
+adlp_get_combo_buf_trans_edp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ if (crtc_state->port_clock > 540000) {
+ return intel_get_buf_trans(&adlp_combo_phy_trans_edp_hbr3,
+ n_entries);
+ } else if (use_edp_hobl(encoder)) {
+ return intel_get_buf_trans(&tgl_combo_phy_trans_edp_hbr2_hobl,
+ n_entries);
+ } else if (use_edp_low_vswing(encoder)) {
+ return intel_get_buf_trans(&adlp_combo_phy_trans_edp_up_to_hbr2,
+ n_entries);
+ }
+
+ return adlp_get_combo_buf_trans_dp(encoder, crtc_state, n_entries);
+}
+
+static const struct intel_ddi_buf_trans *
+adlp_get_combo_buf_trans(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ return intel_get_buf_trans(&icl_combo_phy_trans_hdmi, n_entries);
+ else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
+ return adlp_get_combo_buf_trans_edp(encoder, crtc_state, n_entries);
+ else
+ return adlp_get_combo_buf_trans_dp(encoder, crtc_state, n_entries);
+}
+
+static const struct intel_ddi_buf_trans *
+tgl_get_dkl_buf_trans_dp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ if (crtc_state->port_clock > 270000) {
+ return intel_get_buf_trans(&tgl_dkl_phy_trans_dp_hbr2,
+ n_entries);
+ } else {
+ return intel_get_buf_trans(&tgl_dkl_phy_trans_dp_hbr,
+ n_entries);
+ }
+}
+
+static const struct intel_ddi_buf_trans *
+tgl_get_dkl_buf_trans(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ return intel_get_buf_trans(&tgl_dkl_phy_trans_hdmi, n_entries);
+ else
+ return tgl_get_dkl_buf_trans_dp(encoder, crtc_state, n_entries);
+}
+
+static const struct intel_ddi_buf_trans *
+adlp_get_dkl_buf_trans_dp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ if (crtc_state->port_clock > 270000) {
+ return intel_get_buf_trans(&adlp_dkl_phy_trans_dp_hbr2_hbr3,
+ n_entries);
+ } else {
+ return intel_get_buf_trans(&adlp_dkl_phy_trans_dp_hbr,
+ n_entries);
+ }
+}
+
+static const struct intel_ddi_buf_trans *
+adlp_get_dkl_buf_trans(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ return intel_get_buf_trans(&tgl_dkl_phy_trans_hdmi, n_entries);
+ else
+ return adlp_get_dkl_buf_trans_dp(encoder, crtc_state, n_entries);
+}
+
+static const struct intel_ddi_buf_trans *
+dg2_get_snps_buf_trans(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ if (intel_crtc_has_dp_encoder(crtc_state) &&
+ intel_dp_is_uhbr(crtc_state))
+ return intel_get_buf_trans(&dg2_snps_trans_uhbr, n_entries);
+ else
+ return intel_get_buf_trans(&dg2_snps_trans, n_entries);
+}
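+
+/*
+ * Sketch of the assumed caller pattern (the real call sites live in the
+ * DDI code, not in this file):
+ *
+ *	int n_entries;
+ *	const struct intel_ddi_buf_trans *trans =
+ *		encoder->get_buf_trans(encoder, crtc_state, &n_entries);
+ *
+ * Each hook selected below fills *n_entries and returns the table that
+ * matches the platform, output type and link rate.
+ */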
+
+void intel_ddi_buf_trans_init(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+
+ if (IS_DG2(i915)) {
+ encoder->get_buf_trans = dg2_get_snps_buf_trans;
+ } else if (IS_ALDERLAKE_P(i915)) {
+ if (intel_phy_is_combo(i915, phy))
+ encoder->get_buf_trans = adlp_get_combo_buf_trans;
+ else
+ encoder->get_buf_trans = adlp_get_dkl_buf_trans;
+ } else if (IS_ALDERLAKE_S(i915)) {
+ encoder->get_buf_trans = adls_get_combo_buf_trans;
+ } else if (IS_ROCKETLAKE(i915)) {
+ encoder->get_buf_trans = rkl_get_combo_buf_trans;
+ } else if (IS_DG1(i915)) {
+ encoder->get_buf_trans = dg1_get_combo_buf_trans;
+ } else if (DISPLAY_VER(i915) >= 12) {
+ if (intel_phy_is_combo(i915, phy))
+ encoder->get_buf_trans = tgl_get_combo_buf_trans;
+ else
+ encoder->get_buf_trans = tgl_get_dkl_buf_trans;
+ } else if (DISPLAY_VER(i915) == 11) {
+ if (IS_PLATFORM(i915, INTEL_JASPERLAKE))
+ encoder->get_buf_trans = jsl_get_combo_buf_trans;
+ else if (IS_PLATFORM(i915, INTEL_ELKHARTLAKE))
+ encoder->get_buf_trans = ehl_get_combo_buf_trans;
+ else if (intel_phy_is_combo(i915, phy))
+ encoder->get_buf_trans = icl_get_combo_buf_trans;
+ else
+ encoder->get_buf_trans = icl_get_mg_buf_trans;
+ } else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
+ encoder->get_buf_trans = bxt_get_buf_trans;
+ } else if (IS_CML_ULX(i915) || IS_CFL_ULX(i915) || IS_KBL_ULX(i915)) {
+ encoder->get_buf_trans = kbl_y_get_buf_trans;
+ } else if (IS_CML_ULT(i915) || IS_CFL_ULT(i915) || IS_KBL_ULT(i915)) {
+ encoder->get_buf_trans = kbl_u_get_buf_trans;
+ } else if (IS_COMETLAKE(i915) || IS_COFFEELAKE(i915) || IS_KABYLAKE(i915)) {
+ encoder->get_buf_trans = kbl_get_buf_trans;
+ } else if (IS_SKL_ULX(i915)) {
+ encoder->get_buf_trans = skl_y_get_buf_trans;
+ } else if (IS_SKL_ULT(i915)) {
+ encoder->get_buf_trans = skl_u_get_buf_trans;
+ } else if (IS_SKYLAKE(i915)) {
+ encoder->get_buf_trans = skl_get_buf_trans;
+ } else if (IS_BROADWELL(i915)) {
+ encoder->get_buf_trans = bdw_get_buf_trans;
+ } else if (IS_HASWELL(i915)) {
+ encoder->get_buf_trans = hsw_get_buf_trans;
+ } else {
+ MISSING_CASE(INTEL_INFO(i915)->platform);
+ }
+}
diff --git a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h
new file mode 100644
index 000000000..2133984a5
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef _INTEL_DDI_BUF_TRANS_H_
+#define _INTEL_DDI_BUF_TRANS_H_
+
+#include <linux/types.h>
+
+struct drm_i915_private;
+struct intel_encoder;
+struct intel_crtc_state;
+
+struct hsw_ddi_buf_trans {
+ u32 trans1; /* balance leg enable, de-emph level */
+ u32 trans2; /* vref sel, vswing */
+ u8 i_boost; /* SKL: I_boost; valid: 0x0, 0x1, 0x3, 0x7 */
+};
+
+struct bxt_ddi_buf_trans {
+ u8 margin; /* swing value */
+ u8 scale; /* scale value */
+ u8 enable; /* scale enable */
+ u8 deemphasis;
+};
+
+struct icl_ddi_buf_trans {
+ u8 dw2_swing_sel;
+ u8 dw7_n_scalar;
+ u8 dw4_cursor_coeff;
+ u8 dw4_post_cursor_2;
+ u8 dw4_post_cursor_1;
+};
+
+struct icl_mg_phy_ddi_buf_trans {
+ u8 cri_txdeemph_override_11_6;
+ u8 cri_txdeemph_override_5_0;
+ u8 cri_txdeemph_override_17_12;
+};
+
+struct tgl_dkl_phy_ddi_buf_trans {
+ u8 vswing;
+ u8 preshoot;
+ u8 de_emphasis;
+};
+
+struct dg2_snps_phy_buf_trans {
+ u8 vswing;
+ u8 pre_cursor;
+ u8 post_cursor;
+};
+
+union intel_ddi_buf_trans_entry {
+ struct hsw_ddi_buf_trans hsw;
+ struct bxt_ddi_buf_trans bxt;
+ struct icl_ddi_buf_trans icl;
+ struct icl_mg_phy_ddi_buf_trans mg;
+ struct tgl_dkl_phy_ddi_buf_trans dkl;
+ struct dg2_snps_phy_buf_trans snps;
+};
+
+struct intel_ddi_buf_trans {
+ const union intel_ddi_buf_trans_entry *entries;
+ u8 num_entries;
+ u8 hdmi_default_entry;
+};
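+
+/*
+ * Minimal sketch of how a table is assembled (hypothetical values, shown
+ * only to illustrate the structure; the real tables live in the .c file):
+ *
+ *	static const union intel_ddi_buf_trans_entry _example_entries[] = {
+ *		{ .hsw = { .trans1 = 0x00FFFFFF, .trans2 = 0x0006000E } },
+ *	};
+ *
+ *	static const struct intel_ddi_buf_trans _example_trans = {
+ *		.entries = _example_entries,
+ *		.num_entries = ARRAY_SIZE(_example_entries),
+ *	};
+ */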
+
+bool is_hobl_buf_trans(const struct intel_ddi_buf_trans *table);
+
+void intel_ddi_buf_trans_init(struct intel_encoder *encoder);
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/intel_de.h b/drivers/gpu/drm/i915/display/intel_de.h
new file mode 100644
index 000000000..9c104f65e
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_de.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_DE_H__
+#define __INTEL_DE_H__
+
+#include "i915_drv.h"
+#include "i915_trace.h"
+#include "intel_uncore.h"
+
+static inline u32
+intel_de_read(struct drm_i915_private *i915, i915_reg_t reg)
+{
+ return intel_uncore_read(&i915->uncore, reg);
+}
+
+static inline void
+intel_de_posting_read(struct drm_i915_private *i915, i915_reg_t reg)
+{
+ intel_uncore_posting_read(&i915->uncore, reg);
+}
+
+static inline void
+intel_de_write(struct drm_i915_private *i915, i915_reg_t reg, u32 val)
+{
+ intel_uncore_write(&i915->uncore, reg, val);
+}
+
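+/*
+ * Read-modify-write: the register is read, the clear bits are masked off,
+ * the set bits are OR'ed in and the result is written back. E.g.
+ * intel_de_rmw(i915, reg, MASK, 0) clears MASK and
+ * intel_de_rmw(i915, reg, 0, BIT) sets BIT (illustrative usage).
+ */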
+static inline void
+intel_de_rmw(struct drm_i915_private *i915, i915_reg_t reg, u32 clear, u32 set)
+{
+ intel_uncore_rmw(&i915->uncore, reg, clear, set);
+}
+
+static inline int
+intel_de_wait_for_register(struct drm_i915_private *i915, i915_reg_t reg,
+ u32 mask, u32 value, unsigned int timeout)
+{
+ return intel_wait_for_register(&i915->uncore, reg, mask, value, timeout);
+}
+
+static inline int
+intel_de_wait_for_set(struct drm_i915_private *i915, i915_reg_t reg,
+ u32 mask, unsigned int timeout)
+{
+ return intel_de_wait_for_register(i915, reg, mask, mask, timeout);
+}
+
+static inline int
+intel_de_wait_for_clear(struct drm_i915_private *i915, i915_reg_t reg,
+ u32 mask, unsigned int timeout)
+{
+ return intel_de_wait_for_register(i915, reg, mask, 0, timeout);
+}
+
+/*
+ * Unlocked mmio-accessors, think carefully before using these.
+ *
+ * Certain architectures will die if the same cacheline is concurrently accessed
+ * by different clients (e.g. on Ivybridge). Access to registers should
+ * therefore generally be serialised, by either the dev_priv->uncore.lock or
+ * a more localised lock guarding all access to that bank of registers.
+ */
+static inline u32
+intel_de_read_fw(struct drm_i915_private *i915, i915_reg_t reg)
+{
+ u32 val;
+
+ val = intel_uncore_read_fw(&i915->uncore, reg);
+ trace_i915_reg_rw(false, reg, val, sizeof(val), true);
+
+ return val;
+}
+
+static inline void
+intel_de_write_fw(struct drm_i915_private *i915, i915_reg_t reg, u32 val)
+{
+ trace_i915_reg_rw(true, reg, val, sizeof(val), true);
+ intel_uncore_write_fw(&i915->uncore, reg, val);
+}
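+
+/*
+ * Illustrative usage under an explicit lock (a sketch only, not a call
+ * site in this file):
+ *
+ *	spin_lock_irq(&i915->uncore.lock);
+ *	val = intel_de_read_fw(i915, reg);
+ *	intel_de_write_fw(i915, reg, val | bit);
+ *	spin_unlock_irq(&i915->uncore.lock);
+ */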
+
+#endif /* __INTEL_DE_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
new file mode 100644
index 000000000..fb8d1d634
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -0,0 +1,9140 @@
+/*
+ * Copyright © 2006-2007 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ */
+
+#include <acpi/video.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/dma-resv.h>
+#include <linux/slab.h>
+#include <linux/string_helpers.h>
+#include <linux/vga_switcheroo.h>
+
+#include <drm/display/drm_dp_helper.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_privacy_screen_consumer.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_rect.h>
+
+#include "display/intel_audio.h"
+#include "display/intel_crt.h"
+#include "display/intel_ddi.h"
+#include "display/intel_display_debugfs.h"
+#include "display/intel_display_power.h"
+#include "display/intel_dp.h"
+#include "display/intel_dp_mst.h"
+#include "display/intel_dpll.h"
+#include "display/intel_dpll_mgr.h"
+#include "display/intel_drrs.h"
+#include "display/intel_dsi.h"
+#include "display/intel_dvo.h"
+#include "display/intel_fb.h"
+#include "display/intel_gmbus.h"
+#include "display/intel_hdmi.h"
+#include "display/intel_lvds.h"
+#include "display/intel_sdvo.h"
+#include "display/intel_snps_phy.h"
+#include "display/intel_tv.h"
+#include "display/intel_vdsc.h"
+#include "display/intel_vrr.h"
+
+#include "gem/i915_gem_lmem.h"
+#include "gem/i915_gem_object.h"
+
+#include "gt/gen8_ppgtt.h"
+
+#include "g4x_dp.h"
+#include "g4x_hdmi.h"
+#include "hsw_ips.h"
+#include "i915_drv.h"
+#include "i915_utils.h"
+#include "icl_dsi.h"
+#include "intel_acpi.h"
+#include "intel_atomic.h"
+#include "intel_atomic_plane.h"
+#include "intel_bw.h"
+#include "intel_cdclk.h"
+#include "intel_color.h"
+#include "intel_crtc.h"
+#include "intel_crtc_state_dump.h"
+#include "intel_de.h"
+#include "intel_display_types.h"
+#include "intel_dmc.h"
+#include "intel_dp_link_training.h"
+#include "intel_dpt.h"
+#include "intel_dsb.h"
+#include "intel_fbc.h"
+#include "intel_fbdev.h"
+#include "intel_fdi.h"
+#include "intel_fifo_underrun.h"
+#include "intel_frontbuffer.h"
+#include "intel_hdcp.h"
+#include "intel_hotplug.h"
+#include "intel_modeset_verify.h"
+#include "intel_modeset_setup.h"
+#include "intel_overlay.h"
+#include "intel_panel.h"
+#include "intel_pch_display.h"
+#include "intel_pch_refclk.h"
+#include "intel_pcode.h"
+#include "intel_pipe_crc.h"
+#include "intel_plane_initial.h"
+#include "intel_pm.h"
+#include "intel_pps.h"
+#include "intel_psr.h"
+#include "intel_quirks.h"
+#include "intel_sprite.h"
+#include "intel_tc.h"
+#include "intel_vga.h"
+#include "i9xx_plane.h"
+#include "skl_scaler.h"
+#include "skl_universal_plane.h"
+#include "skl_watermark.h"
+#include "vlv_dsi.h"
+#include "vlv_dsi_pll.h"
+#include "vlv_dsi_regs.h"
+#include "vlv_sideband.h"
+
+static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state);
+static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
+static void hsw_set_transconf(const struct intel_crtc_state *crtc_state);
+static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
+static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state);
+
+/**
+ * intel_update_watermarks - update FIFO watermark values based on current modes
+ * @dev_priv: i915 device
+ *
+ * Calculate watermark values for the various WM regs based on current mode
+ * and plane configuration.
+ *
+ * There are several cases to deal with here:
+ * - normal (i.e. non-self-refresh)
+ * - self-refresh (SR) mode
+ * - lines are large relative to FIFO size (buffer can hold up to 2)
+ * - lines are small relative to FIFO size (buffer can hold more than 2
+ * lines), so need to account for TLB latency
+ *
+ * The normal calculation is:
+ * watermark = dotclock * bytes per pixel * latency
+ * where latency is platform & configuration dependent (we assume pessimal
+ * values here).
+ *
+ * The SR calculation is:
+ * watermark = (trunc(latency/line time)+1) * surface width *
+ * bytes per pixel
+ * where
+ * line time = htotal / dotclock
+ * surface width = hdisplay for normal plane and 64 for cursor
+ * and latency is assumed to be high, as above.
+ *
+ * The final value programmed to the register should always be rounded up,
+ * and include an extra 2 entries to account for clock crossings.
+ *
+ * We don't use the sprite, so we can ignore that. And on Crestline we have
+ * to set the non-SR watermarks to 8.
+ */
+void intel_update_watermarks(struct drm_i915_private *dev_priv)
+{
+ if (dev_priv->display.funcs.wm->update_wm)
+ dev_priv->display.funcs.wm->update_wm(dev_priv);
+}
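+
+/*
+ * Worked example with illustrative numbers (not any platform's tuning):
+ * a 148500 kHz dot clock at 4 bytes per pixel with 12 us latency needs
+ * 148500 * 4 * 12 / 1000 = 7128 bytes of FIFO, to be rounded up and
+ * padded with the 2 extra entries for clock crossings. For SR, a
+ * 2200 pixel htotal gives a line time of 2200 / 148500 kHz ~= 14.8 us,
+ * so a 30 us latency covers trunc(30 / 14.8) + 1 = 3 lines, i.e.
+ * 3 * 1920 * 4 = 23040 bytes for a 1920 pixel wide surface.
+ */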
+
+static int intel_compute_pipe_wm(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+
+	if (dev_priv->display.funcs.wm->compute_pipe_wm)
+		return dev_priv->display.funcs.wm->compute_pipe_wm(state, crtc);
+
+	return 0;
+}
+
+static int intel_compute_intermediate_wm(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+
+	if (!dev_priv->display.funcs.wm->compute_intermediate_wm)
+		return 0;
+
+	if (drm_WARN_ON(&dev_priv->drm,
+			!dev_priv->display.funcs.wm->compute_pipe_wm))
+		return 0;
+
+	return dev_priv->display.funcs.wm->compute_intermediate_wm(state, crtc);
+}
+
+static bool intel_initial_watermarks(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+
+	if (dev_priv->display.funcs.wm->initial_watermarks) {
+		dev_priv->display.funcs.wm->initial_watermarks(state, crtc);
+		return true;
+	}
+
+	return false;
+}
+
+static void intel_atomic_update_watermarks(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+
+	if (dev_priv->display.funcs.wm->atomic_update_watermarks)
+		dev_priv->display.funcs.wm->atomic_update_watermarks(state, crtc);
+}
+
+static void intel_optimize_watermarks(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+
+	if (dev_priv->display.funcs.wm->optimize_watermarks)
+		dev_priv->display.funcs.wm->optimize_watermarks(state, crtc);
+}
+
+static int intel_compute_global_watermarks(struct intel_atomic_state *state)
+{
+	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+
+	if (dev_priv->display.funcs.wm->compute_global_watermarks)
+		return dev_priv->display.funcs.wm->compute_global_watermarks(state);
+
+	return 0;
+}
+
+/* returns HPLL frequency in kHz */
+int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
+{
+ int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
+
+ /* Obtain SKU information */
+ hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
+ CCK_FUSE_HPLL_FREQ_MASK;
+
+ return vco_freq[hpll_freq] * 1000;
+}
+
+int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
+ const char *name, u32 reg, int ref_freq)
+{
+ u32 val;
+ int divider;
+
+ val = vlv_cck_read(dev_priv, reg);
+ divider = val & CCK_FREQUENCY_VALUES;
+
+ drm_WARN(&dev_priv->drm, (val & CCK_FREQUENCY_STATUS) !=
+ (divider << CCK_FREQUENCY_STATUS_SHIFT),
+ "%s change in progress\n", name);
+
+ return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
+}
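+
+/*
+ * Illustrative arithmetic: the divider encodes freq = 2 * ref /
+ * (divider + 1), so e.g. a 1600000 kHz HPLL VCO with a divider field
+ * of 7 yields DIV_ROUND_CLOSEST(3200000, 8) = 400000 kHz.
+ */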
+
+int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
+ const char *name, u32 reg)
+{
+ int hpll;
+
+ vlv_cck_get(dev_priv);
+
+ if (dev_priv->hpll_freq == 0)
+ dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);
+
+ hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);
+
+ vlv_cck_put(dev_priv);
+
+ return hpll;
+}
+
+static void intel_update_czclk(struct drm_i915_private *dev_priv)
+{
+ if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
+ return;
+
+ dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
+ CCK_CZ_CLOCK_CONTROL);
+
+ drm_dbg(&dev_priv->drm, "CZ clock rate: %d kHz\n",
+ dev_priv->czclk_freq);
+}
+
+static bool is_hdr_mode(const struct intel_crtc_state *crtc_state)
+{
+ return (crtc_state->active_planes &
+ ~(icl_hdr_plane_mask() | BIT(PLANE_CURSOR))) == 0;
+}
+
+/* WA Display #0827: Gen9:all */
+static void
+skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
+{
+	intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe),
+		     DUPS1_GATING_DIS | DUPS2_GATING_DIS,
+		     enable ? DUPS1_GATING_DIS | DUPS2_GATING_DIS : 0);
+}
+
+/* Wa_2006604312:icl,ehl */
+static void
+icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
+ bool enable)
+{
+	intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe),
+		     DPFR_GATING_DIS,
+		     enable ? DPFR_GATING_DIS : 0);
+}
+
+/* Wa_1604331009:icl,jsl,ehl */
+static void
+icl_wa_cursorclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
+ bool enable)
+{
+ intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), CURSOR_GATING_DIS,
+ enable ? CURSOR_GATING_DIS : 0);
+}
+
+static bool
+is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
+{
+ return crtc_state->master_transcoder != INVALID_TRANSCODER;
+}
+
+static bool
+is_trans_port_sync_master(const struct intel_crtc_state *crtc_state)
+{
+ return crtc_state->sync_mode_slaves_mask != 0;
+}
+
+bool
+is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
+{
+ return is_trans_port_sync_master(crtc_state) ||
+ is_trans_port_sync_slave(crtc_state);
+}
+
+static enum pipe bigjoiner_master_pipe(const struct intel_crtc_state *crtc_state)
+{
+ return ffs(crtc_state->bigjoiner_pipes) - 1;
+}
+
+u8 intel_crtc_bigjoiner_slave_pipes(const struct intel_crtc_state *crtc_state)
+{
+ if (crtc_state->bigjoiner_pipes)
+ return crtc_state->bigjoiner_pipes & ~BIT(bigjoiner_master_pipe(crtc_state));
+ else
+ return 0;
+}
+
+bool intel_crtc_is_bigjoiner_slave(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ return crtc_state->bigjoiner_pipes &&
+ crtc->pipe != bigjoiner_master_pipe(crtc_state);
+}
+
+bool intel_crtc_is_bigjoiner_master(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ return crtc_state->bigjoiner_pipes &&
+ crtc->pipe == bigjoiner_master_pipe(crtc_state);
+}
+
+static int intel_bigjoiner_num_pipes(const struct intel_crtc_state *crtc_state)
+{
+ return hweight8(crtc_state->bigjoiner_pipes);
+}
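+
+/*
+ * E.g. with bigjoiner_pipes = BIT(PIPE_A) | BIT(PIPE_B) the lowest set
+ * bit makes pipe A the master, the slave mask is BIT(PIPE_B) and the
+ * pipe count is 2 (an illustrative reading of the helpers above).
+ */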
+
+struct intel_crtc *intel_master_crtc(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+
+ if (intel_crtc_is_bigjoiner_slave(crtc_state))
+ return intel_crtc_for_pipe(i915, bigjoiner_master_pipe(crtc_state));
+ else
+ return to_intel_crtc(crtc_state->uapi.crtc);
+}
+
+static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ i915_reg_t reg = PIPEDSL(pipe);
+ u32 line1, line2;
+
+ line1 = intel_de_read(dev_priv, reg) & PIPEDSL_LINE_MASK;
+ msleep(5);
+ line2 = intel_de_read(dev_priv, reg) & PIPEDSL_LINE_MASK;
+
+ return line1 != line2;
+}
+
+static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+
+ /* Wait for the display line to settle/start moving */
+ if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
+ drm_err(&dev_priv->drm,
+ "pipe %c scanline %s wait timed out\n",
+ pipe_name(pipe), str_on_off(state));
+}
+
+static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
+{
+ wait_for_pipe_scanline_moving(crtc, false);
+}
+
+static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
+{
+ wait_for_pipe_scanline_moving(crtc, true);
+}
+
+static void
+intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+ if (DISPLAY_VER(dev_priv) >= 4) {
+ enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
+
+ /* Wait for the Pipe State to go off */
+ if (intel_de_wait_for_clear(dev_priv, PIPECONF(cpu_transcoder),
+ PIPECONF_STATE_ENABLE, 100))
+ drm_WARN(&dev_priv->drm, 1, "pipe_off wait timed out\n");
+ } else {
+ intel_wait_for_pipe_scanline_stopped(crtc);
+ }
+}
+
+void assert_transcoder(struct drm_i915_private *dev_priv,
+ enum transcoder cpu_transcoder, bool state)
+{
+ bool cur_state;
+ enum intel_display_power_domain power_domain;
+ intel_wakeref_t wakeref;
+
+ /* we keep both pipes enabled on 830 */
+ if (IS_I830(dev_priv))
+ state = true;
+
+ power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
+ wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ if (wakeref) {
+		u32 val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));
+
+		cur_state = !!(val & PIPECONF_ENABLE);
+
+ intel_display_power_put(dev_priv, power_domain, wakeref);
+ } else {
+ cur_state = false;
+ }
+
+ I915_STATE_WARN(cur_state != state,
+ "transcoder %s assertion failure (expected %s, current %s)\n",
+ transcoder_name(cpu_transcoder),
+ str_on_off(state), str_on_off(cur_state));
+}
+
+static void assert_plane(struct intel_plane *plane, bool state)
+{
+ enum pipe pipe;
+ bool cur_state;
+
+ cur_state = plane->get_hw_state(plane, &pipe);
+
+ I915_STATE_WARN(cur_state != state,
+ "%s assertion failure (expected %s, current %s)\n",
+ plane->base.name, str_on_off(state),
+ str_on_off(cur_state));
+}
+
+#define assert_plane_enabled(p) assert_plane(p, true)
+#define assert_plane_disabled(p) assert_plane(p, false)
+
+static void assert_planes_disabled(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_plane *plane;
+
+ for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
+ assert_plane_disabled(plane);
+}
+
+void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
+ struct intel_digital_port *dig_port,
+ unsigned int expected_mask)
+{
+ u32 port_mask;
+ i915_reg_t dpll_reg;
+
+ switch (dig_port->base.port) {
+ default:
+ MISSING_CASE(dig_port->base.port);
+ fallthrough;
+ case PORT_B:
+ port_mask = DPLL_PORTB_READY_MASK;
+ dpll_reg = DPLL(0);
+ break;
+ case PORT_C:
+ port_mask = DPLL_PORTC_READY_MASK;
+ dpll_reg = DPLL(0);
+ expected_mask <<= 4;
+ break;
+ case PORT_D:
+ port_mask = DPLL_PORTD_READY_MASK;
+ dpll_reg = DPIO_PHY_STATUS;
+ break;
+ }
+
+ if (intel_de_wait_for_register(dev_priv, dpll_reg,
+ port_mask, expected_mask, 1000))
+ drm_WARN(&dev_priv->drm, 1,
+ "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
+ dig_port->base.base.base.id, dig_port->base.base.name,
+ intel_de_read(dev_priv, dpll_reg) & port_mask,
+ expected_mask);
+}
+
+void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
+ enum pipe pipe = crtc->pipe;
+ i915_reg_t reg;
+ u32 val;
+
+ drm_dbg_kms(&dev_priv->drm, "enabling pipe %c\n", pipe_name(pipe));
+
+ assert_planes_disabled(crtc);
+
+ /*
+ * A pipe without a PLL won't actually be able to drive bits from
+ * a plane. On ILK+ the pipe PLLs are integrated, so we don't
+ * need the check.
+ */
+ if (HAS_GMCH(dev_priv)) {
+ if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
+ assert_dsi_pll_enabled(dev_priv);
+ else
+ assert_pll_enabled(dev_priv, pipe);
+ } else {
+ if (new_crtc_state->has_pch_encoder) {
+ /* if driving the PCH, we need FDI enabled */
+ assert_fdi_rx_pll_enabled(dev_priv,
+ intel_crtc_pch_transcoder(crtc));
+ assert_fdi_tx_pll_enabled(dev_priv,
+ (enum pipe) cpu_transcoder);
+ }
+ /* FIXME: assert CPU port conditions for SNB+ */
+ }
+
+ /* Wa_22012358565:adl-p */
+ if (DISPLAY_VER(dev_priv) == 13)
+ intel_de_rmw(dev_priv, PIPE_ARB_CTL(pipe),
+ 0, PIPE_ARB_USE_PROG_SLOTS);
+
+ reg = PIPECONF(cpu_transcoder);
+ val = intel_de_read(dev_priv, reg);
+ if (val & PIPECONF_ENABLE) {
+ /* we keep both pipes enabled on 830 */
+ drm_WARN_ON(&dev_priv->drm, !IS_I830(dev_priv));
+ return;
+ }
+
+ intel_de_write(dev_priv, reg, val | PIPECONF_ENABLE);
+ intel_de_posting_read(dev_priv, reg);
+
+ /*
+ * Until the pipe starts PIPEDSL reads will return a stale value,
+ * which causes an apparent vblank timestamp jump when PIPEDSL
+ * resets to its proper value. That also messes up the frame count
+ * when it's derived from the timestamps. So let's wait for the
+ * pipe to start properly before we call drm_crtc_vblank_on()
+ */
+ if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
+ intel_wait_for_pipe_scanline_moving(crtc);
+}
+
+void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
+ enum pipe pipe = crtc->pipe;
+ i915_reg_t reg;
+ u32 val;
+
+ drm_dbg_kms(&dev_priv->drm, "disabling pipe %c\n", pipe_name(pipe));
+
+ /*
+ * Make sure planes won't keep trying to pump pixels to us,
+ * or we might hang the display.
+ */
+ assert_planes_disabled(crtc);
+
+ reg = PIPECONF(cpu_transcoder);
+ val = intel_de_read(dev_priv, reg);
+ if ((val & PIPECONF_ENABLE) == 0)
+ return;
+
+ /*
+ * Double wide has implications for planes
+ * so best keep it disabled when not needed.
+ */
+ if (old_crtc_state->double_wide)
+ val &= ~PIPECONF_DOUBLE_WIDE;
+
+ /* Don't disable pipe or pipe PLLs if needed */
+ if (!IS_I830(dev_priv))
+ val &= ~PIPECONF_ENABLE;
+
+ if (DISPLAY_VER(dev_priv) >= 14)
+ intel_de_rmw(dev_priv, MTL_CHICKEN_TRANS(cpu_transcoder),
+ FECSTALL_DIS_DPTSTREAM_DPTTG, 0);
+ else if (DISPLAY_VER(dev_priv) >= 12)
+ intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder),
+ FECSTALL_DIS_DPTSTREAM_DPTTG, 0);
+
+ intel_de_write(dev_priv, reg, val);
+ if ((val & PIPECONF_ENABLE) == 0)
+ intel_wait_for_pipe_off(old_crtc_state);
+}
+
+unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
+{
+ unsigned int size = 0;
+ int i;
+
+	for (i = 0; i < ARRAY_SIZE(rot_info->plane); i++)
+ size += rot_info->plane[i].dst_stride * rot_info->plane[i].width;
+
+ return size;
+}
+
+unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
+{
+ unsigned int size = 0;
+ int i;
+
+	for (i = 0; i < ARRAY_SIZE(rem_info->plane); i++) {
+ unsigned int plane_size;
+
+ if (rem_info->plane[i].linear)
+ plane_size = rem_info->plane[i].size;
+ else
+ plane_size = rem_info->plane[i].dst_stride * rem_info->plane[i].height;
+
+ if (plane_size == 0)
+ continue;
+
+ if (rem_info->plane_alignment)
+ size = ALIGN(size, rem_info->plane_alignment);
+
+ size += plane_size;
+ }
+
+ return size;
+}
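+
+/*
+ * Illustrative sizing: two non-linear planes of 0x1000 and 0x800 bytes
+ * with plane_alignment = 0x1000 yield ALIGN(0, 0x1000) + 0x1000 for the
+ * first and ALIGN(0x1000, 0x1000) + 0x800 for the second, i.e. 0x1800
+ * bytes in total.
+ */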
+
+bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+
+ return DISPLAY_VER(dev_priv) < 4 ||
+ (plane->fbc &&
+ plane_state->view.gtt.type == I915_GTT_VIEW_NORMAL);
+}
+
+/*
+ * Convert the x/y offsets into a linear offset.
+ * Only valid with 0/180 degree rotation, which is fine since linear
+ * offset is only used with linear buffers on pre-hsw and tiled buffers
+ * with gen2/3, and 90/270 degree rotation isn't supported on any of them.
+ */
+u32 intel_fb_xy_to_linear(int x, int y,
+ const struct intel_plane_state *state,
+ int color_plane)
+{
+ const struct drm_framebuffer *fb = state->hw.fb;
+ unsigned int cpp = fb->format->cpp[color_plane];
+ unsigned int pitch = state->view.color_plane[color_plane].mapping_stride;
+
+ return y * pitch + x * cpp;
+}
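+
+/*
+ * E.g. with a 4 byte-per-pixel format and an 8192 byte mapping stride
+ * (illustrative numbers), (x, y) = (16, 2) maps to a linear offset of
+ * 2 * 8192 + 16 * 4 = 16448 bytes.
+ */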
+
+/*
+ * Add the x/y offsets derived from fb->offsets[] to the user
+ * specified plane src x/y offsets. The resulting x/y offsets
+ * specify the start of scanout from the beginning of the gtt mapping.
+ */
+void intel_add_fb_offsets(int *x, int *y,
+ const struct intel_plane_state *state,
+ int color_plane)
+{
+ *x += state->view.color_plane[color_plane].x;
+ *y += state->view.color_plane[color_plane].y;
+}
+
+u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
+ u32 pixel_format, u64 modifier)
+{
+ struct intel_crtc *crtc;
+ struct intel_plane *plane;
+
+ if (!HAS_DISPLAY(dev_priv))
+ return 0;
+
+ /*
+ * We assume the primary plane for pipe A has
+	 * the highest stride limits of them all;
+	 * in case pipe A is disabled, use the first pipe from pipe_mask.
+ */
+ crtc = intel_first_crtc(dev_priv);
+ if (!crtc)
+ return 0;
+
+ plane = to_intel_plane(crtc->base.primary);
+
+ return plane->max_stride(plane, pixel_format, modifier,
+ DRM_MODE_ROTATE_0);
+}
+
+void intel_set_plane_visible(struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state,
+ bool visible)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+
+ plane_state->uapi.visible = visible;
+
+ if (visible)
+ crtc_state->uapi.plane_mask |= drm_plane_mask(&plane->base);
+ else
+ crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base);
+}
+
+void intel_plane_fixup_bitmasks(struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct drm_plane *plane;
+
+ /*
+	 * active_planes would alias if multiple "primary" or cursor planes
+	 * had been used on the same (or wrong) pipe. plane_mask uses
+	 * unique ids, hence we can use that to reconstruct active_planes.
+ */
+ crtc_state->enabled_planes = 0;
+ crtc_state->active_planes = 0;
+
+ drm_for_each_plane_mask(plane, &dev_priv->drm,
+ crtc_state->uapi.plane_mask) {
+ crtc_state->enabled_planes |= BIT(to_intel_plane(plane)->id);
+ crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
+ }
+}
+
+void intel_plane_disable_noatomic(struct intel_crtc *crtc,
+ struct intel_plane *plane)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
+
+ drm_dbg_kms(&dev_priv->drm,
+ "Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
+ plane->base.base.id, plane->base.name,
+ crtc->base.base.id, crtc->base.name);
+
+ intel_set_plane_visible(crtc_state, plane_state, false);
+ intel_plane_fixup_bitmasks(crtc_state);
+ crtc_state->data_rate[plane->id] = 0;
+ crtc_state->data_rate_y[plane->id] = 0;
+ crtc_state->rel_data_rate[plane->id] = 0;
+ crtc_state->rel_data_rate_y[plane->id] = 0;
+ crtc_state->min_cdclk[plane->id] = 0;
+
+ if ((crtc_state->active_planes & ~BIT(PLANE_CURSOR)) == 0 &&
+ hsw_ips_disable(crtc_state)) {
+ crtc_state->ips_enabled = false;
+ intel_crtc_wait_for_next_vblank(crtc);
+ }
+
+ /*
+ * Vblank time updates from the shadow to live plane control register
+ * are blocked if the memory self-refresh mode is active at that
+	 * moment. So to make sure the plane gets truly disabled, first
+	 * disable the self-refresh mode. The self-refresh enable bit in turn
+ * will be checked/applied by the HW only at the next frame start
+ * event which is after the vblank start event, so we need to have a
+ * wait-for-vblank between disabling the plane and the pipe.
+ */
+ if (HAS_GMCH(dev_priv) &&
+ intel_set_memory_cxsr(dev_priv, false))
+ intel_crtc_wait_for_next_vblank(crtc);
+
+ /*
+ * Gen2 reports pipe underruns whenever all planes are disabled.
+ * So disable underrun reporting before all the planes get disabled.
+ */
+ if (DISPLAY_VER(dev_priv) == 2 && !crtc_state->active_planes)
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
+
+ intel_plane_disable_arm(plane, crtc_state);
+ intel_crtc_wait_for_next_vblank(crtc);
+}
+
+unsigned int
+intel_plane_fence_y_offset(const struct intel_plane_state *plane_state)
+{
+ int x = 0, y = 0;
+
+ intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
+ plane_state->view.color_plane[0].offset, 0);
+
+ return y;
+}
+
+static int
+__intel_display_resume(struct drm_i915_private *i915,
+ struct drm_atomic_state *state,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct drm_crtc_state *crtc_state;
+ struct drm_crtc *crtc;
+ int i, ret;
+
+ intel_modeset_setup_hw_state(i915, ctx);
+ intel_vga_redisable(i915);
+
+ if (!state)
+ return 0;
+
+ /*
+	 * We've duplicated the state; pointers to the old state are invalid.
+ *
+ * Don't attempt to use the old state until we commit the duplicated state.
+ */
+ for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
+ /*
+ * Force recalculation even if we restore
+ * current state. With fast modeset this may not result
+ * in a modeset when the state is compatible.
+ */
+ crtc_state->mode_changed = true;
+ }
+
+ /* ignore any reset values/BIOS leftovers in the WM registers */
+ if (!HAS_GMCH(i915))
+ to_intel_atomic_state(state)->skip_intermediate_wm = true;
+
+ ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
+
+ drm_WARN_ON(&i915->drm, ret == -EDEADLK);
+
+ return ret;
+}
+
+static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
+{
+ return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
+ intel_has_gpu_reset(to_gt(dev_priv)));
+}
+
+void intel_display_prepare_reset(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = &dev_priv->drm;
+ struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
+ struct drm_atomic_state *state;
+ int ret;
+
+ if (!HAS_DISPLAY(dev_priv))
+ return;
+
+ /* reset doesn't touch the display */
+ if (!dev_priv->params.force_reset_modeset_test &&
+ !gpu_reset_clobbers_display(dev_priv))
+ return;
+
+ /* We have a modeset vs reset deadlock, defensively unbreak it. */
+ set_bit(I915_RESET_MODESET, &to_gt(dev_priv)->reset.flags);
+ smp_mb__after_atomic();
+ wake_up_bit(&to_gt(dev_priv)->reset.flags, I915_RESET_MODESET);
+
+ if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Modeset potentially stuck, unbreaking through wedging\n");
+ intel_gt_set_wedged(to_gt(dev_priv));
+ }
+
+ /*
+ * Need mode_config.mutex so that we don't
+ * trample ongoing ->detect() and whatnot.
+ */
+ mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_acquire_init(ctx, 0);
+ while (1) {
+ ret = drm_modeset_lock_all_ctx(dev, ctx);
+ if (ret != -EDEADLK)
+ break;
+
+ drm_modeset_backoff(ctx);
+ }
+ /*
+ * Disabling the crtcs gracefully seems nicer. Also the
+ * g33 docs say we should at least disable all the planes.
+ */
+ state = drm_atomic_helper_duplicate_state(dev, ctx);
+ if (IS_ERR(state)) {
+ ret = PTR_ERR(state);
+ drm_err(&dev_priv->drm, "Duplicating state failed with %i\n",
+ ret);
+ return;
+ }
+
+ ret = drm_atomic_helper_disable_all(dev, ctx);
+ if (ret) {
+ drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
+ ret);
+ drm_atomic_state_put(state);
+ return;
+ }
+
+ dev_priv->modeset_restore_state = state;
+ state->acquire_ctx = ctx;
+}
+
+void intel_display_finish_reset(struct drm_i915_private *i915)
+{
+ struct drm_modeset_acquire_ctx *ctx = &i915->reset_ctx;
+ struct drm_atomic_state *state;
+ int ret;
+
+ if (!HAS_DISPLAY(i915))
+ return;
+
+ /* reset doesn't touch the display */
+ if (!test_bit(I915_RESET_MODESET, &to_gt(i915)->reset.flags))
+ return;
+
+ state = fetch_and_zero(&i915->modeset_restore_state);
+ if (!state)
+ goto unlock;
+
+ /* reset doesn't touch the display */
+ if (!gpu_reset_clobbers_display(i915)) {
+ /* for testing only restore the display */
+ ret = __intel_display_resume(i915, state, ctx);
+ if (ret)
+ drm_err(&i915->drm,
+ "Restoring old state failed with %i\n", ret);
+ } else {
+ /*
+ * The display has been reset as well,
+		 * so we need a full re-initialization.
+ */
+ intel_pps_unlock_regs_wa(i915);
+ intel_modeset_init_hw(i915);
+ intel_init_clock_gating(i915);
+ intel_hpd_init(i915);
+
+ ret = __intel_display_resume(i915, state, ctx);
+ if (ret)
+ drm_err(&i915->drm,
+ "Restoring old state failed with %i\n", ret);
+
+ intel_hpd_poll_disable(i915);
+ }
+
+ drm_atomic_state_put(state);
+unlock:
+ drm_modeset_drop_locks(ctx);
+ drm_modeset_acquire_fini(ctx);
+ mutex_unlock(&i915->drm.mode_config.mutex);
+
+ clear_bit_unlock(I915_RESET_MODESET, &to_gt(i915)->reset.flags);
+}
+
+static void icl_set_pipe_chicken(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ u32 tmp;
+
+ tmp = intel_de_read(dev_priv, PIPE_CHICKEN(pipe));
+
+ /*
+ * Display WA #1153: icl
+ * enable hardware to bypass the alpha math
+ * and rounding for per-pixel values 00 and 0xff
+ */
+ tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
+ /*
+ * Display WA # 1605353570: icl
+ * Set the pixel rounding bit to 1 for allowing
+ * passthrough of Frame buffer pixels unmodified
+ * across pipe
+ */
+ tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
+
+ /*
+ * Underrun recovery must always be disabled on display 13+.
+ * DG2 chicken bit meaning is inverted compared to other platforms.
+ */
+ if (IS_DG2(dev_priv))
+ tmp &= ~UNDERRUN_RECOVERY_ENABLE_DG2;
+ else if (DISPLAY_VER(dev_priv) >= 13)
+ tmp |= UNDERRUN_RECOVERY_DISABLE_ADLP;
+
+ /* Wa_14010547955:dg2 */
+ if (IS_DG2_DISPLAY_STEP(dev_priv, STEP_B0, STEP_FOREVER))
+ tmp |= DG2_RENDER_CCSTAG_4_3_EN;
+
+ intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp);
+}
+
+bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
+{
+ struct drm_crtc *crtc;
+ bool cleanup_done;
+
+ drm_for_each_crtc(crtc, &dev_priv->drm) {
+		struct drm_crtc_commit *commit;
+
+		spin_lock(&crtc->commit_lock);
+ commit = list_first_entry_or_null(&crtc->commit_list,
+ struct drm_crtc_commit, commit_entry);
+ cleanup_done = commit ?
+ try_wait_for_completion(&commit->cleanup_done) : true;
+ spin_unlock(&crtc->commit_lock);
+
+ if (cleanup_done)
+ continue;
+
+ intel_crtc_wait_for_next_vblank(to_intel_crtc(crtc));
+
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * Finds the encoder associated with the given CRTC. This can only be
+ * used when we know that the CRTC isn't feeding multiple encoders!
+ */
+struct intel_encoder *
+intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
+ const struct intel_crtc_state *crtc_state)
+{
+ const struct drm_connector_state *connector_state;
+ const struct drm_connector *connector;
+ struct intel_encoder *encoder = NULL;
+ struct intel_crtc *master_crtc;
+ int num_encoders = 0;
+ int i;
+
+ master_crtc = intel_master_crtc(crtc_state);
+
+ for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
+ if (connector_state->crtc != &master_crtc->base)
+ continue;
+
+ encoder = to_intel_encoder(connector_state->best_encoder);
+ num_encoders++;
+ }
+
+ drm_WARN(state->base.dev, num_encoders != 1,
+ "%d encoders for pipe %c\n",
+ num_encoders, pipe_name(master_crtc->pipe));
+
+ return encoder;
+}
+
+static void cpt_verify_modeset(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ i915_reg_t dslreg = PIPEDSL(pipe);
+ u32 temp;
+
+ temp = intel_de_read(dev_priv, dslreg);
+ udelay(500);
+ if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5)) {
+ if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5))
+ drm_err(&dev_priv->drm,
+ "mode set failed: pipe %c stuck\n",
+ pipe_name(pipe));
+ }
+}
+
+static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
+ enum pipe pipe = crtc->pipe;
+ int width = drm_rect_width(dst);
+ int height = drm_rect_height(dst);
+ int x = dst->x1;
+ int y = dst->y1;
+
+ if (!crtc_state->pch_pfit.enabled)
+ return;
+
+ /* Force use of hard-coded filter coefficients
+ * as some pre-programmed values are broken,
+ * e.g. x201.
+ */
+ if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
+ intel_de_write_fw(dev_priv, PF_CTL(pipe), PF_ENABLE |
+ PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe));
+ else
+ intel_de_write_fw(dev_priv, PF_CTL(pipe), PF_ENABLE |
+ PF_FILTER_MED_3x3);
+ intel_de_write_fw(dev_priv, PF_WIN_POS(pipe), x << 16 | y);
+ intel_de_write_fw(dev_priv, PF_WIN_SZ(pipe), width << 16 | height);
+}
+
+static void intel_crtc_dpms_overlay_disable(struct intel_crtc *crtc)
+{
+ if (crtc->overlay)
+ (void) intel_overlay_switch_off(crtc->overlay);
+
+ /* Let userspace switch the overlay on again. In most cases userspace
+ * has to recompute where to put it anyway.
+ */
+}
+
+static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+
+ if (!crtc_state->nv12_planes)
+ return false;
+
+ /* WA Display #0827: Gen9:all */
+ if (DISPLAY_VER(dev_priv) == 9)
+ return true;
+
+ return false;
+}
+
+static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+
+ /* Wa_2006604312:icl,ehl */
+ if (crtc_state->scaler_state.scaler_users > 0 && DISPLAY_VER(dev_priv) == 11)
+ return true;
+
+ return false;
+}
+
+static bool needs_cursorclk_wa(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+
+ /* Wa_1604331009:icl,jsl,ehl */
+ if (is_hdr_mode(crtc_state) &&
+ crtc_state->active_planes & BIT(PLANE_CURSOR) &&
+ DISPLAY_VER(dev_priv) == 11)
+ return true;
+
+ return false;
+}
+
+static void intel_async_flip_vtd_wa(struct drm_i915_private *i915,
+ enum pipe pipe, bool enable)
+{
+ if (DISPLAY_VER(i915) == 9) {
+ /*
+ * "Plane N strech max must be programmed to 11b (x1)
+ * when Async flips are enabled on that plane."
+ */
+ intel_de_rmw(i915, CHICKEN_PIPESL_1(pipe),
+ SKL_PLANE1_STRETCH_MAX_MASK,
+ enable ? SKL_PLANE1_STRETCH_MAX_X1 : SKL_PLANE1_STRETCH_MAX_X8);
+ } else {
+ /* Also needed on HSW/BDW albeit undocumented */
+ intel_de_rmw(i915, CHICKEN_PIPESL_1(pipe),
+ HSW_PRI_STRETCH_MAX_MASK,
+ enable ? HSW_PRI_STRETCH_MAX_X1 : HSW_PRI_STRETCH_MAX_X8);
+ }
+}
+
+static bool needs_async_flip_vtd_wa(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+
+ return crtc_state->uapi.async_flip && i915_vtd_active(i915) &&
+ (DISPLAY_VER(i915) == 9 || IS_BROADWELL(i915) || IS_HASWELL(i915));
+}
+
+static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
+ const struct intel_crtc_state *new_crtc_state)
+{
+ return (!old_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state)) &&
+ new_crtc_state->active_planes;
+}
+
+static bool planes_disabling(const struct intel_crtc_state *old_crtc_state,
+ const struct intel_crtc_state *new_crtc_state)
+{
+ return old_crtc_state->active_planes &&
+ (!new_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state));
+}
+
+static void intel_post_plane_update(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ enum pipe pipe = crtc->pipe;
+
+ intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits);
+
+ if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
+ intel_update_watermarks(dev_priv);
+
+ hsw_ips_post_update(state, crtc);
+ intel_fbc_post_update(state, crtc);
+
+ if (needs_async_flip_vtd_wa(old_crtc_state) &&
+ !needs_async_flip_vtd_wa(new_crtc_state))
+ intel_async_flip_vtd_wa(dev_priv, pipe, false);
+
+ if (needs_nv12_wa(old_crtc_state) &&
+ !needs_nv12_wa(new_crtc_state))
+ skl_wa_827(dev_priv, pipe, false);
+
+ if (needs_scalerclk_wa(old_crtc_state) &&
+ !needs_scalerclk_wa(new_crtc_state))
+ icl_wa_scalerclkgating(dev_priv, pipe, false);
+
+ if (needs_cursorclk_wa(old_crtc_state) &&
+ !needs_cursorclk_wa(new_crtc_state))
+ icl_wa_cursorclkgating(dev_priv, pipe, false);
+
+ intel_drrs_activate(new_crtc_state);
+}
+
+static void intel_crtc_enable_flip_done(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ u8 update_planes = crtc_state->update_planes;
+ const struct intel_plane_state *plane_state;
+ struct intel_plane *plane;
+ int i;
+
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+ if (plane->pipe == crtc->pipe &&
+ update_planes & BIT(plane->id))
+ plane->enable_flip_done(plane);
+ }
+}
+
+static void intel_crtc_disable_flip_done(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ u8 update_planes = crtc_state->update_planes;
+ const struct intel_plane_state *plane_state;
+ struct intel_plane *plane;
+ int i;
+
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+ if (plane->pipe == crtc->pipe &&
+ update_planes & BIT(plane->id))
+ plane->disable_flip_done(plane);
+ }
+}
+
+static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ u8 update_planes = new_crtc_state->update_planes;
+ const struct intel_plane_state *old_plane_state;
+ struct intel_plane *plane;
+ bool need_vbl_wait = false;
+ int i;
+
+ for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
+ if (plane->need_async_flip_disable_wa &&
+ plane->pipe == crtc->pipe &&
+ update_planes & BIT(plane->id)) {
+ /*
+ * Apart from the async flip bit we want to
+ * preserve the old state for the plane.
+ */
+ plane->async_flip(plane, old_crtc_state,
+ old_plane_state, false);
+ need_vbl_wait = true;
+ }
+ }
+
+ if (need_vbl_wait)
+ intel_crtc_wait_for_next_vblank(crtc);
+}
+
+static void intel_pre_plane_update(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ enum pipe pipe = crtc->pipe;
+
+ intel_drrs_deactivate(old_crtc_state);
+
+ intel_psr_pre_plane_update(state, crtc);
+
+ if (hsw_ips_pre_update(state, crtc))
+ intel_crtc_wait_for_next_vblank(crtc);
+
+ if (intel_fbc_pre_update(state, crtc))
+ intel_crtc_wait_for_next_vblank(crtc);
+
+ if (!needs_async_flip_vtd_wa(old_crtc_state) &&
+ needs_async_flip_vtd_wa(new_crtc_state))
+ intel_async_flip_vtd_wa(dev_priv, pipe, true);
+
+ /* Display WA 827 */
+ if (!needs_nv12_wa(old_crtc_state) &&
+ needs_nv12_wa(new_crtc_state))
+ skl_wa_827(dev_priv, pipe, true);
+
+ /* Wa_2006604312:icl,ehl */
+ if (!needs_scalerclk_wa(old_crtc_state) &&
+ needs_scalerclk_wa(new_crtc_state))
+ icl_wa_scalerclkgating(dev_priv, pipe, true);
+
+ /* Wa_1604331009:icl,jsl,ehl */
+ if (!needs_cursorclk_wa(old_crtc_state) &&
+ needs_cursorclk_wa(new_crtc_state))
+ icl_wa_cursorclkgating(dev_priv, pipe, true);
+
+ /*
+ * Vblank time updates from the shadow to live plane control register
+ * are blocked if the memory self-refresh mode is active at that
+ * moment. So to make sure the plane gets truly disabled, disable
+ * first the self-refresh mode. The self-refresh enable bit in turn
+ * will be checked/applied by the HW only at the next frame start
+ * event which is after the vblank start event, so we need to have a
+ * wait-for-vblank between disabling the plane and the pipe.
+ */
+ if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
+ new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
+ intel_crtc_wait_for_next_vblank(crtc);
+
+ /*
+ * IVB workaround: must disable low power watermarks for at least
+ * one frame before enabling scaling. LP watermarks can be re-enabled
+ * when scaling is disabled.
+ *
+ * WaCxSRDisabledForSpriteScaling:ivb
+ */
+ if (old_crtc_state->hw.active &&
+ new_crtc_state->disable_lp_wm && ilk_disable_lp_wm(dev_priv))
+ intel_crtc_wait_for_next_vblank(crtc);
+
+ /*
+ * If we're doing a modeset we don't need to do any
+ * pre-vblank watermark programming here.
+ */
+ if (!intel_crtc_needs_modeset(new_crtc_state)) {
+ /*
+ * For platforms that support atomic watermarks, program the
+ * 'intermediate' watermarks immediately. On pre-gen9 platforms, these
+ * will be the intermediate values that are safe for both pre- and
+ * post- vblank; when vblank happens, the 'active' values will be set
+ * to the final 'target' values and we'll do this again to get the
+ * optimal watermarks. For gen9+ platforms, the values we program here
+ * will be the final target values which will get automatically latched
+ * at vblank time; no further programming will be necessary.
+ *
+ * If a platform hasn't been transitioned to atomic watermarks yet,
+ * we'll continue to update watermarks the old way, if flags tell
+ * us to.
+ */
+ if (!intel_initial_watermarks(state, crtc))
+ if (new_crtc_state->update_wm_pre)
+ intel_update_watermarks(dev_priv);
+ }
+
+ /*
+ * Gen2 reports pipe underruns whenever all planes are disabled.
+ * So disable underrun reporting before all the planes get disabled.
+ *
+ * We do this after .initial_watermarks() so that we have a
+ * chance of catching underruns with the intermediate watermarks
+ * vs. the old plane configuration.
+ */
+ if (DISPLAY_VER(dev_priv) == 2 && planes_disabling(old_crtc_state, new_crtc_state))
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
+
+ /*
+ * WA for platforms where async address update enable bit
+ * is double buffered and only latched at start of vblank.
+ */
+ if (old_crtc_state->uapi.async_flip && !new_crtc_state->uapi.async_flip)
+ intel_crtc_async_flip_disable_wa(state, crtc);
+}
+
+static void intel_crtc_disable_planes(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ unsigned int update_mask = new_crtc_state->update_planes;
+ const struct intel_plane_state *old_plane_state;
+ struct intel_plane *plane;
+ unsigned fb_bits = 0;
+ int i;
+
+ intel_crtc_dpms_overlay_disable(crtc);
+
+ for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
+ if (crtc->pipe != plane->pipe ||
+ !(update_mask & BIT(plane->id)))
+ continue;
+
+ intel_plane_disable_arm(plane, new_crtc_state);
+
+ if (old_plane_state->uapi.visible)
+ fb_bits |= plane->frontbuffer_bit;
+ }
+
+ intel_frontbuffer_flip(dev_priv, fb_bits);
+}
+
+/*
+ * intel_connector_primary_encoder - get the primary encoder for a connector
+ * @connector: connector for which to return the encoder
+ *
+ * Returns the primary encoder for a connector. There is a 1:1 mapping from
+ * all connectors to their encoder, except for DP-MST connectors which have
+ * both a virtual and a primary encoder. These DP-MST primary encoders can be
+ * pointed to by as many DP-MST connectors as there are pipes.
+ */
+static struct intel_encoder *
+intel_connector_primary_encoder(struct intel_connector *connector)
+{
+ struct intel_encoder *encoder;
+
+ if (connector->mst_port)
+ return &dp_to_dig_port(connector->mst_port)->base;
+
+ encoder = intel_attached_encoder(connector);
+ drm_WARN_ON(connector->base.dev, !encoder);
+
+ return encoder;
+}
+
+static void intel_encoders_update_prepare(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_crtc_state *new_crtc_state, *old_crtc_state;
+ struct intel_crtc *crtc;
+ struct drm_connector_state *new_conn_state;
+ struct drm_connector *connector;
+ int i;
+
+ /*
+ * Make sure the DPLL state is up-to-date for fastset TypeC ports after non-blocking commits.
+ * TODO: Update the DPLL state for all cases in the encoder->update_prepare() hook.
+ */
+ if (i915->display.dpll.mgr) {
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ if (intel_crtc_needs_modeset(new_crtc_state))
+ continue;
+
+ new_crtc_state->shared_dpll = old_crtc_state->shared_dpll;
+ new_crtc_state->dpll_hw_state = old_crtc_state->dpll_hw_state;
+ }
+ }
+
+ if (!state->modeset)
+ return;
+
+ for_each_new_connector_in_state(&state->base, connector, new_conn_state,
+ i) {
+ struct intel_connector *intel_connector;
+ struct intel_encoder *encoder;
+ struct intel_crtc *crtc;
+
+ if (!intel_connector_needs_modeset(state, connector))
+ continue;
+
+ intel_connector = to_intel_connector(connector);
+ encoder = intel_connector_primary_encoder(intel_connector);
+ if (!encoder->update_prepare)
+ continue;
+
+ crtc = new_conn_state->crtc ?
+ to_intel_crtc(new_conn_state->crtc) : NULL;
+ encoder->update_prepare(state, encoder, crtc);
+ }
+}
+
+static void intel_encoders_update_complete(struct intel_atomic_state *state)
+{
+ struct drm_connector_state *new_conn_state;
+ struct drm_connector *connector;
+ int i;
+
+ if (!state->modeset)
+ return;
+
+ for_each_new_connector_in_state(&state->base, connector, new_conn_state,
+ i) {
+ struct intel_connector *intel_connector;
+ struct intel_encoder *encoder;
+ struct intel_crtc *crtc;
+
+ if (!intel_connector_needs_modeset(state, connector))
+ continue;
+
+ intel_connector = to_intel_connector(connector);
+ encoder = intel_connector_primary_encoder(intel_connector);
+ if (!encoder->update_complete)
+ continue;
+
+ crtc = new_conn_state->crtc ?
+ to_intel_crtc(new_conn_state->crtc) : NULL;
+ encoder->update_complete(state, encoder, crtc);
+ }
+}
+
+static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct drm_connector_state *conn_state;
+ struct drm_connector *conn;
+ int i;
+
+ for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
+ struct intel_encoder *encoder =
+ to_intel_encoder(conn_state->best_encoder);
+
+ if (conn_state->crtc != &crtc->base)
+ continue;
+
+ if (encoder->pre_pll_enable)
+ encoder->pre_pll_enable(state, encoder,
+ crtc_state, conn_state);
+ }
+}
+
+static void intel_encoders_pre_enable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct drm_connector_state *conn_state;
+ struct drm_connector *conn;
+ int i;
+
+ for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
+ struct intel_encoder *encoder =
+ to_intel_encoder(conn_state->best_encoder);
+
+ if (conn_state->crtc != &crtc->base)
+ continue;
+
+ if (encoder->pre_enable)
+ encoder->pre_enable(state, encoder,
+ crtc_state, conn_state);
+ }
+}
+
+static void intel_encoders_enable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct drm_connector_state *conn_state;
+ struct drm_connector *conn;
+ int i;
+
+ for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
+ struct intel_encoder *encoder =
+ to_intel_encoder(conn_state->best_encoder);
+
+ if (conn_state->crtc != &crtc->base)
+ continue;
+
+ if (encoder->enable)
+ encoder->enable(state, encoder,
+ crtc_state, conn_state);
+ intel_opregion_notify_encoder(encoder, true);
+ }
+}
+
+static void intel_encoders_disable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ const struct drm_connector_state *old_conn_state;
+ struct drm_connector *conn;
+ int i;
+
+ for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
+ struct intel_encoder *encoder =
+ to_intel_encoder(old_conn_state->best_encoder);
+
+ if (old_conn_state->crtc != &crtc->base)
+ continue;
+
+ intel_opregion_notify_encoder(encoder, false);
+ if (encoder->disable)
+ encoder->disable(state, encoder,
+ old_crtc_state, old_conn_state);
+ }
+}
+
+static void intel_encoders_post_disable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ const struct drm_connector_state *old_conn_state;
+ struct drm_connector *conn;
+ int i;
+
+ for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
+ struct intel_encoder *encoder =
+ to_intel_encoder(old_conn_state->best_encoder);
+
+ if (old_conn_state->crtc != &crtc->base)
+ continue;
+
+ if (encoder->post_disable)
+ encoder->post_disable(state, encoder,
+ old_crtc_state, old_conn_state);
+ }
+}
+
+static void intel_encoders_post_pll_disable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ const struct drm_connector_state *old_conn_state;
+ struct drm_connector *conn;
+ int i;
+
+ for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
+ struct intel_encoder *encoder =
+ to_intel_encoder(old_conn_state->best_encoder);
+
+ if (old_conn_state->crtc != &crtc->base)
+ continue;
+
+ if (encoder->post_pll_disable)
+ encoder->post_pll_disable(state, encoder,
+ old_crtc_state, old_conn_state);
+ }
+}
+
+static void intel_encoders_update_pipe(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct drm_connector_state *conn_state;
+ struct drm_connector *conn;
+ int i;
+
+ for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
+ struct intel_encoder *encoder =
+ to_intel_encoder(conn_state->best_encoder);
+
+ if (conn_state->crtc != &crtc->base)
+ continue;
+
+ if (encoder->update_pipe)
+ encoder->update_pipe(state, encoder,
+ crtc_state, conn_state);
+ }
+}
+
+static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct intel_plane *plane = to_intel_plane(crtc->base.primary);
+
+ plane->disable_arm(plane, crtc_state);
+}
+
+static void ilk_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+
+ if (crtc_state->has_pch_encoder) {
+ intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
+ &crtc_state->fdi_m_n);
+ } else if (intel_crtc_has_dp_encoder(crtc_state)) {
+ intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
+ &crtc_state->dp_m_n);
+ intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder,
+ &crtc_state->dp_m2_n2);
+ }
+
+ intel_set_transcoder_timings(crtc_state);
+
+ ilk_set_pipeconf(crtc_state);
+}
+
+static void ilk_crtc_enable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+
+ if (drm_WARN_ON(&dev_priv->drm, crtc->active))
+ return;
+
+ /*
+ * Sometimes spurious CPU pipe underruns happen during FDI
+ * training, at least with VGA+HDMI cloning. Suppress them.
+ *
+ * On ILK we get occasional spurious CPU pipe underruns
+ * between eDP port A enable and vdd enable. Also PCH port
+ * enable seems to result in the occasional CPU pipe underrun.
+ *
+ * Spurious PCH underruns also occur during PCH enabling.
+ */
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
+ intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
+
+ ilk_configure_cpu_transcoder(new_crtc_state);
+
+ intel_set_pipe_src_size(new_crtc_state);
+
+ crtc->active = true;
+
+ intel_encoders_pre_enable(state, crtc);
+
+ if (new_crtc_state->has_pch_encoder) {
+ ilk_pch_pre_enable(state, crtc);
+ } else {
+ assert_fdi_tx_disabled(dev_priv, pipe);
+ assert_fdi_rx_disabled(dev_priv, pipe);
+ }
+
+ ilk_pfit_enable(new_crtc_state);
+
+ /*
+ * On ILK+ the LUT must be loaded before the pipe is running but with
+ * clocks enabled
+ */
+ intel_color_load_luts(new_crtc_state);
+ intel_color_commit_noarm(new_crtc_state);
+ intel_color_commit_arm(new_crtc_state);
+ /* update DSPCNTR to configure gamma for pipe bottom color */
+ intel_disable_primary_plane(new_crtc_state);
+
+ intel_initial_watermarks(state, crtc);
+ intel_enable_transcoder(new_crtc_state);
+
+ if (new_crtc_state->has_pch_encoder)
+ ilk_pch_enable(state, crtc);
+
+ intel_crtc_vblank_on(new_crtc_state);
+
+ intel_encoders_enable(state, crtc);
+
+ if (HAS_PCH_CPT(dev_priv))
+ cpt_verify_modeset(dev_priv, pipe);
+
+ /*
+ * Must wait for vblank to avoid spurious PCH FIFO underruns.
+ * And a second vblank wait is needed at least on ILK with
+ * some interlaced HDMI modes. Let's do the double wait always
+ * in case there are more corner cases we don't know about.
+ */
+ if (new_crtc_state->has_pch_encoder) {
+ intel_crtc_wait_for_next_vblank(crtc);
+ intel_crtc_wait_for_next_vblank(crtc);
+ }
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
+ intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
+}
+
+static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
+ enum pipe pipe, bool apply)
+{
+ u32 val = intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe));
+ u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
+
+ if (apply)
+ val |= mask;
+ else
+ val &= ~mask;
+
+ intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), val);
+}
+
+static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+ intel_de_write(dev_priv, WM_LINETIME(crtc->pipe),
+ HSW_LINETIME(crtc_state->linetime) |
+ HSW_IPS_LINETIME(crtc_state->ips_linetime));
+}
+
+static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum transcoder transcoder = crtc_state->cpu_transcoder;
+ i915_reg_t reg = DISPLAY_VER(dev_priv) >= 14 ? MTL_CHICKEN_TRANS(transcoder) :
+ CHICKEN_TRANS(transcoder);
+ u32 val;
+
+ val = intel_de_read(dev_priv, reg);
+ val &= ~HSW_FRAME_START_DELAY_MASK;
+ val |= HSW_FRAME_START_DELAY(crtc_state->framestart_delay - 1);
+ intel_de_write(dev_priv, reg, val);
+}
+
+static void icl_ddi_bigjoiner_pre_enable(struct intel_atomic_state *state,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *master_crtc = intel_master_crtc(crtc_state);
+
+ /*
+ * Enable sequence steps 1-7 on bigjoiner master
+ */
+ if (intel_crtc_is_bigjoiner_slave(crtc_state))
+ intel_encoders_pre_pll_enable(state, master_crtc);
+
+ if (crtc_state->shared_dpll)
+ intel_enable_shared_dpll(crtc_state);
+
+ if (intel_crtc_is_bigjoiner_slave(crtc_state))
+ intel_encoders_pre_enable(state, master_crtc);
+}
+
+static void hsw_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+
+ if (crtc_state->has_pch_encoder) {
+ intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
+ &crtc_state->fdi_m_n);
+ } else if (intel_crtc_has_dp_encoder(crtc_state)) {
+ intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
+ &crtc_state->dp_m_n);
+ intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder,
+ &crtc_state->dp_m2_n2);
+ }
+
+ intel_set_transcoder_timings(crtc_state);
+
+ if (cpu_transcoder != TRANSCODER_EDP)
+ intel_de_write(dev_priv, PIPE_MULT(cpu_transcoder),
+ crtc_state->pixel_multiplier - 1);
+
+ hsw_set_frame_start_delay(crtc_state);
+
+ hsw_set_transconf(crtc_state);
+}
+
+static void hsw_crtc_enable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe, hsw_workaround_pipe;
+ enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
+ bool psl_clkgate_wa;
+
+ if (drm_WARN_ON(&dev_priv->drm, crtc->active))
+ return;
+
+ if (!new_crtc_state->bigjoiner_pipes) {
+ intel_encoders_pre_pll_enable(state, crtc);
+
+ if (new_crtc_state->shared_dpll)
+ intel_enable_shared_dpll(new_crtc_state);
+
+ intel_encoders_pre_enable(state, crtc);
+ } else {
+ icl_ddi_bigjoiner_pre_enable(state, new_crtc_state);
+ }
+
+ intel_dsc_enable(new_crtc_state);
+
+ if (DISPLAY_VER(dev_priv) >= 13)
+ intel_uncompressed_joiner_enable(new_crtc_state);
+
+ intel_set_pipe_src_size(new_crtc_state);
+ if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
+ bdw_set_pipemisc(new_crtc_state);
+
+ if (!intel_crtc_is_bigjoiner_slave(new_crtc_state) &&
+ !transcoder_is_dsi(cpu_transcoder))
+ hsw_configure_cpu_transcoder(new_crtc_state);
+
+ crtc->active = true;
+
+ /* Display WA #1180: WaDisableScalarClockGating: glk */
+ psl_clkgate_wa = DISPLAY_VER(dev_priv) == 10 &&
+ new_crtc_state->pch_pfit.enabled;
+ if (psl_clkgate_wa)
+ glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);
+
+ if (DISPLAY_VER(dev_priv) >= 9)
+ skl_pfit_enable(new_crtc_state);
+ else
+ ilk_pfit_enable(new_crtc_state);
+
+ /*
+ * On ILK+ the LUT must be loaded before the pipe is running but with
+ * clocks enabled
+ */
+ intel_color_load_luts(new_crtc_state);
+ intel_color_commit_noarm(new_crtc_state);
+ intel_color_commit_arm(new_crtc_state);
+ /* update DSPCNTR to configure gamma/csc for pipe bottom color */
+ if (DISPLAY_VER(dev_priv) < 9)
+ intel_disable_primary_plane(new_crtc_state);
+
+ hsw_set_linetime_wm(new_crtc_state);
+
+ if (DISPLAY_VER(dev_priv) >= 11)
+ icl_set_pipe_chicken(new_crtc_state);
+
+ intel_initial_watermarks(state, crtc);
+
+ if (intel_crtc_is_bigjoiner_slave(new_crtc_state))
+ intel_crtc_vblank_on(new_crtc_state);
+
+ intel_encoders_enable(state, crtc);
+
+ if (psl_clkgate_wa) {
+ intel_crtc_wait_for_next_vblank(crtc);
+ glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
+ }
+
+ /*
+ * If we change the relative order between pipe/planes enabling, we
+ * need to change the workaround.
+ */
+ hsw_workaround_pipe = new_crtc_state->hsw_workaround_pipe;
+ if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
+ struct intel_crtc *wa_crtc;
+
+ wa_crtc = intel_crtc_for_pipe(dev_priv, hsw_workaround_pipe);
+
+ intel_crtc_wait_for_next_vblank(wa_crtc);
+ intel_crtc_wait_for_next_vblank(wa_crtc);
+ }
+}
+
+void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+
+ /*
+ * To avoid upsetting the power well on Haswell, only disable the pfit
+ * if it's in use. The hw state code will make sure we get this right.
+ */
+ if (!old_crtc_state->pch_pfit.enabled)
+ return;
+
+ intel_de_write_fw(dev_priv, PF_CTL(pipe), 0);
+ intel_de_write_fw(dev_priv, PF_WIN_POS(pipe), 0);
+ intel_de_write_fw(dev_priv, PF_WIN_SZ(pipe), 0);
+}
+
+static void ilk_crtc_disable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+
+ /*
+ * Sometimes spurious CPU pipe underruns happen when the
+ * pipe is already disabled, but FDI RX/TX is still enabled.
+ * Happens at least with VGA+HDMI cloning. Suppress them.
+ */
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
+ intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
+
+ intel_encoders_disable(state, crtc);
+
+ intel_crtc_vblank_off(old_crtc_state);
+
+ intel_disable_transcoder(old_crtc_state);
+
+ ilk_pfit_disable(old_crtc_state);
+
+ if (old_crtc_state->has_pch_encoder)
+ ilk_pch_disable(state, crtc);
+
+ intel_encoders_post_disable(state, crtc);
+
+ if (old_crtc_state->has_pch_encoder)
+ ilk_pch_post_disable(state, crtc);
+
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
+ intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
+}
+
+static void hsw_crtc_disable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+
+ /*
+ * FIXME collapse everything to one hook.
+ * Need care with mst->ddi interactions.
+ */
+ if (!intel_crtc_is_bigjoiner_slave(old_crtc_state)) {
+ intel_encoders_disable(state, crtc);
+ intel_encoders_post_disable(state, crtc);
+ }
+}
+
+static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+ if (!crtc_state->gmch_pfit.control)
+ return;
+
+ /*
+ * The panel fitter should only be adjusted whilst the pipe is disabled,
+ * according to the register description and PRM.
+ */
+ drm_WARN_ON(&dev_priv->drm,
+ intel_de_read(dev_priv, PFIT_CONTROL) & PFIT_ENABLE);
+ assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder);
+
+ intel_de_write(dev_priv, PFIT_PGM_RATIOS,
+ crtc_state->gmch_pfit.pgm_ratios);
+ intel_de_write(dev_priv, PFIT_CONTROL, crtc_state->gmch_pfit.control);
+
+ /*
+ * Border color in case we don't scale up to the full screen. Black by
+ * default, change to something else for debugging.
+ */
+ intel_de_write(dev_priv, BCLRPAT(crtc->pipe), 0);
+}
+
+bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
+{
+ if (phy == PHY_NONE)
+ return false;
+ else if (IS_ALDERLAKE_S(dev_priv))
+ return phy <= PHY_E;
+ else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
+ return phy <= PHY_D;
+ else if (IS_JSL_EHL(dev_priv))
+ return phy <= PHY_C;
+ else if (IS_ALDERLAKE_P(dev_priv) || IS_DISPLAY_VER(dev_priv, 11, 12))
+ return phy <= PHY_B;
+ else
+ /*
+ * DG2 outputs labelled as "combo PHY" in the bspec use
+ * SNPS PHYs with completely different programming,
+ * hence we always return false here.
+ */
+ return false;
+}
+
+bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
+{
+ if (IS_DG2(dev_priv))
+ /* DG2's "TC1" output uses a SNPS PHY */
+ return false;
+ else if (IS_ALDERLAKE_P(dev_priv))
+ return phy >= PHY_F && phy <= PHY_I;
+ else if (IS_TIGERLAKE(dev_priv))
+ return phy >= PHY_D && phy <= PHY_I;
+ else if (IS_ICELAKE(dev_priv))
+ return phy >= PHY_C && phy <= PHY_F;
+ else
+ return false;
+}
+
+bool intel_phy_is_snps(struct drm_i915_private *dev_priv, enum phy phy)
+{
+ if (phy == PHY_NONE)
+ return false;
+ else if (IS_DG2(dev_priv))
+ /*
+ * All four "combo" ports and the TC1 port (PHY E) use
+ * Synopsys PHYs.
+ */
+ return phy <= PHY_E;
+
+ return false;
+}
+
+enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
+{
+ if (DISPLAY_VER(i915) >= 13 && port >= PORT_D_XELPD)
+ return PHY_D + port - PORT_D_XELPD;
+ else if (DISPLAY_VER(i915) >= 13 && port >= PORT_TC1)
+ return PHY_F + port - PORT_TC1;
+ else if (IS_ALDERLAKE_S(i915) && port >= PORT_TC1)
+ return PHY_B + port - PORT_TC1;
+ else if ((IS_DG1(i915) || IS_ROCKETLAKE(i915)) && port >= PORT_TC1)
+ return PHY_C + port - PORT_TC1;
+ else if (IS_JSL_EHL(i915) && port == PORT_D)
+ return PHY_A;
+
+ return PHY_A + port - PORT_A;
+}
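+
+/*
+ * Examples of the mapping above (derived directly from the branches):
+ * on ADL-S PORT_TC1 maps to PHY_B and PORT_TC2 to PHY_C, on DG1/RKL
+ * the TC range instead starts at PHY_C, and on JSL/EHL the DDI-D
+ * output is muxed onto PHY_A.
+ */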
+
+enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
+{
+ if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port)))
+ return TC_PORT_NONE;
+
+ if (DISPLAY_VER(dev_priv) >= 12)
+ return TC_PORT_1 + port - PORT_TC1;
+ else
+ return TC_PORT_1 + port - PORT_C;
+}
+
+enum intel_display_power_domain
+intel_aux_power_domain(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+
+ if (intel_tc_port_in_tbt_alt_mode(dig_port))
+ return intel_display_power_tbt_aux_domain(i915, dig_port->aux_ch);
+
+ return intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch);
+}
+
+static void get_crtc_power_domains(struct intel_crtc_state *crtc_state,
+ struct intel_power_domain_mask *mask)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ struct drm_encoder *encoder;
+ enum pipe pipe = crtc->pipe;
+
+ bitmap_zero(mask->bits, POWER_DOMAIN_NUM);
+
+ if (!crtc_state->hw.active)
+ return;
+
+ set_bit(POWER_DOMAIN_PIPE(pipe), mask->bits);
+ set_bit(POWER_DOMAIN_TRANSCODER(cpu_transcoder), mask->bits);
+ if (crtc_state->pch_pfit.enabled ||
+ crtc_state->pch_pfit.force_thru)
+ set_bit(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe), mask->bits);
+
+ drm_for_each_encoder_mask(encoder, &dev_priv->drm,
+ crtc_state->uapi.encoder_mask) {
+ struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+
+ set_bit(intel_encoder->power_domain, mask->bits);
+ }
+
+ if (HAS_DDI(dev_priv) && crtc_state->has_audio)
+ set_bit(POWER_DOMAIN_AUDIO_MMIO, mask->bits);
+
+ if (crtc_state->shared_dpll)
+ set_bit(POWER_DOMAIN_DISPLAY_CORE, mask->bits);
+
+ if (crtc_state->dsc.compression_enable)
+ set_bit(intel_dsc_power_domain(crtc, cpu_transcoder), mask->bits);
+}
+
+void intel_modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state,
+ struct intel_power_domain_mask *old_domains)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum intel_display_power_domain domain;
+ struct intel_power_domain_mask domains, new_domains;
+
+ get_crtc_power_domains(crtc_state, &domains);
+
+ bitmap_andnot(new_domains.bits,
+ domains.bits,
+ crtc->enabled_power_domains.mask.bits,
+ POWER_DOMAIN_NUM);
+ bitmap_andnot(old_domains->bits,
+ crtc->enabled_power_domains.mask.bits,
+ domains.bits,
+ POWER_DOMAIN_NUM);
+
+ for_each_power_domain(domain, &new_domains)
+ intel_display_power_get_in_set(dev_priv,
+ &crtc->enabled_power_domains,
+ domain);
+}
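+
+/*
+ * The two bitmap_andnot() calls above are set differences: with
+ * wanted = domains and enabled = crtc->enabled_power_domains.mask,
+ * new_domains = wanted & ~enabled are the references grabbed here,
+ * while *old_domains = enabled & ~wanted are the references the
+ * caller drops via intel_modeset_put_crtc_power_domains() once the
+ * new state has been committed.
+ */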
+
+void intel_modeset_put_crtc_power_domains(struct intel_crtc *crtc,
+ struct intel_power_domain_mask *domains)
+{
+ intel_display_power_put_mask_in_set(to_i915(crtc->base.dev),
+ &crtc->enabled_power_domains,
+ domains);
+}
+
+static void i9xx_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+
+ if (intel_crtc_has_dp_encoder(crtc_state)) {
+ intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
+ &crtc_state->dp_m_n);
+ intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder,
+ &crtc_state->dp_m2_n2);
+ }
+
+ intel_set_transcoder_timings(crtc_state);
+
+ i9xx_set_pipeconf(crtc_state);
+}
+
+static void valleyview_crtc_enable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+
+ if (drm_WARN_ON(&dev_priv->drm, crtc->active))
+ return;
+
+ i9xx_configure_cpu_transcoder(new_crtc_state);
+
+ intel_set_pipe_src_size(new_crtc_state);
+
+ if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
+ intel_de_write(dev_priv, CHV_BLEND(pipe), CHV_BLEND_LEGACY);
+ intel_de_write(dev_priv, CHV_CANVAS(pipe), 0);
+ }
+
+ crtc->active = true;
+
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
+
+ intel_encoders_pre_pll_enable(state, crtc);
+
+ if (IS_CHERRYVIEW(dev_priv))
+ chv_enable_pll(new_crtc_state);
+ else
+ vlv_enable_pll(new_crtc_state);
+
+ intel_encoders_pre_enable(state, crtc);
+
+ i9xx_pfit_enable(new_crtc_state);
+
+ intel_color_load_luts(new_crtc_state);
+ intel_color_commit_noarm(new_crtc_state);
+ intel_color_commit_arm(new_crtc_state);
+ /* update DSPCNTR to configure gamma for pipe bottom color */
+ intel_disable_primary_plane(new_crtc_state);
+
+ intel_initial_watermarks(state, crtc);
+ intel_enable_transcoder(new_crtc_state);
+
+ intel_crtc_vblank_on(new_crtc_state);
+
+ intel_encoders_enable(state, crtc);
+}
+
+static void i9xx_crtc_enable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+
+ if (drm_WARN_ON(&dev_priv->drm, crtc->active))
+ return;
+
+ i9xx_configure_cpu_transcoder(new_crtc_state);
+
+ intel_set_pipe_src_size(new_crtc_state);
+
+ crtc->active = true;
+
+ if (DISPLAY_VER(dev_priv) != 2)
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
+
+ intel_encoders_pre_enable(state, crtc);
+
+ i9xx_enable_pll(new_crtc_state);
+
+ i9xx_pfit_enable(new_crtc_state);
+
+ intel_color_load_luts(new_crtc_state);
+ intel_color_commit_noarm(new_crtc_state);
+ intel_color_commit_arm(new_crtc_state);
+ /* update DSPCNTR to configure gamma for pipe bottom color */
+ intel_disable_primary_plane(new_crtc_state);
+
+ if (!intel_initial_watermarks(state, crtc))
+ intel_update_watermarks(dev_priv);
+ intel_enable_transcoder(new_crtc_state);
+
+ intel_crtc_vblank_on(new_crtc_state);
+
+ intel_encoders_enable(state, crtc);
+
+ /* prevents spurious underruns */
+ if (DISPLAY_VER(dev_priv) == 2)
+ intel_crtc_wait_for_next_vblank(crtc);
+}
+
+static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+ if (!old_crtc_state->gmch_pfit.control)
+ return;
+
+ assert_transcoder_disabled(dev_priv, old_crtc_state->cpu_transcoder);
+
+ drm_dbg_kms(&dev_priv->drm, "disabling pfit, current: 0x%08x\n",
+ intel_de_read(dev_priv, PFIT_CONTROL));
+ intel_de_write(dev_priv, PFIT_CONTROL, 0);
+}
+
+static void i9xx_crtc_disable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+
+ /*
+ * On gen2 planes are double buffered but the pipe isn't, so we must
+ * wait for planes to fully turn off before disabling the pipe.
+ */
+ if (DISPLAY_VER(dev_priv) == 2)
+ intel_crtc_wait_for_next_vblank(crtc);
+
+ intel_encoders_disable(state, crtc);
+
+ intel_crtc_vblank_off(old_crtc_state);
+
+ intel_disable_transcoder(old_crtc_state);
+
+ i9xx_pfit_disable(old_crtc_state);
+
+ intel_encoders_post_disable(state, crtc);
+
+ if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
+ if (IS_CHERRYVIEW(dev_priv))
+ chv_disable_pll(dev_priv, pipe);
+ else if (IS_VALLEYVIEW(dev_priv))
+ vlv_disable_pll(dev_priv, pipe);
+ else
+ i9xx_disable_pll(old_crtc_state);
+ }
+
+ intel_encoders_post_pll_disable(state, crtc);
+
+ if (DISPLAY_VER(dev_priv) != 2)
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
+
+ if (!dev_priv->display.funcs.wm->initial_watermarks)
+ intel_update_watermarks(dev_priv);
+
+ /* clock the pipe down to 640x480@60 to potentially save power */
+ if (IS_I830(dev_priv))
+ i830_enable_pipe(dev_priv, pipe);
+}
+
+/*
+ * Turn all CRTCs off, but do not adjust state.
+ * This has to be paired with a call to intel_modeset_setup_hw_state.
+ */
+int intel_display_suspend(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_atomic_state *state;
+ int ret;
+
+ if (!HAS_DISPLAY(dev_priv))
+ return 0;
+
+ state = drm_atomic_helper_suspend(dev);
+ ret = PTR_ERR_OR_ZERO(state);
+ if (ret)
+ drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
+ ret);
+ else
+ dev_priv->modeset_restore_state = state;
+ return ret;
+}
+
+void intel_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+
+ drm_encoder_cleanup(encoder);
+ kfree(intel_encoder);
+}
+
+static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
+{
+ const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+ /* 915G supports double wide on either pipe, otherwise pipe A only */
+ return DISPLAY_VER(dev_priv) < 4 &&
+ (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
+}
+
+static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *crtc_state)
+{
+ u32 pixel_rate = crtc_state->hw.pipe_mode.crtc_clock;
+ struct drm_rect src;
+
+ /*
+ * We only use IF-ID interlacing. If we ever use
+ * PF-ID we'll need to adjust the pixel_rate here.
+ */
+
+ if (!crtc_state->pch_pfit.enabled)
+ return pixel_rate;
+
+ drm_rect_init(&src, 0, 0,
+ drm_rect_width(&crtc_state->pipe_src) << 16,
+ drm_rect_height(&crtc_state->pipe_src) << 16);
+
+ return intel_adjusted_rate(&src, &crtc_state->pch_pfit.dst,
+ pixel_rate);
+}
+
+static void intel_mode_from_crtc_timings(struct drm_display_mode *mode,
+ const struct drm_display_mode *timings)
+{
+ mode->hdisplay = timings->crtc_hdisplay;
+ mode->htotal = timings->crtc_htotal;
+ mode->hsync_start = timings->crtc_hsync_start;
+ mode->hsync_end = timings->crtc_hsync_end;
+
+ mode->vdisplay = timings->crtc_vdisplay;
+ mode->vtotal = timings->crtc_vtotal;
+ mode->vsync_start = timings->crtc_vsync_start;
+ mode->vsync_end = timings->crtc_vsync_end;
+
+ mode->flags = timings->flags;
+ mode->type = DRM_MODE_TYPE_DRIVER;
+
+ mode->clock = timings->crtc_clock;
+
+ drm_mode_set_name(mode);
+}
+
+static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+
+ if (HAS_GMCH(dev_priv))
+ /* FIXME calculate proper pipe pixel rate for GMCH pfit */
+ crtc_state->pixel_rate =
+ crtc_state->hw.pipe_mode.crtc_clock;
+ else
+ crtc_state->pixel_rate =
+ ilk_pipe_pixel_rate(crtc_state);
+}
+
+static void intel_bigjoiner_adjust_timings(const struct intel_crtc_state *crtc_state,
+ struct drm_display_mode *mode)
+{
+ int num_pipes = intel_bigjoiner_num_pipes(crtc_state);
+
+ if (num_pipes < 2)
+ return;
+
+ mode->crtc_clock /= num_pipes;
+ mode->crtc_hdisplay /= num_pipes;
+ mode->crtc_hblank_start /= num_pipes;
+ mode->crtc_hblank_end /= num_pipes;
+ mode->crtc_hsync_start /= num_pipes;
+ mode->crtc_hsync_end /= num_pipes;
+ mode->crtc_htotal /= num_pipes;
+}
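+
+/*
+ * Worked example (editor's illustration, values assumed): an 8k mode
+ * split across two bigjoiner pipes with crtc_clock = 1188000 kHz and
+ * crtc_hdisplay = 7680 becomes a 594000 kHz, 3840 pixel wide mode per
+ * pipe; all horizontal fields are halved the same way while the
+ * vertical timings are left untouched.
+ */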
+
+static void intel_splitter_adjust_timings(const struct intel_crtc_state *crtc_state,
+ struct drm_display_mode *mode)
+{
+ int overlap = crtc_state->splitter.pixel_overlap;
+ int n = crtc_state->splitter.link_count;
+
+ if (!crtc_state->splitter.enable)
+ return;
+
+ /*
+ * eDP MSO uses segment timings from EDID for transcoder
+ * timings, but full mode for everything else.
+ *
+ * h_full = (h_segment - pixel_overlap) * link_count
+ */
+ mode->crtc_hdisplay = (mode->crtc_hdisplay - overlap) * n;
+ mode->crtc_hblank_start = (mode->crtc_hblank_start - overlap) * n;
+ mode->crtc_hblank_end = (mode->crtc_hblank_end - overlap) * n;
+ mode->crtc_hsync_start = (mode->crtc_hsync_start - overlap) * n;
+ mode->crtc_hsync_end = (mode->crtc_hsync_end - overlap) * n;
+ mode->crtc_htotal = (mode->crtc_htotal - overlap) * n;
+ mode->crtc_clock *= n;
+}
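+
+/*
+ * Worked example (editor's illustration, values assumed): with two MSO
+ * links and pixel_overlap = 8, a 1928 pixel wide EDID segment expands
+ * to h_full = (1928 - 8) * 2 = 3840 pixels, and crtc_clock doubles.
+ */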
+
+static void intel_crtc_readout_derived_state(struct intel_crtc_state *crtc_state)
+{
+ struct drm_display_mode *mode = &crtc_state->hw.mode;
+ struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
+ struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
+
+ /*
+ * Start with the adjusted_mode crtc timings, which
+ * have been filled with the transcoder timings.
+ */
+ drm_mode_copy(pipe_mode, adjusted_mode);
+
+ /* Expand MSO per-segment transcoder timings to full */
+ intel_splitter_adjust_timings(crtc_state, pipe_mode);
+
+ /*
+ * We want the full numbers in adjusted_mode normal timings,
+ * adjusted_mode crtc timings are left with the raw transcoder
+ * timings.
+ */
+ intel_mode_from_crtc_timings(adjusted_mode, pipe_mode);
+
+ /* Populate the "user" mode with full numbers */
+ drm_mode_copy(mode, pipe_mode);
+ intel_mode_from_crtc_timings(mode, mode);
+ mode->hdisplay = drm_rect_width(&crtc_state->pipe_src) *
+ (intel_bigjoiner_num_pipes(crtc_state) ?: 1);
+ mode->vdisplay = drm_rect_height(&crtc_state->pipe_src);
+
+ /* Derive per-pipe timings in case bigjoiner is used */
+ intel_bigjoiner_adjust_timings(crtc_state, pipe_mode);
+ intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
+
+ intel_crtc_compute_pixel_rate(crtc_state);
+}
+
+void intel_encoder_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
+{
+ encoder->get_config(encoder, crtc_state);
+
+ intel_crtc_readout_derived_state(crtc_state);
+}
+
+static void intel_bigjoiner_compute_pipe_src(struct intel_crtc_state *crtc_state)
+{
+ int num_pipes = intel_bigjoiner_num_pipes(crtc_state);
+ int width, height;
+
+ if (num_pipes < 2)
+ return;
+
+ width = drm_rect_width(&crtc_state->pipe_src);
+ height = drm_rect_height(&crtc_state->pipe_src);
+
+ drm_rect_init(&crtc_state->pipe_src, 0, 0,
+ width / num_pipes, height);
+}
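+
+/*
+ * Example (editor's illustration): a 7680x2160 mode on a two-pipe
+ * bigjoiner configuration leaves each pipe with a 3840x2160 pipe_src
+ * rectangle.
+ */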
+
+static int intel_crtc_compute_pipe_src(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+
+ intel_bigjoiner_compute_pipe_src(crtc_state);
+
+ /*
+ * Pipe horizontal size must be even in:
+ * - DVO ganged mode
+ * - LVDS dual channel mode
+ * - Double wide pipe
+ */
+ if (drm_rect_width(&crtc_state->pipe_src) & 1) {
+ if (crtc_state->double_wide) {
+ drm_dbg_kms(&i915->drm,
+ "[CRTC:%d:%s] Odd pipe source width not supported with double wide pipe\n",
+ crtc->base.base.id, crtc->base.name);
+ return -EINVAL;
+ }
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
+ intel_is_dual_link_lvds(i915)) {
+ drm_dbg_kms(&i915->drm,
+ "[CRTC:%d:%s] Odd pipe source width not supported with dual link LVDS\n",
+ crtc->base.base.id, crtc->base.name);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int intel_crtc_compute_pipe_mode(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
+ struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
+ int clock_limit = i915->max_dotclk_freq;
+
+ /*
+ * Start with the adjusted_mode crtc timings, which
+ * have been filled with the transcoder timings.
+ */
+ drm_mode_copy(pipe_mode, adjusted_mode);
+
+ /* Expand MSO per-segment transcoder timings to full */
+ intel_splitter_adjust_timings(crtc_state, pipe_mode);
+
+ /* Derive per-pipe timings in case bigjoiner is used */
+ intel_bigjoiner_adjust_timings(crtc_state, pipe_mode);
+ intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
+
+ if (DISPLAY_VER(i915) < 4) {
+ clock_limit = i915->display.cdclk.max_cdclk_freq * 9 / 10;
+
+ /*
+ * Enable double wide mode when the dot clock
+ * is > 90% of the (display) core speed.
+ */
+ if (intel_crtc_supports_double_wide(crtc) &&
+ pipe_mode->crtc_clock > clock_limit) {
+ clock_limit = i915->max_dotclk_freq;
+ crtc_state->double_wide = true;
+ }
+ }
+
+ if (pipe_mode->crtc_clock > clock_limit) {
+ drm_dbg_kms(&i915->drm,
+ "[CRTC:%d:%s] requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
+ crtc->base.base.id, crtc->base.name,
+ pipe_mode->crtc_clock, clock_limit,
+ str_yes_no(crtc_state->double_wide));
+ return -EINVAL;
+ }
+
+ return 0;
+}
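+
+/*
+ * Worked example (editor's illustration, values assumed): on a
+ * pre-gen4 part with max_cdclk_freq = 320000 kHz the single wide
+ * limit is 320000 * 9 / 10 = 288000 kHz, so a 297000 kHz mode is
+ * only accepted if the crtc supports double wide, which raises the
+ * limit back to max_dotclk_freq.
+ */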
+
+static int intel_crtc_compute_config(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ int ret;
+
+ ret = intel_dpll_crtc_compute_clock(state, crtc);
+ if (ret)
+ return ret;
+
+ ret = intel_crtc_compute_pipe_src(crtc_state);
+ if (ret)
+ return ret;
+
+ ret = intel_crtc_compute_pipe_mode(crtc_state);
+ if (ret)
+ return ret;
+
+ intel_crtc_compute_pixel_rate(crtc_state);
+
+ if (crtc_state->has_pch_encoder)
+ return ilk_fdi_compute_config(crtc, crtc_state);
+
+ return 0;
+}
+
+static void
+intel_reduce_m_n_ratio(u32 *num, u32 *den)
+{
+ while (*num > DATA_LINK_M_N_MASK ||
+ *den > DATA_LINK_M_N_MASK) {
+ *num >>= 1;
+ *den >>= 1;
+ }
+}
+
+static void compute_m_n(u32 *ret_m, u32 *ret_n,
+ u32 m, u32 n, u32 constant_n)
+{
+ if (constant_n)
+ *ret_n = constant_n;
+ else
+ *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
+
+ *ret_m = div_u64(mul_u32_u32(m, *ret_n), n);
+ intel_reduce_m_n_ratio(ret_m, ret_n);
+}
+
+void
+intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
+ int pixel_clock, int link_clock,
+ struct intel_link_m_n *m_n,
+ bool fec_enable)
+{
+ u32 data_clock = bits_per_pixel * pixel_clock;
+
+ if (fec_enable)
+ data_clock = intel_dp_mode_to_fec_clock(data_clock);
+
+ /*
+ * Windows/BIOS uses fixed M/N values always. Follow suit.
+ *
+ * Also, several DP dongles in particular seem to be fussy
+ * about too large link M/N values. Presumably the 20-bit
+ * value used by Windows/BIOS is acceptable to everyone.
+ */
+ m_n->tu = 64;
+ compute_m_n(&m_n->data_m, &m_n->data_n,
+ data_clock, link_clock * nlanes * 8,
+ 0x8000000);
+
+ compute_m_n(&m_n->link_m, &m_n->link_n,
+ pixel_clock, link_clock,
+ 0x80000);
+}
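+
+/*
+ * Worked example (editor's illustration, values assumed, no FEC):
+ * 24 bpp at 148500 kHz over 4 lanes of HBR2 (link_clock = 270000 kHz)
+ * gives data_clock = 24 * 148500 = 3564000 and a data ratio of
+ * 3564000 / (270000 * 4 * 8) = 0.4125. With data N fixed at 0x8000000
+ * the raw data M is 55364812; both are then shifted right four times
+ * by intel_reduce_m_n_ratio() to fit DATA_LINK_M_N_MASK, giving
+ * data M/N = 3460300/8388608. Link M/N = 288358/0x80000 needs no
+ * reduction, and the TU is always 64.
+ */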
+
+static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
+{
+ /*
+ * There may be no VBT; and if the BIOS enabled SSC we can
+ * just keep using it to avoid unnecessary flicker. Whereas if the
+ * BIOS isn't using it, don't assume it will work even if the VBT
+ * indicates as much.
+ */
+ if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
+ bool bios_lvds_use_ssc = intel_de_read(dev_priv,
+ PCH_DREF_CONTROL) &
+ DREF_SSC1_ENABLE;
+
+ if (dev_priv->display.vbt.lvds_use_ssc != bios_lvds_use_ssc) {
+ drm_dbg_kms(&dev_priv->drm,
+ "SSC %s by BIOS, overriding VBT which says %s\n",
+ str_enabled_disabled(bios_lvds_use_ssc),
+ str_enabled_disabled(dev_priv->display.vbt.lvds_use_ssc));
+ dev_priv->display.vbt.lvds_use_ssc = bios_lvds_use_ssc;
+ }
+ }
+}
+
+void intel_zero_m_n(struct intel_link_m_n *m_n)
+{
+ /* corresponds to a register value of 0 */
+ memset(m_n, 0, sizeof(*m_n));
+ m_n->tu = 1;
+}
+
+void intel_set_m_n(struct drm_i915_private *i915,
+ const struct intel_link_m_n *m_n,
+ i915_reg_t data_m_reg, i915_reg_t data_n_reg,
+ i915_reg_t link_m_reg, i915_reg_t link_n_reg)
+{
+ intel_de_write(i915, data_m_reg, TU_SIZE(m_n->tu) | m_n->data_m);
+ intel_de_write(i915, data_n_reg, m_n->data_n);
+ intel_de_write(i915, link_m_reg, m_n->link_m);
+ /*
+ * On BDW+ writing LINK_N arms the double buffered update
+ * of all the M/N registers, so it must be written last.
+ */
+ intel_de_write(i915, link_n_reg, m_n->link_n);
+}
+
+bool intel_cpu_transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
+ enum transcoder transcoder)
+{
+ if (IS_HASWELL(dev_priv))
+ return transcoder == TRANSCODER_EDP;
+
+ return IS_DISPLAY_VER(dev_priv, 5, 7) || IS_CHERRYVIEW(dev_priv);
+}
+
+void intel_cpu_transcoder_set_m1_n1(struct intel_crtc *crtc,
+ enum transcoder transcoder,
+ const struct intel_link_m_n *m_n)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+
+ if (DISPLAY_VER(dev_priv) >= 5)
+ intel_set_m_n(dev_priv, m_n,
+ PIPE_DATA_M1(transcoder), PIPE_DATA_N1(transcoder),
+ PIPE_LINK_M1(transcoder), PIPE_LINK_N1(transcoder));
+ else
+ intel_set_m_n(dev_priv, m_n,
+ PIPE_DATA_M_G4X(pipe), PIPE_DATA_N_G4X(pipe),
+ PIPE_LINK_M_G4X(pipe), PIPE_LINK_N_G4X(pipe));
+}
+
+void intel_cpu_transcoder_set_m2_n2(struct intel_crtc *crtc,
+ enum transcoder transcoder,
+ const struct intel_link_m_n *m_n)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+ if (!intel_cpu_transcoder_has_m2_n2(dev_priv, transcoder))
+ return;
+
+ intel_set_m_n(dev_priv, m_n,
+ PIPE_DATA_M2(transcoder), PIPE_DATA_N2(transcoder),
+ PIPE_LINK_M2(transcoder), PIPE_LINK_N2(transcoder));
+}
+
+static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
+ u32 crtc_vtotal, crtc_vblank_end;
+ int vsyncshift = 0;
+
+ /*
+ * We need to be careful not to change the adjusted mode, for otherwise
+ * the hw state checker will get angry at the mismatch.
+ */
+ crtc_vtotal = adjusted_mode->crtc_vtotal;
+ crtc_vblank_end = adjusted_mode->crtc_vblank_end;
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ /* the chip adds 2 halflines automatically */
+ crtc_vtotal -= 1;
+ crtc_vblank_end -= 1;
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
+ vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
+ else
+ vsyncshift = adjusted_mode->crtc_hsync_start -
+ adjusted_mode->crtc_htotal / 2;
+ if (vsyncshift < 0)
+ vsyncshift += adjusted_mode->crtc_htotal;
+ }
+
+ if (DISPLAY_VER(dev_priv) > 3)
+ intel_de_write(dev_priv, VSYNCSHIFT(cpu_transcoder),
+ vsyncshift);
+
+ intel_de_write(dev_priv, HTOTAL(cpu_transcoder),
+ (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16));
+ intel_de_write(dev_priv, HBLANK(cpu_transcoder),
+ (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
+ intel_de_write(dev_priv, HSYNC(cpu_transcoder),
+ (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));
+
+ intel_de_write(dev_priv, VTOTAL(cpu_transcoder),
+ (adjusted_mode->crtc_vdisplay - 1) | ((crtc_vtotal - 1) << 16));
+ intel_de_write(dev_priv, VBLANK(cpu_transcoder),
+ (adjusted_mode->crtc_vblank_start - 1) | ((crtc_vblank_end - 1) << 16));
+ intel_de_write(dev_priv, VSYNC(cpu_transcoder),
+ (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));
+
+ /*
+ * Workaround: when the EDP input selection is B, the VTOTAL_B must be
+ * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
+ * documented in the DDI_FUNC_CTL register description, EDP Input Select
+ * bits.
+ */
+ if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
+ (pipe == PIPE_B || pipe == PIPE_C))
+ intel_de_write(dev_priv, VTOTAL(pipe),
+ intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));
+}
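+
+/*
+ * Worked example (editor's illustration, values assumed): for an
+ * interlaced non-SDVO mode with crtc_hsync_start = 2008 and
+ * crtc_htotal = 2200, vsyncshift = 2008 - 2200 / 2 = 908; for SDVO
+ * the fixed midpoint (2200 - 1) / 2 = 1099 is used instead.
+ */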
+
+static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ int width = drm_rect_width(&crtc_state->pipe_src);
+ int height = drm_rect_height(&crtc_state->pipe_src);
+ enum pipe pipe = crtc->pipe;
+
+ /*
+ * pipesrc controls the size that is scaled from, which should
+ * always be the user's requested size.
+ */
+ intel_de_write(dev_priv, PIPESRC(pipe),
+ PIPESRC_WIDTH(width - 1) | PIPESRC_HEIGHT(height - 1));
+}
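+
+/*
+ * Example (editor's illustration): a 1920x1080 source is written as
+ * PIPESRC_WIDTH(1919) | PIPESRC_HEIGHT(1079); with the width in the
+ * high and the height in the low 16 bits that is (1919 << 16) | 1079
+ * = 0x077f0437, the same layout intel_get_pipe_src_size() decodes
+ * below.
+ */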
+
+static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+
+ if (DISPLAY_VER(dev_priv) == 2)
+ return false;
+
+ if (DISPLAY_VER(dev_priv) >= 9 ||
+ IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW;
+ else
+ return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK;
+}
+
+static void intel_get_transcoder_timings(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
+ u32 tmp;
+
+ tmp = intel_de_read(dev_priv, HTOTAL(cpu_transcoder));
+ pipe_config->hw.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
+ pipe_config->hw.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
+
+ if (!transcoder_is_dsi(cpu_transcoder)) {
+ tmp = intel_de_read(dev_priv, HBLANK(cpu_transcoder));
+ pipe_config->hw.adjusted_mode.crtc_hblank_start =
+ (tmp & 0xffff) + 1;
+ pipe_config->hw.adjusted_mode.crtc_hblank_end =
+ ((tmp >> 16) & 0xffff) + 1;
+ }
+ tmp = intel_de_read(dev_priv, HSYNC(cpu_transcoder));
+ pipe_config->hw.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
+ pipe_config->hw.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
+
+ tmp = intel_de_read(dev_priv, VTOTAL(cpu_transcoder));
+ pipe_config->hw.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
+ pipe_config->hw.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
+
+ if (!transcoder_is_dsi(cpu_transcoder)) {
+ tmp = intel_de_read(dev_priv, VBLANK(cpu_transcoder));
+ pipe_config->hw.adjusted_mode.crtc_vblank_start =
+ (tmp & 0xffff) + 1;
+ pipe_config->hw.adjusted_mode.crtc_vblank_end =
+ ((tmp >> 16) & 0xffff) + 1;
+ }
+ tmp = intel_de_read(dev_priv, VSYNC(cpu_transcoder));
+ pipe_config->hw.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
+ pipe_config->hw.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
+
+ if (intel_pipe_is_interlaced(pipe_config)) {
+ pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
+ pipe_config->hw.adjusted_mode.crtc_vtotal += 1;
+ pipe_config->hw.adjusted_mode.crtc_vblank_end += 1;
+ }
+}
+
+static void intel_bigjoiner_adjust_pipe_src(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ int num_pipes = intel_bigjoiner_num_pipes(crtc_state);
+ enum pipe master_pipe, pipe = crtc->pipe;
+ int width;
+
+ if (num_pipes < 2)
+ return;
+
+ master_pipe = bigjoiner_master_pipe(crtc_state);
+ width = drm_rect_width(&crtc_state->pipe_src);
+
+ drm_rect_translate_to(&crtc_state->pipe_src,
+ (pipe - master_pipe) * width, 0);
+}
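+
+/*
+ * Example (editor's illustration): with pipes A+B joined and a
+ * per-pipe width of 3840, pipe A keeps its pipe_src at x = 0 while
+ * pipe B's rectangle is translated to start at
+ * x = (PIPE_B - PIPE_A) * 3840 = 3840.
+ */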
+
+static void intel_get_pipe_src_size(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ u32 tmp;
+
+ tmp = intel_de_read(dev_priv, PIPESRC(crtc->pipe));
+
+ drm_rect_init(&pipe_config->pipe_src, 0, 0,
+ REG_FIELD_GET(PIPESRC_WIDTH_MASK, tmp) + 1,
+ REG_FIELD_GET(PIPESRC_HEIGHT_MASK, tmp) + 1);
+
+ intel_bigjoiner_adjust_pipe_src(pipe_config);
+}
+
+void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 pipeconf = 0;
+
+ /*
+ * - We keep both pipes enabled on 830
+ * - During modeset the pipe is still disabled and must remain so
+ * - During fastset the pipe is already enabled and must remain so
+ */
+ if (IS_I830(dev_priv) || !intel_crtc_needs_modeset(crtc_state))
+ pipeconf |= PIPECONF_ENABLE;
+
+ if (crtc_state->double_wide)
+ pipeconf |= PIPECONF_DOUBLE_WIDE;
+
+ /* only g4x and later have fancy bpc/dither controls */
+ if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
+ IS_CHERRYVIEW(dev_priv)) {
+ /* Bspec claims that we can't use dithering for 30bpp pipes. */
+ if (crtc_state->dither && crtc_state->pipe_bpp != 30)
+ pipeconf |= PIPECONF_DITHER_EN |
+ PIPECONF_DITHER_TYPE_SP;
+
+ switch (crtc_state->pipe_bpp) {
+ default:
+ /* Case prevented by intel_choose_pipe_bpp_dither. */
+ MISSING_CASE(crtc_state->pipe_bpp);
+ fallthrough;
+ case 18:
+ pipeconf |= PIPECONF_BPC_6;
+ break;
+ case 24:
+ pipeconf |= PIPECONF_BPC_8;
+ break;
+ case 30:
+ pipeconf |= PIPECONF_BPC_10;
+ break;
+ }
+ }
+
+ if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
+ if (DISPLAY_VER(dev_priv) < 4 ||
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
+ pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
+ else
+ pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
+ } else {
+ pipeconf |= PIPECONF_INTERLACE_PROGRESSIVE;
+ }
+
+ if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ crtc_state->limited_color_range)
+ pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
+
+ pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
+
+ pipeconf |= PIPECONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1);
+
+ intel_de_write(dev_priv, PIPECONF(crtc->pipe), pipeconf);
+ intel_de_posting_read(dev_priv, PIPECONF(crtc->pipe));
+}
+
+static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
+{
+ if (IS_I830(dev_priv))
+ return false;
+
+ return DISPLAY_VER(dev_priv) >= 4 ||
+ IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
+}
+
+static void i9xx_get_pfit_config(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 tmp;
+
+ if (!i9xx_has_pfit(dev_priv))
+ return;
+
+ tmp = intel_de_read(dev_priv, PFIT_CONTROL);
+ if (!(tmp & PFIT_ENABLE))
+ return;
+
+ /* Check whether the pfit is attached to our pipe. */
+ if (DISPLAY_VER(dev_priv) < 4) {
+ if (crtc->pipe != PIPE_B)
+ return;
+ } else {
+ if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
+ return;
+ }
+
+ crtc_state->gmch_pfit.control = tmp;
+ crtc_state->gmch_pfit.pgm_ratios =
+ intel_de_read(dev_priv, PFIT_PGM_RATIOS);
+}
+
+static void vlv_crtc_clock_get(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ enum pipe pipe = crtc->pipe;
+ struct dpll clock;
+ u32 mdiv;
+ int refclk = 100000;
+
+ /* In case of DSI, DPLL will not be used */
+ if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
+ return;
+
+ vlv_dpio_get(dev_priv);
+ mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
+ vlv_dpio_put(dev_priv);
+
+ clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
+ clock.m2 = mdiv & DPIO_M2DIV_MASK;
+ clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
+ clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
+ clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
+
+ pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
+}
+
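+/*
+ * On CHV the feedback divider M2 is fractional: the integer part sits
+ * in PLL_DW0 and, when DPIO_CHV_FRAC_DIV_EN is set, a 22-bit fraction
+ * in PLL_DW2. The readout below reassembles them into a fixed-point
+ * value with 22 fractional bits for chv_calc_dpll_params().
+ */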
+static void chv_crtc_clock_get(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ enum pipe pipe = crtc->pipe;
+ enum dpio_channel port = vlv_pipe_to_channel(pipe);
+ struct dpll clock;
+ u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
+ int refclk = 100000;
+
+ /* In case of DSI, DPLL will not be used */
+ if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
+ return;
+
+ vlv_dpio_get(dev_priv);
+ cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
+ pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
+ pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
+ pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
+ pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
+ vlv_dpio_put(dev_priv);
+
+ clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
+ clock.m2 = (pll_dw0 & 0xff) << 22;
+ if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
+ clock.m2 |= pll_dw2 & 0x3fffff;
+ clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
+ clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
+ clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
+
+ pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
+}
+
+static enum intel_output_format
+bdw_get_pipemisc_output_format(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 tmp;
+
+ tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
+
+ if (tmp & PIPEMISC_YUV420_ENABLE) {
+ /* We support 4:2:0 in full blend mode only */
+ drm_WARN_ON(&dev_priv->drm,
+ (tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0);
+
+ return INTEL_OUTPUT_FORMAT_YCBCR420;
+ } else if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
+ return INTEL_OUTPUT_FORMAT_YCBCR444;
+ } else {
+ return INTEL_OUTPUT_FORMAT_RGB;
+ }
+}
+
+static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct intel_plane *plane = to_intel_plane(crtc->base.primary);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
+ u32 tmp;
+
+ tmp = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));
+
+ if (tmp & DISP_PIPE_GAMMA_ENABLE)
+ crtc_state->gamma_enable = true;
+
+ if (!HAS_GMCH(dev_priv) &&
+ tmp & DISP_PIPE_CSC_ENABLE)
+ crtc_state->csc_enable = true;
+}
+
+static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum intel_display_power_domain power_domain;
+ intel_wakeref_t wakeref;
+ u32 tmp;
+ bool ret;
+
+ power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
+ wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ if (!wakeref)
+ return false;
+
+ pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
+ pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
+ pipe_config->shared_dpll = NULL;
+
+ ret = false;
+
+ tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
+ if (!(tmp & PIPECONF_ENABLE))
+ goto out;
+
+ if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
+ IS_CHERRYVIEW(dev_priv)) {
+ switch (tmp & PIPECONF_BPC_MASK) {
+ case PIPECONF_BPC_6:
+ pipe_config->pipe_bpp = 18;
+ break;
+ case PIPECONF_BPC_8:
+ pipe_config->pipe_bpp = 24;
+ break;
+ case PIPECONF_BPC_10:
+ pipe_config->pipe_bpp = 30;
+ break;
+ default:
+ MISSING_CASE(tmp);
+ break;
+ }
+ }
+
+ if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ (tmp & PIPECONF_COLOR_RANGE_SELECT))
+ pipe_config->limited_color_range = true;
+
+ pipe_config->gamma_mode = REG_FIELD_GET(PIPECONF_GAMMA_MODE_MASK_I9XX, tmp);
+
+ pipe_config->framestart_delay = REG_FIELD_GET(PIPECONF_FRAME_START_DELAY_MASK, tmp) + 1;
+
+ if (IS_CHERRYVIEW(dev_priv))
+ pipe_config->cgm_mode = intel_de_read(dev_priv,
+ CGM_PIPE_MODE(crtc->pipe));
+
+ i9xx_get_pipe_color_config(pipe_config);
+ intel_color_get_config(pipe_config);
+
+ if (DISPLAY_VER(dev_priv) < 4)
+ pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
+
+ intel_get_transcoder_timings(crtc, pipe_config);
+ intel_get_pipe_src_size(crtc, pipe_config);
+
+ i9xx_get_pfit_config(pipe_config);
+
+ if (DISPLAY_VER(dev_priv) >= 4) {
+ /* No way to read it out on pipes B and C */
+ if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
+ tmp = dev_priv->chv_dpll_md[crtc->pipe];
+ else
+ tmp = intel_de_read(dev_priv, DPLL_MD(crtc->pipe));
+ pipe_config->pixel_multiplier =
+ ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
+ >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
+ pipe_config->dpll_hw_state.dpll_md = tmp;
+ } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
+ IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
+ tmp = intel_de_read(dev_priv, DPLL(crtc->pipe));
+ pipe_config->pixel_multiplier =
+ ((tmp & SDVO_MULTIPLIER_MASK)
+ >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
+ } else {
+ /* Note that on i915G/GM the pixel multiplier is in the sdvo
+ * port and will be fixed up in the encoder->get_config
+ * function. */
+ pipe_config->pixel_multiplier = 1;
+ }
+ pipe_config->dpll_hw_state.dpll = intel_de_read(dev_priv,
+ DPLL(crtc->pipe));
+ if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
+ pipe_config->dpll_hw_state.fp0 = intel_de_read(dev_priv,
+ FP0(crtc->pipe));
+ pipe_config->dpll_hw_state.fp1 = intel_de_read(dev_priv,
+ FP1(crtc->pipe));
+ } else {
+ /* Mask out read-only status bits. */
+ pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
+ DPLL_PORTC_READY_MASK |
+ DPLL_PORTB_READY_MASK);
+ }
+
+ if (IS_CHERRYVIEW(dev_priv))
+ chv_crtc_clock_get(crtc, pipe_config);
+ else if (IS_VALLEYVIEW(dev_priv))
+ vlv_crtc_clock_get(crtc, pipe_config);
+ else
+ i9xx_crtc_clock_get(crtc, pipe_config);
+
+ /*
+ * Normally the dotclock is filled in by the encoder .get_config()
+ * but in case the pipe is enabled w/o any ports we need a sane
+ * default.
+ */
+ pipe_config->hw.adjusted_mode.crtc_clock =
+ pipe_config->port_clock / pipe_config->pixel_multiplier;
+
+ ret = true;
+
+out:
+ intel_display_power_put(dev_priv, power_domain, wakeref);
+
+ return ret;
+}
+
+void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ u32 val = 0;
+
+ /*
+ * - During modeset the pipe is still disabled and must remain so
+ * - During fastset the pipe is already enabled and must remain so
+ */
+ if (!intel_crtc_needs_modeset(crtc_state))
+ val |= PIPECONF_ENABLE;
+
+ switch (crtc_state->pipe_bpp) {
+ default:
+ /* Case prevented by intel_choose_pipe_bpp_dither. */
+ MISSING_CASE(crtc_state->pipe_bpp);
+ fallthrough;
+ case 18:
+ val |= PIPECONF_BPC_6;
+ break;
+ case 24:
+ val |= PIPECONF_BPC_8;
+ break;
+ case 30:
+ val |= PIPECONF_BPC_10;
+ break;
+ case 36:
+ val |= PIPECONF_BPC_12;
+ break;
+ }
+
+ if (crtc_state->dither)
+ val |= PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP;
+
+ if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
+ val |= PIPECONF_INTERLACE_IF_ID_ILK;
+ else
+ val |= PIPECONF_INTERLACE_PF_PD_ILK;
+
+ /*
+ * This would end up with an odd purple hue over
+ * the entire display. Make sure we don't do it.
+ */
+ drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range &&
+ crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
+
+ if (crtc_state->limited_color_range &&
+ !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
+ val |= PIPECONF_COLOR_RANGE_SELECT;
+
+ if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
+ val |= PIPECONF_OUTPUT_COLORSPACE_YUV709;
+
+ val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
+
+ val |= PIPECONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1);
+ val |= PIPECONF_MSA_TIMING_DELAY(crtc_state->msa_timing_delay);
+
+ intel_de_write(dev_priv, PIPECONF(pipe), val);
+ intel_de_posting_read(dev_priv, PIPECONF(pipe));
+}
+
+static void hsw_set_transconf(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ u32 val = 0;
+
+ /*
+ * - During modeset the pipe is still disabled and must remain so
+ * - During fastset the pipe is already enabled and must remain so
+ */
+ if (!intel_crtc_needs_modeset(crtc_state))
+ val |= PIPECONF_ENABLE;
+
+ if (IS_HASWELL(dev_priv) && crtc_state->dither)
+ val |= PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP;
+
+ if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
+ val |= PIPECONF_INTERLACE_IF_ID_ILK;
+ else
+ val |= PIPECONF_INTERLACE_PF_PD_ILK;
+
+ if (IS_HASWELL(dev_priv) &&
+ crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
+ val |= PIPECONF_OUTPUT_COLORSPACE_YUV_HSW;
+
+ intel_de_write(dev_priv, PIPECONF(cpu_transcoder), val);
+ intel_de_posting_read(dev_priv, PIPECONF(cpu_transcoder));
+}
+
+static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 val = 0;
+
+ switch (crtc_state->pipe_bpp) {
+ case 18:
+ val |= PIPEMISC_BPC_6;
+ break;
+ case 24:
+ val |= PIPEMISC_BPC_8;
+ break;
+ case 30:
+ val |= PIPEMISC_BPC_10;
+ break;
+ case 36:
+ /* Port output 12BPC defined for ADLP+ */
+ if (DISPLAY_VER(dev_priv) > 12)
+ val |= PIPEMISC_BPC_12_ADLP;
+ break;
+ default:
+ MISSING_CASE(crtc_state->pipe_bpp);
+ break;
+ }
+
+ if (crtc_state->dither)
+ val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
+
+ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
+ crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
+ val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;
+
+ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
+ val |= PIPEMISC_YUV420_ENABLE |
+ PIPEMISC_YUV420_MODE_FULL_BLEND;
+
+ if (DISPLAY_VER(dev_priv) >= 11 && is_hdr_mode(crtc_state))
+ val |= PIPEMISC_HDR_MODE_PRECISION;
+
+ if (DISPLAY_VER(dev_priv) >= 12)
+ val |= PIPEMISC_PIXEL_ROUNDING_TRUNC;
+
+ intel_de_write(dev_priv, PIPEMISC(crtc->pipe), val);
+}
+
+int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 tmp;
+
+ tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
+
+ switch (tmp & PIPEMISC_BPC_MASK) {
+ case PIPEMISC_BPC_6:
+ return 18;
+ case PIPEMISC_BPC_8:
+ return 24;
+ case PIPEMISC_BPC_10:
+ return 30;
+ /*
+ * PORT OUTPUT 12 BPC defined for ADLP+.
+ *
+ * TODO:
+ * For previous platforms with DSI interface, bits 5:7
+ * are used for storing pipe_bpp irrespective of dithering.
+ * Since the value of 12 BPC is not defined for these bits
+ * on older platforms, need to find a workaround for 12 BPC
+ * MIPI DSI HW readout.
+ */
+ case PIPEMISC_BPC_12_ADLP:
+ if (DISPLAY_VER(dev_priv) > 12)
+ return 36;
+ fallthrough;
+ default:
+ MISSING_CASE(tmp);
+ return 0;
+ }
+}
+
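+/*
+ * Worked example (illustrative numbers): a 270000 kHz mode at 24 bpp
+ * needs 270000 * 24 * 21 / 20 = 6804000 kbps including the 5% spread
+ * margin; each lane of a 270000 kHz (HBR) link carries
+ * 270000 * 8 = 2160000 kbps, so DIV_ROUND_UP() yields 4 lanes.
+ */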
+int ilk_get_lanes_required(int target_clock, int link_bw, int bpp)
+{
+ /*
+ * Account for spread spectrum to avoid
+ * oversubscribing the link. Max center spread
+ * is 2.5%; use 5% for safety's sake.
+ */
+ u32 bps = target_clock * bpp * 21 / 20;
+ return DIV_ROUND_UP(bps, link_bw * 8);
+}
+
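+/*
+ * The M/N values are masked by DATA_LINK_M_N_MASK, and the TU
+ * (transfer unit) size is stored minus one in the high bits of the
+ * data M register, hence the +1 on readout.
+ */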
+void intel_get_m_n(struct drm_i915_private *i915,
+ struct intel_link_m_n *m_n,
+ i915_reg_t data_m_reg, i915_reg_t data_n_reg,
+ i915_reg_t link_m_reg, i915_reg_t link_n_reg)
+{
+ m_n->link_m = intel_de_read(i915, link_m_reg) & DATA_LINK_M_N_MASK;
+ m_n->link_n = intel_de_read(i915, link_n_reg) & DATA_LINK_M_N_MASK;
+ m_n->data_m = intel_de_read(i915, data_m_reg) & DATA_LINK_M_N_MASK;
+ m_n->data_n = intel_de_read(i915, data_n_reg) & DATA_LINK_M_N_MASK;
+ m_n->tu = REG_FIELD_GET(TU_SIZE_MASK, intel_de_read(i915, data_m_reg)) + 1;
+}
+
+void intel_cpu_transcoder_get_m1_n1(struct intel_crtc *crtc,
+ enum transcoder transcoder,
+ struct intel_link_m_n *m_n)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+
+ if (DISPLAY_VER(dev_priv) >= 5)
+ intel_get_m_n(dev_priv, m_n,
+ PIPE_DATA_M1(transcoder), PIPE_DATA_N1(transcoder),
+ PIPE_LINK_M1(transcoder), PIPE_LINK_N1(transcoder));
+ else
+ intel_get_m_n(dev_priv, m_n,
+ PIPE_DATA_M_G4X(pipe), PIPE_DATA_N_G4X(pipe),
+ PIPE_LINK_M_G4X(pipe), PIPE_LINK_N_G4X(pipe));
+}
+
+void intel_cpu_transcoder_get_m2_n2(struct intel_crtc *crtc,
+ enum transcoder transcoder,
+ struct intel_link_m_n *m_n)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+ if (!intel_cpu_transcoder_has_m2_n2(dev_priv, transcoder))
+ return;
+
+ intel_get_m_n(dev_priv, m_n,
+ PIPE_DATA_M2(transcoder), PIPE_DATA_N2(transcoder),
+ PIPE_LINK_M2(transcoder), PIPE_LINK_N2(transcoder));
+}
+
+static void ilk_get_pfit_pos_size(struct intel_crtc_state *crtc_state,
+ u32 pos, u32 size)
+{
+ drm_rect_init(&crtc_state->pch_pfit.dst,
+ pos >> 16, pos & 0xffff,
+ size >> 16, size & 0xffff);
+}
+
+static void skl_get_pfit_config(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
+ int id = -1;
+ int i;
+
+ /* find scaler attached to this pipe */
+ for (i = 0; i < crtc->num_scalers; i++) {
+ u32 ctl, pos, size;
+
+ ctl = intel_de_read(dev_priv, SKL_PS_CTRL(crtc->pipe, i));
+ if ((ctl & (PS_SCALER_EN | PS_PLANE_SEL_MASK)) != PS_SCALER_EN)
+ continue;
+
+ id = i;
+ crtc_state->pch_pfit.enabled = true;
+
+ pos = intel_de_read(dev_priv, SKL_PS_WIN_POS(crtc->pipe, i));
+ size = intel_de_read(dev_priv, SKL_PS_WIN_SZ(crtc->pipe, i));
+
+ ilk_get_pfit_pos_size(crtc_state, pos, size);
+
+ scaler_state->scalers[i].in_use = true;
+ break;
+ }
+
+ scaler_state->scaler_id = id;
+ if (id >= 0)
+ scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
+ else
+ scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
+}
+
+static void ilk_get_pfit_config(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 ctl, pos, size;
+
+ ctl = intel_de_read(dev_priv, PF_CTL(crtc->pipe));
+ if ((ctl & PF_ENABLE) == 0)
+ return;
+
+ crtc_state->pch_pfit.enabled = true;
+
+ pos = intel_de_read(dev_priv, PF_WIN_POS(crtc->pipe));
+ size = intel_de_read(dev_priv, PF_WIN_SZ(crtc->pipe));
+
+ ilk_get_pfit_pos_size(crtc_state, pos, size);
+
+ /*
+	 * We currently do not free assignments of panel fitters on
+	 * ivb/hsw (since we don't use the higher upscaling modes which
+	 * differentiate them) so just WARN about this case for now.
+ */
+ drm_WARN_ON(&dev_priv->drm, DISPLAY_VER(dev_priv) == 7 &&
+ (ctl & PF_PIPE_SEL_MASK_IVB) != PF_PIPE_SEL_IVB(crtc->pipe));
+}
+
+static bool ilk_get_pipe_config(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ enum intel_display_power_domain power_domain;
+ intel_wakeref_t wakeref;
+ u32 tmp;
+ bool ret;
+
+ power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
+ wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ if (!wakeref)
+ return false;
+
+ pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
+ pipe_config->shared_dpll = NULL;
+
+ ret = false;
+ tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
+ if (!(tmp & PIPECONF_ENABLE))
+ goto out;
+
+ switch (tmp & PIPECONF_BPC_MASK) {
+ case PIPECONF_BPC_6:
+ pipe_config->pipe_bpp = 18;
+ break;
+ case PIPECONF_BPC_8:
+ pipe_config->pipe_bpp = 24;
+ break;
+ case PIPECONF_BPC_10:
+ pipe_config->pipe_bpp = 30;
+ break;
+ case PIPECONF_BPC_12:
+ pipe_config->pipe_bpp = 36;
+ break;
+ default:
+ break;
+ }
+
+ if (tmp & PIPECONF_COLOR_RANGE_SELECT)
+ pipe_config->limited_color_range = true;
+
+ switch (tmp & PIPECONF_OUTPUT_COLORSPACE_MASK) {
+ case PIPECONF_OUTPUT_COLORSPACE_YUV601:
+ case PIPECONF_OUTPUT_COLORSPACE_YUV709:
+ pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
+ break;
+ default:
+ pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
+ break;
+ }
+
+ pipe_config->gamma_mode = REG_FIELD_GET(PIPECONF_GAMMA_MODE_MASK_ILK, tmp);
+
+ pipe_config->framestart_delay = REG_FIELD_GET(PIPECONF_FRAME_START_DELAY_MASK, tmp) + 1;
+
+ pipe_config->msa_timing_delay = REG_FIELD_GET(PIPECONF_MSA_TIMING_DELAY_MASK, tmp);
+
+ pipe_config->csc_mode = intel_de_read(dev_priv,
+ PIPE_CSC_MODE(crtc->pipe));
+
+ i9xx_get_pipe_color_config(pipe_config);
+ intel_color_get_config(pipe_config);
+
+ pipe_config->pixel_multiplier = 1;
+
+ ilk_pch_get_config(pipe_config);
+
+ intel_get_transcoder_timings(crtc, pipe_config);
+ intel_get_pipe_src_size(crtc, pipe_config);
+
+ ilk_get_pfit_config(pipe_config);
+
+ ret = true;
+
+out:
+ intel_display_power_put(dev_priv, power_domain, wakeref);
+
+ return ret;
+}
+
+static u8 bigjoiner_pipes(struct drm_i915_private *i915)
+{
+ u8 pipes;
+
+ if (DISPLAY_VER(i915) >= 12)
+ pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D);
+ else if (DISPLAY_VER(i915) >= 11)
+ pipes = BIT(PIPE_B) | BIT(PIPE_C);
+ else
+ pipes = 0;
+
+ return pipes & RUNTIME_INFO(i915)->pipe_mask;
+}
+
+static bool transcoder_ddi_func_is_enabled(struct drm_i915_private *dev_priv,
+ enum transcoder cpu_transcoder)
+{
+ enum intel_display_power_domain power_domain;
+ intel_wakeref_t wakeref;
+ u32 tmp = 0;
+
+ power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
+
+ with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref)
+ tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));
+
+ return tmp & TRANS_DDI_FUNC_ENABLE;
+}
+
+static void enabled_bigjoiner_pipes(struct drm_i915_private *dev_priv,
+ u8 *master_pipes, u8 *slave_pipes)
+{
+ struct intel_crtc *crtc;
+
+ *master_pipes = 0;
+ *slave_pipes = 0;
+
+ for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc,
+ bigjoiner_pipes(dev_priv)) {
+ enum intel_display_power_domain power_domain;
+ enum pipe pipe = crtc->pipe;
+ intel_wakeref_t wakeref;
+
+ power_domain = intel_dsc_power_domain(crtc, (enum transcoder) pipe);
+ with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) {
+ u32 tmp = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe));
+
+ if (!(tmp & BIG_JOINER_ENABLE))
+ continue;
+
+ if (tmp & MASTER_BIG_JOINER_ENABLE)
+ *master_pipes |= BIT(pipe);
+ else
+ *slave_pipes |= BIT(pipe);
+ }
+
+ if (DISPLAY_VER(dev_priv) < 13)
+ continue;
+
+ power_domain = POWER_DOMAIN_PIPE(pipe);
+ with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) {
+ u32 tmp = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe));
+
+ if (tmp & UNCOMPRESSED_JOINER_MASTER)
+ *master_pipes |= BIT(pipe);
+ if (tmp & UNCOMPRESSED_JOINER_SLAVE)
+ *slave_pipes |= BIT(pipe);
+ }
+ }
+
+ /* Bigjoiner pipes should always be consecutive master and slave */
+ drm_WARN(&dev_priv->drm, *slave_pipes != *master_pipes << 1,
+ "Bigjoiner misconfigured (master pipes 0x%x, slave pipes 0x%x)\n",
+ *master_pipes, *slave_pipes);
+}
+
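+/*
+ * Map a pipe back to its bigjoiner master using the two bitmasks.
+ * Worked example with masters = BIT(PIPE_A) | BIT(PIPE_C) and
+ * slaves = BIT(PIPE_B) | BIT(PIPE_D): for pipe D the remaining
+ * masters at or below it are A and C, so fls() picks C as the
+ * master; C's slave span then runs up to the BIT(7) sentinel and
+ * contains just pipe D.
+ */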
+static enum pipe get_bigjoiner_master_pipe(enum pipe pipe, u8 master_pipes, u8 slave_pipes)
+{
+ if ((slave_pipes & BIT(pipe)) == 0)
+ return pipe;
+
+	/* ignore our pipe and everything above it */
+ master_pipes &= ~GENMASK(7, pipe);
+
+ /* highest remaining bit should be our master pipe */
+ return fls(master_pipes) - 1;
+}
+
+static u8 get_bigjoiner_slave_pipes(enum pipe pipe, u8 master_pipes, u8 slave_pipes)
+{
+ enum pipe master_pipe, next_master_pipe;
+
+ master_pipe = get_bigjoiner_master_pipe(pipe, master_pipes, slave_pipes);
+
+ if ((master_pipes & BIT(master_pipe)) == 0)
+ return 0;
+
+ /* ignore our master pipe and everything below it */
+ master_pipes &= ~GENMASK(master_pipe, 0);
+ /* make sure a high bit is set for the ffs() */
+ master_pipes |= BIT(7);
+ /* lowest remaining bit should be the next master pipe */
+ next_master_pipe = ffs(master_pipes) - 1;
+
+ return slave_pipes & GENMASK(next_master_pipe - 1, master_pipe);
+}
+
+static u8 hsw_panel_transcoders(struct drm_i915_private *i915)
+{
+ u8 panel_transcoder_mask = BIT(TRANSCODER_EDP);
+
+ if (DISPLAY_VER(i915) >= 11)
+ panel_transcoder_mask |= BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);
+
+ return panel_transcoder_mask;
+}
+
+static u8 hsw_enabled_transcoders(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ u8 panel_transcoder_mask = hsw_panel_transcoders(dev_priv);
+ enum transcoder cpu_transcoder;
+ u8 master_pipes, slave_pipes;
+ u8 enabled_transcoders = 0;
+
+ /*
+ * XXX: Do intel_display_power_get_if_enabled before reading this (for
+	 * consistency and less surprising code; it's in always-on power).
+ */
+ for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder,
+ panel_transcoder_mask) {
+ enum intel_display_power_domain power_domain;
+ intel_wakeref_t wakeref;
+ enum pipe trans_pipe;
+ u32 tmp = 0;
+
+ power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
+ with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref)
+ tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));
+
+ if (!(tmp & TRANS_DDI_FUNC_ENABLE))
+ continue;
+
+ switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
+ default:
+ drm_WARN(dev, 1,
+ "unknown pipe linked to transcoder %s\n",
+ transcoder_name(cpu_transcoder));
+ fallthrough;
+ case TRANS_DDI_EDP_INPUT_A_ONOFF:
+ case TRANS_DDI_EDP_INPUT_A_ON:
+ trans_pipe = PIPE_A;
+ break;
+ case TRANS_DDI_EDP_INPUT_B_ONOFF:
+ trans_pipe = PIPE_B;
+ break;
+ case TRANS_DDI_EDP_INPUT_C_ONOFF:
+ trans_pipe = PIPE_C;
+ break;
+ case TRANS_DDI_EDP_INPUT_D_ONOFF:
+ trans_pipe = PIPE_D;
+ break;
+ }
+
+ if (trans_pipe == crtc->pipe)
+ enabled_transcoders |= BIT(cpu_transcoder);
+ }
+
+ /* single pipe or bigjoiner master */
+ cpu_transcoder = (enum transcoder) crtc->pipe;
+ if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder))
+ enabled_transcoders |= BIT(cpu_transcoder);
+
+ /* bigjoiner slave -> consider the master pipe's transcoder as well */
+ enabled_bigjoiner_pipes(dev_priv, &master_pipes, &slave_pipes);
+ if (slave_pipes & BIT(crtc->pipe)) {
+ cpu_transcoder = (enum transcoder)
+ get_bigjoiner_master_pipe(crtc->pipe, master_pipes, slave_pipes);
+ if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder))
+ enabled_transcoders |= BIT(cpu_transcoder);
+ }
+
+ return enabled_transcoders;
+}
+
+static bool has_edp_transcoders(u8 enabled_transcoders)
+{
+ return enabled_transcoders & BIT(TRANSCODER_EDP);
+}
+
+static bool has_dsi_transcoders(u8 enabled_transcoders)
+{
+ return enabled_transcoders & (BIT(TRANSCODER_DSI_0) |
+ BIT(TRANSCODER_DSI_1));
+}
+
+static bool has_pipe_transcoders(u8 enabled_transcoders)
+{
+ return enabled_transcoders & ~(BIT(TRANSCODER_EDP) |
+ BIT(TRANSCODER_DSI_0) |
+ BIT(TRANSCODER_DSI_1));
+}
+
+static void assert_enabled_transcoders(struct drm_i915_private *i915,
+ u8 enabled_transcoders)
+{
+ /* Only one type of transcoder please */
+ drm_WARN_ON(&i915->drm,
+ has_edp_transcoders(enabled_transcoders) +
+ has_dsi_transcoders(enabled_transcoders) +
+ has_pipe_transcoders(enabled_transcoders) > 1);
+
+ /* Only DSI transcoders can be ganged */
+ drm_WARN_ON(&i915->drm,
+ !has_dsi_transcoders(enabled_transcoders) &&
+ !is_power_of_2(enabled_transcoders));
+}
+
+static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config,
+ struct intel_display_power_domain_set *power_domain_set)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ unsigned long enabled_transcoders;
+ u32 tmp;
+
+ enabled_transcoders = hsw_enabled_transcoders(crtc);
+ if (!enabled_transcoders)
+ return false;
+
+ assert_enabled_transcoders(dev_priv, enabled_transcoders);
+
+ /*
+ * With the exception of DSI we should only ever have
+ * a single enabled transcoder. With DSI let's just
+ * pick the first one.
+ */
+ pipe_config->cpu_transcoder = ffs(enabled_transcoders) - 1;
+
+ if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
+ POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
+ return false;
+
+ if (hsw_panel_transcoders(dev_priv) & BIT(pipe_config->cpu_transcoder)) {
+ tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
+
+ if ((tmp & TRANS_DDI_EDP_INPUT_MASK) == TRANS_DDI_EDP_INPUT_A_ONOFF)
+ pipe_config->pch_pfit.force_thru = true;
+ }
+
+ tmp = intel_de_read(dev_priv, PIPECONF(pipe_config->cpu_transcoder));
+
+ return tmp & PIPECONF_ENABLE;
+}
+
+static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config,
+ struct intel_display_power_domain_set *power_domain_set)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ enum transcoder cpu_transcoder;
+ enum port port;
+ u32 tmp;
+
+ for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
+ if (port == PORT_A)
+ cpu_transcoder = TRANSCODER_DSI_A;
+ else
+ cpu_transcoder = TRANSCODER_DSI_C;
+
+ if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
+ POWER_DOMAIN_TRANSCODER(cpu_transcoder)))
+ continue;
+
+ /*
+ * The PLL needs to be enabled with a valid divider
+ * configuration, otherwise accessing DSI registers will hang
+ * the machine. See BSpec North Display Engine
+ * registers/MIPI[BXT]. We can break out here early, since we
+ * need the same DSI PLL to be enabled for both DSI ports.
+ */
+ if (!bxt_dsi_pll_is_enabled(dev_priv))
+ break;
+
+ /* XXX: this works for video mode only */
+ tmp = intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port));
+ if (!(tmp & DPI_ENABLE))
+ continue;
+
+ tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
+ if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
+ continue;
+
+ pipe_config->cpu_transcoder = cpu_transcoder;
+ break;
+ }
+
+ return transcoder_is_dsi(pipe_config->cpu_transcoder);
+}
+
+static void intel_bigjoiner_get_config(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ u8 master_pipes, slave_pipes;
+ enum pipe pipe = crtc->pipe;
+
+ enabled_bigjoiner_pipes(i915, &master_pipes, &slave_pipes);
+
+ if (((master_pipes | slave_pipes) & BIT(pipe)) == 0)
+ return;
+
+ crtc_state->bigjoiner_pipes =
+ BIT(get_bigjoiner_master_pipe(pipe, master_pipes, slave_pipes)) |
+ get_bigjoiner_slave_pipes(pipe, master_pipes, slave_pipes);
+}
+
+static bool hsw_get_pipe_config(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display_power_domain_set power_domain_set = { };
+ bool active;
+ u32 tmp;
+
+ if (!intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set,
+ POWER_DOMAIN_PIPE(crtc->pipe)))
+ return false;
+
+ pipe_config->shared_dpll = NULL;
+
+ active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_set);
+
+ if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
+ bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_set)) {
+ drm_WARN_ON(&dev_priv->drm, active);
+ active = true;
+ }
+
+ if (!active)
+ goto out;
+
+ intel_dsc_get_config(pipe_config);
+ intel_bigjoiner_get_config(pipe_config);
+
+ if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
+ DISPLAY_VER(dev_priv) >= 11)
+ intel_get_transcoder_timings(crtc, pipe_config);
+
+ if (HAS_VRR(dev_priv) && !transcoder_is_dsi(pipe_config->cpu_transcoder))
+ intel_vrr_get_config(crtc, pipe_config);
+
+ intel_get_pipe_src_size(crtc, pipe_config);
+
+ if (IS_HASWELL(dev_priv)) {
+ u32 tmp = intel_de_read(dev_priv,
+ PIPECONF(pipe_config->cpu_transcoder));
+
+ if (tmp & PIPECONF_OUTPUT_COLORSPACE_YUV_HSW)
+ pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
+ else
+ pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
+ } else {
+ pipe_config->output_format =
+ bdw_get_pipemisc_output_format(crtc);
+ }
+
+ pipe_config->gamma_mode = intel_de_read(dev_priv,
+ GAMMA_MODE(crtc->pipe));
+
+ pipe_config->csc_mode = intel_de_read(dev_priv,
+ PIPE_CSC_MODE(crtc->pipe));
+
+ if (DISPLAY_VER(dev_priv) >= 9) {
+ tmp = intel_de_read(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe));
+
+ if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
+ pipe_config->gamma_enable = true;
+
+ if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE)
+ pipe_config->csc_enable = true;
+ } else {
+ i9xx_get_pipe_color_config(pipe_config);
+ }
+
+ intel_color_get_config(pipe_config);
+
+ tmp = intel_de_read(dev_priv, WM_LINETIME(crtc->pipe));
+ pipe_config->linetime = REG_FIELD_GET(HSW_LINETIME_MASK, tmp);
+ if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ pipe_config->ips_linetime =
+ REG_FIELD_GET(HSW_IPS_LINETIME_MASK, tmp);
+
+ if (intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set,
+ POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe))) {
+ if (DISPLAY_VER(dev_priv) >= 9)
+ skl_get_pfit_config(pipe_config);
+ else
+ ilk_get_pfit_config(pipe_config);
+ }
+
+ hsw_ips_get_config(pipe_config);
+
+ if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
+ !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
+ pipe_config->pixel_multiplier =
+ intel_de_read(dev_priv,
+ PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
+ } else {
+ pipe_config->pixel_multiplier = 1;
+ }
+
+ if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
+ tmp = intel_de_read(dev_priv, DISPLAY_VER(dev_priv) >= 14 ?
+ MTL_CHICKEN_TRANS(pipe_config->cpu_transcoder) :
+ CHICKEN_TRANS(pipe_config->cpu_transcoder));
+
+ pipe_config->framestart_delay = REG_FIELD_GET(HSW_FRAME_START_DELAY_MASK, tmp) + 1;
+ } else {
+ /* no idea if this is correct */
+ pipe_config->framestart_delay = 1;
+ }
+
+out:
+ intel_display_power_put_all_in_set(dev_priv, &power_domain_set);
+
+ return active;
+}
+
+bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+
+ if (!i915->display.funcs.display->get_pipe_config(crtc, crtc_state))
+ return false;
+
+ crtc_state->hw.active = true;
+
+ intel_crtc_readout_derived_state(crtc_state);
+
+ return true;
+}
+
+/* VESA 640x480x72Hz mode to set on the pipe */
+static const struct drm_display_mode load_detect_mode = {
+ DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
+ 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+};
+
+static int intel_modeset_disable_planes(struct drm_atomic_state *state,
+ struct drm_crtc *crtc)
+{
+ struct drm_plane *plane;
+ struct drm_plane_state *plane_state;
+ int ret, i;
+
+ ret = drm_atomic_add_affected_planes(state, crtc);
+ if (ret)
+ return ret;
+
+ for_each_new_plane_in_state(state, plane, plane_state, i) {
+ if (plane_state->crtc != crtc)
+ continue;
+
+ ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
+ if (ret)
+ return ret;
+
+ drm_atomic_set_fb_for_plane(plane_state, NULL);
+ }
+
+ return 0;
+}
+
+int intel_get_load_detect_pipe(struct drm_connector *connector,
+ struct intel_load_detect_pipe *old,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct intel_encoder *encoder =
+ intel_attached_encoder(to_intel_connector(connector));
+ struct intel_crtc *possible_crtc;
+ struct intel_crtc *crtc = NULL;
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_mode_config *config = &dev->mode_config;
+ struct drm_atomic_state *state = NULL, *restore_state = NULL;
+ struct drm_connector_state *connector_state;
+ struct intel_crtc_state *crtc_state;
+ int ret;
+
+ drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
+ connector->base.id, connector->name,
+ encoder->base.base.id, encoder->base.name);
+
+ old->restore_state = NULL;
+
+ drm_WARN_ON(dev, !drm_modeset_is_locked(&config->connection_mutex));
+
+ /*
+ * Algorithm gets a little messy:
+ *
+ * - if the connector already has an assigned crtc, use it (but make
+ * sure it's on first)
+ *
+ * - try to find the first unused crtc that can drive this connector,
+ * and use that if we find one
+ */
+
+ /* See if we already have a CRTC for this connector */
+ if (connector->state->crtc) {
+ crtc = to_intel_crtc(connector->state->crtc);
+
+ ret = drm_modeset_lock(&crtc->base.mutex, ctx);
+ if (ret)
+ goto fail;
+
+ /* Make sure the crtc and connector are running */
+ goto found;
+ }
+
+ /* Find an unused one (if possible) */
+ for_each_intel_crtc(dev, possible_crtc) {
+ if (!(encoder->base.possible_crtcs &
+ drm_crtc_mask(&possible_crtc->base)))
+ continue;
+
+ ret = drm_modeset_lock(&possible_crtc->base.mutex, ctx);
+ if (ret)
+ goto fail;
+
+ if (possible_crtc->base.state->enable) {
+ drm_modeset_unlock(&possible_crtc->base.mutex);
+ continue;
+ }
+
+ crtc = possible_crtc;
+ break;
+ }
+
+ /*
+ * If we didn't find an unused CRTC, don't use any.
+ */
+ if (!crtc) {
+ drm_dbg_kms(&dev_priv->drm,
+ "no pipe available for load-detect\n");
+ ret = -ENODEV;
+ goto fail;
+ }
+
+found:
+ state = drm_atomic_state_alloc(dev);
+ restore_state = drm_atomic_state_alloc(dev);
+ if (!state || !restore_state) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ state->acquire_ctx = ctx;
+ restore_state->acquire_ctx = ctx;
+
+ connector_state = drm_atomic_get_connector_state(state, connector);
+ if (IS_ERR(connector_state)) {
+ ret = PTR_ERR(connector_state);
+ goto fail;
+ }
+
+ ret = drm_atomic_set_crtc_for_connector(connector_state, &crtc->base);
+ if (ret)
+ goto fail;
+
+ crtc_state = intel_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state)) {
+ ret = PTR_ERR(crtc_state);
+ goto fail;
+ }
+
+ crtc_state->uapi.active = true;
+
+ ret = drm_atomic_set_mode_for_crtc(&crtc_state->uapi,
+ &load_detect_mode);
+ if (ret)
+ goto fail;
+
+ ret = intel_modeset_disable_planes(state, &crtc->base);
+ if (ret)
+ goto fail;
+
+ ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
+ if (!ret)
+ ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, &crtc->base));
+ if (!ret)
+ ret = drm_atomic_add_affected_planes(restore_state, &crtc->base);
+ if (ret) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Failed to create a copy of old state to restore: %i\n",
+ ret);
+ goto fail;
+ }
+
+ ret = drm_atomic_commit(state);
+ if (ret) {
+ drm_dbg_kms(&dev_priv->drm,
+ "failed to set mode on load-detect pipe\n");
+ goto fail;
+ }
+
+ old->restore_state = restore_state;
+ drm_atomic_state_put(state);
+
+ /* let the connector get through one full cycle before testing */
+ intel_crtc_wait_for_next_vblank(crtc);
+
+ return true;
+
+fail:
+ if (state) {
+ drm_atomic_state_put(state);
+ state = NULL;
+ }
+ if (restore_state) {
+ drm_atomic_state_put(restore_state);
+ restore_state = NULL;
+ }
+
+ if (ret == -EDEADLK)
+ return ret;
+
+ return false;
+}
+
+void intel_release_load_detect_pipe(struct drm_connector *connector,
+ struct intel_load_detect_pipe *old,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct intel_encoder *intel_encoder =
+ intel_attached_encoder(to_intel_connector(connector));
+ struct drm_i915_private *i915 = to_i915(intel_encoder->base.dev);
+ struct drm_encoder *encoder = &intel_encoder->base;
+ struct drm_atomic_state *state = old->restore_state;
+ int ret;
+
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
+ connector->base.id, connector->name,
+ encoder->base.id, encoder->name);
+
+ if (!state)
+ return;
+
+ ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
+ if (ret)
+ drm_dbg_kms(&i915->drm,
+ "Couldn't release load detect pipe: %i\n", ret);
+ drm_atomic_state_put(state);
+}
+
+static int i9xx_pll_refclk(struct drm_device *dev,
+ const struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ u32 dpll = pipe_config->dpll_hw_state.dpll;
+
+ if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
+ return dev_priv->display.vbt.lvds_ssc_freq;
+ else if (HAS_PCH_SPLIT(dev_priv))
+ return 120000;
+ else if (DISPLAY_VER(dev_priv) != 2)
+ return 96000;
+ else
+ return 48000;
+}
+
+/* Returns the clock of the currently programmed mode of the given pipe. */
+void i9xx_crtc_clock_get(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ u32 dpll = pipe_config->dpll_hw_state.dpll;
+ u32 fp;
+ struct dpll clock;
+ int port_clock;
+ int refclk = i9xx_pll_refclk(dev, pipe_config);
+
+ if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
+ fp = pipe_config->dpll_hw_state.fp0;
+ else
+ fp = pipe_config->dpll_hw_state.fp1;
+
+ clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
+ if (IS_PINEVIEW(dev_priv)) {
+ clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
+ clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
+ } else {
+ clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
+ clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
+ }
+
+ if (DISPLAY_VER(dev_priv) != 2) {
+ if (IS_PINEVIEW(dev_priv))
+ clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
+ DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
+ else
+ clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
+ DPLL_FPA01_P1_POST_DIV_SHIFT);
+
+ switch (dpll & DPLL_MODE_MASK) {
+ case DPLLB_MODE_DAC_SERIAL:
+ clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
+ 5 : 10;
+ break;
+ case DPLLB_MODE_LVDS:
+ clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
+ 7 : 14;
+ break;
+ default:
+ drm_dbg_kms(&dev_priv->drm,
+ "Unknown DPLL mode %08x in programmed "
+ "mode\n", (int)(dpll & DPLL_MODE_MASK));
+ return;
+ }
+
+ if (IS_PINEVIEW(dev_priv))
+ port_clock = pnv_calc_dpll_params(refclk, &clock);
+ else
+ port_clock = i9xx_calc_dpll_params(refclk, &clock);
+ } else {
+ enum pipe lvds_pipe;
+
+ if (IS_I85X(dev_priv) &&
+ intel_lvds_port_enabled(dev_priv, LVDS, &lvds_pipe) &&
+ lvds_pipe == crtc->pipe) {
+ u32 lvds = intel_de_read(dev_priv, LVDS);
+
+ clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
+ DPLL_FPA01_P1_POST_DIV_SHIFT);
+
+ if (lvds & LVDS_CLKB_POWER_UP)
+ clock.p2 = 7;
+ else
+ clock.p2 = 14;
+ } else {
+ if (dpll & PLL_P1_DIVIDE_BY_TWO)
+ clock.p1 = 2;
+ else {
+ clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
+ DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
+ }
+ if (dpll & PLL_P2_DIVIDE_BY_4)
+ clock.p2 = 4;
+ else
+ clock.p2 = 2;
+ }
+
+ port_clock = i9xx_calc_dpll_params(refclk, &clock);
+ }
+
+ /*
+ * This value includes pixel_multiplier. We will use
+ * port_clock to compute adjusted_mode.crtc_clock in the
+ * encoder's get_config() function.
+ */
+ pipe_config->port_clock = port_clock;
+}
+
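+/*
+ * Example with deliberately small (illustrative) M/N values:
+ * link_m = 11, link_n = 20 on a 270000 kHz link gives a dotclock of
+ * 11 * 270000 / 20 = 148500 kHz. Real hardware programs much larger
+ * M/N pairs with the same ratio.
+ */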
+int intel_dotclock_calculate(int link_freq,
+ const struct intel_link_m_n *m_n)
+{
+ /*
+	 * The calculation for the data clock (from the data M/N) is:
+	 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
+	 * But we want to avoid losing precision if possible, so:
+	 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
+	 *
+	 * and the calculation from the link M/N is simpler:
+	 * pixel_clock = (m * link_clock) / n
+ */
+
+ if (!m_n->link_n)
+ return 0;
+
+ return DIV_ROUND_UP_ULL(mul_u32_u32(m_n->link_m, link_freq),
+ m_n->link_n);
+}
+
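+/*
+ * For HDMI deep color the port clock runs faster than the dotclock in
+ * proportion to pipe_bpp: e.g. a 36 bpp (12 bpc) sink with a
+ * 222750 kHz port clock gives 222750 * 24 / 36 = 148500 kHz. 4:2:0
+ * output instead runs the port clock at half the dotclock, hence the
+ * *2 below.
+ */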
+int intel_crtc_dotclock(const struct intel_crtc_state *pipe_config)
+{
+ int dotclock;
+
+ if (intel_crtc_has_dp_encoder(pipe_config))
+ dotclock = intel_dotclock_calculate(pipe_config->port_clock,
+ &pipe_config->dp_m_n);
+ else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp > 24)
+ dotclock = DIV_ROUND_CLOSEST(pipe_config->port_clock * 24,
+ pipe_config->pipe_bpp);
+ else
+ dotclock = pipe_config->port_clock;
+
+ if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 &&
+ !intel_crtc_has_dp_encoder(pipe_config))
+ dotclock *= 2;
+
+ if (pipe_config->pixel_multiplier)
+ dotclock /= pipe_config->pixel_multiplier;
+
+ return dotclock;
+}
+
+/* Returns the currently programmed mode of the given encoder. */
+struct drm_display_mode *
+intel_encoder_current_mode(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_crtc_state *crtc_state;
+ struct drm_display_mode *mode;
+ struct intel_crtc *crtc;
+ enum pipe pipe;
+
+ if (!encoder->get_hw_state(encoder, &pipe))
+ return NULL;
+
+ crtc = intel_crtc_for_pipe(dev_priv, pipe);
+
+ mode = kzalloc(sizeof(*mode), GFP_KERNEL);
+ if (!mode)
+ return NULL;
+
+ crtc_state = intel_crtc_state_alloc(crtc);
+ if (!crtc_state) {
+ kfree(mode);
+ return NULL;
+ }
+
+ if (!intel_crtc_get_pipe_config(crtc_state)) {
+ kfree(crtc_state);
+ kfree(mode);
+ return NULL;
+ }
+
+ intel_encoder_get_config(encoder, crtc_state);
+
+ intel_mode_from_crtc_timings(mode, &crtc_state->hw.adjusted_mode);
+
+ kfree(crtc_state);
+
+ return mode;
+}
+
+static bool encoders_cloneable(const struct intel_encoder *a,
+ const struct intel_encoder *b)
+{
+ /* masks could be asymmetric, so check both ways */
+ return a == b || (a->cloneable & (1 << b->type) &&
+ b->cloneable & (1 << a->type));
+}
+
+static bool check_single_encoder_cloning(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
+{
+ struct intel_encoder *source_encoder;
+ struct drm_connector *connector;
+ struct drm_connector_state *connector_state;
+ int i;
+
+ for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
+ if (connector_state->crtc != &crtc->base)
+ continue;
+
+ source_encoder =
+ to_intel_encoder(connector_state->best_encoder);
+ if (!encoders_cloneable(encoder, source_encoder))
+ return false;
+ }
+
+ return true;
+}
+
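+/*
+ * On ICL+ a planar (e.g. NV12) framebuffer is scanned out by a pair of
+ * hardware planes: the UV plane the user handed us plus a linked Y
+ * plane. Pull any linked plane into the state so both are checked and
+ * programmed together.
+ */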
+static int icl_add_linked_planes(struct intel_atomic_state *state)
+{
+ struct intel_plane *plane, *linked;
+ struct intel_plane_state *plane_state, *linked_plane_state;
+ int i;
+
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+ linked = plane_state->planar_linked_plane;
+
+ if (!linked)
+ continue;
+
+ linked_plane_state = intel_atomic_get_plane_state(state, linked);
+ if (IS_ERR(linked_plane_state))
+ return PTR_ERR(linked_plane_state);
+
+ drm_WARN_ON(state->base.dev,
+ linked_plane_state->planar_linked_plane != plane);
+ drm_WARN_ON(state->base.dev,
+ linked_plane_state->planar_slave == plane_state->planar_slave);
+ }
+
+ return 0;
+}
+
+static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
+ struct intel_plane *plane, *linked;
+ struct intel_plane_state *plane_state;
+ int i;
+
+ if (DISPLAY_VER(dev_priv) < 11)
+ return 0;
+
+ /*
+ * Destroy all old plane links and make the slave plane invisible
+ * in the crtc_state->active_planes mask.
+ */
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+ if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane)
+ continue;
+
+ plane_state->planar_linked_plane = NULL;
+ if (plane_state->planar_slave && !plane_state->uapi.visible) {
+ crtc_state->enabled_planes &= ~BIT(plane->id);
+ crtc_state->active_planes &= ~BIT(plane->id);
+ crtc_state->update_planes |= BIT(plane->id);
+ crtc_state->data_rate[plane->id] = 0;
+ crtc_state->rel_data_rate[plane->id] = 0;
+ }
+
+ plane_state->planar_slave = false;
+ }
+
+ if (!crtc_state->nv12_planes)
+ return 0;
+
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+ struct intel_plane_state *linked_state = NULL;
+
+ if (plane->pipe != crtc->pipe ||
+ !(crtc_state->nv12_planes & BIT(plane->id)))
+ continue;
+
+ for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
+ if (!icl_is_nv12_y_plane(dev_priv, linked->id))
+ continue;
+
+ if (crtc_state->active_planes & BIT(linked->id))
+ continue;
+
+ linked_state = intel_atomic_get_plane_state(state, linked);
+ if (IS_ERR(linked_state))
+ return PTR_ERR(linked_state);
+
+ break;
+ }
+
+ if (!linked_state) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Need %d free Y planes for planar YUV\n",
+ hweight8(crtc_state->nv12_planes));
+
+ return -EINVAL;
+ }
+
+ plane_state->planar_linked_plane = linked;
+
+ linked_state->planar_slave = true;
+ linked_state->planar_linked_plane = plane;
+ crtc_state->enabled_planes |= BIT(linked->id);
+ crtc_state->active_planes |= BIT(linked->id);
+ crtc_state->update_planes |= BIT(linked->id);
+ crtc_state->data_rate[linked->id] =
+ crtc_state->data_rate_y[plane->id];
+ crtc_state->rel_data_rate[linked->id] =
+ crtc_state->rel_data_rate_y[plane->id];
+ drm_dbg_kms(&dev_priv->drm, "Using %s as Y plane for %s\n",
+ linked->base.name, plane->base.name);
+
+ /* Copy parameters to slave plane */
+ linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE;
+ linked_state->color_ctl = plane_state->color_ctl;
+ linked_state->view = plane_state->view;
+ linked_state->decrypt = plane_state->decrypt;
+
+ intel_plane_copy_hw_state(linked_state, plane_state);
+ linked_state->uapi.src = plane_state->uapi.src;
+ linked_state->uapi.dst = plane_state->uapi.dst;
+
+ if (icl_is_hdr_plane(dev_priv, plane->id)) {
+ if (linked->id == PLANE_SPRITE5)
+ plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_7_ICL;
+ else if (linked->id == PLANE_SPRITE4)
+ plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_6_ICL;
+ else if (linked->id == PLANE_SPRITE3)
+ plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_5_RKL;
+ else if (linked->id == PLANE_SPRITE2)
+ plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_4_RKL;
+ else
+ MISSING_CASE(linked->id);
+ }
+ }
+
+ return 0;
+}
+
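+/*
+ * c8_planes is a mask of planes using the C8 (palette) format; only
+ * its emptiness matters here, hence the boolean (!x) comparison of
+ * old vs. new.
+ */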
+static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
+ struct intel_atomic_state *state =
+ to_intel_atomic_state(new_crtc_state->uapi.state);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+
+ return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
+}
+
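+/*
+ * Line time in 1/8 us units: e.g. htotal = 2200 at a 148500 kHz
+ * dotclock gives 2200 * 1000 * 8 / 148500 ~= 119, i.e. roughly
+ * 14.9 us per line, clamped to the 9-bit (0x1ff) register field.
+ */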
+static u16 hsw_linetime_wm(const struct intel_crtc_state *crtc_state)
+{
+ const struct drm_display_mode *pipe_mode =
+ &crtc_state->hw.pipe_mode;
+ int linetime_wm;
+
+ if (!crtc_state->hw.enable)
+ return 0;
+
+ linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
+ pipe_mode->crtc_clock);
+
+ return min(linetime_wm, 0x1ff);
+}
+
+static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state,
+ const struct intel_cdclk_state *cdclk_state)
+{
+ const struct drm_display_mode *pipe_mode =
+ &crtc_state->hw.pipe_mode;
+ int linetime_wm;
+
+ if (!crtc_state->hw.enable)
+ return 0;
+
+ linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
+ cdclk_state->logical.cdclk);
+
+ return min(linetime_wm, 0x1ff);
+}
+
+static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct drm_display_mode *pipe_mode =
+ &crtc_state->hw.pipe_mode;
+ int linetime_wm;
+
+ if (!crtc_state->hw.enable)
+ return 0;
+
+ linetime_wm = DIV_ROUND_UP(pipe_mode->crtc_htotal * 1000 * 8,
+ crtc_state->pixel_rate);
+
+ /* Display WA #1135: BXT:ALL GLK:ALL */
+ if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
+ skl_watermark_ipc_enabled(dev_priv))
+ linetime_wm /= 2;
+
+ return min(linetime_wm, 0x1ff);
+}
+
+static int hsw_compute_linetime_wm(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct intel_cdclk_state *cdclk_state;
+
+ if (DISPLAY_VER(dev_priv) >= 9)
+ crtc_state->linetime = skl_linetime_wm(crtc_state);
+ else
+ crtc_state->linetime = hsw_linetime_wm(crtc_state);
+
+ if (!hsw_crtc_supports_ips(crtc))
+ return 0;
+
+ cdclk_state = intel_atomic_get_cdclk_state(state);
+ if (IS_ERR(cdclk_state))
+ return PTR_ERR(cdclk_state);
+
+ crtc_state->ips_linetime = hsw_ips_linetime_wm(crtc_state,
+ cdclk_state);
+
+ return 0;
+}
+
+static int intel_crtc_atomic_check(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ bool mode_changed = intel_crtc_needs_modeset(crtc_state);
+ int ret;
+
+ if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv) &&
+ mode_changed && !crtc_state->hw.active)
+ crtc_state->update_wm_post = true;
+
+ if (mode_changed) {
+ ret = intel_dpll_crtc_get_shared_dpll(state, crtc);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * May need to update pipe gamma enable bits
+ * when C8 planes are getting enabled/disabled.
+ */
+ if (c8_planes_changed(crtc_state))
+ crtc_state->uapi.color_mgmt_changed = true;
+
+ if (mode_changed || crtc_state->update_pipe ||
+ crtc_state->uapi.color_mgmt_changed) {
+ ret = intel_color_check(crtc_state);
+ if (ret)
+ return ret;
+ }
+
+ ret = intel_compute_pipe_wm(state, crtc);
+ if (ret) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Target pipe watermarks are invalid\n");
+ return ret;
+ }
+
+ /*
+ * Calculate 'intermediate' watermarks that satisfy both the
+ * old state and the new state. We can program these
+ * immediately.
+ */
+ ret = intel_compute_intermediate_wm(state, crtc);
+ if (ret) {
+ drm_dbg_kms(&dev_priv->drm,
+ "No valid intermediate pipe watermarks are possible\n");
+ return ret;
+ }
+
+ if (DISPLAY_VER(dev_priv) >= 9) {
+ if (mode_changed || crtc_state->update_pipe) {
+ ret = skl_update_scaler_crtc(crtc_state);
+ if (ret)
+ return ret;
+ }
+
+ ret = intel_atomic_setup_scalers(dev_priv, crtc, crtc_state);
+ if (ret)
+ return ret;
+ }
+
+ if (HAS_IPS(dev_priv)) {
+ ret = hsw_ips_compute_config(state, crtc);
+ if (ret)
+ return ret;
+ }
+
+ if (DISPLAY_VER(dev_priv) >= 9 ||
+ IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
+ ret = hsw_compute_linetime_wm(state, crtc);
+ if (ret)
+ return ret;
+	}
+
+ ret = intel_psr2_sel_fetch_update(state, crtc);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
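+/*
+ * Pipe bpp is always 3 * bpc. The connector's max_bpc is snapped down
+ * to the nearest bpc the pipe supports (6/8/10/12), so e.g.
+ * max_bpc = 9 lands on 8 bpc = 24 bpp.
+ */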
+static int
+compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_connector *connector = conn_state->connector;
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ const struct drm_display_info *info = &connector->display_info;
+ int bpp;
+
+ switch (conn_state->max_bpc) {
+ case 6 ... 7:
+ bpp = 6 * 3;
+ break;
+ case 8 ... 9:
+ bpp = 8 * 3;
+ break;
+ case 10 ... 11:
+ bpp = 10 * 3;
+ break;
+ case 12 ... 16:
+ bpp = 12 * 3;
+ break;
+ default:
+ MISSING_CASE(conn_state->max_bpc);
+ return -EINVAL;
+ }
+
+ if (bpp < crtc_state->pipe_bpp) {
+ drm_dbg_kms(&i915->drm,
+ "[CONNECTOR:%d:%s] Limiting display bpp to %d "
+ "(EDID bpp %d, max requested bpp %d, max platform bpp %d)\n",
+ connector->base.id, connector->name,
+ bpp, 3 * info->bpc,
+ 3 * conn_state->max_requested_bpc,
+ crtc_state->pipe_bpp);
+
+ crtc_state->pipe_bpp = bpp;
+ }
+
+ return 0;
+}
+
+static int
+compute_baseline_pipe_bpp(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct drm_connector *connector;
+ struct drm_connector_state *connector_state;
+ int bpp, i;
+
+ if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
+ IS_CHERRYVIEW(dev_priv)))
+ bpp = 10*3;
+ else if (DISPLAY_VER(dev_priv) >= 5)
+ bpp = 12*3;
+ else
+ bpp = 8*3;
+
+ crtc_state->pipe_bpp = bpp;
+
+ /* Clamp display bpp to connector max bpp */
+ for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
+ int ret;
+
+ if (connector_state->crtc != &crtc->base)
+ continue;
+
+ ret = compute_sink_pipe_bpp(connector_state, crtc_state);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static bool check_digital_port_conflicts(struct intel_atomic_state *state)
+{
+ struct drm_device *dev = state->base.dev;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+ unsigned int used_ports = 0;
+ unsigned int used_mst_ports = 0;
+ bool ret = true;
+
+ /*
+ * We're going to peek into connector->state,
+ * hence connection_mutex must be held.
+ */
+ drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex);
+
+ /*
+ * Walk the connector list instead of the encoder
+ * list to detect the problem on ddi platforms
+ * where there's just one encoder per digital port.
+ */
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ struct drm_connector_state *connector_state;
+ struct intel_encoder *encoder;
+
+ connector_state =
+ drm_atomic_get_new_connector_state(&state->base,
+ connector);
+ if (!connector_state)
+ connector_state = connector->state;
+
+ if (!connector_state->best_encoder)
+ continue;
+
+ encoder = to_intel_encoder(connector_state->best_encoder);
+
+ drm_WARN_ON(dev, !connector_state->crtc);
+
+ switch (encoder->type) {
+ case INTEL_OUTPUT_DDI:
+ if (drm_WARN_ON(dev, !HAS_DDI(to_i915(dev))))
+ break;
+ fallthrough;
+ case INTEL_OUTPUT_DP:
+ case INTEL_OUTPUT_HDMI:
+ case INTEL_OUTPUT_EDP:
+ /* the same port mustn't appear more than once */
+ if (used_ports & BIT(encoder->port))
+ ret = false;
+
+ used_ports |= BIT(encoder->port);
+ break;
+ case INTEL_OUTPUT_DP_MST:
+ used_mst_ports |=
+ 1 << encoder->port;
+ break;
+ default:
+ break;
+ }
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ /* can't mix MST and SST/HDMI on the same port */
+ if (used_ports & used_mst_ports)
+ return false;
+
+ return ret;
+}
+
+static void
+intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ WARN_ON(intel_crtc_is_bigjoiner_slave(crtc_state));
+
+ drm_property_replace_blob(&crtc_state->hw.degamma_lut,
+ crtc_state->uapi.degamma_lut);
+ drm_property_replace_blob(&crtc_state->hw.gamma_lut,
+ crtc_state->uapi.gamma_lut);
+ drm_property_replace_blob(&crtc_state->hw.ctm,
+ crtc_state->uapi.ctm);
+}
+
+static void
+intel_crtc_copy_uapi_to_hw_state_modeset(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ WARN_ON(intel_crtc_is_bigjoiner_slave(crtc_state));
+
+ crtc_state->hw.enable = crtc_state->uapi.enable;
+ crtc_state->hw.active = crtc_state->uapi.active;
+ drm_mode_copy(&crtc_state->hw.mode,
+ &crtc_state->uapi.mode);
+ drm_mode_copy(&crtc_state->hw.adjusted_mode,
+ &crtc_state->uapi.adjusted_mode);
+ crtc_state->hw.scaling_filter = crtc_state->uapi.scaling_filter;
+
+ intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc);
+}
+
+static void
+copy_bigjoiner_crtc_state_nomodeset(struct intel_atomic_state *state,
+ struct intel_crtc *slave_crtc)
+{
+ struct intel_crtc_state *slave_crtc_state =
+ intel_atomic_get_new_crtc_state(state, slave_crtc);
+ struct intel_crtc *master_crtc = intel_master_crtc(slave_crtc_state);
+ const struct intel_crtc_state *master_crtc_state =
+ intel_atomic_get_new_crtc_state(state, master_crtc);
+
+ drm_property_replace_blob(&slave_crtc_state->hw.degamma_lut,
+ master_crtc_state->hw.degamma_lut);
+ drm_property_replace_blob(&slave_crtc_state->hw.gamma_lut,
+ master_crtc_state->hw.gamma_lut);
+ drm_property_replace_blob(&slave_crtc_state->hw.ctm,
+ master_crtc_state->hw.ctm);
+
+ slave_crtc_state->uapi.color_mgmt_changed = master_crtc_state->uapi.color_mgmt_changed;
+}
+
+static int
+copy_bigjoiner_crtc_state_modeset(struct intel_atomic_state *state,
+ struct intel_crtc *slave_crtc)
+{
+ struct intel_crtc_state *slave_crtc_state =
+ intel_atomic_get_new_crtc_state(state, slave_crtc);
+ struct intel_crtc *master_crtc = intel_master_crtc(slave_crtc_state);
+ const struct intel_crtc_state *master_crtc_state =
+ intel_atomic_get_new_crtc_state(state, master_crtc);
+ struct intel_crtc_state *saved_state;
+
+ WARN_ON(master_crtc_state->bigjoiner_pipes !=
+ slave_crtc_state->bigjoiner_pipes);
+
+ saved_state = kmemdup(master_crtc_state, sizeof(*saved_state), GFP_KERNEL);
+ if (!saved_state)
+ return -ENOMEM;
+
+ /* preserve some things from the slave's original crtc state */
+ saved_state->uapi = slave_crtc_state->uapi;
+ saved_state->scaler_state = slave_crtc_state->scaler_state;
+ saved_state->shared_dpll = slave_crtc_state->shared_dpll;
+ saved_state->crc_enabled = slave_crtc_state->crc_enabled;
+
+ intel_crtc_free_hw_state(slave_crtc_state);
+ memcpy(slave_crtc_state, saved_state, sizeof(*slave_crtc_state));
+ kfree(saved_state);
+
+ /* Re-init hw state */
+ memset(&slave_crtc_state->hw, 0, sizeof(slave_crtc_state->hw));
+ slave_crtc_state->hw.enable = master_crtc_state->hw.enable;
+ slave_crtc_state->hw.active = master_crtc_state->hw.active;
+ drm_mode_copy(&slave_crtc_state->hw.mode,
+ &master_crtc_state->hw.mode);
+ drm_mode_copy(&slave_crtc_state->hw.pipe_mode,
+ &master_crtc_state->hw.pipe_mode);
+ drm_mode_copy(&slave_crtc_state->hw.adjusted_mode,
+ &master_crtc_state->hw.adjusted_mode);
+ slave_crtc_state->hw.scaling_filter = master_crtc_state->hw.scaling_filter;
+
+ copy_bigjoiner_crtc_state_nomodeset(state, slave_crtc);
+
+ slave_crtc_state->uapi.mode_changed = master_crtc_state->uapi.mode_changed;
+ slave_crtc_state->uapi.connectors_changed = master_crtc_state->uapi.connectors_changed;
+ slave_crtc_state->uapi.active_changed = master_crtc_state->uapi.active_changed;
+
+ WARN_ON(master_crtc_state->bigjoiner_pipes !=
+ slave_crtc_state->bigjoiner_pipes);
+
+ return 0;
+}
+
+static int
+intel_crtc_prepare_cleared_state(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_crtc_state *saved_state;
+
+ saved_state = intel_crtc_state_alloc(crtc);
+ if (!saved_state)
+ return -ENOMEM;
+
+ /* free the old crtc_state->hw members */
+ intel_crtc_free_hw_state(crtc_state);
+
+ /* FIXME: before the switch to atomic started, a new pipe_config was
+ * kzalloc'd. Code that depends on any field being zero should be
+ * fixed, so that the crtc_state can be safely duplicated. For now,
+ * only fields that are known to not cause problems are preserved. */
+
+ saved_state->uapi = crtc_state->uapi;
+ saved_state->inherited = crtc_state->inherited;
+ saved_state->scaler_state = crtc_state->scaler_state;
+ saved_state->shared_dpll = crtc_state->shared_dpll;
+ saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
+ memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls,
+ sizeof(saved_state->icl_port_dplls));
+ saved_state->crc_enabled = crtc_state->crc_enabled;
+ if (IS_G4X(dev_priv) ||
+ IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ saved_state->wm = crtc_state->wm;
+
+ memcpy(crtc_state, saved_state, sizeof(*crtc_state));
+ kfree(saved_state);
+
+ intel_crtc_copy_uapi_to_hw_state_modeset(state, crtc);
+
+ return 0;
+}
+
+static int
+intel_modeset_pipe_config(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct drm_connector *connector;
+ struct drm_connector_state *connector_state;
+ int pipe_src_w, pipe_src_h;
+ int base_bpp, ret, i;
+ bool retry = true;
+
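+ /*
+ * Default to the pipe's own transcoder; the encoder
+ * ->compute_config() hooks may select a different one later
+ * (e.g. the eDP or DSI transcoders on some platforms).
+ */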
+ crtc_state->cpu_transcoder = (enum transcoder) crtc->pipe;
+
+ crtc_state->framestart_delay = 1;
+
+ /*
+ * Sanitize sync polarity flags based on requested ones. If neither
+ * positive nor negative polarity is requested, treat this as meaning
+ * negative polarity.
+ */
+ if (!(crtc_state->hw.adjusted_mode.flags &
+ (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
+ crtc_state->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
+
+ if (!(crtc_state->hw.adjusted_mode.flags &
+ (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
+ crtc_state->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
+
+ ret = compute_baseline_pipe_bpp(state, crtc);
+ if (ret)
+ return ret;
+
+ base_bpp = crtc_state->pipe_bpp;
+
+ /*
+ * Determine the real pipe dimensions. Note that stereo modes can
+ * increase the actual pipe size due to the frame doubling and
+ * insertion of additional space for blanks between the frames. This
+ * is stored in the crtc timings. We use the requested mode to do this
+ * computation to clearly distinguish it from the adjusted mode, which
+ * can be changed by the connectors in the below retry loop.
+ */
+ drm_mode_get_hv_timing(&crtc_state->hw.mode,
+ &pipe_src_w, &pipe_src_h);
+ drm_rect_init(&crtc_state->pipe_src, 0, 0,
+ pipe_src_w, pipe_src_h);
+
+ for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
+ struct intel_encoder *encoder =
+ to_intel_encoder(connector_state->best_encoder);
+
+ if (connector_state->crtc != &crtc->base)
+ continue;
+
+ if (!check_single_encoder_cloning(state, crtc, encoder)) {
+ drm_dbg_kms(&i915->drm,
+ "[ENCODER:%d:%s] rejecting invalid cloning configuration\n",
+ encoder->base.base.id, encoder->base.name);
+ return -EINVAL;
+ }
+
+ /*
+ * Determine output_types before calling the .compute_config()
+ * hooks so that the hooks can use this information safely.
+ */
+ if (encoder->compute_output_type)
+ crtc_state->output_types |=
+ BIT(encoder->compute_output_type(encoder, crtc_state,
+ connector_state));
+ else
+ crtc_state->output_types |= BIT(encoder->type);
+ }
+
+encoder_retry:
+ /* Ensure the port clock defaults are reset when retrying. */
+ crtc_state->port_clock = 0;
+ crtc_state->pixel_multiplier = 1;
+
+ /* Fill in default crtc timings, allow encoders to overwrite them. */
+ drm_mode_set_crtcinfo(&crtc_state->hw.adjusted_mode,
+ CRTC_STEREO_DOUBLE);
+
+ /* Pass our mode to the connectors and the CRTC to give them a chance to
+ * adjust it according to limitations or connector properties, and also
+ * a chance to reject the mode entirely.
+ */
+ for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
+ struct intel_encoder *encoder =
+ to_intel_encoder(connector_state->best_encoder);
+
+ if (connector_state->crtc != &crtc->base)
+ continue;
+
+ ret = encoder->compute_config(encoder, crtc_state,
+ connector_state);
+ if (ret == -EDEADLK)
+ return ret;
+ if (ret < 0) {
+ drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] config failure: %d\n",
+ encoder->base.base.id, encoder->base.name, ret);
+ return ret;
+ }
+ }
+
+ /* Set default port clock if not overwritten by the encoder. Needs to be
+ * done afterwards in case the encoder adjusts the mode. */
+ if (!crtc_state->port_clock)
+ crtc_state->port_clock = crtc_state->hw.adjusted_mode.crtc_clock
+ * crtc_state->pixel_multiplier;
+
+ ret = intel_crtc_compute_config(state, crtc);
+ if (ret == -EDEADLK)
+ return ret;
+ if (ret == -EAGAIN) {
+ if (drm_WARN(&i915->drm, !retry,
+ "[CRTC:%d:%s] loop in pipe configuration computation\n",
+ crtc->base.base.id, crtc->base.name))
+ return -EINVAL;
+
+ drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] bw constrained, retrying\n",
+ crtc->base.base.id, crtc->base.name);
+ retry = false;
+ goto encoder_retry;
+ }
+ if (ret < 0) {
+ drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] config failure: %d\n",
+ crtc->base.base.id, crtc->base.name, ret);
+ return ret;
+ }
+
+ /* Dithering seems to not pass through bits correctly when it should, so
+ * only enable it on 6bpc panels and when it's not a compliance
+ * test requesting 6bpc video pattern.
+ */
+ crtc_state->dither = (crtc_state->pipe_bpp == 6*3) &&
+ !crtc_state->dither_force_disable;
+ drm_dbg_kms(&i915->drm,
+ "[CRTC:%d:%s] hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
+ crtc->base.base.id, crtc->base.name,
+ base_bpp, crtc_state->pipe_bpp, crtc_state->dither);
+
+ return 0;
+}
+
+static int
+intel_modeset_pipe_config_late(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct drm_connector_state *conn_state;
+ struct drm_connector *connector;
+ int i;
+
+ intel_bigjoiner_adjust_pipe_src(crtc_state);
+
+ for_each_new_connector_in_state(&state->base, connector,
+ conn_state, i) {
+ struct intel_encoder *encoder =
+ to_intel_encoder(conn_state->best_encoder);
+ int ret;
+
+ if (conn_state->crtc != &crtc->base ||
+ !encoder->compute_config_late)
+ continue;
+
+ ret = encoder->compute_config_late(encoder, crtc_state,
+ conn_state);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+bool intel_fuzzy_clock_check(int clock1, int clock2)
+{
+ int diff;
+
+ if (clock1 == clock2)
+ return true;
+
+ if (!clock1 || !clock2)
+ return false;
+
+ diff = abs(clock1 - clock2);
+
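+ /*
+ * Treat the clocks as equal if they differ by less than ~5% of
+ * their sum, i.e. roughly 10% of either clock. E.g. 100000 vs.
+ * 109000 passes, 100000 vs. 111000 does not.
+ */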
+ if (((diff + clock1 + clock2) * 100) / (clock1 + clock2) < 105)
+ return true;
+
+ return false;
+}
+
+static bool
+intel_compare_link_m_n(const struct intel_link_m_n *m_n,
+ const struct intel_link_m_n *m2_n2)
+{
+ return m_n->tu == m2_n2->tu &&
+ m_n->data_m == m2_n2->data_m &&
+ m_n->data_n == m2_n2->data_n &&
+ m_n->link_m == m2_n2->link_m &&
+ m_n->link_n == m2_n2->link_n;
+}
+
+static bool
+intel_compare_infoframe(const union hdmi_infoframe *a,
+ const union hdmi_infoframe *b)
+{
+ return memcmp(a, b, sizeof(*a)) == 0;
+}
+
+static bool
+intel_compare_dp_vsc_sdp(const struct drm_dp_vsc_sdp *a,
+ const struct drm_dp_vsc_sdp *b)
+{
+ return memcmp(a, b, sizeof(*a)) == 0;
+}
+
+static void
+pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
+ bool fastset, const char *name,
+ const union hdmi_infoframe *a,
+ const union hdmi_infoframe *b)
+{
+ if (fastset) {
+ if (!drm_debug_enabled(DRM_UT_KMS))
+ return;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "fastset mismatch in %s infoframe\n", name);
+ drm_dbg_kms(&dev_priv->drm, "expected:\n");
+ hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
+ drm_dbg_kms(&dev_priv->drm, "found:\n");
+ hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
+ } else {
+ drm_err(&dev_priv->drm, "mismatch in %s infoframe\n", name);
+ drm_err(&dev_priv->drm, "expected:\n");
+ hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
+ drm_err(&dev_priv->drm, "found:\n");
+ hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
+ }
+}
+
+static void
+pipe_config_dp_vsc_sdp_mismatch(struct drm_i915_private *dev_priv,
+ bool fastset, const char *name,
+ const struct drm_dp_vsc_sdp *a,
+ const struct drm_dp_vsc_sdp *b)
+{
+ if (fastset) {
+ if (!drm_debug_enabled(DRM_UT_KMS))
+ return;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "fastset mismatch in %s dp sdp\n", name);
+ drm_dbg_kms(&dev_priv->drm, "expected:\n");
+ drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, a);
+ drm_dbg_kms(&dev_priv->drm, "found:\n");
+ drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, b);
+ } else {
+ drm_err(&dev_priv->drm, "mismatch in %s dp sdp\n", name);
+ drm_err(&dev_priv->drm, "expected:\n");
+ drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, a);
+ drm_err(&dev_priv->drm, "found:\n");
+ drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, b);
+ }
+}
+
+static void __printf(4, 5)
+pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
+ const char *name, const char *format, ...)
+{
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, format);
+ vaf.fmt = format;
+ vaf.va = &args;
+
+ if (fastset)
+ drm_dbg_kms(&i915->drm,
+ "[CRTC:%d:%s] fastset mismatch in %s %pV\n",
+ crtc->base.base.id, crtc->base.name, name, &vaf);
+ else
+ drm_err(&i915->drm, "[CRTC:%d:%s] mismatch in %s %pV\n",
+ crtc->base.base.id, crtc->base.name, name, &vaf);
+
+ va_end(args);
+}
+
+static bool fastboot_enabled(struct drm_i915_private *dev_priv)
+{
+ if (dev_priv->params.fastboot != -1)
+ return dev_priv->params.fastboot;
+
+ /* Enable fastboot by default on Skylake and newer */
+ if (DISPLAY_VER(dev_priv) >= 9)
+ return true;
+
+ /* Enable fastboot by default on VLV and CHV */
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ return true;
+
+ /* Disabled by default on all others */
+ return false;
+}
+
+bool
+intel_pipe_config_compare(const struct intel_crtc_state *current_config,
+ const struct intel_crtc_state *pipe_config,
+ bool fastset)
+{
+ struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+ bool ret = true;
+ u32 bp_gamma = 0;
+ bool fixup_inherited = fastset &&
+ current_config->inherited && !pipe_config->inherited;
+
+ if (fixup_inherited && !fastboot_enabled(dev_priv)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "initial modeset and fastboot not set\n");
+ ret = false;
+ }
+
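+ /*
+ * The PIPE_CONF_CHECK_* macros below compare a single field of
+ * current_config against pipe_config, log any mismatch via
+ * pipe_config_mismatch() and clear 'ret', so that one pass reports
+ * every mismatching field instead of bailing at the first one.
+ */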
+#define PIPE_CONF_CHECK_X(name) do { \
+ if (current_config->name != pipe_config->name) { \
+ pipe_config_mismatch(fastset, crtc, __stringify(name), \
+ "(expected 0x%08x, found 0x%08x)", \
+ current_config->name, \
+ pipe_config->name); \
+ ret = false; \
+ } \
+} while (0)
+
+#define PIPE_CONF_CHECK_X_WITH_MASK(name, mask) do { \
+ if ((current_config->name & (mask)) != (pipe_config->name & (mask))) { \
+ pipe_config_mismatch(fastset, crtc, __stringify(name), \
+ "(expected 0x%08x, found 0x%08x)", \
+ current_config->name & (mask), \
+ pipe_config->name & (mask)); \
+ ret = false; \
+ } \
+} while (0)
+
+#define PIPE_CONF_CHECK_I(name) do { \
+ if (current_config->name != pipe_config->name) { \
+ pipe_config_mismatch(fastset, crtc, __stringify(name), \
+ "(expected %i, found %i)", \
+ current_config->name, \
+ pipe_config->name); \
+ ret = false; \
+ } \
+} while (0)
+
+#define PIPE_CONF_CHECK_BOOL(name) do { \
+ if (current_config->name != pipe_config->name) { \
+ pipe_config_mismatch(fastset, crtc, __stringify(name), \
+ "(expected %s, found %s)", \
+ str_yes_no(current_config->name), \
+ str_yes_no(pipe_config->name)); \
+ ret = false; \
+ } \
+} while (0)
+
+/*
+ * Checks state where we only read out the enabling, but not the entire
+ * state itself (like full infoframes or ELD for audio). These states
+ * require a full modeset on bootup to fix up.
+ */
+#define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
+ if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
+ PIPE_CONF_CHECK_BOOL(name); \
+ } else { \
+ pipe_config_mismatch(fastset, crtc, __stringify(name), \
+ "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)", \
+ str_yes_no(current_config->name), \
+ str_yes_no(pipe_config->name)); \
+ ret = false; \
+ } \
+} while (0)
+
+#define PIPE_CONF_CHECK_P(name) do { \
+ if (current_config->name != pipe_config->name) { \
+ pipe_config_mismatch(fastset, crtc, __stringify(name), \
+ "(expected %p, found %p)", \
+ current_config->name, \
+ pipe_config->name); \
+ ret = false; \
+ } \
+} while (0)
+
+#define PIPE_CONF_CHECK_M_N(name) do { \
+ if (!intel_compare_link_m_n(&current_config->name, \
+ &pipe_config->name)) { \
+ pipe_config_mismatch(fastset, crtc, __stringify(name), \
+ "(expected tu %i data %i/%i link %i/%i, " \
+ "found tu %i, data %i/%i link %i/%i)", \
+ current_config->name.tu, \
+ current_config->name.data_m, \
+ current_config->name.data_n, \
+ current_config->name.link_m, \
+ current_config->name.link_n, \
+ pipe_config->name.tu, \
+ pipe_config->name.data_m, \
+ pipe_config->name.data_n, \
+ pipe_config->name.link_m, \
+ pipe_config->name.link_n); \
+ ret = false; \
+ } \
+} while (0)
+
+#define PIPE_CONF_CHECK_TIMINGS(name) do { \
+ PIPE_CONF_CHECK_I(name.crtc_hdisplay); \
+ PIPE_CONF_CHECK_I(name.crtc_htotal); \
+ PIPE_CONF_CHECK_I(name.crtc_hblank_start); \
+ PIPE_CONF_CHECK_I(name.crtc_hblank_end); \
+ PIPE_CONF_CHECK_I(name.crtc_hsync_start); \
+ PIPE_CONF_CHECK_I(name.crtc_hsync_end); \
+ PIPE_CONF_CHECK_I(name.crtc_vdisplay); \
+ PIPE_CONF_CHECK_I(name.crtc_vtotal); \
+ PIPE_CONF_CHECK_I(name.crtc_vblank_start); \
+ PIPE_CONF_CHECK_I(name.crtc_vblank_end); \
+ PIPE_CONF_CHECK_I(name.crtc_vsync_start); \
+ PIPE_CONF_CHECK_I(name.crtc_vsync_end); \
+} while (0)
+
+#define PIPE_CONF_CHECK_RECT(name) do { \
+ PIPE_CONF_CHECK_I(name.x1); \
+ PIPE_CONF_CHECK_I(name.x2); \
+ PIPE_CONF_CHECK_I(name.y1); \
+ PIPE_CONF_CHECK_I(name.y2); \
+} while (0)
+
+/* This is required for BDW+ where there is only one set of registers for
+ * switching between high and low RR.
+ * This macro can be used whenever a comparison has to be made between one
+ * hw state and multiple sw state variables.
+ */
+#define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
+ if (!intel_compare_link_m_n(&current_config->name, \
+ &pipe_config->name) && \
+ !intel_compare_link_m_n(&current_config->alt_name, \
+ &pipe_config->name)) { \
+ pipe_config_mismatch(fastset, crtc, __stringify(name), \
+ "(expected tu %i data %i/%i link %i/%i, " \
+ "or tu %i data %i/%i link %i/%i, " \
+ "found tu %i, data %i/%i link %i/%i)", \
+ current_config->name.tu, \
+ current_config->name.data_m, \
+ current_config->name.data_n, \
+ current_config->name.link_m, \
+ current_config->name.link_n, \
+ current_config->alt_name.tu, \
+ current_config->alt_name.data_m, \
+ current_config->alt_name.data_n, \
+ current_config->alt_name.link_m, \
+ current_config->alt_name.link_n, \
+ pipe_config->name.tu, \
+ pipe_config->name.data_m, \
+ pipe_config->name.data_n, \
+ pipe_config->name.link_m, \
+ pipe_config->name.link_n); \
+ ret = false; \
+ } \
+} while (0)
+
+#define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
+ if ((current_config->name ^ pipe_config->name) & (mask)) { \
+ pipe_config_mismatch(fastset, crtc, __stringify(name), \
+ "(%x) (expected %i, found %i)", \
+ (mask), \
+ current_config->name & (mask), \
+ pipe_config->name & (mask)); \
+ ret = false; \
+ } \
+} while (0)
+
+#define PIPE_CONF_CHECK_INFOFRAME(name) do { \
+ if (!intel_compare_infoframe(&current_config->infoframes.name, \
+ &pipe_config->infoframes.name)) { \
+ pipe_config_infoframe_mismatch(dev_priv, fastset, __stringify(name), \
+ &current_config->infoframes.name, \
+ &pipe_config->infoframes.name); \
+ ret = false; \
+ } \
+} while (0)
+
+#define PIPE_CONF_CHECK_DP_VSC_SDP(name) do { \
+ if (!current_config->has_psr && !pipe_config->has_psr && \
+ !intel_compare_dp_vsc_sdp(&current_config->infoframes.name, \
+ &pipe_config->infoframes.name)) { \
+ pipe_config_dp_vsc_sdp_mismatch(dev_priv, fastset, __stringify(name), \
+ &current_config->infoframes.name, \
+ &pipe_config->infoframes.name); \
+ ret = false; \
+ } \
+} while (0)
+
+#define PIPE_CONF_CHECK_COLOR_LUT(name1, name2, bit_precision) do { \
+ if (current_config->name1 != pipe_config->name1) { \
+ pipe_config_mismatch(fastset, crtc, __stringify(name1), \
+ "(expected %i, found %i, won't compare lut values)", \
+ current_config->name1, \
+ pipe_config->name1); \
+ ret = false;\
+ } else { \
+ if (!intel_color_lut_equal(current_config->name2, \
+ pipe_config->name2, pipe_config->name1, \
+ bit_precision)) { \
+ pipe_config_mismatch(fastset, crtc, __stringify(name2), \
+ "hw_state doesn't match sw_state"); \
+ ret = false; \
+ } \
+ } \
+} while (0)
+
+#define PIPE_CONF_QUIRK(quirk) \
+ ((current_config->quirks | pipe_config->quirks) & (quirk))
+
+ PIPE_CONF_CHECK_I(hw.enable);
+ PIPE_CONF_CHECK_I(hw.active);
+
+ PIPE_CONF_CHECK_I(cpu_transcoder);
+ PIPE_CONF_CHECK_I(mst_master_transcoder);
+
+ PIPE_CONF_CHECK_BOOL(has_pch_encoder);
+ PIPE_CONF_CHECK_I(fdi_lanes);
+ PIPE_CONF_CHECK_M_N(fdi_m_n);
+
+ PIPE_CONF_CHECK_I(lane_count);
+ PIPE_CONF_CHECK_X(lane_lat_optim_mask);
+
+ if (HAS_DOUBLE_BUFFERED_M_N(dev_priv)) {
+ if (!fastset || !pipe_config->seamless_m_n)
+ PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
+ } else {
+ PIPE_CONF_CHECK_M_N(dp_m_n);
+ PIPE_CONF_CHECK_M_N(dp_m2_n2);
+ }
+
+ PIPE_CONF_CHECK_X(output_types);
+
+ PIPE_CONF_CHECK_I(framestart_delay);
+ PIPE_CONF_CHECK_I(msa_timing_delay);
+
+ PIPE_CONF_CHECK_TIMINGS(hw.pipe_mode);
+ PIPE_CONF_CHECK_TIMINGS(hw.adjusted_mode);
+
+ PIPE_CONF_CHECK_I(pixel_multiplier);
+
+ PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
+ DRM_MODE_FLAG_INTERLACE);
+
+ if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
+ PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
+ DRM_MODE_FLAG_PHSYNC);
+ PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
+ DRM_MODE_FLAG_NHSYNC);
+ PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
+ DRM_MODE_FLAG_PVSYNC);
+ PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
+ DRM_MODE_FLAG_NVSYNC);
+ }
+
+ PIPE_CONF_CHECK_I(output_format);
+ PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
+ if ((DISPLAY_VER(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
+ IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ PIPE_CONF_CHECK_BOOL(limited_color_range);
+
+ PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
+ PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
+ PIPE_CONF_CHECK_BOOL(has_infoframe);
+ PIPE_CONF_CHECK_BOOL(fec_enable);
+
+ PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
+
+ PIPE_CONF_CHECK_X(gmch_pfit.control);
+ /* pfit ratios are autocomputed by the hw on gen4+ */
+ if (DISPLAY_VER(dev_priv) < 4)
+ PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
+ PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
+
+ /*
+ * Changing the EDP transcoder input mux
+ * (A_ONOFF vs. A_ON) requires a full modeset.
+ */
+ PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru);
+
+ if (!fastset) {
+ PIPE_CONF_CHECK_RECT(pipe_src);
+
+ PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
+ PIPE_CONF_CHECK_RECT(pch_pfit.dst);
+
+ PIPE_CONF_CHECK_I(scaler_state.scaler_id);
+ PIPE_CONF_CHECK_I(pixel_rate);
+
+ PIPE_CONF_CHECK_X(gamma_mode);
+ if (IS_CHERRYVIEW(dev_priv))
+ PIPE_CONF_CHECK_X(cgm_mode);
+ else
+ PIPE_CONF_CHECK_X(csc_mode);
+ PIPE_CONF_CHECK_BOOL(gamma_enable);
+ PIPE_CONF_CHECK_BOOL(csc_enable);
+
+ PIPE_CONF_CHECK_I(linetime);
+ PIPE_CONF_CHECK_I(ips_linetime);
+
+ bp_gamma = intel_color_get_gamma_bit_precision(pipe_config);
+ if (bp_gamma)
+ PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, hw.gamma_lut, bp_gamma);
+
+ if (current_config->active_planes) {
+ PIPE_CONF_CHECK_BOOL(has_psr);
+ PIPE_CONF_CHECK_BOOL(has_psr2);
+ PIPE_CONF_CHECK_BOOL(enable_psr2_sel_fetch);
+ PIPE_CONF_CHECK_I(dc3co_exitline);
+ }
+ }
+
+ PIPE_CONF_CHECK_BOOL(double_wide);
+
+ if (dev_priv->display.dpll.mgr) {
+ PIPE_CONF_CHECK_P(shared_dpll);
+
+ PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
+ PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
+ PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
+ PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
+ PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
+ PIPE_CONF_CHECK_X(dpll_hw_state.spll);
+ PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
+ PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
+ PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
+ PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
+ PIPE_CONF_CHECK_X(dpll_hw_state.div0);
+ PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
+ PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
+ PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
+ PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
+ PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
+ PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
+ PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
+ PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
+ PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
+ PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
+ PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
+ PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
+ PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
+ PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
+ PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
+ PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
+ PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
+ PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
+ PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
+ PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
+ PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);
+ }
+
+ PIPE_CONF_CHECK_X(dsi_pll.ctrl);
+ PIPE_CONF_CHECK_X(dsi_pll.div);
+
+ if (IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) >= 5)
+ PIPE_CONF_CHECK_I(pipe_bpp);
+
+ if (!fastset || !pipe_config->seamless_m_n) {
+ PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_clock);
+ PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_clock);
+ }
+ PIPE_CONF_CHECK_I(port_clock);
+
+ PIPE_CONF_CHECK_I(min_voltage_level);
+
+ if (current_config->has_psr || pipe_config->has_psr)
+ PIPE_CONF_CHECK_X_WITH_MASK(infoframes.enable,
+ ~intel_hdmi_infoframe_enable(DP_SDP_VSC));
+ else
+ PIPE_CONF_CHECK_X(infoframes.enable);
+
+ PIPE_CONF_CHECK_X(infoframes.gcp);
+ PIPE_CONF_CHECK_INFOFRAME(avi);
+ PIPE_CONF_CHECK_INFOFRAME(spd);
+ PIPE_CONF_CHECK_INFOFRAME(hdmi);
+ PIPE_CONF_CHECK_INFOFRAME(drm);
+ PIPE_CONF_CHECK_DP_VSC_SDP(vsc);
+
+ PIPE_CONF_CHECK_X(sync_mode_slaves_mask);
+ PIPE_CONF_CHECK_I(master_transcoder);
+ PIPE_CONF_CHECK_X(bigjoiner_pipes);
+
+ PIPE_CONF_CHECK_I(dsc.compression_enable);
+ PIPE_CONF_CHECK_I(dsc.dsc_split);
+ PIPE_CONF_CHECK_I(dsc.compressed_bpp);
+
+ PIPE_CONF_CHECK_BOOL(splitter.enable);
+ PIPE_CONF_CHECK_I(splitter.link_count);
+ PIPE_CONF_CHECK_I(splitter.pixel_overlap);
+
+ PIPE_CONF_CHECK_BOOL(vrr.enable);
+ PIPE_CONF_CHECK_I(vrr.vmin);
+ PIPE_CONF_CHECK_I(vrr.vmax);
+ PIPE_CONF_CHECK_I(vrr.flipline);
+ PIPE_CONF_CHECK_I(vrr.pipeline_full);
+ PIPE_CONF_CHECK_I(vrr.guardband);
+
+#undef PIPE_CONF_CHECK_X
+#undef PIPE_CONF_CHECK_I
+#undef PIPE_CONF_CHECK_BOOL
+#undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
+#undef PIPE_CONF_CHECK_P
+#undef PIPE_CONF_CHECK_FLAGS
+#undef PIPE_CONF_CHECK_COLOR_LUT
+#undef PIPE_CONF_CHECK_TIMINGS
+#undef PIPE_CONF_CHECK_RECT
+#undef PIPE_CONF_QUIRK
+
+ return ret;
+}
+
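+ /*
+ * Sanity check: every plane in the state must either be a planar
+ * (UV) slave or actually be visible.
+ */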
+static void
+intel_verify_planes(struct intel_atomic_state *state)
+{
+ struct intel_plane *plane;
+ const struct intel_plane_state *plane_state;
+ int i;
+
+ for_each_new_intel_plane_in_state(state, plane,
+ plane_state, i)
+ assert_plane(plane, plane_state->planar_slave ||
+ plane_state->uapi.visible);
+}
+
+int intel_modeset_all_pipes(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc *crtc;
+
+ /*
+ * Add all pipes to the state, and force
+ * a modeset on all the active ones.
+ */
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ struct intel_crtc_state *crtc_state;
+ int ret;
+
+ crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ if (!crtc_state->hw.active ||
+ drm_atomic_crtc_needs_modeset(&crtc_state->uapi))
+ continue;
+
+ crtc_state->uapi.mode_changed = true;
+
+ ret = drm_atomic_add_affected_connectors(&state->base,
+ &crtc->base);
+ if (ret)
+ return ret;
+
+ ret = intel_dp_mst_add_topology_state_for_crtc(state, crtc);
+ if (ret)
+ return ret;
+
+ ret = intel_atomic_add_affected_planes(state, crtc);
+ if (ret)
+ return ret;
+
+ crtc_state->update_planes |= crtc_state->active_planes;
+ }
+
+ return 0;
+}
+
+void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct drm_display_mode adjusted_mode;
+
+ drm_mode_init(&adjusted_mode, &crtc_state->hw.adjusted_mode);
+
+ if (crtc_state->vrr.enable) {
+ adjusted_mode.crtc_vtotal = crtc_state->vrr.vmax;
+ adjusted_mode.crtc_vblank_end = crtc_state->vrr.vmax;
+ adjusted_mode.crtc_vblank_start = intel_vrr_vmin_vblank_start(crtc_state);
+ crtc->vmax_vblank_start = intel_vrr_vmax_vblank_start(crtc_state);
+ }
+
+ drm_calc_timestamping_constants(&crtc->base, &adjusted_mode);
+
+ crtc->mode_flags = crtc_state->mode_flags;
+
+ /*
+ * The scanline counter increments at the leading edge of hsync.
+ *
+ * On most platforms it starts counting from vtotal-1 on the
+ * first active line. That means the scanline counter value is
+ * always one less than what we would expect. I.e. just after
+ * start of vblank, which also occurs at start of hsync (on the
+ * last active line), the scanline counter will read vblank_start-1.
+ *
+ * On gen2 the scanline counter starts counting from 1 instead
+ * of vtotal-1, so we have to subtract one (or rather add vtotal-1
+ * to keep the value positive), instead of adding one.
+ *
+ * On HSW+ the behaviour of the scanline counter depends on the output
+ * type. For DP ports it behaves like most other platforms, but on HDMI
+ * there's an extra 1 line difference. So we need to add two instead of
+ * one to the value.
+ *
+ * On VLV/CHV DSI the scanline counter would appear to increment
+ * approx. 1/3 of a scanline before start of vblank. Unfortunately
+ * that means we can't tell whether we're in vblank or not while
+ * we're on that particular line. We must still set scanline_offset
+ * to 1 so that the vblank timestamps come out correct when we query
+ * the scanline counter from within the vblank interrupt handler.
+ * However if queried just before the start of vblank we'll get an
+ * answer that's slightly in the future.
+ */
+ if (DISPLAY_VER(dev_priv) == 2) {
+ int vtotal;
+
+ vtotal = adjusted_mode.crtc_vtotal;
+ if (adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
+ vtotal /= 2;
+
+ crtc->scanline_offset = vtotal - 1;
+ } else if (HAS_DDI(dev_priv) &&
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
+ crtc->scanline_offset = 2;
+ } else {
+ crtc->scanline_offset = 1;
+ }
+}
+
+/*
+ * This implements the workaround described in the "notes" section of the mode
+ * set sequence documentation. When going from no pipes or single pipe to
+ * multiple pipes, and planes are enabled after the pipe, we need to wait at
+ * least 2 vblanks on the first pipe before enabling planes on the second pipe.
+ */
+static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state)
+{
+ struct intel_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
+ struct intel_crtc_state *first_crtc_state = NULL;
+ struct intel_crtc_state *other_crtc_state = NULL;
+ enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
+ int i;
+
+ /* look at all crtcs that are going to be enabled during the modeset */
+ for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
+ if (!crtc_state->hw.active ||
+ !intel_crtc_needs_modeset(crtc_state))
+ continue;
+
+ if (first_crtc_state) {
+ other_crtc_state = crtc_state;
+ break;
+ } else {
+ first_crtc_state = crtc_state;
+ first_pipe = crtc->pipe;
+ }
+ }
+
+ /* No workaround needed? */
+ if (!first_crtc_state)
+ return 0;
+
+ /* w/a possibly needed, check how many crtcs are already enabled. */
+ for_each_intel_crtc(state->base.dev, crtc) {
+ crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ crtc_state->hsw_workaround_pipe = INVALID_PIPE;
+
+ if (!crtc_state->hw.active ||
+ intel_crtc_needs_modeset(crtc_state))
+ continue;
+
+ /* 2 or more enabled crtcs means no need for w/a */
+ if (enabled_pipe != INVALID_PIPE)
+ return 0;
+
+ enabled_pipe = crtc->pipe;
+ }
+
+ if (enabled_pipe != INVALID_PIPE)
+ first_crtc_state->hsw_workaround_pipe = enabled_pipe;
+ else if (other_crtc_state)
+ other_crtc_state->hsw_workaround_pipe = first_pipe;
+
+ return 0;
+}
+
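+ /*
+ * Update the given mask of active pipes with the crtcs in @state:
+ * set the bit for each pipe that will be active, clear it for each
+ * that won't, and leave pipes not in the state untouched.
+ */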
+u8 intel_calc_active_pipes(struct intel_atomic_state *state,
+ u8 active_pipes)
+{
+ const struct intel_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
+ int i;
+
+ for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
+ if (crtc_state->hw.active)
+ active_pipes |= BIT(crtc->pipe);
+ else
+ active_pipes &= ~BIT(crtc->pipe);
+ }
+
+ return active_pipes;
+}
+
+static int intel_modeset_checks(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+
+ state->modeset = true;
+
+ if (IS_HASWELL(dev_priv))
+ return hsw_mode_set_planes_workaround(state);
+
+ return 0;
+}
+
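+ /*
+ * If the old and new states still compare equal in fastset mode,
+ * downgrade the requested modeset to a fastset: keep the pipe
+ * running and merely update its configuration.
+ */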
+static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
+ struct intel_crtc_state *new_crtc_state)
+{
+ if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true))
+ return;
+
+ new_crtc_state->uapi.mode_changed = false;
+ new_crtc_state->update_pipe = true;
+}
+
+static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ u8 plane_ids_mask)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_plane *plane;
+
+ for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
+ struct intel_plane_state *plane_state;
+
+ if ((plane_ids_mask & BIT(plane->id)) == 0)
+ continue;
+
+ plane_state = intel_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state))
+ return PTR_ERR(plane_state);
+ }
+
+ return 0;
+}
+
+int intel_atomic_add_affected_planes(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ return intel_crtc_add_planes_to_state(state, crtc,
+ old_crtc_state->enabled_planes |
+ new_crtc_state->enabled_planes);
+}
+
+static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv)
+{
+ /* See {hsw,vlv,ivb}_plane_ratio() */
+ return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) ||
+ IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
+ IS_IVYBRIDGE(dev_priv);
+}
+
+static int intel_crtc_add_bigjoiner_planes(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_crtc *other)
+{
+ const struct intel_plane_state *plane_state;
+ struct intel_plane *plane;
+ u8 plane_ids = 0;
+ int i;
+
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+ if (plane->pipe == crtc->pipe)
+ plane_ids |= BIT(plane->id);
+ }
+
+ return intel_crtc_add_planes_to_state(state, other, plane_ids);
+}
+
+static int intel_bigjoiner_add_affected_planes(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
+ int i;
+
+ for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
+ struct intel_crtc *other;
+
+ for_each_intel_crtc_in_pipe_mask(&i915->drm, other,
+ crtc_state->bigjoiner_pipes) {
+ int ret;
+
+ if (crtc == other)
+ continue;
+
+ ret = intel_crtc_add_bigjoiner_planes(state, crtc, other);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int intel_atomic_check_planes(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc_state *old_crtc_state, *new_crtc_state;
+ struct intel_plane_state *plane_state;
+ struct intel_plane *plane;
+ struct intel_crtc *crtc;
+ int i, ret;
+
+ ret = icl_add_linked_planes(state);
+ if (ret)
+ return ret;
+
+ ret = intel_bigjoiner_add_affected_planes(state);
+ if (ret)
+ return ret;
+
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+ ret = intel_plane_atomic_check(state, plane);
+ if (ret) {
+ drm_dbg_atomic(&dev_priv->drm,
+ "[PLANE:%d:%s] atomic driver check failed\n",
+ plane->base.base.id, plane->base.name);
+ return ret;
+ }
+ }
+
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ u8 old_active_planes, new_active_planes;
+
+ ret = icl_check_nv12_planes(new_crtc_state);
+ if (ret)
+ return ret;
+
+ /*
+ * On some platforms the number of active planes affects
+ * the planes' minimum cdclk calculation. Add such planes
+ * to the state before we compute the minimum cdclk.
+ */
+ if (!active_planes_affects_min_cdclk(dev_priv))
+ continue;
+
+ old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
+ new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
+
+ if (hweight8(old_active_planes) == hweight8(new_active_planes))
+ continue;
+
+ ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
+{
+ struct intel_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
+ int i;
+
+ for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ int ret;
+
+ ret = intel_crtc_atomic_check(state, crtc);
+ if (ret) {
+ drm_dbg_atomic(&i915->drm,
+ "[CRTC:%d:%s] atomic driver check failed\n",
+ crtc->base.base.id, crtc->base.name);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
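+ /*
+ * Check whether any enabled crtc using one of the given cpu
+ * transcoders is undergoing a full modeset.
+ */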
+static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state,
+ u8 transcoders)
+{
+ const struct intel_crtc_state *new_crtc_state;
+ struct intel_crtc *crtc;
+ int i;
+
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ if (new_crtc_state->hw.enable &&
+ transcoders & BIT(new_crtc_state->cpu_transcoder) &&
+ intel_crtc_needs_modeset(new_crtc_state))
+ return true;
+ }
+
+ return false;
+}
+
+static bool intel_pipes_need_modeset(struct intel_atomic_state *state,
+ u8 pipes)
+{
+ const struct intel_crtc_state *new_crtc_state;
+ struct intel_crtc *crtc;
+ int i;
+
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ if (new_crtc_state->hw.enable &&
+ pipes & BIT(crtc->pipe) &&
+ intel_crtc_needs_modeset(new_crtc_state))
+ return true;
+ }
+
+ return false;
+}
+
+static int intel_atomic_check_bigjoiner(struct intel_atomic_state *state,
+ struct intel_crtc *master_crtc)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_crtc_state *master_crtc_state =
+ intel_atomic_get_new_crtc_state(state, master_crtc);
+ struct intel_crtc *slave_crtc;
+
+ if (!master_crtc_state->bigjoiner_pipes)
+ return 0;
+
+ /* sanity check */
+ if (drm_WARN_ON(&i915->drm,
+ master_crtc->pipe != bigjoiner_master_pipe(master_crtc_state)))
+ return -EINVAL;
+
+ if (master_crtc_state->bigjoiner_pipes & ~bigjoiner_pipes(i915)) {
+ drm_dbg_kms(&i915->drm,
+ "[CRTC:%d:%s] Cannot act as big joiner master "
+ "(need 0x%x as pipes, only 0x%x possible)\n",
+ master_crtc->base.base.id, master_crtc->base.name,
+ master_crtc_state->bigjoiner_pipes, bigjoiner_pipes(i915));
+ return -EINVAL;
+ }
+
+ for_each_intel_crtc_in_pipe_mask(&i915->drm, slave_crtc,
+ intel_crtc_bigjoiner_slave_pipes(master_crtc_state)) {
+ struct intel_crtc_state *slave_crtc_state;
+ int ret;
+
+ slave_crtc_state = intel_atomic_get_crtc_state(&state->base, slave_crtc);
+ if (IS_ERR(slave_crtc_state))
+ return PTR_ERR(slave_crtc_state);
+
+ /* master being enabled, slave was already configured? */
+ if (slave_crtc_state->uapi.enable) {
+ drm_dbg_kms(&i915->drm,
+ "[CRTC:%d:%s] Slave is enabled as normal CRTC, but "
+ "[CRTC:%d:%s] claiming this CRTC for bigjoiner.\n",
+ slave_crtc->base.base.id, slave_crtc->base.name,
+ master_crtc->base.base.id, master_crtc->base.name);
+ return -EINVAL;
+ }
+
+ /*
+ * The state copy logic assumes the master crtc gets processed
+ * before the slave crtc during the main compute_config loop.
+ * This works because the crtcs are created in pipe order,
+ * and the hardware requires master pipe < slave pipe as well.
+ * Should that change we need to rethink the logic.
+ */
+ if (WARN_ON(drm_crtc_index(&master_crtc->base) >
+ drm_crtc_index(&slave_crtc->base)))
+ return -EINVAL;
+
+ drm_dbg_kms(&i915->drm,
+ "[CRTC:%d:%s] Used as slave for big joiner master [CRTC:%d:%s]\n",
+ slave_crtc->base.base.id, slave_crtc->base.name,
+ master_crtc->base.base.id, master_crtc->base.name);
+
+ slave_crtc_state->bigjoiner_pipes =
+ master_crtc_state->bigjoiner_pipes;
+
+ ret = copy_bigjoiner_crtc_state_modeset(state, slave_crtc);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void kill_bigjoiner_slave(struct intel_atomic_state *state,
+ struct intel_crtc *master_crtc)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_crtc_state *master_crtc_state =
+ intel_atomic_get_new_crtc_state(state, master_crtc);
+ struct intel_crtc *slave_crtc;
+
+ for_each_intel_crtc_in_pipe_mask(&i915->drm, slave_crtc,
+ intel_crtc_bigjoiner_slave_pipes(master_crtc_state)) {
+ struct intel_crtc_state *slave_crtc_state =
+ intel_atomic_get_new_crtc_state(state, slave_crtc);
+
+ slave_crtc_state->bigjoiner_pipes = 0;
+
+ intel_crtc_copy_uapi_to_hw_state_modeset(state, slave_crtc);
+ }
+
+ master_crtc_state->bigjoiner_pipes = 0;
+}
+
+/**
+ * DOC: asynchronous flip implementation
+ *
+ * Asynchronous page flip is the implementation for the DRM_MODE_PAGE_FLIP_ASYNC
+ * flag. Currently async flip is only supported via the drmModePageFlip
+ * IOCTL, and correspondingly only the primary plane is supported.
+ *
+ * Async flip can only change the plane surface address, so anything else
+ * changing is rejected by the intel_async_flip_check_hw() function.
+ * Once this check is cleared, the flip done interrupt is enabled using
+ * the intel_crtc_enable_flip_done() function.
+ *
+ * As soon as the surface address register is written, a flip done interrupt is
+ * generated and the requested events are sent to userspace in the interrupt
+ * handler itself. The timestamp and sequence sent during the flip done event
+ * correspond to the last vblank and have no relation to the actual time when
+ * the flip done event was sent.
+ */
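+ /*
+ * For illustration, a minimal userspace sketch (assuming libdrm)
+ * requesting such a flip would be:
+ *
+ *   drmModePageFlip(fd, crtc_id, fb_id,
+ *                   DRM_MODE_PAGE_FLIP_EVENT | DRM_MODE_PAGE_FLIP_ASYNC,
+ *                   user_data);
+ *
+ * with the flip done event then read back from the DRM fd.
+ */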
+static int intel_async_flip_check_uapi(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct intel_plane_state *old_plane_state;
+ struct intel_plane_state *new_plane_state;
+ struct intel_plane *plane;
+ int i;
+
+ if (!new_crtc_state->uapi.async_flip)
+ return 0;
+
+ if (!new_crtc_state->uapi.active) {
+ drm_dbg_kms(&i915->drm,
+ "[CRTC:%d:%s] not active\n",
+ crtc->base.base.id, crtc->base.name);
+ return -EINVAL;
+ }
+
+ if (intel_crtc_needs_modeset(new_crtc_state)) {
+ drm_dbg_kms(&i915->drm,
+ "[CRTC:%d:%s] modeset required\n",
+ crtc->base.base.id, crtc->base.name);
+ return -EINVAL;
+ }
+
+ /*
+ * FIXME: Bigjoiner+async flip is busted currently.
+ * Remove this check once the issues are fixed.
+ */
+ if (new_crtc_state->bigjoiner_pipes) {
+ drm_dbg_kms(&i915->drm,
+ "[CRTC:%d:%s] async flip disallowed with bigjoiner\n",
+ crtc->base.base.id, crtc->base.name);
+ return -EINVAL;
+ }
+
+ for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
+ new_plane_state, i) {
+ if (plane->pipe != crtc->pipe)
+ continue;
+
+ /*
+ * TODO: Async flip is only supported through the page flip IOCTL
+ * as of now, so support is currently added for the primary plane only.
+ * Support for other planes on platforms that support
+ * this (vlv/chv and icl+) should be added when async flip is
+ * enabled in the atomic IOCTL path.
+ */
+ if (!plane->async_flip) {
+ drm_dbg_kms(&i915->drm,
+ "[PLANE:%d:%s] async flip not supported\n",
+ plane->base.base.id, plane->base.name);
+ return -EINVAL;
+ }
+
+ if (!old_plane_state->uapi.fb || !new_plane_state->uapi.fb) {
+ drm_dbg_kms(&i915->drm,
+ "[PLANE:%d:%s] no old or new framebuffer\n",
+ plane->base.base.id, plane->base.name);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct intel_crtc *crtc)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_crtc_state *old_crtc_state, *new_crtc_state;
+ const struct intel_plane_state *new_plane_state, *old_plane_state;
+ struct intel_plane *plane;
+ int i;
+
+ old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
+ new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
+
+ if (!new_crtc_state->uapi.async_flip)
+ return 0;
+
+ if (!new_crtc_state->hw.active) {
+ drm_dbg_kms(&i915->drm,
+ "[CRTC:%d:%s] not active\n",
+ crtc->base.base.id, crtc->base.name);
+ return -EINVAL;
+ }
+
+ if (intel_crtc_needs_modeset(new_crtc_state)) {
+ drm_dbg_kms(&i915->drm,
+ "[CRTC:%d:%s] modeset required\n",
+ crtc->base.base.id, crtc->base.name);
+ return -EINVAL;
+ }
+
+ if (old_crtc_state->active_planes != new_crtc_state->active_planes) {
+ drm_dbg_kms(&i915->drm,
+ "[CRTC:%d:%s] Active planes cannot be in async flip\n",
+ crtc->base.base.id, crtc->base.name);
+ return -EINVAL;
+ }
+
+ for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
+ new_plane_state, i) {
+ if (plane->pipe != crtc->pipe)
+ continue;
+
+ /*
+ * Only async flip capable planes should be in the state
+ * if we're really about to ask the hardware to perform
+ * an async flip. We should never get this far otherwise.
+ */
+ if (drm_WARN_ON(&i915->drm,
+ new_crtc_state->do_async_flip && !plane->async_flip))
+ return -EINVAL;
+
+ /*
+ * Only check async flip capable planes; other planes
+ * may be involved in the initial commit due to
+ * the wm0/ddb optimization.
+ *
+ * TODO: maybe we should track which planes actually
+ * were requested to do the async flip...
+ */
+ if (!plane->async_flip)
+ continue;
+
+ /*
+ * FIXME: This check is kept generic for all platforms.
+ * Need to verify this for all gen9 platforms to enable
+ * this selectively if required.
+ */
+ switch (new_plane_state->hw.fb->modifier) {
+ case I915_FORMAT_MOD_X_TILED:
+ case I915_FORMAT_MOD_Y_TILED:
+ case I915_FORMAT_MOD_Yf_TILED:
+ case I915_FORMAT_MOD_4_TILED:
+ break;
+ default:
+ drm_dbg_kms(&i915->drm,
+ "[PLANE:%d:%s] Modifier does not support async flips\n",
+ plane->base.base.id, plane->base.name);
+ return -EINVAL;
+ }
+
+ if (new_plane_state->hw.fb->format->num_planes > 1) {
+ drm_dbg_kms(&i915->drm,
+ "[PLANE:%d:%s] Planar formats do not support async flips\n",
+ plane->base.base.id, plane->base.name);
+ return -EINVAL;
+ }
+
+ if (old_plane_state->view.color_plane[0].mapping_stride !=
+ new_plane_state->view.color_plane[0].mapping_stride) {
+ drm_dbg_kms(&i915->drm,
+ "[PLANE:%d:%s] Stride cannot be changed in async flip\n",
+ plane->base.base.id, plane->base.name);
+ return -EINVAL;
+ }
+
+ if (old_plane_state->hw.fb->modifier !=
+ new_plane_state->hw.fb->modifier) {
+ drm_dbg_kms(&i915->drm,
+ "[PLANE:%d:%s] Modifier cannot be changed in async flip\n",
+ plane->base.base.id, plane->base.name);
+ return -EINVAL;
+ }
+
+ if (old_plane_state->hw.fb->format !=
+ new_plane_state->hw.fb->format) {
+ drm_dbg_kms(&i915->drm,
+ "[PLANE:%d:%s] Pixel format cannot be changed in async flip\n",
+ plane->base.base.id, plane->base.name);
+ return -EINVAL;
+ }
+
+ if (old_plane_state->hw.rotation !=
+ new_plane_state->hw.rotation) {
+ drm_dbg_kms(&i915->drm,
+ "[PLANE:%d:%s] Rotation cannot be changed in async flip\n",
+ plane->base.base.id, plane->base.name);
+ return -EINVAL;
+ }
+
+ if (!drm_rect_equals(&old_plane_state->uapi.src, &new_plane_state->uapi.src) ||
+ !drm_rect_equals(&old_plane_state->uapi.dst, &new_plane_state->uapi.dst)) {
+ drm_dbg_kms(&i915->drm,
+ "[PLANE:%d:%s] Size/co-ordinates cannot be changed in async flip\n",
+ plane->base.base.id, plane->base.name);
+ return -EINVAL;
+ }
+
+ if (old_plane_state->hw.alpha != new_plane_state->hw.alpha) {
+ drm_dbg_kms(&i915->drm,
+ "[PLANES:%d:%s] Alpha value cannot be changed in async flip\n",
+ plane->base.base.id, plane->base.name);
+ return -EINVAL;
+ }
+
+ if (old_plane_state->hw.pixel_blend_mode !=
+ new_plane_state->hw.pixel_blend_mode) {
+ drm_dbg_kms(&i915->drm,
+ "[PLANE:%d:%s] Pixel blend mode cannot be changed in async flip\n",
+ plane->base.base.id, plane->base.name);
+ return -EINVAL;
+ }
+
+ if (old_plane_state->hw.color_encoding != new_plane_state->hw.color_encoding) {
+ drm_dbg_kms(&i915->drm,
+ "[PLANE:%d:%s] Color encoding cannot be changed in async flip\n",
+ plane->base.base.id, plane->base.name);
+ return -EINVAL;
+ }
+
+ if (old_plane_state->hw.color_range != new_plane_state->hw.color_range) {
+ drm_dbg_kms(&i915->drm,
+ "[PLANE:%d:%s] Color range cannot be changed in async flip\n",
+ plane->base.base.id, plane->base.name);
+ return -EINVAL;
+ }
+
+ /* plane decryption is allowed to change only in synchronous flips */
+ if (old_plane_state->decrypt != new_plane_state->decrypt) {
+ drm_dbg_kms(&i915->drm,
+ "[PLANE:%d:%s] Decryption cannot be changed in async flip\n",
+ plane->base.base.id, plane->base.name);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int intel_bigjoiner_add_affected_crtcs(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
+ u8 affected_pipes = 0;
+ u8 modeset_pipes = 0;
+ int i;
+
+ for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
+ affected_pipes |= crtc_state->bigjoiner_pipes;
+ if (intel_crtc_needs_modeset(crtc_state))
+ modeset_pipes |= crtc_state->bigjoiner_pipes;
+ }
+
+ for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, affected_pipes) {
+ crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+ }
+
+ for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, modeset_pipes) {
+ int ret;
+
+ crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
+
+ crtc_state->uapi.mode_changed = true;
+
+ ret = drm_atomic_add_affected_connectors(&state->base, &crtc->base);
+ if (ret)
+ return ret;
+
+ ret = intel_atomic_add_affected_planes(state, crtc);
+ if (ret)
+ return ret;
+ }
+
+ for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
+ /* Kill old bigjoiner link, we may re-establish afterwards */
+ if (intel_crtc_needs_modeset(crtc_state) &&
+ intel_crtc_is_bigjoiner_master(crtc_state))
+ kill_bigjoiner_slave(state, crtc);
+ }
+
+ return 0;
+}
+
+/**
+ * intel_atomic_check - validate state object
+ * @dev: drm device
+ * @_state: state to validate
+ */
+static int intel_atomic_check(struct drm_device *dev,
+ struct drm_atomic_state *_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_atomic_state *state = to_intel_atomic_state(_state);
+ struct intel_crtc_state *old_crtc_state, *new_crtc_state;
+ struct intel_crtc *crtc;
+ int ret, i;
+ bool any_ms = false;
+
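+ /*
+ * Treat a change of the inherited (BIOS takeover) flag or of the
+ * requested scaling filter as a mode change, so that the full pipe
+ * configuration gets recomputed.
+ */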
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ if (new_crtc_state->inherited != old_crtc_state->inherited)
+ new_crtc_state->uapi.mode_changed = true;
+
+ if (new_crtc_state->uapi.scaling_filter !=
+ old_crtc_state->uapi.scaling_filter)
+ new_crtc_state->uapi.mode_changed = true;
+ }
+
+ intel_vrr_check_modeset(state);
+
+ ret = drm_atomic_helper_check_modeset(dev, &state->base);
+ if (ret)
+ goto fail;
+
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ ret = intel_async_flip_check_uapi(state, crtc);
+ if (ret)
+ return ret;
+ }
+
+ ret = intel_bigjoiner_add_affected_crtcs(state);
+ if (ret)
+ goto fail;
+
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ if (!intel_crtc_needs_modeset(new_crtc_state)) {
+ if (intel_crtc_is_bigjoiner_slave(new_crtc_state))
+ copy_bigjoiner_crtc_state_nomodeset(state, crtc);
+ else
+ intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc);
+ continue;
+ }
+
+ if (intel_crtc_is_bigjoiner_slave(new_crtc_state)) {
+ drm_WARN_ON(&dev_priv->drm, new_crtc_state->uapi.enable);
+ continue;
+ }
+
+ ret = intel_crtc_prepare_cleared_state(state, crtc);
+ if (ret)
+ goto fail;
+
+ if (!new_crtc_state->hw.enable)
+ continue;
+
+ ret = intel_modeset_pipe_config(state, crtc);
+ if (ret)
+ goto fail;
+
+ ret = intel_atomic_check_bigjoiner(state, crtc);
+ if (ret)
+ goto fail;
+ }
+
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ if (!intel_crtc_needs_modeset(new_crtc_state))
+ continue;
+
+ if (new_crtc_state->hw.enable) {
+ ret = intel_modeset_pipe_config_late(state, crtc);
+ if (ret)
+ goto fail;
+ }
+
+ intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
+ }
+
+ /*
+ * Check if fastset is allowed by external dependencies like other
+ * pipes and transcoders.
+ *
+ * Right now it only forces a full modeset when the MST master
+ * transcoder did not change but the pipe of the master transcoder
+ * needs a full modeset, so all slaves also need to do a full modeset;
+ * likewise, in case of port synced crtcs, if one of the synced crtcs
+ * needs a full modeset, all other synced crtcs should be
+ * forced to do a full modeset.
+ */
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ if (!new_crtc_state->hw.enable || intel_crtc_needs_modeset(new_crtc_state))
+ continue;
+
+ if (intel_dp_mst_is_slave_trans(new_crtc_state)) {
+ enum transcoder master = new_crtc_state->mst_master_transcoder;
+
+ if (intel_cpu_transcoders_need_modeset(state, BIT(master))) {
+ new_crtc_state->uapi.mode_changed = true;
+ new_crtc_state->update_pipe = false;
+ }
+ }
+
+ if (is_trans_port_sync_mode(new_crtc_state)) {
+ u8 trans = new_crtc_state->sync_mode_slaves_mask;
+
+ if (new_crtc_state->master_transcoder != INVALID_TRANSCODER)
+ trans |= BIT(new_crtc_state->master_transcoder);
+
+ if (intel_cpu_transcoders_need_modeset(state, trans)) {
+ new_crtc_state->uapi.mode_changed = true;
+ new_crtc_state->update_pipe = false;
+ }
+ }
+
+ if (new_crtc_state->bigjoiner_pipes) {
+ if (intel_pipes_need_modeset(state, new_crtc_state->bigjoiner_pipes)) {
+ new_crtc_state->uapi.mode_changed = true;
+ new_crtc_state->update_pipe = false;
+ }
+ }
+ }
+
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ if (!intel_crtc_needs_modeset(new_crtc_state))
+ continue;
+
+ any_ms = true;
+
+ intel_release_shared_dplls(state, crtc);
+ }
+
+ if (any_ms && !check_digital_port_conflicts(state)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "rejecting conflicting digital port configuration\n");
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ ret = drm_dp_mst_atomic_check(&state->base);
+ if (ret)
+ goto fail;
+
+ ret = intel_atomic_check_planes(state);
+ if (ret)
+ goto fail;
+
+ ret = intel_compute_global_watermarks(state);
+ if (ret)
+ goto fail;
+
+ ret = intel_bw_atomic_check(state);
+ if (ret)
+ goto fail;
+
+ ret = intel_cdclk_atomic_check(state, &any_ms);
+ if (ret)
+ goto fail;
+
+ if (intel_any_crtc_needs_modeset(state))
+ any_ms = true;
+
+ if (any_ms) {
+ ret = intel_modeset_checks(state);
+ if (ret)
+ goto fail;
+
+ ret = intel_modeset_calc_cdclk(state);
+ if (ret)
+ return ret;
+ }
+
+ ret = intel_atomic_check_crtcs(state);
+ if (ret)
+ goto fail;
+
+ ret = intel_fbc_atomic_check(state);
+ if (ret)
+ goto fail;
+
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ ret = intel_async_flip_check_hw(state, crtc);
+ if (ret)
+ goto fail;
+
+ if (!intel_crtc_needs_modeset(new_crtc_state) &&
+ !new_crtc_state->update_pipe)
+ continue;
+
+ intel_crtc_state_dump(new_crtc_state, state,
+ intel_crtc_needs_modeset(new_crtc_state) ?
+ "modeset" : "fastset");
+ }
+
+ return 0;
+
+ fail:
+ if (ret == -EDEADLK)
+ return ret;
+
+ /*
+ * FIXME would probably be nice to know which crtc specifically
+ * caused the failure, in cases where we can pinpoint it.
+ */
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i)
+ intel_crtc_state_dump(new_crtc_state, state, "failed");
+
+ return ret;
+}
+
+static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
+{
+ struct intel_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
+ int i, ret;
+
+ ret = drm_atomic_helper_prepare_planes(state->base.dev, &state->base);
+ if (ret < 0)
+ return ret;
+
+ for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
+ bool mode_changed = intel_crtc_needs_modeset(crtc_state);
+
+ if (mode_changed || crtc_state->update_pipe ||
+ crtc_state->uapi.color_mgmt_changed) {
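+ /*
+ * Pre-allocate the DSB (Display State Buffer) used to
+ * batch up register writes, e.g. for loading the color
+ * LUTs, ahead of the actual commit.
+ */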
+ intel_dsb_prepare(crtc_state);
+ }
+ }
+
+ return 0;
+}
+
+void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+ if (DISPLAY_VER(dev_priv) != 2 || crtc_state->active_planes)
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
+
+ if (crtc_state->has_pch_encoder) {
+ enum pipe pch_transcoder =
+ intel_crtc_pch_transcoder(crtc);
+
+ intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
+ }
+}
+
+static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
+ const struct intel_crtc_state *new_crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+ /*
+ * Update pipe size and adjust fitter if needed: the reason for this is
+ * that in compute_mode_changes we check the native mode (not the pfit
+ * mode) to see if we can flip rather than do a full mode set. In the
+ * fastboot case, we'll flip, but if we don't update the pipesrc and
+ * pfit state, we'll end up with a big fb scanned out into the wrong
+ * sized surface.
+ */
+ intel_set_pipe_src_size(new_crtc_state);
+
+ /* on skylake this is done by detaching scalers */
+ if (DISPLAY_VER(dev_priv) >= 9) {
+ if (new_crtc_state->pch_pfit.enabled)
+ skl_pfit_enable(new_crtc_state);
+ } else if (HAS_PCH_SPLIT(dev_priv)) {
+ if (new_crtc_state->pch_pfit.enabled)
+ ilk_pfit_enable(new_crtc_state);
+ else if (old_crtc_state->pch_pfit.enabled)
+ ilk_pfit_disable(old_crtc_state);
+ }
+
+ /*
+ * The register is supposedly single buffered so perhaps
+ * not 100% correct to do this here. But SKL+ calculate
+ * this based on the adjusted pixel rate so pfit changes do
+ * affect it and so it must be updated for fastsets.
+ * HSW/BDW only really need this here for fastboot, after
+ * that the value should not change without a full modeset.
+ */
+ if (DISPLAY_VER(dev_priv) >= 9 ||
+ IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ hsw_set_linetime_wm(new_crtc_state);
+
+ if (new_crtc_state->seamless_m_n)
+ intel_cpu_transcoder_set_m1_n1(crtc, new_crtc_state->cpu_transcoder,
+ &new_crtc_state->dp_m_n);
+}
+
+static void commit_pipe_pre_planes(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ bool modeset = intel_crtc_needs_modeset(new_crtc_state);
+
+ /*
+ * During modesets the pipe configuration was programmed as the
+ * CRTC was enabled.
+ */
+ if (!modeset) {
+ if (new_crtc_state->uapi.color_mgmt_changed ||
+ new_crtc_state->update_pipe)
+ intel_color_commit_arm(new_crtc_state);
+
+ if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
+ bdw_set_pipemisc(new_crtc_state);
+
+ if (new_crtc_state->update_pipe)
+ intel_pipe_fastset(old_crtc_state, new_crtc_state);
+ }
+
+ intel_psr2_program_trans_man_trk_ctl(new_crtc_state);
+
+ intel_atomic_update_watermarks(state, crtc);
+}
+
+static void commit_pipe_post_planes(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ /*
+ * Disable the scaler(s) after the plane(s) so that we don't
+ * get a catastrophic underrun even if the two operations
+ * end up happening in two different frames.
+ */
+ if (DISPLAY_VER(dev_priv) >= 9 &&
+ !intel_crtc_needs_modeset(new_crtc_state))
+ skl_detach_scalers(new_crtc_state);
+}
+
+static void intel_enable_crtc(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ if (!intel_crtc_needs_modeset(new_crtc_state))
+ return;
+
+ intel_crtc_update_active_timings(new_crtc_state);
+
+ dev_priv->display.funcs.display->crtc_enable(state, crtc);
+
+ if (intel_crtc_is_bigjoiner_slave(new_crtc_state))
+ return;
+
+ /* vblanks work again, re-enable pipe CRC. */
+ intel_crtc_enable_pipe_crc(crtc);
+}
+
+static void intel_update_crtc(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ bool modeset = intel_crtc_needs_modeset(new_crtc_state);
+
+ if (!modeset) {
+ if (new_crtc_state->preload_luts &&
+ (new_crtc_state->uapi.color_mgmt_changed ||
+ new_crtc_state->update_pipe))
+ intel_color_load_luts(new_crtc_state);
+
+ intel_pre_plane_update(state, crtc);
+
+ if (new_crtc_state->update_pipe)
+ intel_encoders_update_pipe(state, crtc);
+
+ if (DISPLAY_VER(i915) >= 11 &&
+ new_crtc_state->update_pipe)
+ icl_set_pipe_chicken(new_crtc_state);
+ }
+
+ intel_fbc_update(state, crtc);
+
+ if (!modeset &&
+ (new_crtc_state->uapi.color_mgmt_changed ||
+ new_crtc_state->update_pipe))
+ intel_color_commit_noarm(new_crtc_state);
+
+ intel_crtc_planes_update_noarm(state, crtc);
+
+ /* Perform vblank evasion around commit operation */
+ intel_pipe_update_start(new_crtc_state);
+
+ commit_pipe_pre_planes(state, crtc);
+
+ intel_crtc_planes_update_arm(state, crtc);
+
+ commit_pipe_post_planes(state, crtc);
+
+ intel_pipe_update_end(new_crtc_state);
+
+ /*
+ * We usually enable FIFO underrun interrupts as part of the
+ * CRTC enable sequence during modesets. But when we inherit a
+ * valid pipe configuration from the BIOS we need to take care
+ * of enabling them on the CRTC's first fastset.
+ */
+ if (new_crtc_state->update_pipe && !modeset &&
+ old_crtc_state->inherited)
+ intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
+}
+
+static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
+ struct intel_crtc_state *old_crtc_state,
+ struct intel_crtc_state *new_crtc_state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+
+ /*
+ * We need to disable pipe CRC before disabling the pipe,
+ * or we race against vblank off.
+ */
+ intel_crtc_disable_pipe_crc(crtc);
+
+ dev_priv->display.funcs.display->crtc_disable(state, crtc);
+ crtc->active = false;
+ intel_fbc_disable(crtc);
+ intel_disable_shared_dpll(old_crtc_state);
+
+ /* FIXME unify this for all platforms */
+ if (!new_crtc_state->hw.active &&
+ !HAS_GMCH(dev_priv))
+ intel_initial_watermarks(state, crtc);
+}
+
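+/*
+ * Disable the CRTCs that need a modeset in three passes: planes first,
+ * then the dependent CRTCs (port sync slaves, MST slaves and big joiner
+ * slaves), and finally everything else, so that a master is never torn
+ * down while a slave still depends on it.
+ */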
+static void intel_commit_modeset_disables(struct intel_atomic_state *state)
+{
+ struct intel_crtc_state *new_crtc_state, *old_crtc_state;
+ struct intel_crtc *crtc;
+ u32 handled = 0;
+ int i;
+
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ if (!intel_crtc_needs_modeset(new_crtc_state))
+ continue;
+
+ if (!old_crtc_state->hw.active)
+ continue;
+
+ intel_pre_plane_update(state, crtc);
+ intel_crtc_disable_planes(state, crtc);
+ }
+
+ /* Only disable port sync slaves, MST slaves and big joiner slaves */
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ if (!intel_crtc_needs_modeset(new_crtc_state))
+ continue;
+
+ if (!old_crtc_state->hw.active)
+ continue;
+
+ /*
+ * In case of Transcoder Port Sync, the master and slave CRTCs can
+ * be assigned in any order, so we need to make sure that the
+ * slave CRTCs are disabled first and the master CRTC after that,
+ * since slave vblanks are masked until the master's vblanks.
+ */
+ if (!is_trans_port_sync_slave(old_crtc_state) &&
+ !intel_dp_mst_is_slave_trans(old_crtc_state) &&
+ !intel_crtc_is_bigjoiner_slave(old_crtc_state))
+ continue;
+
+ intel_old_crtc_state_disables(state, old_crtc_state,
+ new_crtc_state, crtc);
+ handled |= BIT(crtc->pipe);
+ }
+
+ /* Disable everything else left on */
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ if (!intel_crtc_needs_modeset(new_crtc_state) ||
+ (handled & BIT(crtc->pipe)))
+ continue;
+
+ if (!old_crtc_state->hw.active)
+ continue;
+
+ intel_old_crtc_state_disables(state, old_crtc_state,
+ new_crtc_state, crtc);
+ }
+}
+
+static void intel_commit_modeset_enables(struct intel_atomic_state *state)
+{
+ struct intel_crtc_state *new_crtc_state;
+ struct intel_crtc *crtc;
+ int i;
+
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ if (!new_crtc_state->hw.active)
+ continue;
+
+ intel_enable_crtc(state, crtc);
+ intel_update_crtc(state, crtc);
+ }
+}
+
+static void skl_commit_modeset_enables(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc *crtc;
+ struct intel_crtc_state *old_crtc_state, *new_crtc_state;
+ struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
+ u8 update_pipes = 0, modeset_pipes = 0;
+ int i;
+
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ enum pipe pipe = crtc->pipe;
+
+ if (!new_crtc_state->hw.active)
+ continue;
+
+ /* Ignore allocations for CRTCs that have been turned off. */
+ if (!intel_crtc_needs_modeset(new_crtc_state)) {
+ entries[pipe] = old_crtc_state->wm.skl.ddb;
+ update_pipes |= BIT(pipe);
+ } else {
+ modeset_pipes |= BIT(pipe);
+ }
+ }
+
+ /*
+ * Whenever the number of active pipes changes, we need to make sure we
+ * update the pipes in the right order so that their ddb allocations
+ * never overlap with each other between CRTC updates. Otherwise we'll
+ * cause pipe underruns and other bad stuff.
+ *
+ * So first let's enable all pipes that do not need a full modeset as
+ * those don't have any external dependency.
+ */
+ while (update_pipes) {
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ enum pipe pipe = crtc->pipe;
+
+ if ((update_pipes & BIT(pipe)) == 0)
+ continue;
+
+ if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
+ entries, I915_MAX_PIPES, pipe))
+ continue;
+
+ entries[pipe] = new_crtc_state->wm.skl.ddb;
+ update_pipes &= ~BIT(pipe);
+
+ intel_update_crtc(state, crtc);
+
+ /*
+ * If this is an already active pipe, its DDB changed,
+ * and this isn't the last pipe that needs updating,
+ * then we need to wait for a vblank to pass for the
+ * new DDB allocation to take effect.
+ */
+ if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
+ &old_crtc_state->wm.skl.ddb) &&
+ (update_pipes | modeset_pipes))
+ intel_crtc_wait_for_next_vblank(crtc);
+ }
+ }
+
+ update_pipes = modeset_pipes;
+
+ /*
+ * Enable all pipes that need a modeset and do not depend on other
+ * pipes.
+ */
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ enum pipe pipe = crtc->pipe;
+
+ if ((modeset_pipes & BIT(pipe)) == 0)
+ continue;
+
+ if (intel_dp_mst_is_slave_trans(new_crtc_state) ||
+ is_trans_port_sync_master(new_crtc_state) ||
+ intel_crtc_is_bigjoiner_master(new_crtc_state))
+ continue;
+
+ modeset_pipes &= ~BIT(pipe);
+
+ intel_enable_crtc(state, crtc);
+ }
+
+ /*
+ * Then we enable all remaining pipes that depend on other
+ * pipes: MST slaves, port sync masters and big joiner masters.
+ */
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ enum pipe pipe = crtc->pipe;
+
+ if ((modeset_pipes & BIT(pipe)) == 0)
+ continue;
+
+ modeset_pipes &= ~BIT(pipe);
+
+ intel_enable_crtc(state, crtc);
+ }
+
+ /*
+ * Finally we do the plane updates/etc. for all pipes that got enabled.
+ */
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ enum pipe pipe = crtc->pipe;
+
+ if ((update_pipes & BIT(pipe)) == 0)
+ continue;
+
+ drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
+ entries, I915_MAX_PIPES, pipe));
+
+ entries[pipe] = new_crtc_state->wm.skl.ddb;
+ update_pipes &= ~BIT(pipe);
+
+ intel_update_crtc(state, crtc);
+ }
+
+ drm_WARN_ON(&dev_priv->drm, modeset_pipes);
+ drm_WARN_ON(&dev_priv->drm, update_pipes);
+}
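+
+/*
+ * The DDB overlap test used by skl_commit_modeset_enables() above boils
+ * down to ordinary interval intersection on the [start, end) DDB ranges;
+ * a minimal sketch of the idea (assuming the two-field start/end layout
+ * of struct skl_ddb_entry):
+ *
+ * static bool ddb_entries_overlap(const struct skl_ddb_entry *a,
+ *                                 const struct skl_ddb_entry *b)
+ * {
+ *         return a->start < b->end && b->start < a->end;
+ * }
+ *
+ * skl_ddb_allocation_overlaps() applies this check against every other
+ * pipe's entry, which is what lets the loops above pick a safe update
+ * order one pipe at a time.
+ */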
+
+static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
+{
+ struct intel_atomic_state *state, *next;
+ struct llist_node *freed;
+
+ freed = llist_del_all(&dev_priv->display.atomic_helper.free_list);
+ llist_for_each_entry_safe(state, next, freed, freed)
+ drm_atomic_state_put(&state->base);
+}
+
+static void intel_atomic_helper_free_state_worker(struct work_struct *work)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(work, typeof(*dev_priv), display.atomic_helper.free_work);
+
+ intel_atomic_helper_free_state(dev_priv);
+}
+
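+/*
+ * Wait for the commit fence to signal, while also waking up if a GPU
+ * reset that needs the modeset locks is pending. This open-codes a wait
+ * on two wait queues at once, which is why it uses the raw
+ * prepare_to_wait()/schedule()/finish_wait() primitives rather than
+ * wait_event(): the latter can only wait on a single wait queue head.
+ */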
+static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
+{
+ struct wait_queue_entry wait_fence, wait_reset;
+ struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);
+
+ init_wait_entry(&wait_fence, 0);
+ init_wait_entry(&wait_reset, 0);
+ for (;;) {
+ prepare_to_wait(&intel_state->commit_ready.wait,
+ &wait_fence, TASK_UNINTERRUPTIBLE);
+ prepare_to_wait(bit_waitqueue(&to_gt(dev_priv)->reset.flags,
+ I915_RESET_MODESET),
+ &wait_reset, TASK_UNINTERRUPTIBLE);
+
+ if (i915_sw_fence_done(&intel_state->commit_ready) ||
+ test_bit(I915_RESET_MODESET, &to_gt(dev_priv)->reset.flags))
+ break;
+
+ schedule();
+ }
+ finish_wait(&intel_state->commit_ready.wait, &wait_fence);
+ finish_wait(bit_waitqueue(&to_gt(dev_priv)->reset.flags,
+ I915_RESET_MODESET),
+ &wait_reset);
+}
+
+static void intel_cleanup_dsbs(struct intel_atomic_state *state)
+{
+ struct intel_crtc_state *old_crtc_state, *new_crtc_state;
+ struct intel_crtc *crtc;
+ int i;
+
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i)
+ intel_dsb_cleanup(old_crtc_state);
+}
+
+static void intel_atomic_cleanup_work(struct work_struct *work)
+{
+ struct intel_atomic_state *state =
+ container_of(work, struct intel_atomic_state, base.commit_work);
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+
+ intel_cleanup_dsbs(state);
+ drm_atomic_helper_cleanup_planes(&i915->drm, &state->base);
+ drm_atomic_helper_commit_cleanup_done(&state->base);
+ drm_atomic_state_put(&state->base);
+
+ intel_atomic_helper_free_state(i915);
+}
+
+static void intel_atomic_prepare_plane_clear_colors(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_plane *plane;
+ struct intel_plane_state *plane_state;
+ int i;
+
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+ struct drm_framebuffer *fb = plane_state->hw.fb;
+ int cc_plane;
+ int ret;
+
+ if (!fb)
+ continue;
+
+ cc_plane = intel_fb_rc_ccs_cc_plane(fb);
+ if (cc_plane < 0)
+ continue;
+
+ /*
+ * The layout of the fast clear color value expected by HW
+ * (the DRM ABI requires this value to be located in the fb at
+ * offset 0 of the cc plane: plane #2 on previous generations,
+ * or plane #1 for flat CCS):
+ * - 4 x 4 bytes per-channel value
+ * (in surface type specific float/int format provided by the fb user)
+ * - 8 bytes native color value used by the display
+ * (converted/written by GPU during a fast clear operation using the
+ * above per-channel values)
+ *
+ * The commit's FB prepare hook already ensured that the FB obj is
+ * pinned and the caller made sure that the object is synced wrt. the
+ * related color clear value GPU write on it.
+ */
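+ /*
+ * Worked example of the layout above, with all offsets relative to
+ * fb->offsets[cc_plane]:
+ *
+ *   bytes  0..15: 4 x 4-byte per-channel clear color values
+ *   bytes 16..23: 8-byte native clear color value
+ *
+ * which is why the read below starts at fb->offsets[cc_plane] + 16
+ * and copies the 8-byte plane_state->ccval.
+ */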
+ ret = i915_gem_object_read_from_page(intel_fb_obj(fb),
+ fb->offsets[cc_plane] + 16,
+ &plane_state->ccval,
+ sizeof(plane_state->ccval));
+ /* The above could only fail if the FB obj has an unexpected backing store type. */
+ drm_WARN_ON(&i915->drm, ret);
+ }
+}
+
+static void intel_atomic_commit_tail(struct intel_atomic_state *state)
+{
+ struct drm_device *dev = state->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crtc_state *new_crtc_state, *old_crtc_state;
+ struct intel_crtc *crtc;
+ struct intel_power_domain_mask put_domains[I915_MAX_PIPES] = {};
+ intel_wakeref_t wakeref = 0;
+ int i;
+
+ intel_atomic_commit_fence_wait(state);
+
+ drm_atomic_helper_wait_for_dependencies(&state->base);
+ drm_dp_mst_atomic_wait_for_dependencies(&state->base);
+
+ if (state->modeset)
+ wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
+
+ intel_atomic_prepare_plane_clear_colors(state);
+
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ if (intel_crtc_needs_modeset(new_crtc_state) ||
+ new_crtc_state->update_pipe) {
+ intel_modeset_get_crtc_power_domains(new_crtc_state, &put_domains[crtc->pipe]);
+ }
+ }
+
+ intel_commit_modeset_disables(state);
+
+ /* FIXME: Eventually get rid of our crtc->config pointer */
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
+ crtc->config = new_crtc_state;
+
+ if (state->modeset) {
+ drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);
+
+ intel_set_cdclk_pre_plane_update(state);
+
+ intel_modeset_verify_disabled(dev_priv, state);
+ }
+
+ intel_sagv_pre_plane_update(state);
+
+ /* Complete the events for pipes that have now been disabled */
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ bool modeset = intel_crtc_needs_modeset(new_crtc_state);
+
+ /* Complete events for now-disabled pipes here. */
+ if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) {
+ spin_lock_irq(&dev->event_lock);
+ drm_crtc_send_vblank_event(&crtc->base,
+ new_crtc_state->uapi.event);
+ spin_unlock_irq(&dev->event_lock);
+
+ new_crtc_state->uapi.event = NULL;
+ }
+ }
+
+ intel_encoders_update_prepare(state);
+
+ intel_dbuf_pre_plane_update(state);
+ intel_mbus_dbox_update(state);
+
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ if (new_crtc_state->do_async_flip)
+ intel_crtc_enable_flip_done(state, crtc);
+ }
+
+ /* Now enable the clocks, plane, pipe, and connectors that we set up. */
+ dev_priv->display.funcs.display->commit_modeset_enables(state);
+
+ intel_encoders_update_complete(state);
+
+ if (state->modeset)
+ intel_set_cdclk_post_plane_update(state);
+
+ intel_wait_for_vblank_workers(state);
+
+ /* FIXME: We should call drm_atomic_helper_commit_hw_done() here
+ * already, but still need the state for the delayed optimization. To
+ * fix this:
+ * - wrap the optimization/post_plane_update stuff into a per-crtc work.
+ * - schedule that vblank worker _before_ calling hw_done
+ * - at the start of commit_tail, cancel it _synchronously
+ * - switch over to the vblank wait helper in the core after that since
+ * we don't need our special handling any more.
+ */
+ drm_atomic_helper_wait_for_flip_done(dev, &state->base);
+
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ if (new_crtc_state->do_async_flip)
+ intel_crtc_disable_flip_done(state, crtc);
+ }
+
+ /*
+ * Now that the vblank has passed, we can go ahead and program the
+ * optimal watermarks on platforms that need two-step watermark
+ * programming.
+ *
+ * TODO: Move this (and other cleanup) to an async worker eventually.
+ */
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ /*
+ * Gen2 reports pipe underruns whenever all planes are disabled.
+ * So re-enable underrun reporting after some planes get enabled.
+ *
+ * We do this before .optimize_watermarks() so that we have a
+ * chance of catching underruns with the intermediate watermarks
+ * vs. the new plane configuration.
+ */
+ if (DISPLAY_VER(dev_priv) == 2 && planes_enabling(old_crtc_state, new_crtc_state))
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
+
+ intel_optimize_watermarks(state, crtc);
+ }
+
+ intel_dbuf_post_plane_update(state);
+ intel_psr_post_plane_update(state);
+
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ intel_post_plane_update(state, crtc);
+
+ intel_modeset_put_crtc_power_domains(crtc, &put_domains[crtc->pipe]);
+
+ intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
+
+ /*
+ * DSB cleanup is done in cleanup_work aligning with framebuffer
+ * cleanup. So copy and reset the dsb structure to sync with
+ * commit_done and later do dsb cleanup in cleanup_work.
+ */
+ old_crtc_state->dsb = fetch_and_zero(&new_crtc_state->dsb);
+ }
+
+ /* Underruns don't always raise interrupts, so check manually */
+ intel_check_cpu_fifo_underruns(dev_priv);
+ intel_check_pch_fifo_underruns(dev_priv);
+
+ if (state->modeset)
+ intel_verify_planes(state);
+
+ intel_sagv_post_plane_update(state);
+
+ drm_atomic_helper_commit_hw_done(&state->base);
+
+ if (state->modeset) {
+ /* As one of the primary mmio accessors, KMS has a high
+ * likelihood of triggering bugs in unclaimed access. After we
+ * finish modesetting, see if an error has been flagged, and if
+ * so enable debugging for the next modeset - and hope we catch
+ * the culprit.
+ */
+ intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
+ }
+ intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
+
+ /*
+ * Defer the cleanup of the old state to a separate worker so as not
+ * to impede the current task (userspace for blocking modesets) that
+ * is executed inline. For out-of-line asynchronous modesets/flips,
+ * deferring to a new worker seems overkill, but we would place a
+ * schedule point (cond_resched()) here anyway to keep latencies
+ * down.
+ */
+ INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work);
+ queue_work(system_highpri_wq, &state->base.commit_work);
+}
+
+static void intel_atomic_commit_work(struct work_struct *work)
+{
+ struct intel_atomic_state *state =
+ container_of(work, struct intel_atomic_state, base.commit_work);
+
+ intel_atomic_commit_tail(state);
+}
+
+static int
+intel_atomic_commit_ready(struct i915_sw_fence *fence,
+ enum i915_sw_fence_notify notify)
+{
+ struct intel_atomic_state *state =
+ container_of(fence, struct intel_atomic_state, commit_ready);
+
+ switch (notify) {
+ case FENCE_COMPLETE:
+ /* we do blocking waits in the worker, nothing to do here */
+ break;
+ case FENCE_FREE:
+ {
+ struct intel_atomic_helper *helper =
+ &to_i915(state->base.dev)->display.atomic_helper;
+
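+ /*
+ * llist_add() returns true only when the list was empty
+ * beforehand, so the free worker is scheduled at most once
+ * per batch of freed states.
+ */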
+ if (llist_add(&state->freed, &helper->free_list))
+ schedule_work(&helper->free_work);
+ break;
+ }
+ }
+
+ return NOTIFY_DONE;
+}
+
+static void intel_atomic_track_fbs(struct intel_atomic_state *state)
+{
+ struct intel_plane_state *old_plane_state, *new_plane_state;
+ struct intel_plane *plane;
+ int i;
+
+ for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
+ new_plane_state, i)
+ intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
+ to_intel_frontbuffer(new_plane_state->hw.fb),
+ plane->frontbuffer_bit);
+}
+
+static int intel_atomic_commit(struct drm_device *dev,
+ struct drm_atomic_state *_state,
+ bool nonblock)
+{
+ struct intel_atomic_state *state = to_intel_atomic_state(_state);
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ int ret = 0;
+
+ state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+
+ drm_atomic_state_get(&state->base);
+ i915_sw_fence_init(&state->commit_ready,
+ intel_atomic_commit_ready);
+
+ /*
+ * The intel_legacy_cursor_update() fast path takes care
+ * of avoiding the vblank waits for simple cursor
+ * movement and flips. For cursor on/off and size changes,
+ * we want to perform the vblank waits so that watermark
+ * updates happen during the correct frames. Gen9+ have
+ * double buffered watermarks and so shouldn't need this.
+ *
+ * Unset state->legacy_cursor_update before the call to
+ * drm_atomic_helper_setup_commit() because otherwise
+ * drm_atomic_helper_wait_for_flip_done() is a noop and
+ * we get FIFO underruns because we didn't wait
+ * for vblank.
+ *
+ * FIXME doing watermarks and fb cleanup from a vblank worker
+ * (assuming we had any) would solve these problems.
+ */
+ if (DISPLAY_VER(dev_priv) < 9 && state->base.legacy_cursor_update) {
+ struct intel_crtc_state *new_crtc_state;
+ struct intel_crtc *crtc;
+ int i;
+
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
+ if (new_crtc_state->wm.need_postvbl_update ||
+ new_crtc_state->update_wm_post)
+ state->base.legacy_cursor_update = false;
+ }
+
+ ret = intel_atomic_prepare_commit(state);
+ if (ret) {
+ drm_dbg_atomic(&dev_priv->drm,
+ "Preparing state failed with %i\n", ret);
+ i915_sw_fence_commit(&state->commit_ready);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
+ return ret;
+ }
+
+ ret = drm_atomic_helper_setup_commit(&state->base, nonblock);
+ if (!ret)
+ ret = drm_atomic_helper_swap_state(&state->base, true);
+ if (!ret)
+ intel_atomic_swap_global_state(state);
+
+ if (ret) {
+ struct intel_crtc_state *new_crtc_state;
+ struct intel_crtc *crtc;
+ int i;
+
+ i915_sw_fence_commit(&state->commit_ready);
+
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
+ intel_dsb_cleanup(new_crtc_state);
+
+ drm_atomic_helper_cleanup_planes(dev, &state->base);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
+ return ret;
+ }
+ intel_shared_dpll_swap_state(state);
+ intel_atomic_track_fbs(state);
+
+ drm_atomic_state_get(&state->base);
+ INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);
+
+ i915_sw_fence_commit(&state->commit_ready);
+ if (nonblock && state->modeset) {
+ queue_work(dev_priv->display.wq.modeset, &state->base.commit_work);
+ } else if (nonblock) {
+ queue_work(dev_priv->display.wq.flip, &state->base.commit_work);
+ } else {
+ if (state->modeset)
+ flush_workqueue(dev_priv->display.wq.modeset);
+ intel_atomic_commit_tail(state);
+ }
+
+ return 0;
+}
+
+/**
+ * intel_plane_destroy - destroy a plane
+ * @plane: plane to destroy
+ *
+ * Common destruction function for all types of planes (primary, cursor,
+ * sprite).
+ */
+void intel_plane_destroy(struct drm_plane *plane)
+{
+ drm_plane_cleanup(plane);
+ kfree(to_intel_plane(plane));
+}
+
+static void intel_plane_possible_crtcs_init(struct drm_i915_private *dev_priv)
+{
+ struct intel_plane *plane;
+
+ for_each_intel_plane(&dev_priv->drm, plane) {
+ struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv,
+ plane->pipe);
+
+ plane->base.possible_crtcs = drm_crtc_mask(&crtc->base);
+ }
+}
+
+int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
+ struct drm_crtc *drmmode_crtc;
+ struct intel_crtc *crtc;
+
+ drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
+ if (!drmmode_crtc)
+ return -ENOENT;
+
+ crtc = to_intel_crtc(drmmode_crtc);
+ pipe_from_crtc_id->pipe = crtc->pipe;
+
+ return 0;
+}
+
+static u32 intel_encoder_possible_clones(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct intel_encoder *source_encoder;
+ u32 possible_clones = 0;
+
+ for_each_intel_encoder(dev, source_encoder) {
+ if (encoders_cloneable(encoder, source_encoder))
+ possible_clones |= drm_encoder_mask(&source_encoder->base);
+ }
+
+ return possible_clones;
+}
+
+static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct intel_crtc *crtc;
+ u32 possible_crtcs = 0;
+
+ for_each_intel_crtc_in_pipe_mask(dev, crtc, encoder->pipe_mask)
+ possible_crtcs |= drm_crtc_mask(&crtc->base);
+
+ return possible_crtcs;
+}
+
+static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
+{
+ if (!IS_MOBILE(dev_priv))
+ return false;
+
+ if ((intel_de_read(dev_priv, DP_A) & DP_DETECTED) == 0)
+ return false;
+
+ if (IS_IRONLAKE(dev_priv) && (intel_de_read(dev_priv, FUSE_STRAP) & ILK_eDP_A_DISABLE))
+ return false;
+
+ return true;
+}
+
+static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
+{
+ if (DISPLAY_VER(dev_priv) >= 9)
+ return false;
+
+ if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
+ return false;
+
+ if (HAS_PCH_LPT_H(dev_priv) &&
+ intel_de_read(dev_priv, SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
+ return false;
+
+ /* DDI E can't be used if DDI A requires 4 lanes */
+ if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
+ return false;
+
+ if (!dev_priv->display.vbt.int_crt_support)
+ return false;
+
+ return true;
+}
+
+static void intel_setup_outputs(struct drm_i915_private *dev_priv)
+{
+ struct intel_encoder *encoder;
+ bool dpd_is_edp = false;
+
+ intel_pps_unlock_regs_wa(dev_priv);
+
+ if (!HAS_DISPLAY(dev_priv))
+ return;
+
+ if (IS_DG2(dev_priv)) {
+ intel_ddi_init(dev_priv, PORT_A);
+ intel_ddi_init(dev_priv, PORT_B);
+ intel_ddi_init(dev_priv, PORT_C);
+ intel_ddi_init(dev_priv, PORT_D_XELPD);
+ intel_ddi_init(dev_priv, PORT_TC1);
+ } else if (IS_ALDERLAKE_P(dev_priv)) {
+ intel_ddi_init(dev_priv, PORT_A);
+ intel_ddi_init(dev_priv, PORT_B);
+ intel_ddi_init(dev_priv, PORT_TC1);
+ intel_ddi_init(dev_priv, PORT_TC2);
+ intel_ddi_init(dev_priv, PORT_TC3);
+ intel_ddi_init(dev_priv, PORT_TC4);
+ icl_dsi_init(dev_priv);
+ } else if (IS_ALDERLAKE_S(dev_priv)) {
+ intel_ddi_init(dev_priv, PORT_A);
+ intel_ddi_init(dev_priv, PORT_TC1);
+ intel_ddi_init(dev_priv, PORT_TC2);
+ intel_ddi_init(dev_priv, PORT_TC3);
+ intel_ddi_init(dev_priv, PORT_TC4);
+ } else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) {
+ intel_ddi_init(dev_priv, PORT_A);
+ intel_ddi_init(dev_priv, PORT_B);
+ intel_ddi_init(dev_priv, PORT_TC1);
+ intel_ddi_init(dev_priv, PORT_TC2);
+ } else if (DISPLAY_VER(dev_priv) >= 12) {
+ intel_ddi_init(dev_priv, PORT_A);
+ intel_ddi_init(dev_priv, PORT_B);
+ intel_ddi_init(dev_priv, PORT_TC1);
+ intel_ddi_init(dev_priv, PORT_TC2);
+ intel_ddi_init(dev_priv, PORT_TC3);
+ intel_ddi_init(dev_priv, PORT_TC4);
+ intel_ddi_init(dev_priv, PORT_TC5);
+ intel_ddi_init(dev_priv, PORT_TC6);
+ icl_dsi_init(dev_priv);
+ } else if (IS_JSL_EHL(dev_priv)) {
+ intel_ddi_init(dev_priv, PORT_A);
+ intel_ddi_init(dev_priv, PORT_B);
+ intel_ddi_init(dev_priv, PORT_C);
+ intel_ddi_init(dev_priv, PORT_D);
+ icl_dsi_init(dev_priv);
+ } else if (DISPLAY_VER(dev_priv) == 11) {
+ intel_ddi_init(dev_priv, PORT_A);
+ intel_ddi_init(dev_priv, PORT_B);
+ intel_ddi_init(dev_priv, PORT_C);
+ intel_ddi_init(dev_priv, PORT_D);
+ intel_ddi_init(dev_priv, PORT_E);
+ intel_ddi_init(dev_priv, PORT_F);
+ icl_dsi_init(dev_priv);
+ } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ intel_ddi_init(dev_priv, PORT_A);
+ intel_ddi_init(dev_priv, PORT_B);
+ intel_ddi_init(dev_priv, PORT_C);
+ vlv_dsi_init(dev_priv);
+ } else if (DISPLAY_VER(dev_priv) >= 9) {
+ intel_ddi_init(dev_priv, PORT_A);
+ intel_ddi_init(dev_priv, PORT_B);
+ intel_ddi_init(dev_priv, PORT_C);
+ intel_ddi_init(dev_priv, PORT_D);
+ intel_ddi_init(dev_priv, PORT_E);
+ } else if (HAS_DDI(dev_priv)) {
+ u32 found;
+
+ if (intel_ddi_crt_present(dev_priv))
+ intel_crt_init(dev_priv);
+
+ /* Haswell uses DDI functions to detect digital outputs. */
+ found = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
+ if (found)
+ intel_ddi_init(dev_priv, PORT_A);
+
+ found = intel_de_read(dev_priv, SFUSE_STRAP);
+ if (found & SFUSE_STRAP_DDIB_DETECTED)
+ intel_ddi_init(dev_priv, PORT_B);
+ if (found & SFUSE_STRAP_DDIC_DETECTED)
+ intel_ddi_init(dev_priv, PORT_C);
+ if (found & SFUSE_STRAP_DDID_DETECTED)
+ intel_ddi_init(dev_priv, PORT_D);
+ if (found & SFUSE_STRAP_DDIF_DETECTED)
+ intel_ddi_init(dev_priv, PORT_F);
+ } else if (HAS_PCH_SPLIT(dev_priv)) {
+ int found;
+
+ /*
+ * intel_edp_init_connector() depends on this completing first,
+ * to prevent the registration of both eDP and LVDS and the
+ * incorrect sharing of the PPS.
+ */
+ intel_lvds_init(dev_priv);
+ intel_crt_init(dev_priv);
+
+ dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);
+
+ if (ilk_has_edp_a(dev_priv))
+ g4x_dp_init(dev_priv, DP_A, PORT_A);
+
+ if (intel_de_read(dev_priv, PCH_HDMIB) & SDVO_DETECTED) {
+ /* PCH SDVOB multiplex with HDMIB */
+ found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
+ if (!found)
+ g4x_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
+ if (!found && (intel_de_read(dev_priv, PCH_DP_B) & DP_DETECTED))
+ g4x_dp_init(dev_priv, PCH_DP_B, PORT_B);
+ }
+
+ if (intel_de_read(dev_priv, PCH_HDMIC) & SDVO_DETECTED)
+ g4x_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);
+
+ if (!dpd_is_edp && intel_de_read(dev_priv, PCH_HDMID) & SDVO_DETECTED)
+ g4x_hdmi_init(dev_priv, PCH_HDMID, PORT_D);
+
+ if (intel_de_read(dev_priv, PCH_DP_C) & DP_DETECTED)
+ g4x_dp_init(dev_priv, PCH_DP_C, PORT_C);
+
+ if (intel_de_read(dev_priv, PCH_DP_D) & DP_DETECTED)
+ g4x_dp_init(dev_priv, PCH_DP_D, PORT_D);
+ } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ bool has_edp, has_port;
+
+ if (IS_VALLEYVIEW(dev_priv) && dev_priv->display.vbt.int_crt_support)
+ intel_crt_init(dev_priv);
+
+ /*
+ * The DP_DETECTED bit is the latched state of the DDC
+ * SDA pin at boot. However since eDP doesn't require DDC
+ * (no way to plug in a DP->HDMI dongle) the DDC pins for
+ * eDP ports may have been muxed to an alternate function.
+ * Thus we can't rely on the DP_DETECTED bit alone to detect
+ * eDP ports. Consult the VBT as well as DP_DETECTED to
+ * detect eDP ports.
+ *
+ * Sadly the straps seem to be missing sometimes even for HDMI
+ * ports (e.g. on Voyo V3 - CHT x7-Z8700), so check both strap
+ * and VBT for the presence of the port. Additionally we can't
+ * trust the port type the VBT declares as we've seen at least
+ * HDMI ports that the VBT claims are DP or eDP.
+ */
+ has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
+ has_port = intel_bios_is_port_present(dev_priv, PORT_B);
+ if (intel_de_read(dev_priv, VLV_DP_B) & DP_DETECTED || has_port)
+ has_edp &= g4x_dp_init(dev_priv, VLV_DP_B, PORT_B);
+ if ((intel_de_read(dev_priv, VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
+ g4x_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);
+
+ has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
+ has_port = intel_bios_is_port_present(dev_priv, PORT_C);
+ if (intel_de_read(dev_priv, VLV_DP_C) & DP_DETECTED || has_port)
+ has_edp &= g4x_dp_init(dev_priv, VLV_DP_C, PORT_C);
+ if ((intel_de_read(dev_priv, VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
+ g4x_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);
+
+ if (IS_CHERRYVIEW(dev_priv)) {
+ /*
+ * eDP not supported on port D,
+ * so no need to worry about it
+ */
+ has_port = intel_bios_is_port_present(dev_priv, PORT_D);
+ if (intel_de_read(dev_priv, CHV_DP_D) & DP_DETECTED || has_port)
+ g4x_dp_init(dev_priv, CHV_DP_D, PORT_D);
+ if (intel_de_read(dev_priv, CHV_HDMID) & SDVO_DETECTED || has_port)
+ g4x_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
+ }
+
+ vlv_dsi_init(dev_priv);
+ } else if (IS_PINEVIEW(dev_priv)) {
+ intel_lvds_init(dev_priv);
+ intel_crt_init(dev_priv);
+ } else if (IS_DISPLAY_VER(dev_priv, 3, 4)) {
+ bool found = false;
+
+ if (IS_MOBILE(dev_priv))
+ intel_lvds_init(dev_priv);
+
+ intel_crt_init(dev_priv);
+
+ if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
+ drm_dbg_kms(&dev_priv->drm, "probing SDVOB\n");
+ found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
+ if (!found && IS_G4X(dev_priv)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "probing HDMI on SDVOB\n");
+ g4x_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
+ }
+
+ if (!found && IS_G4X(dev_priv))
+ g4x_dp_init(dev_priv, DP_B, PORT_B);
+ }
+
+ /* Before G4X, SDVOC doesn't have its own detect register */
+
+ if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
+ drm_dbg_kms(&dev_priv->drm, "probing SDVOC\n");
+ found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
+ }
+
+ if (!found && (intel_de_read(dev_priv, GEN3_SDVOC) & SDVO_DETECTED)) {
+ if (IS_G4X(dev_priv)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "probing HDMI on SDVOC\n");
+ g4x_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
+ }
+ if (IS_G4X(dev_priv))
+ g4x_dp_init(dev_priv, DP_C, PORT_C);
+ }
+
+ if (IS_G4X(dev_priv) && (intel_de_read(dev_priv, DP_D) & DP_DETECTED))
+ g4x_dp_init(dev_priv, DP_D, PORT_D);
+
+ if (SUPPORTS_TV(dev_priv))
+ intel_tv_init(dev_priv);
+ } else if (DISPLAY_VER(dev_priv) == 2) {
+ if (IS_I85X(dev_priv))
+ intel_lvds_init(dev_priv);
+
+ intel_crt_init(dev_priv);
+ intel_dvo_init(dev_priv);
+ }
+
+ for_each_intel_encoder(&dev_priv->drm, encoder) {
+ encoder->base.possible_crtcs =
+ intel_encoder_possible_crtcs(encoder);
+ encoder->base.possible_clones =
+ intel_encoder_possible_clones(encoder);
+ }
+
+ intel_init_pch_refclk(dev_priv);
+
+ drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
+}
+
+static int max_dotclock(struct drm_i915_private *i915)
+{
+ int max_dotclock = i915->max_dotclk_freq;
+
+ /* icl+ might use bigjoiner */
+ if (DISPLAY_VER(i915) >= 11)
+ max_dotclock *= 2;
+
+ return max_dotclock;
+}
+
+static enum drm_mode_status
+intel_mode_valid(struct drm_device *dev,
+ const struct drm_display_mode *mode)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ int hdisplay_max, htotal_max;
+ int vdisplay_max, vtotal_max;
+
+ /*
+ * Can't reject DBLSCAN here because Xorg ddxen can add piles
+ * of DBLSCAN modes to the output's mode list when they detect
+ * the scaling mode property on the connector. And they don't
+ * ask the kernel to validate those modes in any way until
+ * modeset time at which point the client gets a protocol error.
+ * So in order to not upset those clients we silently ignore the
+ * DBLSCAN flag on such connectors. For other connectors we will
+ * reject modes with the DBLSCAN flag in encoder->compute_config().
+ * And we always reject DBLSCAN modes in connector->mode_valid()
+ * as we never want such modes on the connector's mode list.
+ */
+
+ if (mode->vscan > 1)
+ return MODE_NO_VSCAN;
+
+ if (mode->flags & DRM_MODE_FLAG_HSKEW)
+ return MODE_H_ILLEGAL;
+
+ if (mode->flags & (DRM_MODE_FLAG_CSYNC |
+ DRM_MODE_FLAG_NCSYNC |
+ DRM_MODE_FLAG_PCSYNC))
+ return MODE_HSYNC;
+
+ if (mode->flags & (DRM_MODE_FLAG_BCAST |
+ DRM_MODE_FLAG_PIXMUX |
+ DRM_MODE_FLAG_CLKDIV2))
+ return MODE_BAD;
+
+ /*
+ * Reject clearly excessive dotclocks early to
+ * avoid having to worry about huge integers later.
+ */
+ if (mode->clock > max_dotclock(dev_priv))
+ return MODE_CLOCK_HIGH;
+
+ /* Transcoder timing limits */
+ if (DISPLAY_VER(dev_priv) >= 11) {
+ hdisplay_max = 16384;
+ vdisplay_max = 8192;
+ htotal_max = 16384;
+ vtotal_max = 8192;
+ } else if (DISPLAY_VER(dev_priv) >= 9 ||
+ IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
+ hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
+ vdisplay_max = 4096;
+ htotal_max = 8192;
+ vtotal_max = 8192;
+ } else if (DISPLAY_VER(dev_priv) >= 3) {
+ hdisplay_max = 4096;
+ vdisplay_max = 4096;
+ htotal_max = 8192;
+ vtotal_max = 8192;
+ } else {
+ hdisplay_max = 2048;
+ vdisplay_max = 2048;
+ htotal_max = 4096;
+ vtotal_max = 4096;
+ }
+
+ if (mode->hdisplay > hdisplay_max ||
+ mode->hsync_start > htotal_max ||
+ mode->hsync_end > htotal_max ||
+ mode->htotal > htotal_max)
+ return MODE_H_ILLEGAL;
+
+ if (mode->vdisplay > vdisplay_max ||
+ mode->vsync_start > vtotal_max ||
+ mode->vsync_end > vtotal_max ||
+ mode->vtotal > vtotal_max)
+ return MODE_V_ILLEGAL;
+
+ return MODE_OK;
+}
+
+enum drm_mode_status intel_cpu_transcoder_mode_valid(struct drm_i915_private *dev_priv,
+ const struct drm_display_mode *mode)
+{
+ /*
+ * Additional transcoder timing limits,
+ * excluding BXT/GLK DSI transcoders.
+ */
+ if (DISPLAY_VER(dev_priv) >= 5) {
+ if (mode->hdisplay < 64 ||
+ mode->htotal - mode->hdisplay < 32)
+ return MODE_H_ILLEGAL;
+
+ if (mode->vtotal - mode->vdisplay < 5)
+ return MODE_V_ILLEGAL;
+ } else {
+ if (mode->htotal - mode->hdisplay < 32)
+ return MODE_H_ILLEGAL;
+
+ if (mode->vtotal - mode->vdisplay < 3)
+ return MODE_V_ILLEGAL;
+ }
+
+ /*
+ * Cantiga+ cannot handle modes with a hsync front porch of 0.
+ * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
+ */
+ if ((DISPLAY_VER(dev_priv) > 4 || IS_G4X(dev_priv)) &&
+ mode->hsync_start == mode->hdisplay)
+ return MODE_H_ILLEGAL;
+
+ return MODE_OK;
+}
+
+enum drm_mode_status
+intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
+ const struct drm_display_mode *mode,
+ bool bigjoiner)
+{
+ int plane_width_max, plane_height_max;
+
+ /*
+ * intel_mode_valid() should be
+ * sufficient on older platforms.
+ */
+ if (DISPLAY_VER(dev_priv) < 9)
+ return MODE_OK;
+
+ /*
+ * Most people will probably want a fullscreen
+ * plane so let's not advertise modes that are
+ * too big for that.
+ */
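+ /*
+ * E.g. on ICL+ modes up to 5120x4320 are advertised, or up to
+ * 10240x4320 when two pipes are ganged via the big joiner: the
+ * "<< bigjoiner" below doubles the width limit.
+ */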
+ if (DISPLAY_VER(dev_priv) >= 11) {
+ plane_width_max = 5120 << bigjoiner;
+ plane_height_max = 4320;
+ } else {
+ plane_width_max = 5120;
+ plane_height_max = 4096;
+ }
+
+ if (mode->hdisplay > plane_width_max)
+ return MODE_H_ILLEGAL;
+
+ if (mode->vdisplay > plane_height_max)
+ return MODE_V_ILLEGAL;
+
+ return MODE_OK;
+}
+
+static const struct drm_mode_config_funcs intel_mode_funcs = {
+ .fb_create = intel_user_framebuffer_create,
+ .get_format_info = intel_fb_get_format_info,
+ .output_poll_changed = intel_fbdev_output_poll_changed,
+ .mode_valid = intel_mode_valid,
+ .atomic_check = intel_atomic_check,
+ .atomic_commit = intel_atomic_commit,
+ .atomic_state_alloc = intel_atomic_state_alloc,
+ .atomic_state_clear = intel_atomic_state_clear,
+ .atomic_state_free = intel_atomic_state_free,
+};
+
+static const struct intel_display_funcs skl_display_funcs = {
+ .get_pipe_config = hsw_get_pipe_config,
+ .crtc_enable = hsw_crtc_enable,
+ .crtc_disable = hsw_crtc_disable,
+ .commit_modeset_enables = skl_commit_modeset_enables,
+ .get_initial_plane_config = skl_get_initial_plane_config,
+};
+
+static const struct intel_display_funcs ddi_display_funcs = {
+ .get_pipe_config = hsw_get_pipe_config,
+ .crtc_enable = hsw_crtc_enable,
+ .crtc_disable = hsw_crtc_disable,
+ .commit_modeset_enables = intel_commit_modeset_enables,
+ .get_initial_plane_config = i9xx_get_initial_plane_config,
+};
+
+static const struct intel_display_funcs pch_split_display_funcs = {
+ .get_pipe_config = ilk_get_pipe_config,
+ .crtc_enable = ilk_crtc_enable,
+ .crtc_disable = ilk_crtc_disable,
+ .commit_modeset_enables = intel_commit_modeset_enables,
+ .get_initial_plane_config = i9xx_get_initial_plane_config,
+};
+
+static const struct intel_display_funcs vlv_display_funcs = {
+ .get_pipe_config = i9xx_get_pipe_config,
+ .crtc_enable = valleyview_crtc_enable,
+ .crtc_disable = i9xx_crtc_disable,
+ .commit_modeset_enables = intel_commit_modeset_enables,
+ .get_initial_plane_config = i9xx_get_initial_plane_config,
+};
+
+static const struct intel_display_funcs i9xx_display_funcs = {
+ .get_pipe_config = i9xx_get_pipe_config,
+ .crtc_enable = i9xx_crtc_enable,
+ .crtc_disable = i9xx_crtc_disable,
+ .commit_modeset_enables = intel_commit_modeset_enables,
+ .get_initial_plane_config = i9xx_get_initial_plane_config,
+};
+
+/**
+ * intel_init_display_hooks - initialize the display modesetting hooks
+ * @dev_priv: device private
+ */
+void intel_init_display_hooks(struct drm_i915_private *dev_priv)
+{
+ if (!HAS_DISPLAY(dev_priv))
+ return;
+
+ intel_init_cdclk_hooks(dev_priv);
+ intel_audio_hooks_init(dev_priv);
+
+ intel_dpll_init_clock_hook(dev_priv);
+
+ if (DISPLAY_VER(dev_priv) >= 9) {
+ dev_priv->display.funcs.display = &skl_display_funcs;
+ } else if (HAS_DDI(dev_priv)) {
+ dev_priv->display.funcs.display = &ddi_display_funcs;
+ } else if (HAS_PCH_SPLIT(dev_priv)) {
+ dev_priv->display.funcs.display = &pch_split_display_funcs;
+ } else if (IS_CHERRYVIEW(dev_priv) ||
+ IS_VALLEYVIEW(dev_priv)) {
+ dev_priv->display.funcs.display = &vlv_display_funcs;
+ } else {
+ dev_priv->display.funcs.display = &i9xx_display_funcs;
+ }
+
+ intel_fdi_init_hook(dev_priv);
+}
+
+void intel_modeset_init_hw(struct drm_i915_private *i915)
+{
+ struct intel_cdclk_state *cdclk_state;
+
+ if (!HAS_DISPLAY(i915))
+ return;
+
+ cdclk_state = to_intel_cdclk_state(i915->display.cdclk.obj.state);
+
+ intel_update_cdclk(i915);
+ intel_cdclk_dump_config(i915, &i915->display.cdclk.hw, "Current CDCLK");
+ cdclk_state->logical = cdclk_state->actual = i915->display.cdclk.hw;
+}
+
+static int sanitize_watermarks_add_affected(struct drm_atomic_state *state)
+{
+ struct drm_plane *plane;
+ struct intel_crtc *crtc;
+
+ for_each_intel_crtc(state->dev, crtc) {
+ struct intel_crtc_state *crtc_state;
+
+ crtc_state = intel_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ if (crtc_state->hw.active) {
+ /*
+ * Preserve the inherited flag to avoid
+ * taking the full modeset path.
+ */
+ crtc_state->inherited = true;
+ }
+ }
+
+ drm_for_each_plane(plane, state->dev) {
+ struct drm_plane_state *plane_state;
+
+ plane_state = drm_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state))
+ return PTR_ERR(plane_state);
+ }
+
+ return 0;
+}
+
+/*
+ * Calculate what we think the watermarks should be for the state we've read
+ * out of the hardware and then immediately program those watermarks so that
+ * we ensure the hardware settings match our internal state.
+ *
+ * We can calculate what we think WMs should be by creating a duplicate of the
+ * current state (which was constructed during hardware readout) and running it
+ * through the atomic check code to calculate new watermark values in the
+ * state object.
+ */
+static void sanitize_watermarks(struct drm_i915_private *dev_priv)
+{
+ struct drm_atomic_state *state;
+ struct intel_atomic_state *intel_state;
+ struct intel_crtc *crtc;
+ struct intel_crtc_state *crtc_state;
+ struct drm_modeset_acquire_ctx ctx;
+ int ret;
+ int i;
+
+ /* Only supported on platforms that use atomic watermark design */
+ if (!dev_priv->display.funcs.wm->optimize_watermarks)
+ return;
+
+ state = drm_atomic_state_alloc(&dev_priv->drm);
+ if (drm_WARN_ON(&dev_priv->drm, !state))
+ return;
+
+ intel_state = to_intel_atomic_state(state);
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+retry:
+ state->acquire_ctx = &ctx;
+
+ /*
+ * Hardware readout is the only time we don't want to calculate
+ * intermediate watermarks (since we don't trust the current
+ * watermarks).
+ */
+ if (!HAS_GMCH(dev_priv))
+ intel_state->skip_intermediate_wm = true;
+
+ ret = sanitize_watermarks_add_affected(state);
+ if (ret)
+ goto fail;
+
+ ret = intel_atomic_check(&dev_priv->drm, state);
+ if (ret)
+ goto fail;
+
+ /* Write calculated watermark values back */
+ for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
+ crtc_state->wm.need_postvbl_update = true;
+ intel_optimize_watermarks(intel_state, crtc);
+
+ to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
+ }
+
+fail:
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ drm_modeset_backoff(&ctx);
+ goto retry;
+ }
+
+ /*
+ * If we fail here, it means that the hardware appears to be
+ * programmed in a way that shouldn't be possible, given our
+ * understanding of watermark requirements. This might mean a
+ * mistake in the hardware readout code or a mistake in the
+ * watermark calculations for a given platform. Raise a WARN
+ * so that this is noticeable.
+ *
+ * If this actually happens, we'll have to just leave the
+ * BIOS-programmed watermarks untouched and hope for the best.
+ */
+ drm_WARN(&dev_priv->drm, ret,
+ "Could not determine valid watermarks for inherited state\n");
+
+ drm_atomic_state_put(state);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+}
+
+static int intel_initial_commit(struct drm_device *dev)
+{
+ struct drm_atomic_state *state = NULL;
+ struct drm_modeset_acquire_ctx ctx;
+ struct intel_crtc *crtc;
+ int ret = 0;
+
+ state = drm_atomic_state_alloc(dev);
+ if (!state)
+ return -ENOMEM;
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+retry:
+ state->acquire_ctx = &ctx;
+
+ for_each_intel_crtc(dev, crtc) {
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_crtc_state(state, crtc);
+
+ if (IS_ERR(crtc_state)) {
+ ret = PTR_ERR(crtc_state);
+ goto out;
+ }
+
+ if (crtc_state->hw.active) {
+ struct intel_encoder *encoder;
+
+ /*
+ * We've not yet detected sink capabilities
+ * (audio,infoframes,etc.) and thus we don't want to
+ * force a full state recomputation yet. We want that to
+ * happen only for the first real commit from userspace.
+ * So preserve the inherited flag for the time being.
+ */
+ crtc_state->inherited = true;
+
+ ret = drm_atomic_add_affected_planes(state, &crtc->base);
+ if (ret)
+ goto out;
+
+ /*
+ * FIXME hack to force a LUT update to avoid the
+ * plane update forcing the pipe gamma on without
+ * having a proper LUT loaded. Remove once we
+ * have readout for pipe gamma enable.
+ */
+ crtc_state->uapi.color_mgmt_changed = true;
+
+ for_each_intel_encoder_mask(dev, encoder,
+ crtc_state->uapi.encoder_mask) {
+ if (encoder->initial_fastset_check &&
+ !encoder->initial_fastset_check(encoder, crtc_state)) {
+ ret = drm_atomic_add_affected_connectors(state,
+ &crtc->base);
+ if (ret)
+ goto out;
+ }
+ }
+ }
+ }
+
+ ret = drm_atomic_commit(state);
+
+out:
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ drm_modeset_backoff(&ctx);
+ goto retry;
+ }
+
+ drm_atomic_state_put(state);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
+ return ret;
+}
+
+static const struct drm_mode_config_helper_funcs intel_mode_config_funcs = {
+ .atomic_commit_setup = drm_dp_mst_atomic_setup_commit,
+};
+
+static void intel_mode_config_init(struct drm_i915_private *i915)
+{
+ struct drm_mode_config *mode_config = &i915->drm.mode_config;
+
+ drm_mode_config_init(&i915->drm);
+ INIT_LIST_HEAD(&i915->global_obj_list);
+
+ mode_config->min_width = 0;
+ mode_config->min_height = 0;
+
+ mode_config->preferred_depth = 24;
+ mode_config->prefer_shadow = 1;
+
+ mode_config->funcs = &intel_mode_funcs;
+ mode_config->helper_private = &intel_mode_config_funcs;
+
+ mode_config->async_page_flip = HAS_ASYNC_FLIPS(i915);
+
+ /*
+ * Maximum framebuffer dimensions, chosen to match
+ * the maximum render engine surface size on gen4+.
+ */
+ if (DISPLAY_VER(i915) >= 7) {
+ mode_config->max_width = 16384;
+ mode_config->max_height = 16384;
+ } else if (DISPLAY_VER(i915) >= 4) {
+ mode_config->max_width = 8192;
+ mode_config->max_height = 8192;
+ } else if (DISPLAY_VER(i915) == 3) {
+ mode_config->max_width = 4096;
+ mode_config->max_height = 4096;
+ } else {
+ mode_config->max_width = 2048;
+ mode_config->max_height = 2048;
+ }
+
+ if (IS_I845G(i915) || IS_I865G(i915)) {
+ mode_config->cursor_width = IS_I845G(i915) ? 64 : 512;
+ mode_config->cursor_height = 1023;
+ } else if (IS_I830(i915) || IS_I85X(i915) ||
+ IS_I915G(i915) || IS_I915GM(i915)) {
+ mode_config->cursor_width = 64;
+ mode_config->cursor_height = 64;
+ } else {
+ mode_config->cursor_width = 256;
+ mode_config->cursor_height = 256;
+ }
+}
+
+static void intel_mode_config_cleanup(struct drm_i915_private *i915)
+{
+ intel_atomic_global_obj_cleanup(i915);
+ drm_mode_config_cleanup(&i915->drm);
+}
+
+/* part #1: call before irq install */
+int intel_modeset_init_noirq(struct drm_i915_private *i915)
+{
+ int ret;
+
+ if (i915_inject_probe_failure(i915))
+ return -ENODEV;
+
+ if (HAS_DISPLAY(i915)) {
+ ret = drm_vblank_init(&i915->drm,
+ INTEL_NUM_PIPES(i915));
+ if (ret)
+ return ret;
+ }
+
+ intel_bios_init(i915);
+
+ ret = intel_vga_register(i915);
+ if (ret)
+ goto cleanup_bios;
+
+ /* FIXME: completely on the wrong abstraction layer */
+ intel_power_domains_init_hw(i915, false);
+
+ if (!HAS_DISPLAY(i915))
+ return 0;
+
+ intel_dmc_ucode_init(i915);
+
+ i915->display.wq.modeset = alloc_ordered_workqueue("i915_modeset", 0);
+ i915->display.wq.flip = alloc_workqueue("i915_flip", WQ_HIGHPRI |
+ WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
+
+ intel_mode_config_init(i915);
+
+ ret = intel_cdclk_init(i915);
+ if (ret)
+ goto cleanup_vga_client_pw_domain_dmc;
+
+ ret = intel_dbuf_init(i915);
+ if (ret)
+ goto cleanup_vga_client_pw_domain_dmc;
+
+ ret = intel_bw_init(i915);
+ if (ret)
+ goto cleanup_vga_client_pw_domain_dmc;
+
+ init_llist_head(&i915->display.atomic_helper.free_list);
+ INIT_WORK(&i915->display.atomic_helper.free_work,
+ intel_atomic_helper_free_state_worker);
+
+ intel_init_quirks(i915);
+
+ intel_fbc_init(i915);
+
+ return 0;
+
+cleanup_vga_client_pw_domain_dmc:
+ intel_dmc_ucode_fini(i915);
+ intel_power_domains_driver_remove(i915);
+ intel_vga_unregister(i915);
+cleanup_bios:
+ intel_bios_driver_remove(i915);
+
+ return ret;
+}
+
+/* part #2: call after irq install, but before gem init */
+int intel_modeset_init_nogem(struct drm_i915_private *i915)
+{
+ struct drm_device *dev = &i915->drm;
+ enum pipe pipe;
+ struct intel_crtc *crtc;
+ int ret;
+
+ if (!HAS_DISPLAY(i915))
+ return 0;
+
+ intel_init_pm(i915);
+
+ intel_panel_sanitize_ssc(i915);
+
+ intel_pps_setup(i915);
+
+ intel_gmbus_setup(i915);
+
+ drm_dbg_kms(&i915->drm, "%d display pipe%s available.\n",
+ INTEL_NUM_PIPES(i915),
+ INTEL_NUM_PIPES(i915) > 1 ? "s" : "");
+
+ for_each_pipe(i915, pipe) {
+ ret = intel_crtc_init(i915, pipe);
+ if (ret) {
+ intel_mode_config_cleanup(i915);
+ return ret;
+ }
+ }
+
+ intel_plane_possible_crtcs_init(i915);
+ intel_shared_dpll_init(i915);
+ intel_fdi_pll_freq_update(i915);
+
+ intel_update_czclk(i915);
+ intel_modeset_init_hw(i915);
+ intel_dpll_update_ref_clks(i915);
+
+ intel_hdcp_component_init(i915);
+
+ if (i915->display.cdclk.max_cdclk_freq == 0)
+ intel_update_max_cdclk(i915);
+
+ /*
+ * If the platform has HTI, we need to find out whether it has reserved
+ * any display resources before we create our display outputs.
+ */
+ if (INTEL_INFO(i915)->display.has_hti)
+ i915->hti_state = intel_de_read(i915, HDPORT_STATE);
+
+ /* Just disable it once at startup */
+ intel_vga_disable(i915);
+ intel_setup_outputs(i915);
+
+ drm_modeset_lock_all(dev);
+ intel_modeset_setup_hw_state(i915, dev->mode_config.acquire_ctx);
+ intel_acpi_assign_connector_fwnodes(i915);
+ drm_modeset_unlock_all(dev);
+
+ for_each_intel_crtc(dev, crtc) {
+ if (!to_intel_crtc_state(crtc->base.state)->uapi.active)
+ continue;
+ intel_crtc_initial_plane_config(crtc);
+ }
+
+ /*
+ * Make sure hardware watermarks really match the state we read out.
+ * Note that we need to do this after reconstructing the BIOS fbs
+ * since the watermark calculation done here will use pstate->fb.
+ */
+ if (!HAS_GMCH(i915))
+ sanitize_watermarks(i915);
+
+ return 0;
+}
+
+/* part #3: call after gem init */
+int intel_modeset_init(struct drm_i915_private *i915)
+{
+ int ret;
+
+ if (!HAS_DISPLAY(i915))
+ return 0;
+
+ /*
+ * Force all active planes to recompute their states, so that on
+ * mode_setcrtc after probe all the intel_plane_state variables
+ * are already calculated and there are no assert_plane warnings
+ * during bootup.
+ */
+ ret = intel_initial_commit(&i915->drm);
+ if (ret)
+ drm_dbg_kms(&i915->drm, "Initial modeset failed, %d\n", ret);
+
+ intel_overlay_setup(i915);
+
+ ret = intel_fbdev_init(&i915->drm);
+ if (ret)
+ return ret;
+
+ /* Only enable hotplug handling once the fbdev is fully set up. */
+ intel_hpd_init(i915);
+ intel_hpd_poll_disable(i915);
+
+ skl_watermark_ipc_init(i915);
+
+ return 0;
+}
+
+void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+ struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
+ /* 640x480@60Hz, ~25175 kHz */
+ struct dpll clock = {
+ .m1 = 18,
+ .m2 = 7,
+ .p1 = 13,
+ .p2 = 4,
+ .n = 2,
+ };
+ u32 dpll, fp;
+ int i;
+
+ drm_WARN_ON(&dev_priv->drm,
+ i9xx_calc_dpll_params(48000, &clock) != 25154);
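+
+ /*
+ * Assuming the usual i9xx DPLL formula m = 5 * (m1 + 2) + (m2 + 2),
+ * the dividers above work out for refclk = 48000 kHz as:
+ *
+ *   m   = 5 * (18 + 2) + (7 + 2) = 109
+ *   vco = 48000 * 109 / (2 + 2)  = 1308000 kHz
+ *   dot = 1308000 / (13 * 4)     = 25154 kHz
+ *
+ * i.e. just shy of the nominal 25175 kHz, hence the value checked
+ * by the WARN above.
+ */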
+
+ drm_dbg_kms(&dev_priv->drm,
+ "enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
+ pipe_name(pipe), clock.vco, clock.dot);
+
+ fp = i9xx_dpll_compute_fp(&clock);
+ dpll = DPLL_DVO_2X_MODE |
+ DPLL_VGA_MODE_DIS |
+ ((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
+ PLL_P2_DIVIDE_BY_4 |
+ PLL_REF_INPUT_DREFCLK |
+ DPLL_VCO_ENABLE;
+
+ intel_de_write(dev_priv, HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
+ intel_de_write(dev_priv, HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
+ intel_de_write(dev_priv, HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
+ intel_de_write(dev_priv, VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
+ intel_de_write(dev_priv, VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
+ intel_de_write(dev_priv, VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
+ intel_de_write(dev_priv, PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));
+
+ intel_de_write(dev_priv, FP0(pipe), fp);
+ intel_de_write(dev_priv, FP1(pipe), fp);
+
+ /*
+ * Apparently we need to have VGA mode enabled prior to changing
+ * the P1/P2 dividers. Otherwise the DPLL will keep using the old
+ * dividers, even though the register value does change.
+ */
+ intel_de_write(dev_priv, DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
+ intel_de_write(dev_priv, DPLL(pipe), dpll);
+
+ /* Wait for the clocks to stabilize. */
+ intel_de_posting_read(dev_priv, DPLL(pipe));
+ udelay(150);
+
+ /* The pixel multiplier can only be updated once the
+ * DPLL is enabled and the clocks are stable.
+ *
+ * So write it again.
+ */
+ intel_de_write(dev_priv, DPLL(pipe), dpll);
+
+ /* We do this three times for luck */
+ for (i = 0; i < 3; i++) {
+ intel_de_write(dev_priv, DPLL(pipe), dpll);
+ intel_de_posting_read(dev_priv, DPLL(pipe));
+ udelay(150); /* wait for warmup */
+ }
+
+ intel_de_write(dev_priv, PIPECONF(pipe), PIPECONF_ENABLE);
+ intel_de_posting_read(dev_priv, PIPECONF(pipe));
+
+ intel_wait_for_pipe_scanline_moving(crtc);
+}
+
+void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+ struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
+
+ drm_dbg_kms(&dev_priv->drm, "disabling pipe %c due to force quirk\n",
+ pipe_name(pipe));
+
+ drm_WARN_ON(&dev_priv->drm,
+ intel_de_read(dev_priv, DSPCNTR(PLANE_A)) & DISP_ENABLE);
+ drm_WARN_ON(&dev_priv->drm,
+ intel_de_read(dev_priv, DSPCNTR(PLANE_B)) & DISP_ENABLE);
+ drm_WARN_ON(&dev_priv->drm,
+ intel_de_read(dev_priv, DSPCNTR(PLANE_C)) & DISP_ENABLE);
+ drm_WARN_ON(&dev_priv->drm,
+ intel_de_read(dev_priv, CURCNTR(PIPE_A)) & MCURSOR_MODE_MASK);
+ drm_WARN_ON(&dev_priv->drm,
+ intel_de_read(dev_priv, CURCNTR(PIPE_B)) & MCURSOR_MODE_MASK);
+
+ intel_de_write(dev_priv, PIPECONF(pipe), 0);
+ intel_de_posting_read(dev_priv, PIPECONF(pipe));
+
+ intel_wait_for_pipe_scanline_stopped(crtc);
+
+ intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS);
+ intel_de_posting_read(dev_priv, DPLL(pipe));
+}
+
+void intel_display_resume(struct drm_device *dev)
+{
+ struct drm_i915_private *i915 = to_i915(dev);
+ struct drm_atomic_state *state = i915->modeset_restore_state;
+ struct drm_modeset_acquire_ctx ctx;
+ int ret;
+
+ if (!HAS_DISPLAY(i915))
+ return;
+
+ i915->modeset_restore_state = NULL;
+ if (state)
+ state->acquire_ctx = &ctx;
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+ while (1) {
+ ret = drm_modeset_lock_all_ctx(dev, &ctx);
+ if (ret != -EDEADLK)
+ break;
+
+ drm_modeset_backoff(&ctx);
+ }
+
+ if (!ret)
+ ret = __intel_display_resume(i915, state, &ctx);
+
+ skl_watermark_ipc_update(i915);
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
+ if (ret)
+ drm_err(&i915->drm,
+ "Restoring old state failed with %i\n", ret);
+ if (state)
+ drm_atomic_state_put(state);
+}
+
+static void intel_hpd_poll_fini(struct drm_i915_private *i915)
+{
+ struct intel_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+
+ /* Kill all the work that may have been queued by hpd. */
+ drm_connector_list_iter_begin(&i915->drm, &conn_iter);
+ for_each_intel_connector_iter(connector, &conn_iter) {
+ if (connector->modeset_retry_work.func)
+ cancel_work_sync(&connector->modeset_retry_work);
+ if (connector->hdcp.shim) {
+ cancel_delayed_work_sync(&connector->hdcp.check_work);
+ cancel_work_sync(&connector->hdcp.prop_work);
+ }
+ }
+ drm_connector_list_iter_end(&conn_iter);
+}
+
+/* part #1: call before irq uninstall */
+void intel_modeset_driver_remove(struct drm_i915_private *i915)
+{
+ if (!HAS_DISPLAY(i915))
+ return;
+
+ flush_workqueue(i915->display.wq.flip);
+ flush_workqueue(i915->display.wq.modeset);
+
+ flush_work(&i915->display.atomic_helper.free_work);
+ drm_WARN_ON(&i915->drm, !llist_empty(&i915->display.atomic_helper.free_list));
+
+ /*
+ * MST topology needs to be suspended so we don't have any calls to
+ * fbdev after it's finalized. MST will be destroyed later as part of
+	 * drm_mode_config_cleanup().
+ */
+ intel_dp_mst_suspend(i915);
+}
+
+/* part #2: call after irq uninstall */
+void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915)
+{
+ if (!HAS_DISPLAY(i915))
+ return;
+
+ /*
+	 * Due to the hpd irq storm handling, the hotplug work can re-arm the
+ * poll handlers. Hence disable polling after hpd handling is shut down.
+ */
+ intel_hpd_poll_fini(i915);
+
+ /* poll work can call into fbdev, hence clean that up afterwards */
+ intel_fbdev_fini(i915);
+
+ intel_unregister_dsm_handler();
+
+ /* flush any delayed tasks or pending work */
+ flush_scheduled_work();
+
+ intel_hdcp_component_fini(i915);
+
+ intel_mode_config_cleanup(i915);
+
+ intel_overlay_cleanup(i915);
+
+ intel_gmbus_teardown(i915);
+
+ destroy_workqueue(i915->display.wq.flip);
+ destroy_workqueue(i915->display.wq.modeset);
+
+ intel_fbc_cleanup(i915);
+}
+
+/* part #3: call after gem init */
+void intel_modeset_driver_remove_nogem(struct drm_i915_private *i915)
+{
+ intel_dmc_ucode_fini(i915);
+
+ intel_power_domains_driver_remove(i915);
+
+ intel_vga_unregister(i915);
+
+ intel_bios_driver_remove(i915);
+}
+
+bool intel_modeset_probe_defer(struct pci_dev *pdev)
+{
+ struct drm_privacy_screen *privacy_screen;
+
+ /*
+ * apple-gmux is needed on dual GPU MacBook Pro
+ * to probe the panel if we're the inactive GPU.
+ */
+ if (vga_switcheroo_client_probe_defer(pdev))
+ return true;
+
+ /* If the LCD panel has a privacy-screen, wait for it */
+ privacy_screen = drm_privacy_screen_get(&pdev->dev, NULL);
+ if (IS_ERR(privacy_screen) && PTR_ERR(privacy_screen) == -EPROBE_DEFER)
+ return true;
+
+ drm_privacy_screen_put(privacy_screen);
+
+ return false;
+}
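+
+/*
+ * Illustrative caller sketch (hypothetical probe path, not part of this
+ * file): the PCI probe function is expected to bail out early when probing
+ * should be deferred:
+ *
+ *	if (intel_modeset_probe_defer(pdev))
+ *		return -EPROBE_DEFER;
+ */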
+
+void intel_display_driver_register(struct drm_i915_private *i915)
+{
+ if (!HAS_DISPLAY(i915))
+ return;
+
+ intel_display_debugfs_register(i915);
+
+ /* Must be done after probing outputs */
+ intel_opregion_register(i915);
+ intel_acpi_video_register(i915);
+
+ intel_audio_init(i915);
+
+ /*
+	 * Some ports require correctly set-up hpd registers for
+	 * detection to work properly (otherwise we may see a ghost
+	 * connected connector status), e.g. VGA on gm45. Hence we can only set
+ * up the initial fbdev config after hpd irqs are fully
+ * enabled. We do it last so that the async config cannot run
+ * before the connectors are registered.
+ */
+ intel_fbdev_initial_config_async(&i915->drm);
+
+ /*
+ * We need to coordinate the hotplugs with the asynchronous
+ * fbdev configuration, for which we use the
+ * fbdev->async_cookie.
+ */
+ drm_kms_helper_poll_init(&i915->drm);
+}
+
+void intel_display_driver_unregister(struct drm_i915_private *i915)
+{
+ if (!HAS_DISPLAY(i915))
+ return;
+
+ intel_fbdev_unregister(i915);
+ intel_audio_deinit(i915);
+
+ /*
+	 * After flushing the fbdev (including a late async config, which
+	 * will have delayed the queuing of a hotplug event), flush the
+	 * hotplug events.
+ */
+ drm_kms_helper_poll_fini(&i915->drm);
+ drm_atomic_helper_shutdown(&i915->drm);
+
+ acpi_video_unregister();
+ intel_opregion_unregister(i915);
+}
+
+bool intel_scanout_needs_vtd_wa(struct drm_i915_private *i915)
+{
+ return DISPLAY_VER(i915) >= 6 && i915_vtd_active(i915);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
new file mode 100644
index 000000000..b4f941674
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -0,0 +1,721 @@
+/*
+ * Copyright © 2006-2019 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _INTEL_DISPLAY_H_
+#define _INTEL_DISPLAY_H_
+
+#include <drm/drm_util.h>
+
+#include "i915_reg_defs.h"
+
+enum drm_scaling_filter;
+struct dpll;
+struct drm_connector;
+struct drm_device;
+struct drm_display_mode;
+struct drm_encoder;
+struct drm_file;
+struct drm_format_info;
+struct drm_framebuffer;
+struct drm_i915_gem_object;
+struct drm_i915_private;
+struct drm_mode_fb_cmd2;
+struct drm_modeset_acquire_ctx;
+struct drm_plane;
+struct drm_plane_state;
+struct i915_address_space;
+struct i915_gtt_view;
+struct intel_atomic_state;
+struct intel_crtc;
+struct intel_crtc_state;
+struct intel_digital_port;
+struct intel_dp;
+struct intel_encoder;
+struct intel_initial_plane_config;
+struct intel_load_detect_pipe;
+struct intel_plane;
+struct intel_plane_state;
+struct intel_power_domain_mask;
+struct intel_remapped_info;
+struct intel_rotation_info;
+struct pci_dev;
+
+enum i915_gpio {
+ GPIOA,
+ GPIOB,
+ GPIOC,
+ GPIOD,
+ GPIOE,
+ GPIOF,
+ GPIOG,
+ GPIOH,
+ __GPIOI_UNUSED,
+ GPIOJ,
+ GPIOK,
+ GPIOL,
+ GPIOM,
+ GPION,
+ GPIOO,
+};
+
+/*
+ * Keep the pipe enum values fixed: the code assumes that PIPE_A=0, the
+ * rest have consecutive values and match the enum values of transcoders
+ * with a 1:1 transcoder -> pipe mapping.
+ */
+enum pipe {
+ INVALID_PIPE = -1,
+
+ PIPE_A = 0,
+ PIPE_B,
+ PIPE_C,
+ PIPE_D,
+ _PIPE_EDP,
+
+ I915_MAX_PIPES = _PIPE_EDP
+};
+
+#define pipe_name(p) ((p) + 'A')
+
+enum transcoder {
+ INVALID_TRANSCODER = -1,
+ /*
+ * The following transcoders have a 1:1 transcoder -> pipe mapping,
+ * keep their values fixed: the code assumes that TRANSCODER_A=0, the
+ * rest have consecutive values and match the enum values of the pipes
+ * they map to.
+ */
+ TRANSCODER_A = PIPE_A,
+ TRANSCODER_B = PIPE_B,
+ TRANSCODER_C = PIPE_C,
+ TRANSCODER_D = PIPE_D,
+
+ /*
+ * The following transcoders can map to any pipe, their enum value
+ * doesn't need to stay fixed.
+ */
+ TRANSCODER_EDP,
+ TRANSCODER_DSI_0,
+ TRANSCODER_DSI_1,
+ TRANSCODER_DSI_A = TRANSCODER_DSI_0, /* legacy DSI */
+ TRANSCODER_DSI_C = TRANSCODER_DSI_1, /* legacy DSI */
+
+ I915_MAX_TRANSCODERS
+};
+
+static inline const char *transcoder_name(enum transcoder transcoder)
+{
+ switch (transcoder) {
+ case TRANSCODER_A:
+ return "A";
+ case TRANSCODER_B:
+ return "B";
+ case TRANSCODER_C:
+ return "C";
+ case TRANSCODER_D:
+ return "D";
+ case TRANSCODER_EDP:
+ return "EDP";
+ case TRANSCODER_DSI_A:
+ return "DSI A";
+ case TRANSCODER_DSI_C:
+ return "DSI C";
+ default:
+ return "<invalid>";
+ }
+}
+
+static inline bool transcoder_is_dsi(enum transcoder transcoder)
+{
+ return transcoder == TRANSCODER_DSI_A || transcoder == TRANSCODER_DSI_C;
+}
+
+/*
+ * Global legacy plane identifier. Valid only for primary/sprite
+ * planes on pre-g4x, and only for primary planes on g4x-bdw.
+ */
+enum i9xx_plane_id {
+ PLANE_A,
+ PLANE_B,
+ PLANE_C,
+};
+
+#define plane_name(p) ((p) + 'A')
+#define sprite_name(p, s) ((p) * RUNTIME_INFO(dev_priv)->num_sprites[(p)] + (s) + 'A')
+
+/*
+ * Per-pipe plane identifier.
+ * I915_MAX_PLANES in the enum below is the maximum (across all platforms)
+ * number of planes per CRTC. Not all platforms really have this many planes,
+ * which means some arrays of size I915_MAX_PLANES may have unused entries
+ * between the topmost sprite plane and the cursor plane.
+ *
+ * This is expected to be passed to various register macros
+ * (eg. PLANE_CTL(), PS_PLANE_SEL(), etc.) so adjust with care.
+ */
+enum plane_id {
+ PLANE_PRIMARY,
+ PLANE_SPRITE0,
+ PLANE_SPRITE1,
+ PLANE_SPRITE2,
+ PLANE_SPRITE3,
+ PLANE_SPRITE4,
+ PLANE_SPRITE5,
+ PLANE_CURSOR,
+
+ I915_MAX_PLANES,
+};
+
+#define for_each_plane_id_on_crtc(__crtc, __p) \
+ for ((__p) = PLANE_PRIMARY; (__p) < I915_MAX_PLANES; (__p)++) \
+ for_each_if((__crtc)->plane_ids_mask & BIT(__p))
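+
+/*
+ * Usage sketch (illustrative only): building a mask of the plane ids
+ * present on a crtc; 'crtc' and 'mask' are hypothetical locals.
+ *
+ *	enum plane_id plane_id;
+ *	u8 mask = 0;
+ *
+ *	for_each_plane_id_on_crtc(crtc, plane_id)
+ *		mask |= BIT(plane_id);
+ */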
+
+#define for_each_dbuf_slice(__dev_priv, __slice) \
+ for ((__slice) = DBUF_S1; (__slice) < I915_MAX_DBUF_SLICES; (__slice)++) \
+ for_each_if(INTEL_INFO(__dev_priv)->display.dbuf.slice_mask & BIT(__slice))
+
+#define for_each_dbuf_slice_in_mask(__dev_priv, __slice, __mask) \
+ for_each_dbuf_slice((__dev_priv), (__slice)) \
+ for_each_if((__mask) & BIT(__slice))
+
+enum port {
+ PORT_NONE = -1,
+
+ PORT_A = 0,
+ PORT_B,
+ PORT_C,
+ PORT_D,
+ PORT_E,
+ PORT_F,
+ PORT_G,
+ PORT_H,
+ PORT_I,
+
+ /* tgl+ */
+ PORT_TC1 = PORT_D,
+ PORT_TC2,
+ PORT_TC3,
+ PORT_TC4,
+ PORT_TC5,
+ PORT_TC6,
+
+ /* XE_LPD repositions D/E offsets and bitfields */
+ PORT_D_XELPD = PORT_TC5,
+ PORT_E_XELPD,
+
+ I915_MAX_PORTS
+};
+
+#define port_name(p) ((p) + 'A')
+
+/*
+ * Port identifiers referenced from other drivers.
+ * Expected to remain stable over time.
+ */
+static inline const char *port_identifier(enum port port)
+{
+ switch (port) {
+ case PORT_A:
+ return "Port A";
+ case PORT_B:
+ return "Port B";
+ case PORT_C:
+ return "Port C";
+ case PORT_D:
+ return "Port D";
+ case PORT_E:
+ return "Port E";
+ case PORT_F:
+ return "Port F";
+ case PORT_G:
+ return "Port G";
+ case PORT_H:
+ return "Port H";
+ case PORT_I:
+ return "Port I";
+ default:
+ return "<invalid>";
+ }
+}
+
+enum tc_port {
+ TC_PORT_NONE = -1,
+
+ TC_PORT_1 = 0,
+ TC_PORT_2,
+ TC_PORT_3,
+ TC_PORT_4,
+ TC_PORT_5,
+ TC_PORT_6,
+
+ I915_MAX_TC_PORTS
+};
+
+enum tc_port_mode {
+ TC_PORT_DISCONNECTED,
+ TC_PORT_TBT_ALT,
+ TC_PORT_DP_ALT,
+ TC_PORT_LEGACY,
+};
+
+enum dpio_channel {
+ DPIO_CH0,
+ DPIO_CH1
+};
+
+enum dpio_phy {
+ DPIO_PHY0,
+ DPIO_PHY1,
+ DPIO_PHY2,
+};
+
+enum aux_ch {
+ AUX_CH_A,
+ AUX_CH_B,
+ AUX_CH_C,
+ AUX_CH_D,
+ AUX_CH_E, /* ICL+ */
+ AUX_CH_F,
+ AUX_CH_G,
+ AUX_CH_H,
+ AUX_CH_I,
+
+ /* tgl+ */
+ AUX_CH_USBC1 = AUX_CH_D,
+ AUX_CH_USBC2,
+ AUX_CH_USBC3,
+ AUX_CH_USBC4,
+ AUX_CH_USBC5,
+ AUX_CH_USBC6,
+
+ /* XE_LPD repositions D/E offsets and bitfields */
+ AUX_CH_D_XELPD = AUX_CH_USBC5,
+ AUX_CH_E_XELPD,
+};
+
+#define aux_ch_name(a) ((a) + 'A')
+
+/* Used by DP and FDI links */
+struct intel_link_m_n {
+ u32 tu;
+ u32 data_m;
+ u32 data_n;
+ u32 link_m;
+ u32 link_n;
+};
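+
+/*
+ * Illustrative note (a convention sketch, not taken from bspec): ignoring
+ * FEC overhead, the M/N ratios for a DP link are conventionally derived as
+ *
+ *	data M/N = (bits_per_pixel * pixel_clock) / (8 * nlanes * link_clock)
+ *	link M/N = pixel_clock / link_clock
+ *
+ * which is what intel_link_compute_m_n() below computes, and what
+ * intel_dotclock_calculate() inverts to recover the dotclock.
+ */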
+
+enum phy {
+ PHY_NONE = -1,
+
+ PHY_A = 0,
+ PHY_B,
+ PHY_C,
+ PHY_D,
+ PHY_E,
+ PHY_F,
+ PHY_G,
+ PHY_H,
+ PHY_I,
+
+ I915_MAX_PHYS
+};
+
+#define phy_name(a) ((a) + 'A')
+
+enum phy_fia {
+ FIA1,
+ FIA2,
+ FIA3,
+};
+
+enum hpd_pin {
+ HPD_NONE = 0,
+ HPD_TV = HPD_NONE, /* TV is known to be unreliable */
+ HPD_CRT,
+ HPD_SDVO_B,
+ HPD_SDVO_C,
+ HPD_PORT_A,
+ HPD_PORT_B,
+ HPD_PORT_C,
+ HPD_PORT_D,
+ HPD_PORT_E,
+ HPD_PORT_TC1,
+ HPD_PORT_TC2,
+ HPD_PORT_TC3,
+ HPD_PORT_TC4,
+ HPD_PORT_TC5,
+ HPD_PORT_TC6,
+
+ HPD_NUM_PINS
+};
+
+#define for_each_hpd_pin(__pin) \
+ for ((__pin) = (HPD_NONE + 1); (__pin) < HPD_NUM_PINS; (__pin)++)
+
+#define for_each_pipe(__dev_priv, __p) \
+ for ((__p) = 0; (__p) < I915_MAX_PIPES; (__p)++) \
+ for_each_if(RUNTIME_INFO(__dev_priv)->pipe_mask & BIT(__p))
+
+#define for_each_pipe_masked(__dev_priv, __p, __mask) \
+ for_each_pipe(__dev_priv, __p) \
+ for_each_if((__mask) & BIT(__p))
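+
+/*
+ * Usage sketch (illustrative only; 'i915' is a hypothetical local):
+ *
+ *	enum pipe pipe;
+ *
+ *	for_each_pipe(i915, pipe)
+ *		drm_dbg_kms(&i915->drm, "pipe %c present\n", pipe_name(pipe));
+ */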
+
+#define for_each_cpu_transcoder(__dev_priv, __t) \
+ for ((__t) = 0; (__t) < I915_MAX_TRANSCODERS; (__t)++) \
+ for_each_if (RUNTIME_INFO(__dev_priv)->cpu_transcoder_mask & BIT(__t))
+
+#define for_each_cpu_transcoder_masked(__dev_priv, __t, __mask) \
+ for_each_cpu_transcoder(__dev_priv, __t) \
+ for_each_if ((__mask) & BIT(__t))
+
+#define for_each_sprite(__dev_priv, __p, __s) \
+ for ((__s) = 0; \
+ (__s) < RUNTIME_INFO(__dev_priv)->num_sprites[(__p)]; \
+ (__s)++)
+
+#define for_each_port(__port) \
+ for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++)
+
+#define for_each_port_masked(__port, __ports_mask) \
+ for_each_port(__port) \
+ for_each_if((__ports_mask) & BIT(__port))
+
+#define for_each_phy_masked(__phy, __phys_mask) \
+ for ((__phy) = PHY_A; (__phy) < I915_MAX_PHYS; (__phy)++) \
+ for_each_if((__phys_mask) & BIT(__phy))
+
+#define for_each_crtc(dev, crtc) \
+ list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head)
+
+#define for_each_intel_plane(dev, intel_plane) \
+ list_for_each_entry(intel_plane, \
+ &(dev)->mode_config.plane_list, \
+ base.head)
+
+#define for_each_intel_plane_mask(dev, intel_plane, plane_mask) \
+ list_for_each_entry(intel_plane, \
+ &(dev)->mode_config.plane_list, \
+ base.head) \
+ for_each_if((plane_mask) & \
+ drm_plane_mask(&intel_plane->base))
+
+#define for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) \
+ list_for_each_entry(intel_plane, \
+ &(dev)->mode_config.plane_list, \
+ base.head) \
+ for_each_if((intel_plane)->pipe == (intel_crtc)->pipe)
+
+#define for_each_intel_crtc(dev, intel_crtc) \
+ list_for_each_entry(intel_crtc, \
+ &(dev)->mode_config.crtc_list, \
+ base.head)
+
+#define for_each_intel_crtc_in_pipe_mask(dev, intel_crtc, pipe_mask) \
+ list_for_each_entry(intel_crtc, \
+ &(dev)->mode_config.crtc_list, \
+ base.head) \
+ for_each_if((pipe_mask) & BIT(intel_crtc->pipe))
+
+#define for_each_intel_encoder(dev, intel_encoder) \
+ list_for_each_entry(intel_encoder, \
+ &(dev)->mode_config.encoder_list, \
+ base.head)
+
+#define for_each_intel_encoder_mask(dev, intel_encoder, encoder_mask) \
+ list_for_each_entry(intel_encoder, \
+ &(dev)->mode_config.encoder_list, \
+ base.head) \
+ for_each_if((encoder_mask) & \
+ drm_encoder_mask(&intel_encoder->base))
+
+#define for_each_intel_encoder_mask_with_psr(dev, intel_encoder, encoder_mask) \
+ list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
+ for_each_if(((encoder_mask) & drm_encoder_mask(&(intel_encoder)->base)) && \
+ intel_encoder_can_psr(intel_encoder))
+
+#define for_each_intel_dp(dev, intel_encoder) \
+ for_each_intel_encoder(dev, intel_encoder) \
+ for_each_if(intel_encoder_is_dp(intel_encoder))
+
+#define for_each_intel_encoder_with_psr(dev, intel_encoder) \
+ for_each_intel_encoder((dev), (intel_encoder)) \
+ for_each_if(intel_encoder_can_psr(intel_encoder))
+
+#define for_each_intel_connector_iter(intel_connector, iter) \
+ while ((intel_connector = to_intel_connector(drm_connector_list_iter_next(iter))))
+
+#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
+ list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
+ for_each_if((intel_encoder)->base.crtc == (__crtc))
+
+#define for_each_connector_on_encoder(dev, __encoder, intel_connector) \
+ list_for_each_entry((intel_connector), &(dev)->mode_config.connector_list, base.head) \
+ for_each_if((intel_connector)->base.encoder == (__encoder))
+
+#define for_each_old_intel_plane_in_state(__state, plane, old_plane_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->base.dev->mode_config.num_total_plane && \
+ ((plane) = to_intel_plane((__state)->base.planes[__i].ptr), \
+ (old_plane_state) = to_intel_plane_state((__state)->base.planes[__i].old_state), 1); \
+ (__i)++) \
+ for_each_if(plane)
+
+#define for_each_new_intel_plane_in_state(__state, plane, new_plane_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->base.dev->mode_config.num_total_plane && \
+ ((plane) = to_intel_plane((__state)->base.planes[__i].ptr), \
+ (new_plane_state) = to_intel_plane_state((__state)->base.planes[__i].new_state), 1); \
+ (__i)++) \
+ for_each_if(plane)
+
+#define for_each_new_intel_crtc_in_state(__state, crtc, new_crtc_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->base.dev->mode_config.num_crtc && \
+ ((crtc) = to_intel_crtc((__state)->base.crtcs[__i].ptr), \
+ (new_crtc_state) = to_intel_crtc_state((__state)->base.crtcs[__i].new_state), 1); \
+ (__i)++) \
+ for_each_if(crtc)
+
+#define for_each_oldnew_intel_plane_in_state(__state, plane, old_plane_state, new_plane_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->base.dev->mode_config.num_total_plane && \
+ ((plane) = to_intel_plane((__state)->base.planes[__i].ptr), \
+ (old_plane_state) = to_intel_plane_state((__state)->base.planes[__i].old_state), \
+ (new_plane_state) = to_intel_plane_state((__state)->base.planes[__i].new_state), 1); \
+ (__i)++) \
+ for_each_if(plane)
+
+#define for_each_oldnew_intel_crtc_in_state(__state, crtc, old_crtc_state, new_crtc_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->base.dev->mode_config.num_crtc && \
+ ((crtc) = to_intel_crtc((__state)->base.crtcs[__i].ptr), \
+ (old_crtc_state) = to_intel_crtc_state((__state)->base.crtcs[__i].old_state), \
+ (new_crtc_state) = to_intel_crtc_state((__state)->base.crtcs[__i].new_state), 1); \
+ (__i)++) \
+ for_each_if(crtc)
+
+#define for_each_oldnew_intel_crtc_in_state_reverse(__state, crtc, old_crtc_state, new_crtc_state, __i) \
+ for ((__i) = (__state)->base.dev->mode_config.num_crtc - 1; \
+ (__i) >= 0 && \
+ ((crtc) = to_intel_crtc((__state)->base.crtcs[__i].ptr), \
+ (old_crtc_state) = to_intel_crtc_state((__state)->base.crtcs[__i].old_state), \
+ (new_crtc_state) = to_intel_crtc_state((__state)->base.crtcs[__i].new_state), 1); \
+ (__i)--) \
+ for_each_if(crtc)
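+
+/*
+ * Usage sketch (illustrative only): a typical atomic check/commit walk,
+ * comparing each crtc's old and new state. 'state' and 'modeset_pipes'
+ * are hypothetical locals.
+ *
+ *	struct intel_crtc *crtc;
+ *	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
+ *	u8 modeset_pipes = 0;
+ *	int i;
+ *
+ *	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ *					    new_crtc_state, i) {
+ *		if (old_crtc_state->hw.active != new_crtc_state->hw.active)
+ *			modeset_pipes |= BIT(crtc->pipe);
+ *	}
+ */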
+
+#define intel_atomic_crtc_state_for_each_plane_state( \
+ plane, plane_state, \
+ crtc_state) \
+ for_each_intel_plane_mask(((crtc_state)->uapi.state->dev), (plane), \
+ ((crtc_state)->uapi.plane_mask)) \
+ for_each_if ((plane_state = \
+ to_intel_plane_state(__drm_atomic_get_current_plane_state((crtc_state)->uapi.state, &plane->base))))
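+
+/*
+ * Usage sketch (illustrative only): walk the plane states attached to a
+ * crtc state; 'crtc_state' is a hypothetical local.
+ *
+ *	struct intel_plane *plane;
+ *	const struct intel_plane_state *plane_state;
+ *
+ *	intel_atomic_crtc_state_for_each_plane_state(plane, plane_state,
+ *						     crtc_state) {
+ *		if (!plane_state->uapi.visible)
+ *			continue;
+ *	}
+ */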
+
+#define for_each_new_intel_connector_in_state(__state, connector, new_connector_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->base.num_connector; \
+ (__i)++) \
+ for_each_if ((__state)->base.connectors[__i].ptr && \
+ ((connector) = to_intel_connector((__state)->base.connectors[__i].ptr), \
+ (new_connector_state) = to_intel_digital_connector_state((__state)->base.connectors[__i].new_state), 1))
+
+int intel_atomic_add_affected_planes(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+u8 intel_calc_active_pipes(struct intel_atomic_state *state,
+ u8 active_pipes);
+void intel_link_compute_m_n(u16 bpp, int nlanes,
+ int pixel_clock, int link_clock,
+ struct intel_link_m_n *m_n,
+ bool fec_enable);
+u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
+ u32 pixel_format, u64 modifier);
+enum drm_mode_status
+intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
+ const struct drm_display_mode *mode,
+ bool bigjoiner);
+enum drm_mode_status
+intel_cpu_transcoder_mode_valid(struct drm_i915_private *i915,
+ const struct drm_display_mode *mode);
+enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port);
+bool is_trans_port_sync_mode(const struct intel_crtc_state *state);
+bool intel_crtc_is_bigjoiner_slave(const struct intel_crtc_state *crtc_state);
+bool intel_crtc_is_bigjoiner_master(const struct intel_crtc_state *crtc_state);
+u8 intel_crtc_bigjoiner_slave_pipes(const struct intel_crtc_state *crtc_state);
+struct intel_crtc *intel_master_crtc(const struct intel_crtc_state *crtc_state);
+bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state);
+bool intel_pipe_config_compare(const struct intel_crtc_state *current_config,
+ const struct intel_crtc_state *pipe_config,
+ bool fastset);
+void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state);
+
+void intel_plane_destroy(struct drm_plane *plane);
+void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
+void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state);
+void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state);
+void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state);
+void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
+void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
+int vlv_get_hpll_vco(struct drm_i915_private *dev_priv);
+int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
+ const char *name, u32 reg, int ref_freq);
+int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
+ const char *name, u32 reg);
+void intel_init_display_hooks(struct drm_i915_private *dev_priv);
+unsigned int intel_fb_xy_to_linear(int x, int y,
+ const struct intel_plane_state *state,
+ int plane);
+void intel_add_fb_offsets(int *x, int *y,
+ const struct intel_plane_state *state, int plane);
+unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info);
+unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info);
+bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv);
+int intel_display_suspend(struct drm_device *dev);
+void intel_encoder_destroy(struct drm_encoder *encoder);
+struct drm_display_mode *
+intel_encoder_current_mode(struct intel_encoder *encoder);
+void intel_encoder_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state);
+bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy);
+bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy);
+bool intel_phy_is_snps(struct drm_i915_private *dev_priv, enum phy phy);
+enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv,
+ enum port port);
+int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+int ilk_get_lanes_required(int target_clock, int link_bw, int bpp);
+void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
+ struct intel_digital_port *dig_port,
+ unsigned int expected_mask);
+int intel_get_load_detect_pipe(struct drm_connector *connector,
+ struct intel_load_detect_pipe *old,
+ struct drm_modeset_acquire_ctx *ctx);
+void intel_release_load_detect_pipe(struct drm_connector *connector,
+ struct intel_load_detect_pipe *old,
+ struct drm_modeset_acquire_ctx *ctx);
+struct drm_framebuffer *
+intel_framebuffer_create(struct drm_i915_gem_object *obj,
+ struct drm_mode_fb_cmd2 *mode_cmd);
+
+bool intel_fuzzy_clock_check(int clock1, int clock2);
+
+void intel_display_prepare_reset(struct drm_i915_private *dev_priv);
+void intel_display_finish_reset(struct drm_i915_private *dev_priv);
+void intel_zero_m_n(struct intel_link_m_n *m_n);
+void intel_set_m_n(struct drm_i915_private *i915,
+ const struct intel_link_m_n *m_n,
+ i915_reg_t data_m_reg, i915_reg_t data_n_reg,
+ i915_reg_t link_m_reg, i915_reg_t link_n_reg);
+void intel_get_m_n(struct drm_i915_private *i915,
+ struct intel_link_m_n *m_n,
+ i915_reg_t data_m_reg, i915_reg_t data_n_reg,
+ i915_reg_t link_m_reg, i915_reg_t link_n_reg);
+bool intel_cpu_transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
+ enum transcoder transcoder);
+void intel_cpu_transcoder_set_m1_n1(struct intel_crtc *crtc,
+ enum transcoder cpu_transcoder,
+ const struct intel_link_m_n *m_n);
+void intel_cpu_transcoder_set_m2_n2(struct intel_crtc *crtc,
+ enum transcoder cpu_transcoder,
+ const struct intel_link_m_n *m_n);
+void intel_cpu_transcoder_get_m1_n1(struct intel_crtc *crtc,
+ enum transcoder cpu_transcoder,
+ struct intel_link_m_n *m_n);
+void intel_cpu_transcoder_get_m2_n2(struct intel_crtc *crtc,
+ enum transcoder cpu_transcoder,
+ struct intel_link_m_n *m_n);
+void i9xx_crtc_clock_get(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config);
+int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
+int intel_crtc_dotclock(const struct intel_crtc_state *pipe_config);
+enum intel_display_power_domain intel_port_to_power_domain(struct intel_digital_port *dig_port);
+enum intel_display_power_domain
+intel_aux_power_domain(struct intel_digital_port *dig_port);
+void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state);
+void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state);
+
+int bdw_get_pipemisc_bpp(struct intel_crtc *crtc);
+unsigned int intel_plane_fence_y_offset(const struct intel_plane_state *plane_state);
+
+bool intel_plane_uses_fence(const struct intel_plane_state *plane_state);
+
+struct intel_encoder *
+intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
+ const struct intel_crtc_state *crtc_state);
+void intel_plane_disable_noatomic(struct intel_crtc *crtc,
+ struct intel_plane *plane);
+void intel_set_plane_visible(struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state,
+ bool visible);
+void intel_plane_fixup_bitmasks(struct intel_crtc_state *crtc_state);
+
+void intel_display_driver_register(struct drm_i915_private *i915);
+void intel_display_driver_unregister(struct drm_i915_private *i915);
+
+void intel_update_watermarks(struct drm_i915_private *i915);
+
+/* modesetting */
+bool intel_modeset_probe_defer(struct pci_dev *pdev);
+void intel_modeset_init_hw(struct drm_i915_private *i915);
+int intel_modeset_init_noirq(struct drm_i915_private *i915);
+int intel_modeset_init_nogem(struct drm_i915_private *i915);
+int intel_modeset_init(struct drm_i915_private *i915);
+void intel_modeset_driver_remove(struct drm_i915_private *i915);
+void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915);
+void intel_modeset_driver_remove_nogem(struct drm_i915_private *i915);
+void intel_display_resume(struct drm_device *dev);
+int intel_modeset_all_pipes(struct intel_atomic_state *state);
+void intel_modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state,
+ struct intel_power_domain_mask *old_domains);
+void intel_modeset_put_crtc_power_domains(struct intel_crtc *crtc,
+ struct intel_power_domain_mask *domains);
+
+/* modesetting asserts */
+void assert_transcoder(struct drm_i915_private *dev_priv,
+ enum transcoder cpu_transcoder, bool state);
+#define assert_transcoder_enabled(d, t) assert_transcoder(d, t, true)
+#define assert_transcoder_disabled(d, t) assert_transcoder(d, t, false)
+
+/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
+ * WARN_ON()) for hw state sanity checks on unexpected conditions
+ * which may not necessarily be a user-visible problem. This will either
+ * WARN() or DRM_ERROR() depending on the verbose_checks moduleparam, to
+ * enable distros and users to tailor their preferred amount of i915 abrt
+ * spam.
+ */
+#define I915_STATE_WARN(condition, format...) ({ \
+ int __ret_warn_on = !!(condition); \
+ if (unlikely(__ret_warn_on)) \
+ if (!WARN(i915_modparams.verbose_state_checks, format)) \
+ DRM_ERROR(format); \
+ unlikely(__ret_warn_on); \
+})
+
+#define I915_STATE_WARN_ON(x) \
+ I915_STATE_WARN((x), "%s", "WARN_ON(" __stringify(x) ")")
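+
+/*
+ * Usage sketch (illustrative only), mirroring how assert_transcoder()
+ * above is typically implemented; str_on_off() is assumed to come from
+ * <linux/string_helpers.h>:
+ *
+ *	I915_STATE_WARN(cur_state != state,
+ *			"transcoder %s assertion failure (expected %s, current %s)\n",
+ *			transcoder_name(cpu_transcoder),
+ *			str_on_off(state), str_on_off(cur_state));
+ */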
+
+bool intel_scanout_needs_vtd_wa(struct drm_i915_private *i915);
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/intel_display_core.h b/drivers/gpu/drm/i915/display/intel_display_core.h
new file mode 100644
index 000000000..9b51148e8
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_core.h
@@ -0,0 +1,426 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __INTEL_DISPLAY_CORE_H__
+#define __INTEL_DISPLAY_CORE_H__
+
+#include <linux/list.h>
+#include <linux/llist.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
+#include <drm/drm_connector.h>
+
+#include "intel_cdclk.h"
+#include "intel_display.h"
+#include "intel_display_power.h"
+#include "intel_dmc.h"
+#include "intel_dpll_mgr.h"
+#include "intel_fbc.h"
+#include "intel_global_state.h"
+#include "intel_gmbus.h"
+#include "intel_opregion.h"
+#include "intel_pm_types.h"
+
+struct drm_i915_private;
+struct drm_property;
+struct i915_audio_component;
+struct i915_hdcp_comp_master;
+struct intel_atomic_state;
+struct intel_audio_funcs;
+struct intel_bios_encoder_data;
+struct intel_cdclk_funcs;
+struct intel_cdclk_vals;
+struct intel_color_funcs;
+struct intel_crtc;
+struct intel_crtc_state;
+struct intel_dpll_funcs;
+struct intel_dpll_mgr;
+struct intel_fbdev;
+struct intel_fdi_funcs;
+struct intel_hotplug_funcs;
+struct intel_initial_plane_config;
+struct intel_overlay;
+
+/* Number of SAGV/QGV points; the value is precisely defined by BSpec */
+#define I915_NUM_QGV_POINTS 8
+
+/* Number of PSF GV points; the value is precisely defined by BSpec */
+#define I915_NUM_PSF_GV_POINTS 3
+
+struct intel_display_funcs {
+ /*
+ * Returns the active state of the crtc, and if the crtc is active,
+ * fills out the pipe-config with the hw state.
+ */
+ bool (*get_pipe_config)(struct intel_crtc *,
+ struct intel_crtc_state *);
+ void (*get_initial_plane_config)(struct intel_crtc *,
+ struct intel_initial_plane_config *);
+ void (*crtc_enable)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+ void (*crtc_disable)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+ void (*commit_modeset_enables)(struct intel_atomic_state *state);
+};
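+
+/*
+ * Dispatch sketch (illustrative only): these vfuncs are typically called
+ * through the funcs grouping in struct intel_display below, e.g.
+ *
+ *	i915->display.funcs.display->crtc_enable(state, crtc);
+ */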
+
+/* Functions used for display watermark calculations. */
+struct intel_wm_funcs {
+ /* update_wm is for legacy wm management */
+ void (*update_wm)(struct drm_i915_private *dev_priv);
+ int (*compute_pipe_wm)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+ int (*compute_intermediate_wm)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+ void (*initial_watermarks)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+ void (*atomic_update_watermarks)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+ void (*optimize_watermarks)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+ int (*compute_global_watermarks)(struct intel_atomic_state *state);
+};
+
+struct intel_audio {
+ /* hda/i915 audio component */
+ struct i915_audio_component *component;
+ bool component_registered;
+ /* mutex for audio/video sync */
+ struct mutex mutex;
+ int power_refcount;
+ u32 freq_cntrl;
+
+ /* Used to save the pipe-to-encoder mapping for audio */
+ struct intel_encoder *encoder_map[I915_MAX_PIPES];
+
+ /* necessary resource sharing with HDMI LPE audio driver. */
+ struct {
+ struct platform_device *platdev;
+ int irq;
+ } lpe;
+};
+
+/*
+ * dpll and cdclk state is protected by connection_mutex; dpll.lock serializes
+ * intel_{prepare,enable,disable}_shared_dpll. It must be global rather than
+ * per-dpll, because on some platforms PLLs share registers.
+ */
+struct intel_dpll {
+ struct mutex lock;
+
+ int num_shared_dpll;
+ struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
+ const struct intel_dpll_mgr *mgr;
+
+ struct {
+ int nssc;
+ int ssc;
+ } ref_clks;
+};
+
+struct intel_frontbuffer_tracking {
+ spinlock_t lock;
+
+ /*
+	 * Tracking bits for delayed frontbuffer flushing due to GPU activity or
+ * scheduled flips.
+ */
+ unsigned busy_bits;
+ unsigned flip_bits;
+};
+
+struct intel_hotplug {
+ struct delayed_work hotplug_work;
+
+ const u32 *hpd, *pch_hpd;
+
+ struct {
+ unsigned long last_jiffies;
+ int count;
+ enum {
+ HPD_ENABLED = 0,
+ HPD_DISABLED = 1,
+ HPD_MARK_DISABLED = 2
+ } state;
+ } stats[HPD_NUM_PINS];
+ u32 event_bits;
+ u32 retry_bits;
+ struct delayed_work reenable_work;
+
+ u32 long_port_mask;
+ u32 short_port_mask;
+ struct work_struct dig_port_work;
+
+ struct work_struct poll_init_work;
+ bool poll_enabled;
+
+ unsigned int hpd_storm_threshold;
+ /* Whether or not to count short HPD IRQs in HPD storms */
+ u8 hpd_short_storm_enabled;
+
+ /*
+	 * If we get an HPD irq from DP and an HPD irq from non-DP, the
+	 * non-DP HPD could block the workqueue while acquiring a mode
+	 * config mutex that userspace may already hold, while userspace
+	 * is waiting for the DP work to run, which in turn is blocked
+	 * behind the non-DP one.
+ */
+ struct workqueue_struct *dp_wq;
+};
+
+struct intel_vbt_data {
+ /* bdb version */
+ u16 version;
+
+ /* Feature bits */
+ unsigned int int_tv_support:1;
+ unsigned int int_crt_support:1;
+ unsigned int lvds_use_ssc:1;
+ unsigned int int_lvds_support:1;
+ unsigned int display_clock_mode:1;
+ unsigned int fdi_rx_polarity_inverted:1;
+ int lvds_ssc_freq;
+ enum drm_panel_orientation orientation;
+
+ bool override_afc_startup;
+ u8 override_afc_startup_val;
+
+ int crt_ddc_pin;
+
+ struct list_head display_devices;
+ struct list_head bdb_blocks;
+
+ struct intel_bios_encoder_data *ports[I915_MAX_PORTS]; /* Non-NULL if port present. */
+ struct sdvo_device_mapping {
+ u8 initialized;
+ u8 dvo_port;
+ u8 slave_addr;
+ u8 dvo_wiring;
+ u8 i2c_pin;
+ u8 ddc_pin;
+ } sdvo_mappings[2];
+};
+
+struct intel_wm {
+ /*
+ * Raw watermark latency values:
+ * in 0.1us units for WM0,
+ * in 0.5us units for WM1+.
+ */
+ /* primary */
+ u16 pri_latency[5];
+ /* sprite */
+ u16 spr_latency[5];
+ /* cursor */
+ u16 cur_latency[5];
+ /*
+ * Raw watermark memory latency values
+ * for SKL for all 8 levels
+ * in 1us units.
+ */
+ u16 skl_latency[8];
+
+ /* current hardware state */
+ union {
+ struct ilk_wm_values hw;
+ struct vlv_wm_values vlv;
+ struct g4x_wm_values g4x;
+ };
+
+ u8 max_level;
+
+ /*
+ * Should be held around atomic WM register writing; also
+	 * protects intel_crtc->wm.active and
+ * crtc_state->wm.need_postvbl_update.
+ */
+ struct mutex wm_mutex;
+
+ bool ipc_enabled;
+};
+
+struct intel_display {
+ /* Display functions */
+ struct {
+ /* Top level crtc-ish functions */
+ const struct intel_display_funcs *display;
+
+ /* Display CDCLK functions */
+ const struct intel_cdclk_funcs *cdclk;
+
+ /* Display pll funcs */
+ const struct intel_dpll_funcs *dpll;
+
+ /* irq display functions */
+ const struct intel_hotplug_funcs *hotplug;
+
+ /* pm display functions */
+ const struct intel_wm_funcs *wm;
+
+ /* fdi display functions */
+ const struct intel_fdi_funcs *fdi;
+
+ /* Display internal color functions */
+ const struct intel_color_funcs *color;
+
+ /* Display internal audio functions */
+ const struct intel_audio_funcs *audio;
+ } funcs;
+
+ /* Grouping using anonymous structs. Keep sorted. */
+ struct intel_atomic_helper {
+ struct llist_head free_list;
+ struct work_struct free_work;
+ } atomic_helper;
+
+ struct {
+ /* backlight registers and fields in struct intel_panel */
+ struct mutex lock;
+ } backlight;
+
+ struct {
+ struct intel_global_obj obj;
+
+ struct intel_bw_info {
+ /* for each QGV point */
+ unsigned int deratedbw[I915_NUM_QGV_POINTS];
+ /* for each PSF GV point */
+ unsigned int psf_bw[I915_NUM_PSF_GV_POINTS];
+ u8 num_qgv_points;
+ u8 num_psf_gv_points;
+ u8 num_planes;
+ } max[6];
+ } bw;
+
+ struct {
+ /* The current hardware cdclk configuration */
+ struct intel_cdclk_config hw;
+
+ /* cdclk, divider, and ratio table from bspec */
+ const struct intel_cdclk_vals *table;
+
+ struct intel_global_obj obj;
+
+ unsigned int max_cdclk_freq;
+ } cdclk;
+
+ struct {
+ /* The current hardware dbuf configuration */
+ u8 enabled_slices;
+
+ struct intel_global_obj obj;
+ } dbuf;
+
+ struct {
+ /*
+ * dkl.phy_lock protects against concurrent access of the
+ * Dekel TypeC PHYs.
+ */
+ spinlock_t phy_lock;
+ } dkl;
+
+ struct {
+ /* VLV/CHV/BXT/GLK DSI MMIO register base address */
+ u32 mmio_base;
+ } dsi;
+
+ struct {
+		/* the fbdev framebuffer registered on this device */
+ struct intel_fbdev *fbdev;
+ struct work_struct suspend_work;
+ } fbdev;
+
+ struct {
+ unsigned int pll_freq;
+ u32 rx_config;
+ } fdi;
+
+ struct {
+ /*
+		 * Base address of the gmbus and gpio blocks (on the PCH,
+		 * or on the SoC for platforms without a PCH).
+ */
+ u32 mmio_base;
+
+ /*
+ * gmbus.mutex protects against concurrent usage of the single
+ * hw gmbus controller on different i2c buses.
+ */
+ struct mutex mutex;
+
+ struct intel_gmbus *bus[GMBUS_NUM_PINS];
+
+ wait_queue_head_t wait_queue;
+ } gmbus;
+
+ struct {
+ struct i915_hdcp_comp_master *master;
+ bool comp_added;
+
+ /* Mutex to protect the above hdcp component related values. */
+ struct mutex comp_mutex;
+ } hdcp;
+
+ struct {
+ struct i915_power_domains domains;
+
+ /* Shadow for DISPLAY_PHY_CONTROL which can't be safely read */
+ u32 chv_phy_control;
+
+ /* perform PHY state sanity checks? */
+ bool chv_phy_assert[2];
+ } power;
+
+ struct {
+ u32 mmio_base;
+
+ /* protects panel power sequencer state */
+ struct mutex mutex;
+ } pps;
+
+ struct {
+ struct drm_property *broadcast_rgb;
+ struct drm_property *force_audio;
+ } properties;
+
+ struct {
+ unsigned long mask;
+ } quirks;
+
+ struct {
+ enum {
+ I915_SAGV_UNKNOWN = 0,
+ I915_SAGV_DISABLED,
+ I915_SAGV_ENABLED,
+ I915_SAGV_NOT_CONTROLLED
+ } status;
+
+ u32 block_time_us;
+ } sagv;
+
+ struct {
+ /* ordered wq for modesets */
+ struct workqueue_struct *modeset;
+
+ /* unbound hipri wq for page flips/plane updates */
+ struct workqueue_struct *flip;
+ } wq;
+
+ /* Grouping using named structs. Keep sorted. */
+ struct intel_audio audio;
+ struct intel_dmc dmc;
+ struct intel_dpll dpll;
+ struct intel_fbc *fbc[I915_MAX_FBCS];
+ struct intel_frontbuffer_tracking fb_tracking;
+ struct intel_hotplug hotplug;
+ struct intel_opregion opregion;
+ struct intel_overlay *overlay;
+ struct intel_vbt_data vbt;
+ struct intel_wm wm;
+};
+
+#endif /* __INTEL_DISPLAY_CORE_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
new file mode 100644
index 000000000..7c7253a25
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -0,0 +1,2254 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <linux/string_helpers.h>
+
+#include <drm/drm_debugfs.h>
+#include <drm/drm_fourcc.h>
+
+#include "i915_debugfs.h"
+#include "intel_de.h"
+#include "intel_display_debugfs.h"
+#include "intel_display_power.h"
+#include "intel_display_power_well.h"
+#include "intel_display_types.h"
+#include "intel_dmc.h"
+#include "intel_dp.h"
+#include "intel_dp_mst.h"
+#include "intel_drrs.h"
+#include "intel_fbc.h"
+#include "intel_fbdev.h"
+#include "intel_hdcp.h"
+#include "intel_hdmi.h"
+#include "intel_panel.h"
+#include "intel_pm.h"
+#include "intel_psr.h"
+#include "intel_sprite.h"
+#include "skl_watermark.h"
+
+static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
+{
+ return to_i915(node->minor->dev);
+}
+
+static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+
+ seq_printf(m, "FB tracking busy bits: 0x%08x\n",
+ dev_priv->display.fb_tracking.busy_bits);
+
+ seq_printf(m, "FB tracking flip bits: 0x%08x\n",
+ dev_priv->display.fb_tracking.flip_bits);
+
+ return 0;
+}
+
+static int i915_ips_status(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ intel_wakeref_t wakeref;
+
+ if (!HAS_IPS(dev_priv))
+ return -ENODEV;
+
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+
+ seq_printf(m, "Enabled by kernel parameter: %s\n",
+ str_yes_no(dev_priv->params.enable_ips));
+
+ if (DISPLAY_VER(dev_priv) >= 8) {
+ seq_puts(m, "Currently: unknown\n");
+ } else {
+ if (intel_de_read(dev_priv, IPS_CTL) & IPS_ENABLE)
+ seq_puts(m, "Currently: enabled\n");
+ else
+ seq_puts(m, "Currently: disabled\n");
+ }
+
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+
+ return 0;
+}
+
+static int i915_sr_status(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ intel_wakeref_t wakeref;
+ bool sr_enabled = false;
+
+ wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
+
+ if (DISPLAY_VER(dev_priv) >= 9)
+ /* no global SR status; inspect per-plane WM */;
+ else if (HAS_PCH_SPLIT(dev_priv))
+ sr_enabled = intel_de_read(dev_priv, WM1_LP_ILK) & WM_LP_ENABLE;
+ else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
+ IS_I945G(dev_priv) || IS_I945GM(dev_priv))
+ sr_enabled = intel_de_read(dev_priv, FW_BLC_SELF) & FW_BLC_SELF_EN;
+ else if (IS_I915GM(dev_priv))
+ sr_enabled = intel_de_read(dev_priv, INSTPM) & INSTPM_SELF_EN;
+ else if (IS_PINEVIEW(dev_priv))
+ sr_enabled = intel_de_read(dev_priv, DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
+ else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ sr_enabled = intel_de_read(dev_priv, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
+
+ intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
+
+ seq_printf(m, "self-refresh: %s\n", str_enabled_disabled(sr_enabled));
+
+ return 0;
+}
+
+static int i915_opregion(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *i915 = node_to_i915(m->private);
+ struct intel_opregion *opregion = &i915->display.opregion;
+
+ if (opregion->header)
+ seq_write(m, opregion->header, OPREGION_SIZE);
+
+ return 0;
+}
+
+static int i915_vbt(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *i915 = node_to_i915(m->private);
+ struct intel_opregion *opregion = &i915->display.opregion;
+
+ if (opregion->vbt)
+ seq_write(m, opregion->vbt, opregion->vbt_size);
+
+ return 0;
+}
+
+static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
+ struct intel_framebuffer *fbdev_fb = NULL;
+ struct drm_framebuffer *drm_fb;
+
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+ fbdev_fb = intel_fbdev_framebuffer(dev_priv->display.fbdev.fbdev);
+ if (fbdev_fb) {
+ seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
+ fbdev_fb->base.width,
+ fbdev_fb->base.height,
+ fbdev_fb->base.format->depth,
+ fbdev_fb->base.format->cpp[0] * 8,
+ fbdev_fb->base.modifier,
+ drm_framebuffer_read_refcount(&fbdev_fb->base));
+ i915_debugfs_describe_obj(m, intel_fb_obj(&fbdev_fb->base));
+ seq_putc(m, '\n');
+ }
+#endif
+
+ mutex_lock(&dev->mode_config.fb_lock);
+ drm_for_each_fb(drm_fb, dev) {
+		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
+
+		if (fb == fbdev_fb)
+ continue;
+
+ seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
+ fb->base.width,
+ fb->base.height,
+ fb->base.format->depth,
+ fb->base.format->cpp[0] * 8,
+ fb->base.modifier,
+ drm_framebuffer_read_refcount(&fb->base));
+ i915_debugfs_describe_obj(m, intel_fb_obj(&fb->base));
+ seq_putc(m, '\n');
+ }
+ mutex_unlock(&dev->mode_config.fb_lock);
+
+ return 0;
+}
+
+static int i915_psr_sink_status_show(struct seq_file *m, void *data)
+{
+ u8 val;
+ static const char * const sink_status[] = {
+ "inactive",
+ "transition to active, capture and display",
+ "active, display from RFB",
+ "active, capture and display on sink device timings",
+ "transition to inactive, capture and display, timing re-sync",
+ "reserved",
+ "reserved",
+ "sink internal error",
+ };
+ struct drm_connector *connector = m->private;
+ struct intel_dp *intel_dp =
+ intel_attached_dp(to_intel_connector(connector));
+ int ret;
+
+ if (!CAN_PSR(intel_dp)) {
+ seq_puts(m, "PSR Unsupported\n");
+ return -ENODEV;
+ }
+
+ if (connector->status != connector_status_connected)
+ return -ENODEV;
+
+ ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
+
+ if (ret == 1) {
+ const char *str = "unknown";
+
+ val &= DP_PSR_SINK_STATE_MASK;
+ if (val < ARRAY_SIZE(sink_status))
+ str = sink_status[val];
+ seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
+ } else {
+ return ret;
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
+
+static void
+psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ const char *status = "unknown";
+ u32 val, status_val;
+
+ if (intel_dp->psr.psr2_enabled) {
+ static const char * const live_status[] = {
+ "IDLE",
+ "CAPTURE",
+ "CAPTURE_FS",
+ "SLEEP",
+ "BUFON_FW",
+ "ML_UP",
+ "SU_STANDBY",
+ "FAST_SLEEP",
+ "DEEP_SLEEP",
+ "BUF_ON",
+ "TG_ON"
+ };
+ val = intel_de_read(dev_priv,
+ EDP_PSR2_STATUS(intel_dp->psr.transcoder));
+ status_val = REG_FIELD_GET(EDP_PSR2_STATUS_STATE_MASK, val);
+ if (status_val < ARRAY_SIZE(live_status))
+ status = live_status[status_val];
+ } else {
+ static const char * const live_status[] = {
+ "IDLE",
+ "SRDONACK",
+ "SRDENT",
+ "BUFOFF",
+ "BUFON",
+ "AUXACK",
+ "SRDOFFACK",
+ "SRDENT_ON",
+ };
+ val = intel_de_read(dev_priv,
+ EDP_PSR_STATUS(intel_dp->psr.transcoder));
+ status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
+ EDP_PSR_STATUS_STATE_SHIFT;
+ if (status_val < ARRAY_SIZE(live_status))
+ status = live_status[status_val];
+ }
+
+ seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
+}
+
+static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_psr *psr = &intel_dp->psr;
+ intel_wakeref_t wakeref;
+ const char *status;
+ bool enabled;
+ u32 val;
+
+ seq_printf(m, "Sink support: %s", str_yes_no(psr->sink_support));
+ if (psr->sink_support)
+ seq_printf(m, " [0x%02x]", intel_dp->psr_dpcd[0]);
+ seq_puts(m, "\n");
+
+ if (!psr->sink_support)
+ return 0;
+
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+ mutex_lock(&psr->lock);
+
+ if (psr->enabled)
+ status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
+ else
+ status = "disabled";
+ seq_printf(m, "PSR mode: %s\n", status);
+
+ if (!psr->enabled) {
+ seq_printf(m, "PSR sink not reliable: %s\n",
+ str_yes_no(psr->sink_not_reliable));
+
+ goto unlock;
+ }
+
+ if (psr->psr2_enabled) {
+ val = intel_de_read(dev_priv,
+ EDP_PSR2_CTL(intel_dp->psr.transcoder));
+ enabled = val & EDP_PSR2_ENABLE;
+ } else {
+ val = intel_de_read(dev_priv,
+ EDP_PSR_CTL(intel_dp->psr.transcoder));
+ enabled = val & EDP_PSR_ENABLE;
+ }
+ seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
+ str_enabled_disabled(enabled), val);
+ psr_source_status(intel_dp, m);
+ seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
+ psr->busy_frontbuffer_bits);
+
+ /*
+	 * SKL+ Perf counter is reset to 0 every time a DC state is entered
+ */
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
+ val = intel_de_read(dev_priv,
+ EDP_PSR_PERF_CNT(intel_dp->psr.transcoder));
+ val &= EDP_PSR_PERF_CNT_MASK;
+ seq_printf(m, "Performance counter: %u\n", val);
+ }
+
+ if (psr->debug & I915_PSR_DEBUG_IRQ) {
+ seq_printf(m, "Last attempted entry at: %lld\n",
+ psr->last_entry_attempt);
+ seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
+ }
+
+ if (psr->psr2_enabled) {
+ u32 su_frames_val[3];
+ int frame;
+
+ /*
+		 * Read all 3 registers beforehand to minimize the chance of
+		 * crossing a frame boundary between register reads.
+ */
+ for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
+ val = intel_de_read(dev_priv,
+ PSR2_SU_STATUS(intel_dp->psr.transcoder, frame));
+ su_frames_val[frame / 3] = val;
+ }
+
+ seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
+
+ for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
+ u32 su_blocks;
+
+ su_blocks = su_frames_val[frame / 3] &
+ PSR2_SU_STATUS_MASK(frame);
+ su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
+ seq_printf(m, "%d\t%d\n", frame, su_blocks);
+ }
+
+ seq_printf(m, "PSR2 selective fetch: %s\n",
+ str_enabled_disabled(psr->psr2_sel_fetch_enabled));
+ }
+
+unlock:
+ mutex_unlock(&psr->lock);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+
+ return 0;
+}
+
+static int i915_edp_psr_status(struct seq_file *m, void *data)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct intel_dp *intel_dp = NULL;
+ struct intel_encoder *encoder;
+
+ if (!HAS_PSR(dev_priv))
+ return -ENODEV;
+
+ /* Find the first EDP which supports PSR */
+ for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
+ intel_dp = enc_to_intel_dp(encoder);
+ break;
+ }
+
+ if (!intel_dp)
+ return -ENODEV;
+
+ return intel_psr_status(m, intel_dp);
+}
+
+static int
+i915_edp_psr_debug_set(void *data, u64 val)
+{
+ struct drm_i915_private *dev_priv = data;
+ struct intel_encoder *encoder;
+ intel_wakeref_t wakeref;
+ int ret = -ENODEV;
+
+ if (!HAS_PSR(dev_priv))
+ return ret;
+
+ for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val);
+
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+
+ // TODO: split to each transcoder's PSR debug state
+ ret = intel_psr_debug_set(intel_dp, val);
+
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+ }
+
+ return ret;
+}
+
+static int
+i915_edp_psr_debug_get(void *data, u64 *val)
+{
+ struct drm_i915_private *dev_priv = data;
+ struct intel_encoder *encoder;
+
+ if (!HAS_PSR(dev_priv))
+ return -ENODEV;
+
+ for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ // TODO: split to each transcoder's PSR debug state
+ *val = READ_ONCE(intel_dp->psr.debug);
+ return 0;
+ }
+
+ return -ENODEV;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
+ i915_edp_psr_debug_get, i915_edp_psr_debug_set,
+ "%llu\n");
+
+static int i915_power_domain_info(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *i915 = node_to_i915(m->private);
+
+ intel_display_power_debug(i915, m);
+
+ return 0;
+}
+
+static void intel_seq_print_mode(struct seq_file *m, int tabs,
+ const struct drm_display_mode *mode)
+{
+ int i;
+
+ for (i = 0; i < tabs; i++)
+ seq_putc(m, '\t');
+
+ seq_printf(m, DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
+}
+
+static void intel_encoder_info(struct seq_file *m,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_connector_list_iter conn_iter;
+ struct drm_connector *connector;
+
+ seq_printf(m, "\t[ENCODER:%d:%s]: connectors:\n",
+ encoder->base.base.id, encoder->base.name);
+
+ drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ const struct drm_connector_state *conn_state =
+ connector->state;
+
+ if (conn_state->best_encoder != &encoder->base)
+ continue;
+
+ seq_printf(m, "\t\t[CONNECTOR:%d:%s]\n",
+ connector->base.id, connector->name);
+ }
+ drm_connector_list_iter_end(&conn_iter);
+}
+
+static void intel_panel_info(struct seq_file *m,
+ struct intel_connector *connector)
+{
+ const struct drm_display_mode *fixed_mode;
+
+ if (list_empty(&connector->panel.fixed_modes))
+ return;
+
+ seq_puts(m, "\tfixed modes:\n");
+
+ list_for_each_entry(fixed_mode, &connector->panel.fixed_modes, head)
+ intel_seq_print_mode(m, 2, fixed_mode);
+}
+
+static void intel_hdcp_info(struct seq_file *m,
+ struct intel_connector *intel_connector)
+{
+ bool hdcp_cap, hdcp2_cap;
+
+ if (!intel_connector->hdcp.shim) {
+ seq_puts(m, "No Connector Support");
+ goto out;
+ }
+
+ hdcp_cap = intel_hdcp_capable(intel_connector);
+ hdcp2_cap = intel_hdcp2_capable(intel_connector);
+
+ if (hdcp_cap)
+ seq_puts(m, "HDCP1.4 ");
+ if (hdcp2_cap)
+ seq_puts(m, "HDCP2.2 ");
+
+ if (!hdcp_cap && !hdcp2_cap)
+ seq_puts(m, "None");
+
+out:
+ seq_puts(m, "\n");
+}
+
+static void intel_dp_info(struct seq_file *m,
+ struct intel_connector *intel_connector)
+{
+ struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector);
+ struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
+ const struct drm_property_blob *edid = intel_connector->base.edid_blob_ptr;
+
+ seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
+ seq_printf(m, "\taudio support: %s\n",
+ str_yes_no(intel_dp->has_audio));
+
+ drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
+ edid ? edid->data : NULL, &intel_dp->aux);
+}
+
+static void intel_dp_mst_info(struct seq_file *m,
+ struct intel_connector *intel_connector)
+{
+ bool has_audio = intel_connector->port->has_audio;
+
+ seq_printf(m, "\taudio support: %s\n", str_yes_no(has_audio));
+}
+
+static void intel_hdmi_info(struct seq_file *m,
+ struct intel_connector *intel_connector)
+{
+ struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(intel_encoder);
+
+ seq_printf(m, "\taudio support: %s\n",
+ str_yes_no(intel_hdmi->has_audio));
+}
+
+static void intel_connector_info(struct seq_file *m,
+ struct drm_connector *connector)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ const struct drm_connector_state *conn_state = connector->state;
+ struct intel_encoder *encoder =
+ to_intel_encoder(conn_state->best_encoder);
+ const struct drm_display_mode *mode;
+
+ seq_printf(m, "[CONNECTOR:%d:%s]: status: %s\n",
+ connector->base.id, connector->name,
+ drm_get_connector_status_name(connector->status));
+
+ if (connector->status == connector_status_disconnected)
+ return;
+
+ seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
+ connector->display_info.width_mm,
+ connector->display_info.height_mm);
+ seq_printf(m, "\tsubpixel order: %s\n",
+ drm_get_subpixel_order_name(connector->display_info.subpixel_order));
+ seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);
+
+ if (!encoder)
+ return;
+
+ switch (connector->connector_type) {
+ case DRM_MODE_CONNECTOR_DisplayPort:
+ case DRM_MODE_CONNECTOR_eDP:
+ if (encoder->type == INTEL_OUTPUT_DP_MST)
+ intel_dp_mst_info(m, intel_connector);
+ else
+ intel_dp_info(m, intel_connector);
+ break;
+ case DRM_MODE_CONNECTOR_HDMIA:
+ if (encoder->type == INTEL_OUTPUT_HDMI ||
+ encoder->type == INTEL_OUTPUT_DDI)
+ intel_hdmi_info(m, intel_connector);
+ break;
+ default:
+ break;
+ }
+
+ seq_puts(m, "\tHDCP version: ");
+ intel_hdcp_info(m, intel_connector);
+
+ seq_printf(m, "\tmax bpc: %u\n", connector->display_info.bpc);
+
+ intel_panel_info(m, intel_connector);
+
+ seq_printf(m, "\tmodes:\n");
+ list_for_each_entry(mode, &connector->modes, head)
+ intel_seq_print_mode(m, 2, mode);
+}
+
+static const char *plane_type(enum drm_plane_type type)
+{
+ switch (type) {
+ case DRM_PLANE_TYPE_OVERLAY:
+ return "OVL";
+ case DRM_PLANE_TYPE_PRIMARY:
+ return "PRI";
+ case DRM_PLANE_TYPE_CURSOR:
+ return "CUR";
+ /*
+ * Deliberately omitting default: to generate compiler warnings
+ * when a new drm_plane_type gets added.
+ */
+ }
+
+ return "unknown";
+}
+
+static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation)
+{
+ /*
+	 * According to the docs only one DRM_MODE_ROTATE_ value is allowed,
+	 * but print them all anyway to make any misuse of the bits visible
+ */
+ snprintf(buf, bufsize,
+ "%s%s%s%s%s%s(0x%08x)",
+ (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
+ (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
+ (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
+ (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
+ (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
+ (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
+ rotation);
+}
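+
+/*
+ * Illustrative example (using the standard DRM_MODE_ROTATE_/REFLECT_
+ * bit values): a rotation of DRM_MODE_ROTATE_90 | DRM_MODE_REFLECT_X
+ * is rendered by plane_rotation() as "90 FLIPX (0x00000012)".
+ */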
+
+static const char *plane_visibility(const struct intel_plane_state *plane_state)
+{
+ if (plane_state->uapi.visible)
+ return "visible";
+
+ if (plane_state->planar_slave)
+ return "planar-slave";
+
+ return "hidden";
+}
+
+static void intel_plane_uapi_info(struct seq_file *m, struct intel_plane *plane)
+{
+ const struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
+ const struct drm_framebuffer *fb = plane_state->uapi.fb;
+ struct drm_rect src, dst;
+ char rot_str[48];
+
+ src = drm_plane_state_src(&plane_state->uapi);
+ dst = drm_plane_state_dest(&plane_state->uapi);
+
+ plane_rotation(rot_str, sizeof(rot_str),
+ plane_state->uapi.rotation);
+
+ seq_puts(m, "\t\tuapi: [FB:");
+ if (fb)
+ seq_printf(m, "%d] %p4cc,0x%llx,%dx%d", fb->base.id,
+ &fb->format->format, fb->modifier, fb->width,
+ fb->height);
+ else
+ seq_puts(m, "0] n/a,0x0,0x0,");
+ seq_printf(m, ", visible=%s, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT
+ ", rotation=%s\n", plane_visibility(plane_state),
+ DRM_RECT_FP_ARG(&src), DRM_RECT_ARG(&dst), rot_str);
+
+ if (plane_state->planar_linked_plane)
+ seq_printf(m, "\t\tplanar: Linked to [PLANE:%d:%s] as a %s\n",
+ plane_state->planar_linked_plane->base.base.id, plane_state->planar_linked_plane->base.name,
+ plane_state->planar_slave ? "slave" : "master");
+}
+
+static void intel_plane_hw_info(struct seq_file *m, struct intel_plane *plane)
+{
+ const struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ char rot_str[48];
+
+ if (!fb)
+ return;
+
+ plane_rotation(rot_str, sizeof(rot_str),
+ plane_state->hw.rotation);
+
+ seq_printf(m, "\t\thw: [FB:%d] %p4cc,0x%llx,%dx%d, visible=%s, src="
+ DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n",
+ fb->base.id, &fb->format->format,
+ fb->modifier, fb->width, fb->height,
+ str_yes_no(plane_state->uapi.visible),
+ DRM_RECT_FP_ARG(&plane_state->uapi.src),
+ DRM_RECT_ARG(&plane_state->uapi.dst),
+ rot_str);
+}
+
+static void intel_plane_info(struct seq_file *m, struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct intel_plane *plane;
+
+ for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
+ seq_printf(m, "\t[PLANE:%d:%s]: type=%s\n",
+ plane->base.base.id, plane->base.name,
+ plane_type(plane->base.type));
+ intel_plane_uapi_info(m, plane);
+ intel_plane_hw_info(m, plane);
+ }
+}
+
+static void intel_scaler_info(struct seq_file *m, struct intel_crtc *crtc)
+{
+ const struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ int num_scalers = crtc->num_scalers;
+ int i;
+
+	/* Not all platforms have a scaler */
+ if (num_scalers) {
+ seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d scaling_filter=%d",
+ num_scalers,
+ crtc_state->scaler_state.scaler_users,
+ crtc_state->scaler_state.scaler_id,
+ crtc_state->hw.scaling_filter);
+
+ for (i = 0; i < num_scalers; i++) {
+ const struct intel_scaler *sc =
+ &crtc_state->scaler_state.scalers[i];
+
+ seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
+ i, str_yes_no(sc->in_use), sc->mode);
+ }
+ seq_puts(m, "\n");
+ } else {
+ seq_puts(m, "\tNo scalers available on this platform\n");
+ }
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_VBLANK_EVADE)
+static void crtc_updates_info(struct seq_file *m,
+ struct intel_crtc *crtc,
+ const char *hdr)
+{
+ u64 count;
+ int row;
+
+ count = 0;
+ for (row = 0; row < ARRAY_SIZE(crtc->debug.vbl.times); row++)
+ count += crtc->debug.vbl.times[row];
+ seq_printf(m, "%sUpdates: %llu\n", hdr, count);
+ if (!count)
+ return;
+
+ for (row = 0; row < ARRAY_SIZE(crtc->debug.vbl.times); row++) {
+ char columns[80] = " |";
+ unsigned int x;
+
+ if (row & 1) {
+ const char *units;
+
+ if (row > 10) {
+ x = 1000000;
+ units = "ms";
+ } else {
+ x = 1000;
+ units = "us";
+ }
+
+ snprintf(columns, sizeof(columns), "%4ld%s |",
+ DIV_ROUND_CLOSEST(BIT(row + 9), x), units);
+ }
+
+ if (crtc->debug.vbl.times[row]) {
+ x = ilog2(crtc->debug.vbl.times[row]);
+ memset(columns + 8, '*', x);
+ columns[8 + x] = '\0';
+ }
+
+ seq_printf(m, "%s%s\n", hdr, columns);
+ }
+
+ seq_printf(m, "%sMin update: %lluns\n",
+ hdr, crtc->debug.vbl.min);
+ seq_printf(m, "%sMax update: %lluns\n",
+ hdr, crtc->debug.vbl.max);
+ seq_printf(m, "%sAverage update: %lluns\n",
+ hdr, div64_u64(crtc->debug.vbl.sum, count));
+ seq_printf(m, "%sOverruns > %uus: %u\n",
+ hdr, VBLANK_EVASION_TIME_US, crtc->debug.vbl.over);
+}
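+
+/*
+ * An illustrative reading of the histogram above: row n counts updates
+ * that took roughly 2^(n + 9) ns, every other row is labelled with that
+ * boundary (in us up to row 10, in ms beyond), and the bar length is
+ * log2 of the bucket's count, so each extra '*' doubles the count.
+ */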
+
+static int crtc_updates_show(struct seq_file *m, void *data)
+{
+ crtc_updates_info(m, m->private, "");
+ return 0;
+}
+
+static int crtc_updates_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, crtc_updates_show, inode->i_private);
+}
+
+static ssize_t crtc_updates_write(struct file *file,
+ const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct intel_crtc *crtc = m->private;
+
+ /* May race with an update. Meh. */
+ memset(&crtc->debug.vbl, 0, sizeof(crtc->debug.vbl));
+
+ return len;
+}
+
+static const struct file_operations crtc_updates_fops = {
+ .owner = THIS_MODULE,
+ .open = crtc_updates_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = crtc_updates_write
+};
+
+static void crtc_updates_add(struct drm_crtc *crtc)
+{
+ debugfs_create_file("i915_update_info", 0644, crtc->debugfs_entry,
+ to_intel_crtc(crtc), &crtc_updates_fops);
+}
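+
+/*
+ * Example usage (illustrative, assuming the default debugfs mount):
+ *   cat /sys/kernel/debug/dri/0/crtc-0/i915_update_info
+ * dumps the statistics; writing anything to the file resets them.
+ */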
+
+#else
+static void crtc_updates_info(struct seq_file *m,
+ struct intel_crtc *crtc,
+ const char *hdr)
+{
+}
+
+static void crtc_updates_add(struct drm_crtc *crtc)
+{
+}
+#endif
+
+static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ const struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ struct intel_encoder *encoder;
+
+ seq_printf(m, "[CRTC:%d:%s]:\n",
+ crtc->base.base.id, crtc->base.name);
+
+ seq_printf(m, "\tuapi: enable=%s, active=%s, mode=" DRM_MODE_FMT "\n",
+ str_yes_no(crtc_state->uapi.enable),
+ str_yes_no(crtc_state->uapi.active),
+ DRM_MODE_ARG(&crtc_state->uapi.mode));
+
+ seq_printf(m, "\thw: enable=%s, active=%s\n",
+ str_yes_no(crtc_state->hw.enable), str_yes_no(crtc_state->hw.active));
+ seq_printf(m, "\tadjusted_mode=" DRM_MODE_FMT "\n",
+ DRM_MODE_ARG(&crtc_state->hw.adjusted_mode));
+ seq_printf(m, "\tpipe__mode=" DRM_MODE_FMT "\n",
+ DRM_MODE_ARG(&crtc_state->hw.pipe_mode));
+
+ seq_printf(m, "\tpipe src=" DRM_RECT_FMT ", dither=%s, bpp=%d\n",
+ DRM_RECT_ARG(&crtc_state->pipe_src),
+ str_yes_no(crtc_state->dither), crtc_state->pipe_bpp);
+
+ intel_scaler_info(m, crtc);
+
+ if (crtc_state->bigjoiner_pipes)
+ seq_printf(m, "\tLinked to 0x%x pipes as a %s\n",
+ crtc_state->bigjoiner_pipes,
+ intel_crtc_is_bigjoiner_slave(crtc_state) ? "slave" : "master");
+
+ for_each_intel_encoder_mask(&dev_priv->drm, encoder,
+ crtc_state->uapi.encoder_mask)
+ intel_encoder_info(m, crtc, encoder);
+
+ intel_plane_info(m, crtc);
+
+ seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s\n",
+ str_yes_no(!crtc->cpu_fifo_underrun_disabled),
+ str_yes_no(!crtc->pch_fifo_underrun_disabled));
+
+ crtc_updates_info(m, crtc, "\t");
+}
+
+static int i915_display_info(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
+ struct intel_crtc *crtc;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+ intel_wakeref_t wakeref;
+
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+
+ drm_modeset_lock_all(dev);
+
+ seq_printf(m, "CRTC info\n");
+ seq_printf(m, "---------\n");
+ for_each_intel_crtc(dev, crtc)
+ intel_crtc_info(m, crtc);
+
+ seq_printf(m, "\n");
+ seq_printf(m, "Connector info\n");
+ seq_printf(m, "--------------\n");
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter)
+ intel_connector_info(m, connector);
+ drm_connector_list_iter_end(&conn_iter);
+
+ drm_modeset_unlock_all(dev);
+
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+
+ return 0;
+}
+
+static int i915_shared_dplls_info(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
+ int i;
+
+ drm_modeset_lock_all(dev);
+
+ seq_printf(m, "PLL refclks: non-SSC: %d kHz, SSC: %d kHz\n",
+ dev_priv->display.dpll.ref_clks.nssc,
+ dev_priv->display.dpll.ref_clks.ssc);
+
+ for (i = 0; i < dev_priv->display.dpll.num_shared_dpll; i++) {
+ struct intel_shared_dpll *pll = &dev_priv->display.dpll.shared_dplls[i];
+
+ seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
+ pll->info->id);
+ seq_printf(m, " pipe_mask: 0x%x, active: 0x%x, on: %s\n",
+ pll->state.pipe_mask, pll->active_mask,
+ str_yes_no(pll->on));
+ seq_printf(m, " tracked hardware state:\n");
+ seq_printf(m, " dpll: 0x%08x\n", pll->state.hw_state.dpll);
+ seq_printf(m, " dpll_md: 0x%08x\n",
+ pll->state.hw_state.dpll_md);
+ seq_printf(m, " fp0: 0x%08x\n", pll->state.hw_state.fp0);
+ seq_printf(m, " fp1: 0x%08x\n", pll->state.hw_state.fp1);
+ seq_printf(m, " wrpll: 0x%08x\n", pll->state.hw_state.wrpll);
+ seq_printf(m, " cfgcr0: 0x%08x\n", pll->state.hw_state.cfgcr0);
+ seq_printf(m, " cfgcr1: 0x%08x\n", pll->state.hw_state.cfgcr1);
+ seq_printf(m, " div0: 0x%08x\n", pll->state.hw_state.div0);
+ seq_printf(m, " mg_refclkin_ctl: 0x%08x\n",
+ pll->state.hw_state.mg_refclkin_ctl);
+ seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
+ pll->state.hw_state.mg_clktop2_coreclkctl1);
+ seq_printf(m, " mg_clktop2_hsclkctl: 0x%08x\n",
+ pll->state.hw_state.mg_clktop2_hsclkctl);
+ seq_printf(m, " mg_pll_div0: 0x%08x\n",
+ pll->state.hw_state.mg_pll_div0);
+ seq_printf(m, " mg_pll_div1: 0x%08x\n",
+ pll->state.hw_state.mg_pll_div1);
+ seq_printf(m, " mg_pll_lf: 0x%08x\n",
+ pll->state.hw_state.mg_pll_lf);
+ seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
+ pll->state.hw_state.mg_pll_frac_lock);
+ seq_printf(m, " mg_pll_ssc: 0x%08x\n",
+ pll->state.hw_state.mg_pll_ssc);
+ seq_printf(m, " mg_pll_bias: 0x%08x\n",
+ pll->state.hw_state.mg_pll_bias);
+ seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
+ pll->state.hw_state.mg_pll_tdc_coldst_bias);
+ }
+ drm_modeset_unlock_all(dev);
+
+ return 0;
+}
+
+static int i915_ddb_info(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
+ struct skl_ddb_entry *entry;
+ struct intel_crtc *crtc;
+
+ if (DISPLAY_VER(dev_priv) < 9)
+ return -ENODEV;
+
+ drm_modeset_lock_all(dev);
+
+ seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
+
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ enum pipe pipe = crtc->pipe;
+ enum plane_id plane_id;
+
+ seq_printf(m, "Pipe %c\n", pipe_name(pipe));
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ entry = &crtc_state->wm.skl.plane_ddb[plane_id];
+ seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane_id + 1,
+ entry->start, entry->end,
+ skl_ddb_entry_size(entry));
+ }
+
+ entry = &crtc_state->wm.skl.plane_ddb[PLANE_CURSOR];
+ seq_printf(m, " %-13s%8u%8u%8u\n", "Cursor", entry->start,
+ entry->end, skl_ddb_entry_size(entry));
+ }
+
+ drm_modeset_unlock_all(dev);
+
+ return 0;
+}
+
+static int i915_drrs_status(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_connector_list_iter conn_iter;
+ struct intel_connector *connector;
+ struct intel_crtc *crtc;
+
+ drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
+ for_each_intel_connector_iter(connector, &conn_iter) {
+ seq_printf(m, "[CONNECTOR:%d:%s] DRRS type: %s\n",
+ connector->base.base.id, connector->base.name,
+ intel_drrs_type_str(intel_panel_drrs_type(connector)));
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ seq_puts(m, "\n");
+
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ const struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+
+ seq_printf(m, "[CRTC:%d:%s]:\n",
+ crtc->base.base.id, crtc->base.name);
+
+ mutex_lock(&crtc->drrs.mutex);
+
+ /* DRRS Supported */
+ seq_printf(m, "\tDRRS Enabled: %s\n",
+ str_yes_no(crtc_state->has_drrs));
+
+ seq_printf(m, "\tDRRS Active: %s\n",
+ str_yes_no(intel_drrs_is_active(crtc)));
+
+ seq_printf(m, "\tBusy_frontbuffer_bits: 0x%X\n",
+ crtc->drrs.busy_frontbuffer_bits);
+
+ seq_printf(m, "\tDRRS refresh rate: %s\n",
+ crtc->drrs.refresh_rate == DRRS_REFRESH_RATE_LOW ?
+ "low" : "high");
+
+ mutex_unlock(&crtc->drrs.mutex);
+ }
+
+ return 0;
+}
+
+static bool
+intel_lpsp_power_well_enabled(struct drm_i915_private *i915,
+ enum i915_power_well_id power_well_id)
+{
+ intel_wakeref_t wakeref;
+ bool is_enabled;
+
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ is_enabled = intel_display_power_well_is_enabled(i915,
+ power_well_id);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+
+ return is_enabled;
+}
+
+static int i915_lpsp_status(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *i915 = node_to_i915(m->private);
+ bool lpsp_enabled = false;
+
+ if (DISPLAY_VER(i915) >= 13 || IS_DISPLAY_VER(i915, 9, 10)) {
+ lpsp_enabled = !intel_lpsp_power_well_enabled(i915, SKL_DISP_PW_2);
+ } else if (IS_DISPLAY_VER(i915, 11, 12)) {
+ lpsp_enabled = !intel_lpsp_power_well_enabled(i915, ICL_DISP_PW_3);
+ } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
+ lpsp_enabled = !intel_lpsp_power_well_enabled(i915, HSW_DISP_PW_GLOBAL);
+ } else {
+ seq_puts(m, "LPSP: not supported\n");
+ return 0;
+ }
+
+ seq_printf(m, "LPSP: %s\n", str_enabled_disabled(lpsp_enabled));
+
+ return 0;
+}
+
+static int i915_dp_mst_info(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
+ struct intel_encoder *intel_encoder;
+ struct intel_digital_port *dig_port;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
+ continue;
+
+ intel_encoder = intel_attached_encoder(to_intel_connector(connector));
+ if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
+ continue;
+
+ dig_port = enc_to_dig_port(intel_encoder);
+ if (!intel_dp_mst_source_support(&dig_port->dp))
+ continue;
+
+ seq_printf(m, "MST Source Port [ENCODER:%d:%s]\n",
+ dig_port->base.base.base.id,
+ dig_port->base.base.name);
+ drm_dp_mst_dump_topology(m, &dig_port->dp.mst_mgr);
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ return 0;
+}
+
+static ssize_t i915_displayport_test_active_write(struct file *file,
+ const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ char *input_buffer;
+ int status = 0;
+ struct drm_device *dev;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+ struct intel_dp *intel_dp;
+ int val = 0;
+
+ dev = ((struct seq_file *)file->private_data)->private;
+
+ if (len == 0)
+ return 0;
+
+ input_buffer = memdup_user_nul(ubuf, len);
+ if (IS_ERR(input_buffer))
+ return PTR_ERR(input_buffer);
+
+ drm_dbg(&to_i915(dev)->drm,
+ "Copied %d bytes from user\n", (unsigned int)len);
+
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ struct intel_encoder *encoder;
+
+ if (connector->connector_type !=
+ DRM_MODE_CONNECTOR_DisplayPort)
+ continue;
+
+ encoder = to_intel_encoder(connector->encoder);
+ if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
+ continue;
+
+ if (encoder && connector->status == connector_status_connected) {
+ intel_dp = enc_to_intel_dp(encoder);
+ status = kstrtoint(input_buffer, 10, &val);
+ if (status < 0)
+ break;
+ drm_dbg(&to_i915(dev)->drm,
+ "Got %d for test active\n", val);
+ /* To prevent erroneous activation of the compliance
+ * testing code, only accept an actual value of 1 here
+ */
+ if (val == 1)
+ intel_dp->compliance.test_active = true;
+ else
+ intel_dp->compliance.test_active = false;
+ }
+ }
+ drm_connector_list_iter_end(&conn_iter);
+ kfree(input_buffer);
+ if (status < 0)
+ return status;
+
+ *offp += len;
+ return len;
+}
+
+static int i915_displayport_test_active_show(struct seq_file *m, void *data)
+{
+ struct drm_i915_private *dev_priv = m->private;
+ struct drm_device *dev = &dev_priv->drm;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+ struct intel_dp *intel_dp;
+
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ struct intel_encoder *encoder;
+
+ if (connector->connector_type !=
+ DRM_MODE_CONNECTOR_DisplayPort)
+ continue;
+
+ encoder = to_intel_encoder(connector->encoder);
+ if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
+ continue;
+
+ if (encoder && connector->status == connector_status_connected) {
+ intel_dp = enc_to_intel_dp(encoder);
+ if (intel_dp->compliance.test_active)
+ seq_puts(m, "1");
+ else
+ seq_puts(m, "0");
+ } else
+ seq_puts(m, "0");
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ return 0;
+}
+
+static int i915_displayport_test_active_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, i915_displayport_test_active_show,
+ inode->i_private);
+}
+
+static const struct file_operations i915_displayport_test_active_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_displayport_test_active_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = i915_displayport_test_active_write
+};
+
+static int i915_displayport_test_data_show(struct seq_file *m, void *data)
+{
+ struct drm_i915_private *dev_priv = m->private;
+ struct drm_device *dev = &dev_priv->drm;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+ struct intel_dp *intel_dp;
+
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ struct intel_encoder *encoder;
+
+ if (connector->connector_type !=
+ DRM_MODE_CONNECTOR_DisplayPort)
+ continue;
+
+ encoder = to_intel_encoder(connector->encoder);
+ if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
+ continue;
+
+ if (encoder && connector->status == connector_status_connected) {
+ intel_dp = enc_to_intel_dp(encoder);
+ if (intel_dp->compliance.test_type ==
+ DP_TEST_LINK_EDID_READ)
+ seq_printf(m, "%lx",
+ intel_dp->compliance.test_data.edid);
+ else if (intel_dp->compliance.test_type ==
+ DP_TEST_LINK_VIDEO_PATTERN) {
+ seq_printf(m, "hdisplay: %d\n",
+ intel_dp->compliance.test_data.hdisplay);
+ seq_printf(m, "vdisplay: %d\n",
+ intel_dp->compliance.test_data.vdisplay);
+ seq_printf(m, "bpc: %u\n",
+ intel_dp->compliance.test_data.bpc);
+ } else if (intel_dp->compliance.test_type ==
+ DP_TEST_LINK_PHY_TEST_PATTERN) {
+ seq_printf(m, "pattern: %d\n",
+ intel_dp->compliance.test_data.phytest.phy_pattern);
+ seq_printf(m, "Number of lanes: %d\n",
+ intel_dp->compliance.test_data.phytest.num_lanes);
+ seq_printf(m, "Link Rate: %d\n",
+ intel_dp->compliance.test_data.phytest.link_rate);
+ seq_printf(m, "level: %02x\n",
+ intel_dp->train_set[0]);
+ }
+ } else
+ seq_puts(m, "0");
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
+
+static int i915_displayport_test_type_show(struct seq_file *m, void *data)
+{
+ struct drm_i915_private *dev_priv = m->private;
+ struct drm_device *dev = &dev_priv->drm;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+ struct intel_dp *intel_dp;
+
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ struct intel_encoder *encoder;
+
+ if (connector->connector_type !=
+ DRM_MODE_CONNECTOR_DisplayPort)
+ continue;
+
+ encoder = to_intel_encoder(connector->encoder);
+ if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
+ continue;
+
+ if (encoder && connector->status == connector_status_connected) {
+ intel_dp = enc_to_intel_dp(encoder);
+ seq_printf(m, "%02lx\n", intel_dp->compliance.test_type);
+ } else
+ seq_puts(m, "0");
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
+
+static void wm_latency_show(struct seq_file *m, const u16 wm[8])
+{
+ struct drm_i915_private *dev_priv = m->private;
+ struct drm_device *dev = &dev_priv->drm;
+ int level;
+ int num_levels;
+
+ if (IS_CHERRYVIEW(dev_priv))
+ num_levels = 3;
+ else if (IS_VALLEYVIEW(dev_priv))
+ num_levels = 1;
+ else if (IS_G4X(dev_priv))
+ num_levels = 3;
+ else
+ num_levels = ilk_wm_max_level(dev_priv) + 1;
+
+ drm_modeset_lock_all(dev);
+
+ for (level = 0; level < num_levels; level++) {
+ unsigned int latency = wm[level];
+
+ /*
+ * - WM1+ latency values in 0.5us units
+ * - latencies are in us on gen9/vlv/chv
+ */
+ if (DISPLAY_VER(dev_priv) >= 9 ||
+ IS_VALLEYVIEW(dev_priv) ||
+ IS_CHERRYVIEW(dev_priv) ||
+ IS_G4X(dev_priv))
+ latency *= 10;
+ else if (level > 0)
+ latency *= 5;
+
+ seq_printf(m, "WM%d %u (%u.%u usec)\n",
+ level, wm[level], latency / 10, latency % 10);
+ }
+
+ drm_modeset_unlock_all(dev);
+}
+
+static int pri_wm_latency_show(struct seq_file *m, void *data)
+{
+ struct drm_i915_private *dev_priv = m->private;
+ const u16 *latencies;
+
+ if (DISPLAY_VER(dev_priv) >= 9)
+ latencies = dev_priv->display.wm.skl_latency;
+ else
+ latencies = dev_priv->display.wm.pri_latency;
+
+ wm_latency_show(m, latencies);
+
+ return 0;
+}
+
+static int spr_wm_latency_show(struct seq_file *m, void *data)
+{
+ struct drm_i915_private *dev_priv = m->private;
+ const u16 *latencies;
+
+ if (DISPLAY_VER(dev_priv) >= 9)
+ latencies = dev_priv->display.wm.skl_latency;
+ else
+ latencies = dev_priv->display.wm.spr_latency;
+
+ wm_latency_show(m, latencies);
+
+ return 0;
+}
+
+static int cur_wm_latency_show(struct seq_file *m, void *data)
+{
+ struct drm_i915_private *dev_priv = m->private;
+ const u16 *latencies;
+
+ if (DISPLAY_VER(dev_priv) >= 9)
+ latencies = dev_priv->display.wm.skl_latency;
+ else
+ latencies = dev_priv->display.wm.cur_latency;
+
+ wm_latency_show(m, latencies);
+
+ return 0;
+}
+
+static int pri_wm_latency_open(struct inode *inode, struct file *file)
+{
+ struct drm_i915_private *dev_priv = inode->i_private;
+
+ if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
+ return -ENODEV;
+
+ return single_open(file, pri_wm_latency_show, dev_priv);
+}
+
+static int spr_wm_latency_open(struct inode *inode, struct file *file)
+{
+ struct drm_i915_private *dev_priv = inode->i_private;
+
+ if (HAS_GMCH(dev_priv))
+ return -ENODEV;
+
+ return single_open(file, spr_wm_latency_show, dev_priv);
+}
+
+static int cur_wm_latency_open(struct inode *inode, struct file *file)
+{
+ struct drm_i915_private *dev_priv = inode->i_private;
+
+ if (HAS_GMCH(dev_priv))
+ return -ENODEV;
+
+ return single_open(file, cur_wm_latency_show, dev_priv);
+}
+
+static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
+ size_t len, loff_t *offp, u16 wm[8])
+{
+ struct seq_file *m = file->private_data;
+ struct drm_i915_private *dev_priv = m->private;
+ struct drm_device *dev = &dev_priv->drm;
+ u16 new[8] = { 0 };
+ int num_levels;
+ int level;
+ int ret;
+ char tmp[32];
+
+ if (IS_CHERRYVIEW(dev_priv))
+ num_levels = 3;
+ else if (IS_VALLEYVIEW(dev_priv))
+ num_levels = 1;
+ else if (IS_G4X(dev_priv))
+ num_levels = 3;
+ else
+ num_levels = ilk_wm_max_level(dev_priv) + 1;
+
+ if (len >= sizeof(tmp))
+ return -EINVAL;
+
+ if (copy_from_user(tmp, ubuf, len))
+ return -EFAULT;
+
+ tmp[len] = '\0';
+
+ ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
+ &new[0], &new[1], &new[2], &new[3],
+ &new[4], &new[5], &new[6], &new[7]);
+ if (ret != num_levels)
+ return -EINVAL;
+
+ drm_modeset_lock_all(dev);
+
+ for (level = 0; level < num_levels; level++)
+ wm[level] = new[level];
+
+ drm_modeset_unlock_all(dev);
+
+ return len;
+}
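+
+/*
+ * Example usage (illustrative): exactly one value per watermark level
+ * must be supplied, e.g. for an 8-level gen9+ platform:
+ *   echo "2 4 10 15 20 25 30 35" > /sys/kernel/debug/dri/0/i915_pri_wm_latency
+ * Values are in us on gen9+/vlv/chv/g4x and in 0.5us units for WM1+
+ * elsewhere, matching what wm_latency_show() above prints back.
+ */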
+
+static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct drm_i915_private *dev_priv = m->private;
+ u16 *latencies;
+
+ if (DISPLAY_VER(dev_priv) >= 9)
+ latencies = dev_priv->display.wm.skl_latency;
+ else
+ latencies = dev_priv->display.wm.pri_latency;
+
+ return wm_latency_write(file, ubuf, len, offp, latencies);
+}
+
+static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct drm_i915_private *dev_priv = m->private;
+ u16 *latencies;
+
+ if (DISPLAY_VER(dev_priv) >= 9)
+ latencies = dev_priv->display.wm.skl_latency;
+ else
+ latencies = dev_priv->display.wm.spr_latency;
+
+ return wm_latency_write(file, ubuf, len, offp, latencies);
+}
+
+static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct drm_i915_private *dev_priv = m->private;
+ u16 *latencies;
+
+ if (DISPLAY_VER(dev_priv) >= 9)
+ latencies = dev_priv->display.wm.skl_latency;
+ else
+ latencies = dev_priv->display.wm.cur_latency;
+
+ return wm_latency_write(file, ubuf, len, offp, latencies);
+}
+
+static const struct file_operations i915_pri_wm_latency_fops = {
+ .owner = THIS_MODULE,
+ .open = pri_wm_latency_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = pri_wm_latency_write
+};
+
+static const struct file_operations i915_spr_wm_latency_fops = {
+ .owner = THIS_MODULE,
+ .open = spr_wm_latency_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = spr_wm_latency_write
+};
+
+static const struct file_operations i915_cur_wm_latency_fops = {
+ .owner = THIS_MODULE,
+ .open = cur_wm_latency_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = cur_wm_latency_write
+};
+
+static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
+{
+ struct drm_i915_private *dev_priv = m->private;
+ struct intel_hotplug *hotplug = &dev_priv->display.hotplug;
+
+ /* Synchronize with everything first in case there's been an HPD
+ * storm, but we haven't finished handling it in the kernel yet
+ */
+ intel_synchronize_irq(dev_priv);
+ flush_work(&dev_priv->display.hotplug.dig_port_work);
+ flush_delayed_work(&dev_priv->display.hotplug.hotplug_work);
+
+ seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
+ seq_printf(m, "Detected: %s\n",
+ str_yes_no(delayed_work_pending(&hotplug->reenable_work)));
+
+ return 0;
+}
+
+static ssize_t i915_hpd_storm_ctl_write(struct file *file,
+ const char __user *ubuf, size_t len,
+ loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct drm_i915_private *dev_priv = m->private;
+ struct intel_hotplug *hotplug = &dev_priv->display.hotplug;
+ unsigned int new_threshold;
+ int i;
+ char *newline;
+ char tmp[16];
+
+ if (len >= sizeof(tmp))
+ return -EINVAL;
+
+ if (copy_from_user(tmp, ubuf, len))
+ return -EFAULT;
+
+ tmp[len] = '\0';
+
+ /* Strip newline, if any */
+ newline = strchr(tmp, '\n');
+ if (newline)
+ *newline = '\0';
+
+ if (strcmp(tmp, "reset") == 0)
+ new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
+ else if (kstrtouint(tmp, 10, &new_threshold) != 0)
+ return -EINVAL;
+
+ if (new_threshold > 0)
+ drm_dbg_kms(&dev_priv->drm,
+ "Setting HPD storm detection threshold to %d\n",
+ new_threshold);
+ else
+ drm_dbg_kms(&dev_priv->drm, "Disabling HPD storm detection\n");
+
+ spin_lock_irq(&dev_priv->irq_lock);
+ hotplug->hpd_storm_threshold = new_threshold;
+ /* Reset the HPD storm stats so we don't accidentally trigger a storm */
+ for_each_hpd_pin(i)
+ hotplug->stats[i].count = 0;
+ spin_unlock_irq(&dev_priv->irq_lock);
+
+ /* Re-enable hpd immediately if we were in an irq storm */
+ flush_delayed_work(&dev_priv->display.hotplug.reenable_work);
+
+ return len;
+}
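+
+/*
+ * Example usage (illustrative):
+ *   echo 5 > /sys/kernel/debug/dri/0/i915_hpd_storm_ctl     # set threshold
+ *   echo 0 > /sys/kernel/debug/dri/0/i915_hpd_storm_ctl     # disable detection
+ *   echo reset > /sys/kernel/debug/dri/0/i915_hpd_storm_ctl # restore default
+ */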
+
+static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
+}
+
+static const struct file_operations i915_hpd_storm_ctl_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_hpd_storm_ctl_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = i915_hpd_storm_ctl_write
+};
+
+static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
+{
+ struct drm_i915_private *dev_priv = m->private;
+
+ seq_printf(m, "Enabled: %s\n",
+ str_yes_no(dev_priv->display.hotplug.hpd_short_storm_enabled));
+
+ return 0;
+}
+
+static int
+i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, i915_hpd_short_storm_ctl_show,
+ inode->i_private);
+}
+
+static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
+ const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct drm_i915_private *dev_priv = m->private;
+ struct intel_hotplug *hotplug = &dev_priv->display.hotplug;
+ char *newline;
+ char tmp[16];
+ int i;
+ bool new_state;
+
+ if (len >= sizeof(tmp))
+ return -EINVAL;
+
+ if (copy_from_user(tmp, ubuf, len))
+ return -EFAULT;
+
+ tmp[len] = '\0';
+
+ /* Strip newline, if any */
+ newline = strchr(tmp, '\n');
+ if (newline)
+ *newline = '\0';
+
+ /* Reset to the "default" state for this system */
+ if (strcmp(tmp, "reset") == 0)
+ new_state = !HAS_DP_MST(dev_priv);
+ else if (kstrtobool(tmp, &new_state) != 0)
+ return -EINVAL;
+
+ drm_dbg_kms(&dev_priv->drm, "%sabling HPD short storm detection\n",
+ new_state ? "En" : "Dis");
+
+ spin_lock_irq(&dev_priv->irq_lock);
+ hotplug->hpd_short_storm_enabled = new_state;
+ /* Reset the HPD storm stats so we don't accidentally trigger a storm */
+ for_each_hpd_pin(i)
+ hotplug->stats[i].count = 0;
+ spin_unlock_irq(&dev_priv->irq_lock);
+
+ /* Re-enable hpd immediately if we were in an irq storm */
+ flush_delayed_work(&dev_priv->display.hotplug.reenable_work);
+
+ return len;
+}
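+
+/*
+ * Example usage (illustrative): the file accepts kstrtobool() values
+ * ("0"/"1"/"on"/"off"/"y"/"n") or "reset" to restore the per-system
+ * default, e.g.:
+ *   echo on > /sys/kernel/debug/dri/0/i915_hpd_short_storm_ctl
+ */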
+
+static const struct file_operations i915_hpd_short_storm_ctl_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_hpd_short_storm_ctl_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = i915_hpd_short_storm_ctl_write,
+};
+
+static int i915_drrs_ctl_set(void *data, u64 val)
+{
+ struct drm_i915_private *dev_priv = data;
+ struct drm_device *dev = &dev_priv->drm;
+ struct intel_crtc *crtc;
+
+ for_each_intel_crtc(dev, crtc) {
+ struct intel_crtc_state *crtc_state;
+ struct drm_crtc_commit *commit;
+ int ret;
+
+ ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
+ if (ret)
+ return ret;
+
+ crtc_state = to_intel_crtc_state(crtc->base.state);
+
+ if (!crtc_state->hw.active ||
+ !crtc_state->has_drrs)
+ goto out;
+
+ commit = crtc_state->uapi.commit;
+ if (commit) {
+ ret = wait_for_completion_interruptible(&commit->hw_done);
+ if (ret)
+ goto out;
+ }
+
+ drm_dbg(&dev_priv->drm,
+ "Manually %sactivating DRRS\n", val ? "" : "de");
+
+ if (val)
+ intel_drrs_activate(crtc_state);
+ else
+ intel_drrs_deactivate(crtc_state);
+
+out:
+ drm_modeset_unlock(&crtc->base.mutex);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
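+
+/*
+ * Example usage (illustrative):
+ *   echo 1 > /sys/kernel/debug/dri/0/i915_drrs_ctl   # activate DRRS
+ *   echo 0 > /sys/kernel/debug/dri/0/i915_drrs_ctl   # deactivate DRRS
+ * on every active crtc that has DRRS configured.
+ */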
+
+static ssize_t
+i915_fifo_underrun_reset_write(struct file *filp,
+ const char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ struct drm_i915_private *dev_priv = filp->private_data;
+ struct intel_crtc *crtc;
+ struct drm_device *dev = &dev_priv->drm;
+ int ret;
+ bool reset;
+
+ ret = kstrtobool_from_user(ubuf, cnt, &reset);
+ if (ret)
+ return ret;
+
+ if (!reset)
+ return cnt;
+
+ for_each_intel_crtc(dev, crtc) {
+ struct drm_crtc_commit *commit;
+ struct intel_crtc_state *crtc_state;
+
+ ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
+ if (ret)
+ return ret;
+
+ crtc_state = to_intel_crtc_state(crtc->base.state);
+ commit = crtc_state->uapi.commit;
+ if (commit) {
+ ret = wait_for_completion_interruptible(&commit->hw_done);
+ if (!ret)
+ ret = wait_for_completion_interruptible(&commit->flip_done);
+ }
+
+ if (!ret && crtc_state->hw.active) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Re-arming FIFO underruns on pipe %c\n",
+ pipe_name(crtc->pipe));
+
+ intel_crtc_arm_fifo_underrun(crtc, crtc_state);
+ }
+
+ drm_modeset_unlock(&crtc->base.mutex);
+
+ if (ret)
+ return ret;
+ }
+
+ intel_fbc_reset_underrun(dev_priv);
+
+ return cnt;
+}
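+
+/*
+ * Example usage (illustrative): writing any kstrtobool() truth value
+ * re-arms FIFO underrun reporting on all pipes:
+ *   echo 1 > /sys/kernel/debug/dri/0/i915_fifo_underrun_reset
+ */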
+
+static const struct file_operations i915_fifo_underrun_reset_ops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .write = i915_fifo_underrun_reset_write,
+ .llseek = default_llseek,
+};
+
+static const struct drm_info_list intel_display_debugfs_list[] = {
+ {"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
+ {"i915_ips_status", i915_ips_status, 0},
+ {"i915_sr_status", i915_sr_status, 0},
+ {"i915_opregion", i915_opregion, 0},
+ {"i915_vbt", i915_vbt, 0},
+ {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
+ {"i915_edp_psr_status", i915_edp_psr_status, 0},
+ {"i915_power_domain_info", i915_power_domain_info, 0},
+ {"i915_display_info", i915_display_info, 0},
+ {"i915_shared_dplls_info", i915_shared_dplls_info, 0},
+ {"i915_dp_mst_info", i915_dp_mst_info, 0},
+ {"i915_ddb_info", i915_ddb_info, 0},
+ {"i915_drrs_status", i915_drrs_status, 0},
+ {"i915_lpsp_status", i915_lpsp_status, 0},
+};
+
+static const struct {
+ const char *name;
+ const struct file_operations *fops;
+} intel_display_debugfs_files[] = {
+ {"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
+ {"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
+ {"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
+ {"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
+ {"i915_dp_test_data", &i915_displayport_test_data_fops},
+ {"i915_dp_test_type", &i915_displayport_test_type_fops},
+ {"i915_dp_test_active", &i915_displayport_test_active_fops},
+ {"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
+ {"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops},
+ {"i915_drrs_ctl", &i915_drrs_ctl_fops},
+ {"i915_edp_psr_debug", &i915_edp_psr_debug_fops},
+};
+
+void intel_display_debugfs_register(struct drm_i915_private *i915)
+{
+ struct drm_minor *minor = i915->drm.primary;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(intel_display_debugfs_files); i++) {
+ debugfs_create_file(intel_display_debugfs_files[i].name,
+ S_IRUGO | S_IWUSR,
+ minor->debugfs_root,
+ to_i915(minor->dev),
+ intel_display_debugfs_files[i].fops);
+ }
+
+ drm_debugfs_create_files(intel_display_debugfs_list,
+ ARRAY_SIZE(intel_display_debugfs_list),
+ minor->debugfs_root, minor);
+
+ intel_dmc_debugfs_register(i915);
+ intel_fbc_debugfs_register(i915);
+ skl_watermark_ipc_debugfs_register(i915);
+}
+
+static int i915_panel_show(struct seq_file *m, void *data)
+{
+ struct drm_connector *connector = m->private;
+ struct intel_dp *intel_dp =
+ intel_attached_dp(to_intel_connector(connector));
+
+ if (connector->status != connector_status_connected)
+ return -ENODEV;
+
+ seq_printf(m, "Panel power up delay: %d\n",
+ intel_dp->pps.panel_power_up_delay);
+ seq_printf(m, "Panel power down delay: %d\n",
+ intel_dp->pps.panel_power_down_delay);
+ seq_printf(m, "Backlight on delay: %d\n",
+ intel_dp->pps.backlight_on_delay);
+ seq_printf(m, "Backlight off delay: %d\n",
+ intel_dp->pps.backlight_off_delay);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(i915_panel);
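+
+/*
+ * The attribute above is exposed per eDP connector as
+ * "i915_panel_timings" (see intel_connector_debugfs_add() below).
+ * Example usage (illustrative, <connector> being the connector's
+ * debugfs directory name):
+ *   cat /sys/kernel/debug/dri/0/<connector>/i915_panel_timings
+ */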
+
+static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
+{
+ struct drm_connector *connector = m->private;
+ struct drm_i915_private *i915 = to_i915(connector->dev);
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ int ret;
+
+ ret = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex);
+ if (ret)
+ return ret;
+
+ if (!connector->encoder || connector->status != connector_status_connected) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ seq_printf(m, "%s:%d HDCP version: ", connector->name,
+ connector->base.id);
+ intel_hdcp_info(m, intel_connector);
+
+out:
+ drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
+
+ return ret;
+}
+DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
+
+static int i915_psr_status_show(struct seq_file *m, void *data)
+{
+ struct drm_connector *connector = m->private;
+ struct intel_dp *intel_dp =
+ intel_attached_dp(to_intel_connector(connector));
+
+ return intel_psr_status(m, intel_dp);
+}
+DEFINE_SHOW_ATTRIBUTE(i915_psr_status);
+
+static int i915_lpsp_capability_show(struct seq_file *m, void *data)
+{
+ struct drm_connector *connector = m->private;
+ struct drm_i915_private *i915 = to_i915(connector->dev);
+ struct intel_encoder *encoder;
+ bool lpsp_capable = false;
+
+ encoder = intel_attached_encoder(to_intel_connector(connector));
+ if (!encoder)
+ return -ENODEV;
+
+ if (connector->status != connector_status_connected)
+ return -ENODEV;
+
+ if (DISPLAY_VER(i915) >= 13)
+ lpsp_capable = encoder->port <= PORT_B;
+ else if (DISPLAY_VER(i915) >= 12)
+ /*
+		 * TGL can actually drive LPSP on ports up to DDI_C, but no TGL
+		 * SKU has a physically connected DDI_C, and the driver does
+		 * not even initialize the DDI_C port for gen12.
+ */
+ lpsp_capable = encoder->port <= PORT_B;
+ else if (DISPLAY_VER(i915) == 11)
+ lpsp_capable = (connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
+ connector->connector_type == DRM_MODE_CONNECTOR_eDP);
+ else if (IS_DISPLAY_VER(i915, 9, 10))
+ lpsp_capable = (encoder->port == PORT_A &&
+ (connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
+ connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
+ connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort));
+ else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
+ lpsp_capable = connector->connector_type == DRM_MODE_CONNECTOR_eDP;
+
+ seq_printf(m, "LPSP: %s\n", lpsp_capable ? "capable" : "incapable");
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(i915_lpsp_capability);
+
+static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
+{
+ struct drm_connector *connector = m->private;
+ struct drm_device *dev = connector->dev;
+ struct drm_crtc *crtc;
+ struct intel_dp *intel_dp;
+ struct drm_modeset_acquire_ctx ctx;
+ struct intel_crtc_state *crtc_state = NULL;
+ int ret = 0;
+ bool try_again = false;
+
+ drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
+
+ do {
+ try_again = false;
+ ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
+ &ctx);
+ if (ret) {
+ if (ret == -EDEADLK && !drm_modeset_backoff(&ctx)) {
+ try_again = true;
+ continue;
+ }
+ break;
+ }
+ crtc = connector->state->crtc;
+ if (connector->status != connector_status_connected || !crtc) {
+ ret = -ENODEV;
+ break;
+ }
+ ret = drm_modeset_lock(&crtc->mutex, &ctx);
+ if (ret == -EDEADLK) {
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret) {
+ try_again = true;
+ continue;
+ }
+ break;
+ } else if (ret) {
+ break;
+ }
+ intel_dp = intel_attached_dp(to_intel_connector(connector));
+ crtc_state = to_intel_crtc_state(crtc->state);
+ seq_printf(m, "DSC_Enabled: %s\n",
+ str_yes_no(crtc_state->dsc.compression_enable));
+ seq_printf(m, "DSC_Sink_Support: %s\n",
+ str_yes_no(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
+ seq_printf(m, "Force_DSC_Enable: %s\n",
+ str_yes_no(intel_dp->force_dsc_en));
+ if (!intel_dp_is_edp(intel_dp))
+ seq_printf(m, "FEC_Sink_Support: %s\n",
+ str_yes_no(drm_dp_sink_supports_fec(intel_dp->fec_capable)));
+ } while (try_again);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
+ return ret;
+}
+
+static ssize_t i915_dsc_fec_support_write(struct file *file,
+ const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ bool dsc_enable = false;
+ int ret;
+ struct drm_connector *connector =
+ ((struct seq_file *)file->private_data)->private;
+ struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ if (len == 0)
+ return 0;
+
+ drm_dbg(&i915->drm,
+ "Copied %zu bytes from user to force DSC\n", len);
+
+ ret = kstrtobool_from_user(ubuf, len, &dsc_enable);
+ if (ret < 0)
+ return ret;
+
+ drm_dbg(&i915->drm, "Got %s for DSC Enable\n",
+ (dsc_enable) ? "true" : "false");
+ intel_dp->force_dsc_en = dsc_enable;
+
+ *offp += len;
+ return len;
+}
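+
+/*
+ * Example usage (illustrative): force DSC on the next modeset of this
+ * connector, assuming the default per-connector debugfs layout:
+ *   echo 1 > /sys/kernel/debug/dri/0/<connector>/i915_dsc_fec_support
+ */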
+
+static int i915_dsc_fec_support_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, i915_dsc_fec_support_show,
+ inode->i_private);
+}
+
+static const struct file_operations i915_dsc_fec_support_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_dsc_fec_support_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = i915_dsc_fec_support_write
+};
+
+static int i915_dsc_bpc_show(struct seq_file *m, void *data)
+{
+ struct drm_connector *connector = m->private;
+ struct drm_device *dev = connector->dev;
+ struct drm_crtc *crtc;
+ struct intel_crtc_state *crtc_state;
+ struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
+ int ret;
+
+ if (!encoder)
+ return -ENODEV;
+
+ ret = drm_modeset_lock_single_interruptible(&dev->mode_config.connection_mutex);
+ if (ret)
+ return ret;
+
+ crtc = connector->state->crtc;
+ if (connector->status != connector_status_connected || !crtc) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ crtc_state = to_intel_crtc_state(crtc->state);
+ seq_printf(m, "Input_BPC: %d\n", crtc_state->dsc.config.bits_per_component);
+
+out:
+	drm_modeset_unlock(&dev->mode_config.connection_mutex);
+
+ return ret;
+}
+
+static ssize_t i915_dsc_bpc_write(struct file *file,
+ const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct drm_connector *connector =
+ ((struct seq_file *)file->private_data)->private;
+ struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ int dsc_bpc = 0;
+ int ret;
+
+ ret = kstrtoint_from_user(ubuf, len, 0, &dsc_bpc);
+ if (ret < 0)
+ return ret;
+
+ intel_dp->force_dsc_bpc = dsc_bpc;
+ *offp += len;
+
+ return len;
+}
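+
+/*
+ * Example usage (illustrative): force a DSC input bpc for the next
+ * modeset of this connector; writing 0 drops the override:
+ *   echo 10 > /sys/kernel/debug/dri/0/<connector>/i915_dsc_bpc
+ */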
+
+static int i915_dsc_bpc_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, i915_dsc_bpc_show, inode->i_private);
+}
+
+static const struct file_operations i915_dsc_bpc_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_dsc_bpc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = i915_dsc_bpc_write
+};
+
+/*
+ * Returns the current CRTC's bpc.
+ * Example usage: cat /sys/kernel/debug/dri/0/crtc-0/i915_current_bpc
+ */
+static int i915_current_bpc_show(struct seq_file *m, void *data)
+{
+ struct intel_crtc *crtc = to_intel_crtc(m->private);
+ struct intel_crtc_state *crtc_state;
+ int ret;
+
+ ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
+ if (ret)
+ return ret;
+
+ crtc_state = to_intel_crtc_state(crtc->base.state);
+ seq_printf(m, "Current: %u\n", crtc_state->pipe_bpp / 3);
+
+ drm_modeset_unlock(&crtc->base.mutex);
+
+ return ret;
+}
+DEFINE_SHOW_ATTRIBUTE(i915_current_bpc);
+
+/**
+ * intel_connector_debugfs_add - add i915 specific connector debugfs files
+ * @intel_connector: pointer to a registered intel_connector
+ *
+ * Cleanup will be done by drm_connector_unregister() through a call to
+ * drm_debugfs_connector_remove().
+ */
+void intel_connector_debugfs_add(struct intel_connector *intel_connector)
+{
+ struct drm_connector *connector = &intel_connector->base;
+ struct dentry *root = connector->debugfs_entry;
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
+
+	/* The connector must have been registered beforehand. */
+ if (!root)
+ return;
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+ debugfs_create_file("i915_panel_timings", S_IRUGO, root,
+ connector, &i915_panel_fops);
+ debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
+ connector, &i915_psr_sink_status_fops);
+ }
+
+ if (HAS_PSR(dev_priv) &&
+ connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+ debugfs_create_file("i915_psr_status", 0444, root,
+ connector, &i915_psr_status_fops);
+ }
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
+ connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
+ connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
+ debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root,
+ connector, &i915_hdcp_sink_capability_fops);
+ }
+
+ if (DISPLAY_VER(dev_priv) >= 11 &&
+ ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort &&
+ !to_intel_connector(connector)->mst_port) ||
+ connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
+ debugfs_create_file("i915_dsc_fec_support", 0644, root,
+ connector, &i915_dsc_fec_support_fops);
+
+ debugfs_create_file("i915_dsc_bpc", 0644, root,
+ connector, &i915_dsc_bpc_fops);
+ }
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
+ connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
+ connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
+ connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
+ connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)
+ debugfs_create_file("i915_lpsp_capability", 0444, root,
+ connector, &i915_lpsp_capability_fops);
+}
+
+/**
+ * intel_crtc_debugfs_add - add i915 specific crtc debugfs files
+ * @crtc: pointer to a drm_crtc
+ *
+ * Failure to add debugfs entries should generally be ignored.
+ */
+void intel_crtc_debugfs_add(struct drm_crtc *crtc)
+{
+ if (!crtc->debugfs_entry)
+ return;
+
+ crtc_updates_add(crtc);
+ intel_fbc_crtc_debugfs_add(to_intel_crtc(crtc));
+
+ debugfs_create_file("i915_current_bpc", 0444, crtc->debugfs_entry, crtc,
+ &i915_current_bpc_fops);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.h b/drivers/gpu/drm/i915/display/intel_display_debugfs.h
new file mode 100644
index 000000000..d3a79c07c
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef __INTEL_DISPLAY_DEBUGFS_H__
+#define __INTEL_DISPLAY_DEBUGFS_H__
+
+struct drm_crtc;
+struct drm_i915_private;
+struct intel_connector;
+
+#ifdef CONFIG_DEBUG_FS
+void intel_display_debugfs_register(struct drm_i915_private *i915);
+void intel_connector_debugfs_add(struct intel_connector *connector);
+void intel_crtc_debugfs_add(struct drm_crtc *crtc);
+#else
+static inline void intel_display_debugfs_register(struct drm_i915_private *i915) {}
+static inline void intel_connector_debugfs_add(struct intel_connector *connector) {}
+static inline void intel_crtc_debugfs_add(struct drm_crtc *crtc) {}
+#endif
+
+#endif /* __INTEL_DISPLAY_DEBUGFS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
new file mode 100644
index 000000000..1a63da28f
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -0,0 +1,2489 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/string_helpers.h>
+
+#include "i915_drv.h"
+#include "i915_irq.h"
+#include "intel_backlight_regs.h"
+#include "intel_cdclk.h"
+#include "intel_combo_phy.h"
+#include "intel_de.h"
+#include "intel_display_power.h"
+#include "intel_display_power_map.h"
+#include "intel_display_power_well.h"
+#include "intel_display_types.h"
+#include "intel_dmc.h"
+#include "intel_mchbar_regs.h"
+#include "intel_pch_refclk.h"
+#include "intel_pcode.h"
+#include "intel_snps_phy.h"
+#include "skl_watermark.h"
+#include "vlv_sideband.h"
+
+#define for_each_power_domain_well(__dev_priv, __power_well, __domain) \
+ for_each_power_well(__dev_priv, __power_well) \
+ for_each_if(test_bit((__domain), (__power_well)->domains.bits))
+
+#define for_each_power_domain_well_reverse(__dev_priv, __power_well, __domain) \
+ for_each_power_well_reverse(__dev_priv, __power_well) \
+ for_each_if(test_bit((__domain), (__power_well)->domains.bits))
+
+const char *
+intel_display_power_domain_str(enum intel_display_power_domain domain)
+{
+ switch (domain) {
+ case POWER_DOMAIN_DISPLAY_CORE:
+ return "DISPLAY_CORE";
+ case POWER_DOMAIN_PIPE_A:
+ return "PIPE_A";
+ case POWER_DOMAIN_PIPE_B:
+ return "PIPE_B";
+ case POWER_DOMAIN_PIPE_C:
+ return "PIPE_C";
+ case POWER_DOMAIN_PIPE_D:
+ return "PIPE_D";
+ case POWER_DOMAIN_PIPE_PANEL_FITTER_A:
+ return "PIPE_PANEL_FITTER_A";
+ case POWER_DOMAIN_PIPE_PANEL_FITTER_B:
+ return "PIPE_PANEL_FITTER_B";
+ case POWER_DOMAIN_PIPE_PANEL_FITTER_C:
+ return "PIPE_PANEL_FITTER_C";
+ case POWER_DOMAIN_PIPE_PANEL_FITTER_D:
+ return "PIPE_PANEL_FITTER_D";
+ case POWER_DOMAIN_TRANSCODER_A:
+ return "TRANSCODER_A";
+ case POWER_DOMAIN_TRANSCODER_B:
+ return "TRANSCODER_B";
+ case POWER_DOMAIN_TRANSCODER_C:
+ return "TRANSCODER_C";
+ case POWER_DOMAIN_TRANSCODER_D:
+ return "TRANSCODER_D";
+ case POWER_DOMAIN_TRANSCODER_EDP:
+ return "TRANSCODER_EDP";
+ case POWER_DOMAIN_TRANSCODER_DSI_A:
+ return "TRANSCODER_DSI_A";
+ case POWER_DOMAIN_TRANSCODER_DSI_C:
+ return "TRANSCODER_DSI_C";
+ case POWER_DOMAIN_TRANSCODER_VDSC_PW2:
+ return "TRANSCODER_VDSC_PW2";
+ case POWER_DOMAIN_PORT_DDI_LANES_A:
+ return "PORT_DDI_LANES_A";
+ case POWER_DOMAIN_PORT_DDI_LANES_B:
+ return "PORT_DDI_LANES_B";
+ case POWER_DOMAIN_PORT_DDI_LANES_C:
+ return "PORT_DDI_LANES_C";
+ case POWER_DOMAIN_PORT_DDI_LANES_D:
+ return "PORT_DDI_LANES_D";
+ case POWER_DOMAIN_PORT_DDI_LANES_E:
+ return "PORT_DDI_LANES_E";
+ case POWER_DOMAIN_PORT_DDI_LANES_F:
+ return "PORT_DDI_LANES_F";
+ case POWER_DOMAIN_PORT_DDI_LANES_TC1:
+ return "PORT_DDI_LANES_TC1";
+ case POWER_DOMAIN_PORT_DDI_LANES_TC2:
+ return "PORT_DDI_LANES_TC2";
+ case POWER_DOMAIN_PORT_DDI_LANES_TC3:
+ return "PORT_DDI_LANES_TC3";
+ case POWER_DOMAIN_PORT_DDI_LANES_TC4:
+ return "PORT_DDI_LANES_TC4";
+ case POWER_DOMAIN_PORT_DDI_LANES_TC5:
+ return "PORT_DDI_LANES_TC5";
+ case POWER_DOMAIN_PORT_DDI_LANES_TC6:
+ return "PORT_DDI_LANES_TC6";
+ case POWER_DOMAIN_PORT_DDI_IO_A:
+ return "PORT_DDI_IO_A";
+ case POWER_DOMAIN_PORT_DDI_IO_B:
+ return "PORT_DDI_IO_B";
+ case POWER_DOMAIN_PORT_DDI_IO_C:
+ return "PORT_DDI_IO_C";
+ case POWER_DOMAIN_PORT_DDI_IO_D:
+ return "PORT_DDI_IO_D";
+ case POWER_DOMAIN_PORT_DDI_IO_E:
+ return "PORT_DDI_IO_E";
+ case POWER_DOMAIN_PORT_DDI_IO_F:
+ return "PORT_DDI_IO_F";
+ case POWER_DOMAIN_PORT_DDI_IO_TC1:
+ return "PORT_DDI_IO_TC1";
+ case POWER_DOMAIN_PORT_DDI_IO_TC2:
+ return "PORT_DDI_IO_TC2";
+ case POWER_DOMAIN_PORT_DDI_IO_TC3:
+ return "PORT_DDI_IO_TC3";
+ case POWER_DOMAIN_PORT_DDI_IO_TC4:
+ return "PORT_DDI_IO_TC4";
+ case POWER_DOMAIN_PORT_DDI_IO_TC5:
+ return "PORT_DDI_IO_TC5";
+ case POWER_DOMAIN_PORT_DDI_IO_TC6:
+ return "PORT_DDI_IO_TC6";
+ case POWER_DOMAIN_PORT_DSI:
+ return "PORT_DSI";
+ case POWER_DOMAIN_PORT_CRT:
+ return "PORT_CRT";
+ case POWER_DOMAIN_PORT_OTHER:
+ return "PORT_OTHER";
+ case POWER_DOMAIN_VGA:
+ return "VGA";
+ case POWER_DOMAIN_AUDIO_MMIO:
+ return "AUDIO_MMIO";
+ case POWER_DOMAIN_AUDIO_PLAYBACK:
+ return "AUDIO_PLAYBACK";
+ case POWER_DOMAIN_AUX_A:
+ return "AUX_A";
+ case POWER_DOMAIN_AUX_B:
+ return "AUX_B";
+ case POWER_DOMAIN_AUX_C:
+ return "AUX_C";
+ case POWER_DOMAIN_AUX_D:
+ return "AUX_D";
+ case POWER_DOMAIN_AUX_E:
+ return "AUX_E";
+ case POWER_DOMAIN_AUX_F:
+ return "AUX_F";
+ case POWER_DOMAIN_AUX_USBC1:
+ return "AUX_USBC1";
+ case POWER_DOMAIN_AUX_USBC2:
+ return "AUX_USBC2";
+ case POWER_DOMAIN_AUX_USBC3:
+ return "AUX_USBC3";
+ case POWER_DOMAIN_AUX_USBC4:
+ return "AUX_USBC4";
+ case POWER_DOMAIN_AUX_USBC5:
+ return "AUX_USBC5";
+ case POWER_DOMAIN_AUX_USBC6:
+ return "AUX_USBC6";
+ case POWER_DOMAIN_AUX_IO_A:
+ return "AUX_IO_A";
+ case POWER_DOMAIN_AUX_TBT1:
+ return "AUX_TBT1";
+ case POWER_DOMAIN_AUX_TBT2:
+ return "AUX_TBT2";
+ case POWER_DOMAIN_AUX_TBT3:
+ return "AUX_TBT3";
+ case POWER_DOMAIN_AUX_TBT4:
+ return "AUX_TBT4";
+ case POWER_DOMAIN_AUX_TBT5:
+ return "AUX_TBT5";
+ case POWER_DOMAIN_AUX_TBT6:
+ return "AUX_TBT6";
+ case POWER_DOMAIN_GMBUS:
+ return "GMBUS";
+ case POWER_DOMAIN_INIT:
+ return "INIT";
+ case POWER_DOMAIN_MODESET:
+ return "MODESET";
+ case POWER_DOMAIN_GT_IRQ:
+ return "GT_IRQ";
+ case POWER_DOMAIN_DC_OFF:
+ return "DC_OFF";
+ case POWER_DOMAIN_TC_COLD_OFF:
+ return "TC_COLD_OFF";
+ default:
+ MISSING_CASE(domain);
+ return "?";
+ }
+}
+
+/**
+ * __intel_display_power_is_enabled - unlocked check for a power domain
+ * @dev_priv: i915 device instance
+ * @domain: power domain to check
+ *
+ * This is the unlocked version of intel_display_power_is_enabled() and should
+ * only be used from error capture and recovery code where deadlocks are
+ * possible.
+ *
+ * Returns:
+ * True when the power domain is enabled, false otherwise.
+ */
+bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain)
+{
+ struct i915_power_well *power_well;
+ bool is_enabled;
+
+ if (dev_priv->runtime_pm.suspended)
+ return false;
+
+ is_enabled = true;
+
+ for_each_power_domain_well_reverse(dev_priv, power_well, domain) {
+ if (intel_power_well_is_always_on(power_well))
+ continue;
+
+ if (!intel_power_well_is_enabled_cached(power_well)) {
+ is_enabled = false;
+ break;
+ }
+ }
+
+ return is_enabled;
+}
+
+/**
+ * intel_display_power_is_enabled - check for a power domain
+ * @dev_priv: i915 device instance
+ * @domain: power domain to check
+ *
+ * This function can be used to check the hw power domain state. It is mostly
+ * used in hardware state readout functions. Everywhere else code should rely
+ * upon explicit power domain reference counting to ensure that the hardware
+ * block is powered up before accessing it.
+ *
+ * Callers must hold the relevant modesetting locks to ensure that concurrent
+ * threads can't disable the power well while the caller tries to read a few
+ * registers.
+ *
+ * Returns:
+ * True when the power domain is enabled, false otherwise.
+ */
+bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain)
+{
+ struct i915_power_domains *power_domains;
+ bool ret;
+
+ power_domains = &dev_priv->display.power.domains;
+
+ mutex_lock(&power_domains->lock);
+ ret = __intel_display_power_is_enabled(dev_priv, domain);
+ mutex_unlock(&power_domains->lock);
+
+ return ret;
+}
+
+static u32
+sanitize_target_dc_state(struct drm_i915_private *dev_priv,
+ u32 target_dc_state)
+{
+ static const u32 states[] = {
+ DC_STATE_EN_UPTO_DC6,
+ DC_STATE_EN_UPTO_DC5,
+ DC_STATE_EN_DC3CO,
+ DC_STATE_DISABLE,
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(states) - 1; i++) {
+ if (target_dc_state != states[i])
+ continue;
+
+ if (dev_priv->display.dmc.allowed_dc_mask & target_dc_state)
+ break;
+
+ target_dc_state = states[i + 1];
+ }
+
+ return target_dc_state;
+}
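+
+/*
+ * Illustrative example: if the requested state is DC_STATE_EN_UPTO_DC6
+ * but allowed_dc_mask only permits DC5, the loop above degrades the
+ * request to DC_STATE_EN_UPTO_DC5; with nothing allowed it bottoms out
+ * at DC_STATE_DISABLE.
+ */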
+
+/**
+ * intel_display_power_set_target_dc_state - Set target dc state.
+ * @dev_priv: i915 device
+ * @state: state which needs to be set as target_dc_state.
+ *
+ * This function sets the "DC off" power well's target_dc_state;
+ * based on this target_dc_state, the "DC off" power well will
+ * enable the desired DC state.
+ */
+void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
+ u32 state)
+{
+ struct i915_power_well *power_well;
+ bool dc_off_enabled;
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
+
+ mutex_lock(&power_domains->lock);
+ power_well = lookup_power_well(dev_priv, SKL_DISP_DC_OFF);
+
+ if (drm_WARN_ON(&dev_priv->drm, !power_well))
+ goto unlock;
+
+ state = sanitize_target_dc_state(dev_priv, state);
+
+ if (state == dev_priv->display.dmc.target_dc_state)
+ goto unlock;
+
+ dc_off_enabled = intel_power_well_is_enabled(dev_priv, power_well);
+ /*
+	 * If the DC off power well is currently disabled, it must be enabled
+	 * and then disabled again for the new target DC state to take effect.
+ */
+ if (!dc_off_enabled)
+ intel_power_well_enable(dev_priv, power_well);
+
+ dev_priv->display.dmc.target_dc_state = state;
+
+ if (!dc_off_enabled)
+ intel_power_well_disable(dev_priv, power_well);
+
+unlock:
+ mutex_unlock(&power_domains->lock);
+}
+
+#define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))
+
+static void __async_put_domains_mask(struct i915_power_domains *power_domains,
+ struct intel_power_domain_mask *mask)
+{
+ bitmap_or(mask->bits,
+ power_domains->async_put_domains[0].bits,
+ power_domains->async_put_domains[1].bits,
+ POWER_DOMAIN_NUM);
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+
+static bool
+assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
+{
+ struct drm_i915_private *i915 = container_of(power_domains,
+ struct drm_i915_private,
+ display.power.domains);
+
+ return !drm_WARN_ON(&i915->drm,
+ bitmap_intersects(power_domains->async_put_domains[0].bits,
+ power_domains->async_put_domains[1].bits,
+ POWER_DOMAIN_NUM));
+}
+
+static bool
+__async_put_domains_state_ok(struct i915_power_domains *power_domains)
+{
+ struct drm_i915_private *i915 = container_of(power_domains,
+ struct drm_i915_private,
+ display.power.domains);
+ struct intel_power_domain_mask async_put_mask;
+ enum intel_display_power_domain domain;
+ bool err = false;
+
+ err |= !assert_async_put_domain_masks_disjoint(power_domains);
+ __async_put_domains_mask(power_domains, &async_put_mask);
+ err |= drm_WARN_ON(&i915->drm,
+ !!power_domains->async_put_wakeref !=
+ !bitmap_empty(async_put_mask.bits, POWER_DOMAIN_NUM));
+
+ for_each_power_domain(domain, &async_put_mask)
+ err |= drm_WARN_ON(&i915->drm,
+ power_domains->domain_use_count[domain] != 1);
+
+ return !err;
+}
+
+static void print_power_domains(struct i915_power_domains *power_domains,
+ const char *prefix, struct intel_power_domain_mask *mask)
+{
+ struct drm_i915_private *i915 = container_of(power_domains,
+ struct drm_i915_private,
+ display.power.domains);
+ enum intel_display_power_domain domain;
+
+ drm_dbg(&i915->drm, "%s (%d):\n", prefix, bitmap_weight(mask->bits, POWER_DOMAIN_NUM));
+ for_each_power_domain(domain, mask)
+ drm_dbg(&i915->drm, "%s use_count %d\n",
+ intel_display_power_domain_str(domain),
+ power_domains->domain_use_count[domain]);
+}
+
+static void
+print_async_put_domains_state(struct i915_power_domains *power_domains)
+{
+ struct drm_i915_private *i915 = container_of(power_domains,
+ struct drm_i915_private,
+ display.power.domains);
+
+ drm_dbg(&i915->drm, "async_put_wakeref %u\n",
+ power_domains->async_put_wakeref);
+
+ print_power_domains(power_domains, "async_put_domains[0]",
+ &power_domains->async_put_domains[0]);
+ print_power_domains(power_domains, "async_put_domains[1]",
+ &power_domains->async_put_domains[1]);
+}
+
+static void
+verify_async_put_domains_state(struct i915_power_domains *power_domains)
+{
+ if (!__async_put_domains_state_ok(power_domains))
+ print_async_put_domains_state(power_domains);
+}
+
+#else
+
+static void
+assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
+{
+}
+
+static void
+verify_async_put_domains_state(struct i915_power_domains *power_domains)
+{
+}
+
+#endif /* CONFIG_DRM_I915_DEBUG_RUNTIME_PM */
+
+static void async_put_domains_mask(struct i915_power_domains *power_domains,
+ struct intel_power_domain_mask *mask)
+{
+ assert_async_put_domain_masks_disjoint(power_domains);
+
+ __async_put_domains_mask(power_domains, mask);
+}
+
+static void
+async_put_domains_clear_domain(struct i915_power_domains *power_domains,
+ enum intel_display_power_domain domain)
+{
+ assert_async_put_domain_masks_disjoint(power_domains);
+
+ clear_bit(domain, power_domains->async_put_domains[0].bits);
+ clear_bit(domain, power_domains->async_put_domains[1].bits);
+}
+
+static bool
+intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain)
+{
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
+ struct intel_power_domain_mask async_put_mask;
+ bool ret = false;
+
+ async_put_domains_mask(power_domains, &async_put_mask);
+ if (!test_bit(domain, async_put_mask.bits))
+ goto out_verify;
+
+ async_put_domains_clear_domain(power_domains, domain);
+
+ ret = true;
+
+ async_put_domains_mask(power_domains, &async_put_mask);
+ if (!bitmap_empty(async_put_mask.bits, POWER_DOMAIN_NUM))
+ goto out_verify;
+
+ cancel_delayed_work(&power_domains->async_put_work);
+ intel_runtime_pm_put_raw(&dev_priv->runtime_pm,
+ fetch_and_zero(&power_domains->async_put_wakeref));
+out_verify:
+ verify_async_put_domains_state(power_domains);
+
+ return ret;
+}
+
+static void
+__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain)
+{
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
+ struct i915_power_well *power_well;
+
+ if (intel_display_power_grab_async_put_ref(dev_priv, domain))
+ return;
+
+ for_each_power_domain_well(dev_priv, power_well, domain)
+ intel_power_well_get(dev_priv, power_well);
+
+ power_domains->domain_use_count[domain]++;
+}
+
+/**
+ * intel_display_power_get - grab a power domain reference
+ * @dev_priv: i915 device instance
+ * @domain: power domain to reference
+ *
+ * This function grabs a power domain reference for @domain and ensures that the
+ * power domain and all its parents are powered up. Therefore users should only
+ * grab a reference to the innermost power domain they need.
+ *
+ * Any power domain reference obtained by this function must have a symmetric
+ * call to intel_display_power_put() to release the reference again.
+ */
+intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain)
+{
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
+ intel_wakeref_t wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+
+ mutex_lock(&power_domains->lock);
+ __intel_display_power_get_domain(dev_priv, domain);
+ mutex_unlock(&power_domains->lock);
+
+ return wakeref;
+}
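+
+/*
+ * Illustrative usage sketch (not from the original source): each get must be
+ * balanced by a put with the returned wakeref, e.g.:
+ *
+ *   intel_wakeref_t wakeref;
+ *
+ *   wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO_MMIO);
+ *   ... program the guarded registers ...
+ *   intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO_MMIO, wakeref);
+ *
+ * POWER_DOMAIN_AUDIO_MMIO is only an example domain here.
+ */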
+
+/**
+ * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
+ * @dev_priv: i915 device instance
+ * @domain: power domain to reference
+ *
+ * This function grabs a power domain reference for @domain if the domain is
+ * already enabled, and ensures that the power domain and all its parents
+ * stay powered up; it returns 0 without grabbing a reference if the domain
+ * is not enabled. Therefore users should only grab a reference to the
+ * innermost power domain they need.
+ *
+ * Any power domain reference obtained by this function must have a symmetric
+ * call to intel_display_power_put() to release the reference again.
+ */
+intel_wakeref_t
+intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain)
+{
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
+ intel_wakeref_t wakeref;
+ bool is_enabled;
+
+ wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
+ if (!wakeref)
+ return false;
+
+ mutex_lock(&power_domains->lock);
+
+ if (__intel_display_power_is_enabled(dev_priv, domain)) {
+ __intel_display_power_get_domain(dev_priv, domain);
+ is_enabled = true;
+ } else {
+ is_enabled = false;
+ }
+
+ mutex_unlock(&power_domains->lock);
+
+ if (!is_enabled) {
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+ wakeref = 0;
+ }
+
+ return wakeref;
+}
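+
+/*
+ * Illustrative usage sketch: unlike intel_display_power_get(), this variant
+ * can fail, so callers are expected to bail out on a zero wakeref:
+ *
+ *   wakeref = intel_display_power_get_if_enabled(dev_priv, domain);
+ *   if (!wakeref)
+ *           return;
+ *   ... access the hardware ...
+ *   intel_display_power_put(dev_priv, domain, wakeref);
+ */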
+
+static void
+__intel_display_power_put_domain(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain)
+{
+ struct i915_power_domains *power_domains;
+ struct i915_power_well *power_well;
+ const char *name = intel_display_power_domain_str(domain);
+ struct intel_power_domain_mask async_put_mask;
+
+ power_domains = &dev_priv->display.power.domains;
+
+ drm_WARN(&dev_priv->drm, !power_domains->domain_use_count[domain],
+ "Use count on domain %s is already zero\n",
+ name);
+ async_put_domains_mask(power_domains, &async_put_mask);
+ drm_WARN(&dev_priv->drm,
+ test_bit(domain, async_put_mask.bits),
+ "Async disabling of domain %s is pending\n",
+ name);
+
+ power_domains->domain_use_count[domain]--;
+
+ for_each_power_domain_well_reverse(dev_priv, power_well, domain)
+ intel_power_well_put(dev_priv, power_well);
+}
+
+static void __intel_display_power_put(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain)
+{
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
+
+ mutex_lock(&power_domains->lock);
+ __intel_display_power_put_domain(dev_priv, domain);
+ mutex_unlock(&power_domains->lock);
+}
+
+static void
+queue_async_put_domains_work(struct i915_power_domains *power_domains,
+ intel_wakeref_t wakeref)
+{
+ struct drm_i915_private *i915 = container_of(power_domains,
+ struct drm_i915_private,
+ display.power.domains);
+ drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref);
+ power_domains->async_put_wakeref = wakeref;
+ drm_WARN_ON(&i915->drm, !queue_delayed_work(system_unbound_wq,
+ &power_domains->async_put_work,
+ msecs_to_jiffies(100)));
+}
+
+static void
+release_async_put_domains(struct i915_power_domains *power_domains,
+ struct intel_power_domain_mask *mask)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(power_domains, struct drm_i915_private,
+ display.power.domains);
+ struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
+ enum intel_display_power_domain domain;
+ intel_wakeref_t wakeref;
+
+ /*
+ * The caller must already hold a raw wakeref; upgrade that to a proper
+ * wakeref to make the state checker happy about the HW access during
+ * power well disabling.
+ */
+ assert_rpm_raw_wakeref_held(rpm);
+ wakeref = intel_runtime_pm_get(rpm);
+
+ for_each_power_domain(domain, mask) {
+ /* Clear before put, so put's sanity check is happy. */
+ async_put_domains_clear_domain(power_domains, domain);
+ __intel_display_power_put_domain(dev_priv, domain);
+ }
+
+ intel_runtime_pm_put(rpm, wakeref);
+}
+
+static void
+intel_display_power_put_async_work(struct work_struct *work)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(work, struct drm_i915_private,
+ display.power.domains.async_put_work.work);
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
+ struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
+ intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm);
+ intel_wakeref_t old_work_wakeref = 0;
+
+ mutex_lock(&power_domains->lock);
+
+ /*
+ * Bail out if all the domain refs pending to be released were grabbed
+ * by subsequent gets or a flush_work.
+ */
+ old_work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
+ if (!old_work_wakeref)
+ goto out_verify;
+
+ release_async_put_domains(power_domains,
+ &power_domains->async_put_domains[0]);
+
+ /* Requeue the work if more domains were async put meanwhile. */
+ if (!bitmap_empty(power_domains->async_put_domains[1].bits, POWER_DOMAIN_NUM)) {
+ bitmap_copy(power_domains->async_put_domains[0].bits,
+ power_domains->async_put_domains[1].bits,
+ POWER_DOMAIN_NUM);
+ bitmap_zero(power_domains->async_put_domains[1].bits,
+ POWER_DOMAIN_NUM);
+ queue_async_put_domains_work(power_domains,
+ fetch_and_zero(&new_work_wakeref));
+ } else {
+ /*
+ * Cancel the work that got queued after this one got dequeued,
+ * since here we released the corresponding async-put reference.
+ */
+ cancel_delayed_work(&power_domains->async_put_work);
+ }
+
+out_verify:
+ verify_async_put_domains_state(power_domains);
+
+ mutex_unlock(&power_domains->lock);
+
+ if (old_work_wakeref)
+ intel_runtime_pm_put_raw(rpm, old_work_wakeref);
+ if (new_work_wakeref)
+ intel_runtime_pm_put_raw(rpm, new_work_wakeref);
+}
+
+/**
+ * __intel_display_power_put_async - release a power domain reference asynchronously
+ * @i915: i915 device instance
+ * @domain: power domain to reference
+ * @wakeref: wakeref acquired for the reference that is being released
+ *
+ * This function drops the power domain reference obtained by
+ * intel_display_power_get*() and schedules a work to power down the
+ * corresponding hardware block if this is the last reference.
+ */
+void __intel_display_power_put_async(struct drm_i915_private *i915,
+ enum intel_display_power_domain domain,
+ intel_wakeref_t wakeref)
+{
+ struct i915_power_domains *power_domains = &i915->display.power.domains;
+ struct intel_runtime_pm *rpm = &i915->runtime_pm;
+ intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm);
+
+ mutex_lock(&power_domains->lock);
+
+ if (power_domains->domain_use_count[domain] > 1) {
+ __intel_display_power_put_domain(i915, domain);
+
+ goto out_verify;
+ }
+
+ drm_WARN_ON(&i915->drm, power_domains->domain_use_count[domain] != 1);
+
+ /* Let a pending work requeue itself or queue a new one. */
+ if (power_domains->async_put_wakeref) {
+ set_bit(domain, power_domains->async_put_domains[1].bits);
+ } else {
+ set_bit(domain, power_domains->async_put_domains[0].bits);
+ queue_async_put_domains_work(power_domains,
+ fetch_and_zero(&work_wakeref));
+ }
+
+out_verify:
+ verify_async_put_domains_state(power_domains);
+
+ mutex_unlock(&power_domains->lock);
+
+ if (work_wakeref)
+ intel_runtime_pm_put_raw(rpm, work_wakeref);
+
+ intel_runtime_pm_put(rpm, wakeref);
+}
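+
+/*
+ * Illustrative note: the async put defers the actual power down by roughly
+ * 100 ms (see queue_async_put_domains_work() above), which avoids toggling
+ * power wells on frequent get/put cycles. A path that needs the domain
+ * disabled immediately follows up with intel_display_power_flush_work():
+ *
+ *   intel_display_power_put_async(i915, domain, wakeref);
+ *   ...
+ *   intel_display_power_flush_work(i915);
+ *
+ * intel_display_power_put_async() here refers to the wrapper around this
+ * function declared in the corresponding header.
+ */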
+
+/**
+ * intel_display_power_flush_work - flushes the async display power disabling work
+ * @i915: i915 device instance
+ *
+ * Flushes any pending work that was scheduled by a preceding
+ * intel_display_power_put_async() call, completing the disabling of the
+ * corresponding power domains.
+ *
+ * Note that the work handler function may still be running after this
+ * function returns; to ensure that the work handler isn't running, use
+ * intel_display_power_flush_work_sync() instead.
+ */
+void intel_display_power_flush_work(struct drm_i915_private *i915)
+{
+ struct i915_power_domains *power_domains = &i915->display.power.domains;
+ struct intel_power_domain_mask async_put_mask;
+ intel_wakeref_t work_wakeref;
+
+ mutex_lock(&power_domains->lock);
+
+ work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
+ if (!work_wakeref)
+ goto out_verify;
+
+ async_put_domains_mask(power_domains, &async_put_mask);
+ release_async_put_domains(power_domains, &async_put_mask);
+ cancel_delayed_work(&power_domains->async_put_work);
+
+out_verify:
+ verify_async_put_domains_state(power_domains);
+
+ mutex_unlock(&power_domains->lock);
+
+ if (work_wakeref)
+ intel_runtime_pm_put_raw(&i915->runtime_pm, work_wakeref);
+}
+
+/**
+ * intel_display_power_flush_work_sync - flushes and syncs the async display power disabling work
+ * @i915: i915 device instance
+ *
+ * Like intel_display_power_flush_work(), but also ensure that the work
+ * handler function is not running any more when this function returns.
+ */
+static void
+intel_display_power_flush_work_sync(struct drm_i915_private *i915)
+{
+ struct i915_power_domains *power_domains = &i915->display.power.domains;
+
+ intel_display_power_flush_work(i915);
+ cancel_delayed_work_sync(&power_domains->async_put_work);
+
+ verify_async_put_domains_state(power_domains);
+
+ drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref);
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+/**
+ * intel_display_power_put - release a power domain reference
+ * @dev_priv: i915 device instance
+ * @domain: power domain to reference
+ * @wakeref: wakeref acquired for the reference that is being released
+ *
+ * This function drops the power domain reference obtained by
+ * intel_display_power_get() and might power down the corresponding hardware
+ * block right away if this is the last reference.
+ */
+void intel_display_power_put(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain,
+ intel_wakeref_t wakeref)
+{
+ __intel_display_power_put(dev_priv, domain);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+}
+#else
+/**
+ * intel_display_power_put_unchecked - release an unchecked power domain reference
+ * @dev_priv: i915 device instance
+ * @domain: power domain to reference
+ *
+ * This function drops the power domain reference obtained by
+ * intel_display_power_get() and might power down the corresponding hardware
+ * block right away if this is the last reference.
+ *
+ * This function is only for the power domain code's internal use to suppress wakeref
+ * tracking when the corresponding debug kconfig option is disabled; it should
+ * not be used otherwise.
+ */
+void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain)
+{
+ __intel_display_power_put(dev_priv, domain);
+ intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
+}
+#endif
+
+void
+intel_display_power_get_in_set(struct drm_i915_private *i915,
+ struct intel_display_power_domain_set *power_domain_set,
+ enum intel_display_power_domain domain)
+{
+ intel_wakeref_t __maybe_unused wf;
+
+ drm_WARN_ON(&i915->drm, test_bit(domain, power_domain_set->mask.bits));
+
+ wf = intel_display_power_get(i915, domain);
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+ power_domain_set->wakerefs[domain] = wf;
+#endif
+ set_bit(domain, power_domain_set->mask.bits);
+}
+
+bool
+intel_display_power_get_in_set_if_enabled(struct drm_i915_private *i915,
+ struct intel_display_power_domain_set *power_domain_set,
+ enum intel_display_power_domain domain)
+{
+ intel_wakeref_t wf;
+
+ drm_WARN_ON(&i915->drm, test_bit(domain, power_domain_set->mask.bits));
+
+ wf = intel_display_power_get_if_enabled(i915, domain);
+ if (!wf)
+ return false;
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+ power_domain_set->wakerefs[domain] = wf;
+#endif
+ set_bit(domain, power_domain_set->mask.bits);
+
+ return true;
+}
+
+void
+intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
+ struct intel_display_power_domain_set *power_domain_set,
+ struct intel_power_domain_mask *mask)
+{
+ enum intel_display_power_domain domain;
+
+ drm_WARN_ON(&i915->drm,
+ !bitmap_subset(mask->bits, power_domain_set->mask.bits, POWER_DOMAIN_NUM));
+
+ for_each_power_domain(domain, mask) {
+ intel_wakeref_t __maybe_unused wf = -1;
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+ wf = fetch_and_zero(&power_domain_set->wakerefs[domain]);
+#endif
+ intel_display_power_put(i915, domain, wf);
+ clear_bit(domain, power_domain_set->mask.bits);
+ }
+}
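+
+/*
+ * Illustrative usage sketch, assuming the intel_display_power_put_all_in_set()
+ * wrapper from the corresponding header: the *_in_set variants record the
+ * acquired references in a domain set so they can be released in bulk:
+ *
+ *   struct intel_display_power_domain_set set = {};
+ *
+ *   intel_display_power_get_in_set(i915, &set, POWER_DOMAIN_PIPE_A);
+ *   ...
+ *   intel_display_power_put_all_in_set(i915, &set);
+ */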
+
+static int
+sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
+ int disable_power_well)
+{
+ if (disable_power_well >= 0)
+ return !!disable_power_well;
+
+ return 1;
+}
+
+static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
+ int enable_dc)
+{
+ u32 mask;
+ int requested_dc;
+ int max_dc;
+
+ if (!HAS_DISPLAY(dev_priv))
+ return 0;
+
+ if (IS_DG2(dev_priv))
+ max_dc = 1;
+ else if (IS_DG1(dev_priv))
+ max_dc = 3;
+ else if (DISPLAY_VER(dev_priv) >= 12)
+ max_dc = 4;
+ else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ max_dc = 1;
+ else if (DISPLAY_VER(dev_priv) >= 9)
+ max_dc = 2;
+ else
+ max_dc = 0;
+
+ /*
+ * DC9 has a separate HW flow from the rest of the DC states,
+ * not depending on the DMC firmware. It's needed by system
+ * suspend/resume, so allow it unconditionally.
+ */
+ mask = IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ||
+ DISPLAY_VER(dev_priv) >= 11 ?
+ DC_STATE_EN_DC9 : 0;
+
+ if (!dev_priv->params.disable_power_well)
+ max_dc = 0;
+
+ if (enable_dc >= 0 && enable_dc <= max_dc) {
+ requested_dc = enable_dc;
+ } else if (enable_dc == -1) {
+ requested_dc = max_dc;
+ } else if (enable_dc > max_dc && enable_dc <= 4) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Adjusting requested max DC state (%d->%d)\n",
+ enable_dc, max_dc);
+ requested_dc = max_dc;
+ } else {
+ drm_err(&dev_priv->drm,
+ "Unexpected value for enable_dc (%d)\n", enable_dc);
+ requested_dc = max_dc;
+ }
+
+ switch (requested_dc) {
+ case 4:
+ mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6;
+ break;
+ case 3:
+ mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC5;
+ break;
+ case 2:
+ mask |= DC_STATE_EN_UPTO_DC6;
+ break;
+ case 1:
+ mask |= DC_STATE_EN_UPTO_DC5;
+ break;
+ }
+
+ drm_dbg_kms(&dev_priv->drm, "Allowed DC state mask %02x\n", mask);
+
+ return mask;
+}
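+
+/*
+ * Worked example (illustrative, assuming the default disable_power_well
+ * setting): on a DISPLAY_VER 12 platform with enable_dc=4, max_dc is 4 and
+ * the returned mask is DC_STATE_EN_DC9 | DC_STATE_EN_DC3CO |
+ * DC_STATE_EN_UPTO_DC6; with enable_dc=0 only the unconditional DC9 bit
+ * remains.
+ */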
+
+/**
+ * intel_power_domains_init - initializes the power domain structures
+ * @dev_priv: i915 device instance
+ *
+ * Initializes the power domain structures for @dev_priv depending upon the
+ * supported platform.
+ */
+int intel_power_domains_init(struct drm_i915_private *dev_priv)
+{
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
+
+ dev_priv->params.disable_power_well =
+ sanitize_disable_power_well_option(dev_priv,
+ dev_priv->params.disable_power_well);
+ dev_priv->display.dmc.allowed_dc_mask =
+ get_allowed_dc_mask(dev_priv, dev_priv->params.enable_dc);
+
+ dev_priv->display.dmc.target_dc_state =
+ sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
+
+ mutex_init(&power_domains->lock);
+
+ INIT_DELAYED_WORK(&power_domains->async_put_work,
+ intel_display_power_put_async_work);
+
+ return intel_display_power_map_init(power_domains);
+}
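+
+/*
+ * Note (illustrative): the two values sanitized above are driven by the
+ * i915.disable_power_well and i915.enable_dc module parameters, so e.g.
+ * booting with i915.enable_dc=2 caps the mask computed by
+ * get_allowed_dc_mask() at DC6 on platforms whose max_dc allows it.
+ */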
+
+/**
+ * intel_power_domains_cleanup - clean up power domains resources
+ * @dev_priv: i915 device instance
+ *
+ * Release any resources acquired by intel_power_domains_init()
+ */
+void intel_power_domains_cleanup(struct drm_i915_private *dev_priv)
+{
+ intel_display_power_map_cleanup(&dev_priv->display.power.domains);
+}
+
+static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
+{
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
+ struct i915_power_well *power_well;
+
+ mutex_lock(&power_domains->lock);
+ for_each_power_well(dev_priv, power_well)
+ intel_power_well_sync_hw(dev_priv, power_well);
+ mutex_unlock(&power_domains->lock);
+}
+
+static void gen9_dbuf_slice_set(struct drm_i915_private *dev_priv,
+ enum dbuf_slice slice, bool enable)
+{
+ i915_reg_t reg = DBUF_CTL_S(slice);
+ bool state;
+
+ intel_de_rmw(dev_priv, reg, DBUF_POWER_REQUEST,
+ enable ? DBUF_POWER_REQUEST : 0);
+ intel_de_posting_read(dev_priv, reg);
+ udelay(10);
+
+ state = intel_de_read(dev_priv, reg) & DBUF_POWER_STATE;
+ drm_WARN(&dev_priv->drm, enable != state,
+ "DBuf slice %d power %s timeout!\n",
+ slice, str_enable_disable(enable));
+}
+
+void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv,
+ u8 req_slices)
+{
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
+ u8 slice_mask = INTEL_INFO(dev_priv)->display.dbuf.slice_mask;
+ enum dbuf_slice slice;
+
+ drm_WARN(&dev_priv->drm, req_slices & ~slice_mask,
+ "Invalid set of dbuf slices (0x%x) requested (total dbuf slices 0x%x)\n",
+ req_slices, slice_mask);
+
+ drm_dbg_kms(&dev_priv->drm, "Updating dbuf slices to 0x%x\n",
+ req_slices);
+
+ /*
+ * This might run in parallel to gen9_dc_off_power_well_enable()
+ * being called from intel_dp_detect(), for instance. Without the
+ * lock, gen9_assert_dbuf_enabled() could preempt us after the
+ * registers were updated but before dev_priv was, triggering a
+ * race-condition assertion.
+ */
+ mutex_lock(&power_domains->lock);
+
+ for_each_dbuf_slice(dev_priv, slice)
+ gen9_dbuf_slice_set(dev_priv, slice, req_slices & BIT(slice));
+
+ dev_priv->display.dbuf.enabled_slices = req_slices;
+
+ mutex_unlock(&power_domains->lock);
+}
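+
+/*
+ * Illustrative usage sketch: req_slices is a bitmask of dbuf slices, so
+ * enabling the first two slices would look like:
+ *
+ *   gen9_dbuf_slices_update(dev_priv, BIT(DBUF_S1) | BIT(DBUF_S2));
+ *
+ * Requesting a slice outside the platform's dbuf slice_mask triggers the
+ * drm_WARN above.
+ */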
+
+static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
+{
+ dev_priv->display.dbuf.enabled_slices =
+ intel_enabled_dbuf_slices_mask(dev_priv);
+
+ /*
+ * Just power up at least one slice; we will figure out later which
+ * slices we have and what we need.
+ */
+ gen9_dbuf_slices_update(dev_priv, BIT(DBUF_S1) |
+ dev_priv->display.dbuf.enabled_slices);
+}
+
+static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
+{
+ gen9_dbuf_slices_update(dev_priv, 0);
+}
+
+static void gen12_dbuf_slices_config(struct drm_i915_private *dev_priv)
+{
+ enum dbuf_slice slice;
+
+ if (IS_ALDERLAKE_P(dev_priv))
+ return;
+
+ for_each_dbuf_slice(dev_priv, slice)
+ intel_de_rmw(dev_priv, DBUF_CTL_S(slice),
+ DBUF_TRACKER_STATE_SERVICE_MASK,
+ DBUF_TRACKER_STATE_SERVICE(8));
+}
+
+static void icl_mbus_init(struct drm_i915_private *dev_priv)
+{
+ unsigned long abox_regs = INTEL_INFO(dev_priv)->display.abox_mask;
+ u32 mask, val, i;
+
+ if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
+ return;
+
+ mask = MBUS_ABOX_BT_CREDIT_POOL1_MASK |
+ MBUS_ABOX_BT_CREDIT_POOL2_MASK |
+ MBUS_ABOX_B_CREDIT_MASK |
+ MBUS_ABOX_BW_CREDIT_MASK;
+ val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
+ MBUS_ABOX_BT_CREDIT_POOL2(16) |
+ MBUS_ABOX_B_CREDIT(1) |
+ MBUS_ABOX_BW_CREDIT(1);
+
+ /*
+ * gen12 platforms that use abox1 and abox2 for pixel data reads still
+ * expect us to program the abox_ctl0 register as well, even though
+ * we don't have to program other instance-0 registers like BW_BUDDY.
+ */
+ if (DISPLAY_VER(dev_priv) == 12)
+ abox_regs |= BIT(0);
+
+ for_each_set_bit(i, &abox_regs, sizeof(abox_regs))
+ intel_de_rmw(dev_priv, MBUS_ABOX_CTL(i), mask, val);
+}
+
+static void hsw_assert_cdclk(struct drm_i915_private *dev_priv)
+{
+ u32 val = intel_de_read(dev_priv, LCPLL_CTL);
+
+ /*
+ * The LCPLL register should be turned on by the BIOS. For now
+ * let's just check its state and print errors in case
+ * something is wrong. Don't even try to turn it on.
+ */
+
+ if (val & LCPLL_CD_SOURCE_FCLK)
+ drm_err(&dev_priv->drm, "CDCLK source is not LCPLL\n");
+
+ if (val & LCPLL_PLL_DISABLE)
+ drm_err(&dev_priv->drm, "LCPLL is disabled\n");
+
+ if ((val & LCPLL_REF_MASK) != LCPLL_REF_NON_SSC)
+ drm_err(&dev_priv->drm, "LCPLL not using non-SSC reference\n");
+}
+
+static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = &dev_priv->drm;
+ struct intel_crtc *crtc;
+
+ for_each_intel_crtc(dev, crtc)
+ I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
+ pipe_name(crtc->pipe));
+
+ I915_STATE_WARN(intel_de_read(dev_priv, HSW_PWR_WELL_CTL2),
+ "Display power well on\n");
+ I915_STATE_WARN(intel_de_read(dev_priv, SPLL_CTL) & SPLL_PLL_ENABLE,
+ "SPLL enabled\n");
+ I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(0)) & WRPLL_PLL_ENABLE,
+ "WRPLL1 enabled\n");
+ I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(1)) & WRPLL_PLL_ENABLE,
+ "WRPLL2 enabled\n");
+ I915_STATE_WARN(intel_de_read(dev_priv, PP_STATUS(0)) & PP_ON,
+ "Panel power on\n");
+ I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
+ "CPU PWM1 enabled\n");
+ if (IS_HASWELL(dev_priv))
+ I915_STATE_WARN(intel_de_read(dev_priv, HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
+ "CPU PWM2 enabled\n");
+ I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
+ "PCH PWM1 enabled\n");
+ I915_STATE_WARN(intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
+ "Utility pin enabled\n");
+ I915_STATE_WARN(intel_de_read(dev_priv, PCH_GTC_CTL) & PCH_GTC_ENABLE,
+ "PCH GTC enabled\n");
+
+ /*
+ * In theory we can still leave IRQs enabled, as long as only the HPD
+ * interrupts remain enabled. We used to check for that, but since it's
+ * gen-specific and since we only disable LCPLL after we fully disable
+ * the interrupts, the check below should be enough.
+ */
+ I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
+}
+
+static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv)
+{
+ if (IS_HASWELL(dev_priv))
+ return intel_de_read(dev_priv, D_COMP_HSW);
+ else
+ return intel_de_read(dev_priv, D_COMP_BDW);
+}
+
+static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val)
+{
+ if (IS_HASWELL(dev_priv)) {
+ if (snb_pcode_write(&dev_priv->uncore, GEN6_PCODE_WRITE_D_COMP, val))
+ drm_dbg_kms(&dev_priv->drm,
+ "Failed to write to D_COMP\n");
+ } else {
+ intel_de_write(dev_priv, D_COMP_BDW, val);
+ intel_de_posting_read(dev_priv, D_COMP_BDW);
+ }
+}
+
+/*
+ * This function implements pieces of two sequences from BSpec:
+ * - Sequence for display software to disable LCPLL
+ * - Sequence for display software to allow package C8+
+ * The steps implemented here are just the steps that actually touch the LCPLL
+ * register. Callers should take care of disabling all the display engine
+ * functions, doing the mode unset, fixing interrupts, etc.
+ */
+static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
+ bool switch_to_fclk, bool allow_power_down)
+{
+ u32 val;
+
+ assert_can_disable_lcpll(dev_priv);
+
+ val = intel_de_read(dev_priv, LCPLL_CTL);
+
+ if (switch_to_fclk) {
+ val |= LCPLL_CD_SOURCE_FCLK;
+ intel_de_write(dev_priv, LCPLL_CTL, val);
+
+ if (wait_for_us(intel_de_read(dev_priv, LCPLL_CTL) &
+ LCPLL_CD_SOURCE_FCLK_DONE, 1))
+ drm_err(&dev_priv->drm, "Switching to FCLK failed\n");
+
+ val = intel_de_read(dev_priv, LCPLL_CTL);
+ }
+
+ val |= LCPLL_PLL_DISABLE;
+ intel_de_write(dev_priv, LCPLL_CTL, val);
+ intel_de_posting_read(dev_priv, LCPLL_CTL);
+
+ if (intel_de_wait_for_clear(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 1))
+ drm_err(&dev_priv->drm, "LCPLL still locked\n");
+
+ val = hsw_read_dcomp(dev_priv);
+ val |= D_COMP_COMP_DISABLE;
+ hsw_write_dcomp(dev_priv, val);
+ ndelay(100);
+
+ if (wait_for((hsw_read_dcomp(dev_priv) &
+ D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
+ drm_err(&dev_priv->drm, "D_COMP RCOMP still in progress\n");
+
+ if (allow_power_down) {
+ val = intel_de_read(dev_priv, LCPLL_CTL);
+ val |= LCPLL_POWER_DOWN_ALLOW;
+ intel_de_write(dev_priv, LCPLL_CTL, val);
+ intel_de_posting_read(dev_priv, LCPLL_CTL);
+ }
+}
+
+/*
+ * Fully restores LCPLL, disallowing power down and switching back to LCPLL
+ * source.
+ */
+static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
+{
+ u32 val;
+
+ val = intel_de_read(dev_priv, LCPLL_CTL);
+
+ if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
+ LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
+ return;
+
+ /*
+ * Make sure we're not in PC8 state before disabling PC8, otherwise
+ * we'll hang the machine. To prevent PC8 state, just enable force_wake.
+ */
+ intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
+
+ if (val & LCPLL_POWER_DOWN_ALLOW) {
+ val &= ~LCPLL_POWER_DOWN_ALLOW;
+ intel_de_write(dev_priv, LCPLL_CTL, val);
+ intel_de_posting_read(dev_priv, LCPLL_CTL);
+ }
+
+ val = hsw_read_dcomp(dev_priv);
+ val |= D_COMP_COMP_FORCE;
+ val &= ~D_COMP_COMP_DISABLE;
+ hsw_write_dcomp(dev_priv, val);
+
+ val = intel_de_read(dev_priv, LCPLL_CTL);
+ val &= ~LCPLL_PLL_DISABLE;
+ intel_de_write(dev_priv, LCPLL_CTL, val);
+
+ if (intel_de_wait_for_set(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 5))
+ drm_err(&dev_priv->drm, "LCPLL not locked yet\n");
+
+ if (val & LCPLL_CD_SOURCE_FCLK) {
+ val = intel_de_read(dev_priv, LCPLL_CTL);
+ val &= ~LCPLL_CD_SOURCE_FCLK;
+ intel_de_write(dev_priv, LCPLL_CTL, val);
+
+ if (wait_for_us((intel_de_read(dev_priv, LCPLL_CTL) &
+ LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
+ drm_err(&dev_priv->drm,
+ "Switching back to LCPLL failed\n");
+ }
+
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
+
+ intel_update_cdclk(dev_priv);
+ intel_cdclk_dump_config(dev_priv, &dev_priv->display.cdclk.hw, "Current CDCLK");
+}
+
+/*
+ * Package states C8 and deeper are really deep PC states that can only be
+ * reached when all the devices on the system allow it, so even if the graphics
+ * device allows PC8+, it doesn't mean the system will actually get to these
+ * states. Our driver only allows PC8+ when going into runtime PM.
+ *
+ * The requirements for PC8+ are that all the outputs are disabled, the power
+ * well is disabled and most interrupts are disabled, and these are also
+ * requirements for runtime PM. When these conditions are met, we manually do
+ * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
+ * to Fclk. If we're in PC8+ and we get a non-hotplug interrupt, we can hard
+ * hang the machine.
+ *
+ * When we really reach PC8 or deeper states (not just when we allow it) we lose
+ * the state of some registers, so when we come back from PC8+ we need to
+ * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
+ * need to take care of the registers kept by RC6. Notice that this happens even
+ * if we don't put the device in PCI D3 state (which is what currently happens
+ * because of the runtime PM support).
+ *
+ * For more, read "Display Sequences for Package C8" in the hardware
+ * documentation.
+ */
+static void hsw_enable_pc8(struct drm_i915_private *dev_priv)
+{
+ u32 val;
+
+ drm_dbg_kms(&dev_priv->drm, "Enabling package C8+\n");
+
+ if (HAS_PCH_LPT_LP(dev_priv)) {
+ val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D);
+ val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
+ intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val);
+ }
+
+ lpt_disable_clkout_dp(dev_priv);
+ hsw_disable_lcpll(dev_priv, true, true);
+}
+
+static void hsw_disable_pc8(struct drm_i915_private *dev_priv)
+{
+ u32 val;
+
+ drm_dbg_kms(&dev_priv->drm, "Disabling package C8+\n");
+
+ hsw_restore_lcpll(dev_priv);
+ intel_init_pch_refclk(dev_priv);
+
+ if (HAS_PCH_LPT_LP(dev_priv)) {
+ val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D);
+ val |= PCH_LP_PARTITION_LEVEL_DISABLE;
+ intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val);
+ }
+}
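+
+/*
+ * Note (illustrative): in this file the PC8 pair above is driven by the
+ * suspend/resume helpers further below; for instance on HSW/BDW
+ * intel_display_power_suspend() calls hsw_enable_pc8() and
+ * intel_display_power_resume_early() calls hsw_disable_pc8().
+ */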
+
+static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
+ bool enable)
+{
+ i915_reg_t reg;
+ u32 reset_bits, val;
+
+ if (IS_IVYBRIDGE(dev_priv)) {
+ reg = GEN7_MSG_CTL;
+ reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK;
+ } else {
+ reg = HSW_NDE_RSTWRN_OPT;
+ reset_bits = RESET_PCH_HANDSHAKE_ENABLE;
+ }
+
+ if (DISPLAY_VER(dev_priv) >= 14)
+ reset_bits |= MTL_RESET_PICA_HANDSHAKE_EN;
+
+ val = intel_de_read(dev_priv, reg);
+
+ if (enable)
+ val |= reset_bits;
+ else
+ val &= ~reset_bits;
+
+ intel_de_write(dev_priv, reg, val);
+}
+
+static void skl_display_core_init(struct drm_i915_private *dev_priv,
+ bool resume)
+{
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
+ struct i915_power_well *well;
+
+ gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+
+ /* enable PCH reset handshake */
+ intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
+
+ if (!HAS_DISPLAY(dev_priv))
+ return;
+
+ /* enable PG1 and Misc I/O */
+ mutex_lock(&power_domains->lock);
+
+ well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
+ intel_power_well_enable(dev_priv, well);
+
+ well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
+ intel_power_well_enable(dev_priv, well);
+
+ mutex_unlock(&power_domains->lock);
+
+ intel_cdclk_init_hw(dev_priv);
+
+ gen9_dbuf_enable(dev_priv);
+
+ if (resume)
+ intel_dmc_load_program(dev_priv);
+}
+
+static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
+{
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
+ struct i915_power_well *well;
+
+ if (!HAS_DISPLAY(dev_priv))
+ return;
+
+ gen9_disable_dc_states(dev_priv);
+ /* TODO: disable DMC program */
+
+ gen9_dbuf_disable(dev_priv);
+
+ intel_cdclk_uninit_hw(dev_priv);
+
+ /* The spec doesn't call for removing the reset handshake flag */
+ /* disable PG1 and Misc I/O */
+
+ mutex_lock(&power_domains->lock);
+
+ /*
+ * BSpec says to keep the MISC IO power well enabled here, only
+ * remove our request for power well 1.
+ * Note that even though the driver's request is removed, power well 1
+ * may stay enabled after this due to DMC's own request on it.
+ */
+ well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
+ intel_power_well_disable(dev_priv, well);
+
+ mutex_unlock(&power_domains->lock);
+
+ usleep_range(10, 30); /* 10 us delay per Bspec */
+}
+
+static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume)
+{
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
+ struct i915_power_well *well;
+
+ gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+
+ /*
+ * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
+ * or else the reset will hang because there is no PCH to respond.
+ * Program the handshake here as part of the initialization
+ * sequence; previously this was left up to the BIOS.
+ */
+ intel_pch_reset_handshake(dev_priv, false);
+
+ if (!HAS_DISPLAY(dev_priv))
+ return;
+
+ /* Enable PG1 */
+ mutex_lock(&power_domains->lock);
+
+ well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
+ intel_power_well_enable(dev_priv, well);
+
+ mutex_unlock(&power_domains->lock);
+
+ intel_cdclk_init_hw(dev_priv);
+
+ gen9_dbuf_enable(dev_priv);
+
+ if (resume)
+ intel_dmc_load_program(dev_priv);
+}
+
+static void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
+{
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
+ struct i915_power_well *well;
+
+ if (!HAS_DISPLAY(dev_priv))
+ return;
+
+ gen9_disable_dc_states(dev_priv);
+ /* TODO: disable DMC program */
+
+ gen9_dbuf_disable(dev_priv);
+
+ intel_cdclk_uninit_hw(dev_priv);
+
+ /* The spec doesn't call for removing the reset handshake flag */
+
+ /*
+ * Disable PW1 (PG1).
+ * Note that even though the driver's request is removed, power well 1
+ * may stay enabled after this due to DMC's own request on it.
+ */
+ mutex_lock(&power_domains->lock);
+
+ well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
+ intel_power_well_disable(dev_priv, well);
+
+ mutex_unlock(&power_domains->lock);
+
+ usleep_range(10, 30); /* 10 us delay per Bspec */
+}
+
+struct buddy_page_mask {
+ u32 page_mask;
+ u8 type;
+ u8 num_channels;
+};
+
+static const struct buddy_page_mask tgl_buddy_page_masks[] = {
+ { .num_channels = 1, .type = INTEL_DRAM_DDR4, .page_mask = 0xF },
+ { .num_channels = 1, .type = INTEL_DRAM_DDR5, .page_mask = 0xF },
+ { .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1C },
+ { .num_channels = 2, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x1C },
+ { .num_channels = 2, .type = INTEL_DRAM_DDR4, .page_mask = 0x1F },
+ { .num_channels = 2, .type = INTEL_DRAM_DDR5, .page_mask = 0x1E },
+ { .num_channels = 4, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x38 },
+ { .num_channels = 4, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x38 },
+ {}
+};
+
+static const struct buddy_page_mask wa_1409767108_buddy_page_masks[] = {
+ { .num_channels = 1, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1 },
+ { .num_channels = 1, .type = INTEL_DRAM_DDR4, .page_mask = 0x1 },
+ { .num_channels = 1, .type = INTEL_DRAM_DDR5, .page_mask = 0x1 },
+ { .num_channels = 1, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x1 },
+ { .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x3 },
+ { .num_channels = 2, .type = INTEL_DRAM_DDR4, .page_mask = 0x3 },
+ { .num_channels = 2, .type = INTEL_DRAM_DDR5, .page_mask = 0x3 },
+ { .num_channels = 2, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x3 },
+ {}
+};
+
+static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
+{
+ enum intel_dram_type type = dev_priv->dram_info.type;
+ u8 num_channels = dev_priv->dram_info.num_channels;
+ const struct buddy_page_mask *table;
+ unsigned long abox_mask = INTEL_INFO(dev_priv)->display.abox_mask;
+ int config, i;
+
+ /* BW_BUDDY registers are not used on dgpu's beyond DG1 */
+ if (IS_DGFX(dev_priv) && !IS_DG1(dev_priv))
+ return;
+
+ if (IS_ALDERLAKE_S(dev_priv) ||
+ IS_DG1_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) ||
+ IS_RKL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) ||
+ IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0))
+ /* Wa_1409767108:tgl,dg1,adl-s */
+ table = wa_1409767108_buddy_page_masks;
+ else
+ table = tgl_buddy_page_masks;
+
+ for (config = 0; table[config].page_mask != 0; config++)
+ if (table[config].num_channels == num_channels &&
+ table[config].type == type)
+ break;
+
+ if (table[config].page_mask == 0) {
+ drm_dbg(&dev_priv->drm,
+ "Unknown memory configuration; disabling address buddy logic.\n");
+ for_each_set_bit(i, &abox_mask, sizeof(abox_mask))
+ intel_de_write(dev_priv, BW_BUDDY_CTL(i),
+ BW_BUDDY_DISABLE);
+ } else {
+ for_each_set_bit(i, &abox_mask, sizeof(abox_mask)) {
+ intel_de_write(dev_priv, BW_BUDDY_PAGE_MASK(i),
+ table[config].page_mask);
+
+ /* Wa_22010178259:tgl,dg1,rkl,adl-s */
+ if (DISPLAY_VER(dev_priv) == 12)
+ intel_de_rmw(dev_priv, BW_BUDDY_CTL(i),
+ BW_BUDDY_TLB_REQ_TIMER_MASK,
+ BW_BUDDY_TLB_REQ_TIMER(0x8));
+ }
+ }
+}
+
+static void icl_display_core_init(struct drm_i915_private *dev_priv,
+ bool resume)
+{
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
+ struct i915_power_well *well;
+ u32 val;
+
+ gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+
+ /* Wa_14011294188:ehl,jsl,tgl,rkl,adl-s */
+ if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP &&
+ INTEL_PCH_TYPE(dev_priv) < PCH_DG1)
+ intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D, 0,
+ PCH_DPMGUNIT_CLOCK_GATE_DISABLE);
+
+ /* 1. Enable PCH reset handshake. */
+ intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
+
+ if (!HAS_DISPLAY(dev_priv))
+ return;
+
+ /* 2. Initialize all combo phys */
+ intel_combo_phy_init(dev_priv);
+
+ /*
+ * 3. Enable Power Well 1 (PG1).
+ * The AUX IO power wells will be enabled on demand.
+ */
+ mutex_lock(&power_domains->lock);
+ well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
+ intel_power_well_enable(dev_priv, well);
+ mutex_unlock(&power_domains->lock);
+
+ /* 4. Enable CDCLK. */
+ intel_cdclk_init_hw(dev_priv);
+
+ if (DISPLAY_VER(dev_priv) >= 12)
+ gen12_dbuf_slices_config(dev_priv);
+
+ /* 5. Enable DBUF. */
+ gen9_dbuf_enable(dev_priv);
+
+ /* 6. Setup MBUS. */
+ icl_mbus_init(dev_priv);
+
+ /* 7. Program arbiter BW_BUDDY registers */
+ if (DISPLAY_VER(dev_priv) >= 12)
+ tgl_bw_buddy_init(dev_priv);
+
+ /* 8. Ensure PHYs have completed calibration and adaptation */
+ if (IS_DG2(dev_priv))
+ intel_snps_phy_wait_for_calibration(dev_priv);
+
+ if (resume)
+ intel_dmc_load_program(dev_priv);
+
+ /* Wa_14011508470:tgl,dg1,rkl,adl-s,adl-p */
+ if (DISPLAY_VER(dev_priv) >= 12) {
+ val = DCPR_CLEAR_MEMSTAT_DIS | DCPR_SEND_RESP_IMM |
+ DCPR_MASK_LPMODE | DCPR_MASK_MAXLATENCY_MEMUP_CLR;
+ intel_uncore_rmw(&dev_priv->uncore, GEN11_CHICKEN_DCPR_2, 0, val);
+ }
+
+ /* Wa_14011503030:xelpd */
+ if (DISPLAY_VER(dev_priv) >= 13)
+ intel_de_write(dev_priv, XELPD_DISPLAY_ERR_FATAL_MASK, ~0);
+}
+
+static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
+{
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
+ struct i915_power_well *well;
+
+ if (!HAS_DISPLAY(dev_priv))
+ return;
+
+ gen9_disable_dc_states(dev_priv);
+ intel_dmc_disable_program(dev_priv);
+
+ /* 1. Disable all display engine functions -> already done */
+
+ /* 2. Disable DBUF */
+ gen9_dbuf_disable(dev_priv);
+
+ /* 3. Disable CD clock */
+ intel_cdclk_uninit_hw(dev_priv);
+
+ /*
+ * 4. Disable Power Well 1 (PG1).
+ * The AUX IO power wells are toggled on demand, so they are already
+ * disabled at this point.
+ */
+ mutex_lock(&power_domains->lock);
+ well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
+ intel_power_well_disable(dev_priv, well);
+ mutex_unlock(&power_domains->lock);
+
+ /* 5. Uninitialize all combo phys. */
+ intel_combo_phy_uninit(dev_priv);
+}
+
+static void chv_phy_control_init(struct drm_i915_private *dev_priv)
+{
+ struct i915_power_well *cmn_bc =
+ lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
+ struct i915_power_well *cmn_d =
+ lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
+
+ /*
+ * DISPLAY_PHY_CONTROL can get corrupted if read. As a
+ * workaround never ever read DISPLAY_PHY_CONTROL, and
+ * instead maintain a shadow copy ourselves. Use the actual
+ * power well state and lane status to reconstruct the
+ * expected initial value.
+ */
+ dev_priv->display.power.chv_phy_control =
+ PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
+ PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
+ PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
+ PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
+ PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);
+
+ /*
+ * If all lanes are disabled we leave the override disabled
+ * with all power down bits cleared to match the state we
+ * would use after disabling the port. Otherwise enable the
+ * override and set the lane powerdown bits according to the
+ * current lane status.
+ */
+ if (intel_power_well_is_enabled(dev_priv, cmn_bc)) {
+ u32 status = intel_de_read(dev_priv, DPLL(PIPE_A));
+ unsigned int mask;
+
+ mask = status & DPLL_PORTB_READY_MASK;
+ if (mask == 0xf)
+ mask = 0x0;
+ else
+ dev_priv->display.power.chv_phy_control |=
+ PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);
+
+ dev_priv->display.power.chv_phy_control |=
+ PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);
+
+ mask = (status & DPLL_PORTC_READY_MASK) >> 4;
+ if (mask == 0xf)
+ mask = 0x0;
+ else
+ dev_priv->display.power.chv_phy_control |=
+ PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);
+
+ dev_priv->display.power.chv_phy_control |=
+ PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);
+
+ dev_priv->display.power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);
+
+ dev_priv->display.power.chv_phy_assert[DPIO_PHY0] = false;
+ } else {
+ dev_priv->display.power.chv_phy_assert[DPIO_PHY0] = true;
+ }
+
+ if (intel_power_well_is_enabled(dev_priv, cmn_d)) {
+ u32 status = intel_de_read(dev_priv, DPIO_PHY_STATUS);
+ unsigned int mask;
+
+ mask = status & DPLL_PORTD_READY_MASK;
+
+ if (mask == 0xf)
+ mask = 0x0;
+ else
+ dev_priv->display.power.chv_phy_control |=
+ PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);
+
+ dev_priv->display.power.chv_phy_control |=
+ PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);
+
+ dev_priv->display.power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);
+
+ dev_priv->display.power.chv_phy_assert[DPIO_PHY1] = false;
+ } else {
+ dev_priv->display.power.chv_phy_assert[DPIO_PHY1] = true;
+ }
+
+ drm_dbg_kms(&dev_priv->drm, "Initial PHY_CONTROL=0x%08x\n",
+ dev_priv->display.power.chv_phy_control);
+
+ /* Defer application of initial phy_control to enabling the powerwell */
+}
+
+static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
+{
+ struct i915_power_well *cmn =
+ lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
+ struct i915_power_well *disp2d =
+ lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D);
+
+ /* If the display might already be active, skip this */
+ if (intel_power_well_is_enabled(dev_priv, cmn) &&
+ intel_power_well_is_enabled(dev_priv, disp2d) &&
+ intel_de_read(dev_priv, DPIO_CTL) & DPIO_CMNRST)
+ return;
+
+ drm_dbg_kms(&dev_priv->drm, "toggling display PHY side reset\n");
+
+ /* cmnlane needs DPLL registers */
+ intel_power_well_enable(dev_priv, disp2d);
+
+ /*
+ * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
+ * Need to assert and de-assert PHY SB reset by gating the
+ * common lane power, then un-gating it.
+ * Simply ungating isn't enough to reset the PHY enough to get
+ * ports and lanes running.
+ */
+ intel_power_well_disable(dev_priv, cmn);
+}
+
+static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0)
+{
+ bool ret;
+
+ vlv_punit_get(dev_priv);
+ ret = (vlv_punit_read(dev_priv, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE;
+ vlv_punit_put(dev_priv);
+
+ return ret;
+}
+
+static void assert_ved_power_gated(struct drm_i915_private *dev_priv)
+{
+ drm_WARN(&dev_priv->drm,
+ !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0),
+ "VED not power gated\n");
+}
+
+static void assert_isp_power_gated(struct drm_i915_private *dev_priv)
+{
+ static const struct pci_device_id isp_ids[] = {
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f38)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x22b8)},
+ {}
+ };
+
+ drm_WARN(&dev_priv->drm, !pci_dev_present(isp_ids) &&
+ !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0),
+ "ISP not power gated\n");
+}
+
+static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
+
+/**
+ * intel_power_domains_init_hw - initialize hardware power domain state
+ * @i915: i915 device instance
+ * @resume: true if called from a resume code path
+ *
+ * This function initializes the hardware power domain state and enables all
+ * power wells belonging to the INIT power domain. Power wells in other
+ * domains (and not in the INIT domain) are referenced or disabled by
+ * intel_modeset_readout_hw_state(). After that the reference count of each
+ * power well must match its HW enabled state, see
+ * intel_power_domains_verify_state().
+ *
+ * It will return with power domains disabled (to be enabled later by
+ * intel_power_domains_enable()) and must be paired with
+ * intel_power_domains_driver_remove().
+ */
+void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
+{
+ struct i915_power_domains *power_domains = &i915->display.power.domains;
+
+ power_domains->initializing = true;
+
+ if (DISPLAY_VER(i915) >= 11) {
+ icl_display_core_init(i915, resume);
+ } else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
+ bxt_display_core_init(i915, resume);
+ } else if (DISPLAY_VER(i915) == 9) {
+ skl_display_core_init(i915, resume);
+ } else if (IS_CHERRYVIEW(i915)) {
+ mutex_lock(&power_domains->lock);
+ chv_phy_control_init(i915);
+ mutex_unlock(&power_domains->lock);
+ assert_isp_power_gated(i915);
+ } else if (IS_VALLEYVIEW(i915)) {
+ mutex_lock(&power_domains->lock);
+ vlv_cmnlane_wa(i915);
+ mutex_unlock(&power_domains->lock);
+ assert_ved_power_gated(i915);
+ assert_isp_power_gated(i915);
+ } else if (IS_BROADWELL(i915) || IS_HASWELL(i915)) {
+ hsw_assert_cdclk(i915);
+ intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
+ } else if (IS_IVYBRIDGE(i915)) {
+ intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
+ }
+
+ /*
+ * Keep all power wells enabled for any dependent HW access during
+ * initialization and to make sure we keep BIOS enabled display HW
+ * resources powered until display HW readout is complete. We drop
+ * this reference in intel_power_domains_enable().
+ */
+ drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
+ power_domains->init_wakeref =
+ intel_display_power_get(i915, POWER_DOMAIN_INIT);
+
+ /*
+ * If the user disabled power well support, take an extra reference to
+ * keep all power wells enabled.
+ */
+ if (!i915->params.disable_power_well) {
+ drm_WARN_ON(&i915->drm, power_domains->disable_wakeref);
+ i915->display.power.domains.disable_wakeref = intel_display_power_get(i915,
+ POWER_DOMAIN_INIT);
+ }
+ intel_power_domains_sync_hw(i915);
+
+ power_domains->initializing = false;
+}
+
+/**
+ * intel_power_domains_driver_remove - deinitialize hw power domain state
+ * @i915: i915 device instance
+ *
+ * De-initializes the display power domain HW state. It also ensures that the
+ * device stays powered up so that the driver can be reloaded.
+ *
+ * It must be called with power domains already disabled (after a call to
+ * intel_power_domains_disable()) and must be paired with
+ * intel_power_domains_init_hw().
+ */
+void intel_power_domains_driver_remove(struct drm_i915_private *i915)
+{
+ intel_wakeref_t wakeref __maybe_unused =
+ fetch_and_zero(&i915->display.power.domains.init_wakeref);
+
+ /* Remove the refcount we took to keep power well support disabled. */
+ if (!i915->params.disable_power_well)
+ intel_display_power_put(i915, POWER_DOMAIN_INIT,
+ fetch_and_zero(&i915->display.power.domains.disable_wakeref));
+
+ intel_display_power_flush_work_sync(i915);
+
+ intel_power_domains_verify_state(i915);
+
+ /* Keep the power well enabled, but cancel its rpm wakeref. */
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+}
+
+/**
+ * intel_power_domains_sanitize_state - sanitize power domains state
+ * @i915: i915 device instance
+ *
+ * Sanitize the power domains state during driver loading and system resume.
+ * The function will disable all display power wells that BIOS has enabled
+ * without a user for them (any user of a power well has taken a reference
+ * on it by the time this function is called, after the state of all the
+ * pipe, encoder, etc. HW resources have been sanitized).
+ */
+void intel_power_domains_sanitize_state(struct drm_i915_private *i915)
+{
+ struct i915_power_domains *power_domains = &i915->display.power.domains;
+ struct i915_power_well *power_well;
+
+ mutex_lock(&power_domains->lock);
+
+ for_each_power_well_reverse(i915, power_well) {
+ if (power_well->desc->always_on || power_well->count ||
+ !intel_power_well_is_enabled(i915, power_well))
+ continue;
+
+ drm_dbg_kms(&i915->drm,
+ "BIOS left unused %s power well enabled, disabling it\n",
+ intel_power_well_name(power_well));
+ intel_power_well_disable(i915, power_well);
+ }
+
+ mutex_unlock(&power_domains->lock);
+}
+
+/**
+ * intel_power_domains_enable - enable toggling of display power wells
+ * @i915: i915 device instance
+ *
+ * Enable the on-demand enabling/disabling of the display power wells. Note that
+ * power wells not belonging to POWER_DOMAIN_INIT are allowed to be toggled
+ * only at specific points of the display modeset sequence, thus they are not
+ * affected by the intel_power_domains_enable()/disable() calls. The purpose
+ * of this function is to keep the rest of the power wells enabled until the
+ * end of display HW readout (which will acquire the power references
+ * reflecting the current HW state).
+ */
+void intel_power_domains_enable(struct drm_i915_private *i915)
+{
+ intel_wakeref_t wakeref __maybe_unused =
+ fetch_and_zero(&i915->display.power.domains.init_wakeref);
+
+ intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
+ intel_power_domains_verify_state(i915);
+}
+
+/**
+ * intel_power_domains_disable - disable toggling of display power wells
+ * @i915: i915 device instance
+ *
+ * Disable the on-demand enabling/disabling of the display power wells. See
+ * intel_power_domains_enable() for which power wells this call controls.
+ */
+void intel_power_domains_disable(struct drm_i915_private *i915)
+{
+ struct i915_power_domains *power_domains = &i915->display.power.domains;
+
+ drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
+ power_domains->init_wakeref =
+ intel_display_power_get(i915, POWER_DOMAIN_INIT);
+
+ intel_power_domains_verify_state(i915);
+}
+
+/**
+ * intel_power_domains_suspend - suspend power domain state
+ * @i915: i915 device instance
+ * @suspend_mode: specifies the target suspend state (idle, mem, hibernation)
+ *
+ * This function prepares the hardware power domain state before entering
+ * system suspend.
+ *
+ * It must be called with power domains already disabled (after a call to
+ * intel_power_domains_disable()) and paired with intel_power_domains_resume().
+ */
+void intel_power_domains_suspend(struct drm_i915_private *i915,
+ enum i915_drm_suspend_mode suspend_mode)
+{
+ struct i915_power_domains *power_domains = &i915->display.power.domains;
+ intel_wakeref_t wakeref __maybe_unused =
+ fetch_and_zero(&power_domains->init_wakeref);
+
+ intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
+
+ /*
+ * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9
+ * support, don't manually deinit the power domains. This also means the
+ * DMC firmware will stay active, it will power down any HW
+ * resources as required and also enable deeper system power states
+ * that would be blocked if the firmware was inactive.
+ */
+ if (!(i915->display.dmc.allowed_dc_mask & DC_STATE_EN_DC9) &&
+ suspend_mode == I915_DRM_SUSPEND_IDLE &&
+ intel_dmc_has_payload(i915)) {
+ intel_display_power_flush_work(i915);
+ intel_power_domains_verify_state(i915);
+ return;
+ }
+
+ /*
+ * Even if power well support was disabled we still want to disable
+ * power wells if power domains must be deinitialized for suspend.
+ */
+ if (!i915->params.disable_power_well)
+ intel_display_power_put(i915, POWER_DOMAIN_INIT,
+ fetch_and_zero(&i915->display.power.domains.disable_wakeref));
+
+ intel_display_power_flush_work(i915);
+ intel_power_domains_verify_state(i915);
+
+ if (DISPLAY_VER(i915) >= 11)
+ icl_display_core_uninit(i915);
+ else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915))
+ bxt_display_core_uninit(i915);
+ else if (DISPLAY_VER(i915) == 9)
+ skl_display_core_uninit(i915);
+
+ power_domains->display_core_suspended = true;
+}
+
+/**
+ * intel_power_domains_resume - resume power domain state
+ * @i915: i915 device instance
+ *
+ * This function resumes the hardware power domain state during system resume.
+ *
+ * It will return with power domain support disabled (to be enabled later by
+ * intel_power_domains_enable()) and must be paired with
+ * intel_power_domains_suspend().
+ */
+void intel_power_domains_resume(struct drm_i915_private *i915)
+{
+ struct i915_power_domains *power_domains = &i915->display.power.domains;
+
+ if (power_domains->display_core_suspended) {
+ intel_power_domains_init_hw(i915, true);
+ power_domains->display_core_suspended = false;
+ } else {
+ drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
+ power_domains->init_wakeref =
+ intel_display_power_get(i915, POWER_DOMAIN_INIT);
+ }
+
+ intel_power_domains_verify_state(i915);
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+
+static void intel_power_domains_dump_info(struct drm_i915_private *i915)
+{
+ struct i915_power_domains *power_domains = &i915->display.power.domains;
+ struct i915_power_well *power_well;
+
+ for_each_power_well(i915, power_well) {
+ enum intel_display_power_domain domain;
+
+ drm_dbg(&i915->drm, "%-25s %d\n",
+ intel_power_well_name(power_well), intel_power_well_refcount(power_well));
+
+ for_each_power_domain(domain, intel_power_well_domains(power_well))
+ drm_dbg(&i915->drm, " %-23s %d\n",
+ intel_display_power_domain_str(domain),
+ power_domains->domain_use_count[domain]);
+ }
+}
+
+/**
+ * intel_power_domains_verify_state - verify the HW/SW state for all power wells
+ * @i915: i915 device instance
+ *
+ * Verify if the reference count of each power well matches its HW enabled
+ * state and the total refcount of the domains it belongs to. This must be
+ * called after modeset HW state sanitization, which is responsible for
+ * acquiring reference counts for any power wells in use and disabling the
+ * ones left on by BIOS but not required by any active output.
+ */
+static void intel_power_domains_verify_state(struct drm_i915_private *i915)
+{
+ struct i915_power_domains *power_domains = &i915->display.power.domains;
+ struct i915_power_well *power_well;
+ bool dump_domain_info;
+
+ mutex_lock(&power_domains->lock);
+
+ verify_async_put_domains_state(power_domains);
+
+ dump_domain_info = false;
+ for_each_power_well(i915, power_well) {
+ enum intel_display_power_domain domain;
+ int domains_count;
+ bool enabled;
+
+ enabled = intel_power_well_is_enabled(i915, power_well);
+ if ((intel_power_well_refcount(power_well) ||
+ intel_power_well_is_always_on(power_well)) !=
+ enabled)
+ drm_err(&i915->drm,
+ "power well %s state mismatch (refcount %d/enabled %d)",
+ intel_power_well_name(power_well),
+ intel_power_well_refcount(power_well), enabled);
+
+ domains_count = 0;
+ for_each_power_domain(domain, intel_power_well_domains(power_well))
+ domains_count += power_domains->domain_use_count[domain];
+
+ if (intel_power_well_refcount(power_well) != domains_count) {
+ drm_err(&i915->drm,
+ "power well %s refcount/domain refcount mismatch "
+ "(refcount %d/domains refcount %d)\n",
+ intel_power_well_name(power_well),
+ intel_power_well_refcount(power_well),
+ domains_count);
+ dump_domain_info = true;
+ }
+ }
+
+ if (dump_domain_info) {
+ static bool dumped;
+
+ if (!dumped) {
+ intel_power_domains_dump_info(i915);
+ dumped = true;
+ }
+ }
+
+ mutex_unlock(&power_domains->lock);
+}
+
+#else
+
+static void intel_power_domains_verify_state(struct drm_i915_private *i915)
+{
+}
+
+#endif
+
+void intel_display_power_suspend_late(struct drm_i915_private *i915)
+{
+ if (DISPLAY_VER(i915) >= 11 || IS_GEMINILAKE(i915) ||
+ IS_BROXTON(i915)) {
+ bxt_enable_dc9(i915);
+ } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
+ hsw_enable_pc8(i915);
+ }
+
+ /* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
+ if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1)
+ intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
+}
+
+void intel_display_power_resume_early(struct drm_i915_private *i915)
+{
+ if (DISPLAY_VER(i915) >= 11 || IS_GEMINILAKE(i915) ||
+ IS_BROXTON(i915)) {
+ gen9_sanitize_dc_state(i915);
+ bxt_disable_dc9(i915);
+ } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
+ hsw_disable_pc8(i915);
+ }
+
+ /* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
+ if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1)
+ intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0);
+}
+
+void intel_display_power_suspend(struct drm_i915_private *i915)
+{
+ if (DISPLAY_VER(i915) >= 11) {
+ icl_display_core_uninit(i915);
+ bxt_enable_dc9(i915);
+ } else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
+ bxt_display_core_uninit(i915);
+ bxt_enable_dc9(i915);
+ } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
+ hsw_enable_pc8(i915);
+ }
+}
+
+void intel_display_power_resume(struct drm_i915_private *i915)
+{
+ if (DISPLAY_VER(i915) >= 11) {
+ bxt_disable_dc9(i915);
+ icl_display_core_init(i915, true);
+ if (intel_dmc_has_payload(i915)) {
+ if (i915->display.dmc.allowed_dc_mask &
+ DC_STATE_EN_UPTO_DC6)
+ skl_enable_dc6(i915);
+ else if (i915->display.dmc.allowed_dc_mask &
+ DC_STATE_EN_UPTO_DC5)
+ gen9_enable_dc5(i915);
+ }
+ } else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
+ bxt_disable_dc9(i915);
+ bxt_display_core_init(i915, true);
+ if (intel_dmc_has_payload(i915) &&
+ (i915->display.dmc.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
+ gen9_enable_dc5(i915);
+ } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
+ hsw_disable_pc8(i915);
+ }
+}
+
+void intel_display_power_debug(struct drm_i915_private *i915, struct seq_file *m)
+{
+ struct i915_power_domains *power_domains = &i915->display.power.domains;
+ int i;
+
+ mutex_lock(&power_domains->lock);
+
+ seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
+ for (i = 0; i < power_domains->power_well_count; i++) {
+ struct i915_power_well *power_well;
+ enum intel_display_power_domain power_domain;
+
+ power_well = &power_domains->power_wells[i];
+ seq_printf(m, "%-25s %d\n", intel_power_well_name(power_well),
+ intel_power_well_refcount(power_well));
+
+ for_each_power_domain(power_domain, intel_power_well_domains(power_well))
+ seq_printf(m, " %-23s %d\n",
+ intel_display_power_domain_str(power_domain),
+ power_domains->domain_use_count[power_domain]);
+ }
+
+ mutex_unlock(&power_domains->lock);
+}
+
+struct intel_ddi_port_domains {
+ enum port port_start;
+ enum port port_end;
+ enum aux_ch aux_ch_start;
+ enum aux_ch aux_ch_end;
+
+ enum intel_display_power_domain ddi_lanes;
+ enum intel_display_power_domain ddi_io;
+ enum intel_display_power_domain aux_legacy_usbc;
+ enum intel_display_power_domain aux_tbt;
+};
+
+static const struct intel_ddi_port_domains
+i9xx_port_domains[] = {
+ {
+ .port_start = PORT_A,
+ .port_end = PORT_F,
+ .aux_ch_start = AUX_CH_A,
+ .aux_ch_end = AUX_CH_F,
+
+ .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A,
+ .ddi_io = POWER_DOMAIN_PORT_DDI_IO_A,
+ .aux_legacy_usbc = POWER_DOMAIN_AUX_A,
+ .aux_tbt = POWER_DOMAIN_INVALID,
+ },
+};
+
+static const struct intel_ddi_port_domains
+d11_port_domains[] = {
+ {
+ .port_start = PORT_A,
+ .port_end = PORT_B,
+ .aux_ch_start = AUX_CH_A,
+ .aux_ch_end = AUX_CH_B,
+
+ .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A,
+ .ddi_io = POWER_DOMAIN_PORT_DDI_IO_A,
+ .aux_legacy_usbc = POWER_DOMAIN_AUX_A,
+ .aux_tbt = POWER_DOMAIN_INVALID,
+ }, {
+ .port_start = PORT_C,
+ .port_end = PORT_F,
+ .aux_ch_start = AUX_CH_C,
+ .aux_ch_end = AUX_CH_F,
+
+ .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_C,
+ .ddi_io = POWER_DOMAIN_PORT_DDI_IO_C,
+ .aux_legacy_usbc = POWER_DOMAIN_AUX_C,
+ .aux_tbt = POWER_DOMAIN_AUX_TBT1,
+ },
+};
+
+static const struct intel_ddi_port_domains
+d12_port_domains[] = {
+ {
+ .port_start = PORT_A,
+ .port_end = PORT_C,
+ .aux_ch_start = AUX_CH_A,
+ .aux_ch_end = AUX_CH_C,
+
+ .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A,
+ .ddi_io = POWER_DOMAIN_PORT_DDI_IO_A,
+ .aux_legacy_usbc = POWER_DOMAIN_AUX_A,
+ .aux_tbt = POWER_DOMAIN_INVALID,
+ }, {
+ .port_start = PORT_TC1,
+ .port_end = PORT_TC6,
+ .aux_ch_start = AUX_CH_USBC1,
+ .aux_ch_end = AUX_CH_USBC6,
+
+ .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_TC1,
+ .ddi_io = POWER_DOMAIN_PORT_DDI_IO_TC1,
+ .aux_legacy_usbc = POWER_DOMAIN_AUX_USBC1,
+ .aux_tbt = POWER_DOMAIN_AUX_TBT1,
+ },
+};
+
+static const struct intel_ddi_port_domains
+d13_port_domains[] = {
+ {
+ .port_start = PORT_A,
+ .port_end = PORT_C,
+ .aux_ch_start = AUX_CH_A,
+ .aux_ch_end = AUX_CH_C,
+
+ .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A,
+ .ddi_io = POWER_DOMAIN_PORT_DDI_IO_A,
+ .aux_legacy_usbc = POWER_DOMAIN_AUX_A,
+ .aux_tbt = POWER_DOMAIN_INVALID,
+ }, {
+ .port_start = PORT_TC1,
+ .port_end = PORT_TC4,
+ .aux_ch_start = AUX_CH_USBC1,
+ .aux_ch_end = AUX_CH_USBC4,
+
+ .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_TC1,
+ .ddi_io = POWER_DOMAIN_PORT_DDI_IO_TC1,
+ .aux_legacy_usbc = POWER_DOMAIN_AUX_USBC1,
+ .aux_tbt = POWER_DOMAIN_AUX_TBT1,
+ }, {
+ .port_start = PORT_D_XELPD,
+ .port_end = PORT_E_XELPD,
+ .aux_ch_start = AUX_CH_D_XELPD,
+ .aux_ch_end = AUX_CH_E_XELPD,
+
+ .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_D,
+ .ddi_io = POWER_DOMAIN_PORT_DDI_IO_D,
+ .aux_legacy_usbc = POWER_DOMAIN_AUX_D,
+ .aux_tbt = POWER_DOMAIN_INVALID,
+ },
+};
+
+static void
+intel_port_domains_for_platform(struct drm_i915_private *i915,
+ const struct intel_ddi_port_domains **domains,
+ int *domains_size)
+{
+ if (DISPLAY_VER(i915) >= 13) {
+ *domains = d13_port_domains;
+ *domains_size = ARRAY_SIZE(d13_port_domains);
+ } else if (DISPLAY_VER(i915) >= 12) {
+ *domains = d12_port_domains;
+ *domains_size = ARRAY_SIZE(d12_port_domains);
+ } else if (DISPLAY_VER(i915) >= 11) {
+ *domains = d11_port_domains;
+ *domains_size = ARRAY_SIZE(d11_port_domains);
+ } else {
+ *domains = i9xx_port_domains;
+ *domains_size = ARRAY_SIZE(i9xx_port_domains);
+ }
+}
+
+static const struct intel_ddi_port_domains *
+intel_port_domains_for_port(struct drm_i915_private *i915, enum port port)
+{
+ const struct intel_ddi_port_domains *domains;
+ int domains_size;
+ int i;
+
+ intel_port_domains_for_platform(i915, &domains, &domains_size);
+ for (i = 0; i < domains_size; i++)
+ if (port >= domains[i].port_start && port <= domains[i].port_end)
+ return &domains[i];
+
+ return NULL;
+}
+
+enum intel_display_power_domain
+intel_display_power_ddi_io_domain(struct drm_i915_private *i915, enum port port)
+{
+ const struct intel_ddi_port_domains *domains = intel_port_domains_for_port(i915, port);
+
+ if (drm_WARN_ON(&i915->drm, !domains || domains->ddi_io == POWER_DOMAIN_INVALID))
+ return POWER_DOMAIN_PORT_DDI_IO_A;
+
+ return domains->ddi_io + (int)(port - domains->port_start);
+}
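+
+/*
+ * Example (illustrative): with the d12_port_domains table above, a TC port
+ * maps as
+ *
+ *     POWER_DOMAIN_PORT_DDI_IO_TC1 + (PORT_TC2 - PORT_TC1)
+ *         == POWER_DOMAIN_PORT_DDI_IO_TC2
+ *
+ * which relies on the TC domain enum values being consecutive.
+ */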
+
+enum intel_display_power_domain
+intel_display_power_ddi_lanes_domain(struct drm_i915_private *i915, enum port port)
+{
+ const struct intel_ddi_port_domains *domains = intel_port_domains_for_port(i915, port);
+
+ if (drm_WARN_ON(&i915->drm, !domains || domains->ddi_lanes == POWER_DOMAIN_INVALID))
+ return POWER_DOMAIN_PORT_DDI_LANES_A;
+
+ return domains->ddi_lanes + (int)(port - domains->port_start);
+}
+
+static const struct intel_ddi_port_domains *
+intel_port_domains_for_aux_ch(struct drm_i915_private *i915, enum aux_ch aux_ch)
+{
+ const struct intel_ddi_port_domains *domains;
+ int domains_size;
+ int i;
+
+ intel_port_domains_for_platform(i915, &domains, &domains_size);
+ for (i = 0; i < domains_size; i++)
+ if (aux_ch >= domains[i].aux_ch_start && aux_ch <= domains[i].aux_ch_end)
+ return &domains[i];
+
+ return NULL;
+}
+
+enum intel_display_power_domain
+intel_display_power_legacy_aux_domain(struct drm_i915_private *i915, enum aux_ch aux_ch)
+{
+ const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(i915, aux_ch);
+
+ if (drm_WARN_ON(&i915->drm, !domains || domains->aux_legacy_usbc == POWER_DOMAIN_INVALID))
+ return POWER_DOMAIN_AUX_A;
+
+ return domains->aux_legacy_usbc + (int)(aux_ch - domains->aux_ch_start);
+}
+
+enum intel_display_power_domain
+intel_display_power_tbt_aux_domain(struct drm_i915_private *i915, enum aux_ch aux_ch)
+{
+ const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(i915, aux_ch);
+
+ if (drm_WARN_ON(&i915->drm, !domains || domains->aux_tbt == POWER_DOMAIN_INVALID))
+ return POWER_DOMAIN_AUX_TBT1;
+
+ return domains->aux_tbt + (int)(aux_ch - domains->aux_ch_start);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h
new file mode 100644
index 000000000..7136ea3f2
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_power.h
@@ -0,0 +1,279 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_DISPLAY_POWER_H__
+#define __INTEL_DISPLAY_POWER_H__
+
+#include "intel_runtime_pm.h"
+
+enum aux_ch;
+enum dpio_channel;
+enum dpio_phy;
+enum port;
+struct drm_i915_private;
+struct i915_power_well;
+struct intel_encoder;
+
+/*
+ * Keep the pipe, transcoder, port (DDI_LANES, DDI_IO, AUX) domain instances
+ * consecutive, so that the pipe/transcoder/port -> power domain macros
+ * work correctly.
+ */
+enum intel_display_power_domain {
+ POWER_DOMAIN_DISPLAY_CORE,
+ POWER_DOMAIN_PIPE_A,
+ POWER_DOMAIN_PIPE_B,
+ POWER_DOMAIN_PIPE_C,
+ POWER_DOMAIN_PIPE_D,
+ POWER_DOMAIN_PIPE_PANEL_FITTER_A,
+ POWER_DOMAIN_PIPE_PANEL_FITTER_B,
+ POWER_DOMAIN_PIPE_PANEL_FITTER_C,
+ POWER_DOMAIN_PIPE_PANEL_FITTER_D,
+ POWER_DOMAIN_TRANSCODER_A,
+ POWER_DOMAIN_TRANSCODER_B,
+ POWER_DOMAIN_TRANSCODER_C,
+ POWER_DOMAIN_TRANSCODER_D,
+ POWER_DOMAIN_TRANSCODER_EDP,
+ POWER_DOMAIN_TRANSCODER_DSI_A,
+ POWER_DOMAIN_TRANSCODER_DSI_C,
+
+ /* VDSC/joining for eDP/DSI transcoder (ICL) or pipe A (TGL) */
+ POWER_DOMAIN_TRANSCODER_VDSC_PW2,
+
+ POWER_DOMAIN_PORT_DDI_LANES_A,
+ POWER_DOMAIN_PORT_DDI_LANES_B,
+ POWER_DOMAIN_PORT_DDI_LANES_C,
+ POWER_DOMAIN_PORT_DDI_LANES_D,
+ POWER_DOMAIN_PORT_DDI_LANES_E,
+ POWER_DOMAIN_PORT_DDI_LANES_F,
+
+ POWER_DOMAIN_PORT_DDI_LANES_TC1,
+ POWER_DOMAIN_PORT_DDI_LANES_TC2,
+ POWER_DOMAIN_PORT_DDI_LANES_TC3,
+ POWER_DOMAIN_PORT_DDI_LANES_TC4,
+ POWER_DOMAIN_PORT_DDI_LANES_TC5,
+ POWER_DOMAIN_PORT_DDI_LANES_TC6,
+
+ POWER_DOMAIN_PORT_DDI_IO_A,
+ POWER_DOMAIN_PORT_DDI_IO_B,
+ POWER_DOMAIN_PORT_DDI_IO_C,
+ POWER_DOMAIN_PORT_DDI_IO_D,
+ POWER_DOMAIN_PORT_DDI_IO_E,
+ POWER_DOMAIN_PORT_DDI_IO_F,
+
+ POWER_DOMAIN_PORT_DDI_IO_TC1,
+ POWER_DOMAIN_PORT_DDI_IO_TC2,
+ POWER_DOMAIN_PORT_DDI_IO_TC3,
+ POWER_DOMAIN_PORT_DDI_IO_TC4,
+ POWER_DOMAIN_PORT_DDI_IO_TC5,
+ POWER_DOMAIN_PORT_DDI_IO_TC6,
+
+ POWER_DOMAIN_PORT_DSI,
+ POWER_DOMAIN_PORT_CRT,
+ POWER_DOMAIN_PORT_OTHER,
+ POWER_DOMAIN_VGA,
+ POWER_DOMAIN_AUDIO_MMIO,
+ POWER_DOMAIN_AUDIO_PLAYBACK,
+ POWER_DOMAIN_AUX_A,
+ POWER_DOMAIN_AUX_B,
+ POWER_DOMAIN_AUX_C,
+ POWER_DOMAIN_AUX_D,
+ POWER_DOMAIN_AUX_E,
+ POWER_DOMAIN_AUX_F,
+
+ POWER_DOMAIN_AUX_USBC1,
+ POWER_DOMAIN_AUX_USBC2,
+ POWER_DOMAIN_AUX_USBC3,
+ POWER_DOMAIN_AUX_USBC4,
+ POWER_DOMAIN_AUX_USBC5,
+ POWER_DOMAIN_AUX_USBC6,
+
+ POWER_DOMAIN_AUX_IO_A,
+
+ POWER_DOMAIN_AUX_TBT1,
+ POWER_DOMAIN_AUX_TBT2,
+ POWER_DOMAIN_AUX_TBT3,
+ POWER_DOMAIN_AUX_TBT4,
+ POWER_DOMAIN_AUX_TBT5,
+ POWER_DOMAIN_AUX_TBT6,
+
+ POWER_DOMAIN_GMBUS,
+ POWER_DOMAIN_MODESET,
+ POWER_DOMAIN_GT_IRQ,
+ POWER_DOMAIN_DC_OFF,
+ POWER_DOMAIN_TC_COLD_OFF,
+ POWER_DOMAIN_INIT,
+
+ POWER_DOMAIN_NUM,
+ POWER_DOMAIN_INVALID = POWER_DOMAIN_NUM,
+};
+
+#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
+#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
+ ((pipe) + POWER_DOMAIN_PIPE_PANEL_FITTER_A)
+#define POWER_DOMAIN_TRANSCODER(tran) \
+ ((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \
+ (tran) + POWER_DOMAIN_TRANSCODER_A)
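+
+/*
+ * Example (illustrative): these macros depend on the consecutive enum
+ * layout noted above, e.g. POWER_DOMAIN_PIPE(PIPE_B) evaluates to
+ * POWER_DOMAIN_PIPE_A + PIPE_B == POWER_DOMAIN_PIPE_B.
+ */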
+
+struct intel_power_domain_mask {
+ DECLARE_BITMAP(bits, POWER_DOMAIN_NUM);
+};
+
+struct i915_power_domains {
+ /*
+ * Power wells needed for initialization at driver init and suspend
+ * time are on. They are kept on until after the first modeset.
+ */
+ bool initializing;
+ bool display_core_suspended;
+ int power_well_count;
+
+ intel_wakeref_t init_wakeref;
+ intel_wakeref_t disable_wakeref;
+
+ struct mutex lock;
+ int domain_use_count[POWER_DOMAIN_NUM];
+
+ struct delayed_work async_put_work;
+ intel_wakeref_t async_put_wakeref;
+ struct intel_power_domain_mask async_put_domains[2];
+
+ struct i915_power_well *power_wells;
+};
+
+struct intel_display_power_domain_set {
+ struct intel_power_domain_mask mask;
+#ifdef CONFIG_DRM_I915_DEBUG_RUNTIME_PM
+ intel_wakeref_t wakerefs[POWER_DOMAIN_NUM];
+#endif
+};
+
+#define for_each_power_domain(__domain, __mask) \
+ for ((__domain) = 0; (__domain) < POWER_DOMAIN_NUM; (__domain)++) \
+ for_each_if(test_bit((__domain), (__mask)->bits))
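+
+/*
+ * Usage sketch (illustrative): walk every domain set in a power well's
+ * domain mask, as the debug and state verification code does:
+ *
+ *     enum intel_display_power_domain domain;
+ *
+ *     for_each_power_domain(domain, intel_power_well_domains(power_well))
+ *         drm_dbg(&i915->drm, "%s\n",
+ *                 intel_display_power_domain_str(domain));
+ */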
+
+int intel_power_domains_init(struct drm_i915_private *dev_priv);
+void intel_power_domains_cleanup(struct drm_i915_private *dev_priv);
+void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume);
+void intel_power_domains_driver_remove(struct drm_i915_private *dev_priv);
+void intel_power_domains_enable(struct drm_i915_private *dev_priv);
+void intel_power_domains_disable(struct drm_i915_private *dev_priv);
+void intel_power_domains_suspend(struct drm_i915_private *dev_priv,
+ enum i915_drm_suspend_mode);
+void intel_power_domains_resume(struct drm_i915_private *dev_priv);
+void intel_power_domains_sanitize_state(struct drm_i915_private *dev_priv);
+
+void intel_display_power_suspend_late(struct drm_i915_private *i915);
+void intel_display_power_resume_early(struct drm_i915_private *i915);
+void intel_display_power_suspend(struct drm_i915_private *i915);
+void intel_display_power_resume(struct drm_i915_private *i915);
+void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
+ u32 state);
+
+const char *
+intel_display_power_domain_str(enum intel_display_power_domain domain);
+
+bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain);
+bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain);
+intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain);
+intel_wakeref_t
+intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain);
+void __intel_display_power_put_async(struct drm_i915_private *i915,
+ enum intel_display_power_domain domain,
+ intel_wakeref_t wakeref);
+void intel_display_power_flush_work(struct drm_i915_private *i915);
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+void intel_display_power_put(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain,
+ intel_wakeref_t wakeref);
+static inline void
+intel_display_power_put_async(struct drm_i915_private *i915,
+ enum intel_display_power_domain domain,
+ intel_wakeref_t wakeref)
+{
+ __intel_display_power_put_async(i915, domain, wakeref);
+}
+#else
+void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain);
+
+static inline void
+intel_display_power_put(struct drm_i915_private *i915,
+ enum intel_display_power_domain domain,
+ intel_wakeref_t wakeref)
+{
+ intel_display_power_put_unchecked(i915, domain);
+}
+
+static inline void
+intel_display_power_put_async(struct drm_i915_private *i915,
+ enum intel_display_power_domain domain,
+ intel_wakeref_t wakeref)
+{
+ __intel_display_power_put_async(i915, domain, -1);
+}
+#endif
+
+void
+intel_display_power_get_in_set(struct drm_i915_private *i915,
+ struct intel_display_power_domain_set *power_domain_set,
+ enum intel_display_power_domain domain);
+
+bool
+intel_display_power_get_in_set_if_enabled(struct drm_i915_private *i915,
+ struct intel_display_power_domain_set *power_domain_set,
+ enum intel_display_power_domain domain);
+
+void
+intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
+ struct intel_display_power_domain_set *power_domain_set,
+ struct intel_power_domain_mask *mask);
+
+static inline void
+intel_display_power_put_all_in_set(struct drm_i915_private *i915,
+ struct intel_display_power_domain_set *power_domain_set)
+{
+ intel_display_power_put_mask_in_set(i915, power_domain_set, &power_domain_set->mask);
+}
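+
+/*
+ * Usage sketch (illustrative): collect domain references in a set and
+ * release them all at once:
+ *
+ *     struct intel_display_power_domain_set power_domain_set = {};
+ *
+ *     intel_display_power_get_in_set(i915, &power_domain_set,
+ *                                    POWER_DOMAIN_PIPE_A);
+ *     ...
+ *     intel_display_power_put_all_in_set(i915, &power_domain_set);
+ */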
+
+void intel_display_power_debug(struct drm_i915_private *i915, struct seq_file *m);
+
+enum intel_display_power_domain
+intel_display_power_ddi_lanes_domain(struct drm_i915_private *i915, enum port port);
+enum intel_display_power_domain
+intel_display_power_ddi_io_domain(struct drm_i915_private *i915, enum port port);
+enum intel_display_power_domain
+intel_display_power_legacy_aux_domain(struct drm_i915_private *i915, enum aux_ch aux_ch);
+enum intel_display_power_domain
+intel_display_power_tbt_aux_domain(struct drm_i915_private *i915, enum aux_ch aux_ch);
+
+/*
+ * FIXME: We should probably switch this to a 0-based scheme to be consistent
+ * with how we now name/number DBUF_CTL instances.
+ */
+enum dbuf_slice {
+ DBUF_S1,
+ DBUF_S2,
+ DBUF_S3,
+ DBUF_S4,
+ I915_MAX_DBUF_SLICES
+};
+
+void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv,
+ u8 req_slices);
+
+#define with_intel_display_power(i915, domain, wf) \
+ for ((wf) = intel_display_power_get((i915), (domain)); (wf); \
+ intel_display_power_put_async((i915), (domain), (wf)), (wf) = 0)
+
+#define with_intel_display_power_if_enabled(i915, domain, wf) \
+ for ((wf) = intel_display_power_get_if_enabled((i915), (domain)); (wf); \
+ intel_display_power_put_async((i915), (domain), (wf)), (wf) = 0)
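+
+/*
+ * Usage sketch (illustrative; do_something_with_hw() is a hypothetical
+ * helper): the wakeref is acquired on loop entry and released
+ * asynchronously on exit, so the body runs at most once with the domain
+ * powered:
+ *
+ *     intel_wakeref_t wakeref;
+ *
+ *     with_intel_display_power(i915, POWER_DOMAIN_INIT, wakeref)
+ *         do_something_with_hw(i915);
+ */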
+
+#endif /* __INTEL_DISPLAY_POWER_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_map.c b/drivers/gpu/drm/i915/display/intel_display_power_map.c
new file mode 100644
index 000000000..dc04afc6c
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_power_map.c
@@ -0,0 +1,1614 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "i915_reg.h"
+
+#include "vlv_sideband_reg.h"
+
+#include "intel_display_power_map.h"
+#include "intel_display_power_well.h"
+
+#define __LIST_INLINE_ELEMS(__elem_type, ...) \
+ ((__elem_type[]) { __VA_ARGS__ })
+
+#define __LIST(__elems) { \
+ .list = __elems, \
+ .count = ARRAY_SIZE(__elems), \
+}
+
+#define I915_PW_DOMAINS(...) \
+ (const struct i915_power_domain_list) \
+ __LIST(__LIST_INLINE_ELEMS(const enum intel_display_power_domain, __VA_ARGS__))
+
+#define I915_DECL_PW_DOMAINS(__name, ...) \
+ static const struct i915_power_domain_list __name = I915_PW_DOMAINS(__VA_ARGS__)
+
+/* A zero-length list assigns all power domains; a NULL list assigns none. */
+#define I915_PW_DOMAINS_NONE NULL
+#define I915_PW_DOMAINS_ALL /* zero-length list */
+
+#define I915_PW_INSTANCES(...) \
+ (const struct i915_power_well_instance_list) \
+ __LIST(__LIST_INLINE_ELEMS(const struct i915_power_well_instance, __VA_ARGS__))
+
+#define I915_PW(_name, _domain_list, ...) \
+ { .name = _name, .domain_list = _domain_list, ## __VA_ARGS__ }
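+
+/*
+ * Illustrative expansion ("foo" is a placeholder name):
+ * I915_DECL_PW_DOMAINS(foo, POWER_DOMAIN_PIPE_A) declares a
+ * static const struct i915_power_domain_list named foo whose .list points
+ * at a compound-literal array { POWER_DOMAIN_PIPE_A } and whose .count
+ * is ARRAY_SIZE() of that array, i.e. 1.
+ */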
+
+
+struct i915_power_well_desc_list {
+ const struct i915_power_well_desc *list;
+ u8 count;
+};
+
+#define I915_PW_DESCRIPTORS(x) __LIST(x)
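+
+/*
+ * Note (illustrative): each platform combines one or more descriptor
+ * arrays into a list of lists; e.g. i9xx_power_wells below consists of
+ * just the always-on well, while later platforms prepend that same
+ * always-on entry to their own wells.
+ */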
+
+
+I915_DECL_PW_DOMAINS(i9xx_pwdoms_always_on, I915_PW_DOMAINS_ALL);
+
+static const struct i915_power_well_desc i9xx_power_wells_always_on[] = {
+ {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("always-on", &i9xx_pwdoms_always_on),
+ ),
+ .ops = &i9xx_always_on_power_well_ops,
+ .always_on = true,
+ },
+};
+
+static const struct i915_power_well_desc_list i9xx_power_wells[] = {
+ I915_PW_DESCRIPTORS(i9xx_power_wells_always_on),
+};
+
+I915_DECL_PW_DOMAINS(i830_pwdoms_pipes,
+ POWER_DOMAIN_PIPE_A,
+ POWER_DOMAIN_PIPE_B,
+ POWER_DOMAIN_PIPE_PANEL_FITTER_A,
+ POWER_DOMAIN_PIPE_PANEL_FITTER_B,
+ POWER_DOMAIN_TRANSCODER_A,
+ POWER_DOMAIN_TRANSCODER_B,
+ POWER_DOMAIN_INIT);
+
+static const struct i915_power_well_desc i830_power_wells_main[] = {
+ {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("pipes", &i830_pwdoms_pipes),
+ ),
+ .ops = &i830_pipes_power_well_ops,
+ },
+};
+
+static const struct i915_power_well_desc_list i830_power_wells[] = {
+ I915_PW_DESCRIPTORS(i9xx_power_wells_always_on),
+ I915_PW_DESCRIPTORS(i830_power_wells_main),
+};
+
+I915_DECL_PW_DOMAINS(hsw_pwdoms_display,
+ POWER_DOMAIN_PIPE_B,
+ POWER_DOMAIN_PIPE_C,
+ POWER_DOMAIN_PIPE_PANEL_FITTER_A,
+ POWER_DOMAIN_PIPE_PANEL_FITTER_B,
+ POWER_DOMAIN_PIPE_PANEL_FITTER_C,
+ POWER_DOMAIN_TRANSCODER_A,
+ POWER_DOMAIN_TRANSCODER_B,
+ POWER_DOMAIN_TRANSCODER_C,
+ POWER_DOMAIN_PORT_DDI_LANES_B,
+ POWER_DOMAIN_PORT_DDI_LANES_C,
+ POWER_DOMAIN_PORT_DDI_LANES_D,
+ POWER_DOMAIN_PORT_CRT, /* DDI E */
+ POWER_DOMAIN_VGA,
+ POWER_DOMAIN_AUDIO_MMIO,
+ POWER_DOMAIN_AUDIO_PLAYBACK,
+ POWER_DOMAIN_INIT);
+
+static const struct i915_power_well_desc hsw_power_wells_main[] = {
+ {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("display", &hsw_pwdoms_display,
+ .hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
+ .id = HSW_DISP_PW_GLOBAL),
+ ),
+ .ops = &hsw_power_well_ops,
+ .has_vga = true,
+ },
+};
+
+static const struct i915_power_well_desc_list hsw_power_wells[] = {
+ I915_PW_DESCRIPTORS(i9xx_power_wells_always_on),
+ I915_PW_DESCRIPTORS(hsw_power_wells_main),
+};
+
+I915_DECL_PW_DOMAINS(bdw_pwdoms_display,
+ POWER_DOMAIN_PIPE_B,
+ POWER_DOMAIN_PIPE_C,
+ POWER_DOMAIN_PIPE_PANEL_FITTER_B,
+ POWER_DOMAIN_PIPE_PANEL_FITTER_C,
+ POWER_DOMAIN_TRANSCODER_A,
+ POWER_DOMAIN_TRANSCODER_B,
+ POWER_DOMAIN_TRANSCODER_C,
+ POWER_DOMAIN_PORT_DDI_LANES_B,
+ POWER_DOMAIN_PORT_DDI_LANES_C,
+ POWER_DOMAIN_PORT_DDI_LANES_D,
+ POWER_DOMAIN_PORT_CRT, /* DDI E */
+ POWER_DOMAIN_VGA,
+ POWER_DOMAIN_AUDIO_MMIO,
+ POWER_DOMAIN_AUDIO_PLAYBACK,
+ POWER_DOMAIN_INIT);
+
+static const struct i915_power_well_desc bdw_power_wells_main[] = {
+ {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("display", &bdw_pwdoms_display,
+ .hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
+ .id = HSW_DISP_PW_GLOBAL),
+ ),
+ .ops = &hsw_power_well_ops,
+ .has_vga = true,
+ .irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
+ },
+};
+
+static const struct i915_power_well_desc_list bdw_power_wells[] = {
+ I915_PW_DESCRIPTORS(i9xx_power_wells_always_on),
+ I915_PW_DESCRIPTORS(bdw_power_wells_main),
+};
+
+I915_DECL_PW_DOMAINS(vlv_pwdoms_display,
+ POWER_DOMAIN_DISPLAY_CORE,
+ POWER_DOMAIN_PIPE_A,
+ POWER_DOMAIN_PIPE_B,
+ POWER_DOMAIN_PIPE_PANEL_FITTER_A,
+ POWER_DOMAIN_PIPE_PANEL_FITTER_B,
+ POWER_DOMAIN_TRANSCODER_A,
+ POWER_DOMAIN_TRANSCODER_B,
+ POWER_DOMAIN_PORT_DDI_LANES_B,
+ POWER_DOMAIN_PORT_DDI_LANES_C,
+ POWER_DOMAIN_PORT_DSI,
+ POWER_DOMAIN_PORT_CRT,
+ POWER_DOMAIN_VGA,
+ POWER_DOMAIN_AUDIO_MMIO,
+ POWER_DOMAIN_AUDIO_PLAYBACK,
+ POWER_DOMAIN_AUX_B,
+ POWER_DOMAIN_AUX_C,
+ POWER_DOMAIN_GMBUS,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(vlv_pwdoms_dpio_cmn_bc,
+ POWER_DOMAIN_PORT_DDI_LANES_B,
+ POWER_DOMAIN_PORT_DDI_LANES_C,
+ POWER_DOMAIN_PORT_CRT,
+ POWER_DOMAIN_AUX_B,
+ POWER_DOMAIN_AUX_C,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(vlv_pwdoms_dpio_tx_bc_lanes,
+ POWER_DOMAIN_PORT_DDI_LANES_B,
+ POWER_DOMAIN_PORT_DDI_LANES_C,
+ POWER_DOMAIN_AUX_B,
+ POWER_DOMAIN_AUX_C,
+ POWER_DOMAIN_INIT);
+
+static const struct i915_power_well_desc vlv_power_wells_main[] = {
+ {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("display", &vlv_pwdoms_display,
+ .vlv.idx = PUNIT_PWGT_IDX_DISP2D,
+ .id = VLV_DISP_PW_DISP2D),
+ ),
+ .ops = &vlv_display_power_well_ops,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("dpio-tx-b-01", &vlv_pwdoms_dpio_tx_bc_lanes,
+ .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_01),
+ I915_PW("dpio-tx-b-23", &vlv_pwdoms_dpio_tx_bc_lanes,
+ .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_23),
+ I915_PW("dpio-tx-c-01", &vlv_pwdoms_dpio_tx_bc_lanes,
+ .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_01),
+ I915_PW("dpio-tx-c-23", &vlv_pwdoms_dpio_tx_bc_lanes,
+ .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_23),
+ ),
+ .ops = &vlv_dpio_power_well_ops,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("dpio-common", &vlv_pwdoms_dpio_cmn_bc,
+ .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
+ .id = VLV_DISP_PW_DPIO_CMN_BC),
+ ),
+ .ops = &vlv_dpio_cmn_power_well_ops,
+ },
+};
+
+static const struct i915_power_well_desc_list vlv_power_wells[] = {
+ I915_PW_DESCRIPTORS(i9xx_power_wells_always_on),
+ I915_PW_DESCRIPTORS(vlv_power_wells_main),
+};
+
+I915_DECL_PW_DOMAINS(chv_pwdoms_display,
+ POWER_DOMAIN_DISPLAY_CORE,
+ POWER_DOMAIN_PIPE_A,
+ POWER_DOMAIN_PIPE_B,
+ POWER_DOMAIN_PIPE_C,
+ POWER_DOMAIN_PIPE_PANEL_FITTER_A,
+ POWER_DOMAIN_PIPE_PANEL_FITTER_B,
+ POWER_DOMAIN_PIPE_PANEL_FITTER_C,
+ POWER_DOMAIN_TRANSCODER_A,
+ POWER_DOMAIN_TRANSCODER_B,
+ POWER_DOMAIN_TRANSCODER_C,
+ POWER_DOMAIN_PORT_DDI_LANES_B,
+ POWER_DOMAIN_PORT_DDI_LANES_C,
+ POWER_DOMAIN_PORT_DDI_LANES_D,
+ POWER_DOMAIN_PORT_DSI,
+ POWER_DOMAIN_VGA,
+ POWER_DOMAIN_AUDIO_MMIO,
+ POWER_DOMAIN_AUDIO_PLAYBACK,
+ POWER_DOMAIN_AUX_B,
+ POWER_DOMAIN_AUX_C,
+ POWER_DOMAIN_AUX_D,
+ POWER_DOMAIN_GMBUS,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(chv_pwdoms_dpio_cmn_bc,
+ POWER_DOMAIN_PORT_DDI_LANES_B,
+ POWER_DOMAIN_PORT_DDI_LANES_C,
+ POWER_DOMAIN_AUX_B,
+ POWER_DOMAIN_AUX_C,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(chv_pwdoms_dpio_cmn_d,
+ POWER_DOMAIN_PORT_DDI_LANES_D,
+ POWER_DOMAIN_AUX_D,
+ POWER_DOMAIN_INIT);
+
+static const struct i915_power_well_desc chv_power_wells_main[] = {
+ {
+ /*
+ * Pipe A power well is the new disp2d well. Pipe B and C
+ * power wells don't actually exist. Pipe A power well is
+ * required for any pipe to work.
+ */
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("display", &chv_pwdoms_display),
+ ),
+ .ops = &chv_pipe_power_well_ops,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("dpio-common-bc", &chv_pwdoms_dpio_cmn_bc,
+ .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
+ .id = VLV_DISP_PW_DPIO_CMN_BC),
+ I915_PW("dpio-common-d", &chv_pwdoms_dpio_cmn_d,
+ .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_D,
+ .id = CHV_DISP_PW_DPIO_CMN_D),
+ ),
+ .ops = &chv_dpio_cmn_power_well_ops,
+ },
+};
+
+static const struct i915_power_well_desc_list chv_power_wells[] = {
+ I915_PW_DESCRIPTORS(i9xx_power_wells_always_on),
+ I915_PW_DESCRIPTORS(chv_power_wells_main),
+};
+
+#define SKL_PW_2_POWER_DOMAINS \
+ POWER_DOMAIN_PIPE_B, \
+ POWER_DOMAIN_PIPE_C, \
+ POWER_DOMAIN_PIPE_PANEL_FITTER_B, \
+ POWER_DOMAIN_PIPE_PANEL_FITTER_C, \
+ POWER_DOMAIN_TRANSCODER_A, \
+ POWER_DOMAIN_TRANSCODER_B, \
+ POWER_DOMAIN_TRANSCODER_C, \
+ POWER_DOMAIN_PORT_DDI_LANES_B, \
+ POWER_DOMAIN_PORT_DDI_LANES_C, \
+ POWER_DOMAIN_PORT_DDI_LANES_D, \
+ POWER_DOMAIN_PORT_DDI_LANES_E, \
+ POWER_DOMAIN_VGA, \
+ POWER_DOMAIN_AUDIO_MMIO, \
+ POWER_DOMAIN_AUDIO_PLAYBACK, \
+ POWER_DOMAIN_AUX_B, \
+ POWER_DOMAIN_AUX_C, \
+ POWER_DOMAIN_AUX_D
+
+I915_DECL_PW_DOMAINS(skl_pwdoms_pw_2,
+ SKL_PW_2_POWER_DOMAINS,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(skl_pwdoms_dc_off,
+ SKL_PW_2_POWER_DOMAINS,
+ POWER_DOMAIN_AUX_A,
+ POWER_DOMAIN_MODESET,
+ POWER_DOMAIN_GT_IRQ,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(skl_pwdoms_ddi_io_a_e,
+ POWER_DOMAIN_PORT_DDI_IO_A,
+ POWER_DOMAIN_PORT_DDI_IO_E,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(skl_pwdoms_ddi_io_b,
+ POWER_DOMAIN_PORT_DDI_IO_B,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(skl_pwdoms_ddi_io_c,
+ POWER_DOMAIN_PORT_DDI_IO_C,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(skl_pwdoms_ddi_io_d,
+ POWER_DOMAIN_PORT_DDI_IO_D,
+ POWER_DOMAIN_INIT);
+
+static const struct i915_power_well_desc skl_power_wells_pw_1[] = {
+ {
+ /* Handled by the DMC firmware */
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_1", I915_PW_DOMAINS_NONE,
+ .hsw.idx = SKL_PW_CTL_IDX_PW_1,
+ .id = SKL_DISP_PW_1),
+ ),
+ .ops = &hsw_power_well_ops,
+ .always_on = true,
+ .has_fuses = true,
+ },
+};
+
+static const struct i915_power_well_desc skl_power_wells_main[] = {
+ {
+ /* Handled by the DMC firmware */
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("MISC_IO", I915_PW_DOMAINS_NONE,
+ .hsw.idx = SKL_PW_CTL_IDX_MISC_IO,
+ .id = SKL_DISP_PW_MISC_IO),
+ ),
+ .ops = &hsw_power_well_ops,
+ .always_on = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("DC_off", &skl_pwdoms_dc_off,
+ .id = SKL_DISP_DC_OFF),
+ ),
+ .ops = &gen9_dc_off_power_well_ops,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_2", &skl_pwdoms_pw_2,
+ .hsw.idx = SKL_PW_CTL_IDX_PW_2,
+ .id = SKL_DISP_PW_2),
+ ),
+ .ops = &hsw_power_well_ops,
+ .has_vga = true,
+ .irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("DDI_IO_A_E", &skl_pwdoms_ddi_io_a_e, .hsw.idx = SKL_PW_CTL_IDX_DDI_A_E),
+ I915_PW("DDI_IO_B", &skl_pwdoms_ddi_io_b, .hsw.idx = SKL_PW_CTL_IDX_DDI_B),
+ I915_PW("DDI_IO_C", &skl_pwdoms_ddi_io_c, .hsw.idx = SKL_PW_CTL_IDX_DDI_C),
+ I915_PW("DDI_IO_D", &skl_pwdoms_ddi_io_d, .hsw.idx = SKL_PW_CTL_IDX_DDI_D),
+ ),
+ .ops = &hsw_power_well_ops,
+ },
+};
+
+static const struct i915_power_well_desc_list skl_power_wells[] = {
+ I915_PW_DESCRIPTORS(i9xx_power_wells_always_on),
+ I915_PW_DESCRIPTORS(skl_power_wells_pw_1),
+ I915_PW_DESCRIPTORS(skl_power_wells_main),
+};
+
+#define BXT_PW_2_POWER_DOMAINS \
+ POWER_DOMAIN_PIPE_B, \
+ POWER_DOMAIN_PIPE_C, \
+ POWER_DOMAIN_PIPE_PANEL_FITTER_B, \
+ POWER_DOMAIN_PIPE_PANEL_FITTER_C, \
+ POWER_DOMAIN_TRANSCODER_A, \
+ POWER_DOMAIN_TRANSCODER_B, \
+ POWER_DOMAIN_TRANSCODER_C, \
+ POWER_DOMAIN_PORT_DDI_LANES_B, \
+ POWER_DOMAIN_PORT_DDI_LANES_C, \
+ POWER_DOMAIN_VGA, \
+ POWER_DOMAIN_AUDIO_MMIO, \
+ POWER_DOMAIN_AUDIO_PLAYBACK, \
+ POWER_DOMAIN_AUX_B, \
+ POWER_DOMAIN_AUX_C
+
+I915_DECL_PW_DOMAINS(bxt_pwdoms_pw_2,
+ BXT_PW_2_POWER_DOMAINS,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(bxt_pwdoms_dc_off,
+ BXT_PW_2_POWER_DOMAINS,
+ POWER_DOMAIN_AUX_A,
+ POWER_DOMAIN_GMBUS,
+ POWER_DOMAIN_MODESET,
+ POWER_DOMAIN_GT_IRQ,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(bxt_pwdoms_dpio_cmn_a,
+ POWER_DOMAIN_PORT_DDI_LANES_A,
+ POWER_DOMAIN_AUX_A,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(bxt_pwdoms_dpio_cmn_bc,
+ POWER_DOMAIN_PORT_DDI_LANES_B,
+ POWER_DOMAIN_PORT_DDI_LANES_C,
+ POWER_DOMAIN_AUX_B,
+ POWER_DOMAIN_AUX_C,
+ POWER_DOMAIN_INIT);
+
+static const struct i915_power_well_desc bxt_power_wells_main[] = {
+ {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("DC_off", &bxt_pwdoms_dc_off,
+ .id = SKL_DISP_DC_OFF),
+ ),
+ .ops = &gen9_dc_off_power_well_ops,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_2", &bxt_pwdoms_pw_2,
+ .hsw.idx = SKL_PW_CTL_IDX_PW_2,
+ .id = SKL_DISP_PW_2),
+ ),
+ .ops = &hsw_power_well_ops,
+ .has_vga = true,
+ .irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("dpio-common-a", &bxt_pwdoms_dpio_cmn_a,
+ .bxt.phy = DPIO_PHY1,
+ .id = BXT_DISP_PW_DPIO_CMN_A),
+ I915_PW("dpio-common-bc", &bxt_pwdoms_dpio_cmn_bc,
+ .bxt.phy = DPIO_PHY0,
+ .id = VLV_DISP_PW_DPIO_CMN_BC),
+ ),
+ .ops = &bxt_dpio_cmn_power_well_ops,
+ },
+};
+
+static const struct i915_power_well_desc_list bxt_power_wells[] = {
+ I915_PW_DESCRIPTORS(i9xx_power_wells_always_on),
+ I915_PW_DESCRIPTORS(skl_power_wells_pw_1),
+ I915_PW_DESCRIPTORS(bxt_power_wells_main),
+};
+
+#define GLK_PW_2_POWER_DOMAINS \
+ POWER_DOMAIN_PIPE_B, \
+ POWER_DOMAIN_PIPE_C, \
+ POWER_DOMAIN_PIPE_PANEL_FITTER_B, \
+ POWER_DOMAIN_PIPE_PANEL_FITTER_C, \
+ POWER_DOMAIN_TRANSCODER_A, \
+ POWER_DOMAIN_TRANSCODER_B, \
+ POWER_DOMAIN_TRANSCODER_C, \
+ POWER_DOMAIN_PORT_DDI_LANES_B, \
+ POWER_DOMAIN_PORT_DDI_LANES_C, \
+ POWER_DOMAIN_VGA, \
+ POWER_DOMAIN_AUDIO_MMIO, \
+ POWER_DOMAIN_AUDIO_PLAYBACK, \
+ POWER_DOMAIN_AUX_B, \
+ POWER_DOMAIN_AUX_C
+
+I915_DECL_PW_DOMAINS(glk_pwdoms_pw_2,
+ GLK_PW_2_POWER_DOMAINS,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(glk_pwdoms_dc_off,
+ GLK_PW_2_POWER_DOMAINS,
+ POWER_DOMAIN_AUX_A,
+ POWER_DOMAIN_GMBUS,
+ POWER_DOMAIN_MODESET,
+ POWER_DOMAIN_GT_IRQ,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(glk_pwdoms_ddi_io_a, POWER_DOMAIN_PORT_DDI_IO_A);
+I915_DECL_PW_DOMAINS(glk_pwdoms_ddi_io_b, POWER_DOMAIN_PORT_DDI_IO_B);
+I915_DECL_PW_DOMAINS(glk_pwdoms_ddi_io_c, POWER_DOMAIN_PORT_DDI_IO_C);
+
+I915_DECL_PW_DOMAINS(glk_pwdoms_dpio_cmn_a,
+ POWER_DOMAIN_PORT_DDI_LANES_A,
+ POWER_DOMAIN_AUX_A,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(glk_pwdoms_dpio_cmn_b,
+ POWER_DOMAIN_PORT_DDI_LANES_B,
+ POWER_DOMAIN_AUX_B,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(glk_pwdoms_dpio_cmn_c,
+ POWER_DOMAIN_PORT_DDI_LANES_C,
+ POWER_DOMAIN_AUX_C,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(glk_pwdoms_aux_a,
+ POWER_DOMAIN_AUX_A,
+ POWER_DOMAIN_AUX_IO_A,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(glk_pwdoms_aux_b,
+ POWER_DOMAIN_AUX_B,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(glk_pwdoms_aux_c,
+ POWER_DOMAIN_AUX_C,
+ POWER_DOMAIN_INIT);
+
+static const struct i915_power_well_desc glk_power_wells_main[] = {
+ {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("DC_off", &glk_pwdoms_dc_off,
+ .id = SKL_DISP_DC_OFF),
+ ),
+ .ops = &gen9_dc_off_power_well_ops,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_2", &glk_pwdoms_pw_2,
+ .hsw.idx = SKL_PW_CTL_IDX_PW_2,
+ .id = SKL_DISP_PW_2),
+ ),
+ .ops = &hsw_power_well_ops,
+ .has_vga = true,
+ .irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("dpio-common-a", &glk_pwdoms_dpio_cmn_a,
+ .bxt.phy = DPIO_PHY1,
+ .id = BXT_DISP_PW_DPIO_CMN_A),
+ I915_PW("dpio-common-b", &glk_pwdoms_dpio_cmn_b,
+ .bxt.phy = DPIO_PHY0,
+ .id = VLV_DISP_PW_DPIO_CMN_BC),
+ I915_PW("dpio-common-c", &glk_pwdoms_dpio_cmn_c,
+ .bxt.phy = DPIO_PHY2,
+ .id = GLK_DISP_PW_DPIO_CMN_C),
+ ),
+ .ops = &bxt_dpio_cmn_power_well_ops,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("AUX_A", &glk_pwdoms_aux_a, .hsw.idx = GLK_PW_CTL_IDX_AUX_A),
+ I915_PW("AUX_B", &glk_pwdoms_aux_b, .hsw.idx = GLK_PW_CTL_IDX_AUX_B),
+ I915_PW("AUX_C", &glk_pwdoms_aux_c, .hsw.idx = GLK_PW_CTL_IDX_AUX_C),
+ I915_PW("DDI_IO_A", &glk_pwdoms_ddi_io_a, .hsw.idx = GLK_PW_CTL_IDX_DDI_A),
+ I915_PW("DDI_IO_B", &glk_pwdoms_ddi_io_b, .hsw.idx = SKL_PW_CTL_IDX_DDI_B),
+ I915_PW("DDI_IO_C", &glk_pwdoms_ddi_io_c, .hsw.idx = SKL_PW_CTL_IDX_DDI_C),
+ ),
+ .ops = &hsw_power_well_ops,
+ },
+};
+
+static const struct i915_power_well_desc_list glk_power_wells[] = {
+ I915_PW_DESCRIPTORS(i9xx_power_wells_always_on),
+ I915_PW_DESCRIPTORS(skl_power_wells_pw_1),
+ I915_PW_DESCRIPTORS(glk_power_wells_main),
+};
+
+/*
+ * ICL PW_0/PG_0 domains (HW/DMC control):
+ * - PCI
+ * - clocks except port PLL
+ * - central power except FBC
+ * - shared functions except pipe interrupts, pipe MBUS, DBUF registers
+ * ICL PW_1/PG_1 domains (HW/DMC control):
+ * - DBUF function
+ * - PIPE_A and its planes, except VGA
+ * - transcoder EDP + PSR
+ * - transcoder DSI
+ * - DDI_A
+ * - FBC
+ */
+#define ICL_PW_4_POWER_DOMAINS \
+ POWER_DOMAIN_PIPE_C, \
+ POWER_DOMAIN_PIPE_PANEL_FITTER_C
+
+I915_DECL_PW_DOMAINS(icl_pwdoms_pw_4,
+ ICL_PW_4_POWER_DOMAINS,
+ POWER_DOMAIN_INIT);
+ /* VDSC/joining */
+
+#define ICL_PW_3_POWER_DOMAINS \
+ ICL_PW_4_POWER_DOMAINS, \
+ POWER_DOMAIN_PIPE_B, \
+ POWER_DOMAIN_PIPE_PANEL_FITTER_B, \
+ POWER_DOMAIN_TRANSCODER_A, \
+ POWER_DOMAIN_TRANSCODER_B, \
+ POWER_DOMAIN_TRANSCODER_C, \
+ POWER_DOMAIN_PORT_DDI_LANES_B, \
+ POWER_DOMAIN_PORT_DDI_LANES_C, \
+ POWER_DOMAIN_PORT_DDI_LANES_D, \
+ POWER_DOMAIN_PORT_DDI_LANES_E, \
+ POWER_DOMAIN_PORT_DDI_LANES_F, \
+ POWER_DOMAIN_VGA, \
+ POWER_DOMAIN_AUDIO_MMIO, \
+ POWER_DOMAIN_AUDIO_PLAYBACK, \
+ POWER_DOMAIN_AUX_B, \
+ POWER_DOMAIN_AUX_C, \
+ POWER_DOMAIN_AUX_D, \
+ POWER_DOMAIN_AUX_E, \
+ POWER_DOMAIN_AUX_F, \
+ POWER_DOMAIN_AUX_TBT1, \
+ POWER_DOMAIN_AUX_TBT2, \
+ POWER_DOMAIN_AUX_TBT3, \
+ POWER_DOMAIN_AUX_TBT4
+
+I915_DECL_PW_DOMAINS(icl_pwdoms_pw_3,
+ ICL_PW_3_POWER_DOMAINS,
+ POWER_DOMAIN_INIT);
+ /*
+ * - transcoder WD
+ * - KVMR (HW control)
+ */
+
+#define ICL_PW_2_POWER_DOMAINS \
+ ICL_PW_3_POWER_DOMAINS, \
+ POWER_DOMAIN_TRANSCODER_VDSC_PW2
+
+I915_DECL_PW_DOMAINS(icl_pwdoms_pw_2,
+ ICL_PW_2_POWER_DOMAINS,
+ POWER_DOMAIN_INIT);
+ /*
+ * - KVMR (HW control)
+ */
+
+I915_DECL_PW_DOMAINS(icl_pwdoms_dc_off,
+ ICL_PW_2_POWER_DOMAINS,
+ POWER_DOMAIN_AUX_A,
+ POWER_DOMAIN_MODESET,
+ POWER_DOMAIN_DC_OFF,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(icl_pwdoms_ddi_io_d, POWER_DOMAIN_PORT_DDI_IO_D);
+I915_DECL_PW_DOMAINS(icl_pwdoms_ddi_io_e, POWER_DOMAIN_PORT_DDI_IO_E);
+I915_DECL_PW_DOMAINS(icl_pwdoms_ddi_io_f, POWER_DOMAIN_PORT_DDI_IO_F);
+
+I915_DECL_PW_DOMAINS(icl_pwdoms_aux_a,
+ POWER_DOMAIN_AUX_A,
+ POWER_DOMAIN_AUX_IO_A);
+I915_DECL_PW_DOMAINS(icl_pwdoms_aux_b, POWER_DOMAIN_AUX_B);
+I915_DECL_PW_DOMAINS(icl_pwdoms_aux_c, POWER_DOMAIN_AUX_C);
+I915_DECL_PW_DOMAINS(icl_pwdoms_aux_d, POWER_DOMAIN_AUX_D);
+I915_DECL_PW_DOMAINS(icl_pwdoms_aux_e, POWER_DOMAIN_AUX_E);
+I915_DECL_PW_DOMAINS(icl_pwdoms_aux_f, POWER_DOMAIN_AUX_F);
+I915_DECL_PW_DOMAINS(icl_pwdoms_aux_tbt1, POWER_DOMAIN_AUX_TBT1);
+I915_DECL_PW_DOMAINS(icl_pwdoms_aux_tbt2, POWER_DOMAIN_AUX_TBT2);
+I915_DECL_PW_DOMAINS(icl_pwdoms_aux_tbt3, POWER_DOMAIN_AUX_TBT3);
+I915_DECL_PW_DOMAINS(icl_pwdoms_aux_tbt4, POWER_DOMAIN_AUX_TBT4);
+
+static const struct i915_power_well_desc icl_power_wells_pw_1[] = {
+ {
+ /* Handled by the DMC firmware */
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_1", I915_PW_DOMAINS_NONE,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_1,
+ .id = SKL_DISP_PW_1),
+ ),
+ .ops = &hsw_power_well_ops,
+ .always_on = true,
+ .has_fuses = true,
+ },
+};
+
+static const struct i915_power_well_desc icl_power_wells_main[] = {
+ {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("DC_off", &icl_pwdoms_dc_off,
+ .id = SKL_DISP_DC_OFF),
+ ),
+ .ops = &gen9_dc_off_power_well_ops,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_2", &icl_pwdoms_pw_2,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_2,
+ .id = SKL_DISP_PW_2),
+ ),
+ .ops = &hsw_power_well_ops,
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_3", &icl_pwdoms_pw_3,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_3,
+ .id = ICL_DISP_PW_3),
+ ),
+ .ops = &hsw_power_well_ops,
+ .has_vga = true,
+ .irq_pipe_mask = BIT(PIPE_B),
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("DDI_IO_A", &glk_pwdoms_ddi_io_a, .hsw.idx = ICL_PW_CTL_IDX_DDI_A),
+ I915_PW("DDI_IO_B", &glk_pwdoms_ddi_io_b, .hsw.idx = ICL_PW_CTL_IDX_DDI_B),
+ I915_PW("DDI_IO_C", &glk_pwdoms_ddi_io_c, .hsw.idx = ICL_PW_CTL_IDX_DDI_C),
+ I915_PW("DDI_IO_D", &icl_pwdoms_ddi_io_d, .hsw.idx = ICL_PW_CTL_IDX_DDI_D),
+ I915_PW("DDI_IO_E", &icl_pwdoms_ddi_io_e, .hsw.idx = ICL_PW_CTL_IDX_DDI_E),
+ I915_PW("DDI_IO_F", &icl_pwdoms_ddi_io_f, .hsw.idx = ICL_PW_CTL_IDX_DDI_F),
+ ),
+ .ops = &icl_ddi_power_well_ops,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("AUX_A", &icl_pwdoms_aux_a, .hsw.idx = ICL_PW_CTL_IDX_AUX_A),
+ I915_PW("AUX_B", &icl_pwdoms_aux_b, .hsw.idx = ICL_PW_CTL_IDX_AUX_B),
+ I915_PW("AUX_C", &icl_pwdoms_aux_c, .hsw.idx = ICL_PW_CTL_IDX_AUX_C),
+ I915_PW("AUX_D", &icl_pwdoms_aux_d, .hsw.idx = ICL_PW_CTL_IDX_AUX_D),
+ I915_PW("AUX_E", &icl_pwdoms_aux_e, .hsw.idx = ICL_PW_CTL_IDX_AUX_E),
+ I915_PW("AUX_F", &icl_pwdoms_aux_f, .hsw.idx = ICL_PW_CTL_IDX_AUX_F),
+ ),
+ .ops = &icl_aux_power_well_ops,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("AUX_TBT1", &icl_pwdoms_aux_tbt1, .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1),
+ I915_PW("AUX_TBT2", &icl_pwdoms_aux_tbt2, .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2),
+ I915_PW("AUX_TBT3", &icl_pwdoms_aux_tbt3, .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3),
+ I915_PW("AUX_TBT4", &icl_pwdoms_aux_tbt4, .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4),
+ ),
+ .ops = &icl_aux_power_well_ops,
+ .is_tc_tbt = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_4", &icl_pwdoms_pw_4,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_4),
+ ),
+ .ops = &hsw_power_well_ops,
+ .irq_pipe_mask = BIT(PIPE_C),
+ .has_fuses = true,
+ },
+};
+
+static const struct i915_power_well_desc_list icl_power_wells[] = {
+ I915_PW_DESCRIPTORS(i9xx_power_wells_always_on),
+ I915_PW_DESCRIPTORS(icl_power_wells_pw_1),
+ I915_PW_DESCRIPTORS(icl_power_wells_main),
+};
+
+#define TGL_PW_5_POWER_DOMAINS \
+ POWER_DOMAIN_PIPE_D, \
+ POWER_DOMAIN_PIPE_PANEL_FITTER_D, \
+ POWER_DOMAIN_TRANSCODER_D
+
+I915_DECL_PW_DOMAINS(tgl_pwdoms_pw_5,
+ TGL_PW_5_POWER_DOMAINS,
+ POWER_DOMAIN_INIT);
+
+#define TGL_PW_4_POWER_DOMAINS \
+ TGL_PW_5_POWER_DOMAINS, \
+ POWER_DOMAIN_PIPE_C, \
+ POWER_DOMAIN_PIPE_PANEL_FITTER_C, \
+ POWER_DOMAIN_TRANSCODER_C
+
+I915_DECL_PW_DOMAINS(tgl_pwdoms_pw_4,
+ TGL_PW_4_POWER_DOMAINS,
+ POWER_DOMAIN_INIT);
+
+#define TGL_PW_3_POWER_DOMAINS \
+ TGL_PW_4_POWER_DOMAINS, \
+ POWER_DOMAIN_PIPE_B, \
+ POWER_DOMAIN_PIPE_PANEL_FITTER_B, \
+ POWER_DOMAIN_TRANSCODER_B, \
+ POWER_DOMAIN_PORT_DDI_LANES_TC1, \
+ POWER_DOMAIN_PORT_DDI_LANES_TC2, \
+ POWER_DOMAIN_PORT_DDI_LANES_TC3, \
+ POWER_DOMAIN_PORT_DDI_LANES_TC4, \
+ POWER_DOMAIN_PORT_DDI_LANES_TC5, \
+ POWER_DOMAIN_PORT_DDI_LANES_TC6, \
+ POWER_DOMAIN_VGA, \
+ POWER_DOMAIN_AUDIO_MMIO, \
+ POWER_DOMAIN_AUDIO_PLAYBACK, \
+ POWER_DOMAIN_AUX_USBC1, \
+ POWER_DOMAIN_AUX_USBC2, \
+ POWER_DOMAIN_AUX_USBC3, \
+ POWER_DOMAIN_AUX_USBC4, \
+ POWER_DOMAIN_AUX_USBC5, \
+ POWER_DOMAIN_AUX_USBC6, \
+ POWER_DOMAIN_AUX_TBT1, \
+ POWER_DOMAIN_AUX_TBT2, \
+ POWER_DOMAIN_AUX_TBT3, \
+ POWER_DOMAIN_AUX_TBT4, \
+ POWER_DOMAIN_AUX_TBT5, \
+ POWER_DOMAIN_AUX_TBT6
+
+I915_DECL_PW_DOMAINS(tgl_pwdoms_pw_3,
+ TGL_PW_3_POWER_DOMAINS,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(tgl_pwdoms_pw_2,
+ TGL_PW_3_POWER_DOMAINS,
+ POWER_DOMAIN_TRANSCODER_VDSC_PW2,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(tgl_pwdoms_dc_off,
+ TGL_PW_3_POWER_DOMAINS,
+ POWER_DOMAIN_AUX_A,
+ POWER_DOMAIN_AUX_B,
+ POWER_DOMAIN_AUX_C,
+ POWER_DOMAIN_MODESET,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(tgl_pwdoms_ddi_io_tc1, POWER_DOMAIN_PORT_DDI_IO_TC1);
+I915_DECL_PW_DOMAINS(tgl_pwdoms_ddi_io_tc2, POWER_DOMAIN_PORT_DDI_IO_TC2);
+I915_DECL_PW_DOMAINS(tgl_pwdoms_ddi_io_tc3, POWER_DOMAIN_PORT_DDI_IO_TC3);
+I915_DECL_PW_DOMAINS(tgl_pwdoms_ddi_io_tc4, POWER_DOMAIN_PORT_DDI_IO_TC4);
+I915_DECL_PW_DOMAINS(tgl_pwdoms_ddi_io_tc5, POWER_DOMAIN_PORT_DDI_IO_TC5);
+I915_DECL_PW_DOMAINS(tgl_pwdoms_ddi_io_tc6, POWER_DOMAIN_PORT_DDI_IO_TC6);
+
+I915_DECL_PW_DOMAINS(tgl_pwdoms_aux_usbc1, POWER_DOMAIN_AUX_USBC1);
+I915_DECL_PW_DOMAINS(tgl_pwdoms_aux_usbc2, POWER_DOMAIN_AUX_USBC2);
+I915_DECL_PW_DOMAINS(tgl_pwdoms_aux_usbc3, POWER_DOMAIN_AUX_USBC3);
+I915_DECL_PW_DOMAINS(tgl_pwdoms_aux_usbc4, POWER_DOMAIN_AUX_USBC4);
+I915_DECL_PW_DOMAINS(tgl_pwdoms_aux_usbc5, POWER_DOMAIN_AUX_USBC5);
+I915_DECL_PW_DOMAINS(tgl_pwdoms_aux_usbc6, POWER_DOMAIN_AUX_USBC6);
+
+I915_DECL_PW_DOMAINS(tgl_pwdoms_aux_tbt5, POWER_DOMAIN_AUX_TBT5);
+I915_DECL_PW_DOMAINS(tgl_pwdoms_aux_tbt6, POWER_DOMAIN_AUX_TBT6);
+
+I915_DECL_PW_DOMAINS(tgl_pwdoms_tc_cold_off,
+ POWER_DOMAIN_AUX_USBC1,
+ POWER_DOMAIN_AUX_USBC2,
+ POWER_DOMAIN_AUX_USBC3,
+ POWER_DOMAIN_AUX_USBC4,
+ POWER_DOMAIN_AUX_USBC5,
+ POWER_DOMAIN_AUX_USBC6,
+ POWER_DOMAIN_AUX_TBT1,
+ POWER_DOMAIN_AUX_TBT2,
+ POWER_DOMAIN_AUX_TBT3,
+ POWER_DOMAIN_AUX_TBT4,
+ POWER_DOMAIN_AUX_TBT5,
+ POWER_DOMAIN_AUX_TBT6,
+ POWER_DOMAIN_TC_COLD_OFF);
+
+static const struct i915_power_well_desc tgl_power_wells_main[] = {
+ {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("DC_off", &tgl_pwdoms_dc_off,
+ .id = SKL_DISP_DC_OFF),
+ ),
+ .ops = &gen9_dc_off_power_well_ops,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_2", &tgl_pwdoms_pw_2,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_2,
+ .id = SKL_DISP_PW_2),
+ ),
+ .ops = &hsw_power_well_ops,
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_3", &tgl_pwdoms_pw_3,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_3,
+ .id = ICL_DISP_PW_3),
+ ),
+ .ops = &hsw_power_well_ops,
+ .has_vga = true,
+ .irq_pipe_mask = BIT(PIPE_B),
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("DDI_IO_A", &glk_pwdoms_ddi_io_a, .hsw.idx = ICL_PW_CTL_IDX_DDI_A),
+ I915_PW("DDI_IO_B", &glk_pwdoms_ddi_io_b, .hsw.idx = ICL_PW_CTL_IDX_DDI_B),
+ I915_PW("DDI_IO_C", &glk_pwdoms_ddi_io_c, .hsw.idx = ICL_PW_CTL_IDX_DDI_C),
+ I915_PW("DDI_IO_TC1", &tgl_pwdoms_ddi_io_tc1, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1),
+ I915_PW("DDI_IO_TC2", &tgl_pwdoms_ddi_io_tc2, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2),
+ I915_PW("DDI_IO_TC3", &tgl_pwdoms_ddi_io_tc3, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC3),
+ I915_PW("DDI_IO_TC4", &tgl_pwdoms_ddi_io_tc4, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC4),
+ I915_PW("DDI_IO_TC5", &tgl_pwdoms_ddi_io_tc5, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC5),
+ I915_PW("DDI_IO_TC6", &tgl_pwdoms_ddi_io_tc6, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC6),
+ ),
+ .ops = &icl_ddi_power_well_ops,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_4", &tgl_pwdoms_pw_4,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_4),
+ ),
+ .ops = &hsw_power_well_ops,
+ .has_fuses = true,
+ .irq_pipe_mask = BIT(PIPE_C),
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_5", &tgl_pwdoms_pw_5,
+ .hsw.idx = TGL_PW_CTL_IDX_PW_5),
+ ),
+ .ops = &hsw_power_well_ops,
+ .has_fuses = true,
+ .irq_pipe_mask = BIT(PIPE_D),
+ },
+};
+
+static const struct i915_power_well_desc tgl_power_wells_tc_cold_off[] = {
+ {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("TC_cold_off", &tgl_pwdoms_tc_cold_off,
+ .id = TGL_DISP_PW_TC_COLD_OFF),
+ ),
+ .ops = &tgl_tc_cold_off_ops,
+ },
+};
+
+static const struct i915_power_well_desc tgl_power_wells_aux[] = {
+ {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("AUX_A", &icl_pwdoms_aux_a, .hsw.idx = ICL_PW_CTL_IDX_AUX_A),
+ I915_PW("AUX_B", &icl_pwdoms_aux_b, .hsw.idx = ICL_PW_CTL_IDX_AUX_B),
+ I915_PW("AUX_C", &icl_pwdoms_aux_c, .hsw.idx = ICL_PW_CTL_IDX_AUX_C),
+ I915_PW("AUX_USBC1", &tgl_pwdoms_aux_usbc1, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1),
+ I915_PW("AUX_USBC2", &tgl_pwdoms_aux_usbc2, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2),
+ I915_PW("AUX_USBC3", &tgl_pwdoms_aux_usbc3, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC3),
+ I915_PW("AUX_USBC4", &tgl_pwdoms_aux_usbc4, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC4),
+ I915_PW("AUX_USBC5", &tgl_pwdoms_aux_usbc5, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC5),
+ I915_PW("AUX_USBC6", &tgl_pwdoms_aux_usbc6, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC6),
+ ),
+ .ops = &icl_aux_power_well_ops,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("AUX_TBT1", &icl_pwdoms_aux_tbt1, .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1),
+ I915_PW("AUX_TBT2", &icl_pwdoms_aux_tbt2, .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2),
+ I915_PW("AUX_TBT3", &icl_pwdoms_aux_tbt3, .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3),
+ I915_PW("AUX_TBT4", &icl_pwdoms_aux_tbt4, .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4),
+ I915_PW("AUX_TBT5", &tgl_pwdoms_aux_tbt5, .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT5),
+ I915_PW("AUX_TBT6", &tgl_pwdoms_aux_tbt6, .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT6),
+ ),
+ .ops = &icl_aux_power_well_ops,
+ .is_tc_tbt = true,
+ },
+};
+
+static const struct i915_power_well_desc_list tgl_power_wells[] = {
+ I915_PW_DESCRIPTORS(i9xx_power_wells_always_on),
+ I915_PW_DESCRIPTORS(icl_power_wells_pw_1),
+ I915_PW_DESCRIPTORS(tgl_power_wells_main),
+ I915_PW_DESCRIPTORS(tgl_power_wells_tc_cold_off),
+ I915_PW_DESCRIPTORS(tgl_power_wells_aux),
+};
+
+static const struct i915_power_well_desc_list adls_power_wells[] = {
+ I915_PW_DESCRIPTORS(i9xx_power_wells_always_on),
+ I915_PW_DESCRIPTORS(icl_power_wells_pw_1),
+ I915_PW_DESCRIPTORS(tgl_power_wells_main),
+ I915_PW_DESCRIPTORS(tgl_power_wells_aux),
+};
+
+#define RKL_PW_4_POWER_DOMAINS \
+ POWER_DOMAIN_PIPE_C, \
+ POWER_DOMAIN_PIPE_PANEL_FITTER_C, \
+ POWER_DOMAIN_TRANSCODER_C
+
+I915_DECL_PW_DOMAINS(rkl_pwdoms_pw_4,
+ RKL_PW_4_POWER_DOMAINS,
+ POWER_DOMAIN_INIT);
+
+#define RKL_PW_3_POWER_DOMAINS \
+ RKL_PW_4_POWER_DOMAINS, \
+ POWER_DOMAIN_PIPE_B, \
+ POWER_DOMAIN_PIPE_PANEL_FITTER_B, \
+ POWER_DOMAIN_TRANSCODER_B, \
+ POWER_DOMAIN_PORT_DDI_LANES_TC1, \
+ POWER_DOMAIN_PORT_DDI_LANES_TC2, \
+ POWER_DOMAIN_VGA, \
+ POWER_DOMAIN_AUDIO_MMIO, \
+ POWER_DOMAIN_AUDIO_PLAYBACK, \
+ POWER_DOMAIN_AUX_USBC1, \
+ POWER_DOMAIN_AUX_USBC2
+
+I915_DECL_PW_DOMAINS(rkl_pwdoms_pw_3,
+ RKL_PW_3_POWER_DOMAINS,
+ POWER_DOMAIN_INIT);
+
+/*
+ * There is no PW_2/PG_2 on RKL.
+ *
+ * RKL PW_1/PG_1 domains (under HW/DMC control):
+ * - DBUF function (note: registers are in PW0)
+ * - PIPE_A and its planes and VDSC/joining, except VGA
+ * - transcoder A
+ * - DDI_A and DDI_B
+ * - FBC
+ *
+ * RKL PW_0/PG_0 domains (under HW/DMC control):
+ * - PCI
+ * - clocks except port PLL
+ * - shared functions:
+ * * interrupts except pipe interrupts
+ * * MBus except PIPE_MBUS_DBOX_CTL
+ * * DBUF registers
+ * - central power except FBC
+ * - top-level GTC (DDI-level GTC is in the well associated with the DDI)
+ */
+
+I915_DECL_PW_DOMAINS(rkl_pwdoms_dc_off,
+ RKL_PW_3_POWER_DOMAINS,
+ POWER_DOMAIN_AUX_A,
+ POWER_DOMAIN_AUX_B,
+ POWER_DOMAIN_MODESET,
+ POWER_DOMAIN_INIT);
+
+static const struct i915_power_well_desc rkl_power_wells_main[] = {
+ {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("DC_off", &rkl_pwdoms_dc_off,
+ .id = SKL_DISP_DC_OFF),
+ ),
+ .ops = &gen9_dc_off_power_well_ops,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_3", &rkl_pwdoms_pw_3,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_3,
+ .id = ICL_DISP_PW_3),
+ ),
+ .ops = &hsw_power_well_ops,
+ .irq_pipe_mask = BIT(PIPE_B),
+ .has_vga = true,
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_4", &rkl_pwdoms_pw_4,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_4),
+ ),
+ .ops = &hsw_power_well_ops,
+ .has_fuses = true,
+ .irq_pipe_mask = BIT(PIPE_C),
+ },
+};
+
+static const struct i915_power_well_desc rkl_power_wells_ddi_aux[] = {
+ {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("DDI_IO_A", &glk_pwdoms_ddi_io_a, .hsw.idx = ICL_PW_CTL_IDX_DDI_A),
+ I915_PW("DDI_IO_B", &glk_pwdoms_ddi_io_b, .hsw.idx = ICL_PW_CTL_IDX_DDI_B),
+ I915_PW("DDI_IO_TC1", &tgl_pwdoms_ddi_io_tc1, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1),
+ I915_PW("DDI_IO_TC2", &tgl_pwdoms_ddi_io_tc2, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2),
+ ),
+ .ops = &icl_ddi_power_well_ops,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("AUX_A", &icl_pwdoms_aux_a, .hsw.idx = ICL_PW_CTL_IDX_AUX_A),
+ I915_PW("AUX_B", &icl_pwdoms_aux_b, .hsw.idx = ICL_PW_CTL_IDX_AUX_B),
+ I915_PW("AUX_USBC1", &tgl_pwdoms_aux_usbc1, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1),
+ I915_PW("AUX_USBC2", &tgl_pwdoms_aux_usbc2, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2),
+ ),
+ .ops = &icl_aux_power_well_ops,
+ },
+};
+
+static const struct i915_power_well_desc_list rkl_power_wells[] = {
+ I915_PW_DESCRIPTORS(i9xx_power_wells_always_on),
+ I915_PW_DESCRIPTORS(icl_power_wells_pw_1),
+ I915_PW_DESCRIPTORS(rkl_power_wells_main),
+ I915_PW_DESCRIPTORS(rkl_power_wells_ddi_aux),
+};
+
+/*
+ * From DG1 onwards, audio MMIO/VERBS lies in the PG0 power well.
+ */
+#define DG1_PW_3_POWER_DOMAINS \
+ TGL_PW_4_POWER_DOMAINS, \
+ POWER_DOMAIN_PIPE_B, \
+ POWER_DOMAIN_PIPE_PANEL_FITTER_B, \
+ POWER_DOMAIN_TRANSCODER_B, \
+ POWER_DOMAIN_PORT_DDI_LANES_TC1, \
+ POWER_DOMAIN_PORT_DDI_LANES_TC2, \
+ POWER_DOMAIN_VGA, \
+ POWER_DOMAIN_AUDIO_PLAYBACK, \
+ POWER_DOMAIN_AUX_USBC1, \
+ POWER_DOMAIN_AUX_USBC2
+
+I915_DECL_PW_DOMAINS(dg1_pwdoms_pw_3,
+ DG1_PW_3_POWER_DOMAINS,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(dg1_pwdoms_dc_off,
+ DG1_PW_3_POWER_DOMAINS,
+ POWER_DOMAIN_AUDIO_MMIO,
+ POWER_DOMAIN_AUX_A,
+ POWER_DOMAIN_AUX_B,
+ POWER_DOMAIN_MODESET,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(dg1_pwdoms_pw_2,
+ DG1_PW_3_POWER_DOMAINS,
+ POWER_DOMAIN_TRANSCODER_VDSC_PW2,
+ POWER_DOMAIN_INIT);
+
+static const struct i915_power_well_desc dg1_power_wells_main[] = {
+ {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("DC_off", &dg1_pwdoms_dc_off,
+ .id = SKL_DISP_DC_OFF),
+ ),
+ .ops = &gen9_dc_off_power_well_ops,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_2", &dg1_pwdoms_pw_2,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_2,
+ .id = SKL_DISP_PW_2),
+ ),
+ .ops = &hsw_power_well_ops,
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_3", &dg1_pwdoms_pw_3,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_3,
+ .id = ICL_DISP_PW_3),
+ ),
+ .ops = &hsw_power_well_ops,
+ .irq_pipe_mask = BIT(PIPE_B),
+ .has_vga = true,
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_4", &tgl_pwdoms_pw_4,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_4),
+ ),
+ .ops = &hsw_power_well_ops,
+ .has_fuses = true,
+ .irq_pipe_mask = BIT(PIPE_C),
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_5", &tgl_pwdoms_pw_5,
+ .hsw.idx = TGL_PW_CTL_IDX_PW_5),
+ ),
+ .ops = &hsw_power_well_ops,
+ .has_fuses = true,
+ .irq_pipe_mask = BIT(PIPE_D),
+ },
+};
+
+static const struct i915_power_well_desc_list dg1_power_wells[] = {
+ I915_PW_DESCRIPTORS(i9xx_power_wells_always_on),
+ I915_PW_DESCRIPTORS(icl_power_wells_pw_1),
+ I915_PW_DESCRIPTORS(dg1_power_wells_main),
+ I915_PW_DESCRIPTORS(rkl_power_wells_ddi_aux),
+};
+
+/*
+ * XE_LPD Power Domains
+ *
+ * Previous platforms required that PG(n-1) be enabled before PG(n). That
+ * dependency chain turns into a dependency tree on XE_LPD:
+ *
+ * PG0
+ * |
+ * --PG1--
+ * / \
+ * PGA --PG2--
+ * / | \
+ * PGB PGC PGD
+ *
+ * Power wells must be enabled from top to bottom and disabled from bottom
+ * to top. This allows pipes to be power gated independently.
+ */
+
+#define XELPD_PW_D_POWER_DOMAINS \
+ POWER_DOMAIN_PIPE_D, \
+ POWER_DOMAIN_PIPE_PANEL_FITTER_D, \
+ POWER_DOMAIN_TRANSCODER_D
+
+I915_DECL_PW_DOMAINS(xelpd_pwdoms_pw_d,
+ XELPD_PW_D_POWER_DOMAINS,
+ POWER_DOMAIN_INIT);
+
+#define XELPD_PW_C_POWER_DOMAINS \
+ POWER_DOMAIN_PIPE_C, \
+ POWER_DOMAIN_PIPE_PANEL_FITTER_C, \
+ POWER_DOMAIN_TRANSCODER_C
+
+I915_DECL_PW_DOMAINS(xelpd_pwdoms_pw_c,
+ XELPD_PW_C_POWER_DOMAINS,
+ POWER_DOMAIN_INIT);
+
+#define XELPD_PW_B_POWER_DOMAINS \
+ POWER_DOMAIN_PIPE_B, \
+ POWER_DOMAIN_PIPE_PANEL_FITTER_B, \
+ POWER_DOMAIN_TRANSCODER_B
+
+I915_DECL_PW_DOMAINS(xelpd_pwdoms_pw_b,
+ XELPD_PW_B_POWER_DOMAINS,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(xelpd_pwdoms_pw_a,
+ POWER_DOMAIN_PIPE_A,
+ POWER_DOMAIN_PIPE_PANEL_FITTER_A,
+ POWER_DOMAIN_INIT);
+
+#define XELPD_PW_2_POWER_DOMAINS \
+ XELPD_PW_B_POWER_DOMAINS, \
+ XELPD_PW_C_POWER_DOMAINS, \
+ XELPD_PW_D_POWER_DOMAINS, \
+ POWER_DOMAIN_PORT_DDI_LANES_C, \
+ POWER_DOMAIN_PORT_DDI_LANES_D, \
+ POWER_DOMAIN_PORT_DDI_LANES_E, \
+ POWER_DOMAIN_PORT_DDI_LANES_TC1, \
+ POWER_DOMAIN_PORT_DDI_LANES_TC2, \
+ POWER_DOMAIN_PORT_DDI_LANES_TC3, \
+ POWER_DOMAIN_PORT_DDI_LANES_TC4, \
+ POWER_DOMAIN_VGA, \
+ POWER_DOMAIN_AUDIO_PLAYBACK, \
+ POWER_DOMAIN_AUX_C, \
+ POWER_DOMAIN_AUX_D, \
+ POWER_DOMAIN_AUX_E, \
+ POWER_DOMAIN_AUX_USBC1, \
+ POWER_DOMAIN_AUX_USBC2, \
+ POWER_DOMAIN_AUX_USBC3, \
+ POWER_DOMAIN_AUX_USBC4, \
+ POWER_DOMAIN_AUX_TBT1, \
+ POWER_DOMAIN_AUX_TBT2, \
+ POWER_DOMAIN_AUX_TBT3, \
+ POWER_DOMAIN_AUX_TBT4
+
+I915_DECL_PW_DOMAINS(xelpd_pwdoms_pw_2,
+ XELPD_PW_2_POWER_DOMAINS,
+ POWER_DOMAIN_INIT);
+
+/*
+ * XELPD PW_1/PG_1 domains (under HW/DMC control):
+ * - DBUF function (registers are in PW0)
+ * - Transcoder A
+ * - DDI_A and DDI_B
+ *
+ * XELPD PW_0/PG_0 domains (under HW/DMC control):
+ * - PCI
+ * - Clocks except port PLL
+ * - Shared functions:
+ * * interrupts except pipe interrupts
+ * * MBus except PIPE_MBUS_DBOX_CTL
+ * * DBUF registers
+ * - Central power except FBC
+ * - Top-level GTC (DDI-level GTC is in the well associated with the DDI)
+ */
+
+I915_DECL_PW_DOMAINS(xelpd_pwdoms_dc_off,
+ XELPD_PW_2_POWER_DOMAINS,
+ POWER_DOMAIN_PORT_DSI,
+ POWER_DOMAIN_AUDIO_MMIO,
+ POWER_DOMAIN_AUX_A,
+ POWER_DOMAIN_AUX_B,
+ POWER_DOMAIN_MODESET,
+ POWER_DOMAIN_INIT);
+
+static const struct i915_power_well_desc xelpd_power_wells_main[] = {
+ {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("DC_off", &xelpd_pwdoms_dc_off,
+ .id = SKL_DISP_DC_OFF),
+ ),
+ .ops = &gen9_dc_off_power_well_ops,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_2", &xelpd_pwdoms_pw_2,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_2,
+ .id = SKL_DISP_PW_2),
+ ),
+ .ops = &hsw_power_well_ops,
+ .has_vga = true,
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_A", &xelpd_pwdoms_pw_a,
+ .hsw.idx = XELPD_PW_CTL_IDX_PW_A),
+ ),
+ .ops = &hsw_power_well_ops,
+ .irq_pipe_mask = BIT(PIPE_A),
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_B", &xelpd_pwdoms_pw_b,
+ .hsw.idx = XELPD_PW_CTL_IDX_PW_B),
+ ),
+ .ops = &hsw_power_well_ops,
+ .irq_pipe_mask = BIT(PIPE_B),
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_C", &xelpd_pwdoms_pw_c,
+ .hsw.idx = XELPD_PW_CTL_IDX_PW_C),
+ ),
+ .ops = &hsw_power_well_ops,
+ .irq_pipe_mask = BIT(PIPE_C),
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_D", &xelpd_pwdoms_pw_d,
+ .hsw.idx = XELPD_PW_CTL_IDX_PW_D),
+ ),
+ .ops = &hsw_power_well_ops,
+ .irq_pipe_mask = BIT(PIPE_D),
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("DDI_IO_A", &glk_pwdoms_ddi_io_a, .hsw.idx = ICL_PW_CTL_IDX_DDI_A),
+ I915_PW("DDI_IO_B", &glk_pwdoms_ddi_io_b, .hsw.idx = ICL_PW_CTL_IDX_DDI_B),
+ I915_PW("DDI_IO_C", &glk_pwdoms_ddi_io_c, .hsw.idx = ICL_PW_CTL_IDX_DDI_C),
+ I915_PW("DDI_IO_D", &icl_pwdoms_ddi_io_d, .hsw.idx = XELPD_PW_CTL_IDX_DDI_D),
+ I915_PW("DDI_IO_E", &icl_pwdoms_ddi_io_e, .hsw.idx = XELPD_PW_CTL_IDX_DDI_E),
+ I915_PW("DDI_IO_TC1", &tgl_pwdoms_ddi_io_tc1, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1),
+ I915_PW("DDI_IO_TC2", &tgl_pwdoms_ddi_io_tc2, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2),
+ I915_PW("DDI_IO_TC3", &tgl_pwdoms_ddi_io_tc3, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC3),
+ I915_PW("DDI_IO_TC4", &tgl_pwdoms_ddi_io_tc4, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC4),
+ ),
+ .ops = &icl_ddi_power_well_ops,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("AUX_A", &icl_pwdoms_aux_a, .hsw.idx = ICL_PW_CTL_IDX_AUX_A),
+ I915_PW("AUX_B", &icl_pwdoms_aux_b, .hsw.idx = ICL_PW_CTL_IDX_AUX_B),
+ I915_PW("AUX_C", &icl_pwdoms_aux_c, .hsw.idx = ICL_PW_CTL_IDX_AUX_C),
+ I915_PW("AUX_D", &icl_pwdoms_aux_d, .hsw.idx = XELPD_PW_CTL_IDX_AUX_D),
+ I915_PW("AUX_E", &icl_pwdoms_aux_e, .hsw.idx = XELPD_PW_CTL_IDX_AUX_E),
+ I915_PW("AUX_USBC1", &tgl_pwdoms_aux_usbc1, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1),
+ I915_PW("AUX_USBC2", &tgl_pwdoms_aux_usbc2, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2),
+ I915_PW("AUX_USBC3", &tgl_pwdoms_aux_usbc3, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC3),
+ I915_PW("AUX_USBC4", &tgl_pwdoms_aux_usbc4, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC4),
+ ),
+ .ops = &icl_aux_power_well_ops,
+ .fixed_enable_delay = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("AUX_TBT1", &icl_pwdoms_aux_tbt1, .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1),
+ I915_PW("AUX_TBT2", &icl_pwdoms_aux_tbt2, .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2),
+ I915_PW("AUX_TBT3", &icl_pwdoms_aux_tbt3, .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3),
+ I915_PW("AUX_TBT4", &icl_pwdoms_aux_tbt4, .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4),
+ ),
+ .ops = &icl_aux_power_well_ops,
+ .is_tc_tbt = true,
+ },
+};
+
+static const struct i915_power_well_desc_list xelpd_power_wells[] = {
+ I915_PW_DESCRIPTORS(i9xx_power_wells_always_on),
+ I915_PW_DESCRIPTORS(icl_power_wells_pw_1),
+ I915_PW_DESCRIPTORS(xelpd_power_wells_main),
+};
+
+/*
+ * MTL is based on XELPD power domains with the exception of power gating for:
+ * - DDI_IO (moved to PLL logic)
+ * - AUX and AUX_IO functionality and register access for USBC1-4 (PICA always-on)
+ */
+#define XELPDP_PW_2_POWER_DOMAINS \
+ XELPD_PW_B_POWER_DOMAINS, \
+ XELPD_PW_C_POWER_DOMAINS, \
+ XELPD_PW_D_POWER_DOMAINS, \
+ POWER_DOMAIN_AUDIO_PLAYBACK, \
+ POWER_DOMAIN_VGA, \
+ POWER_DOMAIN_PORT_DDI_LANES_TC1, \
+ POWER_DOMAIN_PORT_DDI_LANES_TC2, \
+ POWER_DOMAIN_PORT_DDI_LANES_TC3, \
+ POWER_DOMAIN_PORT_DDI_LANES_TC4
+
+I915_DECL_PW_DOMAINS(xelpdp_pwdoms_pw_2,
+ XELPDP_PW_2_POWER_DOMAINS,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(xelpdp_pwdoms_dc_off,
+ XELPDP_PW_2_POWER_DOMAINS,
+ POWER_DOMAIN_AUDIO_MMIO,
+ POWER_DOMAIN_MODESET,
+ POWER_DOMAIN_AUX_A,
+ POWER_DOMAIN_AUX_B,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(xelpdp_pwdoms_aux_tc1,
+ POWER_DOMAIN_AUX_USBC1,
+ POWER_DOMAIN_AUX_TBT1);
+
+I915_DECL_PW_DOMAINS(xelpdp_pwdoms_aux_tc2,
+ POWER_DOMAIN_AUX_USBC2,
+ POWER_DOMAIN_AUX_TBT2);
+
+I915_DECL_PW_DOMAINS(xelpdp_pwdoms_aux_tc3,
+ POWER_DOMAIN_AUX_USBC3,
+ POWER_DOMAIN_AUX_TBT3);
+
+I915_DECL_PW_DOMAINS(xelpdp_pwdoms_aux_tc4,
+ POWER_DOMAIN_AUX_USBC4,
+ POWER_DOMAIN_AUX_TBT4);
+
+static const struct i915_power_well_desc xelpdp_power_wells_main[] = {
+ {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("DC_off", &xelpdp_pwdoms_dc_off,
+ .id = SKL_DISP_DC_OFF),
+ ),
+ .ops = &gen9_dc_off_power_well_ops,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_2", &xelpdp_pwdoms_pw_2,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_2,
+ .id = SKL_DISP_PW_2),
+ ),
+ .ops = &hsw_power_well_ops,
+ .has_vga = true,
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_A", &xelpd_pwdoms_pw_a,
+ .hsw.idx = XELPD_PW_CTL_IDX_PW_A),
+ ),
+ .ops = &hsw_power_well_ops,
+ .irq_pipe_mask = BIT(PIPE_A),
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_B", &xelpd_pwdoms_pw_b,
+ .hsw.idx = XELPD_PW_CTL_IDX_PW_B),
+ ),
+ .ops = &hsw_power_well_ops,
+ .irq_pipe_mask = BIT(PIPE_B),
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_C", &xelpd_pwdoms_pw_c,
+ .hsw.idx = XELPD_PW_CTL_IDX_PW_C),
+ ),
+ .ops = &hsw_power_well_ops,
+ .irq_pipe_mask = BIT(PIPE_C),
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_D", &xelpd_pwdoms_pw_d,
+ .hsw.idx = XELPD_PW_CTL_IDX_PW_D),
+ ),
+ .ops = &hsw_power_well_ops,
+ .irq_pipe_mask = BIT(PIPE_D),
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("AUX_A", &icl_pwdoms_aux_a, .xelpdp.aux_ch = AUX_CH_A),
+ I915_PW("AUX_B", &icl_pwdoms_aux_b, .xelpdp.aux_ch = AUX_CH_B),
+ I915_PW("AUX_TC1", &xelpdp_pwdoms_aux_tc1, .xelpdp.aux_ch = AUX_CH_USBC1),
+ I915_PW("AUX_TC2", &xelpdp_pwdoms_aux_tc2, .xelpdp.aux_ch = AUX_CH_USBC2),
+ I915_PW("AUX_TC3", &xelpdp_pwdoms_aux_tc3, .xelpdp.aux_ch = AUX_CH_USBC3),
+ I915_PW("AUX_TC4", &xelpdp_pwdoms_aux_tc4, .xelpdp.aux_ch = AUX_CH_USBC4),
+ ),
+ .ops = &xelpdp_aux_power_well_ops,
+ },
+};
+
+static const struct i915_power_well_desc_list xelpdp_power_wells[] = {
+ I915_PW_DESCRIPTORS(i9xx_power_wells_always_on),
+ I915_PW_DESCRIPTORS(icl_power_wells_pw_1),
+ I915_PW_DESCRIPTORS(xelpdp_power_wells_main),
+};
+
+static void init_power_well_domains(const struct i915_power_well_instance *inst,
+ struct i915_power_well *power_well)
+{
+ int j;
+
+ if (!inst->domain_list)
+ return;
+
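+	/* An empty (but non-NULL) domain list marks a well spanning all power domains. */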
+ if (inst->domain_list->count == 0) {
+ bitmap_fill(power_well->domains.bits, POWER_DOMAIN_NUM);
+
+ return;
+ }
+
+ for (j = 0; j < inst->domain_list->count; j++)
+ set_bit(inst->domain_list->list[j], power_well->domains.bits);
+}
+
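+/*
+ * Iterate over all power well instances in a platform's descriptor tables:
+ * the outer loops walk the descriptor lists and the descriptors within
+ * them, the innermost loop the instances sharing a descriptor.
+ */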
+#define for_each_power_well_instance_in_desc_list(_desc_list, _desc_count, _desc, _inst) \
+ for ((_desc) = (_desc_list); (_desc) - (_desc_list) < (_desc_count); (_desc)++) \
+ for ((_inst) = (_desc)->instances->list; \
+ (_inst) - (_desc)->instances->list < (_desc)->instances->count; \
+ (_inst)++)
+
+#define for_each_power_well_instance(_desc_list, _desc_count, _descs, _desc, _inst) \
+ for ((_descs) = (_desc_list); \
+ (_descs) - (_desc_list) < (_desc_count); \
+ (_descs)++) \
+ for_each_power_well_instance_in_desc_list((_descs)->list, (_descs)->count, \
+ (_desc), (_inst))
+
+static int
+__set_power_wells(struct i915_power_domains *power_domains,
+ const struct i915_power_well_desc_list *power_well_descs,
+ int power_well_descs_sz)
+{
+ struct drm_i915_private *i915 = container_of(power_domains,
+ struct drm_i915_private,
+ display.power.domains);
+ u64 power_well_ids = 0;
+ const struct i915_power_well_desc_list *desc_list;
+ const struct i915_power_well_desc *desc;
+ const struct i915_power_well_instance *inst;
+ int power_well_count = 0;
+ int plt_idx = 0;
+
+ for_each_power_well_instance(power_well_descs, power_well_descs_sz, desc_list, desc, inst)
+ power_well_count++;
+
+ power_domains->power_well_count = power_well_count;
+ power_domains->power_wells =
+ kcalloc(power_well_count,
+ sizeof(*power_domains->power_wells),
+ GFP_KERNEL);
+ if (!power_domains->power_wells)
+ return -ENOMEM;
+
+ for_each_power_well_instance(power_well_descs, power_well_descs_sz, desc_list, desc, inst) {
+ struct i915_power_well *pw = &power_domains->power_wells[plt_idx];
+ enum i915_power_well_id id = inst->id;
+
+ pw->desc = desc;
+ drm_WARN_ON(&i915->drm,
+ overflows_type(inst - desc->instances->list, pw->instance_idx));
+ pw->instance_idx = inst - desc->instances->list;
+
+ init_power_well_domains(inst, pw);
+
+ plt_idx++;
+
+ if (id == DISP_PW_ID_NONE)
+ continue;
+
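+		/* Any non-NONE well ID must be unique and fit in the u64 tracking mask. */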
+ drm_WARN_ON(&i915->drm, id >= sizeof(power_well_ids) * 8);
+ drm_WARN_ON(&i915->drm, power_well_ids & BIT_ULL(id));
+ power_well_ids |= BIT_ULL(id);
+ }
+
+ return 0;
+}
+
+#define set_power_wells(power_domains, __power_well_descs) \
+ __set_power_wells(power_domains, __power_well_descs, \
+ ARRAY_SIZE(__power_well_descs))
+
+/**
+ * intel_display_power_map_init - initialize power domain -> power well mappings
+ * @power_domains: power domain state
+ *
+ * Creates all the power wells for the current platform, initializes the
+ * dynamic state for them and initializes the mapping of each power well to
+ * all the power domains the power well belongs to.
+ */
+int intel_display_power_map_init(struct i915_power_domains *power_domains)
+{
+ struct drm_i915_private *i915 = container_of(power_domains,
+ struct drm_i915_private,
+ display.power.domains);
+ /*
+ * The enabling order will be from lower to higher indexed wells,
+ * the disabling order is reversed.
+ */
+ if (!HAS_DISPLAY(i915)) {
+ power_domains->power_well_count = 0;
+ return 0;
+ }
+
+ if (DISPLAY_VER(i915) >= 14)
+ return set_power_wells(power_domains, xelpdp_power_wells);
+ else if (DISPLAY_VER(i915) >= 13)
+ return set_power_wells(power_domains, xelpd_power_wells);
+ else if (IS_DG1(i915))
+ return set_power_wells(power_domains, dg1_power_wells);
+ else if (IS_ALDERLAKE_S(i915))
+ return set_power_wells(power_domains, adls_power_wells);
+ else if (IS_ROCKETLAKE(i915))
+ return set_power_wells(power_domains, rkl_power_wells);
+ else if (DISPLAY_VER(i915) == 12)
+ return set_power_wells(power_domains, tgl_power_wells);
+ else if (DISPLAY_VER(i915) == 11)
+ return set_power_wells(power_domains, icl_power_wells);
+ else if (IS_GEMINILAKE(i915))
+ return set_power_wells(power_domains, glk_power_wells);
+ else if (IS_BROXTON(i915))
+ return set_power_wells(power_domains, bxt_power_wells);
+ else if (DISPLAY_VER(i915) == 9)
+ return set_power_wells(power_domains, skl_power_wells);
+ else if (IS_CHERRYVIEW(i915))
+ return set_power_wells(power_domains, chv_power_wells);
+ else if (IS_BROADWELL(i915))
+ return set_power_wells(power_domains, bdw_power_wells);
+ else if (IS_HASWELL(i915))
+ return set_power_wells(power_domains, hsw_power_wells);
+ else if (IS_VALLEYVIEW(i915))
+ return set_power_wells(power_domains, vlv_power_wells);
+ else if (IS_I830(i915))
+ return set_power_wells(power_domains, i830_power_wells);
+ else
+ return set_power_wells(power_domains, i9xx_power_wells);
+}
+
+/**
+ * intel_display_power_map_cleanup - clean up power domain -> power well mappings
+ * @power_domains: power domain state
+ *
+ * Cleans up all the state that was initialized by intel_display_power_map_init().
+ */
+void intel_display_power_map_cleanup(struct i915_power_domains *power_domains)
+{
+ kfree(power_domains->power_wells);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_map.h b/drivers/gpu/drm/i915/display/intel_display_power_map.h
new file mode 100644
index 000000000..da8f7055a
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_power_map.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __INTEL_DISPLAY_POWER_MAP_H__
+#define __INTEL_DISPLAY_POWER_MAP_H__
+
+struct i915_power_domains;
+
+int intel_display_power_map_init(struct i915_power_domains *power_domains);
+void intel_display_power_map_cleanup(struct i915_power_domains *power_domains);
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c
new file mode 100644
index 000000000..1d18eee56
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c
@@ -0,0 +1,1956 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "i915_irq.h"
+#include "intel_backlight_regs.h"
+#include "intel_combo_phy.h"
+#include "intel_combo_phy_regs.h"
+#include "intel_crt.h"
+#include "intel_de.h"
+#include "intel_display_power_well.h"
+#include "intel_display_types.h"
+#include "intel_dkl_phy.h"
+#include "intel_dmc.h"
+#include "intel_dpio_phy.h"
+#include "intel_dpll.h"
+#include "intel_hotplug.h"
+#include "intel_pcode.h"
+#include "intel_pps.h"
+#include "intel_tc.h"
+#include "intel_vga.h"
+#include "skl_watermark.h"
+#include "vlv_sideband.h"
+#include "vlv_sideband_reg.h"
+
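+/* The BIOS/driver/KVMR/debug request register set of a HSW-style power well. */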
+struct i915_power_well_regs {
+ i915_reg_t bios;
+ i915_reg_t driver;
+ i915_reg_t kvmr;
+ i915_reg_t debug;
+};
+
+struct i915_power_well_ops {
+ const struct i915_power_well_regs *regs;
+ /*
+ * Synchronize the well's hw state to match the current sw state, for
+ * example enable/disable it based on the current refcount. Called
+ * during driver init and resume time, possibly after first calling
+ * the enable/disable handlers.
+ */
+ void (*sync_hw)(struct drm_i915_private *i915,
+ struct i915_power_well *power_well);
+ /*
+ * Enable the well and resources that depend on it (for example
+ * interrupts located on the well). Called after the 0->1 refcount
+ * transition.
+ */
+ void (*enable)(struct drm_i915_private *i915,
+ struct i915_power_well *power_well);
+ /*
+ * Disable the well and resources that depend on it. Called after
+ * the 1->0 refcount transition.
+ */
+ void (*disable)(struct drm_i915_private *i915,
+ struct i915_power_well *power_well);
+ /* Returns the hw enabled state. */
+ bool (*is_enabled)(struct drm_i915_private *i915,
+ struct i915_power_well *power_well);
+};
+
+static const struct i915_power_well_instance *
+i915_power_well_instance(const struct i915_power_well *power_well)
+{
+ return &power_well->desc->instances->list[power_well->instance_idx];
+}
+
+struct i915_power_well *
+lookup_power_well(struct drm_i915_private *i915,
+ enum i915_power_well_id power_well_id)
+{
+ struct i915_power_well *power_well;
+
+ for_each_power_well(i915, power_well)
+ if (i915_power_well_instance(power_well)->id == power_well_id)
+ return power_well;
+
+ /*
+ * It's not feasible to add error checking code to the callers since
+ * this condition really shouldn't happen and it doesn't even make sense
+ * to abort things like display initialization sequences. Just return
+ * the first power well and hope the WARN gets reported so we can fix
+ * our driver.
+ */
+ drm_WARN(&i915->drm, 1,
+ "Power well %d not defined for this platform\n",
+ power_well_id);
+ return &i915->display.power.domains.power_wells[0];
+}
+
+void intel_power_well_enable(struct drm_i915_private *i915,
+ struct i915_power_well *power_well)
+{
+ drm_dbg_kms(&i915->drm, "enabling %s\n", intel_power_well_name(power_well));
+ power_well->desc->ops->enable(i915, power_well);
+ power_well->hw_enabled = true;
+}
+
+void intel_power_well_disable(struct drm_i915_private *i915,
+ struct i915_power_well *power_well)
+{
+ drm_dbg_kms(&i915->drm, "disabling %s\n", intel_power_well_name(power_well));
+ power_well->hw_enabled = false;
+ power_well->desc->ops->disable(i915, power_well);
+}
+
+void intel_power_well_sync_hw(struct drm_i915_private *i915,
+ struct i915_power_well *power_well)
+{
+ power_well->desc->ops->sync_hw(i915, power_well);
+ power_well->hw_enabled =
+ power_well->desc->ops->is_enabled(i915, power_well);
+}
+
+void intel_power_well_get(struct drm_i915_private *i915,
+ struct i915_power_well *power_well)
+{
+ if (!power_well->count++)
+ intel_power_well_enable(i915, power_well);
+}
+
+void intel_power_well_put(struct drm_i915_private *i915,
+ struct i915_power_well *power_well)
+{
+ drm_WARN(&i915->drm, !power_well->count,
+ "Use count on power well %s is already zero",
+ i915_power_well_instance(power_well)->name);
+
+ if (!--power_well->count)
+ intel_power_well_disable(i915, power_well);
+}
+
+bool intel_power_well_is_enabled(struct drm_i915_private *i915,
+ struct i915_power_well *power_well)
+{
+ return power_well->desc->ops->is_enabled(i915, power_well);
+}
+
+bool intel_power_well_is_enabled_cached(struct i915_power_well *power_well)
+{
+ return power_well->hw_enabled;
+}
+
+bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
+ enum i915_power_well_id power_well_id)
+{
+ struct i915_power_well *power_well;
+
+ power_well = lookup_power_well(dev_priv, power_well_id);
+
+ return intel_power_well_is_enabled(dev_priv, power_well);
+}
+
+bool intel_power_well_is_always_on(struct i915_power_well *power_well)
+{
+ return power_well->desc->always_on;
+}
+
+const char *intel_power_well_name(struct i915_power_well *power_well)
+{
+ return i915_power_well_instance(power_well)->name;
+}
+
+struct intel_power_domain_mask *intel_power_well_domains(struct i915_power_well *power_well)
+{
+ return &power_well->domains;
+}
+
+int intel_power_well_refcount(struct i915_power_well *power_well)
+{
+ return power_well->count;
+}
+
+/*
+ * Starting with Haswell, we have a "Power Down Well" that can be turned off
+ * when not needed anymore. We have 4 registers that can request the power well
+ * to be enabled, and it will only be disabled if none of the registers is
+ * requesting it to be enabled.
+ */
+static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv,
+ u8 irq_pipe_mask, bool has_vga)
+{
+ if (has_vga)
+ intel_vga_reset_io_mem(dev_priv);
+
+ if (irq_pipe_mask)
+ gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
+}
+
+static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
+ u8 irq_pipe_mask)
+{
+ if (irq_pipe_mask)
+ gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
+}
+
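+/*
+ * Map an AUX power well control index to its AUX channel: combo PHY AUX
+ * wells map to AUX_CH_A onwards, TBT AUX wells to AUX_CH_C onwards.
+ */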
+#define ICL_AUX_PW_TO_CH(pw_idx) \
+ ((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)
+
+#define ICL_TBT_AUX_PW_TO_CH(pw_idx) \
+ ((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C)
+
+static enum aux_ch icl_aux_pw_to_ch(const struct i915_power_well *power_well)
+{
+ int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
+
+ return power_well->desc->is_tc_tbt ? ICL_TBT_AUX_PW_TO_CH(pw_idx) :
+ ICL_AUX_PW_TO_CH(pw_idx);
+}
+
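+/*
+ * Find the digital port using the given AUX channel, if any. MST stream
+ * encoders are skipped in favor of the primary port's own encoder.
+ */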
+static struct intel_digital_port *
+aux_ch_to_digital_port(struct drm_i915_private *dev_priv,
+ enum aux_ch aux_ch)
+{
+ struct intel_digital_port *dig_port = NULL;
+ struct intel_encoder *encoder;
+
+ for_each_intel_encoder(&dev_priv->drm, encoder) {
+ /* We'll check the MST primary port */
+ if (encoder->type == INTEL_OUTPUT_DP_MST)
+ continue;
+
+ dig_port = enc_to_dig_port(encoder);
+ if (!dig_port)
+ continue;
+
+ if (dig_port->aux_ch != aux_ch) {
+ dig_port = NULL;
+ continue;
+ }
+
+ break;
+ }
+
+ return dig_port;
+}
+
+static enum phy icl_aux_pw_to_phy(struct drm_i915_private *i915,
+ const struct i915_power_well *power_well)
+{
+ enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well);
+ struct intel_digital_port *dig_port = aux_ch_to_digital_port(i915, aux_ch);
+
+ return intel_port_to_phy(i915, dig_port->base.port);
+}
+
+static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well,
+ bool timeout_expected)
+{
+ const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
+ int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
+
+ /*
+ * For some power wells we're not supposed to watch the status bit for
+ * an ack, but rather just wait a fixed amount of time and then
+ * proceed. This is only used on DG2.
+ */
+ if (IS_DG2(dev_priv) && power_well->desc->fixed_enable_delay) {
+ usleep_range(600, 1200);
+ return;
+ }
+
+ /* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
+ if (intel_de_wait_for_set(dev_priv, regs->driver,
+ HSW_PWR_WELL_CTL_STATE(pw_idx), 1)) {
+ drm_dbg_kms(&dev_priv->drm, "%s power well enable timeout\n",
+ intel_power_well_name(power_well));
+
+ drm_WARN_ON(&dev_priv->drm, !timeout_expected);
+	}
+}
+
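+/*
+ * Return a mask of the request registers currently asserting a request for
+ * this power well: bit 0 = BIOS, bit 1 = driver, bit 2 = KVMR (if present),
+ * bit 3 = debug.
+ */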
+static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
+ const struct i915_power_well_regs *regs,
+ int pw_idx)
+{
+ u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
+ u32 ret;
+
+ ret = intel_de_read(dev_priv, regs->bios) & req_mask ? 1 : 0;
+ ret |= intel_de_read(dev_priv, regs->driver) & req_mask ? 2 : 0;
+ if (regs->kvmr.reg)
+ ret |= intel_de_read(dev_priv, regs->kvmr) & req_mask ? 4 : 0;
+ ret |= intel_de_read(dev_priv, regs->debug) & req_mask ? 8 : 0;
+
+ return ret;
+}
+
+static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
+ int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
+ bool disabled;
+ u32 reqs;
+
+ /*
+ * Bspec doesn't require waiting for PWs to get disabled, but still do
+ * this for paranoia. The known cases where a PW will be forced on:
+ * - a KVMR request on any power well via the KVMR request register
+ * - a DMC request on PW1 and MISC_IO power wells via the BIOS and
+ * DEBUG request registers
+ * Skip the wait in case any of the request bits are set and print a
+ * diagnostic message.
+ */
+ wait_for((disabled = !(intel_de_read(dev_priv, regs->driver) &
+ HSW_PWR_WELL_CTL_STATE(pw_idx))) ||
+ (reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1);
+ if (disabled)
+ return;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
+ intel_power_well_name(power_well),
+ !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
+}
+
+static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
+ enum skl_power_gate pg)
+{
+ /* Timeout 5us for PG#0, for other PGs 1us */
+ drm_WARN_ON(&dev_priv->drm,
+ intel_de_wait_for_set(dev_priv, SKL_FUSE_STATUS,
+ SKL_FUSE_PG_DIST_STATUS(pg), 1));
+}
+
+static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
+ int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
+ u32 val;
+
+ if (power_well->desc->has_fuses) {
+ enum skl_power_gate pg;
+
+ pg = DISPLAY_VER(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
+ SKL_PW_CTL_IDX_TO_PG(pw_idx);
+
+ /* Wa_16013190616:adlp */
+ if (IS_ALDERLAKE_P(dev_priv) && pg == SKL_PG1)
+ intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, 0, DISABLE_FLR_SRC);
+
+ /*
+ * For PW1 we have to wait both for the PW0/PG0 fuse state
+ * before enabling the power well and PW1/PG1's own fuse
+ * state after the enabling. For all other power wells with
+ * fuses we only have to wait for that PW/PG's fuse state
+ * after the enabling.
+ */
+ if (pg == SKL_PG1)
+ gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0);
+ }
+
+ val = intel_de_read(dev_priv, regs->driver);
+ intel_de_write(dev_priv, regs->driver,
+ val | HSW_PWR_WELL_CTL_REQ(pw_idx));
+
+ hsw_wait_for_power_well_enable(dev_priv, power_well, false);
+
+ if (power_well->desc->has_fuses) {
+ enum skl_power_gate pg;
+
+ pg = DISPLAY_VER(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
+ SKL_PW_CTL_IDX_TO_PG(pw_idx);
+ gen9_wait_for_power_well_fuses(dev_priv, pg);
+ }
+
+ hsw_power_well_post_enable(dev_priv,
+ power_well->desc->irq_pipe_mask,
+ power_well->desc->has_vga);
+}
+
+static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
+ int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
+ u32 val;
+
+ hsw_power_well_pre_disable(dev_priv,
+ power_well->desc->irq_pipe_mask);
+
+ val = intel_de_read(dev_priv, regs->driver);
+ intel_de_write(dev_priv, regs->driver,
+ val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
+ hsw_wait_for_power_well_disable(dev_priv, power_well);
+}
+
+static void
+icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
+ int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
+ enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
+ u32 val;
+
+ drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
+
+ val = intel_de_read(dev_priv, regs->driver);
+ intel_de_write(dev_priv, regs->driver,
+ val | HSW_PWR_WELL_CTL_REQ(pw_idx));
+
+ if (DISPLAY_VER(dev_priv) < 12) {
+ val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy));
+ intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy),
+ val | ICL_LANE_ENABLE_AUX);
+ }
+
+ hsw_wait_for_power_well_enable(dev_priv, power_well, false);
+
+ /* Display WA #1178: icl */
+ if (pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
+ !intel_bios_is_port_edp(dev_priv, (enum port)phy)) {
+ val = intel_de_read(dev_priv, ICL_AUX_ANAOVRD1(pw_idx));
+ val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS;
+ intel_de_write(dev_priv, ICL_AUX_ANAOVRD1(pw_idx), val);
+ }
+}
+
+static void
+icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
+ int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
+ enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
+ u32 val;
+
+ drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
+
+ val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy));
+ intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy),
+ val & ~ICL_LANE_ENABLE_AUX);
+
+ val = intel_de_read(dev_priv, regs->driver);
+ intel_de_write(dev_priv, regs->driver,
+ val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
+
+ hsw_wait_for_power_well_disable(dev_priv, power_well);
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+
+static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well,
+ struct intel_digital_port *dig_port)
+{
+ if (drm_WARN_ON(&dev_priv->drm, !dig_port))
+ return;
+
+ if (DISPLAY_VER(dev_priv) == 11 && intel_tc_cold_requires_aux_pw(dig_port))
+ return;
+
+ drm_WARN_ON(&dev_priv->drm, !intel_tc_port_ref_held(dig_port));
+}
+
+#else
+
+static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well,
+ struct intel_digital_port *dig_port)
+{
+}
+
+#endif
+
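+/* Map a TGL AUX power well control index to its TypeC port number. */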
+#define TGL_AUX_PW_TO_TC_PORT(pw_idx) ((pw_idx) - TGL_PW_CTL_IDX_AUX_TC1)
+
+static void icl_tc_cold_exit(struct drm_i915_private *i915)
+{
+ int ret, tries = 0;
+
+ while (1) {
+ ret = snb_pcode_write_timeout(&i915->uncore, ICL_PCODE_EXIT_TCCOLD, 0,
+ 250, 1);
+ if (ret != -EAGAIN || ++tries == 3)
+ break;
+ msleep(1);
+ }
+
+ /* Spec states that TC cold exit can take up to 1ms to complete */
+ if (!ret)
+ msleep(1);
+
+	/* TODO: turn failure into an error as soon as i915 CI updates ICL IFWI */
+ drm_dbg_kms(&i915->drm, "TC cold block %s\n", ret ? "failed" :
+ "succeeded");
+}
+
+static void
+icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well);
+ struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch);
+ const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
+ bool is_tbt = power_well->desc->is_tc_tbt;
+ bool timeout_expected;
+ u32 val;
+
+ icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port);
+
+ val = intel_de_read(dev_priv, DP_AUX_CH_CTL(aux_ch));
+ val &= ~DP_AUX_CH_CTL_TBT_IO;
+ if (is_tbt)
+ val |= DP_AUX_CH_CTL_TBT_IO;
+ intel_de_write(dev_priv, DP_AUX_CH_CTL(aux_ch), val);
+
+ val = intel_de_read(dev_priv, regs->driver);
+ intel_de_write(dev_priv, regs->driver,
+ val | HSW_PWR_WELL_CTL_REQ(i915_power_well_instance(power_well)->hsw.idx));
+
+ /*
+ * An AUX timeout is expected if the TBT DP tunnel is down,
+	 * or when we need to enable AUX on a legacy TypeC port as part of the
+	 * TC-cold exit sequence.
+ */
+ timeout_expected = is_tbt || intel_tc_cold_requires_aux_pw(dig_port);
+ if (DISPLAY_VER(dev_priv) == 11 && intel_tc_cold_requires_aux_pw(dig_port))
+ icl_tc_cold_exit(dev_priv);
+
+ hsw_wait_for_power_well_enable(dev_priv, power_well, timeout_expected);
+
+ if (DISPLAY_VER(dev_priv) >= 12 && !is_tbt) {
+ enum tc_port tc_port;
+
+ tc_port = TGL_AUX_PW_TO_TC_PORT(i915_power_well_instance(power_well)->hsw.idx);
+
+ if (wait_for(intel_dkl_phy_read(dev_priv, DKL_CMN_UC_DW_27(tc_port), 2) &
+ DKL_CMN_UC_DW27_UC_HEALTH, 1))
+ drm_warn(&dev_priv->drm,
+ "Timeout waiting TC uC health\n");
+ }
+}
+
+static void
+icl_aux_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
+
+ if (intel_phy_is_tc(dev_priv, phy))
+ return icl_tc_phy_aux_power_well_enable(dev_priv, power_well);
+ else if (IS_ICELAKE(dev_priv))
+ return icl_combo_phy_aux_power_well_enable(dev_priv,
+ power_well);
+ else
+ return hsw_power_well_enable(dev_priv, power_well);
+}
+
+static void
+icl_aux_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
+
+ if (intel_phy_is_tc(dev_priv, phy))
+ return hsw_power_well_disable(dev_priv, power_well);
+ else if (IS_ICELAKE(dev_priv))
+ return icl_combo_phy_aux_power_well_disable(dev_priv,
+ power_well);
+ else
+ return hsw_power_well_disable(dev_priv, power_well);
+}
+
+/*
+ * We should only use the power well if we explicitly asked the hardware to
+ * enable it, so check if it's enabled and also check if we've requested it to
+ * be enabled.
+ */
+static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
+ enum i915_power_well_id id = i915_power_well_instance(power_well)->id;
+ int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
+ u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) |
+ HSW_PWR_WELL_CTL_STATE(pw_idx);
+ u32 val;
+
+ val = intel_de_read(dev_priv, regs->driver);
+
+ /*
+ * On GEN9 big core due to a DMC bug the driver's request bits for PW1
+	 * and the MISC_IO PW will not be restored, so check instead for the
+ * BIOS's own request bits, which are forced-on for these power wells
+ * when exiting DC5/6.
+ */
+ if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv) &&
+ (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
+ val |= intel_de_read(dev_priv, regs->bios);
+
+ return (val & mask) == mask;
+}
+
+static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
+{
+ drm_WARN_ONCE(&dev_priv->drm,
+ (intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC9),
+ "DC9 already programmed to be enabled.\n");
+ drm_WARN_ONCE(&dev_priv->drm,
+ intel_de_read(dev_priv, DC_STATE_EN) &
+ DC_STATE_EN_UPTO_DC5,
+ "DC5 still not disabled to enable DC9.\n");
+ drm_WARN_ONCE(&dev_priv->drm,
+ intel_de_read(dev_priv, HSW_PWR_WELL_CTL2) &
+ HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2),
+ "Power well 2 on.\n");
+ drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv),
+ "Interrupts not disabled yet.\n");
+
+ /*
+ * TODO: check for the following to verify the conditions to enter DC9
+ * state are satisfied:
+ * 1] Check relevant display engine registers to verify if mode set
+ * disable sequence was followed.
+ * 2] Check if display uninitialize sequence is initialized.
+ */
+}
+
+static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
+{
+ drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv),
+ "Interrupts not disabled yet.\n");
+ drm_WARN_ONCE(&dev_priv->drm,
+ intel_de_read(dev_priv, DC_STATE_EN) &
+ DC_STATE_EN_UPTO_DC5,
+ "DC5 still not disabled.\n");
+
+ /*
+ * TODO: check for the following to verify DC9 state was indeed
+ * entered before programming to disable it:
+ * 1] Check relevant display engine registers to verify if mode
+ * set disable sequence was followed.
+ * 2] Check if display uninitialize sequence is initialized.
+ */
+}
+
+static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
+ u32 state)
+{
+ int rewrites = 0;
+ int rereads = 0;
+ u32 v;
+
+ intel_de_write(dev_priv, DC_STATE_EN, state);
+
+	/* It has been observed that disabling the DC6 state sometimes
+	 * doesn't stick and the DMC keeps returning the old value. Make sure
+	 * the write really sticks enough times and also force a rewrite until
+	 * we are confident that the state is exactly what we want.
+ */
+ do {
+ v = intel_de_read(dev_priv, DC_STATE_EN);
+
+ if (v != state) {
+ intel_de_write(dev_priv, DC_STATE_EN, state);
+ rewrites++;
+ rereads = 0;
+ } else if (rereads++ > 5) {
+ break;
+ }
+
+ } while (rewrites < 100);
+
+ if (v != state)
+ drm_err(&dev_priv->drm,
+ "Writing dc state to 0x%x failed, now 0x%x\n",
+ state, v);
+
+	/* Most of the time we need at most one retry, avoid spamming the log */
+ if (rewrites > 1)
+ drm_dbg_kms(&dev_priv->drm,
+ "Rewrote dc state to 0x%x %d times\n",
+ state, rewrites);
+}
+
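+/* Mask of all the DC state enable bits valid on the current platform. */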
+static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
+{
+ u32 mask;
+
+ mask = DC_STATE_EN_UPTO_DC5;
+
+ if (DISPLAY_VER(dev_priv) >= 12)
+ mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6
+ | DC_STATE_EN_DC9;
+ else if (DISPLAY_VER(dev_priv) == 11)
+ mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9;
+ else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ mask |= DC_STATE_EN_DC9;
+ else
+ mask |= DC_STATE_EN_UPTO_DC6;
+
+ return mask;
+}
+
+void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
+{
+ u32 val;
+
+ if (!HAS_DISPLAY(dev_priv))
+ return;
+
+ val = intel_de_read(dev_priv, DC_STATE_EN) & gen9_dc_mask(dev_priv);
+
+ drm_dbg_kms(&dev_priv->drm,
+ "Resetting DC state tracking from %02x to %02x\n",
+ dev_priv->display.dmc.dc_state, val);
+ dev_priv->display.dmc.dc_state = val;
+}
+
+/**
+ * gen9_set_dc_state - set target display C power state
+ * @dev_priv: i915 device instance
+ * @state: target DC power state
+ * - DC_STATE_DISABLE
+ * - DC_STATE_EN_UPTO_DC5
+ * - DC_STATE_EN_UPTO_DC6
+ * - DC_STATE_EN_DC9
+ *
+ * Signal to DMC firmware/HW the target DC power state passed in @state.
+ * DMC/HW can turn off individual display clocks and power rails when entering
+ * a deeper DC power state (higher in number) and turns these back when exiting
+ * that state to a shallower power state (lower in number). The HW will decide
+ * when to actually enter a given state on an on-demand basis, for instance
+ * depending on the active state of display pipes. The state of display
+ * registers backed by affected power rails is saved/restored as needed.
+ *
+ * Based on the above, enabling a deeper DC power state is asynchronous wrt.
+ * the HW actually entering it. Disabling a deeper power state is synchronous:
+ * for instance setting %DC_STATE_DISABLE won't complete until all HW resources
+ * are turned back on and register state is restored. This is guaranteed by the
+ * MMIO write to DC_STATE_EN blocking until the state is restored.
+ */
+void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
+{
+ u32 val;
+ u32 mask;
+
+ if (!HAS_DISPLAY(dev_priv))
+ return;
+
+ if (drm_WARN_ON_ONCE(&dev_priv->drm,
+ state & ~dev_priv->display.dmc.allowed_dc_mask))
+ state &= dev_priv->display.dmc.allowed_dc_mask;
+
+ val = intel_de_read(dev_priv, DC_STATE_EN);
+ mask = gen9_dc_mask(dev_priv);
+ drm_dbg_kms(&dev_priv->drm, "Setting DC state from %02x to %02x\n",
+ val & mask, state);
+
+ /* Check if DMC is ignoring our DC state requests */
+ if ((val & mask) != dev_priv->display.dmc.dc_state)
+ drm_err(&dev_priv->drm, "DC state mismatch (0x%x -> 0x%x)\n",
+ dev_priv->display.dmc.dc_state, val & mask);
+
+ val &= ~mask;
+ val |= state;
+
+ gen9_write_dc_state(dev_priv, val);
+
+ dev_priv->display.dmc.dc_state = val & mask;
+}
+
+static void tgl_enable_dc3co(struct drm_i915_private *dev_priv)
+{
+ drm_dbg_kms(&dev_priv->drm, "Enabling DC3CO\n");
+ gen9_set_dc_state(dev_priv, DC_STATE_EN_DC3CO);
+}
+
+static void tgl_disable_dc3co(struct drm_i915_private *dev_priv)
+{
+ u32 val;
+
+ drm_dbg_kms(&dev_priv->drm, "Disabling DC3CO\n");
+ val = intel_de_read(dev_priv, DC_STATE_EN);
+ val &= ~DC_STATE_DC3CO_STATUS;
+ intel_de_write(dev_priv, DC_STATE_EN, val);
+ gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+	/*
+	 * Wait for the 200us DC3CO exit time, see Bspec 49196.
+	 */
+ usleep_range(200, 210);
+}
+
+static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
+{
+ enum i915_power_well_id high_pg;
+
+ /* Power wells at this level and above must be disabled for DC5 entry */
+ if (DISPLAY_VER(dev_priv) == 12)
+ high_pg = ICL_DISP_PW_3;
+ else
+ high_pg = SKL_DISP_PW_2;
+
+ drm_WARN_ONCE(&dev_priv->drm,
+ intel_display_power_well_is_enabled(dev_priv, high_pg),
+ "Power wells above platform's DC5 limit still enabled.\n");
+
+ drm_WARN_ONCE(&dev_priv->drm,
+ (intel_de_read(dev_priv, DC_STATE_EN) &
+ DC_STATE_EN_UPTO_DC5),
+ "DC5 already programmed to be enabled.\n");
+ assert_rpm_wakelock_held(&dev_priv->runtime_pm);
+
+ assert_dmc_loaded(dev_priv);
+}
+
+void gen9_enable_dc5(struct drm_i915_private *dev_priv)
+{
+ assert_can_enable_dc5(dev_priv);
+
+ drm_dbg_kms(&dev_priv->drm, "Enabling DC5\n");
+
+ /* Wa Display #1183: skl,kbl,cfl */
+ if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv))
+ intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1,
+ intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT);
+
+ gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
+}
+
+static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
+{
+ drm_WARN_ONCE(&dev_priv->drm,
+ intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
+ "Backlight is not disabled.\n");
+ drm_WARN_ONCE(&dev_priv->drm,
+ (intel_de_read(dev_priv, DC_STATE_EN) &
+ DC_STATE_EN_UPTO_DC6),
+ "DC6 already programmed to be enabled.\n");
+
+ assert_dmc_loaded(dev_priv);
+}
+
+void skl_enable_dc6(struct drm_i915_private *dev_priv)
+{
+ assert_can_enable_dc6(dev_priv);
+
+ drm_dbg_kms(&dev_priv->drm, "Enabling DC6\n");
+
+ /* Wa Display #1183: skl,kbl,cfl */
+ if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv))
+ intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1,
+ intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT);
+
+ gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
+}
+
+void bxt_enable_dc9(struct drm_i915_private *dev_priv)
+{
+ assert_can_enable_dc9(dev_priv);
+
+ drm_dbg_kms(&dev_priv->drm, "Enabling DC9\n");
+ /*
+ * Power sequencer reset is not needed on
+ * platforms with South Display Engine on PCH,
+ * because PPS registers are always on.
+ */
+ if (!HAS_PCH_SPLIT(dev_priv))
+ intel_pps_reset_all(dev_priv);
+ gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
+}
+
+void bxt_disable_dc9(struct drm_i915_private *dev_priv)
+{
+ assert_can_disable_dc9(dev_priv);
+
+ drm_dbg_kms(&dev_priv->drm, "Disabling DC9\n");
+
+ gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+
+ intel_pps_unlock_regs_wa(dev_priv);
+}
+
+static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
+ int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
+ u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
+ u32 bios_req = intel_de_read(dev_priv, regs->bios);
+
+ /* Take over the request bit if set by BIOS. */
+ if (bios_req & mask) {
+ u32 drv_req = intel_de_read(dev_priv, regs->driver);
+
+ if (!(drv_req & mask))
+ intel_de_write(dev_priv, regs->driver, drv_req | mask);
+ intel_de_write(dev_priv, regs->bios, bios_req & ~mask);
+ }
+}
+
+static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ bxt_ddi_phy_init(dev_priv, i915_power_well_instance(power_well)->bxt.phy);
+}
+
+static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ bxt_ddi_phy_uninit(dev_priv, i915_power_well_instance(power_well)->bxt.phy);
+}
+
+static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ return bxt_ddi_phy_is_enabled(dev_priv, i915_power_well_instance(power_well)->bxt.phy);
+}
+
+static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
+{
+ struct i915_power_well *power_well;
+
+ power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A);
+ if (intel_power_well_refcount(power_well) > 0)
+ bxt_ddi_phy_verify_state(dev_priv, i915_power_well_instance(power_well)->bxt.phy);
+
+ power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
+ if (intel_power_well_refcount(power_well) > 0)
+ bxt_ddi_phy_verify_state(dev_priv, i915_power_well_instance(power_well)->bxt.phy);
+
+ if (IS_GEMINILAKE(dev_priv)) {
+ power_well = lookup_power_well(dev_priv,
+ GLK_DISP_PW_DPIO_CMN_C);
+ if (intel_power_well_refcount(power_well) > 0)
+ bxt_ddi_phy_verify_state(dev_priv,
+ i915_power_well_instance(power_well)->bxt.phy);
+ }
+}
+
+static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ return ((intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC3CO) == 0 &&
+ (intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0);
+}
+
+static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
+{
+ u8 hw_enabled_dbuf_slices = intel_enabled_dbuf_slices_mask(dev_priv);
+ u8 enabled_dbuf_slices = dev_priv->display.dbuf.enabled_slices;
+
+ drm_WARN(&dev_priv->drm,
+ hw_enabled_dbuf_slices != enabled_dbuf_slices,
+ "Unexpected DBuf power power state (0x%08x, expected 0x%08x)\n",
+ hw_enabled_dbuf_slices,
+ enabled_dbuf_slices);
+}
+
+void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
+{
+ struct intel_cdclk_config cdclk_config = {};
+
+ if (dev_priv->display.dmc.target_dc_state == DC_STATE_EN_DC3CO) {
+ tgl_disable_dc3co(dev_priv);
+ return;
+ }
+
+ gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+
+ if (!HAS_DISPLAY(dev_priv))
+ return;
+
+ intel_cdclk_get_cdclk(dev_priv, &cdclk_config);
+ /* Can't read out voltage_level so can't use intel_cdclk_changed() */
+ drm_WARN_ON(&dev_priv->drm,
+ intel_cdclk_needs_modeset(&dev_priv->display.cdclk.hw,
+ &cdclk_config));
+
+ gen9_assert_dbuf_enabled(dev_priv);
+
+ if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ bxt_verify_ddi_phy_power_wells(dev_priv);
+
+ if (DISPLAY_VER(dev_priv) >= 11)
+ /*
+ * DMC retains HW context only for port A, the other combo
+ * PHY's HW context for port B is lost after DC transitions,
+ * so we need to restore it manually.
+ */
+ intel_combo_phy_init(dev_priv);
+}
+
+static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ gen9_disable_dc_states(dev_priv);
+}
+
+static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ if (!intel_dmc_has_payload(dev_priv))
+ return;
+
+ switch (dev_priv->display.dmc.target_dc_state) {
+ case DC_STATE_EN_DC3CO:
+ tgl_enable_dc3co(dev_priv);
+ break;
+ case DC_STATE_EN_UPTO_DC6:
+ skl_enable_dc6(dev_priv);
+ break;
+ case DC_STATE_EN_UPTO_DC5:
+ gen9_enable_dc5(dev_priv);
+ break;
+ }
+}
+
+static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+}
+
+static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+}
+
+static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ return true;
+}
+
+static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ if ((intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0)
+ i830_enable_pipe(dev_priv, PIPE_A);
+ if ((intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0)
+ i830_enable_pipe(dev_priv, PIPE_B);
+}
+
+static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ i830_disable_pipe(dev_priv, PIPE_B);
+ i830_disable_pipe(dev_priv, PIPE_A);
+}
+
+static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ return intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE &&
+ intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
+}
+
+static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ if (intel_power_well_refcount(power_well) > 0)
+ i830_pipes_power_well_enable(dev_priv, power_well);
+ else
+ i830_pipes_power_well_disable(dev_priv, power_well);
+}
+
+static void vlv_set_power_well(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well, bool enable)
+{
+ int pw_idx = i915_power_well_instance(power_well)->vlv.idx;
+ u32 mask;
+ u32 state;
+ u32 ctrl;
+
+ mask = PUNIT_PWRGT_MASK(pw_idx);
+ state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) :
+ PUNIT_PWRGT_PWR_GATE(pw_idx);
+
+ vlv_punit_get(dev_priv);
+
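+	/* The Punit reports the current well state in PUNIT_REG_PWRGT_STATUS. */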
+#define COND \
+ ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
+
+ if (COND)
+ goto out;
+
+ ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
+ ctrl &= ~mask;
+ ctrl |= state;
+ vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
+
+ if (wait_for(COND, 100))
+ drm_err(&dev_priv->drm,
+ "timeout setting power well state %08x (%08x)\n",
+ state,
+ vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
+
+#undef COND
+
+out:
+ vlv_punit_put(dev_priv);
+}
+
+static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ vlv_set_power_well(dev_priv, power_well, true);
+}
+
+static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ vlv_set_power_well(dev_priv, power_well, false);
+}
+
+static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ int pw_idx = i915_power_well_instance(power_well)->vlv.idx;
+ bool enabled = false;
+ u32 mask;
+ u32 state;
+ u32 ctrl;
+
+ mask = PUNIT_PWRGT_MASK(pw_idx);
+ ctrl = PUNIT_PWRGT_PWR_ON(pw_idx);
+
+ vlv_punit_get(dev_priv);
+
+ state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
+ /*
+ * We only ever set the power-on and power-gate states, anything
+ * else is unexpected.
+ */
+ drm_WARN_ON(&dev_priv->drm, state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
+ state != PUNIT_PWRGT_PWR_GATE(pw_idx));
+ if (state == ctrl)
+ enabled = true;
+
+ /*
+ * A transient state at this point would mean some unexpected party
+ * is poking at the power controls too.
+ */
+ ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
+ drm_WARN_ON(&dev_priv->drm, ctrl != state);
+
+ vlv_punit_put(dev_priv);
+
+ return enabled;
+}
+
+static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
+{
+ u32 val;
+
+ /*
+ * On driver load, a pipe may be active and driving a DSI display.
+ * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
+ * (and never recovering) in this case. intel_dsi_post_disable() will
+ * clear it when we turn off the display.
+ */
+ val = intel_de_read(dev_priv, DSPCLK_GATE_D(dev_priv));
+ val &= DPOUNIT_CLOCK_GATE_DISABLE;
+ val |= VRHUNIT_CLOCK_GATE_DISABLE;
+ intel_de_write(dev_priv, DSPCLK_GATE_D(dev_priv), val);
+
+ /*
+ * Disable trickle feed and enable pnd deadline calculation
+ */
+ intel_de_write(dev_priv, MI_ARB_VLV,
+ MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
+ intel_de_write(dev_priv, CBR1_VLV, 0);
+
+ drm_WARN_ON(&dev_priv->drm, RUNTIME_INFO(dev_priv)->rawclk_freq == 0);
+ intel_de_write(dev_priv, RAWCLK_FREQ_VLV,
+ DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq,
+ 1000));
+}
+
+static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
+{
+ struct intel_encoder *encoder;
+ enum pipe pipe;
+
+ /*
+ * Enable the CRI clock source so we can get at the
+ * display and the reference clock for VGA
+ * hotplug / manual detection. Supposedly DSI also
+ * needs the ref clock up and running.
+ *
+ * CHV DPLL B/C have some issues if VGA mode is enabled.
+ */
+ for_each_pipe(dev_priv, pipe) {
+ u32 val = intel_de_read(dev_priv, DPLL(pipe));
+
+ val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
+ if (pipe != PIPE_A)
+ val |= DPLL_INTEGRATED_CRI_CLK_VLV;
+
+ intel_de_write(dev_priv, DPLL(pipe), val);
+ }
+
+ vlv_init_display_clock_gating(dev_priv);
+
+ spin_lock_irq(&dev_priv->irq_lock);
+ valleyview_enable_display_irqs(dev_priv);
+ spin_unlock_irq(&dev_priv->irq_lock);
+
+ /*
+ * During driver initialization/resume we can avoid restoring the
+ * part of the HW/SW state that will be inited anyway explicitly.
+ */
+ if (dev_priv->display.power.domains.initializing)
+ return;
+
+ intel_hpd_init(dev_priv);
+ intel_hpd_poll_disable(dev_priv);
+
+ /* Re-enable the ADPA, if we have one */
+ for_each_intel_encoder(&dev_priv->drm, encoder) {
+ if (encoder->type == INTEL_OUTPUT_ANALOG)
+ intel_crt_reset(&encoder->base);
+ }
+
+ intel_vga_redisable_power_on(dev_priv);
+
+ intel_pps_unlock_regs_wa(dev_priv);
+}
+
+static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
+{
+ spin_lock_irq(&dev_priv->irq_lock);
+ valleyview_disable_display_irqs(dev_priv);
+ spin_unlock_irq(&dev_priv->irq_lock);
+
+ /* make sure we're done processing display irqs */
+ intel_synchronize_irq(dev_priv);
+
+ intel_pps_reset_all(dev_priv);
+
+	/* Prevent us from re-enabling polling by accident in late suspend */
+ if (!dev_priv->drm.dev->power.is_suspended)
+ intel_hpd_poll_enable(dev_priv);
+}
+
+static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ vlv_set_power_well(dev_priv, power_well, true);
+
+ vlv_display_power_well_init(dev_priv);
+}
+
+static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ vlv_display_power_well_deinit(dev_priv);
+
+ vlv_set_power_well(dev_priv, power_well, false);
+}
+
+static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ /* since ref/cri clock was enabled */
+ udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
+
+ vlv_set_power_well(dev_priv, power_well, true);
+
+ /*
+ * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
+ * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
+ * a. GUnit 0x2110 bit[0] set to 1 (def 0)
+ * b. The other bits such as sfr settings / modesel may all
+ * be set to 0.
+ *
+ * This should only be done on init and resume from S3 with
+ * both PLLs disabled, or we risk losing DPIO and PLL
+ * synchronization.
+ */
+ intel_de_write(dev_priv, DPIO_CTL,
+ intel_de_read(dev_priv, DPIO_CTL) | DPIO_CMNRST);
+}
+
+static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ enum pipe pipe;
+
+ for_each_pipe(dev_priv, pipe)
+ assert_pll_disabled(dev_priv, pipe);
+
+ /* Assert common reset */
+ intel_de_write(dev_priv, DPIO_CTL,
+ intel_de_read(dev_priv, DPIO_CTL) & ~DPIO_CMNRST);
+
+ vlv_set_power_well(dev_priv, power_well, false);
+}
+
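+/* True iff all the bits in @bits are set in @val. */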
+#define BITS_SET(val, bits) (((val) & (bits)) == (bits))
+
+static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
+{
+ struct i915_power_well *cmn_bc =
+ lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
+ struct i915_power_well *cmn_d =
+ lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
+ u32 phy_control = dev_priv->display.power.chv_phy_control;
+ u32 phy_status = 0;
+ u32 phy_status_mask = 0xffffffff;
+
+ /*
+	 * The BIOS can leave the PHY in some weird state
+ * where it doesn't fully power down some parts.
+ * Disable the asserts until the PHY has been fully
+ * reset (ie. the power well has been disabled at
+ * least once).
+ */
+ if (!dev_priv->display.power.chv_phy_assert[DPIO_PHY0])
+ phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
+ PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
+ PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
+ PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
+ PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
+ PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));
+
+ if (!dev_priv->display.power.chv_phy_assert[DPIO_PHY1])
+ phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
+ PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
+ PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));
+
+ if (intel_power_well_is_enabled(dev_priv, cmn_bc)) {
+ phy_status |= PHY_POWERGOOD(DPIO_PHY0);
+
+ /* this assumes override is only used to enable lanes */
+ if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
+ phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);
+
+ if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
+ phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);
+
+ /* CL1 is on whenever anything is on in either channel */
+ if (BITS_SET(phy_control,
+ PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
+ PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
+ phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);
+
+ /*
+ * The DPLLB check accounts for the pipe B + port A usage
+ * with CL2 powered up but all the lanes in the second channel
+ * powered down.
+ */
+ if (BITS_SET(phy_control,
+ PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
+ (intel_de_read(dev_priv, DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
+ phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);
+
+ if (BITS_SET(phy_control,
+ PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
+ phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
+ if (BITS_SET(phy_control,
+ PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
+ phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);
+
+ if (BITS_SET(phy_control,
+ PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
+ phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
+ if (BITS_SET(phy_control,
+ PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
+ phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
+ }
+
+ if (intel_power_well_is_enabled(dev_priv, cmn_d)) {
+ phy_status |= PHY_POWERGOOD(DPIO_PHY1);
+
+ /* this assumes override is only used to enable lanes */
+ if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
+ phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);
+
+ if (BITS_SET(phy_control,
+ PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
+ phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);
+
+ if (BITS_SET(phy_control,
+ PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
+ phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
+ if (BITS_SET(phy_control,
+ PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
+ phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
+ }
+
+ phy_status &= phy_status_mask;
+
+ /*
+ * The PHY may be busy with some initial calibration and whatnot,
+ * so the power state can take a while to actually change.
+ */
+ if (intel_de_wait_for_register(dev_priv, DISPLAY_PHY_STATUS,
+ phy_status_mask, phy_status, 10))
+ drm_err(&dev_priv->drm,
+ "Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
+ intel_de_read(dev_priv, DISPLAY_PHY_STATUS) & phy_status_mask,
+ phy_status, dev_priv->display.power.chv_phy_control);
+}
+
+#undef BITS_SET
+
+static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ enum i915_power_well_id id = i915_power_well_instance(power_well)->id;
+ enum dpio_phy phy;
+ enum pipe pipe;
+ u32 tmp;
+
+ drm_WARN_ON_ONCE(&dev_priv->drm,
+ id != VLV_DISP_PW_DPIO_CMN_BC &&
+ id != CHV_DISP_PW_DPIO_CMN_D);
+
+ if (id == VLV_DISP_PW_DPIO_CMN_BC) {
+ pipe = PIPE_A;
+ phy = DPIO_PHY0;
+ } else {
+ pipe = PIPE_C;
+ phy = DPIO_PHY1;
+ }
+
+ /* since ref/cri clock was enabled */
+ udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
+ vlv_set_power_well(dev_priv, power_well, true);
+
+ /* Poll for phypwrgood signal */
+ if (intel_de_wait_for_set(dev_priv, DISPLAY_PHY_STATUS,
+ PHY_POWERGOOD(phy), 1))
+		drm_err(&dev_priv->drm, "Display PHY %d is not powered up\n",
+ phy);
+
+ vlv_dpio_get(dev_priv);
+
+ /* Enable dynamic power down */
+ tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
+ tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
+ DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
+ vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);
+
+ if (id == VLV_DISP_PW_DPIO_CMN_BC) {
+ tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
+ tmp |= DPIO_DYNPWRDOWNEN_CH1;
+ vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
+ } else {
+ /*
+		 * Force the non-existent CL2 off. BXT does this
+ * too, so maybe it saves some power even though
+ * CL2 doesn't exist?
+ */
+ tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
+ tmp |= DPIO_CL2_LDOFUSE_PWRENB;
+ vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
+ }
+
+ vlv_dpio_put(dev_priv);
+
+ dev_priv->display.power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
+ intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
+ dev_priv->display.power.chv_phy_control);
+
+ drm_dbg_kms(&dev_priv->drm,
+ "Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
+ phy, dev_priv->display.power.chv_phy_control);
+
+ assert_chv_phy_status(dev_priv);
+}
+
+static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ enum i915_power_well_id id = i915_power_well_instance(power_well)->id;
+ enum dpio_phy phy;
+
+ drm_WARN_ON_ONCE(&dev_priv->drm,
+ id != VLV_DISP_PW_DPIO_CMN_BC &&
+ id != CHV_DISP_PW_DPIO_CMN_D);
+
+ if (id == VLV_DISP_PW_DPIO_CMN_BC) {
+ phy = DPIO_PHY0;
+ assert_pll_disabled(dev_priv, PIPE_A);
+ assert_pll_disabled(dev_priv, PIPE_B);
+ } else {
+ phy = DPIO_PHY1;
+ assert_pll_disabled(dev_priv, PIPE_C);
+ }
+
+ dev_priv->display.power.chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
+ intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
+ dev_priv->display.power.chv_phy_control);
+
+ vlv_set_power_well(dev_priv, power_well, false);
+
+ drm_dbg_kms(&dev_priv->drm,
+ "Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
+ phy, dev_priv->display.power.chv_phy_control);
+
+ /* PHY is fully reset now, so we can enable the PHY state asserts */
+ dev_priv->display.power.chv_phy_assert[phy] = true;
+
+ assert_chv_phy_status(dev_priv);
+}
+
+static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
+ enum dpio_channel ch, bool override, unsigned int mask)
+{
+ enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
+ u32 reg, val, expected, actual;
+
+ /*
+	 * The BIOS can leave the PHY in some weird state
+ * where it doesn't fully power down some parts.
+ * Disable the asserts until the PHY has been fully
+ * reset (ie. the power well has been disabled at
+ * least once).
+ */
+ if (!dev_priv->display.power.chv_phy_assert[phy])
+ return;
+
+ if (ch == DPIO_CH0)
+ reg = _CHV_CMN_DW0_CH0;
+ else
+ reg = _CHV_CMN_DW6_CH1;
+
+ vlv_dpio_get(dev_priv);
+ val = vlv_dpio_read(dev_priv, pipe, reg);
+ vlv_dpio_put(dev_priv);
+
+ /*
+ * This assumes !override is only used when the port is disabled.
+ * All lanes should power down even without the override when
+ * the port is disabled.
+ */
+ if (!override || mask == 0xf) {
+ expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
+ /*
+ * If CH1 common lane is not active anymore
+ * (eg. for pipe B DPLL) the entire channel will
+ * shut down, which causes the common lane registers
+ * to read as 0. That means we can't actually check
+ * the lane power down status bits, but as the entire
+ * register reads as 0 it's a good indication that the
+ * channel is indeed entirely powered down.
+ */
+ if (ch == DPIO_CH1 && val == 0)
+ expected = 0;
+ } else if (mask != 0x0) {
+ expected = DPIO_ANYDL_POWERDOWN;
+ } else {
+ expected = 0;
+ }
+
+ if (ch == DPIO_CH0)
+ actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
+ else
+ actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
+ actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
+
+ drm_WARN(&dev_priv->drm, actual != expected,
+ "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
+ !!(actual & DPIO_ALLDL_POWERDOWN),
+ !!(actual & DPIO_ANYDL_POWERDOWN),
+ !!(expected & DPIO_ALLDL_POWERDOWN),
+ !!(expected & DPIO_ANYDL_POWERDOWN),
+ reg, val);
+}
+
+bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
+ enum dpio_channel ch, bool override)
+{
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
+ bool was_override;
+
+ mutex_lock(&power_domains->lock);
+
+ was_override = dev_priv->display.power.chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
+
+ if (override == was_override)
+ goto out;
+
+ if (override)
+ dev_priv->display.power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
+ else
+ dev_priv->display.power.chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
+
+ intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
+ dev_priv->display.power.chv_phy_control);
+
+ drm_dbg_kms(&dev_priv->drm,
+ "Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
+ phy, ch, dev_priv->display.power.chv_phy_control);
+
+ assert_chv_phy_status(dev_priv);
+
+out:
+ mutex_unlock(&power_domains->lock);
+
+ return was_override;
+}
+
+void chv_phy_powergate_lanes(struct intel_encoder *encoder,
+ bool override, unsigned int mask)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
+ enum dpio_phy phy = vlv_dig_port_to_phy(enc_to_dig_port(encoder));
+ enum dpio_channel ch = vlv_dig_port_to_channel(enc_to_dig_port(encoder));
+
+ mutex_lock(&power_domains->lock);
+
+ dev_priv->display.power.chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
+ dev_priv->display.power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);
+
+ if (override)
+ dev_priv->display.power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
+ else
+ dev_priv->display.power.chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
+
+ intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
+ dev_priv->display.power.chv_phy_control);
+
+ drm_dbg_kms(&dev_priv->drm,
+ "Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
+ phy, ch, mask, dev_priv->display.power.chv_phy_control);
+
+ assert_chv_phy_status(dev_priv);
+
+ assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);
+
+ mutex_unlock(&power_domains->lock);
+}
+
+static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ enum pipe pipe = PIPE_A;
+ bool enabled;
+ u32 state, ctrl;
+
+ vlv_punit_get(dev_priv);
+
+ state = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe);
+ /*
+ * We only ever set the power-on and power-gate states, anything
+ * else is unexpected.
+ */
+ drm_WARN_ON(&dev_priv->drm, state != DP_SSS_PWR_ON(pipe) &&
+ state != DP_SSS_PWR_GATE(pipe));
+ enabled = state == DP_SSS_PWR_ON(pipe);
+
+ /*
+ * A transient state at this point would mean some unexpected party
+ * is poking at the power controls too.
+ */
+ ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe);
+ drm_WARN_ON(&dev_priv->drm, ctrl << 16 != state);
+
+ vlv_punit_put(dev_priv);
+
+ return enabled;
+}
+
+static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well,
+ bool enable)
+{
+ enum pipe pipe = PIPE_A;
+ u32 state;
+ u32 ctrl;
+
+ state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
+
+ vlv_punit_get(dev_priv);
+
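+	/*
+	 * The punit acks the request by mirroring the requested state
+	 * into the status bits (DP_SSS), so poll until they match.
+	 */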
+#define COND \
+ ((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state)
+
+ if (COND)
+ goto out;
+
+ ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
+ ctrl &= ~DP_SSC_MASK(pipe);
+ ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
+ vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl);
+
+ if (wait_for(COND, 100))
+ drm_err(&dev_priv->drm,
+ "timeout setting power well state %08x (%08x)\n",
+ state,
+ vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM));
+
+#undef COND
+
+out:
+ vlv_punit_put(dev_priv);
+}
+
+static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
+ dev_priv->display.power.chv_phy_control);
+}
+
+static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ chv_set_pipe_power_well(dev_priv, power_well, true);
+
+ vlv_display_power_well_init(dev_priv);
+}
+
+static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ vlv_display_power_well_deinit(dev_priv);
+
+ chv_set_pipe_power_well(dev_priv, power_well, false);
+}
+
+static void
+tgl_tc_cold_request(struct drm_i915_private *i915, bool block)
+{
+ u8 tries = 0;
+ int ret;
+
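+	/*
+	 * Retry the request up to three times, sleeping 1 ms between
+	 * attempts, before giving up and logging an error.
+	 */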
+ while (1) {
+ u32 low_val;
+ u32 high_val = 0;
+
+ if (block)
+ low_val = TGL_PCODE_EXIT_TCCOLD_DATA_L_BLOCK_REQ;
+ else
+ low_val = TGL_PCODE_EXIT_TCCOLD_DATA_L_UNBLOCK_REQ;
+
+ /*
+		 * Spec states that we should time out the request after 200us,
+		 * but the function below will time out after 500us.
+ */
+ ret = snb_pcode_read(&i915->uncore, TGL_PCODE_TCCOLD, &low_val, &high_val);
+ if (ret == 0) {
+ if (block &&
+ (low_val & TGL_PCODE_EXIT_TCCOLD_DATA_L_EXIT_FAILED))
+ ret = -EIO;
+ else
+ break;
+ }
+
+ if (++tries == 3)
+ break;
+
+ msleep(1);
+ }
+
+ if (ret)
+ drm_err(&i915->drm, "TC cold %sblock failed\n",
+ block ? "" : "un");
+ else
+ drm_dbg_kms(&i915->drm, "TC cold %sblock succeeded\n",
+ block ? "" : "un");
+}
+
+static void
+tgl_tc_cold_off_power_well_enable(struct drm_i915_private *i915,
+ struct i915_power_well *power_well)
+{
+ tgl_tc_cold_request(i915, true);
+}
+
+static void
+tgl_tc_cold_off_power_well_disable(struct drm_i915_private *i915,
+ struct i915_power_well *power_well)
+{
+ tgl_tc_cold_request(i915, false);
+}
+
+static void
+tgl_tc_cold_off_power_well_sync_hw(struct drm_i915_private *i915,
+ struct i915_power_well *power_well)
+{
+ if (intel_power_well_refcount(power_well) > 0)
+ tgl_tc_cold_off_power_well_enable(i915, power_well);
+ else
+ tgl_tc_cold_off_power_well_disable(i915, power_well);
+}
+
+static bool
+tgl_tc_cold_off_power_well_is_enabled(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ /*
+	 * Not the correct implementation, but there is no way to just
+	 * read the state from PCODE, so return the refcount to avoid
+	 * state mismatch errors.
+ */
+ return intel_power_well_refcount(power_well);
+}
+
+static void xelpdp_aux_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ enum aux_ch aux_ch = i915_power_well_instance(power_well)->xelpdp.aux_ch;
+
+ intel_de_rmw(dev_priv, XELPDP_DP_AUX_CH_CTL(aux_ch),
+ XELPDP_DP_AUX_CH_CTL_POWER_REQUEST,
+ XELPDP_DP_AUX_CH_CTL_POWER_REQUEST);
+
+ /*
+ * The power status flag cannot be used to determine whether aux
+ * power wells have finished powering up. Instead we're
+ * expected to just wait a fixed 600us after raising the request
+ * bit.
+ */
+ usleep_range(600, 1200);
+}
+
+static void xelpdp_aux_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ enum aux_ch aux_ch = i915_power_well_instance(power_well)->xelpdp.aux_ch;
+
+ intel_de_rmw(dev_priv, XELPDP_DP_AUX_CH_CTL(aux_ch),
+ XELPDP_DP_AUX_CH_CTL_POWER_REQUEST,
+ 0);
+ usleep_range(10, 30);
+}
+
+static bool xelpdp_aux_power_well_enabled(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ enum aux_ch aux_ch = i915_power_well_instance(power_well)->xelpdp.aux_ch;
+
+ return intel_de_read(dev_priv, XELPDP_DP_AUX_CH_CTL(aux_ch)) &
+ XELPDP_DP_AUX_CH_CTL_POWER_STATUS;
+}
+
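+/*
+ * Platform-specific vtables: each power well descriptor points at one
+ * of these ops structs, letting the generic power domain code enable,
+ * disable and query wells without knowing the platform details.
+ */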
+const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
+ .sync_hw = i9xx_power_well_sync_hw_noop,
+ .enable = i9xx_always_on_power_well_noop,
+ .disable = i9xx_always_on_power_well_noop,
+ .is_enabled = i9xx_always_on_power_well_enabled,
+};
+
+const struct i915_power_well_ops chv_pipe_power_well_ops = {
+ .sync_hw = chv_pipe_power_well_sync_hw,
+ .enable = chv_pipe_power_well_enable,
+ .disable = chv_pipe_power_well_disable,
+ .is_enabled = chv_pipe_power_well_enabled,
+};
+
+const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
+ .sync_hw = i9xx_power_well_sync_hw_noop,
+ .enable = chv_dpio_cmn_power_well_enable,
+ .disable = chv_dpio_cmn_power_well_disable,
+ .is_enabled = vlv_power_well_enabled,
+};
+
+const struct i915_power_well_ops i830_pipes_power_well_ops = {
+ .sync_hw = i830_pipes_power_well_sync_hw,
+ .enable = i830_pipes_power_well_enable,
+ .disable = i830_pipes_power_well_disable,
+ .is_enabled = i830_pipes_power_well_enabled,
+};
+
+static const struct i915_power_well_regs hsw_power_well_regs = {
+ .bios = HSW_PWR_WELL_CTL1,
+ .driver = HSW_PWR_WELL_CTL2,
+ .kvmr = HSW_PWR_WELL_CTL3,
+ .debug = HSW_PWR_WELL_CTL4,
+};
+
+const struct i915_power_well_ops hsw_power_well_ops = {
+ .regs = &hsw_power_well_regs,
+ .sync_hw = hsw_power_well_sync_hw,
+ .enable = hsw_power_well_enable,
+ .disable = hsw_power_well_disable,
+ .is_enabled = hsw_power_well_enabled,
+};
+
+const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
+ .sync_hw = i9xx_power_well_sync_hw_noop,
+ .enable = gen9_dc_off_power_well_enable,
+ .disable = gen9_dc_off_power_well_disable,
+ .is_enabled = gen9_dc_off_power_well_enabled,
+};
+
+const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
+ .sync_hw = i9xx_power_well_sync_hw_noop,
+ .enable = bxt_dpio_cmn_power_well_enable,
+ .disable = bxt_dpio_cmn_power_well_disable,
+ .is_enabled = bxt_dpio_cmn_power_well_enabled,
+};
+
+const struct i915_power_well_ops vlv_display_power_well_ops = {
+ .sync_hw = i9xx_power_well_sync_hw_noop,
+ .enable = vlv_display_power_well_enable,
+ .disable = vlv_display_power_well_disable,
+ .is_enabled = vlv_power_well_enabled,
+};
+
+const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
+ .sync_hw = i9xx_power_well_sync_hw_noop,
+ .enable = vlv_dpio_cmn_power_well_enable,
+ .disable = vlv_dpio_cmn_power_well_disable,
+ .is_enabled = vlv_power_well_enabled,
+};
+
+const struct i915_power_well_ops vlv_dpio_power_well_ops = {
+ .sync_hw = i9xx_power_well_sync_hw_noop,
+ .enable = vlv_power_well_enable,
+ .disable = vlv_power_well_disable,
+ .is_enabled = vlv_power_well_enabled,
+};
+
+static const struct i915_power_well_regs icl_aux_power_well_regs = {
+ .bios = ICL_PWR_WELL_CTL_AUX1,
+ .driver = ICL_PWR_WELL_CTL_AUX2,
+ .debug = ICL_PWR_WELL_CTL_AUX4,
+};
+
+const struct i915_power_well_ops icl_aux_power_well_ops = {
+ .regs = &icl_aux_power_well_regs,
+ .sync_hw = hsw_power_well_sync_hw,
+ .enable = icl_aux_power_well_enable,
+ .disable = icl_aux_power_well_disable,
+ .is_enabled = hsw_power_well_enabled,
+};
+
+static const struct i915_power_well_regs icl_ddi_power_well_regs = {
+ .bios = ICL_PWR_WELL_CTL_DDI1,
+ .driver = ICL_PWR_WELL_CTL_DDI2,
+ .debug = ICL_PWR_WELL_CTL_DDI4,
+};
+
+const struct i915_power_well_ops icl_ddi_power_well_ops = {
+ .regs = &icl_ddi_power_well_regs,
+ .sync_hw = hsw_power_well_sync_hw,
+ .enable = hsw_power_well_enable,
+ .disable = hsw_power_well_disable,
+ .is_enabled = hsw_power_well_enabled,
+};
+
+const struct i915_power_well_ops tgl_tc_cold_off_ops = {
+ .sync_hw = tgl_tc_cold_off_power_well_sync_hw,
+ .enable = tgl_tc_cold_off_power_well_enable,
+ .disable = tgl_tc_cold_off_power_well_disable,
+ .is_enabled = tgl_tc_cold_off_power_well_is_enabled,
+};
+
+const struct i915_power_well_ops xelpdp_aux_power_well_ops = {
+ .sync_hw = i9xx_power_well_sync_hw_noop,
+ .enable = xelpdp_aux_power_well_enable,
+ .disable = xelpdp_aux_power_well_disable,
+ .is_enabled = xelpdp_aux_power_well_enabled,
+};
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.h b/drivers/gpu/drm/i915/display/intel_display_power_well.h
new file mode 100644
index 000000000..e13b521e3
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_power_well.h
@@ -0,0 +1,177 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+#ifndef __INTEL_DISPLAY_POWER_WELL_H__
+#define __INTEL_DISPLAY_POWER_WELL_H__
+
+#include <linux/types.h>
+
+#include "intel_display.h"
+#include "intel_display_power.h"
+
+struct drm_i915_private;
+struct i915_power_well;
+
+#define for_each_power_well(__dev_priv, __power_well) \
+ for ((__power_well) = (__dev_priv)->display.power.domains.power_wells; \
+ (__power_well) - (__dev_priv)->display.power.domains.power_wells < \
+ (__dev_priv)->display.power.domains.power_well_count; \
+ (__power_well)++)
+
+#define for_each_power_well_reverse(__dev_priv, __power_well) \
+ for ((__power_well) = (__dev_priv)->display.power.domains.power_wells + \
+ (__dev_priv)->display.power.domains.power_well_count - 1; \
+ (__power_well) - (__dev_priv)->display.power.domains.power_wells >= 0; \
+ (__power_well)--)
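+
+/*
+ * Example usage (hypothetical caller), dumping the state of all wells:
+ *
+ *	struct i915_power_well *power_well;
+ *
+ *	for_each_power_well(dev_priv, power_well)
+ *		drm_dbg(&dev_priv->drm, "%s: refcount %d\n",
+ *			intel_power_well_name(power_well),
+ *			intel_power_well_refcount(power_well));
+ */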
+
+/*
+ * i915_power_well_id:
+ *
+ * IDs used to look up power wells. Power wells accessed directly bypassing
+ * the power domains framework must be assigned a unique ID. The rest of the
+ * power wells must be assigned DISP_PW_ID_NONE.
+ */
+enum i915_power_well_id {
+ DISP_PW_ID_NONE = 0, /* must be kept zero */
+
+ VLV_DISP_PW_DISP2D,
+ BXT_DISP_PW_DPIO_CMN_A,
+ VLV_DISP_PW_DPIO_CMN_BC,
+ GLK_DISP_PW_DPIO_CMN_C,
+ CHV_DISP_PW_DPIO_CMN_D,
+ HSW_DISP_PW_GLOBAL,
+ SKL_DISP_PW_MISC_IO,
+ SKL_DISP_PW_1,
+ SKL_DISP_PW_2,
+ ICL_DISP_PW_3,
+ SKL_DISP_DC_OFF,
+ TGL_DISP_PW_TC_COLD_OFF,
+};
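+
+/*
+ * Wells with a unique ID can then be fetched directly, bypassing the
+ * power domains framework, e.g. (sketch):
+ *
+ *	struct i915_power_well *pw = lookup_power_well(i915, SKL_DISP_PW_2);
+ */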
+
+struct i915_power_well_instance {
+ const char *name;
+ const struct i915_power_domain_list {
+ const enum intel_display_power_domain *list;
+ u8 count;
+ } *domain_list;
+
+ /* unique identifier for this power well */
+ enum i915_power_well_id id;
+ /*
+	 * Arbitrary data associated with this power well. Platform and power
+ * well specific.
+ */
+ union {
+ struct {
+ /*
+ * request/status flag index in the PUNIT power well
+ * control/status registers.
+ */
+ u8 idx;
+ } vlv;
+ struct {
+ enum dpio_phy phy;
+ } bxt;
+ struct {
+ /*
+ * request/status flag index in the power well
+ * constrol/status registers.
+			 * control/status registers.
+ u8 idx;
+ } hsw;
+ struct {
+ u8 aux_ch;
+ } xelpdp;
+ };
+};
+
+struct i915_power_well_desc {
+ const struct i915_power_well_ops *ops;
+ const struct i915_power_well_instance_list {
+ const struct i915_power_well_instance *list;
+ u8 count;
+ } *instances;
+
+ /* Mask of pipes whose IRQ logic is backed by the pw */
+ u16 irq_pipe_mask:4;
+ u16 always_on:1;
+ /*
+ * Instead of waiting for the status bit to ack enables,
+ * just wait a specific amount of time and then consider
+ * the well enabled.
+ */
+ u16 fixed_enable_delay:1;
+ /* The pw is backing the VGA functionality */
+ u16 has_vga:1;
+ u16 has_fuses:1;
+ /*
+ * The pw is for an ICL+ TypeC PHY port in
+ * Thunderbolt mode.
+ */
+ u16 is_tc_tbt:1;
+};
+
+struct i915_power_well {
+ const struct i915_power_well_desc *desc;
+ struct intel_power_domain_mask domains;
+ /* power well enable/disable usage count */
+ int count;
+ /* cached hw enabled state */
+ bool hw_enabled;
+ /* index into desc->instances->list */
+ u8 instance_idx;
+};
+
+struct i915_power_well *lookup_power_well(struct drm_i915_private *i915,
+ enum i915_power_well_id id);
+
+void intel_power_well_enable(struct drm_i915_private *i915,
+ struct i915_power_well *power_well);
+void intel_power_well_disable(struct drm_i915_private *i915,
+ struct i915_power_well *power_well);
+void intel_power_well_sync_hw(struct drm_i915_private *i915,
+ struct i915_power_well *power_well);
+void intel_power_well_get(struct drm_i915_private *i915,
+ struct i915_power_well *power_well);
+void intel_power_well_put(struct drm_i915_private *i915,
+ struct i915_power_well *power_well);
+bool intel_power_well_is_enabled(struct drm_i915_private *i915,
+ struct i915_power_well *power_well);
+bool intel_power_well_is_enabled_cached(struct i915_power_well *power_well);
+bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
+ enum i915_power_well_id power_well_id);
+bool intel_power_well_is_always_on(struct i915_power_well *power_well);
+const char *intel_power_well_name(struct i915_power_well *power_well);
+struct intel_power_domain_mask *intel_power_well_domains(struct i915_power_well *power_well);
+int intel_power_well_refcount(struct i915_power_well *power_well);
+
+void chv_phy_powergate_lanes(struct intel_encoder *encoder,
+ bool override, unsigned int mask);
+bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
+ enum dpio_channel ch, bool override);
+
+void gen9_enable_dc5(struct drm_i915_private *dev_priv);
+void skl_enable_dc6(struct drm_i915_private *dev_priv);
+void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv);
+void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state);
+void gen9_disable_dc_states(struct drm_i915_private *dev_priv);
+void bxt_enable_dc9(struct drm_i915_private *dev_priv);
+void bxt_disable_dc9(struct drm_i915_private *dev_priv);
+
+extern const struct i915_power_well_ops i9xx_always_on_power_well_ops;
+extern const struct i915_power_well_ops chv_pipe_power_well_ops;
+extern const struct i915_power_well_ops chv_dpio_cmn_power_well_ops;
+extern const struct i915_power_well_ops i830_pipes_power_well_ops;
+extern const struct i915_power_well_ops hsw_power_well_ops;
+extern const struct i915_power_well_ops gen9_dc_off_power_well_ops;
+extern const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops;
+extern const struct i915_power_well_ops vlv_display_power_well_ops;
+extern const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops;
+extern const struct i915_power_well_ops vlv_dpio_power_well_ops;
+extern const struct i915_power_well_ops icl_aux_power_well_ops;
+extern const struct i915_power_well_ops icl_ddi_power_well_ops;
+extern const struct i915_power_well_ops tgl_tc_cold_off_ops;
+extern const struct i915_power_well_ops xelpdp_aux_power_well_ops;
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/intel_display_trace.c b/drivers/gpu/drm/i915/display/intel_display_trace.c
new file mode 100644
index 000000000..737979ada
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_trace.c
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
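+/*
+ * Defining CREATE_TRACE_POINTS before including the trace header makes
+ * the TRACE_EVENT() macros expand into the actual tracepoint
+ * definitions, exactly once, in this translation unit.
+ */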
+#ifndef __CHECKER__
+#define CREATE_TRACE_POINTS
+#include "intel_display_trace.h"
+#endif
diff --git a/drivers/gpu/drm/i915/display/intel_display_trace.h b/drivers/gpu/drm/i915/display/intel_display_trace.h
new file mode 100644
index 000000000..2dd5a4b7f
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_trace.h
@@ -0,0 +1,589 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM i915
+
+#if !defined(__INTEL_DISPLAY_TRACE_H__) || defined(TRACE_HEADER_MULTI_READ)
+#define __INTEL_DISPLAY_TRACE_H__
+
+#include <linux/string_helpers.h>
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+#include "i915_drv.h"
+#include "i915_irq.h"
+#include "intel_crtc.h"
+#include "intel_display_types.h"
+
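+/*
+ * These events land under the "i915" trace system and can be enabled
+ * at runtime through tracefs, e.g.:
+ *
+ *   echo 1 > /sys/kernel/tracing/events/i915/intel_pipe_enable/enable
+ *   cat /sys/kernel/tracing/trace
+ */
+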
+TRACE_EVENT(intel_pipe_enable,
+ TP_PROTO(struct intel_crtc *crtc),
+ TP_ARGS(crtc),
+
+ TP_STRUCT__entry(
+ __array(u32, frame, 3)
+ __array(u32, scanline, 3)
+ __field(enum pipe, pipe)
+ ),
+ TP_fast_assign(
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_crtc *it__;
+ for_each_intel_crtc(&dev_priv->drm, it__) {
+ __entry->frame[it__->pipe] = intel_crtc_get_vblank_counter(it__);
+ __entry->scanline[it__->pipe] = intel_get_crtc_scanline(it__);
+ }
+ __entry->pipe = crtc->pipe;
+ ),
+
+ TP_printk("pipe %c enable, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u",
+ pipe_name(__entry->pipe),
+ __entry->frame[PIPE_A], __entry->scanline[PIPE_A],
+ __entry->frame[PIPE_B], __entry->scanline[PIPE_B],
+ __entry->frame[PIPE_C], __entry->scanline[PIPE_C])
+);
+
+TRACE_EVENT(intel_pipe_disable,
+ TP_PROTO(struct intel_crtc *crtc),
+ TP_ARGS(crtc),
+
+ TP_STRUCT__entry(
+ __array(u32, frame, 3)
+ __array(u32, scanline, 3)
+ __field(enum pipe, pipe)
+ ),
+
+ TP_fast_assign(
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_crtc *it__;
+ for_each_intel_crtc(&dev_priv->drm, it__) {
+ __entry->frame[it__->pipe] = intel_crtc_get_vblank_counter(it__);
+ __entry->scanline[it__->pipe] = intel_get_crtc_scanline(it__);
+ }
+ __entry->pipe = crtc->pipe;
+ ),
+
+ TP_printk("pipe %c disable, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u",
+ pipe_name(__entry->pipe),
+ __entry->frame[PIPE_A], __entry->scanline[PIPE_A],
+ __entry->frame[PIPE_B], __entry->scanline[PIPE_B],
+ __entry->frame[PIPE_C], __entry->scanline[PIPE_C])
+);
+
+TRACE_EVENT(intel_pipe_crc,
+ TP_PROTO(struct intel_crtc *crtc, const u32 *crcs),
+ TP_ARGS(crtc, crcs),
+
+ TP_STRUCT__entry(
+ __field(enum pipe, pipe)
+ __field(u32, frame)
+ __field(u32, scanline)
+ __array(u32, crcs, 5)
+ ),
+
+ TP_fast_assign(
+ __entry->pipe = crtc->pipe;
+ __entry->frame = intel_crtc_get_vblank_counter(crtc);
+ __entry->scanline = intel_get_crtc_scanline(crtc);
+ memcpy(__entry->crcs, crcs, sizeof(__entry->crcs));
+ ),
+
+ TP_printk("pipe %c, frame=%u, scanline=%u crc=%08x %08x %08x %08x %08x",
+ pipe_name(__entry->pipe), __entry->frame, __entry->scanline,
+ __entry->crcs[0], __entry->crcs[1], __entry->crcs[2],
+ __entry->crcs[3], __entry->crcs[4])
+);
+
+TRACE_EVENT(intel_cpu_fifo_underrun,
+ TP_PROTO(struct drm_i915_private *dev_priv, enum pipe pipe),
+ TP_ARGS(dev_priv, pipe),
+
+ TP_STRUCT__entry(
+ __field(enum pipe, pipe)
+ __field(u32, frame)
+ __field(u32, scanline)
+ ),
+
+ TP_fast_assign(
+ struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
+ __entry->pipe = pipe;
+ __entry->frame = intel_crtc_get_vblank_counter(crtc);
+ __entry->scanline = intel_get_crtc_scanline(crtc);
+ ),
+
+ TP_printk("pipe %c, frame=%u, scanline=%u",
+ pipe_name(__entry->pipe),
+ __entry->frame, __entry->scanline)
+);
+
+TRACE_EVENT(intel_pch_fifo_underrun,
+ TP_PROTO(struct drm_i915_private *dev_priv, enum pipe pch_transcoder),
+ TP_ARGS(dev_priv, pch_transcoder),
+
+ TP_STRUCT__entry(
+ __field(enum pipe, pipe)
+ __field(u32, frame)
+ __field(u32, scanline)
+ ),
+
+ TP_fast_assign(
+ enum pipe pipe = pch_transcoder;
+ struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
+ __entry->pipe = pipe;
+ __entry->frame = intel_crtc_get_vblank_counter(crtc);
+ __entry->scanline = intel_get_crtc_scanline(crtc);
+ ),
+
+ TP_printk("pch transcoder %c, frame=%u, scanline=%u",
+ pipe_name(__entry->pipe),
+ __entry->frame, __entry->scanline)
+);
+
+TRACE_EVENT(intel_memory_cxsr,
+ TP_PROTO(struct drm_i915_private *dev_priv, bool old, bool new),
+ TP_ARGS(dev_priv, old, new),
+
+ TP_STRUCT__entry(
+ __array(u32, frame, 3)
+ __array(u32, scanline, 3)
+ __field(bool, old)
+ __field(bool, new)
+ ),
+
+ TP_fast_assign(
+ struct intel_crtc *crtc;
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ __entry->frame[crtc->pipe] = intel_crtc_get_vblank_counter(crtc);
+ __entry->scanline[crtc->pipe] = intel_get_crtc_scanline(crtc);
+ }
+ __entry->old = old;
+ __entry->new = new;
+ ),
+
+ TP_printk("%s->%s, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u",
+ str_on_off(__entry->old), str_on_off(__entry->new),
+ __entry->frame[PIPE_A], __entry->scanline[PIPE_A],
+ __entry->frame[PIPE_B], __entry->scanline[PIPE_B],
+ __entry->frame[PIPE_C], __entry->scanline[PIPE_C])
+);
+
+TRACE_EVENT(g4x_wm,
+ TP_PROTO(struct intel_crtc *crtc, const struct g4x_wm_values *wm),
+ TP_ARGS(crtc, wm),
+
+ TP_STRUCT__entry(
+ __field(enum pipe, pipe)
+ __field(u32, frame)
+ __field(u32, scanline)
+ __field(u16, primary)
+ __field(u16, sprite)
+ __field(u16, cursor)
+ __field(u16, sr_plane)
+ __field(u16, sr_cursor)
+ __field(u16, sr_fbc)
+ __field(u16, hpll_plane)
+ __field(u16, hpll_cursor)
+ __field(u16, hpll_fbc)
+ __field(bool, cxsr)
+ __field(bool, hpll)
+ __field(bool, fbc)
+ ),
+
+ TP_fast_assign(
+ __entry->pipe = crtc->pipe;
+ __entry->frame = intel_crtc_get_vblank_counter(crtc);
+ __entry->scanline = intel_get_crtc_scanline(crtc);
+ __entry->primary = wm->pipe[crtc->pipe].plane[PLANE_PRIMARY];
+ __entry->sprite = wm->pipe[crtc->pipe].plane[PLANE_SPRITE0];
+ __entry->cursor = wm->pipe[crtc->pipe].plane[PLANE_CURSOR];
+ __entry->sr_plane = wm->sr.plane;
+ __entry->sr_cursor = wm->sr.cursor;
+ __entry->sr_fbc = wm->sr.fbc;
+ __entry->hpll_plane = wm->hpll.plane;
+ __entry->hpll_cursor = wm->hpll.cursor;
+ __entry->hpll_fbc = wm->hpll.fbc;
+ __entry->cxsr = wm->cxsr;
+ __entry->hpll = wm->hpll_en;
+ __entry->fbc = wm->fbc_en;
+ ),
+
+ TP_printk("pipe %c, frame=%u, scanline=%u, wm %d/%d/%d, sr %s/%d/%d/%d, hpll %s/%d/%d/%d, fbc %s",
+ pipe_name(__entry->pipe), __entry->frame, __entry->scanline,
+ __entry->primary, __entry->sprite, __entry->cursor,
+ str_yes_no(__entry->cxsr), __entry->sr_plane, __entry->sr_cursor, __entry->sr_fbc,
+ str_yes_no(__entry->hpll), __entry->hpll_plane, __entry->hpll_cursor, __entry->hpll_fbc,
+ str_yes_no(__entry->fbc))
+);
+
+TRACE_EVENT(vlv_wm,
+ TP_PROTO(struct intel_crtc *crtc, const struct vlv_wm_values *wm),
+ TP_ARGS(crtc, wm),
+
+ TP_STRUCT__entry(
+ __field(enum pipe, pipe)
+ __field(u32, frame)
+ __field(u32, scanline)
+ __field(u32, level)
+ __field(u32, cxsr)
+ __field(u32, primary)
+ __field(u32, sprite0)
+ __field(u32, sprite1)
+ __field(u32, cursor)
+ __field(u32, sr_plane)
+ __field(u32, sr_cursor)
+ ),
+
+ TP_fast_assign(
+ __entry->pipe = crtc->pipe;
+ __entry->frame = intel_crtc_get_vblank_counter(crtc);
+ __entry->scanline = intel_get_crtc_scanline(crtc);
+ __entry->level = wm->level;
+ __entry->cxsr = wm->cxsr;
+ __entry->primary = wm->pipe[crtc->pipe].plane[PLANE_PRIMARY];
+ __entry->sprite0 = wm->pipe[crtc->pipe].plane[PLANE_SPRITE0];
+ __entry->sprite1 = wm->pipe[crtc->pipe].plane[PLANE_SPRITE1];
+ __entry->cursor = wm->pipe[crtc->pipe].plane[PLANE_CURSOR];
+ __entry->sr_plane = wm->sr.plane;
+ __entry->sr_cursor = wm->sr.cursor;
+ ),
+
+ TP_printk("pipe %c, frame=%u, scanline=%u, level=%d, cxsr=%d, wm %d/%d/%d/%d, sr %d/%d",
+ pipe_name(__entry->pipe), __entry->frame,
+ __entry->scanline, __entry->level, __entry->cxsr,
+ __entry->primary, __entry->sprite0, __entry->sprite1, __entry->cursor,
+ __entry->sr_plane, __entry->sr_cursor)
+);
+
+TRACE_EVENT(vlv_fifo_size,
+ TP_PROTO(struct intel_crtc *crtc, u32 sprite0_start, u32 sprite1_start, u32 fifo_size),
+ TP_ARGS(crtc, sprite0_start, sprite1_start, fifo_size),
+
+ TP_STRUCT__entry(
+ __field(enum pipe, pipe)
+ __field(u32, frame)
+ __field(u32, scanline)
+ __field(u32, sprite0_start)
+ __field(u32, sprite1_start)
+ __field(u32, fifo_size)
+ ),
+
+ TP_fast_assign(
+ __entry->pipe = crtc->pipe;
+ __entry->frame = intel_crtc_get_vblank_counter(crtc);
+ __entry->scanline = intel_get_crtc_scanline(crtc);
+ __entry->sprite0_start = sprite0_start;
+ __entry->sprite1_start = sprite1_start;
+ __entry->fifo_size = fifo_size;
+ ),
+
+ TP_printk("pipe %c, frame=%u, scanline=%u, %d/%d/%d",
+ pipe_name(__entry->pipe), __entry->frame,
+ __entry->scanline, __entry->sprite0_start,
+ __entry->sprite1_start, __entry->fifo_size)
+);
+
+TRACE_EVENT(intel_plane_update_noarm,
+ TP_PROTO(struct drm_plane *plane, struct intel_crtc *crtc),
+ TP_ARGS(plane, crtc),
+
+ TP_STRUCT__entry(
+ __field(enum pipe, pipe)
+ __field(u32, frame)
+ __field(u32, scanline)
+ __array(int, src, 4)
+ __array(int, dst, 4)
+ __string(name, plane->name)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, plane->name);
+ __entry->pipe = crtc->pipe;
+ __entry->frame = intel_crtc_get_vblank_counter(crtc);
+ __entry->scanline = intel_get_crtc_scanline(crtc);
+ memcpy(__entry->src, &plane->state->src, sizeof(__entry->src));
+ memcpy(__entry->dst, &plane->state->dst, sizeof(__entry->dst));
+ ),
+
+ TP_printk("pipe %c, plane %s, frame=%u, scanline=%u, " DRM_RECT_FP_FMT " -> " DRM_RECT_FMT,
+ pipe_name(__entry->pipe), __get_str(name),
+ __entry->frame, __entry->scanline,
+ DRM_RECT_FP_ARG((const struct drm_rect *)__entry->src),
+ DRM_RECT_ARG((const struct drm_rect *)__entry->dst))
+);
+
+TRACE_EVENT(intel_plane_update_arm,
+ TP_PROTO(struct drm_plane *plane, struct intel_crtc *crtc),
+ TP_ARGS(plane, crtc),
+
+ TP_STRUCT__entry(
+ __field(enum pipe, pipe)
+ __field(u32, frame)
+ __field(u32, scanline)
+ __array(int, src, 4)
+ __array(int, dst, 4)
+ __string(name, plane->name)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, plane->name);
+ __entry->pipe = crtc->pipe;
+ __entry->frame = intel_crtc_get_vblank_counter(crtc);
+ __entry->scanline = intel_get_crtc_scanline(crtc);
+ memcpy(__entry->src, &plane->state->src, sizeof(__entry->src));
+ memcpy(__entry->dst, &plane->state->dst, sizeof(__entry->dst));
+ ),
+
+ TP_printk("pipe %c, plane %s, frame=%u, scanline=%u, " DRM_RECT_FP_FMT " -> " DRM_RECT_FMT,
+ pipe_name(__entry->pipe), __get_str(name),
+ __entry->frame, __entry->scanline,
+ DRM_RECT_FP_ARG((const struct drm_rect *)__entry->src),
+ DRM_RECT_ARG((const struct drm_rect *)__entry->dst))
+);
+
+TRACE_EVENT(intel_plane_disable_arm,
+ TP_PROTO(struct drm_plane *plane, struct intel_crtc *crtc),
+ TP_ARGS(plane, crtc),
+
+ TP_STRUCT__entry(
+ __field(enum pipe, pipe)
+ __field(u32, frame)
+ __field(u32, scanline)
+ __string(name, plane->name)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, plane->name);
+ __entry->pipe = crtc->pipe;
+ __entry->frame = intel_crtc_get_vblank_counter(crtc);
+ __entry->scanline = intel_get_crtc_scanline(crtc);
+ ),
+
+ TP_printk("pipe %c, plane %s, frame=%u, scanline=%u",
+ pipe_name(__entry->pipe), __get_str(name),
+ __entry->frame, __entry->scanline)
+);
+
+TRACE_EVENT(intel_fbc_activate,
+ TP_PROTO(struct intel_plane *plane),
+ TP_ARGS(plane),
+
+ TP_STRUCT__entry(
+ __field(enum pipe, pipe)
+ __field(u32, frame)
+ __field(u32, scanline)
+ ),
+
+ TP_fast_assign(
+ struct intel_crtc *crtc = intel_crtc_for_pipe(to_i915(plane->base.dev),
+ plane->pipe);
+ __entry->pipe = crtc->pipe;
+ __entry->frame = intel_crtc_get_vblank_counter(crtc);
+ __entry->scanline = intel_get_crtc_scanline(crtc);
+ ),
+
+ TP_printk("pipe %c, frame=%u, scanline=%u",
+ pipe_name(__entry->pipe), __entry->frame, __entry->scanline)
+);
+
+TRACE_EVENT(intel_fbc_deactivate,
+ TP_PROTO(struct intel_plane *plane),
+ TP_ARGS(plane),
+
+ TP_STRUCT__entry(
+ __field(enum pipe, pipe)
+ __field(u32, frame)
+ __field(u32, scanline)
+ ),
+
+ TP_fast_assign(
+ struct intel_crtc *crtc = intel_crtc_for_pipe(to_i915(plane->base.dev),
+ plane->pipe);
+ __entry->pipe = crtc->pipe;
+ __entry->frame = intel_crtc_get_vblank_counter(crtc);
+ __entry->scanline = intel_get_crtc_scanline(crtc);
+ ),
+
+ TP_printk("pipe %c, frame=%u, scanline=%u",
+ pipe_name(__entry->pipe), __entry->frame, __entry->scanline)
+);
+
+TRACE_EVENT(intel_fbc_nuke,
+ TP_PROTO(struct intel_plane *plane),
+ TP_ARGS(plane),
+
+ TP_STRUCT__entry(
+ __field(enum pipe, pipe)
+ __field(u32, frame)
+ __field(u32, scanline)
+ ),
+
+ TP_fast_assign(
+ struct intel_crtc *crtc = intel_crtc_for_pipe(to_i915(plane->base.dev),
+ plane->pipe);
+ __entry->pipe = crtc->pipe;
+ __entry->frame = intel_crtc_get_vblank_counter(crtc);
+ __entry->scanline = intel_get_crtc_scanline(crtc);
+ ),
+
+ TP_printk("pipe %c, frame=%u, scanline=%u",
+ pipe_name(__entry->pipe), __entry->frame, __entry->scanline)
+);
+
+TRACE_EVENT(intel_crtc_vblank_work_start,
+ TP_PROTO(struct intel_crtc *crtc),
+ TP_ARGS(crtc),
+
+ TP_STRUCT__entry(
+ __field(enum pipe, pipe)
+ __field(u32, frame)
+ __field(u32, scanline)
+ ),
+
+ TP_fast_assign(
+ __entry->pipe = crtc->pipe;
+ __entry->frame = intel_crtc_get_vblank_counter(crtc);
+ __entry->scanline = intel_get_crtc_scanline(crtc);
+ ),
+
+ TP_printk("pipe %c, frame=%u, scanline=%u",
+ pipe_name(__entry->pipe), __entry->frame,
+ __entry->scanline)
+);
+
+TRACE_EVENT(intel_crtc_vblank_work_end,
+ TP_PROTO(struct intel_crtc *crtc),
+ TP_ARGS(crtc),
+
+ TP_STRUCT__entry(
+ __field(enum pipe, pipe)
+ __field(u32, frame)
+ __field(u32, scanline)
+ ),
+
+ TP_fast_assign(
+ __entry->pipe = crtc->pipe;
+ __entry->frame = intel_crtc_get_vblank_counter(crtc);
+ __entry->scanline = intel_get_crtc_scanline(crtc);
+ ),
+
+ TP_printk("pipe %c, frame=%u, scanline=%u",
+ pipe_name(__entry->pipe), __entry->frame,
+ __entry->scanline)
+);
+
+TRACE_EVENT(intel_pipe_update_start,
+ TP_PROTO(struct intel_crtc *crtc),
+ TP_ARGS(crtc),
+
+ TP_STRUCT__entry(
+ __field(enum pipe, pipe)
+ __field(u32, frame)
+ __field(u32, scanline)
+ __field(u32, min)
+ __field(u32, max)
+ ),
+
+ TP_fast_assign(
+ __entry->pipe = crtc->pipe;
+ __entry->frame = intel_crtc_get_vblank_counter(crtc);
+ __entry->scanline = intel_get_crtc_scanline(crtc);
+ __entry->min = crtc->debug.min_vbl;
+ __entry->max = crtc->debug.max_vbl;
+ ),
+
+ TP_printk("pipe %c, frame=%u, scanline=%u, min=%u, max=%u",
+ pipe_name(__entry->pipe), __entry->frame,
+ __entry->scanline, __entry->min, __entry->max)
+);
+
+TRACE_EVENT(intel_pipe_update_vblank_evaded,
+ TP_PROTO(struct intel_crtc *crtc),
+ TP_ARGS(crtc),
+
+ TP_STRUCT__entry(
+ __field(enum pipe, pipe)
+ __field(u32, frame)
+ __field(u32, scanline)
+ __field(u32, min)
+ __field(u32, max)
+ ),
+
+ TP_fast_assign(
+ __entry->pipe = crtc->pipe;
+ __entry->frame = crtc->debug.start_vbl_count;
+ __entry->scanline = crtc->debug.scanline_start;
+ __entry->min = crtc->debug.min_vbl;
+ __entry->max = crtc->debug.max_vbl;
+ ),
+
+ TP_printk("pipe %c, frame=%u, scanline=%u, min=%u, max=%u",
+ pipe_name(__entry->pipe), __entry->frame,
+ __entry->scanline, __entry->min, __entry->max)
+);
+
+TRACE_EVENT(intel_pipe_update_end,
+ TP_PROTO(struct intel_crtc *crtc, u32 frame, int scanline_end),
+ TP_ARGS(crtc, frame, scanline_end),
+
+ TP_STRUCT__entry(
+ __field(enum pipe, pipe)
+ __field(u32, frame)
+ __field(u32, scanline)
+ ),
+
+ TP_fast_assign(
+ __entry->pipe = crtc->pipe;
+ __entry->frame = frame;
+ __entry->scanline = scanline_end;
+ ),
+
+ TP_printk("pipe %c, frame=%u, scanline=%u",
+ pipe_name(__entry->pipe), __entry->frame,
+ __entry->scanline)
+);
+
+TRACE_EVENT(intel_frontbuffer_invalidate,
+ TP_PROTO(unsigned int frontbuffer_bits, unsigned int origin),
+ TP_ARGS(frontbuffer_bits, origin),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, frontbuffer_bits)
+ __field(unsigned int, origin)
+ ),
+
+ TP_fast_assign(
+ __entry->frontbuffer_bits = frontbuffer_bits;
+ __entry->origin = origin;
+ ),
+
+ TP_printk("frontbuffer_bits=0x%08x, origin=%u",
+ __entry->frontbuffer_bits, __entry->origin)
+);
+
+TRACE_EVENT(intel_frontbuffer_flush,
+ TP_PROTO(unsigned int frontbuffer_bits, unsigned int origin),
+ TP_ARGS(frontbuffer_bits, origin),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, frontbuffer_bits)
+ __field(unsigned int, origin)
+ ),
+
+ TP_fast_assign(
+ __entry->frontbuffer_bits = frontbuffer_bits;
+ __entry->origin = origin;
+ ),
+
+ TP_printk("frontbuffer_bits=0x%08x, origin=%u",
+ __entry->frontbuffer_bits, __entry->origin)
+);
+
+#endif /* __INTEL_DISPLAY_TRACE_H__ */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/i915/display
+#define TRACE_INCLUDE_FILE intel_display_trace
+#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
new file mode 100644
index 000000000..a8bf91a21
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -0,0 +1,2074 @@
+/*
+ * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
+ * Copyright (c) 2007-2008 Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef __INTEL_DISPLAY_TYPES_H__
+#define __INTEL_DISPLAY_TYPES_H__
+
+#include <linux/i2c.h>
+#include <linux/pm_qos.h>
+#include <linux/pwm.h>
+#include <linux/sched/clock.h>
+
+#include <drm/display/drm_dp_dual_mode_helper.h>
+#include <drm/display/drm_dp_mst_helper.h>
+#include <drm/display/drm_dsc.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_encoder.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_rect.h>
+#include <drm/drm_vblank.h>
+#include <drm/drm_vblank_work.h>
+#include <drm/i915_mei_hdcp_interface.h>
+#include <media/cec-notifier.h>
+
+#include "i915_vma.h"
+#include "i915_vma_types.h"
+#include "intel_bios.h"
+#include "intel_display.h"
+#include "intel_display_power.h"
+#include "intel_dpll_mgr.h"
+#include "intel_pm_types.h"
+
+struct drm_printer;
+struct __intel_global_objs_state;
+struct intel_ddi_buf_trans;
+struct intel_fbc;
+struct intel_connector;
+
+/*
+ * Display related stuff
+ */
+
+/* These are outputs from the chip - integrated only;
+   external chips are driven via DVO or SDVO output. */
+enum intel_output_type {
+ INTEL_OUTPUT_UNUSED = 0,
+ INTEL_OUTPUT_ANALOG = 1,
+ INTEL_OUTPUT_DVO = 2,
+ INTEL_OUTPUT_SDVO = 3,
+ INTEL_OUTPUT_LVDS = 4,
+ INTEL_OUTPUT_TVOUT = 5,
+ INTEL_OUTPUT_HDMI = 6,
+ INTEL_OUTPUT_DP = 7,
+ INTEL_OUTPUT_EDP = 8,
+ INTEL_OUTPUT_DSI = 9,
+ INTEL_OUTPUT_DDI = 10,
+ INTEL_OUTPUT_DP_MST = 11,
+};
+
+enum hdmi_force_audio {
+ HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */
+ HDMI_AUDIO_OFF, /* force turn off HDMI audio */
+ HDMI_AUDIO_AUTO, /* trust EDID */
+ HDMI_AUDIO_ON, /* force turn on HDMI audio */
+};
+
+/* "Broadcast RGB" property */
+enum intel_broadcast_rgb {
+ INTEL_BROADCAST_RGB_AUTO,
+ INTEL_BROADCAST_RGB_FULL,
+ INTEL_BROADCAST_RGB_LIMITED,
+};
+
+struct intel_fb_view {
+ /*
+ * The remap information used in the remapped and rotated views to
+ * create the DMA scatter-gather list for each FB color plane. This sg
+ * list is created along with the view type (gtt.type) specific
+ * i915_vma object and contains the list of FB object pages (reordered
+ * in the rotated view) that are visible in the view.
+ * In the normal view the FB object's backing store sg list is used
+ * directly and hence the remap information here is not used.
+ */
+ struct i915_gtt_view gtt;
+
+ /*
+ * The GTT view (gtt.type) specific information for each FB color
+ * plane. In the normal GTT view all formats (up to 4 color planes),
+ * in the rotated and remapped GTT view all no-CCS formats (up to 2
+ * color planes) are supported.
+ *
+ * The view information shared by all FB color planes in the FB,
+ * like dst x/y and src/dst width, is stored separately in
+ * intel_plane_state.
+ */
+ struct i915_color_plane_view {
+ u32 offset;
+ unsigned int x, y;
+ /*
+ * Plane stride in:
+ * bytes for 0/180 degree rotation
+ * pixels for 90/270 degree rotation
+ */
+ unsigned int mapping_stride;
+ unsigned int scanout_stride;
+ } color_plane[4];
+};
+
+struct intel_framebuffer {
+ struct drm_framebuffer base;
+ struct intel_frontbuffer *frontbuffer;
+
+ /* Params to remap the FB pages and program the plane registers in each view. */
+ struct intel_fb_view normal_view;
+ union {
+ struct intel_fb_view rotated_view;
+ struct intel_fb_view remapped_view;
+ };
+
+ struct i915_address_space *dpt_vm;
+};
+
+enum intel_hotplug_state {
+ INTEL_HOTPLUG_UNCHANGED,
+ INTEL_HOTPLUG_CHANGED,
+ INTEL_HOTPLUG_RETRY,
+};
+
+struct intel_encoder {
+ struct drm_encoder base;
+
+ enum intel_output_type type;
+ enum port port;
+ u16 cloneable;
+ u8 pipe_mask;
+ enum intel_hotplug_state (*hotplug)(struct intel_encoder *encoder,
+ struct intel_connector *connector);
+ enum intel_output_type (*compute_output_type)(struct intel_encoder *,
+ struct intel_crtc_state *,
+ struct drm_connector_state *);
+ int (*compute_config)(struct intel_encoder *,
+ struct intel_crtc_state *,
+ struct drm_connector_state *);
+ int (*compute_config_late)(struct intel_encoder *,
+ struct intel_crtc_state *,
+ struct drm_connector_state *);
+ void (*update_prepare)(struct intel_atomic_state *,
+ struct intel_encoder *,
+ struct intel_crtc *);
+ void (*pre_pll_enable)(struct intel_atomic_state *,
+ struct intel_encoder *,
+ const struct intel_crtc_state *,
+ const struct drm_connector_state *);
+ void (*pre_enable)(struct intel_atomic_state *,
+ struct intel_encoder *,
+ const struct intel_crtc_state *,
+ const struct drm_connector_state *);
+ void (*enable)(struct intel_atomic_state *,
+ struct intel_encoder *,
+ const struct intel_crtc_state *,
+ const struct drm_connector_state *);
+ void (*update_complete)(struct intel_atomic_state *,
+ struct intel_encoder *,
+ struct intel_crtc *);
+ void (*disable)(struct intel_atomic_state *,
+ struct intel_encoder *,
+ const struct intel_crtc_state *,
+ const struct drm_connector_state *);
+ void (*post_disable)(struct intel_atomic_state *,
+ struct intel_encoder *,
+ const struct intel_crtc_state *,
+ const struct drm_connector_state *);
+ void (*post_pll_disable)(struct intel_atomic_state *,
+ struct intel_encoder *,
+ const struct intel_crtc_state *,
+ const struct drm_connector_state *);
+ void (*update_pipe)(struct intel_atomic_state *,
+ struct intel_encoder *,
+ const struct intel_crtc_state *,
+ const struct drm_connector_state *);
+ /* Read out the current hw state of this connector, returning true if
+	 * the encoder is active. If the encoder is enabled it also sets the pipe
+ * it is connected to in the pipe parameter. */
+ bool (*get_hw_state)(struct intel_encoder *, enum pipe *pipe);
+ /* Reconstructs the equivalent mode flags for the current hardware
+ * state. This must be called _after_ display->get_pipe_config has
+ * pre-filled the pipe config. Note that intel_encoder->base.crtc must
+ * be set correctly before calling this function. */
+ void (*get_config)(struct intel_encoder *,
+ struct intel_crtc_state *pipe_config);
+
+ /*
+ * Optional hook called during init/resume to sync any state
+ * stored in the encoder (eg. DP link parameters) wrt. the HW state.
+ */
+ void (*sync_state)(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+
+ /*
+ * Optional hook, returning true if this encoder allows a fastset
+ * during the initial commit, false otherwise.
+ */
+ bool (*initial_fastset_check)(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state);
+
+ /*
+ * Acquires the power domains needed for an active encoder during
+ * hardware state readout.
+ */
+ void (*get_power_domains)(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state);
+ /*
+ * Called during system suspend after all pending requests for the
+ * encoder are flushed (for example for DP AUX transactions) and
+ * device interrupts are disabled.
+ */
+ void (*suspend)(struct intel_encoder *);
+ /*
+ * Called during system reboot/shutdown after all the
+ * encoders have been disabled and suspended.
+ */
+ void (*shutdown)(struct intel_encoder *encoder);
+ /*
+ * Enable/disable the clock to the port.
+ */
+ void (*enable_clock)(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+ void (*disable_clock)(struct intel_encoder *encoder);
+ /*
+ * Returns whether the port clock is enabled or not.
+ */
+ bool (*is_clock_enabled)(struct intel_encoder *encoder);
+ const struct intel_ddi_buf_trans *(*get_buf_trans)(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries);
+ void (*set_signal_levels)(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+
+ enum hpd_pin hpd_pin;
+ enum intel_display_power_domain power_domain;
+	/* for communication with the audio component; protected by av_mutex */
+ const struct drm_connector *audio_connector;
+
+ /* VBT information for this encoder (may be NULL for older platforms) */
+ const struct intel_bios_encoder_data *devdata;
+};
+
+struct intel_panel_bl_funcs {
+ /* Connector and platform specific backlight functions */
+ int (*setup)(struct intel_connector *connector, enum pipe pipe);
+ u32 (*get)(struct intel_connector *connector, enum pipe pipe);
+ void (*set)(const struct drm_connector_state *conn_state, u32 level);
+ void (*disable)(const struct drm_connector_state *conn_state, u32 level);
+ void (*enable)(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state, u32 level);
+ u32 (*hz_to_pwm)(struct intel_connector *connector, u32 hz);
+};
+
+enum drrs_type {
+ DRRS_TYPE_NONE,
+ DRRS_TYPE_STATIC,
+ DRRS_TYPE_SEAMLESS,
+};
+
+struct intel_vbt_panel_data {
+ struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
+ struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
+
+ /* Feature bits */
+ int panel_type;
+ unsigned int lvds_dither:1;
+ unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
+
+ bool vrr;
+
+ u8 seamless_drrs_min_refresh_rate;
+ enum drrs_type drrs_type;
+
+ struct {
+ int max_link_rate;
+ int rate;
+ int lanes;
+ int preemphasis;
+ int vswing;
+ int bpp;
+ struct edp_power_seq pps;
+ u8 drrs_msa_timing_delay;
+ bool low_vswing;
+ bool initialized;
+ bool hobl;
+ } edp;
+
+ struct {
+ bool enable;
+ bool full_link;
+ bool require_aux_wakeup;
+ int idle_frames;
+ int tp1_wakeup_time_us;
+ int tp2_tp3_wakeup_time_us;
+ int psr2_tp2_tp3_wakeup_time_us;
+ } psr;
+
+ struct {
+ u16 pwm_freq_hz;
+ u16 brightness_precision_bits;
+ bool present;
+ bool active_low_pwm;
+ u8 min_brightness; /* min_brightness/255 of max */
+ u8 controller; /* brightness controller number */
+ enum intel_backlight_type type;
+ } backlight;
+
+ /* MIPI DSI */
+ struct {
+ u16 panel_id;
+ struct mipi_config *config;
+ struct mipi_pps_data *pps;
+ u16 bl_ports;
+ u16 cabc_ports;
+ u8 seq_version;
+ u32 size;
+ u8 *data;
+ const u8 *sequence[MIPI_SEQ_MAX];
+ u8 *deassert_seq; /* Used by fixup_mipi_sequences() */
+ enum drm_panel_orientation orientation;
+ } dsi;
+};
+
+struct intel_panel {
+ struct list_head fixed_modes;
+
+ /* backlight */
+ struct {
+ bool present;
+ u32 level;
+ u32 min;
+ u32 max;
+ bool enabled;
+ bool combination_mode; /* gen 2/4 only */
+ bool active_low_pwm;
+ bool alternate_pwm_increment; /* lpt+ */
+
+ /* PWM chip */
+ u32 pwm_level_min;
+ u32 pwm_level_max;
+ bool pwm_enabled;
+ bool util_pin_active_low; /* bxt+ */
+ u8 controller; /* bxt+ only */
+ struct pwm_device *pwm;
+ struct pwm_state pwm_state;
+
+ /* DPCD backlight */
+ union {
+ struct {
+ struct drm_edp_backlight_info info;
+ } vesa;
+ struct {
+ bool sdr_uses_aux;
+ } intel;
+ } edp;
+
+ struct backlight_device *device;
+
+ const struct intel_panel_bl_funcs *funcs;
+ const struct intel_panel_bl_funcs *pwm_funcs;
+ void (*power)(struct intel_connector *, bool enable);
+ } backlight;
+
+ struct intel_vbt_panel_data vbt;
+};
+
+struct intel_digital_port;
+
+enum check_link_response {
+ HDCP_LINK_PROTECTED = 0,
+ HDCP_TOPOLOGY_CHANGE,
+ HDCP_LINK_INTEGRITY_FAILURE,
+ HDCP_REAUTH_REQUEST
+};
+
+/*
+ * This structure serves as a translation layer between the generic HDCP code
+ * and the bus-specific code: HDCP over HDMI differs from HDCP over DP,
+ * so to account for those differences we communicate with the receiver
+ * through this shim.
+ *
+ * For completeness, the 2 buses differ in the following ways:
+ * - DP AUX vs. DDC
+ * HDCP registers on the receiver are set via DP AUX for DP, and
+ * they are set via DDC for HDMI.
+ * - Receiver register offsets
+ * The offsets of the registers are different for DP vs. HDMI
+ * - Receiver register masks/offsets
+ * For instance, the ready bit for the KSV fifo is in a different
+ * place on DP vs HDMI
+ * - Receiver register names
+ * Seriously. In the DP spec, the 16-bit register containing
+ * downstream information is called BINFO, on HDMI it's called
+ * BSTATUS. To confuse matters further, DP has a BSTATUS register
+ * with a completely different definition.
+ * - KSV FIFO
+ * On HDMI, the ksv fifo is read all at once, whereas on DP it must
+ * be read 3 keys at a time
+ * - Aksv output
+ * Since Aksv is hidden in hardware, there's different procedures
+ * to send it over DP AUX vs DDC
+ */
+struct intel_hdcp_shim {
+ /* Outputs the transmitter's An and Aksv values to the receiver. */
+ int (*write_an_aksv)(struct intel_digital_port *dig_port, u8 *an);
+
+ /* Reads the receiver's key selection vector */
+ int (*read_bksv)(struct intel_digital_port *dig_port, u8 *bksv);
+
+ /*
+ * Reads BINFO from DP receivers and BSTATUS from HDMI receivers. The
+ * definitions are the same in the respective specs, but the names are
+ * different. Call it BSTATUS since that's the name the HDMI spec
+ * uses and it was there first.
+ */
+ int (*read_bstatus)(struct intel_digital_port *dig_port,
+ u8 *bstatus);
+
+ /* Determines whether a repeater is present downstream */
+ int (*repeater_present)(struct intel_digital_port *dig_port,
+ bool *repeater_present);
+
+ /* Reads the receiver's Ri' value */
+ int (*read_ri_prime)(struct intel_digital_port *dig_port, u8 *ri);
+
+ /* Determines if the receiver's KSV FIFO is ready for consumption */
+ int (*read_ksv_ready)(struct intel_digital_port *dig_port,
+ bool *ksv_ready);
+
+ /* Reads the ksv fifo for num_downstream devices */
+ int (*read_ksv_fifo)(struct intel_digital_port *dig_port,
+ int num_downstream, u8 *ksv_fifo);
+
+ /* Reads a 32-bit part of V' from the receiver */
+ int (*read_v_prime_part)(struct intel_digital_port *dig_port,
+ int i, u32 *part);
+
+ /* Enables HDCP signalling on the port */
+ int (*toggle_signalling)(struct intel_digital_port *dig_port,
+ enum transcoder cpu_transcoder,
+ bool enable);
+
+ /* Enable/Disable stream encryption on DP MST Transport Link */
+ int (*stream_encryption)(struct intel_connector *connector,
+ bool enable);
+
+ /* Ensures the link is still protected */
+ bool (*check_link)(struct intel_digital_port *dig_port,
+ struct intel_connector *connector);
+
+	/* Detects the panel's HDCP capability. This is optional for HDMI. */
+ int (*hdcp_capable)(struct intel_digital_port *dig_port,
+ bool *hdcp_capable);
+
+	/* HDCP adaptation (DP/HDMI) required on the port */
+ enum hdcp_wired_protocol protocol;
+
+	/* Detects whether the sink is HDCP2.2 capable */
+ int (*hdcp_2_2_capable)(struct intel_digital_port *dig_port,
+ bool *capable);
+
+ /* Write HDCP2.2 messages */
+ int (*write_2_2_msg)(struct intel_digital_port *dig_port,
+ void *buf, size_t size);
+
+ /* Read HDCP2.2 messages */
+ int (*read_2_2_msg)(struct intel_digital_port *dig_port,
+ u8 msg_id, void *buf, size_t size);
+
+ /*
+ * Implementation of DP HDCP2.2 Errata for the communication of stream
+	 * type to Receivers. In DP HDCP2.2 the Stream type is one of the inputs to
+ * the HDCP2.2 Cipher for En/De-Cryption. Not applicable for HDMI.
+ */
+ int (*config_stream_type)(struct intel_digital_port *dig_port,
+ bool is_repeater, u8 type);
+
+ /* Enable/Disable HDCP 2.2 stream encryption on DP MST Transport Link */
+ int (*stream_2_2_encryption)(struct intel_connector *connector,
+ bool enable);
+
+ /* HDCP2.2 Link Integrity Check */
+ int (*check_2_2_link)(struct intel_digital_port *dig_port,
+ struct intel_connector *connector);
+};
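+
+/*
+ * Illustrative sketch only (not the actual DP/HDMI implementations):
+ * a bus-specific driver provides an instance of the shim, e.g.
+ *
+ *	static const struct intel_hdcp_shim example_hdcp_shim = {
+ *		.write_an_aksv = example_write_an_aksv,
+ *		.read_bksv = example_read_bksv,
+ *		.read_bstatus = example_read_bstatus,
+ *		.protocol = HDCP_PROTOCOL_DP,
+ *	};
+ */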
+
+struct intel_hdcp {
+ const struct intel_hdcp_shim *shim;
+ /* Mutex for hdcp state of the connector */
+ struct mutex mutex;
+ u64 value;
+ struct delayed_work check_work;
+ struct work_struct prop_work;
+
+ /* HDCP1.4 Encryption status */
+ bool hdcp_encrypted;
+
+ /* HDCP2.2 related definitions */
+ /* Flag indicates whether this connector supports HDCP2.2 or not. */
+ bool hdcp2_supported;
+
+ /* HDCP2.2 Encryption status */
+ bool hdcp2_encrypted;
+
+ /*
+ * Content Stream Type defined by content owner. TYPE0(0x0) content can
+	 * flow in a link protected by HDCP2.2 or HDCP1.4, whereas TYPE1(0x1)
+ * content can flow only through a link protected by HDCP2.2.
+ */
+ u8 content_type;
+
+ bool is_paired;
+ bool is_repeater;
+
+ /*
+ * Count of ReceiverID_List received. Initialized to 0 at AKE_INIT.
+ * Incremented after processing the RepeaterAuth_Send_ReceiverID_List.
+ * When it rolls over re-auth has to be triggered.
+ */
+ u32 seq_num_v;
+
+ /*
+ * Count of RepeaterAuth_Stream_Manage msg propagated.
+ * Initialized to 0 on AKE_INIT. Incremented after every successful
+ * transmission of RepeaterAuth_Stream_Manage message. When it rolls
+ * over re-Auth has to be triggered.
+ */
+ u32 seq_num_m;
+
+ /*
+ * Wait queue to signal the CP_IRQ. Used by waiters to read the
+ * information made available by the HDCP DP sink.
+ */
+ wait_queue_head_t cp_irq_queue;
+ atomic_t cp_irq_count;
+ int cp_irq_count_cached;
+
+ /*
+ * HDCP register access on gen12+ needs the associated transcoder.
+ * The transcoder attached to the connector can change at modeset,
+ * hence the transcoder is cached here.
+ */
+ enum transcoder cpu_transcoder;
+ /* Only used for DP MST stream encryption */
+ enum transcoder stream_transcoder;
+};
+
+struct intel_connector {
+ struct drm_connector base;
+ /*
+ * The fixed encoder this connector is connected to.
+ */
+ struct intel_encoder *encoder;
+
+ /* ACPI device id for ACPI and driver cooperation */
+ u32 acpi_device_id;
+
+ /* Reads out the current hw, returning true if the connector is enabled
+ * and active (i.e. dpms ON state). */
+ bool (*get_hw_state)(struct intel_connector *);
+
+ /* Panel info for eDP and LVDS */
+ struct intel_panel panel;
+
+ /* Cached EDID for eDP and LVDS. May hold ERR_PTR for invalid EDID. */
+ struct edid *edid;
+ struct edid *detect_edid;
+
+ /* Number of times hotplug detection was tried after an HPD interrupt */
+ int hotplug_retries;
+
+ /* Since POLL and HPD connectors may use the same HPD line, keep the native
+ state of connector->polled in case hotplug storm detection changes it */
+ u8 polled;
+
+ struct drm_dp_mst_port *port;
+
+ struct intel_dp *mst_port;
+
+ /* Work struct to schedule a uevent on link train failure */
+ struct work_struct modeset_retry_work;
+
+ struct intel_hdcp hdcp;
+};
+
+struct intel_digital_connector_state {
+ struct drm_connector_state base;
+
+ enum hdmi_force_audio force_audio;
+ int broadcast_rgb;
+};
+
+#define to_intel_digital_connector_state(x) container_of(x, struct intel_digital_connector_state, base)
+
+struct dpll {
+ /* given values */
+ int n;
+ int m1, m2;
+ int p1, p2;
+ /* derived values */
+ int dot;
+ int vco;
+ int m;
+ int p;
+};
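+
+/*
+ * Illustrative sketch of how the derived values relate to the given ones
+ * (roughly what i9xx_calc_dpll_params() does for a given refclk; the exact
+ * formula varies per platform):
+ *
+ *   clock->m   = 5 * (clock->m1 + 2) + (clock->m2 + 2);
+ *   clock->p   = clock->p1 * clock->p2;
+ *   clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
+ *   clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
+ */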
+
+struct intel_atomic_state {
+ struct drm_atomic_state base;
+
+ intel_wakeref_t wakeref;
+
+ struct __intel_global_objs_state *global_objs;
+ int num_global_objs;
+
+ bool dpll_set, modeset;
+
+ struct intel_shared_dpll_state shared_dpll[I915_NUM_PLLS];
+
+ /*
+ * Current watermarks can't be trusted during hardware readout, so
+ * don't bother calculating intermediate watermarks.
+ */
+ bool skip_intermediate_wm;
+
+ bool rps_interactive;
+
+ struct i915_sw_fence commit_ready;
+
+ struct llist_node freed;
+};
+
+struct intel_plane_state {
+ struct drm_plane_state uapi;
+
+ /*
+ * actual hardware state, the state we program to the hardware.
+ * The following members are used to verify the hardware state;
+ * during initial hw readout they need to be copied from uapi.
+ */
+ struct {
+ struct drm_crtc *crtc;
+ struct drm_framebuffer *fb;
+
+ u16 alpha;
+ u16 pixel_blend_mode;
+ unsigned int rotation;
+ enum drm_color_encoding color_encoding;
+ enum drm_color_range color_range;
+ enum drm_scaling_filter scaling_filter;
+ } hw;
+
+ struct i915_vma *ggtt_vma;
+ struct i915_vma *dpt_vma;
+ unsigned long flags;
+#define PLANE_HAS_FENCE BIT(0)
+
+ struct intel_fb_view view;
+
+ /* Plane pxp decryption state */
+ bool decrypt;
+
+ /* Plane state to display black pixels when pxp is borked */
+ bool force_black;
+
+ /* plane control register */
+ u32 ctl;
+
+ /* plane color control register */
+ u32 color_ctl;
+
+ /* chroma upsampler control register */
+ u32 cus_ctl;
+
+ /*
+ * scaler_id
+ * = -1 : not using a scaler
+ * >= 0 : using a scaler
+ *
+ * plane requiring a scaler:
+ * - During check_plane, its bit is set in
+ * crtc_state->scaler_state.scaler_users by calling helper function
+ * update_scaler_plane.
+ * - scaler_id indicates the scaler it got assigned.
+ *
+ * plane doesn't require a scaler:
+ * - this can happen when scaling is no longer required or the plane
+ * simply got disabled.
+ * - During check_plane, corresponding bit is reset in
+ * crtc_state->scaler_state.scaler_users by calling helper function
+ * update_scaler_plane.
+ */
+ int scaler_id;
+
+ /*
+ * planar_linked_plane:
+ *
+ * ICL planar formats require 2 planes that are updated as pairs.
+ * This member is used to make sure the other plane is also updated
+ * when required, and for update_slave() to find the correct
+ * plane_state to pass as argument.
+ */
+ struct intel_plane *planar_linked_plane;
+
+ /*
+ * planar_slave:
+ * If set, this plane is not updated directly; the linked plane's state
+ * is used to update it during atomic commit via the update_slave()
+ * callback.
+ *
+ * It's also used by the watermark code to ignore wm calculations on
+ * this plane. They're calculated by the linked plane's wm code.
+ */
+ u32 planar_slave;
+
+ struct drm_intel_sprite_colorkey ckey;
+
+ struct drm_rect psr2_sel_fetch_area;
+
+ /* Clear Color Value */
+ u64 ccval;
+
+ const char *no_fbc_reason;
+};
+
+struct intel_initial_plane_config {
+ struct intel_framebuffer *fb;
+ struct i915_vma *vma;
+ unsigned int tiling;
+ int size;
+ u32 base;
+ u8 rotation;
+};
+
+struct intel_scaler {
+ int in_use;
+ u32 mode;
+};
+
+struct intel_crtc_scaler_state {
+#define SKL_NUM_SCALERS 2
+ struct intel_scaler scalers[SKL_NUM_SCALERS];
+
+ /*
+ * scaler_users: keeps track of users requesting scalers on this crtc.
+ *
+ * If a bit is set, a user is using a scaler.
+ * Here user can be a plane or crtc as defined below:
+ * bits 0-30 - plane (bit position is index from drm_plane_index)
+ * bit 31 - crtc
+ *
+ * Instead of creating a new index to cover planes and crtc, using
+ * existing drm_plane_index for planes which is well less than 31
+ * planes and bit 31 for crtc. This should be fine to cover all
+ * our platforms.
+ *
+ * intel_atomic_setup_scalers will assign the available scalers to the
+ * users requesting them. It will gracefully fail if the request exceeds
+ * availability.
+ */
+#define SKL_CRTC_INDEX 31
+ unsigned scaler_users;
+
+ /* scaler used by crtc for panel fitting purpose */
+ int scaler_id;
+};
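+
+/*
+ * Illustrative sketch (assumed usage) of how scaler_users gets populated:
+ *
+ *   a plane:  scaler_state->scaler_users |= BIT(drm_plane_index(&plane->base));
+ *   the crtc: scaler_state->scaler_users |= BIT(SKL_CRTC_INDEX);
+ */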
+
+/* {crtc,crtc_state}->mode_flags */
+/* Flag to get scanline using frame time stamps */
+#define I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP (1<<1)
+/* Flag to use the scanline counter instead of the pixel counter */
+#define I915_MODE_FLAG_USE_SCANLINE_COUNTER (1<<2)
+/*
+ * TE0 or TE1 flag is set if the crtc has a DSI encoder which
+ * is operating in command mode.
+ * Flag to use TE from DSI0 instead of VBI in command mode
+ */
+#define I915_MODE_FLAG_DSI_USE_TE0 (1<<3)
+/* Flag to use TE from DSI1 instead of VBI in command mode */
+#define I915_MODE_FLAG_DSI_USE_TE1 (1<<4)
+/* Flag to indicate mipi dsi periodic command mode where we do not get TE */
+#define I915_MODE_FLAG_DSI_PERIODIC_CMD_MODE (1<<5)
+/* Do tricks to make vblank timestamps sane with VRR? */
+#define I915_MODE_FLAG_VRR (1<<6)
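+
+/* e.g. a VRR-enabled crtc sets: crtc_state->mode_flags |= I915_MODE_FLAG_VRR */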
+
+struct intel_wm_level {
+ bool enable;
+ u32 pri_val;
+ u32 spr_val;
+ u32 cur_val;
+ u32 fbc_val;
+};
+
+struct intel_pipe_wm {
+ struct intel_wm_level wm[5];
+ bool fbc_wm_enabled;
+ bool pipe_enabled;
+ bool sprites_enabled;
+ bool sprites_scaled;
+};
+
+struct skl_wm_level {
+ u16 min_ddb_alloc;
+ u16 blocks;
+ u8 lines;
+ bool enable;
+ bool ignore_lines;
+ bool can_sagv;
+};
+
+struct skl_plane_wm {
+ struct skl_wm_level wm[8];
+ struct skl_wm_level uv_wm[8];
+ struct skl_wm_level trans_wm;
+ struct {
+ struct skl_wm_level wm0;
+ struct skl_wm_level trans_wm;
+ } sagv;
+ bool is_planar;
+};
+
+struct skl_pipe_wm {
+ struct skl_plane_wm planes[I915_MAX_PLANES];
+ bool use_sagv_wm;
+};
+
+enum vlv_wm_level {
+ VLV_WM_LEVEL_PM2,
+ VLV_WM_LEVEL_PM5,
+ VLV_WM_LEVEL_DDR_DVFS,
+ NUM_VLV_WM_LEVELS,
+};
+
+struct vlv_wm_state {
+ struct g4x_pipe_wm wm[NUM_VLV_WM_LEVELS];
+ struct g4x_sr_wm sr[NUM_VLV_WM_LEVELS];
+ u8 num_levels;
+ bool cxsr;
+};
+
+struct vlv_fifo_state {
+ u16 plane[I915_MAX_PLANES];
+};
+
+enum g4x_wm_level {
+ G4X_WM_LEVEL_NORMAL,
+ G4X_WM_LEVEL_SR,
+ G4X_WM_LEVEL_HPLL,
+ NUM_G4X_WM_LEVELS,
+};
+
+struct g4x_wm_state {
+ struct g4x_pipe_wm wm;
+ struct g4x_sr_wm sr;
+ struct g4x_sr_wm hpll;
+ bool cxsr;
+ bool hpll_en;
+ bool fbc_en;
+};
+
+struct intel_crtc_wm_state {
+ union {
+ /*
+ * raw:
+ * The "raw" watermark values produced by the formula
+ * given the plane's current state. They do not consider
+ * how much FIFO is actually allocated for each plane.
+ *
+ * optimal:
+ * The "optimal" watermark values given the current
+ * state of the planes and the amount of FIFO
+ * allocated to each, ignoring any previous state
+ * of the planes.
+ *
+ * intermediate:
+ * The "intermediate" watermark values when transitioning
+ * between the old and new "optimal" values. Used when
+ * the watermark registers are single buffered and hence
+ * their state changes asynchronously with regards to the
+ * actual plane registers. These are essentially the
+ * worst case combination of the old and new "optimal"
+ * watermarks, which are therefore safe to use when the
+ * plane is in either its old or new state.
+ */
+ struct {
+ struct intel_pipe_wm intermediate;
+ struct intel_pipe_wm optimal;
+ } ilk;
+
+ struct {
+ struct skl_pipe_wm raw;
+ /* gen9+ only needs 1-step wm programming */
+ struct skl_pipe_wm optimal;
+ struct skl_ddb_entry ddb;
+ /*
+ * pre-icl: for packed/planar CbCr
+ * icl+: for everything
+ */
+ struct skl_ddb_entry plane_ddb[I915_MAX_PLANES];
+ /* pre-icl: for planar Y */
+ struct skl_ddb_entry plane_ddb_y[I915_MAX_PLANES];
+ } skl;
+
+ struct {
+ struct g4x_pipe_wm raw[NUM_VLV_WM_LEVELS]; /* not inverted */
+ struct vlv_wm_state intermediate; /* inverted */
+ struct vlv_wm_state optimal; /* inverted */
+ struct vlv_fifo_state fifo_state;
+ } vlv;
+
+ struct {
+ struct g4x_pipe_wm raw[NUM_G4X_WM_LEVELS];
+ struct g4x_wm_state intermediate;
+ struct g4x_wm_state optimal;
+ } g4x;
+ };
+
+ /*
+ * Platforms with two-step watermark programming will need to
+ * update watermark programming post-vblank to switch from the
+ * safe intermediate watermarks to the optimal final
+ * watermarks.
+ */
+ bool need_postvbl_update;
+};
+
+enum intel_output_format {
+ INTEL_OUTPUT_FORMAT_RGB,
+ INTEL_OUTPUT_FORMAT_YCBCR420,
+ INTEL_OUTPUT_FORMAT_YCBCR444,
+};
+
+struct intel_mpllb_state {
+ u32 clock; /* in KHz */
+ u32 ref_control;
+ u32 mpllb_cp;
+ u32 mpllb_div;
+ u32 mpllb_div2;
+ u32 mpllb_fracn1;
+ u32 mpllb_fracn2;
+ u32 mpllb_sscen;
+ u32 mpllb_sscstep;
+};
+
+struct intel_crtc_state {
+ /*
+ * uapi (drm) state. This is the software state shown to userspace.
+ * In particular, the following members are used for bookkeeping:
+ * - crtc
+ * - state
+ * - *_changed
+ * - event
+ * - commit
+ * - mode_blob
+ */
+ struct drm_crtc_state uapi;
+
+ /*
+ * actual hardware state, the state we program to the hardware.
+ * The following members are used to verify the hardware state:
+ * - enable
+ * - active
+ * - mode / pipe_mode / adjusted_mode
+ * - color property blobs.
+ *
+ * During initial hw readout, they need to be copied to uapi.
+ *
+ * Bigjoiner will allow a transcoder mode that spans 2 pipes;
+ * Use the pipe_mode for calculations like watermarks, pipe
+ * scaler, and bandwidth.
+ *
+ * Use adjusted_mode for things that need to know the full
+ * mode on the transcoder, which spans all pipes.
+ */
+ struct {
+ bool active, enable;
+ struct drm_property_blob *degamma_lut, *gamma_lut, *ctm;
+ struct drm_display_mode mode, pipe_mode, adjusted_mode;
+ enum drm_scaling_filter scaling_filter;
+ } hw;
+
+ /**
+ * quirks - bitfield with hw state readout quirks
+ *
+ * For various reasons the hw state readout code might not be able to
+ * completely faithfully read out the current state. These cases are
+ * tracked with quirk flags so that fastboot and state checker can act
+ * accordingly.
+ */
+#define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */
+ unsigned long quirks;
+
+ unsigned fb_bits; /* framebuffers to flip */
+ bool update_pipe; /* can a fast modeset be performed? */
+ bool disable_cxsr;
+ bool update_wm_pre, update_wm_post; /* watermarks are updated */
+ bool fifo_changed; /* FIFO split is changed */
+ bool preload_luts;
+ bool inherited; /* state inherited from BIOS? */
+
+ /* Ask the hardware to actually async flip? */
+ bool do_async_flip;
+
+ /* Pipe source size (i.e. panel fitter input size)
+ * All planes will be positioned inside this space,
+ * and get clipped at the edges. */
+ struct drm_rect pipe_src;
+
+ /*
+ * Pipe pixel rate, adjusted for
+ * panel fitter/pipe scaler downscaling.
+ */
+ unsigned int pixel_rate;
+
+ /* Whether to set up the PCH/FDI. Note that we never allow sharing
+ * between pch encoders and cpu encoders. */
+ bool has_pch_encoder;
+
+ /* Are we sending infoframes on the attached port */
+ bool has_infoframe;
+
+ /* CPU Transcoder for the pipe. Currently this can only differ from the
+ * pipe on Haswell and later (where we have a special eDP transcoder)
+ * and Broxton (where we have special DSI transcoders). */
+ enum transcoder cpu_transcoder;
+
+ /*
+ * Use reduced/limited/broadcast RGB range, compressing from the full
+ * range fed into the crtcs.
+ */
+ bool limited_color_range;
+
+ /* Bitmask of encoder types (enum intel_output_type)
+ * driven by the pipe.
+ */
+ unsigned int output_types;
+
+ /* Whether we should send NULL infoframes. Required for audio. */
+ bool has_hdmi_sink;
+
+ /* Audio enabled on this pipe. Only valid if either has_hdmi_sink or
+ * has_dp_encoder is set. */
+ bool has_audio;
+
+ /*
+ * Enable dithering, used when the selected pipe bpp doesn't match the
+ * plane bpp.
+ */
+ bool dither;
+
+ /*
+ * Dither gets enabled for 18bpp which causes CRC mismatch errors for
+ * compliance video pattern tests.
+ * Disable dither only if it is a compliance test request for
+ * 18bpp.
+ */
+ bool dither_force_disable;
+
+ /* Controls for the clock computation, to override various stages. */
+ bool clock_set;
+
+ /* SDVO TV has a bunch of special cases. To make multifunction encoders
+ * work correctly, we need to track this at runtime. */
+ bool sdvo_tv_clock;
+
+ /*
+ * crtc bandwidth limit, don't increase pipe bpp or clock if not really
+ * required. This is set in the 2nd loop of calling encoder's
+ * ->compute_config if the first pick doesn't work out.
+ */
+ bool bw_constrained;
+
+ /* Settings for the intel dpll used on pretty much everything but
+ * haswell. */
+ struct dpll dpll;
+
+ /* Selected dpll when shared or NULL. */
+ struct intel_shared_dpll *shared_dpll;
+
+ /* Actual register state of the dpll, for shared dpll cross-checking. */
+ union {
+ struct intel_dpll_hw_state dpll_hw_state;
+ struct intel_mpllb_state mpllb_state;
+ };
+
+ /*
+ * ICL reserved DPLLs for the CRTC/port. The active PLL is selected by
+ * setting shared_dpll and dpll_hw_state to one of these reserved ones.
+ */
+ struct icl_port_dpll {
+ struct intel_shared_dpll *pll;
+ struct intel_dpll_hw_state hw_state;
+ } icl_port_dplls[ICL_PORT_DPLL_COUNT];
+
+ /* DSI PLL registers */
+ struct {
+ u32 ctrl, div;
+ } dsi_pll;
+
+ int pipe_bpp;
+ struct intel_link_m_n dp_m_n;
+
+ /* m2_n2 for eDP downclock */
+ struct intel_link_m_n dp_m2_n2;
+ bool has_drrs;
+ bool seamless_m_n;
+
+ /* PSR is supported but might not be enabled due to the lack of enabled planes */
+ bool has_psr;
+ bool has_psr2;
+ bool enable_psr2_sel_fetch;
+ bool req_psr2_sdp_prior_scanline;
+ u32 dc3co_exitline;
+ u16 su_y_granularity;
+ struct drm_dp_vsc_sdp psr_vsc;
+
+ /*
+ * Frequency the dpll for the port should run at. Differs from the
+ * adjusted dotclock e.g. for DP or 10/12bpc HDMI modes. This is also
+ * already multiplied by pixel_multiplier.
+ */
+ int port_clock;
+
+ /* Used by SDVO (and if we ever fix it, HDMI). */
+ unsigned pixel_multiplier;
+
+ /* I915_MODE_FLAG_* */
+ u8 mode_flags;
+
+ u8 lane_count;
+
+ /*
+ * Used by platforms having DP/HDMI PHY with programmable lane
+ * latency optimization.
+ */
+ u8 lane_lat_optim_mask;
+
+ /* minimum acceptable voltage level */
+ u8 min_voltage_level;
+
+ /* Panel fitter controls for gen2-gen4 + VLV */
+ struct {
+ u32 control;
+ u32 pgm_ratios;
+ u32 lvds_border_bits;
+ } gmch_pfit;
+
+ /* Panel fitter placement and size for Ironlake+ */
+ struct {
+ struct drm_rect dst;
+ bool enabled;
+ bool force_thru;
+ } pch_pfit;
+
+ /* FDI configuration, only valid if has_pch_encoder is set. */
+ int fdi_lanes;
+ struct intel_link_m_n fdi_m_n;
+
+ bool ips_enabled;
+
+ bool crc_enabled;
+
+ bool double_wide;
+
+ int pbn;
+
+ struct intel_crtc_scaler_state scaler_state;
+
+ /* w/a for waiting 2 vblanks during crtc enable */
+ enum pipe hsw_workaround_pipe;
+
+ /* IVB sprite scaling w/a (WaCxSRDisabledForSpriteScaling:ivb) */
+ bool disable_lp_wm;
+
+ struct intel_crtc_wm_state wm;
+
+ int min_cdclk[I915_MAX_PLANES];
+
+ /* for packed/planar CbCr */
+ u32 data_rate[I915_MAX_PLANES];
+ /* for planar Y */
+ u32 data_rate_y[I915_MAX_PLANES];
+
+ /* FIXME unify with data_rate[]? */
+ u64 rel_data_rate[I915_MAX_PLANES];
+ u64 rel_data_rate_y[I915_MAX_PLANES];
+
+ /* Gamma mode programmed on the pipe */
+ u32 gamma_mode;
+
+ union {
+ /* CSC mode programmed on the pipe */
+ u32 csc_mode;
+
+ /* CHV CGM mode */
+ u32 cgm_mode;
+ };
+
+ /* bitmask of logically enabled planes (enum plane_id) */
+ u8 enabled_planes;
+
+ /* bitmask of actually visible planes (enum plane_id) */
+ u8 active_planes;
+ u8 scaled_planes;
+ u8 nv12_planes;
+ u8 c8_planes;
+
+ /* bitmask of planes that will be updated during the commit */
+ u8 update_planes;
+
+ u8 framestart_delay; /* 1-4 */
+ u8 msa_timing_delay; /* 0-3 */
+
+ struct {
+ u32 enable;
+ u32 gcp;
+ union hdmi_infoframe avi;
+ union hdmi_infoframe spd;
+ union hdmi_infoframe hdmi;
+ union hdmi_infoframe drm;
+ struct drm_dp_vsc_sdp vsc;
+ } infoframes;
+
+ /* HDMI scrambling status */
+ bool hdmi_scrambling;
+
+ /* HDMI High TMDS char rate ratio */
+ bool hdmi_high_tmds_clock_ratio;
+
+ /* Output format RGB/YCBCR etc */
+ enum intel_output_format output_format;
+
+ /* enable pipe gamma? */
+ bool gamma_enable;
+
+ /* enable pipe csc? */
+ bool csc_enable;
+
+ /* big joiner pipe bitmask */
+ u8 bigjoiner_pipes;
+
+ /* Display Stream compression state */
+ struct {
+ bool compression_enable;
+ bool dsc_split;
+ u16 compressed_bpp;
+ u8 slice_count;
+ struct drm_dsc_config config;
+ } dsc;
+
+ /* HSW+ linetime watermarks */
+ u16 linetime;
+ u16 ips_linetime;
+
+ /* Forward Error Correction state */
+ bool fec_enable;
+
+ /* Pointer to master transcoder in case of tiled displays */
+ enum transcoder master_transcoder;
+
+ /* Bitmask to indicate slaves attached */
+ u8 sync_mode_slaves_mask;
+
+ /* Only valid on TGL+ */
+ enum transcoder mst_master_transcoder;
+
+ /* For DSB related info */
+ struct intel_dsb *dsb;
+
+ u32 psr2_man_track_ctl;
+
+ /* Variable Refresh Rate state */
+ struct {
+ bool enable;
+ u8 pipeline_full;
+ u16 flipline, vmin, vmax, guardband;
+ } vrr;
+
+ /* Stream Splitter for eDP MSO */
+ struct {
+ bool enable;
+ u8 link_count;
+ u8 pixel_overlap;
+ } splitter;
+
+ /* for loading single buffered registers during vblank */
+ struct drm_vblank_work vblank_work;
+};
+
+enum intel_pipe_crc_source {
+ INTEL_PIPE_CRC_SOURCE_NONE,
+ INTEL_PIPE_CRC_SOURCE_PLANE1,
+ INTEL_PIPE_CRC_SOURCE_PLANE2,
+ INTEL_PIPE_CRC_SOURCE_PLANE3,
+ INTEL_PIPE_CRC_SOURCE_PLANE4,
+ INTEL_PIPE_CRC_SOURCE_PLANE5,
+ INTEL_PIPE_CRC_SOURCE_PLANE6,
+ INTEL_PIPE_CRC_SOURCE_PLANE7,
+ INTEL_PIPE_CRC_SOURCE_PIPE,
+ /* TV/DP on pre-gen5/vlv can't use the pipe source. */
+ INTEL_PIPE_CRC_SOURCE_TV,
+ INTEL_PIPE_CRC_SOURCE_DP_B,
+ INTEL_PIPE_CRC_SOURCE_DP_C,
+ INTEL_PIPE_CRC_SOURCE_DP_D,
+ INTEL_PIPE_CRC_SOURCE_AUTO,
+ INTEL_PIPE_CRC_SOURCE_MAX,
+};
+
+enum drrs_refresh_rate {
+ DRRS_REFRESH_RATE_HIGH,
+ DRRS_REFRESH_RATE_LOW,
+};
+
+#define INTEL_PIPE_CRC_ENTRIES_NR 128
+struct intel_pipe_crc {
+ spinlock_t lock;
+ int skipped;
+ enum intel_pipe_crc_source source;
+};
+
+struct intel_crtc {
+ struct drm_crtc base;
+ enum pipe pipe;
+ /*
+ * Whether the crtc and the connected output pipeline are active. Implies
+ * that crtc->enabled is set, i.e. the current mode configuration has
+ * some outputs connected to this crtc.
+ */
+ bool active;
+ u8 plane_ids_mask;
+
+ /* I915_MODE_FLAG_* */
+ u8 mode_flags;
+
+ u16 vmax_vblank_start;
+
+ struct intel_display_power_domain_set enabled_power_domains;
+ struct intel_overlay *overlay;
+
+ struct intel_crtc_state *config;
+
+ /* Access to these should be protected by dev_priv->irq_lock. */
+ bool cpu_fifo_underrun_disabled;
+ bool pch_fifo_underrun_disabled;
+
+ /* per-pipe watermark state */
+ struct {
+ /* watermarks currently being used */
+ union {
+ struct intel_pipe_wm ilk;
+ struct vlv_wm_state vlv;
+ struct g4x_wm_state g4x;
+ } active;
+ } wm;
+
+ struct {
+ struct mutex mutex;
+ struct delayed_work work;
+ enum drrs_refresh_rate refresh_rate;
+ unsigned int frontbuffer_bits;
+ unsigned int busy_frontbuffer_bits;
+ enum transcoder cpu_transcoder;
+ struct intel_link_m_n m_n, m2_n2;
+ } drrs;
+
+ int scanline_offset;
+
+ struct {
+ unsigned start_vbl_count;
+ ktime_t start_vbl_time;
+ int min_vbl, max_vbl;
+ int scanline_start;
+#ifdef CONFIG_DRM_I915_DEBUG_VBLANK_EVADE
+ struct {
+ u64 min;
+ u64 max;
+ u64 sum;
+ unsigned int over;
+ unsigned int times[17]; /* [1us, 16ms] */
+ } vbl;
+#endif
+ } debug;
+
+ /* scalers available on this crtc */
+ int num_scalers;
+
+ /* for loading single buffered registers during vblank */
+ struct pm_qos_request vblank_pm_qos;
+
+#ifdef CONFIG_DEBUG_FS
+ struct intel_pipe_crc pipe_crc;
+#endif
+};
+
+struct intel_plane {
+ struct drm_plane base;
+ enum i9xx_plane_id i9xx_plane;
+ enum plane_id id;
+ enum pipe pipe;
+ bool need_async_flip_disable_wa;
+ u32 frontbuffer_bit;
+
+ struct {
+ u32 base, cntl, size;
+ } cursor;
+
+ struct intel_fbc *fbc;
+
+ /*
+ * NOTE: Do not place new plane state fields here (e.g., when adding
+ * new plane properties). New runtime state should now be placed in
+ * the intel_plane_state structure and accessed via plane_state.
+ */
+
+ int (*min_width)(const struct drm_framebuffer *fb,
+ int color_plane,
+ unsigned int rotation);
+ int (*max_width)(const struct drm_framebuffer *fb,
+ int color_plane,
+ unsigned int rotation);
+ int (*max_height)(const struct drm_framebuffer *fb,
+ int color_plane,
+ unsigned int rotation);
+ unsigned int (*max_stride)(struct intel_plane *plane,
+ u32 pixel_format, u64 modifier,
+ unsigned int rotation);
+ /* Write all non-self arming plane registers */
+ void (*update_noarm)(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state);
+ /* Write all self-arming plane registers */
+ void (*update_arm)(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state);
+ /* Disable the plane, must arm */
+ void (*disable_arm)(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state);
+ bool (*get_hw_state)(struct intel_plane *plane, enum pipe *pipe);
+ int (*check_plane)(struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state);
+ int (*min_cdclk)(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state);
+ void (*async_flip)(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ bool async_flip);
+ void (*enable_flip_done)(struct intel_plane *plane);
+ void (*disable_flip_done)(struct intel_plane *plane);
+};
+
+struct intel_watermark_params {
+ u16 fifo_size;
+ u16 max_wm;
+ u8 default_wm;
+ u8 guard_size;
+ u8 cacheline_size;
+};
+
+struct cxsr_latency {
+ bool is_desktop : 1;
+ bool is_ddr3 : 1;
+ u16 fsb_freq;
+ u16 mem_freq;
+ u16 display_sr;
+ u16 display_hpll_disable;
+ u16 cursor_sr;
+ u16 cursor_hpll_disable;
+};
+
+#define to_intel_atomic_state(x) container_of(x, struct intel_atomic_state, base)
+#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
+#define to_intel_crtc_state(x) container_of(x, struct intel_crtc_state, uapi)
+#define to_intel_connector(x) container_of(x, struct intel_connector, base)
+#define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
+#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
+#define to_intel_plane(x) container_of(x, struct intel_plane, base)
+#define to_intel_plane_state(x) container_of(x, struct intel_plane_state, uapi)
+#define intel_fb_obj(x) ((x) ? to_intel_bo((x)->obj[0]) : NULL)
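+
+/*
+ * Illustrative usage sketch for the downcast helpers above (hypothetical
+ * variables): given a struct drm_crtc_state *state handed in by the drm core,
+ *
+ *   struct intel_crtc_state *crtc_state = to_intel_crtc_state(state);
+ *   struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ */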
+
+struct intel_hdmi {
+ i915_reg_t hdmi_reg;
+ int ddc_bus;
+ struct {
+ enum drm_dp_dual_mode_type type;
+ int max_tmds_clock;
+ } dp_dual_mode;
+ bool has_hdmi_sink;
+ bool has_audio;
+ struct intel_connector *attached_connector;
+ struct cec_notifier *cec_notifier;
+};
+
+struct intel_dp_mst_encoder;
+
+struct intel_dp_compliance_data {
+ unsigned long edid;
+ u8 video_pattern;
+ u16 hdisplay, vdisplay;
+ u8 bpc;
+ struct drm_dp_phy_test_params phytest;
+};
+
+struct intel_dp_compliance {
+ unsigned long test_type;
+ struct intel_dp_compliance_data test_data;
+ bool test_active;
+ int test_link_rate;
+ u8 test_lane_count;
+};
+
+struct intel_dp_pcon_frl {
+ bool is_trained;
+ int trained_rate_gbps;
+};
+
+struct intel_pps {
+ int panel_power_up_delay;
+ int panel_power_down_delay;
+ int panel_power_cycle_delay;
+ int backlight_on_delay;
+ int backlight_off_delay;
+ struct delayed_work panel_vdd_work;
+ bool want_panel_vdd;
+ bool initializing;
+ unsigned long last_power_on;
+ unsigned long last_backlight_off;
+ ktime_t panel_power_off_time;
+ intel_wakeref_t vdd_wakeref;
+
+ /*
+ * Pipe whose power sequencer is currently locked into
+ * this port. Only relevant on VLV/CHV.
+ */
+ enum pipe pps_pipe;
+ /*
+ * Pipe currently driving the port. Used to prevent
+ * the use of the PPS for any pipe currently driving
+ * external DP, as that will mess things up on VLV.
+ */
+ enum pipe active_pipe;
+ /*
+ * Set if the sequencer may be reset due to a power transition,
+ * requiring a reinitialization. Only relevant on BXT.
+ */
+ bool pps_reset;
+ struct edp_power_seq pps_delays;
+ struct edp_power_seq bios_pps_delays;
+};
+
+struct intel_psr {
+ /* Mutex for PSR state of the transcoder */
+ struct mutex lock;
+
+#define I915_PSR_DEBUG_MODE_MASK 0x0f
+#define I915_PSR_DEBUG_DEFAULT 0x00
+#define I915_PSR_DEBUG_DISABLE 0x01
+#define I915_PSR_DEBUG_ENABLE 0x02
+#define I915_PSR_DEBUG_FORCE_PSR1 0x03
+#define I915_PSR_DEBUG_ENABLE_SEL_FETCH 0x4
+#define I915_PSR_DEBUG_IRQ 0x10
+
+ u32 debug;
+ bool sink_support;
+ bool source_support;
+ bool enabled;
+ bool paused;
+ enum pipe pipe;
+ enum transcoder transcoder;
+ bool active;
+ struct work_struct work;
+ unsigned int busy_frontbuffer_bits;
+ bool sink_psr2_support;
+ bool link_standby;
+ bool colorimetry_support;
+ bool psr2_enabled;
+ bool psr2_sel_fetch_enabled;
+ bool psr2_sel_fetch_cff_enabled;
+ bool req_psr2_sdp_prior_scanline;
+ u8 sink_sync_latency;
+ u8 io_wake_lines;
+ u8 fast_wake_lines;
+ ktime_t last_entry_attempt;
+ ktime_t last_exit;
+ bool sink_not_reliable;
+ bool irq_aux_error;
+ u16 su_w_granularity;
+ u16 su_y_granularity;
+ u32 dc3co_exitline;
+ u32 dc3co_exit_delay;
+ struct delayed_work dc3co_work;
+};
+
+struct intel_dp {
+ i915_reg_t output_reg;
+ u32 DP;
+ int link_rate;
+ u8 lane_count;
+ u8 sink_count;
+ bool link_trained;
+ bool has_hdmi_sink;
+ bool has_audio;
+ bool reset_link_params;
+ bool use_max_params;
+ u8 dpcd[DP_RECEIVER_CAP_SIZE];
+ u8 psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE];
+ u8 downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
+ u8 edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE];
+ u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE];
+ u8 lttpr_common_caps[DP_LTTPR_COMMON_CAP_SIZE];
+ u8 lttpr_phy_caps[DP_MAX_LTTPR_COUNT][DP_LTTPR_PHY_CAP_SIZE];
+ u8 fec_capable;
+ u8 pcon_dsc_dpcd[DP_PCON_DSC_ENCODER_CAP_SIZE];
+ /* source rates */
+ int num_source_rates;
+ const int *source_rates;
+ /* sink rates as reported by DP_MAX_LINK_RATE/DP_SUPPORTED_LINK_RATES */
+ int num_sink_rates;
+ int sink_rates[DP_MAX_SUPPORTED_RATES];
+ bool use_rate_select;
+ /* Max sink lane count as reported by DP_MAX_LANE_COUNT */
+ int max_sink_lane_count;
+ /* intersection of source and sink rates */
+ int num_common_rates;
+ int common_rates[DP_MAX_SUPPORTED_RATES];
+ /* Max lane count for the current link */
+ int max_link_lane_count;
+ /* Max rate for the current link */
+ int max_link_rate;
+ int mso_link_count;
+ int mso_pixel_overlap;
+ /* sink or branch descriptor */
+ struct drm_dp_desc desc;
+ struct drm_dp_aux aux;
+ u32 aux_busy_last_status;
+ u8 train_set[4];
+
+ struct intel_pps pps;
+
+ bool is_mst;
+ int active_mst_links;
+
+ /* connector directly attached - won't be used for modeset in the MST world */
+ struct intel_connector *attached_connector;
+
+ /* mst connector list */
+ struct intel_dp_mst_encoder *mst_encoders[I915_MAX_PIPES];
+ struct drm_dp_mst_topology_mgr mst_mgr;
+
+ u32 (*get_aux_clock_divider)(struct intel_dp *dp, int index);
+ /*
+ * This function returns the value we have to program the AUX_CTL
+ * register with to kick off an AUX transaction.
+ */
+ u32 (*get_aux_send_ctl)(struct intel_dp *dp, int send_bytes,
+ u32 aux_clock_divider);
+
+ i915_reg_t (*aux_ch_ctl_reg)(struct intel_dp *dp);
+ i915_reg_t (*aux_ch_data_reg)(struct intel_dp *dp, int index);
+
+ /* This is called before link training is started */
+ void (*prepare_link_retrain)(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state);
+ void (*set_link_train)(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ u8 dp_train_pat);
+ void (*set_idle_link_train)(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state);
+
+ u8 (*preemph_max)(struct intel_dp *intel_dp);
+ u8 (*voltage_max)(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state);
+
+ /* Displayport compliance testing */
+ struct intel_dp_compliance compliance;
+
+ /* Downstream facing port caps */
+ struct {
+ int min_tmds_clock, max_tmds_clock;
+ int max_dotclock;
+ int pcon_max_frl_bw;
+ u8 max_bpc;
+ bool ycbcr_444_to_420;
+ bool rgb_to_ycbcr;
+ } dfp;
+
+ /* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
+ struct pm_qos_request pm_qos;
+
+ /* Display stream compression testing */
+ bool force_dsc_en;
+ int force_dsc_bpc;
+
+ bool hobl_failed;
+ bool hobl_active;
+
+ struct intel_dp_pcon_frl frl;
+
+ struct intel_psr psr;
+
+ /* When we last wrote the OUI for eDP */
+ unsigned long last_oui_write;
+};
+
+enum lspcon_vendor {
+ LSPCON_VENDOR_MCA,
+ LSPCON_VENDOR_PARADE
+};
+
+struct intel_lspcon {
+ bool active;
+ bool hdr_supported;
+ enum drm_lspcon_mode mode;
+ enum lspcon_vendor vendor;
+};
+
+struct intel_digital_port {
+ struct intel_encoder base;
+ u32 saved_port_bits;
+ struct intel_dp dp;
+ struct intel_hdmi hdmi;
+ struct intel_lspcon lspcon;
+ enum irqreturn (*hpd_pulse)(struct intel_digital_port *, bool);
+ bool release_cl2_override;
+ u8 max_lanes;
+ /* Used for DP and ICL+ TypeC/DP and TypeC/HDMI ports. */
+ enum aux_ch aux_ch;
+ enum intel_display_power_domain ddi_io_power_domain;
+ intel_wakeref_t ddi_io_wakeref;
+ intel_wakeref_t aux_wakeref;
+
+ struct mutex tc_lock; /* protects the TypeC port mode */
+ intel_wakeref_t tc_lock_wakeref;
+ enum intel_display_power_domain tc_lock_power_domain;
+ struct delayed_work tc_disconnect_phy_work;
+ int tc_link_refcount;
+ bool tc_legacy_port:1;
+ char tc_port_name[8];
+ enum tc_port_mode tc_mode;
+ enum tc_port_mode tc_init_mode;
+ enum phy_fia tc_phy_fia;
+ u8 tc_phy_fia_idx;
+
+ /* protects num_hdcp_streams reference count, hdcp_port_data and hdcp_auth_status */
+ struct mutex hdcp_mutex;
+ /* the number of pipes using HDCP signalling out of this port */
+ unsigned int num_hdcp_streams;
+ /* port HDCP auth status */
+ bool hdcp_auth_status;
+ /* HDCP port data that needs to be passed to the security f/w */
+ struct hdcp_port_data hdcp_port_data;
+ /* Whether the MST topology supports HDCP Type 1 Content */
+ bool hdcp_mst_type1_capable;
+
+ void (*write_infoframe)(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int type,
+ const void *frame, ssize_t len);
+ void (*read_infoframe)(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int type,
+ void *frame, ssize_t len);
+ void (*set_infoframes)(struct intel_encoder *encoder,
+ bool enable,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
+ u32 (*infoframes_enabled)(struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config);
+ bool (*connected)(struct intel_encoder *encoder);
+};
+
+struct intel_dp_mst_encoder {
+ struct intel_encoder base;
+ enum pipe pipe;
+ struct intel_digital_port *primary;
+ struct intel_connector *connector;
+};
+
+static inline enum dpio_channel
+vlv_dig_port_to_channel(struct intel_digital_port *dig_port)
+{
+ switch (dig_port->base.port) {
+ default:
+ MISSING_CASE(dig_port->base.port);
+ fallthrough;
+ case PORT_B:
+ case PORT_D:
+ return DPIO_CH0;
+ case PORT_C:
+ return DPIO_CH1;
+ }
+}
+
+static inline enum dpio_phy
+vlv_dig_port_to_phy(struct intel_digital_port *dig_port)
+{
+ switch (dig_port->base.port) {
+ default:
+ MISSING_CASE(dig_port->base.port);
+ fallthrough;
+ case PORT_B:
+ case PORT_C:
+ return DPIO_PHY0;
+ case PORT_D:
+ return DPIO_PHY1;
+ }
+}
+
+static inline enum dpio_channel
+vlv_pipe_to_channel(enum pipe pipe)
+{
+ switch (pipe) {
+ default:
+ MISSING_CASE(pipe);
+ fallthrough;
+ case PIPE_A:
+ case PIPE_C:
+ return DPIO_CH0;
+ case PIPE_B:
+ return DPIO_CH1;
+ }
+}
+
+struct intel_load_detect_pipe {
+ struct drm_atomic_state *restore_state;
+};
+
+static inline struct intel_encoder *
+intel_attached_encoder(struct intel_connector *connector)
+{
+ return connector->encoder;
+}
+
+static inline bool intel_encoder_is_dig_port(struct intel_encoder *encoder)
+{
+ switch (encoder->type) {
+ case INTEL_OUTPUT_DDI:
+ case INTEL_OUTPUT_DP:
+ case INTEL_OUTPUT_EDP:
+ case INTEL_OUTPUT_HDMI:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static inline bool intel_encoder_is_mst(struct intel_encoder *encoder)
+{
+ return encoder->type == INTEL_OUTPUT_DP_MST;
+}
+
+static inline struct intel_dp_mst_encoder *
+enc_to_mst(struct intel_encoder *encoder)
+{
+ return container_of(&encoder->base, struct intel_dp_mst_encoder,
+ base.base);
+}
+
+static inline struct intel_digital_port *
+enc_to_dig_port(struct intel_encoder *encoder)
+{
+ struct intel_encoder *intel_encoder = encoder;
+
+ if (intel_encoder_is_dig_port(intel_encoder))
+ return container_of(&encoder->base, struct intel_digital_port,
+ base.base);
+ else if (intel_encoder_is_mst(intel_encoder))
+ return enc_to_mst(encoder)->primary;
+ else
+ return NULL;
+}
+
+static inline struct intel_digital_port *
+intel_attached_dig_port(struct intel_connector *connector)
+{
+ return enc_to_dig_port(intel_attached_encoder(connector));
+}
+
+static inline struct intel_hdmi *
+enc_to_intel_hdmi(struct intel_encoder *encoder)
+{
+ return &enc_to_dig_port(encoder)->hdmi;
+}
+
+static inline struct intel_hdmi *
+intel_attached_hdmi(struct intel_connector *connector)
+{
+ return enc_to_intel_hdmi(intel_attached_encoder(connector));
+}
+
+static inline struct intel_dp *enc_to_intel_dp(struct intel_encoder *encoder)
+{
+ return &enc_to_dig_port(encoder)->dp;
+}
+
+static inline struct intel_dp *intel_attached_dp(struct intel_connector *connector)
+{
+ return enc_to_intel_dp(intel_attached_encoder(connector));
+}
+
+static inline bool intel_encoder_is_dp(struct intel_encoder *encoder)
+{
+ switch (encoder->type) {
+ case INTEL_OUTPUT_DP:
+ case INTEL_OUTPUT_EDP:
+ return true;
+ case INTEL_OUTPUT_DDI:
+ /* Skip pure HDMI/DVI DDI encoders */
+ return i915_mmio_reg_valid(enc_to_intel_dp(encoder)->output_reg);
+ default:
+ return false;
+ }
+}
+
+static inline struct intel_lspcon *
+enc_to_intel_lspcon(struct intel_encoder *encoder)
+{
+ return &enc_to_dig_port(encoder)->lspcon;
+}
+
+static inline struct intel_digital_port *
+dp_to_dig_port(struct intel_dp *intel_dp)
+{
+ return container_of(intel_dp, struct intel_digital_port, dp);
+}
+
+static inline struct intel_lspcon *
+dp_to_lspcon(struct intel_dp *intel_dp)
+{
+ return &dp_to_dig_port(intel_dp)->lspcon;
+}
+
+#define dp_to_i915(__intel_dp) to_i915(dp_to_dig_port(__intel_dp)->base.base.dev)
+
+#define CAN_PSR(intel_dp) ((intel_dp)->psr.sink_support && \
+ (intel_dp)->psr.source_support)
+
+static inline bool intel_encoder_can_psr(struct intel_encoder *encoder)
+{
+ if (!intel_encoder_is_dp(encoder))
+ return false;
+
+ return CAN_PSR(enc_to_intel_dp(encoder));
+}
+
+static inline struct intel_digital_port *
+hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
+{
+ return container_of(intel_hdmi, struct intel_digital_port, hdmi);
+}
+
+static inline struct intel_plane_state *
+intel_atomic_get_plane_state(struct intel_atomic_state *state,
+ struct intel_plane *plane)
+{
+ struct drm_plane_state *ret =
+ drm_atomic_get_plane_state(&state->base, &plane->base);
+
+ if (IS_ERR(ret))
+ return ERR_CAST(ret);
+
+ return to_intel_plane_state(ret);
+}
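+
+/*
+ * Illustrative usage sketch (hypothetical caller): unlike the old/new state
+ * lookups below, this helper may allocate and thus fail, so check the result:
+ *
+ *   struct intel_plane_state *plane_state =
+ *           intel_atomic_get_plane_state(state, plane);
+ *   if (IS_ERR(plane_state))
+ *           return PTR_ERR(plane_state);
+ */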
+
+static inline struct intel_plane_state *
+intel_atomic_get_old_plane_state(struct intel_atomic_state *state,
+ struct intel_plane *plane)
+{
+ return to_intel_plane_state(drm_atomic_get_old_plane_state(&state->base,
+ &plane->base));
+}
+
+static inline struct intel_plane_state *
+intel_atomic_get_new_plane_state(struct intel_atomic_state *state,
+ struct intel_plane *plane)
+{
+ return to_intel_plane_state(drm_atomic_get_new_plane_state(&state->base,
+ &plane->base));
+}
+
+static inline struct intel_crtc_state *
+intel_atomic_get_old_crtc_state(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ return to_intel_crtc_state(drm_atomic_get_old_crtc_state(&state->base,
+ &crtc->base));
+}
+
+static inline struct intel_crtc_state *
+intel_atomic_get_new_crtc_state(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ return to_intel_crtc_state(drm_atomic_get_new_crtc_state(&state->base,
+ &crtc->base));
+}
+
+static inline struct intel_digital_connector_state *
+intel_atomic_get_new_connector_state(struct intel_atomic_state *state,
+ struct intel_connector *connector)
+{
+ return to_intel_digital_connector_state(
+ drm_atomic_get_new_connector_state(&state->base,
+ &connector->base));
+}
+
+static inline struct intel_digital_connector_state *
+intel_atomic_get_old_connector_state(struct intel_atomic_state *state,
+ struct intel_connector *connector)
+{
+ return to_intel_digital_connector_state(
+ drm_atomic_get_old_connector_state(&state->base,
+ &connector->base));
+}
+
+/* intel_display.c */
+static inline bool
+intel_crtc_has_type(const struct intel_crtc_state *crtc_state,
+ enum intel_output_type type)
+{
+ return crtc_state->output_types & (1 << type);
+}
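+/* e.g. intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) tests for an HDMI output */
+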
+static inline bool
+intel_crtc_has_dp_encoder(const struct intel_crtc_state *crtc_state)
+{
+ return crtc_state->output_types &
+ ((1 << INTEL_OUTPUT_DP) |
+ (1 << INTEL_OUTPUT_DP_MST) |
+ (1 << INTEL_OUTPUT_EDP));
+}
+
+static inline bool
+intel_crtc_needs_modeset(const struct intel_crtc_state *crtc_state)
+{
+ return drm_atomic_crtc_needs_modeset(&crtc_state->uapi);
+}
+
+static inline u32 intel_plane_ggtt_offset(const struct intel_plane_state *plane_state)
+{
+ return i915_ggtt_offset(plane_state->ggtt_vma);
+}
+
+static inline struct intel_frontbuffer *
+to_intel_frontbuffer(struct drm_framebuffer *fb)
+{
+ return fb ? to_intel_framebuffer(fb)->frontbuffer : NULL;
+}
+
+#endif /* __INTEL_DISPLAY_TYPES_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dkl_phy.c b/drivers/gpu/drm/i915/display/intel_dkl_phy.c
new file mode 100644
index 000000000..710b030c7
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dkl_phy.c
@@ -0,0 +1,109 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "i915_reg.h"
+
+#include "intel_de.h"
+#include "intel_display.h"
+#include "intel_dkl_phy.h"
+
+static void
+dkl_phy_set_hip_idx(struct drm_i915_private *i915, i915_reg_t reg, int idx)
+{
+ enum tc_port tc_port = DKL_REG_TC_PORT(reg);
+
+ drm_WARN_ON(&i915->drm, tc_port < TC_PORT_1 || tc_port >= I915_MAX_TC_PORTS);
+
+ intel_de_write(i915,
+ HIP_INDEX_REG(tc_port),
+ HIP_INDEX_VAL(tc_port, idx));
+}
+
+/**
+ * intel_dkl_phy_read - read a Dekel PHY register
+ * @i915: i915 device instance
+ * @reg: Dekel PHY register
+ * @ln: lane instance of @reg
+ *
+ * Read the @reg Dekel PHY register.
+ *
+ * Returns the read value.
+ */
+u32
+intel_dkl_phy_read(struct drm_i915_private *i915, i915_reg_t reg, int ln)
+{
+ u32 val;
+
+ spin_lock(&i915->display.dkl.phy_lock);
+
+ dkl_phy_set_hip_idx(i915, reg, ln);
+ val = intel_de_read(i915, reg);
+
+ spin_unlock(&i915->display.dkl.phy_lock);
+
+ return val;
+}
+
+/**
+ * intel_dkl_phy_write - write a Dekel PHY register
+ * @i915: i915 device instance
+ * @reg: Dekel PHY register
+ * @ln: lane instance of @reg
+ * @val: value to write
+ *
+ * Write @val to the @reg Dekel PHY register.
+ */
+void
+intel_dkl_phy_write(struct drm_i915_private *i915, i915_reg_t reg, int ln, u32 val)
+{
+ spin_lock(&i915->display.dkl.phy_lock);
+
+ dkl_phy_set_hip_idx(i915, reg, ln);
+ intel_de_write(i915, reg, val);
+
+ spin_unlock(&i915->display.dkl.phy_lock);
+}
+
+/**
+ * intel_dkl_phy_rmw - read-modify-write a Dekel PHY register
+ * @i915: i915 device instance
+ * @reg: Dekel PHY register
+ * @ln: lane instance of @reg
+ * @clear: mask to clear
+ * @set: mask to set
+ *
+ * Read the @reg Dekel PHY register, clearing then setting the @clear/@set bits in it, and writing
+ * this value back to the register if the value differs from the read one.
+ */
+void
+intel_dkl_phy_rmw(struct drm_i915_private *i915, i915_reg_t reg, int ln, u32 clear, u32 set)
+{
+ spin_lock(&i915->display.dkl.phy_lock);
+
+ dkl_phy_set_hip_idx(i915, reg, ln);
+ intel_de_rmw(i915, reg, clear, set);
+
+ spin_unlock(&i915->display.dkl.phy_lock);
+}
+
+/**
+ * intel_dkl_phy_posting_read - do a posting read from a Dekel PHY register
+ * @i915: i915 device instance
+ * @reg: Dekel PHY register
+ * @ln: lane instance of @reg
+ *
+ * Read the @reg Dekel PHY register without returning the read value.
+ */
+void
+intel_dkl_phy_posting_read(struct drm_i915_private *i915, i915_reg_t reg, int ln)
+{
+ spin_lock(&i915->display.dkl.phy_lock);
+
+ dkl_phy_set_hip_idx(i915, reg, ln);
+ intel_de_posting_read(i915, reg);
+
+ spin_unlock(&i915->display.dkl.phy_lock);
+}
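+
+/*
+ * Illustrative usage sketch (hypothetical snippet modeled on callers in
+ * intel_ddi.c): callers pass the Dekel register and the lane whose instance
+ * of it should be accessed, e.g.
+ *
+ *   intel_dkl_phy_rmw(i915, DKL_TX_DPCNTL2(tc_port), ln,
+ *                     DKL_TX_DP20BITMODE, 0);
+ */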
diff --git a/drivers/gpu/drm/i915/display/intel_dkl_phy.h b/drivers/gpu/drm/i915/display/intel_dkl_phy.h
new file mode 100644
index 000000000..260ad121a
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dkl_phy.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __INTEL_DKL_PHY_H__
+#define __INTEL_DKL_PHY_H__
+
+#include <linux/types.h>
+
+#include "i915_reg_defs.h"
+
+struct drm_i915_private;
+
+u32
+intel_dkl_phy_read(struct drm_i915_private *i915, i915_reg_t reg, int ln);
+void
+intel_dkl_phy_write(struct drm_i915_private *i915, i915_reg_t reg, int ln, u32 val);
+void
+intel_dkl_phy_rmw(struct drm_i915_private *i915, i915_reg_t reg, int ln, u32 clear, u32 set);
+void
+intel_dkl_phy_posting_read(struct drm_i915_private *i915, i915_reg_t reg, int ln);
+
+#endif /* __INTEL_DKL_PHY_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c
new file mode 100644
index 000000000..e52ecc073
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dmc.c
@@ -0,0 +1,1132 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/firmware.h>
+
+#include "i915_drv.h"
+#include "i915_reg.h"
+#include "intel_de.h"
+#include "intel_dmc.h"
+#include "intel_dmc_regs.h"
+
+/**
+ * DOC: DMC Firmware Support
+ *
+ * From gen9 onwards the display engine includes a DMC (Display
+ * Microcontroller) that saves and restores the display engine state when it
+ * enters a low-power state and comes back to normal.
+ */
+
+#define DMC_VERSION(major, minor) ((major) << 16 | (minor))
+#define DMC_VERSION_MAJOR(version) ((version) >> 16)
+#define DMC_VERSION_MINOR(version) ((version) & 0xffff)
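+/* e.g. DMC_VERSION(2, 12) == 0x2000c and DMC_VERSION_MAJOR(0x2000c) == 2 */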
+
+#define DMC_PATH(platform, major, minor) \
+ "i915/" \
+ __stringify(platform) "_dmc_ver" \
+ __stringify(major) "_" \
+ __stringify(minor) ".bin"
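+/* e.g. DMC_PATH(tgl, 2, 12) expands to "i915/tgl_dmc_ver2_12.bin" */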
+
+#define DISPLAY_VER13_DMC_MAX_FW_SIZE 0x20000
+
+#define DISPLAY_VER12_DMC_MAX_FW_SIZE ICL_DMC_MAX_FW_SIZE
+
+#define DG2_DMC_PATH DMC_PATH(dg2, 2, 07)
+#define DG2_DMC_VERSION_REQUIRED DMC_VERSION(2, 07)
+MODULE_FIRMWARE(DG2_DMC_PATH);
+
+#define ADLP_DMC_PATH DMC_PATH(adlp, 2, 16)
+#define ADLP_DMC_VERSION_REQUIRED DMC_VERSION(2, 16)
+MODULE_FIRMWARE(ADLP_DMC_PATH);
+
+#define ADLS_DMC_PATH DMC_PATH(adls, 2, 01)
+#define ADLS_DMC_VERSION_REQUIRED DMC_VERSION(2, 1)
+MODULE_FIRMWARE(ADLS_DMC_PATH);
+
+#define DG1_DMC_PATH DMC_PATH(dg1, 2, 02)
+#define DG1_DMC_VERSION_REQUIRED DMC_VERSION(2, 2)
+MODULE_FIRMWARE(DG1_DMC_PATH);
+
+#define RKL_DMC_PATH DMC_PATH(rkl, 2, 03)
+#define RKL_DMC_VERSION_REQUIRED DMC_VERSION(2, 3)
+MODULE_FIRMWARE(RKL_DMC_PATH);
+
+#define TGL_DMC_PATH DMC_PATH(tgl, 2, 12)
+#define TGL_DMC_VERSION_REQUIRED DMC_VERSION(2, 12)
+MODULE_FIRMWARE(TGL_DMC_PATH);
+
+#define ICL_DMC_PATH DMC_PATH(icl, 1, 09)
+#define ICL_DMC_VERSION_REQUIRED DMC_VERSION(1, 9)
+#define ICL_DMC_MAX_FW_SIZE 0x6000
+MODULE_FIRMWARE(ICL_DMC_PATH);
+
+#define GLK_DMC_PATH DMC_PATH(glk, 1, 04)
+#define GLK_DMC_VERSION_REQUIRED DMC_VERSION(1, 4)
+#define GLK_DMC_MAX_FW_SIZE 0x4000
+MODULE_FIRMWARE(GLK_DMC_PATH);
+
+#define KBL_DMC_PATH DMC_PATH(kbl, 1, 04)
+#define KBL_DMC_VERSION_REQUIRED DMC_VERSION(1, 4)
+#define KBL_DMC_MAX_FW_SIZE BXT_DMC_MAX_FW_SIZE
+MODULE_FIRMWARE(KBL_DMC_PATH);
+
+#define SKL_DMC_PATH DMC_PATH(skl, 1, 27)
+#define SKL_DMC_VERSION_REQUIRED DMC_VERSION(1, 27)
+#define SKL_DMC_MAX_FW_SIZE BXT_DMC_MAX_FW_SIZE
+MODULE_FIRMWARE(SKL_DMC_PATH);
+
+#define BXT_DMC_PATH DMC_PATH(bxt, 1, 07)
+#define BXT_DMC_VERSION_REQUIRED DMC_VERSION(1, 7)
+#define BXT_DMC_MAX_FW_SIZE 0x3000
+MODULE_FIRMWARE(BXT_DMC_PATH);
+
+#define DMC_DEFAULT_FW_OFFSET 0xFFFFFFFF
+#define PACKAGE_MAX_FW_INFO_ENTRIES 20
+#define PACKAGE_V2_MAX_FW_INFO_ENTRIES 32
+#define DMC_V1_MAX_MMIO_COUNT 8
+#define DMC_V3_MAX_MMIO_COUNT 20
+#define DMC_V1_MMIO_START_RANGE 0x80000
+
+struct intel_css_header {
+ /* 0x09 for DMC */
+ u32 module_type;
+
+ /* Includes the DMC specific header in dwords */
+ u32 header_len;
+
+ /* value is always 0x10000 */
+ u32 header_ver;
+
+ /* Not used */
+ u32 module_id;
+
+ /* Not used */
+ u32 module_vendor;
+
+ /* in YYYYMMDD format */
+ u32 date;
+
+ /* Size in dwords (CSS_Headerlen + PackageHeaderLen + dmc FWsLen)/4 */
+ u32 size;
+
+ /* Not used */
+ u32 key_size;
+
+ /* Not used */
+ u32 modulus_size;
+
+ /* Not used */
+ u32 exponent_size;
+
+ /* Not used */
+ u32 reserved1[12];
+
+ /* Major Minor */
+ u32 version;
+
+ /* Not used */
+ u32 reserved2[8];
+
+ /* Not used */
+ u32 kernel_header_info;
+} __packed;
+
+struct intel_fw_info {
+ u8 reserved1;
+
+ /* reserved on package_header version 1, must be 0 on version 2 */
+ u8 dmc_id;
+
+ /* Stepping (A, B, C, ..., *). * is a wildcard */
+ char stepping;
+
+ /* Sub-stepping (0, 1, ..., *). * is a wildcard */
+ char substepping;
+
+ u32 offset;
+ u32 reserved2;
+} __packed;
+
+struct intel_package_header {
+ /* DMC container header length in dwords */
+ u8 header_len;
+
+ /* 0x01, 0x02 */
+ u8 header_ver;
+
+ u8 reserved[10];
+
+ /* Number of valid entries in the FWInfo array below */
+ u32 num_entries;
+} __packed;
+
+struct intel_dmc_header_base {
+ /* value is always 0x40403E3E */
+ u32 signature;
+
+ /* DMC binary header length */
+ u8 header_len;
+
+ /* 0x01 */
+ u8 header_ver;
+
+ /* Reserved */
+ u16 dmcc_ver;
+
+ /* Major, Minor */
+ u32 project;
+
+ /* Firmware program size (excluding header) in dwords */
+ u32 fw_size;
+
+ /* Major Minor version */
+ u32 fw_version;
+} __packed;
+
+struct intel_dmc_header_v1 {
+ struct intel_dmc_header_base base;
+
+ /* Number of valid MMIO cycles present. */
+ u32 mmio_count;
+
+ /* MMIO address */
+ u32 mmioaddr[DMC_V1_MAX_MMIO_COUNT];
+
+ /* MMIO data */
+ u32 mmiodata[DMC_V1_MAX_MMIO_COUNT];
+
+ /* FW filename */
+ char dfile[32];
+
+ u32 reserved1[2];
+} __packed;
+
+struct intel_dmc_header_v3 {
+ struct intel_dmc_header_base base;
+
+ /* DMC RAM start MMIO address */
+ u32 start_mmioaddr;
+
+ u32 reserved[9];
+
+ /* FW filename */
+ char dfile[32];
+
+ /* Number of valid MMIO cycles present. */
+ u32 mmio_count;
+
+ /* MMIO address */
+ u32 mmioaddr[DMC_V3_MAX_MMIO_COUNT];
+
+ /* MMIO data */
+ u32 mmiodata[DMC_V3_MAX_MMIO_COUNT];
+} __packed;
+
+struct stepping_info {
+ char stepping;
+ char substepping;
+};
+
+static bool has_dmc_id_fw(struct drm_i915_private *i915, int dmc_id)
+{
+ return i915->display.dmc.dmc_info[dmc_id].payload;
+}
+
+bool intel_dmc_has_payload(struct drm_i915_private *i915)
+{
+ return has_dmc_id_fw(i915, DMC_FW_MAIN);
+}
+
+static const struct stepping_info *
+intel_get_stepping_info(struct drm_i915_private *i915,
+ struct stepping_info *si)
+{
+ const char *step_name = intel_step_name(RUNTIME_INFO(i915)->step.display_step);
+
+ si->stepping = step_name[0];
+ si->substepping = step_name[1];
+ return si;
+}
+
+static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv)
+{
+ /* The below bits don't ever need to be cleared afterwards */
+ intel_de_rmw(dev_priv, DC_STATE_DEBUG, 0,
+ DC_STATE_DEBUG_MASK_CORES | DC_STATE_DEBUG_MASK_MEMORY_UP);
+ intel_de_posting_read(dev_priv, DC_STATE_DEBUG);
+}
+
+static void disable_event_handler(struct drm_i915_private *i915,
+ i915_reg_t ctl_reg, i915_reg_t htp_reg)
+{
+ intel_de_write(i915, ctl_reg,
+ REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK,
+ DMC_EVT_CTL_TYPE_EDGE_0_1) |
+ REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK,
+ DMC_EVT_CTL_EVENT_ID_FALSE));
+ intel_de_write(i915, htp_reg, 0);
+}
+
+static void
+disable_flip_queue_event(struct drm_i915_private *i915,
+ i915_reg_t ctl_reg, i915_reg_t htp_reg)
+{
+ u32 event_ctl;
+ u32 event_htp;
+
+ event_ctl = intel_de_read(i915, ctl_reg);
+ event_htp = intel_de_read(i915, htp_reg);
+ if (event_ctl != (DMC_EVT_CTL_ENABLE |
+ DMC_EVT_CTL_RECURRING |
+ REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK,
+ DMC_EVT_CTL_TYPE_EDGE_0_1) |
+ REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK,
+ DMC_EVT_CTL_EVENT_ID_CLK_MSEC)) ||
+ !event_htp) {
+ drm_dbg_kms(&i915->drm,
+ "Unexpected DMC event configuration (control %08x htp %08x)\n",
+ event_ctl, event_htp);
+ return;
+ }
+
+ disable_event_handler(i915, ctl_reg, htp_reg);
+}
+
+static bool
+get_flip_queue_event_regs(struct drm_i915_private *i915, int dmc_id,
+ i915_reg_t *ctl_reg, i915_reg_t *htp_reg)
+{
+ switch (dmc_id) {
+ case DMC_FW_MAIN:
+ if (DISPLAY_VER(i915) == 12) {
+ *ctl_reg = DMC_EVT_CTL(i915, dmc_id, 3);
+ *htp_reg = DMC_EVT_HTP(i915, dmc_id, 3);
+
+ return true;
+ }
+ break;
+ case DMC_FW_PIPEA ... DMC_FW_PIPED:
+ if (IS_DG2(i915)) {
+ *ctl_reg = DMC_EVT_CTL(i915, dmc_id, 2);
+ *htp_reg = DMC_EVT_HTP(i915, dmc_id, 2);
+
+ return true;
+ }
+ break;
+ }
+
+ return false;
+}
+
+static void
+disable_all_flip_queue_events(struct drm_i915_private *i915)
+{
+ int dmc_id;
+
+ /* TODO: check if the following applies to all D13+ platforms. */
+ if (!IS_DG2(i915) && !IS_TIGERLAKE(i915))
+ return;
+
+ for (dmc_id = 0; dmc_id < DMC_FW_MAX; dmc_id++) {
+ i915_reg_t ctl_reg;
+ i915_reg_t htp_reg;
+
+ if (!has_dmc_id_fw(i915, dmc_id))
+ continue;
+
+ if (!get_flip_queue_event_regs(i915, dmc_id, &ctl_reg, &htp_reg))
+ continue;
+
+ disable_flip_queue_event(i915, ctl_reg, htp_reg);
+ }
+}
+
+static void disable_all_event_handlers(struct drm_i915_private *i915)
+{
+ int id;
+
+ /* TODO: disable the event handlers on pre-GEN12 platforms as well */
+ if (DISPLAY_VER(i915) < 12)
+ return;
+
+ for (id = DMC_FW_MAIN; id < DMC_FW_MAX; id++) {
+ int handler;
+
+ if (!has_dmc_id_fw(i915, id))
+ continue;
+
+ for (handler = 0; handler < DMC_EVENT_HANDLER_COUNT_GEN12; handler++)
+ disable_event_handler(i915,
+ DMC_EVT_CTL(i915, id, handler),
+ DMC_EVT_HTP(i915, id, handler));
+ }
+}
+
+static void pipedmc_clock_gating_wa(struct drm_i915_private *i915, bool enable)
+{
+ enum pipe pipe;
+
+ if (DISPLAY_VER(i915) != 13)
+ return;
+
+ /*
+ * Wa_16015201720:adl-p,dg2
+ * The WA requires clock gating to be disabled all the time
+ * for pipe A and B.
+ * For pipe C and D clock gating needs to be disabled only
+ * during initializing the firmware.
+ */
+ if (enable)
+ for (pipe = PIPE_A; pipe <= PIPE_D; pipe++)
+ intel_de_rmw(i915, CLKGATE_DIS_PSL_EXT(pipe),
+ 0, PIPEDMC_GATING_DIS);
+ else
+ for (pipe = PIPE_C; pipe <= PIPE_D; pipe++)
+ intel_de_rmw(i915, CLKGATE_DIS_PSL_EXT(pipe),
+ PIPEDMC_GATING_DIS, 0);
+}
+
+/**
+ * intel_dmc_load_program() - write the firmware from memory to registers.
+ * @dev_priv: i915 drm device.
+ *
+ * DMC firmware is read from a .bin file and kept in internal memory one time.
+ * Every time the display comes back from a low-power state this function is
+ * called to copy the firmware from internal memory to the registers.
+ */
+void intel_dmc_load_program(struct drm_i915_private *dev_priv)
+{
+ struct intel_dmc *dmc = &dev_priv->display.dmc;
+ u32 id, i;
+
+ if (!intel_dmc_has_payload(dev_priv))
+ return;
+
+ pipedmc_clock_gating_wa(dev_priv, true);
+
+ disable_all_event_handlers(dev_priv);
+
+ assert_rpm_wakelock_held(&dev_priv->runtime_pm);
+
+ preempt_disable();
+
+ for (id = 0; id < DMC_FW_MAX; id++) {
+ for (i = 0; i < dmc->dmc_info[id].dmc_fw_size; i++) {
+ intel_uncore_write_fw(&dev_priv->uncore,
+ DMC_PROGRAM(dmc->dmc_info[id].start_mmioaddr, i),
+ dmc->dmc_info[id].payload[i]);
+ }
+ }
+
+ preempt_enable();
+
+ for (id = 0; id < DMC_FW_MAX; id++) {
+ for (i = 0; i < dmc->dmc_info[id].mmio_count; i++) {
+ intel_de_write(dev_priv, dmc->dmc_info[id].mmioaddr[i],
+ dmc->dmc_info[id].mmiodata[i]);
+ }
+ }
+
+ dev_priv->display.dmc.dc_state = 0;
+
+ gen9_set_dc_state_debugmask(dev_priv);
+
+ /*
+ * Flip queue events need to be disabled before enabling DC5/6.
+ * i915 doesn't use the flip queue feature, so disable it already
+ * here.
+ */
+ disable_all_flip_queue_events(dev_priv);
+
+ pipedmc_clock_gating_wa(dev_priv, false);
+}
+
+/**
+ * intel_dmc_disable_program() - disable the firmware
+ * @i915: i915 drm device
+ *
+ * Disable all event handlers in the firmware, making sure the firmware is
+ * inactive after the display is uninitialized.
+ */
+void intel_dmc_disable_program(struct drm_i915_private *i915)
+{
+ if (!intel_dmc_has_payload(i915))
+ return;
+
+ pipedmc_clock_gating_wa(i915, true);
+ disable_all_event_handlers(i915);
+ pipedmc_clock_gating_wa(i915, false);
+}
+
+void assert_dmc_loaded(struct drm_i915_private *i915)
+{
+ drm_WARN_ONCE(&i915->drm,
+ !intel_de_read(i915, DMC_PROGRAM(i915->display.dmc.dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)),
+ "DMC program storage start is NULL\n");
+ drm_WARN_ONCE(&i915->drm, !intel_de_read(i915, DMC_SSP_BASE),
+ "DMC SSP Base Not fine\n");
+ drm_WARN_ONCE(&i915->drm, !intel_de_read(i915, DMC_HTP_SKL),
+ "DMC HTP Not fine\n");
+}
+
+static bool fw_info_matches_stepping(const struct intel_fw_info *fw_info,
+ const struct stepping_info *si)
+{
+ if ((fw_info->substepping == '*' && si->stepping == fw_info->stepping) ||
+ (si->stepping == fw_info->stepping && si->substepping == fw_info->substepping) ||
+ /*
+ * If we don't find a more specific one from the above two checks, we
+ * then check for the generic one to be sure to work even with
+ * "broken firmware"
+ */
+ (si->stepping == '*' && si->substepping == fw_info->substepping) ||
+ (fw_info->stepping == '*' && fw_info->substepping == '*'))
+ return true;
+
+ return false;
+}
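+
+/*
+ * Example: fw_info { .stepping = 'B', .substepping = '*' } matches
+ * si { .stepping = 'B', .substepping = '0' } via the first check above.
+ */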
+
+/*
+ * Search fw_info table for dmc_offset to find firmware binary: num_entries is
+ * already sanitized.
+ */
+static void dmc_set_fw_offset(struct intel_dmc *dmc,
+ const struct intel_fw_info *fw_info,
+ unsigned int num_entries,
+ const struct stepping_info *si,
+ u8 package_ver)
+{
+ unsigned int i, id;
+
+ struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), display.dmc);
+
+ for (i = 0; i < num_entries; i++) {
+ id = package_ver <= 1 ? DMC_FW_MAIN : fw_info[i].dmc_id;
+
+ if (id >= DMC_FW_MAX) {
+ drm_dbg(&i915->drm, "Unsupported firmware id: %u\n", id);
+ continue;
+ }
+
+		/*
+		 * More specific versions come first, so we don't even have to
+		 * check for the stepping since we already found a previous FW
+		 * for this id.
+		 */
+ if (dmc->dmc_info[id].present)
+ continue;
+
+ if (fw_info_matches_stepping(&fw_info[i], si)) {
+ dmc->dmc_info[id].present = true;
+ dmc->dmc_info[id].dmc_offset = fw_info[i].offset;
+ }
+ }
+}
+
+static bool dmc_mmio_addr_sanity_check(struct intel_dmc *dmc,
+ const u32 *mmioaddr, u32 mmio_count,
+ int header_ver, u8 dmc_id)
+{
+ struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), display.dmc);
+ u32 start_range, end_range;
+ int i;
+
+ if (dmc_id >= DMC_FW_MAX) {
+ drm_warn(&i915->drm, "Unsupported firmware id %u\n", dmc_id);
+ return false;
+ }
+
+ if (header_ver == 1) {
+ start_range = DMC_MMIO_START_RANGE;
+ end_range = DMC_MMIO_END_RANGE;
+ } else if (dmc_id == DMC_FW_MAIN) {
+ start_range = TGL_MAIN_MMIO_START;
+ end_range = TGL_MAIN_MMIO_END;
+ } else if (DISPLAY_VER(i915) >= 13) {
+ start_range = ADLP_PIPE_MMIO_START;
+ end_range = ADLP_PIPE_MMIO_END;
+ } else if (DISPLAY_VER(i915) >= 12) {
+ start_range = TGL_PIPE_MMIO_START(dmc_id);
+ end_range = TGL_PIPE_MMIO_END(dmc_id);
+ } else {
+ drm_warn(&i915->drm, "Unknown mmio range for sanity check");
+ return false;
+ }
+
+ for (i = 0; i < mmio_count; i++) {
+ if (mmioaddr[i] < start_range || mmioaddr[i] > end_range)
+ return false;
+ }
+
+ return true;
+}
+
+static u32 parse_dmc_fw_header(struct intel_dmc *dmc,
+ const struct intel_dmc_header_base *dmc_header,
+ size_t rem_size, u8 dmc_id)
+{
+ struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), display.dmc);
+ struct dmc_fw_info *dmc_info = &dmc->dmc_info[dmc_id];
+ unsigned int header_len_bytes, dmc_header_size, payload_size, i;
+ const u32 *mmioaddr, *mmiodata;
+ u32 mmio_count, mmio_count_max, start_mmioaddr;
+ u8 *payload;
+
+ BUILD_BUG_ON(ARRAY_SIZE(dmc_info->mmioaddr) < DMC_V3_MAX_MMIO_COUNT ||
+ ARRAY_SIZE(dmc_info->mmioaddr) < DMC_V1_MAX_MMIO_COUNT);
+
+	/*
+	 * Check if we can access common fields; we will check again below
+	 * after we have read the version
+	 */
+ if (rem_size < sizeof(struct intel_dmc_header_base))
+ goto error_truncated;
+
+ /* Cope with small differences between v1 and v3 */
+ if (dmc_header->header_ver == 3) {
+ const struct intel_dmc_header_v3 *v3 =
+ (const struct intel_dmc_header_v3 *)dmc_header;
+
+ if (rem_size < sizeof(struct intel_dmc_header_v3))
+ goto error_truncated;
+
+ mmioaddr = v3->mmioaddr;
+ mmiodata = v3->mmiodata;
+ mmio_count = v3->mmio_count;
+ mmio_count_max = DMC_V3_MAX_MMIO_COUNT;
+ /* header_len is in dwords */
+ header_len_bytes = dmc_header->header_len * 4;
+ start_mmioaddr = v3->start_mmioaddr;
+ dmc_header_size = sizeof(*v3);
+ } else if (dmc_header->header_ver == 1) {
+ const struct intel_dmc_header_v1 *v1 =
+ (const struct intel_dmc_header_v1 *)dmc_header;
+
+ if (rem_size < sizeof(struct intel_dmc_header_v1))
+ goto error_truncated;
+
+ mmioaddr = v1->mmioaddr;
+ mmiodata = v1->mmiodata;
+ mmio_count = v1->mmio_count;
+ mmio_count_max = DMC_V1_MAX_MMIO_COUNT;
+ header_len_bytes = dmc_header->header_len;
+ start_mmioaddr = DMC_V1_MMIO_START_RANGE;
+ dmc_header_size = sizeof(*v1);
+ } else {
+ drm_err(&i915->drm, "Unknown DMC fw header version: %u\n",
+ dmc_header->header_ver);
+ return 0;
+ }
+
+ if (header_len_bytes != dmc_header_size) {
+ drm_err(&i915->drm, "DMC firmware has wrong dmc header length "
+ "(%u bytes)\n", header_len_bytes);
+ return 0;
+ }
+
+ /* Cache the dmc header info. */
+ if (mmio_count > mmio_count_max) {
+ drm_err(&i915->drm, "DMC firmware has wrong mmio count %u\n", mmio_count);
+ return 0;
+ }
+
+ if (!dmc_mmio_addr_sanity_check(dmc, mmioaddr, mmio_count,
+ dmc_header->header_ver, dmc_id)) {
+		drm_err(&i915->drm, "DMC firmware has wrong MMIO addresses\n");
+ return 0;
+ }
+
+ for (i = 0; i < mmio_count; i++) {
+ dmc_info->mmioaddr[i] = _MMIO(mmioaddr[i]);
+ dmc_info->mmiodata[i] = mmiodata[i];
+ }
+ dmc_info->mmio_count = mmio_count;
+ dmc_info->start_mmioaddr = start_mmioaddr;
+
+ rem_size -= header_len_bytes;
+
+	/* fw_size is in dwords, so multiply by 4 to convert to bytes. */
+ payload_size = dmc_header->fw_size * 4;
+ if (rem_size < payload_size)
+ goto error_truncated;
+
+ if (payload_size > dmc->max_fw_size) {
+ drm_err(&i915->drm, "DMC FW too big (%u bytes)\n", payload_size);
+ return 0;
+ }
+ dmc_info->dmc_fw_size = dmc_header->fw_size;
+
+ dmc_info->payload = kmalloc(payload_size, GFP_KERNEL);
+ if (!dmc_info->payload)
+ return 0;
+
+ payload = (u8 *)(dmc_header) + header_len_bytes;
+ memcpy(dmc_info->payload, payload, payload_size);
+
+ return header_len_bytes + payload_size;
+
+error_truncated:
+ drm_err(&i915->drm, "Truncated DMC firmware, refusing.\n");
+ return 0;
+}
+
+static u32
+parse_dmc_fw_package(struct intel_dmc *dmc,
+ const struct intel_package_header *package_header,
+ const struct stepping_info *si,
+ size_t rem_size)
+{
+ struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), display.dmc);
+ u32 package_size = sizeof(struct intel_package_header);
+ u32 num_entries, max_entries;
+ const struct intel_fw_info *fw_info;
+
+ if (rem_size < package_size)
+ goto error_truncated;
+
+ if (package_header->header_ver == 1) {
+ max_entries = PACKAGE_MAX_FW_INFO_ENTRIES;
+ } else if (package_header->header_ver == 2) {
+ max_entries = PACKAGE_V2_MAX_FW_INFO_ENTRIES;
+ } else {
+ drm_err(&i915->drm, "DMC firmware has unknown header version %u\n",
+ package_header->header_ver);
+ return 0;
+ }
+
+ /*
+ * We should always have space for max_entries,
+ * even if not all are used
+ */
+ package_size += max_entries * sizeof(struct intel_fw_info);
+ if (rem_size < package_size)
+ goto error_truncated;
+
+ if (package_header->header_len * 4 != package_size) {
+ drm_err(&i915->drm, "DMC firmware has wrong package header length "
+ "(%u bytes)\n", package_size);
+ return 0;
+ }
+
+ num_entries = package_header->num_entries;
+ if (WARN_ON(package_header->num_entries > max_entries))
+ num_entries = max_entries;
+
+ fw_info = (const struct intel_fw_info *)
+ ((u8 *)package_header + sizeof(*package_header));
+ dmc_set_fw_offset(dmc, fw_info, num_entries, si,
+ package_header->header_ver);
+
+ /* dmc_offset is in dwords */
+ return package_size;
+
+error_truncated:
+ drm_err(&i915->drm, "Truncated DMC firmware, refusing.\n");
+ return 0;
+}
+
+/* Return number of bytes parsed or 0 on error */
+static u32 parse_dmc_fw_css(struct intel_dmc *dmc,
+ struct intel_css_header *css_header,
+ size_t rem_size)
+{
+ struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), display.dmc);
+
+ if (rem_size < sizeof(struct intel_css_header)) {
+ drm_err(&i915->drm, "Truncated DMC firmware, refusing.\n");
+ return 0;
+ }
+
+ if (sizeof(struct intel_css_header) !=
+ (css_header->header_len * 4)) {
+ drm_err(&i915->drm, "DMC firmware has wrong CSS header length "
+ "(%u bytes)\n",
+ (css_header->header_len * 4));
+ return 0;
+ }
+
+ if (dmc->required_version &&
+ css_header->version != dmc->required_version) {
+ drm_info(&i915->drm, "Refusing to load DMC firmware v%u.%u,"
+ " please use v%u.%u\n",
+ DMC_VERSION_MAJOR(css_header->version),
+ DMC_VERSION_MINOR(css_header->version),
+ DMC_VERSION_MAJOR(dmc->required_version),
+ DMC_VERSION_MINOR(dmc->required_version));
+ return 0;
+ }
+
+ dmc->version = css_header->version;
+
+ return sizeof(struct intel_css_header);
+}
+
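+/*
+ * Overall layout of a DMC firmware blob, as reconstructed from the
+ * surrounding parsers (all sizes and offsets come from the respective
+ * headers):
+ *
+ *   intel_css_header
+ *   intel_package_header + intel_fw_info[max_entries]
+ *   then, for each present dmc_id, at readcount + dmc_offset * 4:
+ *     intel_dmc_header (v1 or v3) followed by its payload (fw_size dwords)
+ */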
+static void parse_dmc_fw(struct drm_i915_private *dev_priv,
+ const struct firmware *fw)
+{
+ struct intel_css_header *css_header;
+ struct intel_package_header *package_header;
+ struct intel_dmc_header_base *dmc_header;
+ struct intel_dmc *dmc = &dev_priv->display.dmc;
+ struct stepping_info display_info = { '*', '*'};
+ const struct stepping_info *si = intel_get_stepping_info(dev_priv, &display_info);
+ u32 readcount = 0;
+ u32 r, offset;
+ int id;
+
+ if (!fw)
+ return;
+
+ /* Extract CSS Header information */
+ css_header = (struct intel_css_header *)fw->data;
+ r = parse_dmc_fw_css(dmc, css_header, fw->size);
+ if (!r)
+ return;
+
+ readcount += r;
+
+ /* Extract Package Header information */
+ package_header = (struct intel_package_header *)&fw->data[readcount];
+ r = parse_dmc_fw_package(dmc, package_header, si, fw->size - readcount);
+ if (!r)
+ return;
+
+ readcount += r;
+
+ for (id = 0; id < DMC_FW_MAX; id++) {
+ if (!dev_priv->display.dmc.dmc_info[id].present)
+ continue;
+
+ offset = readcount + dmc->dmc_info[id].dmc_offset * 4;
+ if (offset > fw->size) {
+ drm_err(&dev_priv->drm, "Reading beyond the fw_size\n");
+ continue;
+ }
+
+ dmc_header = (struct intel_dmc_header_base *)&fw->data[offset];
+ parse_dmc_fw_header(dmc, dmc_header, fw->size - offset, id);
+ }
+}
+
+static void intel_dmc_runtime_pm_get(struct drm_i915_private *dev_priv)
+{
+ drm_WARN_ON(&dev_priv->drm, dev_priv->display.dmc.wakeref);
+ dev_priv->display.dmc.wakeref =
+ intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
+}
+
+static void intel_dmc_runtime_pm_put(struct drm_i915_private *dev_priv)
+{
+ intel_wakeref_t wakeref __maybe_unused =
+ fetch_and_zero(&dev_priv->display.dmc.wakeref);
+
+ intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
+}
+
+static void dmc_load_work_fn(struct work_struct *work)
+{
+ struct drm_i915_private *dev_priv;
+ struct intel_dmc *dmc;
+ const struct firmware *fw = NULL;
+
+ dev_priv = container_of(work, typeof(*dev_priv), display.dmc.work);
+ dmc = &dev_priv->display.dmc;
+
+ request_firmware(&fw, dev_priv->display.dmc.fw_path, dev_priv->drm.dev);
+ parse_dmc_fw(dev_priv, fw);
+
+ if (intel_dmc_has_payload(dev_priv)) {
+ intel_dmc_load_program(dev_priv);
+ intel_dmc_runtime_pm_put(dev_priv);
+
+ drm_info(&dev_priv->drm,
+ "Finished loading DMC firmware %s (v%u.%u)\n",
+ dev_priv->display.dmc.fw_path, DMC_VERSION_MAJOR(dmc->version),
+ DMC_VERSION_MINOR(dmc->version));
+ } else {
+ drm_notice(&dev_priv->drm,
+ "Failed to load DMC firmware %s."
+ " Disabling runtime power management.\n",
+ dmc->fw_path);
+ drm_notice(&dev_priv->drm, "DMC firmware homepage: %s",
+ INTEL_UC_FIRMWARE_URL);
+ }
+
+ release_firmware(fw);
+}
+
+/**
+ * intel_dmc_ucode_init() - initialize the firmware loading.
+ * @dev_priv: i915 drm device.
+ *
+ * This function is called when the display driver is loaded, to read the
+ * firmware from a .bin file and copy it into internal memory.
+ */
+void intel_dmc_ucode_init(struct drm_i915_private *dev_priv)
+{
+ struct intel_dmc *dmc = &dev_priv->display.dmc;
+
+ INIT_WORK(&dev_priv->display.dmc.work, dmc_load_work_fn);
+
+ if (!HAS_DMC(dev_priv))
+ return;
+
+ /*
+ * Obtain a runtime pm reference, until DMC is loaded, to avoid entering
+ * runtime-suspend.
+ *
+ * On error, we return with the rpm wakeref held to prevent runtime
+ * suspend as runtime suspend *requires* a working DMC for whatever
+ * reason.
+ */
+ intel_dmc_runtime_pm_get(dev_priv);
+
+ if (IS_DG2(dev_priv)) {
+ dmc->fw_path = DG2_DMC_PATH;
+ dmc->required_version = DG2_DMC_VERSION_REQUIRED;
+ dmc->max_fw_size = DISPLAY_VER13_DMC_MAX_FW_SIZE;
+ } else if (IS_ALDERLAKE_P(dev_priv)) {
+ dmc->fw_path = ADLP_DMC_PATH;
+ dmc->required_version = ADLP_DMC_VERSION_REQUIRED;
+ dmc->max_fw_size = DISPLAY_VER13_DMC_MAX_FW_SIZE;
+ } else if (IS_ALDERLAKE_S(dev_priv)) {
+ dmc->fw_path = ADLS_DMC_PATH;
+ dmc->required_version = ADLS_DMC_VERSION_REQUIRED;
+ dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
+ } else if (IS_DG1(dev_priv)) {
+ dmc->fw_path = DG1_DMC_PATH;
+ dmc->required_version = DG1_DMC_VERSION_REQUIRED;
+ dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
+ } else if (IS_ROCKETLAKE(dev_priv)) {
+ dmc->fw_path = RKL_DMC_PATH;
+ dmc->required_version = RKL_DMC_VERSION_REQUIRED;
+ dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
+ } else if (IS_TIGERLAKE(dev_priv)) {
+ dmc->fw_path = TGL_DMC_PATH;
+ dmc->required_version = TGL_DMC_VERSION_REQUIRED;
+ dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
+ } else if (DISPLAY_VER(dev_priv) == 11) {
+ dmc->fw_path = ICL_DMC_PATH;
+ dmc->required_version = ICL_DMC_VERSION_REQUIRED;
+ dmc->max_fw_size = ICL_DMC_MAX_FW_SIZE;
+ } else if (IS_GEMINILAKE(dev_priv)) {
+ dmc->fw_path = GLK_DMC_PATH;
+ dmc->required_version = GLK_DMC_VERSION_REQUIRED;
+ dmc->max_fw_size = GLK_DMC_MAX_FW_SIZE;
+ } else if (IS_KABYLAKE(dev_priv) ||
+ IS_COFFEELAKE(dev_priv) ||
+ IS_COMETLAKE(dev_priv)) {
+ dmc->fw_path = KBL_DMC_PATH;
+ dmc->required_version = KBL_DMC_VERSION_REQUIRED;
+ dmc->max_fw_size = KBL_DMC_MAX_FW_SIZE;
+ } else if (IS_SKYLAKE(dev_priv)) {
+ dmc->fw_path = SKL_DMC_PATH;
+ dmc->required_version = SKL_DMC_VERSION_REQUIRED;
+ dmc->max_fw_size = SKL_DMC_MAX_FW_SIZE;
+ } else if (IS_BROXTON(dev_priv)) {
+ dmc->fw_path = BXT_DMC_PATH;
+ dmc->required_version = BXT_DMC_VERSION_REQUIRED;
+ dmc->max_fw_size = BXT_DMC_MAX_FW_SIZE;
+ }
+
+ if (dev_priv->params.dmc_firmware_path) {
+ if (strlen(dev_priv->params.dmc_firmware_path) == 0) {
+ dmc->fw_path = NULL;
+ drm_info(&dev_priv->drm,
+ "Disabling DMC firmware and runtime PM\n");
+ return;
+ }
+
+ dmc->fw_path = dev_priv->params.dmc_firmware_path;
+ /* Bypass version check for firmware override. */
+ dmc->required_version = 0;
+ }
+
+ if (!dmc->fw_path) {
+ drm_dbg_kms(&dev_priv->drm,
+ "No known DMC firmware for platform, disabling runtime PM\n");
+ return;
+ }
+
+ drm_dbg_kms(&dev_priv->drm, "Loading %s\n", dmc->fw_path);
+ schedule_work(&dev_priv->display.dmc.work);
+}
+
+/**
+ * intel_dmc_ucode_suspend() - prepare DMC firmware before system suspend
+ * @dev_priv: i915 drm device
+ *
+ * Prepare the DMC firmware before entering system suspend. This includes
+ * flushing pending work items and releasing any resources acquired during
+ * init.
+ */
+void intel_dmc_ucode_suspend(struct drm_i915_private *dev_priv)
+{
+ if (!HAS_DMC(dev_priv))
+ return;
+
+ flush_work(&dev_priv->display.dmc.work);
+
+ /* Drop the reference held in case DMC isn't loaded. */
+ if (!intel_dmc_has_payload(dev_priv))
+ intel_dmc_runtime_pm_put(dev_priv);
+}
+
+/**
+ * intel_dmc_ucode_resume() - init DMC firmware during system resume
+ * @dev_priv: i915 drm device
+ *
+ * Reinitialize the DMC firmware during system resume, reacquiring any
+ * resources released in intel_dmc_ucode_suspend().
+ */
+void intel_dmc_ucode_resume(struct drm_i915_private *dev_priv)
+{
+ if (!HAS_DMC(dev_priv))
+ return;
+
+ /*
+ * Reacquire the reference to keep RPM disabled in case DMC isn't
+ * loaded.
+ */
+ if (!intel_dmc_has_payload(dev_priv))
+ intel_dmc_runtime_pm_get(dev_priv);
+}
+
+/**
+ * intel_dmc_ucode_fini() - unload the DMC firmware.
+ * @dev_priv: i915 drm device.
+ *
+ * Firmware unloading includes freeing the internal memory and resetting the
+ * firmware loading status.
+ */
+void intel_dmc_ucode_fini(struct drm_i915_private *dev_priv)
+{
+ int id;
+
+ if (!HAS_DMC(dev_priv))
+ return;
+
+ intel_dmc_ucode_suspend(dev_priv);
+ drm_WARN_ON(&dev_priv->drm, dev_priv->display.dmc.wakeref);
+
+ for (id = 0; id < DMC_FW_MAX; id++)
+ kfree(dev_priv->display.dmc.dmc_info[id].payload);
+}
+
+void intel_dmc_print_error_state(struct drm_i915_error_state_buf *m,
+ struct drm_i915_private *i915)
+{
+ struct intel_dmc *dmc = &i915->display.dmc;
+
+ if (!HAS_DMC(i915))
+ return;
+
+ i915_error_printf(m, "DMC loaded: %s\n",
+ str_yes_no(intel_dmc_has_payload(i915)));
+ i915_error_printf(m, "DMC fw version: %d.%d\n",
+ DMC_VERSION_MAJOR(dmc->version),
+ DMC_VERSION_MINOR(dmc->version));
+}
+
+static int intel_dmc_debugfs_status_show(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *i915 = m->private;
+ intel_wakeref_t wakeref;
+ struct intel_dmc *dmc;
+ i915_reg_t dc5_reg, dc6_reg = INVALID_MMIO_REG;
+
+ if (!HAS_DMC(i915))
+ return -ENODEV;
+
+ dmc = &i915->display.dmc;
+
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+
+ seq_printf(m, "fw loaded: %s\n",
+ str_yes_no(intel_dmc_has_payload(i915)));
+ seq_printf(m, "path: %s\n", dmc->fw_path);
+ seq_printf(m, "Pipe A fw support: %s\n",
+ str_yes_no(GRAPHICS_VER(i915) >= 12));
+ seq_printf(m, "Pipe A fw loaded: %s\n",
+ str_yes_no(dmc->dmc_info[DMC_FW_PIPEA].payload));
+ seq_printf(m, "Pipe B fw support: %s\n",
+ str_yes_no(IS_ALDERLAKE_P(i915)));
+ seq_printf(m, "Pipe B fw loaded: %s\n",
+ str_yes_no(dmc->dmc_info[DMC_FW_PIPEB].payload));
+
+ if (!intel_dmc_has_payload(i915))
+ goto out;
+
+ seq_printf(m, "version: %d.%d\n", DMC_VERSION_MAJOR(dmc->version),
+ DMC_VERSION_MINOR(dmc->version));
+
+ if (DISPLAY_VER(i915) >= 12) {
+ if (IS_DGFX(i915)) {
+ dc5_reg = DG1_DMC_DEBUG_DC5_COUNT;
+ } else {
+ dc5_reg = TGL_DMC_DEBUG_DC5_COUNT;
+ dc6_reg = TGL_DMC_DEBUG_DC6_COUNT;
+ }
+
+		/*
+		 * NOTE: DMC_DEBUG3 is a general purpose reg.
+		 * According to Bspec 49196, the DMC f/w reuses the DC5/6
+		 * counter reg for DC3CO debugging and validation, but the
+		 * TGL DMC f/w uses the DMC_DEBUG3 reg for the DC3CO counter.
+		 */
+ seq_printf(m, "DC3CO count: %d\n",
+ intel_de_read(i915, IS_DGFX(i915) ?
+ DG1_DMC_DEBUG3 : TGL_DMC_DEBUG3));
+ } else {
+ dc5_reg = IS_BROXTON(i915) ? BXT_DMC_DC3_DC5_COUNT :
+ SKL_DMC_DC3_DC5_COUNT;
+ if (!IS_GEMINILAKE(i915) && !IS_BROXTON(i915))
+ dc6_reg = SKL_DMC_DC5_DC6_COUNT;
+ }
+
+ seq_printf(m, "DC3 -> DC5 count: %d\n", intel_de_read(i915, dc5_reg));
+ if (i915_mmio_reg_valid(dc6_reg))
+ seq_printf(m, "DC5 -> DC6 count: %d\n",
+ intel_de_read(i915, dc6_reg));
+
+out:
+ seq_printf(m, "program base: 0x%08x\n",
+ intel_de_read(i915, DMC_PROGRAM(dmc->dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)));
+ seq_printf(m, "ssp base: 0x%08x\n",
+ intel_de_read(i915, DMC_SSP_BASE));
+ seq_printf(m, "htp: 0x%08x\n", intel_de_read(i915, DMC_HTP_SKL));
+
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(intel_dmc_debugfs_status);
+
+void intel_dmc_debugfs_register(struct drm_i915_private *i915)
+{
+ struct drm_minor *minor = i915->drm.primary;
+
+ debugfs_create_file("i915_dmc_info", 0444, minor->debugfs_root,
+ i915, &intel_dmc_debugfs_status_fops);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.h b/drivers/gpu/drm/i915/display/intel_dmc.h
new file mode 100644
index 000000000..67e03315e
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dmc.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_DMC_H__
+#define __INTEL_DMC_H__
+
+#include "i915_reg_defs.h"
+#include "intel_wakeref.h"
+#include <linux/workqueue.h>
+
+struct drm_i915_error_state_buf;
+struct drm_i915_private;
+
+enum {
+ DMC_FW_MAIN = 0,
+ DMC_FW_PIPEA,
+ DMC_FW_PIPEB,
+ DMC_FW_PIPEC,
+ DMC_FW_PIPED,
+ DMC_FW_MAX
+};
+
+struct intel_dmc {
+ struct work_struct work;
+ const char *fw_path;
+ u32 required_version;
+ u32 max_fw_size; /* bytes */
+ u32 version;
+ struct dmc_fw_info {
+ u32 mmio_count;
+ i915_reg_t mmioaddr[20];
+ u32 mmiodata[20];
+ u32 dmc_offset;
+ u32 start_mmioaddr;
+		u32 dmc_fw_size; /* dwords */
+ u32 *payload;
+ bool present;
+ } dmc_info[DMC_FW_MAX];
+
+ u32 dc_state;
+ u32 target_dc_state;
+ u32 allowed_dc_mask;
+ intel_wakeref_t wakeref;
+};
+
+void intel_dmc_ucode_init(struct drm_i915_private *i915);
+void intel_dmc_load_program(struct drm_i915_private *i915);
+void intel_dmc_disable_program(struct drm_i915_private *i915);
+void intel_dmc_ucode_fini(struct drm_i915_private *i915);
+void intel_dmc_ucode_suspend(struct drm_i915_private *i915);
+void intel_dmc_ucode_resume(struct drm_i915_private *i915);
+bool intel_dmc_has_payload(struct drm_i915_private *i915);
+void intel_dmc_debugfs_register(struct drm_i915_private *i915);
+void intel_dmc_print_error_state(struct drm_i915_error_state_buf *m,
+ struct drm_i915_private *i915);
+
+void assert_dmc_loaded(struct drm_i915_private *i915);
+
+#endif /* __INTEL_DMC_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dmc_regs.h b/drivers/gpu/drm/i915/display/intel_dmc_regs.h
new file mode 100644
index 000000000..5e5e41644
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dmc_regs.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __INTEL_DMC_REGS_H__
+#define __INTEL_DMC_REGS_H__
+
+#include "i915_reg_defs.h"
+
+#define DMC_PROGRAM(addr, i) _MMIO((addr) + (i) * 4)
+#define DMC_SSP_BASE_ADDR_GEN9 0x00002FC0
+
+#define _ADLP_PIPEDMC_REG_MMIO_BASE_A 0x5f000
+#define _TGL_PIPEDMC_REG_MMIO_BASE_A 0x92000
+
+#define __PIPEDMC_REG_MMIO_BASE(i915, dmc_id) \
+ ((DISPLAY_VER(i915) >= 13 ? _ADLP_PIPEDMC_REG_MMIO_BASE_A : \
+ _TGL_PIPEDMC_REG_MMIO_BASE_A) + \
+ 0x400 * ((dmc_id) - 1))
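+
+/*
+ * For example, on ADL-P (DISPLAY_VER >= 13) the pipe B DMC (dmc_id 2) maps
+ * to 0x5f000 + 0x400 * 1 = 0x5f400.
+ */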
+
+#define __DMC_REG_MMIO_BASE 0x8f000
+
+#define _DMC_REG_MMIO_BASE(i915, dmc_id) \
+ ((dmc_id) == DMC_FW_MAIN ? __DMC_REG_MMIO_BASE : \
+ __PIPEDMC_REG_MMIO_BASE(i915, dmc_id))
+
+#define _DMC_REG(i915, dmc_id, reg) \
+ ((reg) - __DMC_REG_MMIO_BASE + _DMC_REG_MMIO_BASE(i915, dmc_id))
+
+#define DMC_EVENT_HANDLER_COUNT_GEN12 8
+
+#define _DMC_EVT_HTP_0 0x8f004
+
+#define DMC_EVT_HTP(i915, dmc_id, handler) \
+ _MMIO(_DMC_REG(i915, dmc_id, _DMC_EVT_HTP_0) + 4 * (handler))
+
+#define _DMC_EVT_CTL_0 0x8f034
+
+#define DMC_EVT_CTL(i915, dmc_id, handler) \
+ _MMIO(_DMC_REG(i915, dmc_id, _DMC_EVT_CTL_0) + 4 * (handler))
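+
+/*
+ * For example, DMC_EVT_CTL(i915, DMC_FW_MAIN, 0) resolves to MMIO offset
+ * 0x8f034 in the main DMC range; each further handler adds 4 bytes.
+ */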
+
+#define DMC_EVT_CTL_ENABLE REG_BIT(31)
+#define DMC_EVT_CTL_RECURRING REG_BIT(30)
+#define DMC_EVT_CTL_TYPE_MASK REG_GENMASK(17, 16)
+#define DMC_EVT_CTL_TYPE_LEVEL_0 0
+#define DMC_EVT_CTL_TYPE_LEVEL_1 1
+#define DMC_EVT_CTL_TYPE_EDGE_1_0 2
+#define DMC_EVT_CTL_TYPE_EDGE_0_1 3
+
+#define DMC_EVT_CTL_EVENT_ID_MASK REG_GENMASK(15, 8)
+#define DMC_EVT_CTL_EVENT_ID_FALSE 0x01
+/* An event handler scheduled to run at a 1 kHz frequency. */
+#define DMC_EVT_CTL_EVENT_ID_CLK_MSEC 0xbf
+
+#define DMC_HTP_ADDR_SKL 0x00500034
+#define DMC_SSP_BASE _MMIO(0x8F074)
+#define DMC_HTP_SKL _MMIO(0x8F004)
+#define DMC_LAST_WRITE _MMIO(0x8F034)
+#define DMC_LAST_WRITE_VALUE 0xc003b400
+#define DMC_MMIO_START_RANGE 0x80000
+#define DMC_MMIO_END_RANGE 0x8FFFF
+#define DMC_V1_MMIO_START_RANGE 0x80000
+#define TGL_MAIN_MMIO_START 0x8F000
+#define TGL_MAIN_MMIO_END 0x8FFFF
+#define _TGL_PIPEA_MMIO_START 0x92000
+#define _TGL_PIPEA_MMIO_END 0x93FFF
+#define _TGL_PIPEB_MMIO_START 0x96000
+#define _TGL_PIPEB_MMIO_END 0x97FFF
+#define ADLP_PIPE_MMIO_START 0x5F000
+#define ADLP_PIPE_MMIO_END 0x5FFFF
+
+#define TGL_PIPE_MMIO_START(dmc_id) _PICK_EVEN(((dmc_id) - 1), _TGL_PIPEA_MMIO_START,\
+ _TGL_PIPEB_MMIO_START)
+
+#define TGL_PIPE_MMIO_END(dmc_id) _PICK_EVEN(((dmc_id) - 1), _TGL_PIPEA_MMIO_END,\
+ _TGL_PIPEB_MMIO_END)
+
+#define SKL_DMC_DC3_DC5_COUNT _MMIO(0x80030)
+#define SKL_DMC_DC5_DC6_COUNT _MMIO(0x8002C)
+#define BXT_DMC_DC3_DC5_COUNT _MMIO(0x80038)
+#define TGL_DMC_DEBUG_DC5_COUNT _MMIO(0x101084)
+#define TGL_DMC_DEBUG_DC6_COUNT _MMIO(0x101088)
+#define DG1_DMC_DEBUG_DC5_COUNT _MMIO(0x134154)
+
+#define TGL_DMC_DEBUG3 _MMIO(0x101090)
+#define DG1_DMC_DEBUG3 _MMIO(0x13415c)
+
+#endif /* __INTEL_DMC_REGS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
new file mode 100644
index 000000000..4699c2110
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -0,0 +1,5475 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Keith Packard <keithp@keithp.com>
+ *
+ */
+
+#include <linux/export.h>
+#include <linux/i2c.h>
+#include <linux/notifier.h>
+#include <linux/slab.h>
+#include <linux/string_helpers.h>
+#include <linux/timekeeping.h>
+#include <linux/types.h>
+
+#include <asm/byteorder.h>
+
+#include <drm/display/drm_dp_helper.h>
+#include <drm/display/drm_dsc_helper.h>
+#include <drm/display/drm_hdmi_helper.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_probe_helper.h>
+
+#include "g4x_dp.h"
+#include "i915_debugfs.h"
+#include "i915_drv.h"
+#include "intel_atomic.h"
+#include "intel_audio.h"
+#include "intel_backlight.h"
+#include "intel_combo_phy_regs.h"
+#include "intel_connector.h"
+#include "intel_crtc.h"
+#include "intel_ddi.h"
+#include "intel_de.h"
+#include "intel_display_types.h"
+#include "intel_dp.h"
+#include "intel_dp_aux.h"
+#include "intel_dp_hdcp.h"
+#include "intel_dp_link_training.h"
+#include "intel_dp_mst.h"
+#include "intel_dpio_phy.h"
+#include "intel_dpll.h"
+#include "intel_fifo_underrun.h"
+#include "intel_hdcp.h"
+#include "intel_hdmi.h"
+#include "intel_hotplug.h"
+#include "intel_lspcon.h"
+#include "intel_lvds.h"
+#include "intel_panel.h"
+#include "intel_pch_display.h"
+#include "intel_pps.h"
+#include "intel_psr.h"
+#include "intel_tc.h"
+#include "intel_vdsc.h"
+#include "intel_vrr.h"
+
+/* DP DSC throughput values used for slice count calculations, in KPixels/s */
+#define DP_DSC_PEAK_PIXEL_RATE 2720000
+#define DP_DSC_MAX_ENC_THROUGHPUT_0 340000
+#define DP_DSC_MAX_ENC_THROUGHPUT_1 400000
+
+/* DP DSC FEC Overhead factor = 1/(0.972261) */
+#define DP_DSC_FEC_OVERHEAD_FACTOR 972261
+
+/* Compliance test status bits */
+#define INTEL_DP_RESOLUTION_SHIFT_MASK 0
+#define INTEL_DP_RESOLUTION_PREFERRED (1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
+#define INTEL_DP_RESOLUTION_STANDARD (2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
+#define INTEL_DP_RESOLUTION_FAILSAFE (3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
+
+
+/* Constants for DP DSC configurations */
+static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15};
+
+/*
+ * With a single pipe configuration, the HW is capable of supporting a
+ * maximum of 4 slices per line.
+ */
+static const u8 valid_dsc_slicecount[] = {1, 2, 4};
+
+/**
+ * intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH)
+ * @intel_dp: DP struct
+ *
+ * If a CPU or PCH DP output is attached to an eDP panel, this function
+ * will return true, and false otherwise.
+ *
+ * This function is not safe to use prior to encoder type being set.
+ */
+bool intel_dp_is_edp(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+
+ return dig_port->base.type == INTEL_OUTPUT_EDP;
+}
+
+static void intel_dp_unset_edid(struct intel_dp *intel_dp);
+static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc);
+
+/* Is link rate UHBR and thus 128b/132b? */
+bool intel_dp_is_uhbr(const struct intel_crtc_state *crtc_state)
+{
+ return crtc_state->port_clock >= 1000000;
+}
+
+static void intel_dp_set_default_sink_rates(struct intel_dp *intel_dp)
+{
+ intel_dp->sink_rates[0] = 162000;
+ intel_dp->num_sink_rates = 1;
+}
+
+/* update sink rates from dpcd */
+static void intel_dp_set_dpcd_sink_rates(struct intel_dp *intel_dp)
+{
+ static const int dp_rates[] = {
+ 162000, 270000, 540000, 810000
+ };
+ int i, max_rate;
+ int max_lttpr_rate;
+
+ if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS)) {
+ /* Needed, e.g., for Apple MBP 2017, 15 inch eDP Retina panel */
+ static const int quirk_rates[] = { 162000, 270000, 324000 };
+
+ memcpy(intel_dp->sink_rates, quirk_rates, sizeof(quirk_rates));
+ intel_dp->num_sink_rates = ARRAY_SIZE(quirk_rates);
+
+ return;
+ }
+
+ /*
+ * Sink rates for 8b/10b.
+ */
+ max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);
+ max_lttpr_rate = drm_dp_lttpr_max_link_rate(intel_dp->lttpr_common_caps);
+ if (max_lttpr_rate)
+ max_rate = min(max_rate, max_lttpr_rate);
+
+ for (i = 0; i < ARRAY_SIZE(dp_rates); i++) {
+ if (dp_rates[i] > max_rate)
+ break;
+ intel_dp->sink_rates[i] = dp_rates[i];
+ }
+
+ /*
+ * Sink rates for 128b/132b. If set, sink should support all 8b/10b
+ * rates and 10 Gbps.
+ */
+ if (intel_dp->dpcd[DP_MAIN_LINK_CHANNEL_CODING] & DP_CAP_ANSI_128B132B) {
+ u8 uhbr_rates = 0;
+
+ BUILD_BUG_ON(ARRAY_SIZE(intel_dp->sink_rates) < ARRAY_SIZE(dp_rates) + 3);
+
+ drm_dp_dpcd_readb(&intel_dp->aux,
+ DP_128B132B_SUPPORTED_LINK_RATES, &uhbr_rates);
+
+ if (drm_dp_lttpr_count(intel_dp->lttpr_common_caps)) {
+ /* We have a repeater */
+ if (intel_dp->lttpr_common_caps[0] >= 0x20 &&
+ intel_dp->lttpr_common_caps[DP_MAIN_LINK_CHANNEL_CODING_PHY_REPEATER -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV] &
+ DP_PHY_REPEATER_128B132B_SUPPORTED) {
+ /* Repeater supports 128b/132b, valid UHBR rates */
+ uhbr_rates &= intel_dp->lttpr_common_caps[DP_PHY_REPEATER_128B132B_RATES -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+ } else {
+ /* Does not support 128b/132b */
+ uhbr_rates = 0;
+ }
+ }
+
+ if (uhbr_rates & DP_UHBR10)
+ intel_dp->sink_rates[i++] = 1000000;
+ if (uhbr_rates & DP_UHBR13_5)
+ intel_dp->sink_rates[i++] = 1350000;
+ if (uhbr_rates & DP_UHBR20)
+ intel_dp->sink_rates[i++] = 2000000;
+ }
+
+ intel_dp->num_sink_rates = i;
+}
+
+static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
+{
+ struct intel_connector *connector = intel_dp->attached_connector;
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct intel_encoder *encoder = &intel_dig_port->base;
+
+ intel_dp_set_dpcd_sink_rates(intel_dp);
+
+ if (intel_dp->num_sink_rates)
+ return;
+
+ drm_err(&dp_to_i915(intel_dp)->drm,
+ "[CONNECTOR:%d:%s][ENCODER:%d:%s] Invalid DPCD with no link rates, using defaults\n",
+ connector->base.base.id, connector->base.name,
+ encoder->base.base.id, encoder->base.name);
+
+ intel_dp_set_default_sink_rates(intel_dp);
+}
+
+static void intel_dp_set_default_max_sink_lane_count(struct intel_dp *intel_dp)
+{
+ intel_dp->max_sink_lane_count = 1;
+}
+
+static void intel_dp_set_max_sink_lane_count(struct intel_dp *intel_dp)
+{
+ struct intel_connector *connector = intel_dp->attached_connector;
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct intel_encoder *encoder = &intel_dig_port->base;
+
+ intel_dp->max_sink_lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
+
+ switch (intel_dp->max_sink_lane_count) {
+ case 1:
+ case 2:
+ case 4:
+ return;
+ }
+
+ drm_err(&dp_to_i915(intel_dp)->drm,
+ "[CONNECTOR:%d:%s][ENCODER:%d:%s] Invalid DPCD max lane count (%d), using default\n",
+ connector->base.base.id, connector->base.name,
+ encoder->base.base.id, encoder->base.name,
+ intel_dp->max_sink_lane_count);
+
+ intel_dp_set_default_max_sink_lane_count(intel_dp);
+}
+
+/* Get length of rates array potentially limited by max_rate. */
+static int intel_dp_rate_limit_len(const int *rates, int len, int max_rate)
+{
+ int i;
+
+ /* Limit results by potentially reduced max rate */
+ for (i = 0; i < len; i++) {
+ if (rates[len - i - 1] <= max_rate)
+ return len - i;
+ }
+
+ return 0;
+}
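+
+/*
+ * For example, rates = { 162000, 270000, 540000 } with max_rate = 270000
+ * returns 2: only the first two entries remain usable.
+ */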
+
+/* Get length of common rates array potentially limited by max_rate. */
+static int intel_dp_common_len_rate_limit(const struct intel_dp *intel_dp,
+ int max_rate)
+{
+ return intel_dp_rate_limit_len(intel_dp->common_rates,
+ intel_dp->num_common_rates, max_rate);
+}
+
+static int intel_dp_common_rate(struct intel_dp *intel_dp, int index)
+{
+ if (drm_WARN_ON(&dp_to_i915(intel_dp)->drm,
+ index < 0 || index >= intel_dp->num_common_rates))
+ return 162000;
+
+ return intel_dp->common_rates[index];
+}
+
+/* Theoretical max between source and sink */
+static int intel_dp_max_common_rate(struct intel_dp *intel_dp)
+{
+ return intel_dp_common_rate(intel_dp, intel_dp->num_common_rates - 1);
+}
+
+static int intel_dp_max_source_lane_count(struct intel_digital_port *dig_port)
+{
+ int vbt_max_lanes = intel_bios_dp_max_lane_count(&dig_port->base);
+ int max_lanes = dig_port->max_lanes;
+
+ if (vbt_max_lanes)
+ max_lanes = min(max_lanes, vbt_max_lanes);
+
+ return max_lanes;
+}
+
+/* Theoretical max between source and sink */
+static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ int source_max = intel_dp_max_source_lane_count(dig_port);
+ int sink_max = intel_dp->max_sink_lane_count;
+ int fia_max = intel_tc_port_fia_max_lane_count(dig_port);
+ int lttpr_max = drm_dp_lttpr_max_lane_count(intel_dp->lttpr_common_caps);
+
+ if (lttpr_max)
+ sink_max = min(sink_max, lttpr_max);
+
+ return min3(source_max, sink_max, fia_max);
+}
+
+int intel_dp_max_lane_count(struct intel_dp *intel_dp)
+{
+ switch (intel_dp->max_link_lane_count) {
+ case 1:
+ case 2:
+ case 4:
+ return intel_dp->max_link_lane_count;
+ default:
+ MISSING_CASE(intel_dp->max_link_lane_count);
+ return 1;
+ }
+}
+
+/*
+ * The required data bandwidth for a mode with given pixel clock and bpp. This
+ * is the required net bandwidth independent of the data bandwidth efficiency.
+ */
+int
+intel_dp_link_required(int pixel_clock, int bpp)
+{
+ /* pixel_clock is in kHz, divide bpp by 8 for bit to Byte conversion */
+ return DIV_ROUND_UP(pixel_clock * bpp, 8);
+}
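+
+/*
+ * For example, a 148500 kHz pixel clock at 24 bpp requires
+ * 148500 * 24 / 8 = 445500 kBps of link data bandwidth.
+ */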
+
+/*
+ * Given a link rate and lanes, get the data bandwidth.
+ *
+ * Data bandwidth is the actual payload rate, which depends on the data
+ * bandwidth efficiency and the link rate.
+ *
+ * For 8b/10b channel encoding, SST and non-FEC, the data bandwidth efficiency
+ * is 80%. For example, for a 1.62 Gbps link, 1.62*10^9 bps * 0.80 * (1/8) =
+ * 162000 kBps. With 8-bit symbols, we have 162000 kHz symbol clock. Just by
+ * coincidence, the port clock in kHz matches the data bandwidth in kBps, and
+ * they equal the link bit rate in Gbps multiplied by 100000. (Note that this no
+ * longer holds for data bandwidth as soon as FEC or MST is taken into account!)
+ *
+ * For 128b/132b channel encoding, the data bandwidth efficiency is 96.71%. For
+ * example, for a 10 Gbps link, 10*10^9 bps * 0.9671 * (1/8) = 1208875
+ * kBps. With 32-bit symbols, we have 312500 kHz symbol clock. The value 1000000
+ * does not match the symbol clock, the port clock (not even if you think in
+ * terms of a byte clock), nor the data bandwidth. It only matches the link bit
+ * rate in units of 10000 bps.
+ */
+int
+intel_dp_max_data_rate(int max_link_rate, int max_lanes)
+{
+ if (max_link_rate >= 1000000) {
+ /*
+ * UHBR rates always use 128b/132b channel encoding, and have
+		 * 96.71% data bandwidth efficiency. Consider max_link_rate the
+ * link bit rate in units of 10000 bps.
+ */
+ int max_link_rate_kbps = max_link_rate * 10;
+
+ max_link_rate_kbps = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(max_link_rate_kbps, 9671), 10000);
+ max_link_rate = max_link_rate_kbps / 8;
+ }
+
+ /*
+ * Lower than UHBR rates always use 8b/10b channel encoding, and have
+ * 80% data bandwidth efficiency for SST non-FEC. However, this turns
+ * out to be a nop by coincidence, and can be skipped:
+ *
+ * int max_link_rate_kbps = max_link_rate * 10;
+ * max_link_rate_kbps = DIV_ROUND_CLOSEST_ULL(max_link_rate_kbps * 8, 10);
+ * max_link_rate = max_link_rate_kbps / 8;
+ */
+
+ return max_link_rate * max_lanes;
+}
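+
+/*
+ * Two worked examples of the above: HBR2 (540000) x 4 lanes gives
+ * 540000 * 4 = 2160000 kBps, while UHBR10 (1000000) x 4 lanes gives
+ * 1000000 * 10 * 9671 / 10000 / 8 * 4 = 4835500 kBps.
+ */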
+
+bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct intel_encoder *encoder = &intel_dig_port->base;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
+ return DISPLAY_VER(dev_priv) >= 12 ||
+ (DISPLAY_VER(dev_priv) == 11 &&
+ encoder->port != PORT_A);
+}
+
+static int dg2_max_source_rate(struct intel_dp *intel_dp)
+{
+ return intel_dp_is_edp(intel_dp) ? 810000 : 1350000;
+}
+
+static int icl_max_source_rate(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+ enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
+
+ if (intel_phy_is_combo(dev_priv, phy) && !intel_dp_is_edp(intel_dp))
+ return 540000;
+
+ return 810000;
+}
+
+static int ehl_max_source_rate(struct intel_dp *intel_dp)
+{
+ if (intel_dp_is_edp(intel_dp))
+ return 540000;
+
+ return 810000;
+}
+
+static int vbt_max_link_rate(struct intel_dp *intel_dp)
+{
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ int max_rate;
+
+ max_rate = intel_bios_dp_max_link_rate(encoder);
+
+ if (intel_dp_is_edp(intel_dp)) {
+ struct intel_connector *connector = intel_dp->attached_connector;
+ int edp_max_rate = connector->panel.vbt.edp.max_link_rate;
+
+ if (max_rate && edp_max_rate)
+ max_rate = min(max_rate, edp_max_rate);
+ else if (edp_max_rate)
+ max_rate = edp_max_rate;
+ }
+
+ return max_rate;
+}
+
+static void
+intel_dp_set_source_rates(struct intel_dp *intel_dp)
+{
+ /* The values must be in increasing order */
+ static const int icl_rates[] = {
+ 162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000,
+ 1000000, 1350000,
+ };
+ static const int bxt_rates[] = {
+ 162000, 216000, 243000, 270000, 324000, 432000, 540000
+ };
+ static const int skl_rates[] = {
+ 162000, 216000, 270000, 324000, 432000, 540000
+ };
+ static const int hsw_rates[] = {
+ 162000, 270000, 540000
+ };
+ static const int g4x_rates[] = {
+ 162000, 270000
+ };
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+ const int *source_rates;
+ int size, max_rate = 0, vbt_max_rate;
+
+ /* This should only be done once */
+ drm_WARN_ON(&dev_priv->drm,
+ intel_dp->source_rates || intel_dp->num_source_rates);
+
+ if (DISPLAY_VER(dev_priv) >= 11) {
+ source_rates = icl_rates;
+ size = ARRAY_SIZE(icl_rates);
+ if (IS_DG2(dev_priv))
+ max_rate = dg2_max_source_rate(intel_dp);
+ else if (IS_ALDERLAKE_P(dev_priv) || IS_ALDERLAKE_S(dev_priv) ||
+ IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
+ max_rate = 810000;
+ else if (IS_JSL_EHL(dev_priv))
+ max_rate = ehl_max_source_rate(intel_dp);
+ else
+ max_rate = icl_max_source_rate(intel_dp);
+ } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ source_rates = bxt_rates;
+ size = ARRAY_SIZE(bxt_rates);
+ } else if (DISPLAY_VER(dev_priv) == 9) {
+ source_rates = skl_rates;
+ size = ARRAY_SIZE(skl_rates);
+ } else if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) ||
+ IS_BROADWELL(dev_priv)) {
+ source_rates = hsw_rates;
+ size = ARRAY_SIZE(hsw_rates);
+ } else {
+ source_rates = g4x_rates;
+ size = ARRAY_SIZE(g4x_rates);
+ }
+
+ vbt_max_rate = vbt_max_link_rate(intel_dp);
+ if (max_rate && vbt_max_rate)
+ max_rate = min(max_rate, vbt_max_rate);
+ else if (vbt_max_rate)
+ max_rate = vbt_max_rate;
+
+ if (max_rate)
+ size = intel_dp_rate_limit_len(source_rates, size, max_rate);
+
+ intel_dp->source_rates = source_rates;
+ intel_dp->num_source_rates = size;
+}
+
+static int intersect_rates(const int *source_rates, int source_len,
+ const int *sink_rates, int sink_len,
+ int *common_rates)
+{
+ int i = 0, j = 0, k = 0;
+
+ while (i < source_len && j < sink_len) {
+ if (source_rates[i] == sink_rates[j]) {
+ if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
+ return k;
+ common_rates[k] = source_rates[i];
+ ++k;
+ ++i;
+ ++j;
+ } else if (source_rates[i] < sink_rates[j]) {
+ ++i;
+ } else {
+ ++j;
+ }
+ }
+ return k;
+}
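+
+/*
+ * For example, source rates { 162000, 270000, 540000 } and sink rates
+ * { 162000, 270000, 324000 } intersect to { 162000, 270000 }, so k = 2.
+ */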
+
+/* return index of rate in rates array, or -1 if not found */
+static int intel_dp_rate_index(const int *rates, int len, int rate)
+{
+ int i;
+
+ for (i = 0; i < len; i++)
+ if (rate == rates[i])
+ return i;
+
+ return -1;
+}
+
+static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+ drm_WARN_ON(&i915->drm,
+ !intel_dp->num_source_rates || !intel_dp->num_sink_rates);
+
+ intel_dp->num_common_rates = intersect_rates(intel_dp->source_rates,
+ intel_dp->num_source_rates,
+ intel_dp->sink_rates,
+ intel_dp->num_sink_rates,
+ intel_dp->common_rates);
+
+ /* Paranoia, there should always be something in common. */
+ if (drm_WARN_ON(&i915->drm, intel_dp->num_common_rates == 0)) {
+ intel_dp->common_rates[0] = 162000;
+ intel_dp->num_common_rates = 1;
+ }
+}
+
+static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate,
+ u8 lane_count)
+{
+ /*
+ * FIXME: we need to synchronize the current link parameters with
+ * hardware readout. Currently fast link training doesn't work on
+ * boot-up.
+ */
+ if (link_rate == 0 ||
+ link_rate > intel_dp->max_link_rate)
+ return false;
+
+ if (lane_count == 0 ||
+ lane_count > intel_dp_max_lane_count(intel_dp))
+ return false;
+
+ return true;
+}
+
+static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp,
+ int link_rate,
+ u8 lane_count)
+{
+ /* FIXME figure out what we actually want here */
+ const struct drm_display_mode *fixed_mode =
+ intel_panel_preferred_fixed_mode(intel_dp->attached_connector);
+ int mode_rate, max_rate;
+
+ mode_rate = intel_dp_link_required(fixed_mode->clock, 18);
+ max_rate = intel_dp_max_data_rate(link_rate, lane_count);
+ if (mode_rate > max_rate)
+ return false;
+
+ return true;
+}
+
+int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
+ int link_rate, u8 lane_count)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ int index;
+
+ /*
+ * TODO: Enable fallback on MST links once MST link compute can handle
+ * the fallback params.
+ */
+ if (intel_dp->is_mst) {
+ drm_err(&i915->drm, "Link Training Unsuccessful\n");
+ return -1;
+ }
+
+ if (intel_dp_is_edp(intel_dp) && !intel_dp->use_max_params) {
+ drm_dbg_kms(&i915->drm,
+ "Retrying Link training for eDP with max parameters\n");
+ intel_dp->use_max_params = true;
+ return 0;
+ }
+
+ index = intel_dp_rate_index(intel_dp->common_rates,
+ intel_dp->num_common_rates,
+ link_rate);
+ if (index > 0) {
+ if (intel_dp_is_edp(intel_dp) &&
+ !intel_dp_can_link_train_fallback_for_edp(intel_dp,
+ intel_dp_common_rate(intel_dp, index - 1),
+ lane_count)) {
+ drm_dbg_kms(&i915->drm,
+ "Retrying Link training for eDP with same parameters\n");
+ return 0;
+ }
+ intel_dp->max_link_rate = intel_dp_common_rate(intel_dp, index - 1);
+ intel_dp->max_link_lane_count = lane_count;
+ } else if (lane_count > 1) {
+ if (intel_dp_is_edp(intel_dp) &&
+ !intel_dp_can_link_train_fallback_for_edp(intel_dp,
+ intel_dp_max_common_rate(intel_dp),
+ lane_count >> 1)) {
+ drm_dbg_kms(&i915->drm,
+ "Retrying Link training for eDP with same parameters\n");
+ return 0;
+ }
+ intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
+ intel_dp->max_link_lane_count = lane_count >> 1;
+ } else {
+ drm_err(&i915->drm, "Link Training Unsuccessful\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+u32 intel_dp_mode_to_fec_clock(u32 mode_clock)
+{
+ return div_u64(mul_u32_u32(mode_clock, 1000000U),
+ DP_DSC_FEC_OVERHEAD_FACTOR);
+}
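+
+/*
+ * For example, a 148500 kHz mode clock becomes
+ * 148500 * 1000000 / 972261 ~= 152736 kHz once FEC overhead is accounted for.
+ */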
+
+static int
+small_joiner_ram_size_bits(struct drm_i915_private *i915)
+{
+ if (DISPLAY_VER(i915) >= 13)
+ return 17280 * 8;
+ else if (DISPLAY_VER(i915) >= 11)
+ return 7680 * 8;
+ else
+ return 6144 * 8;
+}
+
+static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915,
+ u32 link_clock, u32 lane_count,
+ u32 mode_clock, u32 mode_hdisplay,
+ bool bigjoiner,
+ u32 pipe_bpp)
+{
+ u32 bits_per_pixel, max_bpp_small_joiner_ram;
+ int i;
+
+	/*
+	 * Available Link Bandwidth (Kbits/sec) = (NumberOfLanes) *
+	 * (LinkSymbolClock) * 8 * (TimeSlotsPerMTP);
+	 * for SST, TimeSlotsPerMTP is 1,
+	 * for MST, TimeSlotsPerMTP has to be calculated.
+	 */
+ bits_per_pixel = (link_clock * lane_count * 8) /
+ intel_dp_mode_to_fec_clock(mode_clock);
+
+ /* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
+ max_bpp_small_joiner_ram = small_joiner_ram_size_bits(i915) /
+ mode_hdisplay;
+
+ if (bigjoiner)
+ max_bpp_small_joiner_ram *= 2;
+
+ /*
+ * Greatest allowed DSC BPP = MIN (output BPP from available Link BW
+ * check, output bpp from small joiner RAM check)
+ */
+ bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram);
+
+ if (bigjoiner) {
+ u32 max_bpp_bigjoiner =
+ i915->display.cdclk.max_cdclk_freq * 48 /
+ intel_dp_mode_to_fec_clock(mode_clock);
+
+ bits_per_pixel = min(bits_per_pixel, max_bpp_bigjoiner);
+ }
+
+ /* Error out if the max bpp is less than smallest allowed valid bpp */
+ if (bits_per_pixel < valid_dsc_bpp[0]) {
+ drm_dbg_kms(&i915->drm, "Unsupported BPP %u, min %u\n",
+ bits_per_pixel, valid_dsc_bpp[0]);
+ return 0;
+ }
+
+	/*
+	 * From XE_LPD onwards we support compressed bpp values from bpc up
+	 * to uncompressed bpp-1
+	 */
+ if (DISPLAY_VER(i915) >= 13) {
+ bits_per_pixel = min(bits_per_pixel, pipe_bpp - 1);
+ } else {
+ /* Find the nearest match in the array of known BPPs from VESA */
+ for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
+ if (bits_per_pixel < valid_dsc_bpp[i + 1])
+ break;
+ }
+ bits_per_pixel = valid_dsc_bpp[i];
+ }
+
+	/*
+	 * The compressed BPP is returned in U6.4 fixed point format, so
+	 * multiply by 16; for Gen 11 the fractional part is 0.
+	 */
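+	/* For example, an integer 12 bpp result is encoded as 12 << 4 = 192. */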
+ return bits_per_pixel << 4;
+}
+
+static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
+ int mode_clock, int mode_hdisplay,
+ bool bigjoiner)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ u8 min_slice_count, i;
+ int max_slice_width;
+
+ if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE)
+ min_slice_count = DIV_ROUND_UP(mode_clock,
+ DP_DSC_MAX_ENC_THROUGHPUT_0);
+ else
+ min_slice_count = DIV_ROUND_UP(mode_clock,
+ DP_DSC_MAX_ENC_THROUGHPUT_1);
+
+ max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd);
+ if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
+ drm_dbg_kms(&i915->drm,
+ "Unsupported slice width %d by DP DSC Sink device\n",
+ max_slice_width);
+ return 0;
+ }
+ /* Also take into account max slice width */
+ min_slice_count = max_t(u8, min_slice_count,
+ DIV_ROUND_UP(mode_hdisplay,
+ max_slice_width));
+
+ /* Find the closest match to the valid slice count values */
+ for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
+ u8 test_slice_count = valid_dsc_slicecount[i] << bigjoiner;
+
+ if (test_slice_count >
+ drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd, false))
+ break;
+
+ /* big joiner needs small joiner to be enabled */
+ if (bigjoiner && test_slice_count < 4)
+ continue;
+
+ if (min_slice_count <= test_slice_count)
+ return test_slice_count;
+ }
+
+ drm_dbg_kms(&i915->drm, "Unsupported Slice Count %d\n",
+ min_slice_count);
+ return 0;
+}
+
+static enum intel_output_format
+intel_dp_output_format(struct intel_connector *connector,
+ bool ycbcr_420_output)
+{
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+
+ if (!connector->base.ycbcr_420_allowed || !ycbcr_420_output)
+ return INTEL_OUTPUT_FORMAT_RGB;
+
+ if (intel_dp->dfp.rgb_to_ycbcr &&
+ intel_dp->dfp.ycbcr_444_to_420)
+ return INTEL_OUTPUT_FORMAT_RGB;
+
+ if (intel_dp->dfp.ycbcr_444_to_420)
+ return INTEL_OUTPUT_FORMAT_YCBCR444;
+ else
+ return INTEL_OUTPUT_FORMAT_YCBCR420;
+}
+
+int intel_dp_min_bpp(enum intel_output_format output_format)
+{
+ if (output_format == INTEL_OUTPUT_FORMAT_RGB)
+ return 6 * 3;
+ else
+ return 8 * 3;
+}
+
+static int intel_dp_output_bpp(enum intel_output_format output_format, int bpp)
+{
+	/*
+	 * The bpp value was computed assuming RGB. For YCbCr 4:2:0 output
+	 * the number of bits per pixel is half that of RGB.
+	 */
+ if (output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
+ bpp /= 2;
+
+ return bpp;
+}
+
+static int
+intel_dp_mode_min_output_bpp(struct intel_connector *connector,
+ const struct drm_display_mode *mode)
+{
+ const struct drm_display_info *info = &connector->base.display_info;
+ enum intel_output_format output_format =
+ intel_dp_output_format(connector, drm_mode_is_420_only(info, mode));
+
+ return intel_dp_output_bpp(output_format, intel_dp_min_bpp(output_format));
+}
+
+static bool intel_dp_hdisplay_bad(struct drm_i915_private *dev_priv,
+ int hdisplay)
+{
+ /*
+ * Older platforms don't like hdisplay==4096 with DP.
+ *
+ * On ILK/SNB/IVB the pipe seems to be somewhat running (scanline
+ * and frame counter increment), but we don't get vblank interrupts,
+ * and the pipe underruns immediately. The link also doesn't seem
+ * to get trained properly.
+ *
+ * On CHV the vblank interrupts don't seem to disappear but
+ * otherwise the symptoms are similar.
+ *
+ * TODO: confirm the behaviour on HSW+
+ */
+ return hdisplay == 4096 && !HAS_DDI(dev_priv);
+}
+
+static int intel_dp_max_tmds_clock(struct intel_dp *intel_dp)
+{
+ struct intel_connector *connector = intel_dp->attached_connector;
+ const struct drm_display_info *info = &connector->base.display_info;
+ int max_tmds_clock = intel_dp->dfp.max_tmds_clock;
+
+ /* Only consider the sink's max TMDS clock if we know this is a HDMI DFP */
+ if (max_tmds_clock && info->max_tmds_clock)
+ max_tmds_clock = min(max_tmds_clock, info->max_tmds_clock);
+
+ return max_tmds_clock;
+}
+
+static enum drm_mode_status
+intel_dp_tmds_clock_valid(struct intel_dp *intel_dp,
+ int clock, int bpc, bool ycbcr420_output,
+ bool respect_downstream_limits)
+{
+ int tmds_clock, min_tmds_clock, max_tmds_clock;
+
+ if (!respect_downstream_limits)
+ return MODE_OK;
+
+ tmds_clock = intel_hdmi_tmds_clock(clock, bpc, ycbcr420_output);
+
+ min_tmds_clock = intel_dp->dfp.min_tmds_clock;
+ max_tmds_clock = intel_dp_max_tmds_clock(intel_dp);
+
+ if (min_tmds_clock && tmds_clock < min_tmds_clock)
+ return MODE_CLOCK_LOW;
+
+ if (max_tmds_clock && tmds_clock > max_tmds_clock)
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
+
+static enum drm_mode_status
+intel_dp_mode_valid_downstream(struct intel_connector *connector,
+ const struct drm_display_mode *mode,
+ int target_clock)
+{
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+ const struct drm_display_info *info = &connector->base.display_info;
+ enum drm_mode_status status;
+ bool ycbcr_420_only;
+
+ /* If PCON supports FRL MODE, check FRL bandwidth constraints */
+ if (intel_dp->dfp.pcon_max_frl_bw) {
+ int target_bw;
+ int max_frl_bw;
+ int bpp = intel_dp_mode_min_output_bpp(connector, mode);
+
+ target_bw = bpp * target_clock;
+
+ max_frl_bw = intel_dp->dfp.pcon_max_frl_bw;
+
+		/* converting bw from Gbps to Kbps */
+ max_frl_bw = max_frl_bw * 1000000;
+
+ if (target_bw > max_frl_bw)
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+ }
+
+ if (intel_dp->dfp.max_dotclock &&
+ target_clock > intel_dp->dfp.max_dotclock)
+ return MODE_CLOCK_HIGH;
+
+ ycbcr_420_only = drm_mode_is_420_only(info, mode);
+
+ /* Assume 8bpc for the DP++/HDMI/DVI TMDS clock check */
+ status = intel_dp_tmds_clock_valid(intel_dp, target_clock,
+ 8, ycbcr_420_only, true);
+
+ if (status != MODE_OK) {
+ if (ycbcr_420_only ||
+ !connector->base.ycbcr_420_allowed ||
+ !drm_mode_is_420_also(info, mode))
+ return status;
+
+ status = intel_dp_tmds_clock_valid(intel_dp, target_clock,
+ 8, true, true);
+ if (status != MODE_OK)
+ return status;
+ }
+
+ return MODE_OK;
+}
+
+static bool intel_dp_need_bigjoiner(struct intel_dp *intel_dp,
+ int hdisplay, int clock)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+ if (!intel_dp_can_bigjoiner(intel_dp))
+ return false;
+
+ return clock > i915->max_dotclk_freq || hdisplay > 5120;
+}
+
+static enum drm_mode_status
+intel_dp_mode_valid(struct drm_connector *_connector,
+ struct drm_display_mode *mode)
+{
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ const struct drm_display_mode *fixed_mode;
+ int target_clock = mode->clock;
+ int max_rate, mode_rate, max_lanes, max_link_clock;
+ int max_dotclk = dev_priv->max_dotclk_freq;
+ u16 dsc_max_output_bpp = 0;
+ u8 dsc_slice_count = 0;
+ enum drm_mode_status status;
+ bool dsc = false, bigjoiner = false;
+
+ status = intel_cpu_transcoder_mode_valid(dev_priv, mode);
+ if (status != MODE_OK)
+ return status;
+
+ if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+ return MODE_H_ILLEGAL;
+
+ fixed_mode = intel_panel_fixed_mode(connector, mode);
+ if (intel_dp_is_edp(intel_dp) && fixed_mode) {
+ status = intel_panel_mode_valid(connector, mode);
+ if (status != MODE_OK)
+ return status;
+
+ target_clock = fixed_mode->clock;
+ }
+
+ if (mode->clock < 10000)
+ return MODE_CLOCK_LOW;
+
+ if (intel_dp_need_bigjoiner(intel_dp, mode->hdisplay, target_clock)) {
+ bigjoiner = true;
+ max_dotclk *= 2;
+ }
+ if (target_clock > max_dotclk)
+ return MODE_CLOCK_HIGH;
+
+ max_link_clock = intel_dp_max_link_rate(intel_dp);
+ max_lanes = intel_dp_max_lane_count(intel_dp);
+
+ max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
+ mode_rate = intel_dp_link_required(target_clock,
+ intel_dp_mode_min_output_bpp(connector, mode));
+
+ if (intel_dp_hdisplay_bad(dev_priv, mode->hdisplay))
+ return MODE_H_ILLEGAL;
+
+ /*
+ * Output bpp is stored in 6.4 format so right shift by 4 to get the
+ * integer value since we support only integer values of bpp.
+ */
+ if (DISPLAY_VER(dev_priv) >= 10 &&
+		/*
+		 * TBD: pass the connector BPC; for now use U8_MAX so that the
+		 * max BPC on that platform is picked.
+		 */
+ int pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, U8_MAX);
+
+ if (intel_dp_is_edp(intel_dp)) {
+ dsc_max_output_bpp =
+ drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4;
+ dsc_slice_count =
+ drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
+ true);
+ } else if (drm_dp_sink_supports_fec(intel_dp->fec_capable)) {
+ dsc_max_output_bpp =
+ intel_dp_dsc_get_output_bpp(dev_priv,
+ max_link_clock,
+ max_lanes,
+ target_clock,
+ mode->hdisplay,
+ bigjoiner,
+ pipe_bpp) >> 4;
+ dsc_slice_count =
+ intel_dp_dsc_get_slice_count(intel_dp,
+ target_clock,
+ mode->hdisplay,
+ bigjoiner);
+ }
+
+ dsc = dsc_max_output_bpp && dsc_slice_count;
+ }
+
+ /*
+ * Big joiner configuration needs DSC for TGL which is not true for
+ * XE_LPD where uncompressed joiner is supported.
+ */
+ if (DISPLAY_VER(dev_priv) < 13 && bigjoiner && !dsc)
+ return MODE_CLOCK_HIGH;
+
+ if (mode_rate > max_rate && !dsc)
+ return MODE_CLOCK_HIGH;
+
+ status = intel_dp_mode_valid_downstream(connector, mode, target_clock);
+ if (status != MODE_OK)
+ return status;
+
+ return intel_mode_valid_max_plane_size(dev_priv, mode, bigjoiner);
+}
+
+bool intel_dp_source_supports_tps3(struct drm_i915_private *i915)
+{
+ return DISPLAY_VER(i915) >= 9 || IS_BROADWELL(i915) || IS_HASWELL(i915);
+}
+
+bool intel_dp_source_supports_tps4(struct drm_i915_private *i915)
+{
+ return DISPLAY_VER(i915) >= 10;
+}
+
+static void snprintf_int_array(char *str, size_t len,
+ const int *array, int nelem)
+{
+ int i;
+
+ str[0] = '\0';
+
+ for (i = 0; i < nelem; i++) {
+ int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
+ if (r >= len)
+ return;
+ str += r;
+ len -= r;
+ }
+}
+
+static void intel_dp_print_rates(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ char str[128]; /* FIXME: too big for stack? */
+
+ if (!drm_debug_enabled(DRM_UT_KMS))
+ return;
+
+ snprintf_int_array(str, sizeof(str),
+ intel_dp->source_rates, intel_dp->num_source_rates);
+ drm_dbg_kms(&i915->drm, "source rates: %s\n", str);
+
+ snprintf_int_array(str, sizeof(str),
+ intel_dp->sink_rates, intel_dp->num_sink_rates);
+ drm_dbg_kms(&i915->drm, "sink rates: %s\n", str);
+
+ snprintf_int_array(str, sizeof(str),
+ intel_dp->common_rates, intel_dp->num_common_rates);
+ drm_dbg_kms(&i915->drm, "common rates: %s\n", str);
+}
+
+int
+intel_dp_max_link_rate(struct intel_dp *intel_dp)
+{
+ int len;
+
+ len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->max_link_rate);
+
+ return intel_dp_common_rate(intel_dp, len - 1);
+}
+
+int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ int i = intel_dp_rate_index(intel_dp->sink_rates,
+ intel_dp->num_sink_rates, rate);
+
+ if (drm_WARN_ON(&i915->drm, i < 0))
+ i = 0;
+
+ return i;
+}
+
+void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
+ u8 *link_bw, u8 *rate_select)
+{
+ /* eDP 1.4 rate select method. */
+ if (intel_dp->use_rate_select) {
+ *link_bw = 0;
+ *rate_select =
+ intel_dp_rate_select(intel_dp, port_clock);
+ } else {
+ *link_bw = drm_dp_link_rate_to_bw_code(port_clock);
+ *rate_select = 0;
+ }
+}
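+
+/*
+ * For instance, with the link BW method a 270000 kHz port clock packs as
+ * link_bw = DP_LINK_BW_2_7 (0x0a) with rate_select = 0, whereas with the
+ * eDP 1.4 rate select method link_bw stays 0 and rate_select holds the
+ * index of 270000 in the sink rate table.
+ */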
+
+static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ /* On TGL+, FEC is supported on all pipes */
+ if (DISPLAY_VER(dev_priv) >= 12)
+ return true;
+
+ if (DISPLAY_VER(dev_priv) == 11 && pipe_config->cpu_transcoder != TRANSCODER_A)
+ return true;
+
+ return false;
+}
+
+static bool intel_dp_supports_fec(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *pipe_config)
+{
+ return intel_dp_source_supports_fec(intel_dp, pipe_config) &&
+ drm_dp_sink_supports_fec(intel_dp->fec_capable);
+}
+
+static bool intel_dp_supports_dsc(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP) && !crtc_state->fec_enable)
+ return false;
+
+ return intel_dsc_source_support(crtc_state) &&
+ drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd);
+}
+
+static bool intel_dp_is_ycbcr420(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ return crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
+ (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444 &&
+ intel_dp->dfp.ycbcr_444_to_420);
+}
+
+static int intel_dp_hdmi_compute_bpc(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ int bpc, bool respect_downstream_limits)
+{
+ bool ycbcr420_output = intel_dp_is_ycbcr420(intel_dp, crtc_state);
+ int clock = crtc_state->hw.adjusted_mode.crtc_clock;
+
+ /*
+ * Current bpc could already be below 8bpc due to
+ * FDI bandwidth constraints or other limits.
+ * HDMI minimum is 8bpc however.
+ */
+ bpc = max(bpc, 8);
+
+ /*
+ * We will never exceed downstream TMDS clock limits while
+ * attempting deep color. If the user insists on forcing an
+ * out of spec mode they will have to be satisfied with 8bpc.
+ */
+ if (!respect_downstream_limits)
+ bpc = 8;
+
+ for (; bpc >= 8; bpc -= 2) {
+ if (intel_hdmi_bpc_possible(crtc_state, bpc,
+ intel_dp->has_hdmi_sink, ycbcr420_output) &&
+ intel_dp_tmds_clock_valid(intel_dp, clock, bpc, ycbcr420_output,
+ respect_downstream_limits) == MODE_OK)
+ return bpc;
+ }
+
+ return -EINVAL;
+}
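+
+/*
+ * E.g. starting from bpc = 12 the loop above tries 12, 10 and finally 8
+ * until both the HDMI bpc check and the TMDS clock check pass, so a sink
+ * whose TMDS clock limit only allows 8bpc at the given clock gets 8.
+ */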
+
+static int intel_dp_max_bpp(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ bool respect_downstream_limits)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_connector *intel_connector = intel_dp->attached_connector;
+ int bpp, bpc;
+
+ bpc = crtc_state->pipe_bpp / 3;
+
+ if (intel_dp->dfp.max_bpc)
+ bpc = min_t(int, bpc, intel_dp->dfp.max_bpc);
+
+ if (intel_dp->dfp.min_tmds_clock) {
+ int max_hdmi_bpc;
+
+ max_hdmi_bpc = intel_dp_hdmi_compute_bpc(intel_dp, crtc_state, bpc,
+ respect_downstream_limits);
+ if (max_hdmi_bpc < 0)
+ return 0;
+
+ bpc = min(bpc, max_hdmi_bpc);
+ }
+
+ bpp = bpc * 3;
+ if (intel_dp_is_edp(intel_dp)) {
+ /* Get bpp from VBT only for panels that don't have bpp in EDID */
+ if (intel_connector->base.display_info.bpc == 0 &&
+ intel_connector->panel.vbt.edp.bpp &&
+ intel_connector->panel.vbt.edp.bpp < bpp) {
+ drm_dbg_kms(&dev_priv->drm,
+ "clamping bpp for eDP panel to BIOS-provided %i\n",
+ intel_connector->panel.vbt.edp.bpp);
+ bpp = intel_connector->panel.vbt.edp.bpp;
+ }
+ }
+
+ return bpp;
+}
+
+/* Adjust link config limits based on compliance test requests. */
+void
+intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
+ struct intel_crtc_state *pipe_config,
+ struct link_config_limits *limits)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+ /* For DP Compliance we override the computed bpp for the pipe */
+ if (intel_dp->compliance.test_data.bpc != 0) {
+ int bpp = 3 * intel_dp->compliance.test_data.bpc;
+
+ limits->min_bpp = limits->max_bpp = bpp;
+ pipe_config->dither_force_disable = bpp == 6 * 3;
+
+ drm_dbg_kms(&i915->drm, "Setting pipe_bpp to %d\n", bpp);
+ }
+
+ /* Use values requested by Compliance Test Request */
+ if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
+ int index;
+
+ /*
+ * Validate the compliance test data since max values
+ * might have changed due to link train fallback.
+ */
+ if (intel_dp_link_params_valid(intel_dp, intel_dp->compliance.test_link_rate,
+ intel_dp->compliance.test_lane_count)) {
+ index = intel_dp_rate_index(intel_dp->common_rates,
+ intel_dp->num_common_rates,
+ intel_dp->compliance.test_link_rate);
+ if (index >= 0)
+ limits->min_rate = limits->max_rate =
+ intel_dp->compliance.test_link_rate;
+ limits->min_lane_count = limits->max_lane_count =
+ intel_dp->compliance.test_lane_count;
+ }
+ }
+}
+
+static bool has_seamless_m_n(struct intel_connector *connector)
+{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+
+ /*
+ * Seamless M/N reprogramming only implemented
+ * for BDW+ double buffered M/N registers so far.
+ */
+ return HAS_DOUBLE_BUFFERED_M_N(i915) &&
+ intel_panel_drrs_type(connector) == DRRS_TYPE_SEAMLESS;
+}
+
+static int intel_dp_mode_clock(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
+
+ /* FIXME a bit of a mess wrt clock vs. crtc_clock */
+ if (has_seamless_m_n(connector))
+ return intel_panel_highest_mode(connector, adjusted_mode)->clock;
+ else
+ return adjusted_mode->crtc_clock;
+}
+
+/* Optimize link config in order: max bpp, min clock, min lanes */
+static int
+intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
+ struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state,
+ const struct link_config_limits *limits)
+{
+ int bpp, i, lane_count, clock = intel_dp_mode_clock(pipe_config, conn_state);
+ int mode_rate, link_rate, link_avail;
+
+ for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
+ int output_bpp = intel_dp_output_bpp(pipe_config->output_format, bpp);
+
+ mode_rate = intel_dp_link_required(clock, output_bpp);
+
+ for (i = 0; i < intel_dp->num_common_rates; i++) {
+ link_rate = intel_dp_common_rate(intel_dp, i);
+ if (link_rate < limits->min_rate ||
+ link_rate > limits->max_rate)
+ continue;
+
+ for (lane_count = limits->min_lane_count;
+ lane_count <= limits->max_lane_count;
+ lane_count <<= 1) {
+ link_avail = intel_dp_max_data_rate(link_rate,
+ lane_count);
+
+ if (mode_rate <= link_avail) {
+ pipe_config->lane_count = lane_count;
+ pipe_config->pipe_bpp = bpp;
+ pipe_config->port_clock = link_rate;
+
+ return 0;
+ }
+ }
+ }
+ }
+
+ return -EINVAL;
+}
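+
+/*
+ * Worked example (illustrative numbers): a 148500 kHz mode at 24 bpp
+ * needs 148500 * 24 / 8 = 445500 kB/s. With common rates
+ * {162000, 270000, ...} and lane counts {1, 2, 4} the first sufficient
+ * combination is 162000 kHz x 4 lanes = 648000 kB/s, i.e. the lowest
+ * link clock is preferred over the lowest lane count.
+ */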
+
+static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 max_req_bpc)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ int i, num_bpc;
+ u8 dsc_bpc[3] = {0};
+ u8 dsc_max_bpc;
+
+ /* Max DSC Input BPC for ICL is 10 and for TGL+ is 12 */
+ if (DISPLAY_VER(i915) >= 12)
+ dsc_max_bpc = min_t(u8, 12, max_req_bpc);
+ else
+ dsc_max_bpc = min_t(u8, 10, max_req_bpc);
+
+ num_bpc = drm_dp_dsc_sink_supported_input_bpcs(intel_dp->dsc_dpcd,
+ dsc_bpc);
+ for (i = 0; i < num_bpc; i++) {
+ if (dsc_max_bpc >= dsc_bpc[i])
+ return dsc_bpc[i] * 3;
+ }
+
+ return 0;
+}
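+
+/*
+ * E.g. on TGL+ with max_req_bpc = U8_MAX, dsc_max_bpc is clamped to 12;
+ * if the sink reports supported input bpcs {12, 10, 8} the first match
+ * (12) wins and the function returns 12 * 3 = 36.
+ */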
+
+static int intel_dp_source_dsc_version_minor(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+ return DISPLAY_VER(i915) >= 14 ? 2 : 1;
+}
+
+static int intel_dp_sink_dsc_version_minor(struct intel_dp *intel_dp)
+{
+ return (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] & DP_DSC_MINOR_MASK) >>
+ DP_DSC_MINOR_SHIFT;
+}
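+
+/*
+ * The DP_DSC_REV byte packs the major version in bits 3:0 and the minor
+ * version in bits 7:4, so e.g. a raw value of 0x21 decodes as DSC 1.2.
+ */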
+
+static int intel_dp_dsc_compute_params(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
+ u8 line_buf_depth;
+ int ret;
+
+ /*
+ * RC_MODEL_SIZE is currently a constant across all configurations.
+ *
+ * FIXME: Look into using sink defined DPCD DP_DSC_RC_BUF_BLK_SIZE and
+ * DP_DSC_RC_BUF_SIZE for this.
+ */
+ vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST;
+ vdsc_cfg->pic_height = crtc_state->hw.adjusted_mode.crtc_vdisplay;
+
+ /*
+ * Slice Height of 8 works for all currently available panels. So start
+ * with that if pic_height is an integral multiple of 8. Eventually add
+ * logic to try multiple slice heights.
+ */
+ if (vdsc_cfg->pic_height % 8 == 0)
+ vdsc_cfg->slice_height = 8;
+ else if (vdsc_cfg->pic_height % 4 == 0)
+ vdsc_cfg->slice_height = 4;
+ else
+ vdsc_cfg->slice_height = 2;
+
+ ret = intel_dsc_compute_params(crtc_state);
+ if (ret)
+ return ret;
+
+ vdsc_cfg->dsc_version_major =
+ (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
+ DP_DSC_MAJOR_MASK) >> DP_DSC_MAJOR_SHIFT;
+ vdsc_cfg->dsc_version_minor =
+ min(intel_dp_source_dsc_version_minor(intel_dp),
+ intel_dp_sink_dsc_version_minor(intel_dp));
+
+ vdsc_cfg->convert_rgb = intel_dp->dsc_dpcd[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT] &
+ DP_DSC_RGB;
+
+ line_buf_depth = drm_dp_dsc_sink_line_buf_depth(intel_dp->dsc_dpcd);
+ if (!line_buf_depth) {
+ drm_dbg_kms(&i915->drm,
+ "DSC Sink Line Buffer Depth invalid\n");
+ return -EINVAL;
+ }
+
+ if (vdsc_cfg->dsc_version_minor == 2)
+ vdsc_cfg->line_buf_depth = (line_buf_depth == DSC_1_2_MAX_LINEBUF_DEPTH_BITS) ?
+ DSC_1_2_MAX_LINEBUF_DEPTH_VAL : line_buf_depth;
+ else
+ vdsc_cfg->line_buf_depth = (line_buf_depth > DSC_1_1_MAX_LINEBUF_DEPTH_BITS) ?
+ DSC_1_1_MAX_LINEBUF_DEPTH_BITS : line_buf_depth;
+
+ vdsc_cfg->block_pred_enable =
+ intel_dp->dsc_dpcd[DP_DSC_BLK_PREDICTION_SUPPORT - DP_DSC_SUPPORT] &
+ DP_DSC_BLK_PREDICTION_IS_SUPPORTED;
+
+ return drm_dsc_compute_rc_parameters(vdsc_cfg);
+}
+
+static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state,
+ struct link_config_limits *limits)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+ const struct drm_display_mode *adjusted_mode =
+ &pipe_config->hw.adjusted_mode;
+ int pipe_bpp;
+ int ret;
+
+ pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) &&
+ intel_dp_supports_fec(intel_dp, pipe_config);
+
+ if (!intel_dp_supports_dsc(intel_dp, pipe_config))
+ return -EINVAL;
+
+ pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, conn_state->max_requested_bpc);
+
+ if (intel_dp->force_dsc_bpc) {
+ pipe_bpp = intel_dp->force_dsc_bpc * 3;
+ drm_dbg_kms(&dev_priv->drm, "Input DSC BPP forced to %d\n", pipe_bpp);
+ }
+
+ /* Min Input BPC for ICL+ is 8 */
+ if (pipe_bpp < 8 * 3) {
+ drm_dbg_kms(&dev_priv->drm,
+ "No DSC support for less than 8bpc\n");
+ return -EINVAL;
+ }
+
+ /*
+ * For now enable DSC for max bpp, max link rate, max lane count.
+ * Optimize this later for the minimum possible link rate/lane count
+ * with DSC enabled for the requested mode.
+ */
+ pipe_config->pipe_bpp = pipe_bpp;
+ pipe_config->port_clock = limits->max_rate;
+ pipe_config->lane_count = limits->max_lane_count;
+
+ if (intel_dp_is_edp(intel_dp)) {
+ pipe_config->dsc.compressed_bpp =
+ min_t(u16, drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4,
+ pipe_config->pipe_bpp);
+ pipe_config->dsc.slice_count =
+ drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
+ true);
+ if (!pipe_config->dsc.slice_count) {
+ drm_dbg_kms(&dev_priv->drm, "Unsupported Slice Count %d\n",
+ pipe_config->dsc.slice_count);
+ return -EINVAL;
+ }
+ } else {
+ u16 dsc_max_output_bpp;
+ u8 dsc_dp_slice_count;
+
+ dsc_max_output_bpp =
+ intel_dp_dsc_get_output_bpp(dev_priv,
+ pipe_config->port_clock,
+ pipe_config->lane_count,
+ adjusted_mode->crtc_clock,
+ adjusted_mode->crtc_hdisplay,
+ pipe_config->bigjoiner_pipes,
+ pipe_bpp);
+ dsc_dp_slice_count =
+ intel_dp_dsc_get_slice_count(intel_dp,
+ adjusted_mode->crtc_clock,
+ adjusted_mode->crtc_hdisplay,
+ pipe_config->bigjoiner_pipes);
+ if (!dsc_max_output_bpp || !dsc_dp_slice_count) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Compressed BPP/Slice Count not supported\n");
+ return -EINVAL;
+ }
+ pipe_config->dsc.compressed_bpp = min_t(u16,
+ dsc_max_output_bpp >> 4,
+ pipe_config->pipe_bpp);
+ pipe_config->dsc.slice_count = dsc_dp_slice_count;
+ }
+
+ /*
+ * The VDSC engine operates at 1 pixel per clock, so if the peak pixel
+ * rate is greater than the maximum CDCLK, or a big joiner is used, we
+ * need to split the stream across 2 VDSC instances, which requires a
+ * slice count of at least 2.
+ */
+ if (adjusted_mode->crtc_clock > dev_priv->display.cdclk.max_cdclk_freq ||
+ pipe_config->bigjoiner_pipes) {
+ if (pipe_config->dsc.slice_count < 2) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Cannot split stream to use 2 VDSC instances\n");
+ return -EINVAL;
+ }
+
+ pipe_config->dsc.dsc_split = true;
+ }
+
+ ret = intel_dp_dsc_compute_params(&dig_port->base, pipe_config);
+ if (ret < 0) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Cannot compute valid DSC parameters for Input Bpp = %d "
+ "Compressed BPP = %d\n",
+ pipe_config->pipe_bpp,
+ pipe_config->dsc.compressed_bpp);
+ return ret;
+ }
+
+ pipe_config->dsc.compression_enable = true;
+ drm_dbg_kms(&dev_priv->drm, "DP DSC computed with Input Bpp = %d "
+ "Compressed Bpp = %d Slice Count = %d\n",
+ pipe_config->pipe_bpp,
+ pipe_config->dsc.compressed_bpp,
+ pipe_config->dsc.slice_count);
+
+ return 0;
+}
+
+static int
+intel_dp_compute_link_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state,
+ bool respect_downstream_limits)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+ const struct drm_display_mode *adjusted_mode =
+ &pipe_config->hw.adjusted_mode;
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct link_config_limits limits;
+ bool joiner_needs_dsc = false;
+ int ret;
+
+ limits.min_rate = intel_dp_common_rate(intel_dp, 0);
+ limits.max_rate = intel_dp_max_link_rate(intel_dp);
+
+ limits.min_lane_count = 1;
+ limits.max_lane_count = intel_dp_max_lane_count(intel_dp);
+
+ limits.min_bpp = intel_dp_min_bpp(pipe_config->output_format);
+ limits.max_bpp = intel_dp_max_bpp(intel_dp, pipe_config, respect_downstream_limits);
+
+ if (intel_dp->use_max_params) {
+ /*
+ * Use the maximum clock and number of lanes the eDP panel
+ * advertises being capable of in case the initial fast
+ * optimal params failed us. The panels are generally
+ * designed to support only a single clock and lane
+ * configuration, and typically on older panels these
+ * values correspond to the native resolution of the panel.
+ */
+ limits.min_lane_count = limits.max_lane_count;
+ limits.min_rate = limits.max_rate;
+ }
+
+ intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits);
+
+ drm_dbg_kms(&i915->drm, "DP link computation with max lane count %i "
+ "max rate %d max bpp %d pixel clock %iKHz\n",
+ limits.max_lane_count, limits.max_rate,
+ limits.max_bpp, adjusted_mode->crtc_clock);
+
+ if (intel_dp_need_bigjoiner(intel_dp, adjusted_mode->crtc_hdisplay,
+ adjusted_mode->crtc_clock))
+ pipe_config->bigjoiner_pipes = GENMASK(crtc->pipe + 1, crtc->pipe);
+
+ /*
+ * Pipe joiner needs compression up to display version 12 due to
+ * bandwidth limitations. From DG2 onwards, the pipe joiner can be
+ * enabled without compression.
+ */
+ joiner_needs_dsc = DISPLAY_VER(i915) < 13 && pipe_config->bigjoiner_pipes;
+
+ /*
+ * Optimize for slow and wide for everything, because there are some
+ * eDP 1.3 and 1.4 panels that don't work well with fast and narrow.
+ */
+ ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, conn_state, &limits);
+
+ if (ret || joiner_needs_dsc || intel_dp->force_dsc_en) {
+ drm_dbg_kms(&i915->drm, "Try DSC (fallback=%s, joiner=%s, force=%s)\n",
+ str_yes_no(ret), str_yes_no(joiner_needs_dsc),
+ str_yes_no(intel_dp->force_dsc_en));
+ ret = intel_dp_dsc_compute_config(intel_dp, pipe_config,
+ conn_state, &limits);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (pipe_config->dsc.compression_enable) {
+ drm_dbg_kms(&i915->drm,
+ "DP lane count %d clock %d Input bpp %d Compressed bpp %d\n",
+ pipe_config->lane_count, pipe_config->port_clock,
+ pipe_config->pipe_bpp,
+ pipe_config->dsc.compressed_bpp);
+
+ drm_dbg_kms(&i915->drm,
+ "DP link rate required %i available %i\n",
+ intel_dp_link_required(adjusted_mode->crtc_clock,
+ pipe_config->dsc.compressed_bpp),
+ intel_dp_max_data_rate(pipe_config->port_clock,
+ pipe_config->lane_count));
+ } else {
+ drm_dbg_kms(&i915->drm, "DP lane count %d clock %d bpp %d\n",
+ pipe_config->lane_count, pipe_config->port_clock,
+ pipe_config->pipe_bpp);
+
+ drm_dbg_kms(&i915->drm,
+ "DP link rate required %i available %i\n",
+ intel_dp_link_required(adjusted_mode->crtc_clock,
+ pipe_config->pipe_bpp),
+ intel_dp_max_data_rate(pipe_config->port_clock,
+ pipe_config->lane_count));
+ }
+ return 0;
+}
+
+bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ const struct intel_digital_connector_state *intel_conn_state =
+ to_intel_digital_connector_state(conn_state);
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+
+ /*
+ * Our YCbCr output is always limited range.
+ * crtc_state->limited_color_range only applies to RGB,
+ * and it must never be set for YCbCr or we risk setting
+ * some conflicting bits in PIPECONF which will mess up
+ * the colors on the monitor.
+ */
+ if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
+ return false;
+
+ if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
+ /*
+ * See:
+ * CEA-861-E - 5.1 Default Encoding Parameters
+ * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
+ */
+ return crtc_state->pipe_bpp != 18 &&
+ drm_default_rgb_quant_range(adjusted_mode) ==
+ HDMI_QUANTIZATION_RANGE_LIMITED;
+ } else {
+ return intel_conn_state->broadcast_rgb ==
+ INTEL_BROADCAST_RGB_LIMITED;
+ }
+}
+
+static bool intel_dp_port_has_audio(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ if (IS_G4X(dev_priv))
+ return false;
+ if (DISPLAY_VER(dev_priv) < 12 && port == PORT_A)
+ return false;
+
+ return true;
+}
+
+static void intel_dp_compute_vsc_colorimetry(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state,
+ struct drm_dp_vsc_sdp *vsc)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+ /*
+ * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118
+ * VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/
+ * Colorimetry Format indication.
+ */
+ vsc->revision = 0x5;
+ vsc->length = 0x13;
+
+ /* DP 1.4a spec, Table 2-120 */
+ switch (crtc_state->output_format) {
+ case INTEL_OUTPUT_FORMAT_YCBCR444:
+ vsc->pixelformat = DP_PIXELFORMAT_YUV444;
+ break;
+ case INTEL_OUTPUT_FORMAT_YCBCR420:
+ vsc->pixelformat = DP_PIXELFORMAT_YUV420;
+ break;
+ case INTEL_OUTPUT_FORMAT_RGB:
+ default:
+ vsc->pixelformat = DP_PIXELFORMAT_RGB;
+ }
+
+ switch (conn_state->colorspace) {
+ case DRM_MODE_COLORIMETRY_BT709_YCC:
+ vsc->colorimetry = DP_COLORIMETRY_BT709_YCC;
+ break;
+ case DRM_MODE_COLORIMETRY_XVYCC_601:
+ vsc->colorimetry = DP_COLORIMETRY_XVYCC_601;
+ break;
+ case DRM_MODE_COLORIMETRY_XVYCC_709:
+ vsc->colorimetry = DP_COLORIMETRY_XVYCC_709;
+ break;
+ case DRM_MODE_COLORIMETRY_SYCC_601:
+ vsc->colorimetry = DP_COLORIMETRY_SYCC_601;
+ break;
+ case DRM_MODE_COLORIMETRY_OPYCC_601:
+ vsc->colorimetry = DP_COLORIMETRY_OPYCC_601;
+ break;
+ case DRM_MODE_COLORIMETRY_BT2020_CYCC:
+ vsc->colorimetry = DP_COLORIMETRY_BT2020_CYCC;
+ break;
+ case DRM_MODE_COLORIMETRY_BT2020_RGB:
+ vsc->colorimetry = DP_COLORIMETRY_BT2020_RGB;
+ break;
+ case DRM_MODE_COLORIMETRY_BT2020_YCC:
+ vsc->colorimetry = DP_COLORIMETRY_BT2020_YCC;
+ break;
+ case DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65:
+ case DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER:
+ vsc->colorimetry = DP_COLORIMETRY_DCI_P3_RGB;
+ break;
+ default:
+ /*
+ * RGB->YCBCR color conversion uses the BT.709
+ * color space.
+ */
+ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
+ vsc->colorimetry = DP_COLORIMETRY_BT709_YCC;
+ else
+ vsc->colorimetry = DP_COLORIMETRY_DEFAULT;
+ break;
+ }
+
+ vsc->bpc = crtc_state->pipe_bpp / 3;
+
+ /* only RGB pixelformat supports 6 bpc */
+ drm_WARN_ON(&dev_priv->drm,
+ vsc->bpc == 6 && vsc->pixelformat != DP_PIXELFORMAT_RGB);
+
+ /* all YCbCr are always limited range */
+ vsc->dynamic_range = DP_DYNAMIC_RANGE_CTA;
+ vsc->content_type = DP_CONTENT_TYPE_NOT_DEFINED;
+}
+
+static void intel_dp_compute_vsc_sdp(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct drm_dp_vsc_sdp *vsc = &crtc_state->infoframes.vsc;
+
+ /* When a crtc state has PSR, VSC SDP will be handled by PSR routine */
+ if (crtc_state->has_psr)
+ return;
+
+ if (!intel_dp_needs_vsc_sdp(crtc_state, conn_state))
+ return;
+
+ crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
+ vsc->sdp_type = DP_SDP_VSC;
+ intel_dp_compute_vsc_colorimetry(crtc_state, conn_state,
+ &crtc_state->infoframes.vsc);
+}
+
+void intel_dp_compute_psr_vsc_sdp(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state,
+ struct drm_dp_vsc_sdp *vsc)
+{
+ vsc->sdp_type = DP_SDP_VSC;
+
+ if (crtc_state->has_psr2) {
+ if (intel_dp->psr.colorimetry_support &&
+ intel_dp_needs_vsc_sdp(crtc_state, conn_state)) {
+ /* [PSR2, +Colorimetry] */
+ intel_dp_compute_vsc_colorimetry(crtc_state, conn_state,
+ vsc);
+ } else {
+ /*
+ * [PSR2, -Colorimetry]
+ * Prepare VSC Header for SU as per eDP 1.4 spec, Table 6-11
+ * 3D stereo + PSR/PSR2 + Y-coordinate.
+ */
+ vsc->revision = 0x4;
+ vsc->length = 0xe;
+ }
+ } else {
+ /*
+ * [PSR1]
+ * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118
+ * VSC SDP supporting 3D stereo + PSR (applies to eDP v1.3 or
+ * higher).
+ */
+ vsc->revision = 0x2;
+ vsc->length = 0x8;
+ }
+}
+
+static void
+intel_dp_compute_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ int ret;
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct hdmi_drm_infoframe *drm_infoframe = &crtc_state->infoframes.drm.drm;
+
+ if (!conn_state->hdr_output_metadata)
+ return;
+
+ ret = drm_hdmi_infoframe_set_hdr_metadata(drm_infoframe, conn_state);
+
+ if (ret) {
+ drm_dbg_kms(&dev_priv->drm, "couldn't set HDR metadata in infoframe\n");
+ return;
+ }
+
+ crtc_state->infoframes.enable |=
+ intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA);
+}
+
+static bool cpu_transcoder_has_drrs(struct drm_i915_private *i915,
+ enum transcoder cpu_transcoder)
+{
+ if (HAS_DOUBLE_BUFFERED_M_N(i915))
+ return true;
+
+ return intel_cpu_transcoder_has_m2_n2(i915, cpu_transcoder);
+}
+
+static bool can_enable_drrs(struct intel_connector *connector,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_display_mode *downclock_mode)
+{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+
+ if (pipe_config->vrr.enable)
+ return false;
+
+ /*
+ * DRRS and PSR can't be enabled together, so give preference to PSR
+ * as it allows more power savings by completely shutting down the
+ * display. To guarantee this, intel_drrs_compute_config() must be
+ * called after intel_psr_compute_config().
+ */
+ if (pipe_config->has_psr)
+ return false;
+
+ /* FIXME missing FDI M2/N2 etc. */
+ if (pipe_config->has_pch_encoder)
+ return false;
+
+ if (!cpu_transcoder_has_drrs(i915, pipe_config->cpu_transcoder))
+ return false;
+
+ return downclock_mode &&
+ intel_panel_drrs_type(connector) == DRRS_TYPE_SEAMLESS;
+}
+
+static void
+intel_dp_drrs_compute_config(struct intel_connector *connector,
+ struct intel_crtc_state *pipe_config,
+ int output_bpp)
+{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ const struct drm_display_mode *downclock_mode =
+ intel_panel_downclock_mode(connector, &pipe_config->hw.adjusted_mode);
+ int pixel_clock;
+
+ if (has_seamless_m_n(connector))
+ pipe_config->seamless_m_n = true;
+
+ if (!can_enable_drrs(connector, pipe_config, downclock_mode)) {
+ if (intel_cpu_transcoder_has_m2_n2(i915, pipe_config->cpu_transcoder))
+ intel_zero_m_n(&pipe_config->dp_m2_n2);
+ return;
+ }
+
+ if (IS_IRONLAKE(i915) || IS_SANDYBRIDGE(i915) || IS_IVYBRIDGE(i915))
+ pipe_config->msa_timing_delay = connector->panel.vbt.edp.drrs_msa_timing_delay;
+
+ pipe_config->has_drrs = true;
+
+ pixel_clock = downclock_mode->clock;
+ if (pipe_config->splitter.enable)
+ pixel_clock /= pipe_config->splitter.link_count;
+
+ intel_link_compute_m_n(output_bpp, pipe_config->lane_count, pixel_clock,
+ pipe_config->port_clock, &pipe_config->dp_m2_n2,
+ pipe_config->fec_enable);
+
+ /* FIXME: abstract this better */
+ if (pipe_config->splitter.enable)
+ pipe_config->dp_m2_n2.data_m *= pipe_config->splitter.link_count;
+}
+
+static bool intel_dp_has_audio(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ const struct intel_digital_connector_state *intel_conn_state =
+ to_intel_digital_connector_state(conn_state);
+
+ if (!intel_dp_port_has_audio(i915, encoder->port))
+ return false;
+
+ if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
+ return intel_dp->has_audio;
+ else
+ return intel_conn_state->force_audio == HDMI_AUDIO_ON;
+}
+
+static int
+intel_dp_compute_output_format(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ bool respect_downstream_limits)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_connector *connector = intel_dp->attached_connector;
+ const struct drm_display_info *info = &connector->base.display_info;
+ const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
+ bool ycbcr_420_only;
+ int ret;
+
+ ycbcr_420_only = drm_mode_is_420_only(info, adjusted_mode);
+
+ crtc_state->output_format = intel_dp_output_format(connector, ycbcr_420_only);
+
+ if (ycbcr_420_only && !intel_dp_is_ycbcr420(intel_dp, crtc_state)) {
+ drm_dbg_kms(&i915->drm,
+ "YCbCr 4:2:0 mode but YCbCr 4:2:0 output not possible. Falling back to RGB.\n");
+ crtc_state->output_format = INTEL_OUTPUT_FORMAT_RGB;
+ }
+
+ ret = intel_dp_compute_link_config(encoder, crtc_state, conn_state,
+ respect_downstream_limits);
+ if (ret) {
+ if (intel_dp_is_ycbcr420(intel_dp, crtc_state) ||
+ !connector->base.ycbcr_420_allowed ||
+ !drm_mode_is_420_also(info, adjusted_mode))
+ return ret;
+
+ crtc_state->output_format = intel_dp_output_format(connector, true);
+ ret = intel_dp_compute_link_config(encoder, crtc_state, conn_state,
+ respect_downstream_limits);
+ }
+
+ return ret;
+}
+
+int
+intel_dp_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ const struct drm_display_mode *fixed_mode;
+ struct intel_connector *connector = intel_dp->attached_connector;
+ int ret = 0, output_bpp;
+
+ if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && encoder->port != PORT_A)
+ pipe_config->has_pch_encoder = true;
+
+ pipe_config->has_audio = intel_dp_has_audio(encoder, pipe_config, conn_state);
+
+ fixed_mode = intel_panel_fixed_mode(connector, adjusted_mode);
+ if (intel_dp_is_edp(intel_dp) && fixed_mode) {
+ ret = intel_panel_compute_config(connector, adjusted_mode);
+ if (ret)
+ return ret;
+ }
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return -EINVAL;
+
+ if (HAS_GMCH(dev_priv) &&
+ adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
+ return -EINVAL;
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
+ return -EINVAL;
+
+ if (intel_dp_hdisplay_bad(dev_priv, adjusted_mode->crtc_hdisplay))
+ return -EINVAL;
+
+ /*
+ * Try to respect downstream TMDS clock limits first; if that fails,
+ * assume the user might know something we don't.
+ */
+ ret = intel_dp_compute_output_format(encoder, pipe_config, conn_state, true);
+ if (ret)
+ ret = intel_dp_compute_output_format(encoder, pipe_config, conn_state, false);
+ if (ret)
+ return ret;
+
+ if ((intel_dp_is_edp(intel_dp) && fixed_mode) ||
+ pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) {
+ ret = intel_panel_fitting(pipe_config, conn_state);
+ if (ret)
+ return ret;
+ }
+
+ pipe_config->limited_color_range =
+ intel_dp_limited_color_range(pipe_config, conn_state);
+
+ if (pipe_config->dsc.compression_enable)
+ output_bpp = pipe_config->dsc.compressed_bpp;
+ else
+ output_bpp = intel_dp_output_bpp(pipe_config->output_format,
+ pipe_config->pipe_bpp);
+
+ if (intel_dp->mso_link_count) {
+ int n = intel_dp->mso_link_count;
+ int overlap = intel_dp->mso_pixel_overlap;
+
+ pipe_config->splitter.enable = true;
+ pipe_config->splitter.link_count = n;
+ pipe_config->splitter.pixel_overlap = overlap;
+
+ drm_dbg_kms(&dev_priv->drm, "MSO link count %d, pixel overlap %d\n",
+ n, overlap);
+
+ adjusted_mode->crtc_hdisplay = adjusted_mode->crtc_hdisplay / n + overlap;
+ adjusted_mode->crtc_hblank_start = adjusted_mode->crtc_hblank_start / n + overlap;
+ adjusted_mode->crtc_hblank_end = adjusted_mode->crtc_hblank_end / n + overlap;
+ adjusted_mode->crtc_hsync_start = adjusted_mode->crtc_hsync_start / n + overlap;
+ adjusted_mode->crtc_hsync_end = adjusted_mode->crtc_hsync_end / n + overlap;
+ adjusted_mode->crtc_htotal = adjusted_mode->crtc_htotal / n + overlap;
+ adjusted_mode->crtc_clock /= n;
+ }
+
+ intel_link_compute_m_n(output_bpp,
+ pipe_config->lane_count,
+ adjusted_mode->crtc_clock,
+ pipe_config->port_clock,
+ &pipe_config->dp_m_n,
+ pipe_config->fec_enable);
+
+ /* FIXME: abstract this better */
+ if (pipe_config->splitter.enable)
+ pipe_config->dp_m_n.data_m *= pipe_config->splitter.link_count;
+
+ if (!HAS_DDI(dev_priv))
+ g4x_dp_set_clock(encoder, pipe_config);
+
+ intel_vrr_compute_config(pipe_config, conn_state);
+ intel_psr_compute_config(intel_dp, pipe_config, conn_state);
+ intel_dp_drrs_compute_config(connector, pipe_config, output_bpp);
+ intel_dp_compute_vsc_sdp(intel_dp, pipe_config, conn_state);
+ intel_dp_compute_hdr_metadata_infoframe_sdp(intel_dp, pipe_config, conn_state);
+
+ return 0;
+}
+
+void intel_dp_set_link_params(struct intel_dp *intel_dp,
+ int link_rate, int lane_count)
+{
+ memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
+ intel_dp->link_trained = false;
+ intel_dp->link_rate = link_rate;
+ intel_dp->lane_count = lane_count;
+}
+
+static void intel_dp_reset_max_link_params(struct intel_dp *intel_dp)
+{
+ intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp);
+ intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
+}
+
+/* Enable backlight PWM and backlight PP control. */
+void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(conn_state->best_encoder));
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+ if (!intel_dp_is_edp(intel_dp))
+ return;
+
+ drm_dbg_kms(&i915->drm, "\n");
+
+ intel_backlight_enable(crtc_state, conn_state);
+ intel_pps_backlight_on(intel_dp);
+}
+
+/* Disable backlight PP control and backlight PWM. */
+void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(old_conn_state->best_encoder));
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+ if (!intel_dp_is_edp(intel_dp))
+ return;
+
+ drm_dbg_kms(&i915->drm, "\n");
+
+ intel_pps_backlight_off(intel_dp);
+ intel_backlight_disable(old_conn_state);
+}
+
+static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp)
+{
+ /*
+ * DPCD 1.2+ should support BRANCH_DEVICE_CTRL, and thus
+ * be capable of signalling downstream hpd with a long pulse.
+ * Whether or not that means D3 is safe to use is not clear,
+ * but let's assume so until proven otherwise.
+ *
+ * FIXME should really check all downstream ports...
+ */
+ return intel_dp->dpcd[DP_DPCD_REV] == 0x11 &&
+ drm_dp_is_branch(intel_dp->dpcd) &&
+ intel_dp->downstream_ports[0] & DP_DS_PORT_HPD;
+}
+
+void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ bool enable)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ int ret;
+
+ if (!crtc_state->dsc.compression_enable)
+ return;
+
+ ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_DSC_ENABLE,
+ enable ? DP_DECOMPRESSION_EN : 0);
+ if (ret < 0)
+ drm_dbg_kms(&i915->drm,
+ "Failed to %s sink decompression state\n",
+ str_enable_disable(enable));
+}
+
+static void
+intel_edp_init_source_oui(struct intel_dp *intel_dp, bool careful)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ u8 oui[] = { 0x00, 0xaa, 0x01 };
+ u8 buf[3] = { 0 };
+
+ /*
+ * During driver init, we want to be careful and avoid changing the source OUI if it's
+ * already set to what we want, so as to avoid clearing any state by accident
+ */
+ if (careful) {
+ if (drm_dp_dpcd_read(&intel_dp->aux, DP_SOURCE_OUI, buf, sizeof(buf)) < 0)
+ drm_err(&i915->drm, "Failed to read source OUI\n");
+
+ if (memcmp(oui, buf, sizeof(oui)) == 0)
+ return;
+ }
+
+ if (drm_dp_dpcd_write(&intel_dp->aux, DP_SOURCE_OUI, oui, sizeof(oui)) < 0)
+ drm_err(&i915->drm, "Failed to write source OUI\n");
+
+ intel_dp->last_oui_write = jiffies;
+}
+
+void intel_dp_wait_source_oui(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+ drm_dbg_kms(&i915->drm, "Performing OUI wait\n");
+ wait_remaining_ms_from_jiffies(intel_dp->last_oui_write, 30);
+}
+
+/* If the device supports it, try to set the power state appropriately */
+void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode)
+{
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ int ret, i;
+
+ /* Should have a valid DPCD by this point */
+ if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
+ return;
+
+ if (mode != DP_SET_POWER_D0) {
+ if (downstream_hpd_needs_d0(intel_dp))
+ return;
+
+ ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, mode);
+ } else {
+ struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
+
+ lspcon_resume(dp_to_dig_port(intel_dp));
+
+ /* Write the source OUI as early as possible */
+ if (intel_dp_is_edp(intel_dp))
+ intel_edp_init_source_oui(intel_dp, false);
+
+ /*
+ * When turning on, we need to retry a few times, with a ~1ms
+ * delay between attempts, to give the sink time to wake up.
+ */
+ for (i = 0; i < 3; i++) {
+ ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, mode);
+ if (ret == 1)
+ break;
+ msleep(1);
+ }
+
+ if (ret == 1 && lspcon->active)
+ lspcon_wait_pcon_mode(lspcon);
+ }
+
+ if (ret != 1)
+ drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Set power to %s failed\n",
+ encoder->base.base.id, encoder->base.name,
+ mode == DP_SET_POWER_D0 ? "D0" : "D3");
+}
+
+static bool
+intel_dp_get_dpcd(struct intel_dp *intel_dp);
+
+/**
+ * intel_dp_sync_state - sync the encoder state during init/resume
+ * @encoder: intel encoder to sync
+ * @crtc_state: state for the CRTC connected to the encoder
+ *
+ * Sync any state stored in the encoder wrt. HW state during driver init
+ * and system resume.
+ */
+void intel_dp_sync_state(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ if (!crtc_state)
+ return;
+
+ /*
+ * Don't clobber DPCD if it's been already read out during output
+ * setup (eDP) or detect.
+ */
+ if (intel_dp->dpcd[DP_DPCD_REV] == 0)
+ intel_dp_get_dpcd(intel_dp);
+
+ intel_dp_reset_max_link_params(intel_dp);
+}
+
+bool intel_dp_initial_fastset_check(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ /*
+ * If the BIOS has set an unsupported or non-standard link rate for
+ * some reason, force an encoder recompute and full modeset.
+ */
+ if (intel_dp_rate_index(intel_dp->source_rates, intel_dp->num_source_rates,
+ crtc_state->port_clock) < 0) {
+ drm_dbg_kms(&i915->drm, "Forcing full modeset due to unsupported link rate\n");
+ crtc_state->uapi.connectors_changed = true;
+ return false;
+ }
+
+ /*
+ * FIXME hack to force full modeset when DSC is being used.
+ *
+ * As long as we do not have full state readout and config comparison
+ * of crtc_state->dsc, we have no way to ensure reliable fastset.
+ * Remove once we have readout for DSC.
+ */
+ if (crtc_state->dsc.compression_enable) {
+ drm_dbg_kms(&i915->drm, "Forcing full modeset due to DSC being enabled\n");
+ crtc_state->uapi.mode_changed = true;
+ return false;
+ }
+
+ if (CAN_PSR(intel_dp)) {
+ drm_dbg_kms(&i915->drm, "Forcing full modeset to compute PSR state\n");
+ crtc_state->uapi.mode_changed = true;
+ return false;
+ }
+
+ return true;
+}
+
+static void intel_dp_get_pcon_dsc_cap(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+ /* Clear the cached register set to avoid using stale values */
+ memset(intel_dp->pcon_dsc_dpcd, 0, sizeof(intel_dp->pcon_dsc_dpcd));
+
+ if (drm_dp_dpcd_read(&intel_dp->aux, DP_PCON_DSC_ENCODER,
+ intel_dp->pcon_dsc_dpcd,
+ sizeof(intel_dp->pcon_dsc_dpcd)) < 0)
+ drm_err(&i915->drm, "Failed to read DPCD register 0x%x\n",
+ DP_PCON_DSC_ENCODER);
+
+ drm_dbg_kms(&i915->drm, "PCON ENCODER DSC DPCD: %*ph\n",
+ (int)sizeof(intel_dp->pcon_dsc_dpcd), intel_dp->pcon_dsc_dpcd);
+}
+
+static int intel_dp_pcon_get_frl_mask(u8 frl_bw_mask)
+{
+ int bw_gbps[] = {9, 18, 24, 32, 40, 48};
+ int i;
+
+ for (i = ARRAY_SIZE(bw_gbps) - 1; i >= 0; i--) {
+ if (frl_bw_mask & (1 << i))
+ return bw_gbps[i];
+ }
+ return 0;
+}
+
+static int intel_dp_pcon_set_frl_mask(int max_frl)
+{
+ switch (max_frl) {
+ case 48:
+ return DP_PCON_FRL_BW_MASK_48GBPS;
+ case 40:
+ return DP_PCON_FRL_BW_MASK_40GBPS;
+ case 32:
+ return DP_PCON_FRL_BW_MASK_32GBPS;
+ case 24:
+ return DP_PCON_FRL_BW_MASK_24GBPS;
+ case 18:
+ return DP_PCON_FRL_BW_MASK_18GBPS;
+ case 9:
+ return DP_PCON_FRL_BW_MASK_9GBPS;
+ }
+
+ return 0;
+}
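+
+/*
+ * The two helpers above are near-inverses: intel_dp_pcon_set_frl_mask(40)
+ * yields DP_PCON_FRL_BW_MASK_40GBPS, while intel_dp_pcon_get_frl_mask()
+ * reports the highest bandwidth bit set, so e.g. a trained mask of
+ * 24G | 9G reads back as 24 Gbps.
+ */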
+
+static int intel_dp_hdmi_sink_max_frl(struct intel_dp *intel_dp)
+{
+ struct intel_connector *intel_connector = intel_dp->attached_connector;
+ struct drm_connector *connector = &intel_connector->base;
+ int max_frl_rate;
+ int max_lanes, rate_per_lane;
+ int max_dsc_lanes, dsc_rate_per_lane;
+
+ max_lanes = connector->display_info.hdmi.max_lanes;
+ rate_per_lane = connector->display_info.hdmi.max_frl_rate_per_lane;
+ max_frl_rate = max_lanes * rate_per_lane;
+
+ if (connector->display_info.hdmi.dsc_cap.v_1p2) {
+ max_dsc_lanes = connector->display_info.hdmi.dsc_cap.max_lanes;
+ dsc_rate_per_lane = connector->display_info.hdmi.dsc_cap.max_frl_rate_per_lane;
+ if (max_dsc_lanes && dsc_rate_per_lane)
+ max_frl_rate = min(max_frl_rate, max_dsc_lanes * dsc_rate_per_lane);
+ }
+
+ return max_frl_rate;
+}
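+
+/*
+ * For instance (illustrative sink caps), 4 FRL lanes at 12 Gbps each give
+ * 48 Gbps, but if the sink's DSC 1.2 caps only allow 4 lanes at 10 Gbps
+ * the result is clamped to min(48, 40) = 40 Gbps.
+ */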
+
+static bool
+intel_dp_pcon_is_frl_trained(struct intel_dp *intel_dp,
+ u8 max_frl_bw_mask, u8 *frl_trained_mask)
+{
+ if (drm_dp_pcon_hdmi_link_active(&intel_dp->aux) &&
+ drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, frl_trained_mask) == DP_PCON_HDMI_MODE_FRL &&
+ *frl_trained_mask >= max_frl_bw_mask)
+ return true;
+
+ return false;
+}
+
+static int intel_dp_pcon_start_frl_training(struct intel_dp *intel_dp)
+{
+#define TIMEOUT_FRL_READY_MS 500
+#define TIMEOUT_HDMI_LINK_ACTIVE_MS 1000
+
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ int max_frl_bw, max_pcon_frl_bw, max_edid_frl_bw, ret;
+ u8 max_frl_bw_mask = 0, frl_trained_mask;
+ bool is_active;
+
+ max_pcon_frl_bw = intel_dp->dfp.pcon_max_frl_bw;
+ drm_dbg(&i915->drm, "PCON max rate = %d Gbps\n", max_pcon_frl_bw);
+
+ max_edid_frl_bw = intel_dp_hdmi_sink_max_frl(intel_dp);
+ drm_dbg(&i915->drm, "Sink max rate from EDID = %d Gbps\n", max_edid_frl_bw);
+
+ max_frl_bw = min(max_edid_frl_bw, max_pcon_frl_bw);
+
+ if (max_frl_bw <= 0)
+ return -EINVAL;
+
+ max_frl_bw_mask = intel_dp_pcon_set_frl_mask(max_frl_bw);
+ drm_dbg(&i915->drm, "MAX_FRL_BW_MASK = %u\n", max_frl_bw_mask);
+
+ if (intel_dp_pcon_is_frl_trained(intel_dp, max_frl_bw_mask, &frl_trained_mask))
+ goto frl_trained;
+
+ ret = drm_dp_pcon_frl_prepare(&intel_dp->aux, false);
+ if (ret < 0)
+ return ret;
+ /* Wait for PCON to be FRL Ready */
+ wait_for(is_active = drm_dp_pcon_is_frl_ready(&intel_dp->aux) == true, TIMEOUT_FRL_READY_MS);
+
+ if (!is_active)
+ return -ETIMEDOUT;
+
+ ret = drm_dp_pcon_frl_configure_1(&intel_dp->aux, max_frl_bw,
+ DP_PCON_ENABLE_SEQUENTIAL_LINK);
+ if (ret < 0)
+ return ret;
+ ret = drm_dp_pcon_frl_configure_2(&intel_dp->aux, max_frl_bw_mask,
+ DP_PCON_FRL_LINK_TRAIN_NORMAL);
+ if (ret < 0)
+ return ret;
+ ret = drm_dp_pcon_frl_enable(&intel_dp->aux);
+ if (ret < 0)
+ return ret;
+ /*
+ * Wait for FRL training to be completed and
+ * check if the HDMI link is up and active.
+ */
+ wait_for(is_active =
+ intel_dp_pcon_is_frl_trained(intel_dp, max_frl_bw_mask, &frl_trained_mask),
+ TIMEOUT_HDMI_LINK_ACTIVE_MS);
+
+ if (!is_active)
+ return -ETIMEDOUT;
+
+frl_trained:
+ drm_dbg(&i915->drm, "FRL_TRAINED_MASK = %u\n", frl_trained_mask);
+ intel_dp->frl.trained_rate_gbps = intel_dp_pcon_get_frl_mask(frl_trained_mask);
+ intel_dp->frl.is_trained = true;
+ drm_dbg(&i915->drm, "FRL trained with : %d Gbps\n", intel_dp->frl.trained_rate_gbps);
+
+ return 0;
+}
+
+static bool intel_dp_is_hdmi_2_1_sink(struct intel_dp *intel_dp)
+{
+ if (drm_dp_is_branch(intel_dp->dpcd) &&
+ intel_dp->has_hdmi_sink &&
+ intel_dp_hdmi_sink_max_frl(intel_dp) > 0)
+ return true;
+
+ return false;
+}
+
+static
+int intel_dp_pcon_set_tmds_mode(struct intel_dp *intel_dp)
+{
+ int ret;
+ u8 buf = 0;
+
+ /* Set PCON source control mode */
+ buf |= DP_PCON_ENABLE_SOURCE_CTL_MODE;
+
+ ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, buf);
+ if (ret < 0)
+ return ret;
+
+ /* Set HDMI LINK ENABLE */
+ buf |= DP_PCON_ENABLE_HDMI_LINK;
+ ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, buf);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+void intel_dp_check_frl_training(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ /*
+ * Always go for FRL training if:
+ * - PCON supports SRC_CTL_MODE (VESA DP2.0-HDMI2.1 PCON Spec Draft-1 Sec-7)
+ * - sink is HDMI 2.1
+ */
+ if (!(intel_dp->downstream_ports[2] & DP_PCON_SOURCE_CTL_MODE) ||
+ !intel_dp_is_hdmi_2_1_sink(intel_dp) ||
+ intel_dp->frl.is_trained)
+ return;
+
+ if (intel_dp_pcon_start_frl_training(intel_dp) < 0) {
+ int ret, mode;
+
+ drm_dbg(&dev_priv->drm, "Couldn't set FRL mode, continuing with TMDS mode\n");
+ ret = intel_dp_pcon_set_tmds_mode(intel_dp);
+ mode = drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, NULL);
+
+ if (ret < 0 || mode != DP_PCON_HDMI_MODE_TMDS)
+ drm_dbg(&dev_priv->drm, "Issue with PCON, cannot set TMDS mode\n");
+ } else {
+ drm_dbg(&dev_priv->drm, "FRL training completed\n");
+ }
+}
+
+static int
+intel_dp_pcon_dsc_enc_slice_height(const struct intel_crtc_state *crtc_state)
+{
+ int vactive = crtc_state->hw.adjusted_mode.vdisplay;
+
+ return intel_hdmi_dsc_get_slice_height(vactive);
+}
+
+static int
+intel_dp_pcon_dsc_enc_slices(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_connector *intel_connector = intel_dp->attached_connector;
+ struct drm_connector *connector = &intel_connector->base;
+ int hdmi_throughput = connector->display_info.hdmi.dsc_cap.clk_per_slice;
+ int hdmi_max_slices = connector->display_info.hdmi.dsc_cap.max_slices;
+ int pcon_max_slices = drm_dp_pcon_dsc_max_slices(intel_dp->pcon_dsc_dpcd);
+ int pcon_max_slice_width = drm_dp_pcon_dsc_max_slice_width(intel_dp->pcon_dsc_dpcd);
+
+ return intel_hdmi_dsc_get_num_slices(crtc_state, pcon_max_slices,
+ pcon_max_slice_width,
+ hdmi_max_slices, hdmi_throughput);
+}
+
+static int
+intel_dp_pcon_dsc_enc_bpp(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ int num_slices, int slice_width)
+{
+ struct intel_connector *intel_connector = intel_dp->attached_connector;
+ struct drm_connector *connector = &intel_connector->base;
+ int output_format = crtc_state->output_format;
+ bool hdmi_all_bpp = connector->display_info.hdmi.dsc_cap.all_bpp;
+ int pcon_fractional_bpp = drm_dp_pcon_dsc_bpp_incr(intel_dp->pcon_dsc_dpcd);
+ int hdmi_max_chunk_bytes =
+ connector->display_info.hdmi.dsc_cap.total_chunk_kbytes * 1024;
+
+ return intel_hdmi_dsc_get_bpp(pcon_fractional_bpp, slice_width,
+ num_slices, output_format, hdmi_all_bpp,
+ hdmi_max_chunk_bytes);
+}
+
+void
+intel_dp_pcon_dsc_configure(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ u8 pps_param[6];
+ int slice_height;
+ int slice_width;
+ int num_slices;
+ int bits_per_pixel;
+ int ret;
+ struct intel_connector *intel_connector = intel_dp->attached_connector;
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct drm_connector *connector;
+ bool hdmi_is_dsc_1_2;
+
+ if (!intel_dp_is_hdmi_2_1_sink(intel_dp))
+ return;
+
+ if (!intel_connector)
+ return;
+ connector = &intel_connector->base;
+ hdmi_is_dsc_1_2 = connector->display_info.hdmi.dsc_cap.v_1p2;
+
+ if (!drm_dp_pcon_enc_is_dsc_1_2(intel_dp->pcon_dsc_dpcd) ||
+ !hdmi_is_dsc_1_2)
+ return;
+
+ slice_height = intel_dp_pcon_dsc_enc_slice_height(crtc_state);
+ if (!slice_height)
+ return;
+
+ num_slices = intel_dp_pcon_dsc_enc_slices(intel_dp, crtc_state);
+ if (!num_slices)
+ return;
+
+ slice_width = DIV_ROUND_UP(crtc_state->hw.adjusted_mode.hdisplay,
+ num_slices);
+
+ bits_per_pixel = intel_dp_pcon_dsc_enc_bpp(intel_dp, crtc_state,
+ num_slices, slice_width);
+ if (!bits_per_pixel)
+ return;
+
+ pps_param[0] = slice_height & 0xFF;
+ pps_param[1] = slice_height >> 8;
+ pps_param[2] = slice_width & 0xFF;
+ pps_param[3] = slice_width >> 8;
+ pps_param[4] = bits_per_pixel & 0xFF;
+ pps_param[5] = (bits_per_pixel >> 8) & 0x3;
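+ /*
+ * E.g. (illustrative values) slice_height = 108 (0x6c), slice_width =
+ * 1920 (0x780) and bits_per_pixel = 24 pack as
+ * { 0x6c, 0x00, 0x80, 0x07, 0x18, 0x00 }: little-endian 16-bit values,
+ * with bits_per_pixel limited to a 10-bit field.
+ */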
+
+ ret = drm_dp_pcon_pps_override_param(&intel_dp->aux, pps_param);
+ if (ret < 0)
+ drm_dbg_kms(&i915->drm, "Failed to set pcon DSC\n");
+}
+
+void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ u8 tmp;
+
+ if (intel_dp->dpcd[DP_DPCD_REV] < 0x13)
+ return;
+
+ if (!drm_dp_is_branch(intel_dp->dpcd))
+ return;
+
+ tmp = intel_dp->has_hdmi_sink ?
+ DP_HDMI_DVI_OUTPUT_CONFIG : 0;
+
+ if (drm_dp_dpcd_writeb(&intel_dp->aux,
+ DP_PROTOCOL_CONVERTER_CONTROL_0, tmp) != 1)
+ drm_dbg_kms(&i915->drm, "Failed to %s protocol converter HDMI mode\n",
+ str_enable_disable(intel_dp->has_hdmi_sink));
+
+ tmp = crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444 &&
+ intel_dp->dfp.ycbcr_444_to_420 ? DP_CONVERSION_TO_YCBCR420_ENABLE : 0;
+
+ if (drm_dp_dpcd_writeb(&intel_dp->aux,
+ DP_PROTOCOL_CONVERTER_CONTROL_1, tmp) != 1)
+ drm_dbg_kms(&i915->drm,
+ "Failed to %s protocol converter YCbCr 4:2:0 conversion mode\n",
+ str_enable_disable(intel_dp->dfp.ycbcr_444_to_420));
+
+ tmp = intel_dp->dfp.rgb_to_ycbcr ?
+ DP_CONVERSION_BT709_RGB_YCBCR_ENABLE : 0;
+
+ if (drm_dp_pcon_convert_rgb_to_ycbcr(&intel_dp->aux, tmp) < 0)
+ drm_dbg_kms(&i915->drm,
+ "Failed to %s protocol converter RGB->YCbCr conversion mode\n",
+ str_enable_disable(tmp));
+}
+
+bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
+{
+ u8 dprx = 0;
+
+ if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST,
+ &dprx) != 1)
+ return false;
+ return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED;
+}
+
+static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+ /*
+ * Clear the cached register set to avoid using stale values
+ * for the sinks that do not support DSC.
+ */
+ memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));
+
+ /* Clear fec_capable to avoid using stale values */
+ intel_dp->fec_capable = 0;
+
+ /* Cache the DSC DPCD if eDP or DP rev >= 1.4 */
+ if (intel_dp->dpcd[DP_DPCD_REV] >= 0x14 ||
+ intel_dp->edp_dpcd[0] >= DP_EDP_14) {
+ if (drm_dp_dpcd_read(&intel_dp->aux, DP_DSC_SUPPORT,
+ intel_dp->dsc_dpcd,
+ sizeof(intel_dp->dsc_dpcd)) < 0)
+ drm_err(&i915->drm,
+ "Failed to read DPCD register 0x%x\n",
+ DP_DSC_SUPPORT);
+
+ drm_dbg_kms(&i915->drm, "DSC DPCD: %*ph\n",
+ (int)sizeof(intel_dp->dsc_dpcd),
+ intel_dp->dsc_dpcd);
+
+ /* FEC is supported only on DP 1.4 */
+ if (!intel_dp_is_edp(intel_dp) &&
+ drm_dp_dpcd_readb(&intel_dp->aux, DP_FEC_CAPABILITY,
+ &intel_dp->fec_capable) < 0)
+ drm_err(&i915->drm,
+ "Failed to read FEC DPCD register\n");
+
+ drm_dbg_kms(&i915->drm, "FEC CAPABILITY: %x\n",
+ intel_dp->fec_capable);
+ }
+}
+
+static void intel_edp_mso_mode_fixup(struct intel_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ int n = intel_dp->mso_link_count;
+ int overlap = intel_dp->mso_pixel_overlap;
+
+ if (!mode || !n)
+ return;
+
+ mode->hdisplay = (mode->hdisplay - overlap) * n;
+ mode->hsync_start = (mode->hsync_start - overlap) * n;
+ mode->hsync_end = (mode->hsync_end - overlap) * n;
+ mode->htotal = (mode->htotal - overlap) * n;
+ mode->clock *= n;
+
+ drm_mode_set_name(mode);
+
+ drm_dbg_kms(&i915->drm,
+ "[CONNECTOR:%d:%s] using generated MSO mode: " DRM_MODE_FMT "\n",
+ connector->base.base.id, connector->base.name,
+ DRM_MODE_ARG(mode));
+}
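+
+/*
+ * Example: a 2-link MSO panel whose EDID describes a 960x1080 segment
+ * with zero pixel overlap is fixed up to the full 1920x1080 mode, with
+ * all horizontal timings and the pixel clock scaled by the link count.
+ */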
+
+void intel_edp_fixup_vbt_bpp(struct intel_encoder *encoder, int pipe_bpp)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_connector *connector = intel_dp->attached_connector;
+
+ if (connector->panel.vbt.edp.bpp && pipe_bpp > connector->panel.vbt.edp.bpp) {
+ /*
+ * This is a big fat ugly hack.
+ *
+ * Some machines in UEFI boot mode provide us a VBT that has 18
+ * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
+ * unknown we fail to light up. Yet the same BIOS boots up with
+ * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
+ * max, not what it tells us to use.
+ *
+ * Note: This will still be broken if the eDP panel is not lit
+ * up by the BIOS, and thus we can't get the mode at module
+ * load.
+ */
+ drm_dbg_kms(&dev_priv->drm,
+ "pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
+ pipe_bpp, connector->panel.vbt.edp.bpp);
+ connector->panel.vbt.edp.bpp = pipe_bpp;
+ }
+}
+
+static void intel_edp_mso_init(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_connector *connector = intel_dp->attached_connector;
+ struct drm_display_info *info = &connector->base.display_info;
+ u8 mso;
+
+ if (intel_dp->edp_dpcd[0] < DP_EDP_14)
+ return;
+
+ if (drm_dp_dpcd_readb(&intel_dp->aux, DP_EDP_MSO_LINK_CAPABILITIES, &mso) != 1) {
+ drm_err(&i915->drm, "Failed to read MSO cap\n");
+ return;
+ }
+
+ /* Valid configurations are SST or MSO 2x1, 2x2, 4x1 */
+ mso &= DP_EDP_MSO_NUMBER_OF_LINKS_MASK;
+ if (mso % 2 || mso > drm_dp_max_lane_count(intel_dp->dpcd)) {
+ drm_err(&i915->drm, "Invalid MSO link count cap %u\n", mso);
+ mso = 0;
+ }
+
+ if (mso) {
+ drm_dbg_kms(&i915->drm, "Sink MSO %ux%u configuration, pixel overlap %u\n",
+ mso, drm_dp_max_lane_count(intel_dp->dpcd) / mso,
+ info->mso_pixel_overlap);
+ if (!HAS_MSO(i915)) {
+ drm_err(&i915->drm, "No source MSO support, disabling\n");
+ mso = 0;
+ }
+ }
+
+ intel_dp->mso_link_count = mso;
+ intel_dp->mso_pixel_overlap = mso ? info->mso_pixel_overlap : 0;
+}
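+
+/*
+ * E.g. an MSO link count of 2 on a 4-lane eDP port describes a valid
+ * 2x2 configuration, while an odd value such as 3 is rejected above.
+ */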
+
+static bool
+intel_edp_init_dpcd(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
+
+ /* this function is meant to be called only once */
+ drm_WARN_ON(&dev_priv->drm, intel_dp->dpcd[DP_DPCD_REV] != 0);
+
+ if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd) != 0)
+ return false;
+
+ drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
+ drm_dp_is_branch(intel_dp->dpcd));
+
+ /*
+ * Read the eDP display control registers.
+ *
+ * Do this independent of DP_DPCD_DISPLAY_CONTROL_CAPABLE bit in
+ * DP_EDP_CONFIGURATION_CAP, because some buggy displays do not have it
+ * set, but require eDP 1.4+ detection (e.g. for supported link rates
+ * method). The display control registers should read zero if they're
+ * not supported anyway.
+ */
+ if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
+ intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
+ sizeof(intel_dp->edp_dpcd)) {
+ drm_dbg_kms(&dev_priv->drm, "eDP DPCD: %*ph\n",
+ (int)sizeof(intel_dp->edp_dpcd),
+ intel_dp->edp_dpcd);
+
+ intel_dp->use_max_params = intel_dp->edp_dpcd[0] < DP_EDP_14;
+ }
+
+ /*
+ * This has to be called after intel_dp->edp_dpcd is filled, PSR checks
+ * for SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1]
+ */
+ intel_psr_init_dpcd(intel_dp);
+
+ /* Clear the default sink rates */
+ intel_dp->num_sink_rates = 0;
+
+ /* Read the eDP 1.4+ supported link rates. */
+ if (intel_dp->edp_dpcd[0] >= DP_EDP_14) {
+ __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
+ int i;
+
+ drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
+ sink_rates, sizeof(sink_rates));
+
+ for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
+ int val = le16_to_cpu(sink_rates[i]);
+
+ if (val == 0)
+ break;
+
+ /*
+ * Value read multiplied by 200kHz gives the per-lane
+ * link rate in kHz. The source rates are, however,
+ * stored in terms of LS_Clk kHz. The full conversion
+ * back to symbols is
+ * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte)
+ */
+ intel_dp->sink_rates[i] = (val * 200) / 10;
+ }
+ intel_dp->num_sink_rates = i;
+ }
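+ /*
+ * E.g. a raw DP_SUPPORTED_LINK_RATES entry of 8100 corresponds to
+ * 8100 * 200 kHz = 1.62 GHz on the wire and is stored above as
+ * 8100 * 200 / 10 = 162000.
+ */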
+
+ /*
+ * Use DP_LINK_RATE_SET if DP_SUPPORTED_LINK_RATES are available,
+ * default to DP_MAX_LINK_RATE and DP_LINK_BW_SET otherwise.
+ */
+ if (intel_dp->num_sink_rates)
+ intel_dp->use_rate_select = true;
+ else
+ intel_dp_set_sink_rates(intel_dp);
+ intel_dp_set_max_sink_lane_count(intel_dp);
+
+ /* Read the eDP DSC DPCD registers */
+ if (DISPLAY_VER(dev_priv) >= 10)
+ intel_dp_get_dsc_sink_cap(intel_dp);
+
+ /*
+ * If needed, program our source OUI so we can make various Intel-specific AUX services
+ * available (such as HDR backlight controls)
+ */
+ intel_edp_init_source_oui(intel_dp, true);
+
+ return true;
+}
+
+static bool
+intel_dp_has_sink_count(struct intel_dp *intel_dp)
+{
+ if (!intel_dp->attached_connector)
+ return false;
+
+ return drm_dp_read_sink_count_cap(&intel_dp->attached_connector->base,
+ intel_dp->dpcd,
+ &intel_dp->desc);
+}
+
+static bool
+intel_dp_get_dpcd(struct intel_dp *intel_dp)
+{
+ int ret;
+
+ if (intel_dp_init_lttpr_and_dprx_caps(intel_dp) < 0)
+ return false;
+
+ /*
+ * Don't clobber cached eDP rates. Also skip re-reading
+ * the OUI/ID since we know it won't change.
+ */
+ if (!intel_dp_is_edp(intel_dp)) {
+ drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
+ drm_dp_is_branch(intel_dp->dpcd));
+
+ intel_dp_set_sink_rates(intel_dp);
+ intel_dp_set_max_sink_lane_count(intel_dp);
+ intel_dp_set_common_rates(intel_dp);
+ }
+
+ if (intel_dp_has_sink_count(intel_dp)) {
+ ret = drm_dp_read_sink_count(&intel_dp->aux);
+ if (ret < 0)
+ return false;
+
+ /*
+ * Sink count can change between short pulse HPD interrupts, hence
+ * a member variable in intel_dp tracks any changes between them.
+ */
+ intel_dp->sink_count = ret;
+
+ /*
+ * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
+ * a dongle is present but no display. Unless we need to know
+ * whether a dongle is present or not, we don't have to update
+ * downstream port information, so an early return here saves
+ * time from performing other operations which are not required.
+ */
+ if (!intel_dp->sink_count)
+ return false;
+ }
+
+ return drm_dp_read_downstream_info(&intel_dp->aux, intel_dp->dpcd,
+ intel_dp->downstream_ports) == 0;
+}
+
+static bool
+intel_dp_can_mst(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+ return i915->params.enable_dp_mst &&
+ intel_dp_mst_source_support(intel_dp) &&
+ drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd);
+}
+
+static void
+intel_dp_configure_mst(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_encoder *encoder =
+ &dp_to_dig_port(intel_dp)->base;
+ bool sink_can_mst = drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd);
+
+ drm_dbg_kms(&i915->drm,
+ "[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s\n",
+ encoder->base.base.id, encoder->base.name,
+ str_yes_no(intel_dp_mst_source_support(intel_dp)),
+ str_yes_no(sink_can_mst),
+ str_yes_no(i915->params.enable_dp_mst));
+
+ if (!intel_dp_mst_source_support(intel_dp))
+ return;
+
+ intel_dp->is_mst = sink_can_mst &&
+ i915->params.enable_dp_mst;
+
+ drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
+ intel_dp->is_mst);
+}
+
+static bool
+intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *esi)
+{
+ return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI, esi, 4) == 4;
+}
+
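+/*
+ * Note: the four ESI bytes read above map to DPCD 0x2002..0x2005
+ * (SINK_COUNT_ESI, DEVICE_SERVICE_IRQ_VECTOR_ESI0/1 and
+ * LINK_SERVICE_IRQ_VECTOR_ESI0); only the three IRQ vector bytes
+ * are written back below to ack the events.
+ */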
+static bool intel_dp_ack_sink_irq_esi(struct intel_dp *intel_dp, u8 esi[4])
+{
+ int retry;
+
+ for (retry = 0; retry < 3; retry++) {
+ if (drm_dp_dpcd_write(&intel_dp->aux, DP_SINK_COUNT_ESI + 1,
+ &esi[1], 3) == 3)
+ return true;
+ }
+
+ return false;
+}
+
+bool
+intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ /*
+ * As per DP 1.4a spec section 2.2.4.3 [MSA Field for Indication
+ * of Color Encoding Format and Content Color Gamut], in order to
+ * send YCbCr 4:2:0 or HDR BT.2020 signals we should use a DP VSC SDP.
+ */
+ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
+ return true;
+
+ switch (conn_state->colorspace) {
+ case DRM_MODE_COLORIMETRY_SYCC_601:
+ case DRM_MODE_COLORIMETRY_OPYCC_601:
+ case DRM_MODE_COLORIMETRY_BT2020_YCC:
+ case DRM_MODE_COLORIMETRY_BT2020_RGB:
+ case DRM_MODE_COLORIMETRY_BT2020_CYCC:
+ return true;
+ default:
+ break;
+ }
+
+ return false;
+}
+
+static ssize_t intel_dp_vsc_sdp_pack(const struct drm_dp_vsc_sdp *vsc,
+ struct dp_sdp *sdp, size_t size)
+{
+ size_t length = sizeof(struct dp_sdp);
+
+ if (size < length)
+ return -ENOSPC;
+
+ memset(sdp, 0, size);
+
+ /*
+ * Prepare VSC Header for SU as per DP 1.4a spec, Table 2-119
+ * VSC SDP Header Bytes
+ */
+ sdp->sdp_header.HB0 = 0; /* Secondary-Data Packet ID = 0 */
+ sdp->sdp_header.HB1 = vsc->sdp_type; /* Secondary-data Packet Type */
+ sdp->sdp_header.HB2 = vsc->revision; /* Revision Number */
+ sdp->sdp_header.HB3 = vsc->length; /* Number of Valid Data Bytes */
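+ /*
+ * For example (illustrative), in the revision 0x5 case handled
+ * below the header reads HB1 = 0x07 (DP_SDP_VSC), HB2 = 0x05,
+ * HB3 = 0x13, matching the checks in intel_dp_vsc_sdp_unpack().
+ */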
+
+ /*
+ * Only revision 0x5 supports Pixel Encoding/Colorimetry Format as
+ * per DP 1.4a spec.
+ */
+ if (vsc->revision != 0x5)
+ goto out;
+
+ /* VSC SDP Payload for DB16 through DB18 */
+ /* Pixel Encoding and Colorimetry Formats */
+ sdp->db[16] = (vsc->pixelformat & 0xf) << 4; /* DB16[7:4] */
+ sdp->db[16] |= vsc->colorimetry & 0xf; /* DB16[3:0] */
+
+ switch (vsc->bpc) {
+ case 6:
+ /* 6bpc: 0x0 */
+ break;
+ case 8:
+ sdp->db[17] = 0x1; /* DB17[3:0] */
+ break;
+ case 10:
+ sdp->db[17] = 0x2;
+ break;
+ case 12:
+ sdp->db[17] = 0x3;
+ break;
+ case 16:
+ sdp->db[17] = 0x4;
+ break;
+ default:
+ MISSING_CASE(vsc->bpc);
+ break;
+ }
+ /* Dynamic Range and Component Bit Depth */
+ if (vsc->dynamic_range == DP_DYNAMIC_RANGE_CTA)
+ sdp->db[17] |= 0x80; /* DB17[7] */
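+ /* e.g. 10 bpc with CTA dynamic range yields db[17] = 0x2 | 0x80 = 0x82 */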
+
+ /* Content Type */
+ sdp->db[18] = vsc->content_type & 0x7;
+
+out:
+ return length;
+}
+
+static ssize_t
+intel_dp_hdr_metadata_infoframe_sdp_pack(struct drm_i915_private *i915,
+ const struct hdmi_drm_infoframe *drm_infoframe,
+ struct dp_sdp *sdp,
+ size_t size)
+{
+ size_t length = sizeof(struct dp_sdp);
+ const int infoframe_size = HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE;
+ unsigned char buf[HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE];
+ ssize_t len;
+
+ if (size < length)
+ return -ENOSPC;
+
+ memset(sdp, 0, size);
+
+ len = hdmi_drm_infoframe_pack_only(drm_infoframe, buf, sizeof(buf));
+ if (len < 0) {
+ drm_dbg_kms(&i915->drm, "buffer size is smaller than hdr metadata infoframe\n");
+ return -ENOSPC;
+ }
+
+ if (len != infoframe_size) {
+ drm_dbg_kms(&i915->drm, "wrong static hdr metadata size\n");
+ return -ENOSPC;
+ }
+
+ /*
+ * Set up the infoframe SDP packet for HDR static metadata.
+ * Prepare the SDP header as per DP 1.4a spec,
+ * Table 2-100 and Table 2-101.
+ */
+
+ /* Secondary-Data Packet ID, 00h for non-Audio INFOFRAME */
+ sdp->sdp_header.HB0 = 0;
+ /*
+ * Packet Type 80h + Non-audio INFOFRAME Type value
+ * HDMI_INFOFRAME_TYPE_DRM: 0x87 (80h + InfoFrame Type 0x07)
+ * [CTA-861-G Table-42 Dynamic Range and Mastering InfoFrame]
+ */
+ sdp->sdp_header.HB1 = drm_infoframe->type;
+ /*
+ * Least Significant Eight Bits of (Data Byte Count - 1),
+ * i.e. infoframe_size (30) - 1 = 0x1D
+ */
+ sdp->sdp_header.HB2 = 0x1D;
+ /* INFOFRAME SDP Version Number */
+ sdp->sdp_header.HB3 = (0x13 << 2);
+ /* CTA Header Byte 2 (INFOFRAME Version Number) */
+ sdp->db[0] = drm_infoframe->version;
+ /* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */
+ sdp->db[1] = drm_infoframe->length;
+ /*
+ * Copy HDMI_DRM_INFOFRAME_SIZE size from a buffer after
+ * HDMI_INFOFRAME_HEADER_SIZE
+ */
+ BUILD_BUG_ON(sizeof(sdp->db) < HDMI_DRM_INFOFRAME_SIZE + 2);
+ memcpy(&sdp->db[2], &buf[HDMI_INFOFRAME_HEADER_SIZE],
+ HDMI_DRM_INFOFRAME_SIZE);
+
+ /*
+ * Size of DP infoframe sdp packet for HDR static metadata consists of
+ * - DP SDP Header(struct dp_sdp_header): 4 bytes
+ * - Two Data Blocks: 2 bytes
+ * CTA Header Byte2 (INFOFRAME Version Number)
+ * CTA Header Byte3 (Length of INFOFRAME)
+ * - HDMI_DRM_INFOFRAME_SIZE: 26 bytes
+ *
+ * Prior to GEN11 the GMP register size is identical to the DP HDR
+ * static metadata infoframe size. GEN11+ registers are larger than
+ * that; write_infoframe will pad out the remainder.
+ */
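+ /* i.e. 4 + 2 + 26 = 32 bytes in total */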
+ return sizeof(struct dp_sdp_header) + 2 + HDMI_DRM_INFOFRAME_SIZE;
+}
+
+static void intel_write_dp_sdp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int type)
+{
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct dp_sdp sdp = {};
+ ssize_t len;
+
+ if ((crtc_state->infoframes.enable &
+ intel_hdmi_infoframe_enable(type)) == 0)
+ return;
+
+ switch (type) {
+ case DP_SDP_VSC:
+ len = intel_dp_vsc_sdp_pack(&crtc_state->infoframes.vsc, &sdp,
+ sizeof(sdp));
+ break;
+ case HDMI_PACKET_TYPE_GAMUT_METADATA:
+ len = intel_dp_hdr_metadata_infoframe_sdp_pack(dev_priv,
+ &crtc_state->infoframes.drm.drm,
+ &sdp, sizeof(sdp));
+ break;
+ default:
+ MISSING_CASE(type);
+ return;
+ }
+
+ if (drm_WARN_ON(&dev_priv->drm, len < 0))
+ return;
+
+ dig_port->write_infoframe(encoder, crtc_state, type, &sdp, len);
+}
+
+void intel_write_dp_vsc_sdp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_dp_vsc_sdp *vsc)
+{
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct dp_sdp sdp = {};
+ ssize_t len;
+
+ len = intel_dp_vsc_sdp_pack(vsc, &sdp, sizeof(sdp));
+
+ if (drm_WARN_ON(&dev_priv->drm, len < 0))
+ return;
+
+ dig_port->write_infoframe(encoder, crtc_state, DP_SDP_VSC,
+ &sdp, len);
+}
+
+void intel_dp_set_infoframes(struct intel_encoder *encoder,
+ bool enable,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ i915_reg_t reg = HSW_TVIDEO_DIP_CTL(crtc_state->cpu_transcoder);
+ u32 dip_enable = VIDEO_DIP_ENABLE_AVI_HSW | VIDEO_DIP_ENABLE_GCP_HSW |
+ VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GMP_HSW |
+ VIDEO_DIP_ENABLE_SPD_HSW | VIDEO_DIP_ENABLE_DRM_GLK;
+ u32 val = intel_de_read(dev_priv, reg) & ~dip_enable;
+
+ /* TODO: Add DSC case (DIP_ENABLE_PPS) */
+ /* When PSR is enabled, this routine doesn't disable VSC DIP */
+ if (!crtc_state->has_psr)
+ val &= ~VIDEO_DIP_ENABLE_VSC_HSW;
+
+ intel_de_write(dev_priv, reg, val);
+ intel_de_posting_read(dev_priv, reg);
+
+ if (!enable)
+ return;
+
+ /* When PSR is enabled, VSC SDP is handled by PSR routine */
+ if (!crtc_state->has_psr)
+ intel_write_dp_sdp(encoder, crtc_state, DP_SDP_VSC);
+
+ intel_write_dp_sdp(encoder, crtc_state, HDMI_PACKET_TYPE_GAMUT_METADATA);
+}
+
+static int intel_dp_vsc_sdp_unpack(struct drm_dp_vsc_sdp *vsc,
+ const void *buffer, size_t size)
+{
+ const struct dp_sdp *sdp = buffer;
+
+ if (size < sizeof(struct dp_sdp))
+ return -EINVAL;
+
+ memset(vsc, 0, sizeof(*vsc));
+
+ if (sdp->sdp_header.HB0 != 0)
+ return -EINVAL;
+
+ if (sdp->sdp_header.HB1 != DP_SDP_VSC)
+ return -EINVAL;
+
+ vsc->sdp_type = sdp->sdp_header.HB1;
+ vsc->revision = sdp->sdp_header.HB2;
+ vsc->length = sdp->sdp_header.HB3;
+
+ if ((sdp->sdp_header.HB2 == 0x2 && sdp->sdp_header.HB3 == 0x8) ||
+ (sdp->sdp_header.HB2 == 0x4 && sdp->sdp_header.HB3 == 0xe)) {
+ /*
+ * - HB2 = 0x2, HB3 = 0x8
+ * VSC SDP supporting 3D stereo + PSR
+ * - HB2 = 0x4, HB3 = 0xe
+ * VSC SDP supporting 3D stereo + PSR2 with Y-coordinate of
+ * first scan line of the SU region (applies to eDP v1.4b
+ * and higher).
+ */
+ return 0;
+ } else if (sdp->sdp_header.HB2 == 0x5 && sdp->sdp_header.HB3 == 0x13) {
+ /*
+ * - HB2 = 0x5, HB3 = 0x13
+ * VSC SDP supporting 3D stereo + PSR2 + Pixel Encoding/Colorimetry
+ * Format.
+ */
+ vsc->pixelformat = (sdp->db[16] >> 4) & 0xf;
+ vsc->colorimetry = sdp->db[16] & 0xf;
+ vsc->dynamic_range = (sdp->db[17] >> 7) & 0x1;
+
+ switch (sdp->db[17] & 0x7) {
+ case 0x0:
+ vsc->bpc = 6;
+ break;
+ case 0x1:
+ vsc->bpc = 8;
+ break;
+ case 0x2:
+ vsc->bpc = 10;
+ break;
+ case 0x3:
+ vsc->bpc = 12;
+ break;
+ case 0x4:
+ vsc->bpc = 16;
+ break;
+ default:
+ MISSING_CASE(sdp->db[17] & 0x7);
+ return -EINVAL;
+ }
+
+ vsc->content_type = sdp->db[18] & 0x7;
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+intel_dp_hdr_metadata_infoframe_sdp_unpack(struct hdmi_drm_infoframe *drm_infoframe,
+ const void *buffer, size_t size)
+{
+ int ret;
+
+ const struct dp_sdp *sdp = buffer;
+
+ if (size < sizeof(struct dp_sdp))
+ return -EINVAL;
+
+ if (sdp->sdp_header.HB0 != 0)
+ return -EINVAL;
+
+ if (sdp->sdp_header.HB1 != HDMI_INFOFRAME_TYPE_DRM)
+ return -EINVAL;
+
+ /*
+ * Least Significant Eight Bits of (Data Byte Count - 1)
+ * 1Dh (i.e., Data Byte Count = 30 bytes).
+ */
+ if (sdp->sdp_header.HB2 != 0x1D)
+ return -EINVAL;
+
+ /* Most Significant Two Bits of (Data Byte Count - 1), cleared to 00b. */
+ if ((sdp->sdp_header.HB3 & 0x3) != 0)
+ return -EINVAL;
+
+ /* INFOFRAME SDP Version Number */
+ if (((sdp->sdp_header.HB3 >> 2) & 0x3f) != 0x13)
+ return -EINVAL;
+
+ /* CTA Header Byte 2 (INFOFRAME Version Number) */
+ if (sdp->db[0] != 1)
+ return -EINVAL;
+
+ /* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */
+ if (sdp->db[1] != HDMI_DRM_INFOFRAME_SIZE)
+ return -EINVAL;
+
+ ret = hdmi_drm_infoframe_unpack_only(drm_infoframe, &sdp->db[2],
+ HDMI_DRM_INFOFRAME_SIZE);
+
+ return ret;
+}
+
+static void intel_read_dp_vsc_sdp(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ struct drm_dp_vsc_sdp *vsc)
+{
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ unsigned int type = DP_SDP_VSC;
+ struct dp_sdp sdp = {};
+ int ret;
+
+ /* When PSR is enabled, VSC SDP is handled by PSR routine */
+ if (crtc_state->has_psr)
+ return;
+
+ if ((crtc_state->infoframes.enable &
+ intel_hdmi_infoframe_enable(type)) == 0)
+ return;
+
+ dig_port->read_infoframe(encoder, crtc_state, type, &sdp, sizeof(sdp));
+
+ ret = intel_dp_vsc_sdp_unpack(vsc, &sdp, sizeof(sdp));
+
+ if (ret)
+ drm_dbg_kms(&dev_priv->drm, "Failed to unpack DP VSC SDP\n");
+}
+
+static void intel_read_dp_hdr_metadata_infoframe_sdp(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ struct hdmi_drm_infoframe *drm_infoframe)
+{
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ unsigned int type = HDMI_PACKET_TYPE_GAMUT_METADATA;
+ struct dp_sdp sdp = {};
+ int ret;
+
+ if ((crtc_state->infoframes.enable &
+ intel_hdmi_infoframe_enable(type)) == 0)
+ return;
+
+ dig_port->read_infoframe(encoder, crtc_state, type, &sdp,
+ sizeof(sdp));
+
+ ret = intel_dp_hdr_metadata_infoframe_sdp_unpack(drm_infoframe, &sdp,
+ sizeof(sdp));
+
+ if (ret)
+ drm_dbg_kms(&dev_priv->drm,
+ "Failed to unpack DP HDR Metadata Infoframe SDP\n");
+}
+
+void intel_read_dp_sdp(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ unsigned int type)
+{
+ switch (type) {
+ case DP_SDP_VSC:
+ intel_read_dp_vsc_sdp(encoder, crtc_state,
+ &crtc_state->infoframes.vsc);
+ break;
+ case HDMI_PACKET_TYPE_GAMUT_METADATA:
+ intel_read_dp_hdr_metadata_infoframe_sdp(encoder, crtc_state,
+ &crtc_state->infoframes.drm.drm);
+ break;
+ default:
+ MISSING_CASE(type);
+ break;
+ }
+}
+
+static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ int status = 0;
+ int test_link_rate;
+ u8 test_lane_count, test_link_bw;
+ /*
+ * Read the TEST_LANE_COUNT and TEST_LINK_RATE fields
+ * (DP CTS 1.2, 4.3.1.11 and 3.1.4)
+ */
+ status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LANE_COUNT,
+ &test_lane_count);
+
+ if (status <= 0) {
+ drm_dbg_kms(&i915->drm, "Lane count read failed\n");
+ return DP_TEST_NAK;
+ }
+ test_lane_count &= DP_MAX_LANE_COUNT_MASK;
+
+ status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE,
+ &test_link_bw);
+ if (status <= 0) {
+ drm_dbg_kms(&i915->drm, "Link Rate read failed\n");
+ return DP_TEST_NAK;
+ }
+ test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw);
+
+ /* Validate the requested link rate and lane count */
+ if (!intel_dp_link_params_valid(intel_dp, test_link_rate,
+ test_lane_count))
+ return DP_TEST_NAK;
+
+ intel_dp->compliance.test_lane_count = test_lane_count;
+ intel_dp->compliance.test_link_rate = test_link_rate;
+
+ return DP_TEST_ACK;
+}
+
+static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ u8 test_pattern;
+ u8 test_misc;
+ __be16 h_width, v_height;
+ int status = 0;
+
+ /* Read the TEST_PATTERN (DP CTS 3.1.5) */
+ status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_PATTERN,
+ &test_pattern);
+ if (status <= 0) {
+ drm_dbg_kms(&i915->drm, "Test pattern read failed\n");
+ return DP_TEST_NAK;
+ }
+ if (test_pattern != DP_COLOR_RAMP)
+ return DP_TEST_NAK;
+
+ status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI,
+ &h_width, 2);
+ if (status <= 0) {
+ drm_dbg_kms(&i915->drm, "H Width read failed\n");
+ return DP_TEST_NAK;
+ }
+
+ status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI,
+ &v_height, 2);
+ if (status <= 0) {
+ drm_dbg_kms(&i915->drm, "V Height read failed\n");
+ return DP_TEST_NAK;
+ }
+
+ status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_MISC0,
+ &test_misc);
+ if (status <= 0) {
+ drm_dbg_kms(&i915->drm, "TEST MISC read failed\n");
+ return DP_TEST_NAK;
+ }
+ if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB)
+ return DP_TEST_NAK;
+ if (test_misc & DP_TEST_DYNAMIC_RANGE_CEA)
+ return DP_TEST_NAK;
+ switch (test_misc & DP_TEST_BIT_DEPTH_MASK) {
+ case DP_TEST_BIT_DEPTH_6:
+ intel_dp->compliance.test_data.bpc = 6;
+ break;
+ case DP_TEST_BIT_DEPTH_8:
+ intel_dp->compliance.test_data.bpc = 8;
+ break;
+ default:
+ return DP_TEST_NAK;
+ }
+
+ intel_dp->compliance.test_data.video_pattern = test_pattern;
+ intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width);
+ intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height);
+ /* Set test active flag here so userspace doesn't interrupt things */
+ intel_dp->compliance.test_active = true;
+
+ return DP_TEST_ACK;
+}
+
+static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ u8 test_result = DP_TEST_ACK;
+ struct intel_connector *intel_connector = intel_dp->attached_connector;
+ struct drm_connector *connector = &intel_connector->base;
+
+ if (intel_connector->detect_edid == NULL ||
+ connector->edid_corrupt ||
+ intel_dp->aux.i2c_defer_count > 6) {
+ /* Check EDID read for NACKs, DEFERs and corruption
+ * (DP CTS 1.2 Core r1.1)
+ * 4.2.2.4 : Failed EDID read, I2C_NAK
+ * 4.2.2.5 : Failed EDID read, I2C_DEFER
+ * 4.2.2.6 : EDID corruption detected
+ * Use failsafe mode for all cases
+ */
+ if (intel_dp->aux.i2c_nack_count > 0 ||
+ intel_dp->aux.i2c_defer_count > 0)
+ drm_dbg_kms(&i915->drm,
+ "EDID read had %d NACKs, %d DEFERs\n",
+ intel_dp->aux.i2c_nack_count,
+ intel_dp->aux.i2c_defer_count);
+ intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE;
+ } else {
+ struct edid *block = intel_connector->detect_edid;
+
+ /* We have to write the checksum
+ * of the last block read
+ */
+ block += intel_connector->detect_edid->extensions;
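+ /* e.g. with one extension, this advances to the second 128-byte block */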
+
+ if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_EDID_CHECKSUM,
+ block->checksum) <= 0)
+ drm_dbg_kms(&i915->drm,
+ "Failed to write EDID checksum\n");
+
+ test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
+ intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED;
+ }
+
+ /* Set test active flag here so userspace doesn't interrupt things */
+ intel_dp->compliance.test_active = true;
+
+ return test_result;
+}
+
+static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
+ struct drm_dp_phy_test_params *data =
+ &intel_dp->compliance.test_data.phytest;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ enum pipe pipe = crtc->pipe;
+ u32 pattern_val;
+
+ switch (data->phy_pattern) {
+ case DP_PHY_TEST_PATTERN_NONE:
+ drm_dbg_kms(&dev_priv->drm, "Disable Phy Test Pattern\n");
+ intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 0x0);
+ break;
+ case DP_PHY_TEST_PATTERN_D10_2:
+ drm_dbg_kms(&dev_priv->drm, "Set D10.2 Phy Test Pattern\n");
+ intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
+ DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_D10_2);
+ break;
+ case DP_PHY_TEST_PATTERN_ERROR_COUNT:
+ drm_dbg_kms(&dev_priv->drm, "Set Error Count Phy Test Pattern\n");
+ intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
+ DDI_DP_COMP_CTL_ENABLE |
+ DDI_DP_COMP_CTL_SCRAMBLED_0);
+ break;
+ case DP_PHY_TEST_PATTERN_PRBS7:
+ drm_dbg_kms(&dev_priv->drm, "Set PRBS7 Phy Test Pattern\n");
+ intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
+ DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_PRBS7);
+ break;
+ case DP_PHY_TEST_PATTERN_80BIT_CUSTOM:
+ /*
+ * FIXME: Ideally the pattern should come from DPCD 0x250. Since
+ * current DPR-100 firmware cannot set it, hardcode it for now
+ * for the compliance test.
+ */
+ drm_dbg_kms(&dev_priv->drm,
+ "Set 80Bit Custom Phy Test Pattern 0x3e0f83e0 0x0f83e0f8 0x0000f83e\n");
+ pattern_val = 0x3e0f83e0;
+ intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 0), pattern_val);
+ pattern_val = 0x0f83e0f8;
+ intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 1), pattern_val);
+ pattern_val = 0x0000f83e;
+ intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 2), pattern_val);
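+ /* 80 bits total: two full 32-bit words plus the low 16 bits of the third register */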
+ intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
+ DDI_DP_COMP_CTL_ENABLE |
+ DDI_DP_COMP_CTL_CUSTOM80);
+ break;
+ case DP_PHY_TEST_PATTERN_CP2520:
+ /*
+ * FIXME: Ideally the pattern should come from DPCD 0x24A. Since
+ * current DPR-100 firmware cannot set it, hardcode it for now
+ * for the compliance test.
+ */
+ drm_dbg_kms(&dev_priv->drm, "Set HBR2 compliance Phy Test Pattern\n");
+ pattern_val = 0xFB;
+ intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
+ DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_HBR2 |
+ pattern_val);
+ break;
+ default:
+ WARN(1, "Invalid Phy Test Pattern\n");
+ }
+}
+
+static void intel_dp_process_phy_request(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct drm_dp_phy_test_params *data =
+ &intel_dp->compliance.test_data.phytest;
+ u8 link_status[DP_LINK_STATUS_SIZE];
+
+ if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, DP_PHY_DPRX,
+ link_status) < 0) {
+ drm_dbg_kms(&i915->drm, "failed to get link status\n");
+ return;
+ }
+
+ /* retrieve vswing & pre-emphasis setting */
+ intel_dp_get_adjust_train(intel_dp, crtc_state, DP_PHY_DPRX,
+ link_status);
+
+ intel_dp_set_signal_levels(intel_dp, crtc_state, DP_PHY_DPRX);
+
+ intel_dp_phy_pattern_update(intel_dp, crtc_state);
+
+ drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
+ intel_dp->train_set, crtc_state->lane_count);
+
+ drm_dp_set_phy_test_pattern(&intel_dp->aux, data,
+ intel_dp->dpcd[DP_DPCD_REV]);
+}
+
+static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct drm_dp_phy_test_params *data =
+ &intel_dp->compliance.test_data.phytest;
+
+ if (drm_dp_get_phy_test_pattern(&intel_dp->aux, data)) {
+ drm_dbg_kms(&i915->drm, "DP Phy Test pattern AUX read failure\n");
+ return DP_TEST_NAK;
+ }
+
+ /* Set test active flag here so userspace doesn't interrupt things */
+ intel_dp->compliance.test_active = true;
+
+ return DP_TEST_ACK;
+}
+
+static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ u8 response = DP_TEST_NAK;
+ u8 request = 0;
+ int status;
+
+ status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request);
+ if (status <= 0) {
+ drm_dbg_kms(&i915->drm,
+ "Could not read test request from sink\n");
+ goto update_status;
+ }
+
+ switch (request) {
+ case DP_TEST_LINK_TRAINING:
+ drm_dbg_kms(&i915->drm, "LINK_TRAINING test requested\n");
+ response = intel_dp_autotest_link_training(intel_dp);
+ break;
+ case DP_TEST_LINK_VIDEO_PATTERN:
+ drm_dbg_kms(&i915->drm, "TEST_PATTERN test requested\n");
+ response = intel_dp_autotest_video_pattern(intel_dp);
+ break;
+ case DP_TEST_LINK_EDID_READ:
+ drm_dbg_kms(&i915->drm, "EDID test requested\n");
+ response = intel_dp_autotest_edid(intel_dp);
+ break;
+ case DP_TEST_LINK_PHY_TEST_PATTERN:
+ drm_dbg_kms(&i915->drm, "PHY_PATTERN test requested\n");
+ response = intel_dp_autotest_phy_pattern(intel_dp);
+ break;
+ default:
+ drm_dbg_kms(&i915->drm, "Invalid test request '%02x'\n",
+ request);
+ break;
+ }
+
+ if (response & DP_TEST_ACK)
+ intel_dp->compliance.test_type = request;
+
+update_status:
+ status = drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, response);
+ if (status <= 0)
+ drm_dbg_kms(&i915->drm,
+ "Could not write test response to sink\n");
+}
+
+static bool intel_dp_link_ok(struct intel_dp *intel_dp,
+ u8 link_status[DP_LINK_STATUS_SIZE])
+{
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
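+ /* 128b/132b channel coding is used for UHBR link rates, i.e. 10 Gbps and up */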
+ bool uhbr = intel_dp->link_rate >= 1000000;
+ bool ok;
+
+ if (uhbr)
+ ok = drm_dp_128b132b_lane_channel_eq_done(link_status,
+ intel_dp->lane_count);
+ else
+ ok = drm_dp_channel_eq_ok(link_status, intel_dp->lane_count);
+
+ if (ok)
+ return true;
+
+ intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status);
+ drm_dbg_kms(&i915->drm,
+ "[ENCODER:%d:%s] %s link not ok, retraining\n",
+ encoder->base.base.id, encoder->base.name,
+ uhbr ? "128b/132b" : "8b/10b");
+
+ return false;
+}
+
+static void
+intel_dp_mst_hpd_irq(struct intel_dp *intel_dp, u8 *esi, u8 *ack)
+{
+ bool handled = false;
+
+ drm_dp_mst_hpd_irq_handle_event(&intel_dp->mst_mgr, esi, ack, &handled);
+
+ if (esi[1] & DP_CP_IRQ) {
+ intel_hdcp_handle_cp_irq(intel_dp->attached_connector);
+ ack[1] |= DP_CP_IRQ;
+ }
+}
+
+static bool intel_dp_mst_link_status(struct intel_dp *intel_dp)
+{
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ u8 link_status[DP_LINK_STATUS_SIZE] = {};
+ const size_t esi_link_status_size = DP_LINK_STATUS_SIZE - 2;
+
+ if (drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS_ESI, link_status,
+ esi_link_status_size) != esi_link_status_size) {
+ drm_err(&i915->drm,
+ "[ENCODER:%d:%s] Failed to read link status\n",
+ encoder->base.base.id, encoder->base.name);
+ return false;
+ }
+
+ return intel_dp_link_ok(intel_dp, link_status);
+}
+
+/**
+ * intel_dp_check_mst_status - service any pending MST interrupts, check link status
+ * @intel_dp: Intel DP struct
+ *
+ * Read any pending MST interrupts, call MST core to handle these and ack the
+ * interrupts. Check if the main and AUX link state is ok.
+ *
+ * Returns:
+ * - %true if pending interrupts were serviced (or no interrupts were
+ * pending) w/o detecting an error condition.
+ * - %false if an error condition - like AUX failure or a loss of link - is
+ * detected, which needs servicing from the hotplug work.
+ */
+static bool
+intel_dp_check_mst_status(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ bool link_ok = true;
+
+ drm_WARN_ON_ONCE(&i915->drm, intel_dp->active_mst_links < 0);
+
+ for (;;) {
+ u8 esi[4] = {};
+ u8 ack[4] = {};
+
+ if (!intel_dp_get_sink_irq_esi(intel_dp, esi)) {
+ drm_dbg_kms(&i915->drm,
+ "failed to get ESI - device may have failed\n");
+ link_ok = false;
+
+ break;
+ }
+
+ drm_dbg_kms(&i915->drm, "DPRX ESI: %4ph\n", esi);
+
+ if (intel_dp->active_mst_links > 0 && link_ok &&
+ esi[3] & LINK_STATUS_CHANGED) {
+ if (!intel_dp_mst_link_status(intel_dp))
+ link_ok = false;
+ ack[3] |= LINK_STATUS_CHANGED;
+ }
+
+ intel_dp_mst_hpd_irq(intel_dp, esi, ack);
+
+ if (!memchr_inv(ack, 0, sizeof(ack)))
+ break;
+
+ if (!intel_dp_ack_sink_irq_esi(intel_dp, ack))
+ drm_dbg_kms(&i915->drm, "Failed to ack ESI\n");
+
+ if (ack[1] & (DP_DOWN_REP_MSG_RDY | DP_UP_REQ_MSG_RDY))
+ drm_dp_mst_hpd_irq_send_new_request(&intel_dp->mst_mgr);
+ }
+
+ return link_ok;
+}
+
+static void
+intel_dp_handle_hdmi_link_status_change(struct intel_dp *intel_dp)
+{
+ bool is_active;
+ u8 buf = 0;
+
+ is_active = drm_dp_pcon_hdmi_link_active(&intel_dp->aux);
+ if (intel_dp->frl.is_trained && !is_active) {
+ if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, &buf) < 0)
+ return;
+
+ buf &= ~DP_PCON_ENABLE_HDMI_LINK;
+ if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, buf) < 0)
+ return;
+
+ drm_dp_pcon_hdmi_frl_link_error_count(&intel_dp->aux, &intel_dp->attached_connector->base);
+
+ intel_dp->frl.is_trained = false;
+
+ /* Restart FRL training or fall back to TMDS mode */
+ intel_dp_check_frl_training(intel_dp);
+ }
+}
+
+static bool
+intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
+{
+ u8 link_status[DP_LINK_STATUS_SIZE];
+
+ if (!intel_dp->link_trained)
+ return false;
+
+ /*
+ * While PSR source HW is enabled it controls the main link,
+ * enabling and disabling frame transmission, so a retrain attempt
+ * would fail: the link may not be on, or training patterns could
+ * get mixed with frame data. Also, on PSR exit the HW retrains
+ * the link anyway, fixing any link status error.
+ */
+ if (intel_psr_enabled(intel_dp))
+ return false;
+
+ if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, DP_PHY_DPRX,
+ link_status) < 0)
+ return false;
+
+ /*
+ * Validate the cached values of intel_dp->link_rate and
+ * intel_dp->lane_count before attempting to retrain.
+ *
+ * FIXME would be nice to use the crtc state here, but since
+ * we need to call this from the short HPD handler that seems
+ * a bit hard.
+ */
+ if (!intel_dp_link_params_valid(intel_dp, intel_dp->link_rate,
+ intel_dp->lane_count))
+ return false;
+
+ /* Retrain if link not ok */
+ return !intel_dp_link_ok(intel_dp, link_status);
+}
+
+static bool intel_dp_has_connector(struct intel_dp *intel_dp,
+ const struct drm_connector_state *conn_state)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_encoder *encoder;
+ enum pipe pipe;
+
+ if (!conn_state->best_encoder)
+ return false;
+
+ /* SST */
+ encoder = &dp_to_dig_port(intel_dp)->base;
+ if (conn_state->best_encoder == &encoder->base)
+ return true;
+
+ /* MST */
+ for_each_pipe(i915, pipe) {
+ encoder = &intel_dp->mst_encoders[pipe]->base;
+ if (conn_state->best_encoder == &encoder->base)
+ return true;
+ }
+
+ return false;
+}
+
+static int intel_dp_prep_link_retrain(struct intel_dp *intel_dp,
+ struct drm_modeset_acquire_ctx *ctx,
+ u8 *pipe_mask)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct drm_connector_list_iter conn_iter;
+ struct intel_connector *connector;
+ int ret = 0;
+
+ *pipe_mask = 0;
+
+ if (!intel_dp_needs_link_retrain(intel_dp))
+ return 0;
+
+ drm_connector_list_iter_begin(&i915->drm, &conn_iter);
+ for_each_intel_connector_iter(connector, &conn_iter) {
+ struct drm_connector_state *conn_state =
+ connector->base.state;
+ struct intel_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
+
+ if (!intel_dp_has_connector(intel_dp, conn_state))
+ continue;
+
+ crtc = to_intel_crtc(conn_state->crtc);
+ if (!crtc)
+ continue;
+
+ ret = drm_modeset_lock(&crtc->base.mutex, ctx);
+ if (ret)
+ break;
+
+ crtc_state = to_intel_crtc_state(crtc->base.state);
+
+ drm_WARN_ON(&i915->drm, !intel_crtc_has_dp_encoder(crtc_state));
+
+ if (!crtc_state->hw.active)
+ continue;
+
+ if (conn_state->commit &&
+ !try_wait_for_completion(&conn_state->commit->hw_done))
+ continue;
+
+ *pipe_mask |= BIT(crtc->pipe);
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ if (!intel_dp_needs_link_retrain(intel_dp))
+ *pipe_mask = 0;
+
+ return ret;
+}
+
+static bool intel_dp_is_connected(struct intel_dp *intel_dp)
+{
+ struct intel_connector *connector = intel_dp->attached_connector;
+
+ return connector->base.status == connector_status_connected ||
+ intel_dp->is_mst;
+}
+
+int intel_dp_retrain_link(struct intel_encoder *encoder,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_crtc *crtc;
+ u8 pipe_mask;
+ int ret;
+
+ if (!intel_dp_is_connected(intel_dp))
+ return 0;
+
+ ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
+ ctx);
+ if (ret)
+ return ret;
+
+ ret = intel_dp_prep_link_retrain(intel_dp, ctx, &pipe_mask);
+ if (ret)
+ return ret;
+
+ if (pipe_mask == 0)
+ return 0;
+
+ drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] retraining link\n",
+ encoder->base.base.id, encoder->base.name);
+
+ for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc, pipe_mask) {
+ const struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+
+ /* Suppress underruns caused by re-training */
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
+ if (crtc_state->has_pch_encoder)
+ intel_set_pch_fifo_underrun_reporting(dev_priv,
+ intel_crtc_pch_transcoder(crtc), false);
+ }
+
+ for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc, pipe_mask) {
+ const struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+
+ /* retrain on the MST master transcoder */
+ if (DISPLAY_VER(dev_priv) >= 12 &&
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) &&
+ !intel_dp_mst_is_master_trans(crtc_state))
+ continue;
+
+ intel_dp_check_frl_training(intel_dp);
+ intel_dp_pcon_dsc_configure(intel_dp, crtc_state);
+ intel_dp_start_link_train(intel_dp, crtc_state);
+ intel_dp_stop_link_train(intel_dp, crtc_state);
+ break;
+ }
+
+ for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc, pipe_mask) {
+ const struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+
+ /* Keep underrun reporting disabled until things are stable */
+ intel_crtc_wait_for_next_vblank(crtc);
+
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
+ if (crtc_state->has_pch_encoder)
+ intel_set_pch_fifo_underrun_reporting(dev_priv,
+ intel_crtc_pch_transcoder(crtc), true);
+ }
+
+ return 0;
+}
+
+static int intel_dp_prep_phy_test(struct intel_dp *intel_dp,
+ struct drm_modeset_acquire_ctx *ctx,
+ u8 *pipe_mask)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct drm_connector_list_iter conn_iter;
+ struct intel_connector *connector;
+ int ret = 0;
+
+ *pipe_mask = 0;
+
+ drm_connector_list_iter_begin(&i915->drm, &conn_iter);
+ for_each_intel_connector_iter(connector, &conn_iter) {
+ struct drm_connector_state *conn_state =
+ connector->base.state;
+ struct intel_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
+
+ if (!intel_dp_has_connector(intel_dp, conn_state))
+ continue;
+
+ crtc = to_intel_crtc(conn_state->crtc);
+ if (!crtc)
+ continue;
+
+ ret = drm_modeset_lock(&crtc->base.mutex, ctx);
+ if (ret)
+ break;
+
+ crtc_state = to_intel_crtc_state(crtc->base.state);
+
+ drm_WARN_ON(&i915->drm, !intel_crtc_has_dp_encoder(crtc_state));
+
+ if (!crtc_state->hw.active)
+ continue;
+
+ if (conn_state->commit &&
+ !try_wait_for_completion(&conn_state->commit->hw_done))
+ continue;
+
+ *pipe_mask |= BIT(crtc->pipe);
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ return ret;
+}
+
+static int intel_dp_do_phy_test(struct intel_encoder *encoder,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_crtc *crtc;
+ u8 pipe_mask;
+ int ret;
+
+ ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
+ ctx);
+ if (ret)
+ return ret;
+
+ ret = intel_dp_prep_phy_test(intel_dp, ctx, &pipe_mask);
+ if (ret)
+ return ret;
+
+ if (pipe_mask == 0)
+ return 0;
+
+ drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] PHY test\n",
+ encoder->base.base.id, encoder->base.name);
+
+ for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc, pipe_mask) {
+ const struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+
+ /* test on the MST master transcoder */
+ if (DISPLAY_VER(dev_priv) >= 12 &&
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) &&
+ !intel_dp_mst_is_master_trans(crtc_state))
+ continue;
+
+ intel_dp_process_phy_request(intel_dp, crtc_state);
+ break;
+ }
+
+ return 0;
+}
+
+void intel_dp_phy_test(struct intel_encoder *encoder)
+{
+ struct drm_modeset_acquire_ctx ctx;
+ int ret;
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+ for (;;) {
+ ret = intel_dp_do_phy_test(encoder, &ctx);
+
+ if (ret == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ continue;
+ }
+
+ break;
+ }
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+ drm_WARN(encoder->base.dev, ret,
+ "Acquiring modeset locks failed with %i\n", ret);
+}
+
+static void intel_dp_check_device_service_irq(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ u8 val;
+
+ if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
+ return;
+
+ if (drm_dp_dpcd_readb(&intel_dp->aux,
+ DP_DEVICE_SERVICE_IRQ_VECTOR, &val) != 1 || !val)
+ return;
+
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, val);
+
+ if (val & DP_AUTOMATED_TEST_REQUEST)
+ intel_dp_handle_test_request(intel_dp);
+
+ if (val & DP_CP_IRQ)
+ intel_hdcp_handle_cp_irq(intel_dp->attached_connector);
+
+ if (val & DP_SINK_SPECIFIC_IRQ)
+ drm_dbg_kms(&i915->drm, "Sink specific irq unhandled\n");
+}
+
+static void intel_dp_check_link_service_irq(struct intel_dp *intel_dp)
+{
+ u8 val;
+
+ if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
+ return;
+
+ if (drm_dp_dpcd_readb(&intel_dp->aux,
+ DP_LINK_SERVICE_IRQ_VECTOR_ESI0, &val) != 1 || !val)
+ return;
+
+ if (drm_dp_dpcd_writeb(&intel_dp->aux,
+ DP_LINK_SERVICE_IRQ_VECTOR_ESI0, val) != 1)
+ return;
+
+ if (val & HDMI_LINK_STATUS_CHANGED)
+ intel_dp_handle_hdmi_link_status_change(intel_dp);
+}
+
+/*
+ * According to DP spec 5.1.2:
+ * 1. Read DPCD
+ * 2. Configure link according to Receiver Capabilities
+ * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
+ * 4. Check link status on receipt of hot-plug interrupt
+ *
+ * intel_dp_short_pulse - handles short pulse interrupts
+ * when full detection is not required.
+ * Returns %true if the short pulse was handled and full detection
+ * is NOT required, %false otherwise.
+ */
+static bool
+intel_dp_short_pulse(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ u8 old_sink_count = intel_dp->sink_count;
+ bool ret;
+
+ /*
+ * Clearing compliance test variables to allow capturing
+ * of values for next automated test request.
+ */
+ memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
+
+ /*
+ * Now read the DPCD to see if it's actually running. If the
+ * current sink count doesn't match the value stored earlier,
+ * or the DPCD read failed, we need to do full detection.
+ */
+ ret = intel_dp_get_dpcd(intel_dp);
+
+ if ((old_sink_count != intel_dp->sink_count) || !ret) {
+ /* No need to proceed if we are going to do full detect */
+ return false;
+ }
+
+ intel_dp_check_device_service_irq(intel_dp);
+ intel_dp_check_link_service_irq(intel_dp);
+
+ /* Handle CEC interrupts, if any */
+ drm_dp_cec_irq(&intel_dp->aux);
+
+ /* defer to the hotplug work for link retraining if needed */
+ if (intel_dp_needs_link_retrain(intel_dp))
+ return false;
+
+ intel_psr_short_pulse(intel_dp);
+
+ switch (intel_dp->compliance.test_type) {
+ case DP_TEST_LINK_TRAINING:
+ drm_dbg_kms(&dev_priv->drm,
+ "Link Training Compliance Test requested\n");
+ /* Send a Hotplug Uevent to userspace to start modeset */
+ drm_kms_helper_hotplug_event(&dev_priv->drm);
+ break;
+ case DP_TEST_LINK_PHY_TEST_PATTERN:
+ drm_dbg_kms(&dev_priv->drm,
+ "PHY test pattern Compliance Test requested\n");
+ /*
+ * Schedule long hpd to do the test
+ *
+ * FIXME get rid of the ad-hoc phy test modeset code
+ * and properly incorporate it into the normal modeset.
+ */
+ return false;
+ }
+
+ return true;
+}
+
+/* XXX this is probably wrong for multiple downstream ports */
+static enum drm_connector_status
+intel_dp_detect_dpcd(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ u8 *dpcd = intel_dp->dpcd;
+ u8 type;
+
+ if (drm_WARN_ON(&i915->drm, intel_dp_is_edp(intel_dp)))
+ return connector_status_connected;
+
+ lspcon_resume(dig_port);
+
+ if (!intel_dp_get_dpcd(intel_dp))
+ return connector_status_disconnected;
+
+ /* if there's no downstream port, we're done */
+ if (!drm_dp_is_branch(dpcd))
+ return connector_status_connected;
+
+ /* If we're HPD-aware, SINK_COUNT changes dynamically */
+ if (intel_dp_has_sink_count(intel_dp) &&
+ intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
+ return intel_dp->sink_count ?
+ connector_status_connected : connector_status_disconnected;
+ }
+
+ if (intel_dp_can_mst(intel_dp))
+ return connector_status_connected;
+
+ /* If no HPD, poke DDC gently */
+ if (drm_probe_ddc(&intel_dp->aux.ddc))
+ return connector_status_connected;
+
+ /* Well we tried, say unknown for unreliable port types */
+ if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
+ type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
+ if (type == DP_DS_PORT_TYPE_VGA ||
+ type == DP_DS_PORT_TYPE_NON_EDID)
+ return connector_status_unknown;
+ } else {
+ type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
+ DP_DWN_STRM_PORT_TYPE_MASK;
+ if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
+ type == DP_DWN_STRM_PORT_TYPE_OTHER)
+ return connector_status_unknown;
+ }
+
+ /* Anything else is out of spec, warn and ignore */
+ drm_dbg_kms(&i915->drm, "Broken DP branch device, ignoring\n");
+ return connector_status_disconnected;
+}
+
+static enum drm_connector_status
+edp_detect(struct intel_dp *intel_dp)
+{
+ return connector_status_connected;
+}
+
+/*
+ * intel_digital_port_connected - is the specified port connected?
+ * @encoder: intel_encoder
+ *
+ * In cases where there's a connector physically connected but it can't be used
+ * by our hardware we also return false, since the rest of the driver should
+ * pretty much treat the port as disconnected. This is relevant for type-C
+ * (starting on ICL) where there's ownership involved.
+ *
+ * Return %true if port is connected, %false otherwise.
+ */
+bool intel_digital_port_connected(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ bool is_connected = false;
+ intel_wakeref_t wakeref;
+
+ with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref)
+ is_connected = dig_port->connected(encoder);
+
+ return is_connected;
+}
+
+static struct edid *
+intel_dp_get_edid(struct intel_dp *intel_dp)
+{
+ struct intel_connector *intel_connector = intel_dp->attached_connector;
+
+ /* use cached edid if we have one */
+ if (intel_connector->edid) {
+ /* invalid edid */
+ if (IS_ERR(intel_connector->edid))
+ return NULL;
+
+ return drm_edid_duplicate(intel_connector->edid);
+ } else
+ return drm_get_edid(&intel_connector->base,
+ &intel_dp->aux.ddc);
+}
+
+static void
+intel_dp_update_dfp(struct intel_dp *intel_dp,
+ const struct edid *edid)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_connector *connector = intel_dp->attached_connector;
+
+ intel_dp->dfp.max_bpc =
+ drm_dp_downstream_max_bpc(intel_dp->dpcd,
+ intel_dp->downstream_ports, edid);
+
+ intel_dp->dfp.max_dotclock =
+ drm_dp_downstream_max_dotclock(intel_dp->dpcd,
+ intel_dp->downstream_ports);
+
+ intel_dp->dfp.min_tmds_clock =
+ drm_dp_downstream_min_tmds_clock(intel_dp->dpcd,
+ intel_dp->downstream_ports,
+ edid);
+ intel_dp->dfp.max_tmds_clock =
+ drm_dp_downstream_max_tmds_clock(intel_dp->dpcd,
+ intel_dp->downstream_ports,
+ edid);
+
+ intel_dp->dfp.pcon_max_frl_bw =
+ drm_dp_get_pcon_max_frl_bw(intel_dp->dpcd,
+ intel_dp->downstream_ports);
+
+ drm_dbg_kms(&i915->drm,
+ "[CONNECTOR:%d:%s] DFP max bpc %d, max dotclock %d, TMDS clock %d-%d, PCON Max FRL BW %dGbps\n",
+ connector->base.base.id, connector->base.name,
+ intel_dp->dfp.max_bpc,
+ intel_dp->dfp.max_dotclock,
+ intel_dp->dfp.min_tmds_clock,
+ intel_dp->dfp.max_tmds_clock,
+ intel_dp->dfp.pcon_max_frl_bw);
+
+ intel_dp_get_pcon_dsc_cap(intel_dp);
+}
+
+static void
+intel_dp_update_420(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_connector *connector = intel_dp->attached_connector;
+ bool is_branch, ycbcr_420_passthrough, ycbcr_444_to_420, rgb_to_ycbcr;
+
+ /* No YCbCr output support on gmch platforms */
+ if (HAS_GMCH(i915))
+ return;
+
+ /*
+ * ILK doesn't seem capable of DP YCbCr output. The
+ * displayed image is severely corrupted. SNB+ is fine.
+ */
+ if (IS_IRONLAKE(i915))
+ return;
+
+ is_branch = drm_dp_is_branch(intel_dp->dpcd);
+ ycbcr_420_passthrough =
+ drm_dp_downstream_420_passthrough(intel_dp->dpcd,
+ intel_dp->downstream_ports);
+ /* an on-board LSPCON is always assumed to support 4:4:4->4:2:0 conversion */
+ ycbcr_444_to_420 =
+ dp_to_dig_port(intel_dp)->lspcon.active ||
+ drm_dp_downstream_444_to_420_conversion(intel_dp->dpcd,
+ intel_dp->downstream_ports);
+ rgb_to_ycbcr = drm_dp_downstream_rgb_to_ycbcr_conversion(intel_dp->dpcd,
+ intel_dp->downstream_ports,
+ DP_DS_HDMI_BT709_RGB_YCBCR_CONV);
+
+ if (DISPLAY_VER(i915) >= 11) {
+ /* Let PCON convert from RGB->YCbCr if possible */
+ if (is_branch && rgb_to_ycbcr && ycbcr_444_to_420) {
+ intel_dp->dfp.rgb_to_ycbcr = true;
+ intel_dp->dfp.ycbcr_444_to_420 = true;
+ connector->base.ycbcr_420_allowed = true;
+ } else {
+ /* Prefer 4:2:0 passthrough over 4:4:4->4:2:0 conversion */
+ intel_dp->dfp.ycbcr_444_to_420 =
+ ycbcr_444_to_420 && !ycbcr_420_passthrough;
+
+ connector->base.ycbcr_420_allowed =
+ !is_branch || ycbcr_444_to_420 || ycbcr_420_passthrough;
+ }
+ } else {
+ /* 4:4:4->4:2:0 conversion is the only way */
+ intel_dp->dfp.ycbcr_444_to_420 = ycbcr_444_to_420;
+
+ connector->base.ycbcr_420_allowed = ycbcr_444_to_420;
+ }
+
+ drm_dbg_kms(&i915->drm,
+ "[CONNECTOR:%d:%s] RGB->YcbCr conversion? %s, YCbCr 4:2:0 allowed? %s, YCbCr 4:4:4->4:2:0 conversion? %s\n",
+ connector->base.base.id, connector->base.name,
+ str_yes_no(intel_dp->dfp.rgb_to_ycbcr),
+ str_yes_no(connector->base.ycbcr_420_allowed),
+ str_yes_no(intel_dp->dfp.ycbcr_444_to_420));
+}
+
+static void
+intel_dp_set_edid(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_connector *connector = intel_dp->attached_connector;
+ struct edid *edid;
+ bool vrr_capable;
+
+ intel_dp_unset_edid(intel_dp);
+ edid = intel_dp_get_edid(intel_dp);
+ connector->detect_edid = edid;
+
+ vrr_capable = intel_vrr_is_capable(connector);
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] VRR capable: %s\n",
+ connector->base.base.id, connector->base.name, str_yes_no(vrr_capable));
+ drm_connector_set_vrr_capable_property(&connector->base, vrr_capable);
+
+ intel_dp_update_dfp(intel_dp, edid);
+ intel_dp_update_420(intel_dp);
+
+ if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) {
+ intel_dp->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
+ intel_dp->has_audio = drm_detect_monitor_audio(edid);
+ }
+
+ drm_dp_cec_set_edid(&intel_dp->aux, edid);
+}
+
+static void
+intel_dp_unset_edid(struct intel_dp *intel_dp)
+{
+ struct intel_connector *connector = intel_dp->attached_connector;
+
+ drm_dp_cec_unset_edid(&intel_dp->aux);
+ kfree(connector->detect_edid);
+ connector->detect_edid = NULL;
+
+ intel_dp->has_hdmi_sink = false;
+ intel_dp->has_audio = false;
+
+ intel_dp->dfp.max_bpc = 0;
+ intel_dp->dfp.max_dotclock = 0;
+ intel_dp->dfp.min_tmds_clock = 0;
+ intel_dp->dfp.max_tmds_clock = 0;
+
+ intel_dp->dfp.pcon_max_frl_bw = 0;
+
+ intel_dp->dfp.ycbcr_444_to_420 = false;
+ connector->base.ycbcr_420_allowed = false;
+
+ drm_connector_set_vrr_capable_property(&connector->base,
+ false);
+}
+
+static int
+intel_dp_detect(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx,
+ bool force)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
+ struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct intel_encoder *encoder = &dig_port->base;
+ enum drm_connector_status status;
+
+ drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
+ connector->base.id, connector->name);
+ drm_WARN_ON(&dev_priv->drm,
+ !drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
+
+ if (!INTEL_DISPLAY_ENABLED(dev_priv))
+ return connector_status_disconnected;
+
+ /* Can't disconnect eDP */
+ if (intel_dp_is_edp(intel_dp))
+ status = edp_detect(intel_dp);
+ else if (intel_digital_port_connected(encoder))
+ status = intel_dp_detect_dpcd(intel_dp);
+ else
+ status = connector_status_disconnected;
+
+ if (status == connector_status_disconnected) {
+ memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
+ memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));
+
+ if (intel_dp->is_mst) {
+ drm_dbg_kms(&dev_priv->drm,
+ "MST device may have disappeared %d vs %d\n",
+ intel_dp->is_mst,
+ intel_dp->mst_mgr.mst_state);
+ intel_dp->is_mst = false;
+ drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
+ intel_dp->is_mst);
+ }
+
+ goto out;
+ }
+
+ /* Read DP Sink DSC Cap DPCD regs for DP v1.4 */
+ if (DISPLAY_VER(dev_priv) >= 11)
+ intel_dp_get_dsc_sink_cap(intel_dp);
+
+ intel_dp_configure_mst(intel_dp);
+
+ /*
+ * TODO: Reset link params when switching to MST mode, until MST
+ * supports link training fallback params.
+ */
+ if (intel_dp->reset_link_params || intel_dp->is_mst) {
+ intel_dp_reset_max_link_params(intel_dp);
+ intel_dp->reset_link_params = false;
+ }
+
+ intel_dp_print_rates(intel_dp);
+
+ if (intel_dp->is_mst) {
+ /*
+ * If we are in MST mode then this connector
+ * won't appear connected or have an EDID on it.
+ */
+ status = connector_status_disconnected;
+ goto out;
+ }
+
+ /*
+ * Some external monitors do not signal loss of link synchronization
+ * with an IRQ_HPD, so force a link status check.
+ */
+ if (!intel_dp_is_edp(intel_dp)) {
+ int ret;
+
+ ret = intel_dp_retrain_link(encoder, ctx);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * Clear the NACK and defer counts so we get their exact values
+ * for the EDID read, as required by Compliance tests 4.2.2.4
+ * and 4.2.2.5.
+ */
+ intel_dp->aux.i2c_nack_count = 0;
+ intel_dp->aux.i2c_defer_count = 0;
+
+ intel_dp_set_edid(intel_dp);
+ if (intel_dp_is_edp(intel_dp) ||
+ to_intel_connector(connector)->detect_edid)
+ status = connector_status_connected;
+
+ intel_dp_check_device_service_irq(intel_dp);
+
+out:
+ if (status != connector_status_connected && !intel_dp->is_mst)
+ intel_dp_unset_edid(intel_dp);
+
+ /*
+ * Make sure the refs for power wells enabled during detect are
+ * dropped to avoid a new detect cycle triggered by HPD polling.
+ */
+ intel_display_power_flush_work(dev_priv);
+
+ if (!intel_dp_is_edp(intel_dp))
+ drm_dp_set_subconnector_property(connector,
+ status,
+ intel_dp->dpcd,
+ intel_dp->downstream_ports);
+ return status;
+}
+
+static void
+intel_dp_force(struct drm_connector *connector)
+{
+ struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct intel_encoder *intel_encoder = &dig_port->base;
+ struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
+ enum intel_display_power_domain aux_domain =
+ intel_aux_power_domain(dig_port);
+ intel_wakeref_t wakeref;
+
+ drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
+ connector->base.id, connector->name);
+ intel_dp_unset_edid(intel_dp);
+
+ if (connector->status != connector_status_connected)
+ return;
+
+ wakeref = intel_display_power_get(dev_priv, aux_domain);
+
+ intel_dp_set_edid(intel_dp);
+
+ intel_display_power_put(dev_priv, aux_domain, wakeref);
+}
+
+static int intel_dp_get_modes(struct drm_connector *connector)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct edid *edid;
+ int num_modes = 0;
+
+ edid = intel_connector->detect_edid;
+ if (edid)
+ num_modes = intel_connector_update_modes(connector, edid);
+
+ /* Also add fixed mode, which may or may not be present in EDID */
+ if (intel_dp_is_edp(intel_attached_dp(intel_connector)))
+ num_modes += intel_panel_get_modes(intel_connector);
+
+ if (num_modes)
+ return num_modes;
+
+ if (!edid) {
+ struct intel_dp *intel_dp = intel_attached_dp(intel_connector);
+ struct drm_display_mode *mode;
+
+ mode = drm_dp_downstream_mode(connector->dev,
+ intel_dp->dpcd,
+ intel_dp->downstream_ports);
+ if (mode) {
+ drm_mode_probed_add(connector, mode);
+ num_modes++;
+ }
+ }
+
+ return num_modes;
+}
+
+static int
+intel_dp_connector_register(struct drm_connector *connector)
+{
+ struct drm_i915_private *i915 = to_i915(connector->dev);
+ struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct intel_lspcon *lspcon = &dig_port->lspcon;
+ int ret;
+
+ ret = intel_connector_register(connector);
+ if (ret)
+ return ret;
+
+ drm_dbg_kms(&i915->drm, "registering %s bus for %s\n",
+ intel_dp->aux.name, connector->kdev->kobj.name);
+
+ intel_dp->aux.dev = connector->kdev;
+ ret = drm_dp_aux_register(&intel_dp->aux);
+ if (!ret)
+ drm_dp_cec_register_connector(&intel_dp->aux, connector);
+
+ if (!intel_bios_is_lspcon_present(i915, dig_port->base.port))
+ return ret;
+
+ /*
+ * TODO: Clean this up to handle lspcon init and resume in a more
+ * efficient and streamlined fashion.
+ */
+ if (lspcon_init(dig_port)) {
+ lspcon_detect_hdr_capability(lspcon);
+ if (lspcon->hdr_supported)
+ drm_connector_attach_hdr_output_metadata_property(connector);
+ }
+
+ return ret;
+}
+
+static void
+intel_dp_connector_unregister(struct drm_connector *connector)
+{
+ struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
+
+ drm_dp_cec_unregister_connector(&intel_dp->aux);
+ drm_dp_aux_unregister(&intel_dp->aux);
+ intel_connector_unregister(connector);
+}
+
+void intel_dp_encoder_flush_work(struct drm_encoder *encoder)
+{
+ struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder));
+ struct intel_dp *intel_dp = &dig_port->dp;
+
+ intel_dp_mst_encoder_cleanup(dig_port);
+
+ intel_pps_vdd_off_sync(intel_dp);
+
+ intel_dp_aux_fini(intel_dp);
+}
+
+void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
+
+ intel_pps_vdd_off_sync(intel_dp);
+}
+
+void intel_dp_encoder_shutdown(struct intel_encoder *intel_encoder)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
+
+ intel_pps_wait_power_cycle(intel_dp);
+}
+
+static int intel_modeset_tile_group(struct intel_atomic_state *state,
+ int tile_group_id)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct drm_connector_list_iter conn_iter;
+ struct drm_connector *connector;
+ int ret = 0;
+
+ drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ struct drm_connector_state *conn_state;
+ struct intel_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
+
+ if (!connector->has_tile ||
+ connector->tile_group->id != tile_group_id)
+ continue;
+
+ conn_state = drm_atomic_get_connector_state(&state->base,
+ connector);
+ if (IS_ERR(conn_state)) {
+ ret = PTR_ERR(conn_state);
+ break;
+ }
+
+ crtc = to_intel_crtc(conn_state->crtc);
+
+ if (!crtc)
+ continue;
+
+ crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
+ crtc_state->uapi.mode_changed = true;
+
+ ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
+ if (ret)
+ break;
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ return ret;
+}
+
+static int intel_modeset_affected_transcoders(struct intel_atomic_state *state, u8 transcoders)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc *crtc;
+
+ if (transcoders == 0)
+ return 0;
+
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ struct intel_crtc_state *crtc_state;
+ int ret;
+
+ crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ if (!crtc_state->hw.enable)
+ continue;
+
+ if (!(transcoders & BIT(crtc_state->cpu_transcoder)))
+ continue;
+
+ crtc_state->uapi.mode_changed = true;
+
+ ret = drm_atomic_add_affected_connectors(&state->base, &crtc->base);
+ if (ret)
+ return ret;
+
+ ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
+ if (ret)
+ return ret;
+
+ transcoders &= ~BIT(crtc_state->cpu_transcoder);
+ }
+
+ drm_WARN_ON(&dev_priv->drm, transcoders != 0);
+
+ return 0;
+}
+
+static int intel_modeset_synced_crtcs(struct intel_atomic_state *state,
+ struct drm_connector *connector)
+{
+ const struct drm_connector_state *old_conn_state =
+ drm_atomic_get_old_connector_state(&state->base, connector);
+ const struct intel_crtc_state *old_crtc_state;
+ struct intel_crtc *crtc;
+ u8 transcoders;
+
+ crtc = to_intel_crtc(old_conn_state->crtc);
+ if (!crtc)
+ return 0;
+
+ old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
+
+ if (!old_crtc_state->hw.active)
+ return 0;
+
+ transcoders = old_crtc_state->sync_mode_slaves_mask;
+ if (old_crtc_state->master_transcoder != INVALID_TRANSCODER)
+ transcoders |= BIT(old_crtc_state->master_transcoder);
+
+ return intel_modeset_affected_transcoders(state,
+ transcoders);
+}
+
+static int intel_dp_connector_atomic_check(struct drm_connector *conn,
+ struct drm_atomic_state *_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(conn->dev);
+ struct intel_atomic_state *state = to_intel_atomic_state(_state);
+ struct drm_connector_state *conn_state = drm_atomic_get_new_connector_state(_state, conn);
+ struct intel_connector *intel_conn = to_intel_connector(conn);
+ struct intel_dp *intel_dp = enc_to_intel_dp(intel_conn->encoder);
+ int ret;
+
+ ret = intel_digital_connector_atomic_check(conn, &state->base);
+ if (ret)
+ return ret;
+
+ if (intel_dp_mst_source_support(intel_dp)) {
+ ret = drm_dp_mst_root_conn_atomic_check(conn_state, &intel_dp->mst_mgr);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * We don't enable port sync on BDW due to missing w/as and
+ * due to not having adjusted the modeset sequence appropriately.
+ */
+ if (DISPLAY_VER(dev_priv) < 9)
+ return 0;
+
+ if (!intel_connector_needs_modeset(state, conn))
+ return 0;
+
+ if (conn->has_tile) {
+ ret = intel_modeset_tile_group(state, conn->tile_group->id);
+ if (ret)
+ return ret;
+ }
+
+ return intel_modeset_synced_crtcs(state, conn);
+}
+
+static void intel_dp_oob_hotplug_event(struct drm_connector *connector)
+{
+ struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
+ struct drm_i915_private *i915 = to_i915(connector->dev);
+
+ spin_lock_irq(&i915->irq_lock);
+ i915->display.hotplug.event_bits |= BIT(encoder->hpd_pin);
+ spin_unlock_irq(&i915->irq_lock);
+ queue_delayed_work(system_wq, &i915->display.hotplug.hotplug_work, 0);
+}
+
+static const struct drm_connector_funcs intel_dp_connector_funcs = {
+ .force = intel_dp_force,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .atomic_get_property = intel_digital_connector_atomic_get_property,
+ .atomic_set_property = intel_digital_connector_atomic_set_property,
+ .late_register = intel_dp_connector_register,
+ .early_unregister = intel_dp_connector_unregister,
+ .destroy = intel_connector_destroy,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_duplicate_state = intel_digital_connector_duplicate_state,
+ .oob_hotplug_event = intel_dp_oob_hotplug_event,
+};
+
+static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
+ .detect_ctx = intel_dp_detect,
+ .get_modes = intel_dp_get_modes,
+ .mode_valid = intel_dp_mode_valid,
+ .atomic_check = intel_dp_connector_atomic_check,
+};
+
+enum irqreturn
+intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_dp *intel_dp = &dig_port->dp;
+
+ if (dig_port->base.type == INTEL_OUTPUT_EDP &&
+ (long_hpd || !intel_pps_have_panel_power_or_vdd(intel_dp))) {
+ /*
+ * vdd off can generate a long/short pulse on eDP which
+ * would require vdd on to handle it, and thus we
+ * would end up in an endless cycle of
+ * "vdd off -> long/short hpd -> vdd on -> detect -> vdd off -> ..."
+ */
+ drm_dbg_kms(&i915->drm,
+ "ignoring %s hpd on eDP [ENCODER:%d:%s]\n",
+ long_hpd ? "long" : "short",
+ dig_port->base.base.base.id,
+ dig_port->base.base.name);
+ return IRQ_HANDLED;
+ }
+
+ drm_dbg_kms(&i915->drm, "got hpd irq on [ENCODER:%d:%s] - %s\n",
+ dig_port->base.base.base.id,
+ dig_port->base.base.name,
+ long_hpd ? "long" : "short");
+
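+ /*
+ * Returning IRQ_NONE tells the caller to fall back to the full
+ * hotplug processing path; IRQ_HANDLED means the pulse was fully
+ * dealt with here.
+ */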
+ if (long_hpd) {
+ intel_dp->reset_link_params = true;
+ return IRQ_NONE;
+ }
+
+ if (intel_dp->is_mst) {
+ if (!intel_dp_check_mst_status(intel_dp))
+ return IRQ_NONE;
+ } else if (!intel_dp_short_pulse(intel_dp)) {
+ return IRQ_NONE;
+ }
+
+ return IRQ_HANDLED;
+}
+
+/* Check the platform defaults and the VBT to see whether this port is eDP */
+bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
+{
+ /*
+ * eDP is not supported on g4x, so bail out early just
+ * for a bit of extra safety in case the VBT is bonkers.
+ */
+ if (DISPLAY_VER(dev_priv) < 5)
+ return false;
+
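+ /* On pre-SKL platforms, port A (when present) is always eDP. */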
+ if (DISPLAY_VER(dev_priv) < 9 && port == PORT_A)
+ return true;
+
+ return intel_bios_is_port_edp(dev_priv, port);
+}
+
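+/*
+ * Can this port emit the gamut metadata (HDR) DIP? With LSPCON the
+ * HDR metadata is handled via the LSPCON path instead, and before
+ * ICL port A lacks the capability.
+ */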
+static bool
+has_gamut_metadata_dip(struct drm_i915_private *i915, enum port port)
+{
+ if (intel_bios_is_lspcon_present(i915, port))
+ return false;
+
+ if (DISPLAY_VER(i915) >= 11)
+ return true;
+
+ if (port == PORT_A)
+ return false;
+
+ if (IS_HASWELL(i915) || IS_BROADWELL(i915) ||
+ DISPLAY_VER(i915) >= 9)
+ return true;
+
+ return false;
+}
+
+static void
+intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
+ enum port port = dp_to_dig_port(intel_dp)->base.port;
+
+ if (!intel_dp_is_edp(intel_dp))
+ drm_connector_attach_dp_subconnector_property(connector);
+
+ if (!IS_G4X(dev_priv) && port != PORT_A)
+ intel_attach_force_audio_property(connector);
+
+ intel_attach_broadcast_rgb_property(connector);
+ if (HAS_GMCH(dev_priv))
+ drm_connector_attach_max_bpc_property(connector, 6, 10);
+ else if (DISPLAY_VER(dev_priv) >= 5)
+ drm_connector_attach_max_bpc_property(connector, 6, 12);
+
+ /* Register the HDMI colorspace property in the LSPCON case */
+ if (intel_bios_is_lspcon_present(dev_priv, port)) {
+ drm_connector_attach_content_type_property(connector);
+ intel_attach_hdmi_colorspace_property(connector);
+ } else {
+ intel_attach_dp_colorspace_property(connector);
+ }
+
+ if (has_gamut_metadata_dip(dev_priv, port))
+ drm_connector_attach_hdr_output_metadata_property(connector);
+
+ if (intel_dp_is_edp(intel_dp)) {
+ u32 allowed_scalers;
+
+ allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN);
+ if (!HAS_GMCH(dev_priv))
+ allowed_scalers |= BIT(DRM_MODE_SCALE_CENTER);
+
+ drm_connector_attach_scaling_mode_property(connector, allowed_scalers);
+
+ connector->state->scaling_mode = DRM_MODE_SCALE_ASPECT;
+ }
+
+ if (HAS_VRR(dev_priv))
+ drm_connector_attach_vrr_capable_property(connector);
+}
+
+static void
+intel_edp_add_properties(struct intel_dp *intel_dp)
+{
+ struct intel_connector *connector = intel_dp->attached_connector;
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ const struct drm_display_mode *fixed_mode =
+ intel_panel_preferred_fixed_mode(connector);
+
+ if (!fixed_mode)
+ return;
+
+ drm_connector_set_panel_orientation_with_quirk(&connector->base,
+ i915->display.vbt.orientation,
+ fixed_mode->hdisplay,
+ fixed_mode->vdisplay);
+}
+
+static bool intel_edp_init_connector(struct intel_dp *intel_dp,
+ struct intel_connector *intel_connector)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct drm_device *dev = &dev_priv->drm;
+ struct drm_connector *connector = &intel_connector->base;
+ struct drm_display_mode *fixed_mode;
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ bool has_dpcd;
+ enum pipe pipe = INVALID_PIPE;
+ struct edid *edid;
+
+ if (!intel_dp_is_edp(intel_dp))
+ return true;
+
+ /*
+ * On IBX/CPT we may get here with LVDS already registered. Since the
+ * driver uses the only internal power sequencer available for both
+ * eDP and LVDS, bail out early in this case to prevent interfering
+ * with an already powered-on LVDS power sequencer.
+ */
+ if (intel_get_lvds_encoder(dev_priv)) {
+ drm_WARN_ON(dev,
+ !(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
+ drm_info(&dev_priv->drm,
+ "LVDS was detected, not registering eDP\n");
+
+ return false;
+ }
+
+ intel_bios_init_panel_early(dev_priv, &intel_connector->panel,
+ encoder->devdata);
+
+ intel_pps_init(intel_dp);
+
+ /* Cache DPCD and EDID for edp. */
+ has_dpcd = intel_edp_init_dpcd(intel_dp);
+
+ if (!has_dpcd) {
+ /* if this fails, presume the device is a ghost */
+ drm_info(&dev_priv->drm,
+ "failed to retrieve link info, disabling eDP\n");
+ goto out_vdd_off;
+ }
+
+ mutex_lock(&dev->mode_config.mutex);
+ edid = drm_get_edid(connector, &intel_dp->aux.ddc);
+ if (!edid) {
+ /* Fallback to EDID from ACPI OpRegion, if any */
+ edid = intel_opregion_get_edid(intel_connector);
+ if (edid)
+ drm_dbg_kms(&dev_priv->drm,
+ "[CONNECTOR:%d:%s] Using OpRegion EDID\n",
+ connector->base.id, connector->name);
+ }
+ if (edid) {
+ if (drm_add_edid_modes(connector, edid)) {
+ drm_connector_update_edid_property(connector, edid);
+ } else {
+ kfree(edid);
+ edid = ERR_PTR(-EINVAL);
+ }
+ } else {
+ edid = ERR_PTR(-ENOENT);
+ }
+ intel_connector->edid = edid;
+
+ intel_bios_init_panel_late(dev_priv, &intel_connector->panel,
+ encoder->devdata, IS_ERR(edid) ? NULL : edid);
+
+ intel_panel_add_edid_fixed_modes(intel_connector, true);
+
+ /* MSO requires information from the EDID */
+ intel_edp_mso_init(intel_dp);
+
+ /* multiply the mode clock and horizontal timings for MSO */
+ list_for_each_entry(fixed_mode, &intel_connector->panel.fixed_modes, head)
+ intel_edp_mso_mode_fixup(intel_connector, fixed_mode);
+
+ /* fall back to the VBT fixed mode, if available, for eDP */
+ if (!intel_panel_preferred_fixed_mode(intel_connector))
+ intel_panel_add_vbt_lfp_fixed_mode(intel_connector);
+
+ mutex_unlock(&dev->mode_config.mutex);
+
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ /*
+ * Figure out the current pipe for the initial backlight setup.
+ * If the current pipe isn't valid, try the PPS pipe, and if that
+ * fails just assume pipe A.
+ */
+ pipe = vlv_active_pipe(intel_dp);
+
+ if (pipe != PIPE_A && pipe != PIPE_B)
+ pipe = intel_dp->pps.pps_pipe;
+
+ if (pipe != PIPE_A && pipe != PIPE_B)
+ pipe = PIPE_A;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "using pipe %c for initial backlight setup\n",
+ pipe_name(pipe));
+ }
+
+ intel_panel_init(intel_connector);
+
+ intel_backlight_setup(intel_connector, pipe);
+
+ intel_edp_add_properties(intel_dp);
+
+ intel_pps_init_late(intel_dp);
+
+ return true;
+
+out_vdd_off:
+ intel_pps_vdd_off_sync(intel_dp);
+
+ return false;
+}
+
+static void intel_dp_modeset_retry_work_fn(struct work_struct *work)
+{
+ struct intel_connector *intel_connector;
+ struct drm_connector *connector;
+
+ intel_connector = container_of(work, typeof(*intel_connector),
+ modeset_retry_work);
+ connector = &intel_connector->base;
+ drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s]\n", connector->base.id,
+ connector->name);
+
+ /* Grab the locks before changing the connector property */
+ mutex_lock(&connector->dev->mode_config.mutex);
+ /*
+ * Set the connector link status to BAD and send a uevent to notify
+ * userspace to do a modeset.
+ */
+ drm_connector_set_link_status_property(connector,
+ DRM_MODE_LINK_STATUS_BAD);
+ mutex_unlock(&connector->dev->mode_config.mutex);
+ /* Send Hotplug uevent so userspace can reprobe */
+ drm_kms_helper_connector_hotplug_event(connector);
+}
+
+bool
+intel_dp_init_connector(struct intel_digital_port *dig_port,
+ struct intel_connector *intel_connector)
+{
+ struct drm_connector *connector = &intel_connector->base;
+ struct intel_dp *intel_dp = &dig_port->dp;
+ struct intel_encoder *intel_encoder = &dig_port->base;
+ struct drm_device *dev = intel_encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ enum port port = intel_encoder->port;
+ enum phy phy = intel_port_to_phy(dev_priv, port);
+ int type;
+
+ /* Initialize the work used for modeset retries on link training failure */
+ INIT_WORK(&intel_connector->modeset_retry_work,
+ intel_dp_modeset_retry_work_fn);
+
+ if (drm_WARN(dev, dig_port->max_lanes < 1,
+ "Not enough lanes (%d) for DP on [ENCODER:%d:%s]\n",
+ dig_port->max_lanes, intel_encoder->base.base.id,
+ intel_encoder->base.name))
+ return false;
+
+ intel_dp->reset_link_params = true;
+ intel_dp->pps.pps_pipe = INVALID_PIPE;
+ intel_dp->pps.active_pipe = INVALID_PIPE;
+
+ /* Preserve the current hw state. */
+ intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg);
+ intel_dp->attached_connector = intel_connector;
+
+ if (intel_dp_is_port_edp(dev_priv, port)) {
+ /*
+ * Currently we don't support eDP on TypeC ports, although in
+ * theory it could work on TypeC legacy ports.
+ */
+ drm_WARN_ON(dev, intel_phy_is_tc(dev_priv, phy));
+ type = DRM_MODE_CONNECTOR_eDP;
+ intel_encoder->type = INTEL_OUTPUT_EDP;
+
+ /* eDP only on port B and/or C on vlv/chv */
+ if (drm_WARN_ON(dev, (IS_VALLEYVIEW(dev_priv) ||
+ IS_CHERRYVIEW(dev_priv)) &&
+ port != PORT_B && port != PORT_C))
+ return false;
+ } else {
+ type = DRM_MODE_CONNECTOR_DisplayPort;
+ }
+
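+ /*
+ * Start from the spec-defined default sink rates and lane count;
+ * these are replaced once the real DPCD capabilities are read.
+ */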
+ intel_dp_set_default_sink_rates(intel_dp);
+ intel_dp_set_default_max_sink_lane_count(intel_dp);
+
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ intel_dp->pps.active_pipe = vlv_active_pipe(intel_dp);
+
+ drm_dbg_kms(&dev_priv->drm,
+ "Adding %s connector on [ENCODER:%d:%s]\n",
+ type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
+ intel_encoder->base.base.id, intel_encoder->base.name);
+
+ drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
+ drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
+
+ if (!HAS_GMCH(dev_priv))
+ connector->interlace_allowed = true;
+ connector->doublescan_allowed = 0;
+
+ intel_connector->polled = DRM_CONNECTOR_POLL_HPD;
+
+ intel_dp_aux_init(intel_dp);
+
+ intel_connector_attach_encoder(intel_connector, intel_encoder);
+
+ if (HAS_DDI(dev_priv))
+ intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
+ else
+ intel_connector->get_hw_state = intel_connector_get_hw_state;
+
+ if (!intel_edp_init_connector(intel_dp, intel_connector)) {
+ intel_dp_aux_fini(intel_dp);
+ goto fail;
+ }
+
+ intel_dp_set_source_rates(intel_dp);
+ intel_dp_set_common_rates(intel_dp);
+ intel_dp_reset_max_link_params(intel_dp);
+
+ /* init MST on ports that can support it */
+ intel_dp_mst_encoder_init(dig_port,
+ intel_connector->base.base.id);
+
+ intel_dp_add_properties(intel_dp, connector);
+
+ if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(intel_dp)) {
+ int ret = intel_dp_hdcp_init(dig_port, intel_connector);
+ if (ret)
+ drm_dbg_kms(&dev_priv->drm,
+ "HDCP init failed, skipping.\n");
+ }
+
+ /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
+ * 0xd. Failure to do so will result in spurious interrupts being
+ * generated on the port when a cable is not attached.
+ */
+ if (IS_G45(dev_priv)) {
+ u32 temp = intel_de_read(dev_priv, PEG_BAND_GAP_DATA);
+ intel_de_write(dev_priv, PEG_BAND_GAP_DATA,
+ (temp & ~0xf) | 0xd);
+ }
+
+ intel_dp->frl.is_trained = false;
+ intel_dp->frl.trained_rate_gbps = 0;
+
+ intel_psr_init(intel_dp);
+
+ return true;
+
+fail:
+ drm_connector_cleanup(connector);
+
+ return false;
+}
+
+void intel_dp_mst_suspend(struct drm_i915_private *dev_priv)
+{
+ struct intel_encoder *encoder;
+
+ if (!HAS_DISPLAY(dev_priv))
+ return;
+
+ for_each_intel_encoder(&dev_priv->drm, encoder) {
+ struct intel_dp *intel_dp;
+
+ if (encoder->type != INTEL_OUTPUT_DDI)
+ continue;
+
+ intel_dp = enc_to_intel_dp(encoder);
+
+ if (!intel_dp_mst_source_support(intel_dp))
+ continue;
+
+ if (intel_dp->is_mst)
+ drm_dp_mst_topology_mgr_suspend(&intel_dp->mst_mgr);
+ }
+}
+
+void intel_dp_mst_resume(struct drm_i915_private *dev_priv)
+{
+ struct intel_encoder *encoder;
+
+ if (!HAS_DISPLAY(dev_priv))
+ return;
+
+ for_each_intel_encoder(&dev_priv->drm, encoder) {
+ struct intel_dp *intel_dp;
+ int ret;
+
+ if (encoder->type != INTEL_OUTPUT_DDI)
+ continue;
+
+ intel_dp = enc_to_intel_dp(encoder);
+
+ if (!intel_dp_mst_source_support(intel_dp))
+ continue;
+
+ ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr,
+ true);
+ if (ret) {
+ intel_dp->is_mst = false;
+ drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
+ false);
+ }
+ }
+}
diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h
new file mode 100644
index 000000000..a54902c71
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dp.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_DP_H__
+#define __INTEL_DP_H__
+
+#include <linux/types.h>
+
+enum intel_output_format;
+enum pipe;
+enum port;
+struct drm_connector_state;
+struct drm_encoder;
+struct drm_i915_private;
+struct drm_modeset_acquire_ctx;
+struct drm_dp_vsc_sdp;
+struct intel_atomic_state;
+struct intel_connector;
+struct intel_crtc_state;
+struct intel_digital_port;
+struct intel_dp;
+struct intel_encoder;
+
+struct link_config_limits {
+ int min_rate, max_rate;
+ int min_lane_count, max_lane_count;
+ int min_bpp, max_bpp;
+};
+
+void intel_edp_fixup_vbt_bpp(struct intel_encoder *encoder, int pipe_bpp);
+void intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
+ struct intel_crtc_state *pipe_config,
+ struct link_config_limits *limits);
+bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
+int intel_dp_min_bpp(enum intel_output_format output_format);
+bool intel_dp_init_connector(struct intel_digital_port *dig_port,
+ struct intel_connector *intel_connector);
+void intel_dp_set_link_params(struct intel_dp *intel_dp,
+ int link_rate, int lane_count);
+int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
+ int link_rate, u8 lane_count);
+int intel_dp_retrain_link(struct intel_encoder *encoder,
+ struct drm_modeset_acquire_ctx *ctx);
+void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode);
+void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state);
+void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ bool enable);
+void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder);
+void intel_dp_encoder_shutdown(struct intel_encoder *intel_encoder);
+void intel_dp_encoder_flush_work(struct drm_encoder *encoder);
+int intel_dp_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state);
+bool intel_dp_is_edp(struct intel_dp *intel_dp);
+bool intel_dp_is_uhbr(const struct intel_crtc_state *crtc_state);
+bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
+enum irqreturn intel_dp_hpd_pulse(struct intel_digital_port *dig_port,
+ bool long_hpd);
+void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
+void intel_edp_backlight_off(const struct drm_connector_state *conn_state);
+void intel_dp_mst_suspend(struct drm_i915_private *dev_priv);
+void intel_dp_mst_resume(struct drm_i915_private *dev_priv);
+int intel_dp_max_link_rate(struct intel_dp *intel_dp);
+int intel_dp_max_lane_count(struct intel_dp *intel_dp);
+int intel_dp_rate_select(struct intel_dp *intel_dp, int rate);
+
+void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
+ u8 *link_bw, u8 *rate_select);
+bool intel_dp_source_supports_tps3(struct drm_i915_private *i915);
+bool intel_dp_source_supports_tps4(struct drm_i915_private *i915);
+
+bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp);
+int intel_dp_link_required(int pixel_clock, int bpp);
+int intel_dp_max_data_rate(int max_link_rate, int max_lanes);
+bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp);
+bool intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
+void intel_dp_compute_psr_vsc_sdp(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state,
+ struct drm_dp_vsc_sdp *vsc);
+void intel_write_dp_vsc_sdp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_dp_vsc_sdp *vsc);
+void intel_dp_set_infoframes(struct intel_encoder *encoder, bool enable,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
+void intel_read_dp_sdp(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ unsigned int type);
+bool intel_digital_port_connected(struct intel_encoder *encoder);
+
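+/* Mask (within the 4 possible lanes) of the lanes a lane_count-lane link leaves unused */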
+static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
+{
+ return ~((1 << lane_count) - 1) & 0xf;
+}
+
+u32 intel_dp_mode_to_fec_clock(u32 mode_clock);
+
+void intel_ddi_update_pipe(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
+
+bool intel_dp_initial_fastset_check(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state);
+void intel_dp_sync_state(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+
+void intel_dp_check_frl_training(struct intel_dp *intel_dp);
+void intel_dp_pcon_dsc_configure(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state);
+void intel_dp_phy_test(struct intel_encoder *encoder);
+
+void intel_dp_wait_source_oui(struct intel_dp *intel_dp);
+
+#endif /* __INTEL_DP_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.c b/drivers/gpu/drm/i915/display/intel_dp_aux.c
new file mode 100644
index 000000000..ab357161c
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux.c
@@ -0,0 +1,771 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020-2021 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "i915_trace.h"
+#include "intel_display_types.h"
+#include "intel_dp_aux.h"
+#include "intel_pps.h"
+#include "intel_tc.h"
+
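+/* Pack up to 4 bytes, MSB first, into one 32-bit AUX data register value. */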
+static u32 intel_dp_aux_pack(const u8 *src, int src_bytes)
+{
+ int i;
+ u32 v = 0;
+
+ if (src_bytes > 4)
+ src_bytes = 4;
+ for (i = 0; i < src_bytes; i++)
+ v |= ((u32)src[i]) << ((3 - i) * 8);
+ return v;
+}
+
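+/* Unpack a 32-bit AUX data register value into up to 4 bytes, MSB first. */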
+static void intel_dp_aux_unpack(u32 src, u8 *dst, int dst_bytes)
+{
+ int i;
+
+ if (dst_bytes > 4)
+ dst_bytes = 4;
+ for (i = 0; i < dst_bytes; i++)
+ dst[i] = src >> ((3 - i) * 8);
+}
+
+static u32
+intel_dp_aux_wait_done(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
+ const unsigned int timeout_ms = 10;
+ u32 status;
+ bool done;
+
+#define C (((status = intel_uncore_read_notrace(&i915->uncore, ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
+ done = wait_event_timeout(i915->display.gmbus.wait_queue, C,
+ msecs_to_jiffies_timeout(timeout_ms));
+
+ /* just trace the final value */
+ trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);
+
+ if (!done)
+ drm_err(&i915->drm,
+ "%s: did not complete or timeout within %ums (status 0x%08x)\n",
+ intel_dp->aux.name, timeout_ms, status);
+#undef C
+
+ return status;
+}
+
+static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ if (index)
+ return 0;
+
+ /*
+ * The clock divider is based on the hrawclk, and would like to run at
+ * 2MHz. So, take the hrawclk value, divide by 2000, and use that.
+ */
+ return DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq, 2000);
+}
+
+static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ u32 freq;
+
+ if (index)
+ return 0;
+
+ /*
+ * The clock divider is based on the cdclk or PCH rawclk, and would
+ * like to run at 2MHz. So, take the cdclk or PCH rawclk value,
+ * divide by 2000, and use that.
+ */
+ if (dig_port->aux_ch == AUX_CH_A)
+ freq = dev_priv->display.cdclk.hw.cdclk;
+ else
+ freq = RUNTIME_INFO(dev_priv)->rawclk_freq;
+ return DIV_ROUND_CLOSEST(freq, 2000);
+}
+
+static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+
+ if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) {
+ /* Workaround for non-ULT HSW */
+ switch (index) {
+ case 0: return 63;
+ case 1: return 72;
+ default: return 0;
+ }
+ }
+
+ return ilk_get_aux_clock_divider(intel_dp, index);
+}
+
+static u32 skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
+{
+ /*
+ * SKL doesn't need us to program the AUX clock divider (the hardware
+ * derives the clock from CDCLK automatically). We still implement the
+ * get_aux_clock_divider vfunc to plug into the existing code.
+ */
+ return index ? 0 : 1;
+}
+
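+/*
+ * Length of the AUX SYNC pattern in pulses: precharge (the DP spec
+ * allows 10-16) followed by the preamble.
+ */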
+static int intel_dp_aux_sync_len(void)
+{
+ int precharge = 16; /* 10-16 */
+ int preamble = 16;
+
+ return precharge + preamble;
+}
+
+static int intel_dp_aux_fw_sync_len(void)
+{
+ int precharge = 10; /* 10-16 */
+ int preamble = 8;
+
+ return precharge + preamble;
+}
+
+static int g4x_dp_aux_precharge_len(void)
+{
+ int precharge_min = 10;
+ int preamble = 16;
+
+ /* HW wants the length of the extra precharge in 2us units */
+ return (intel_dp_aux_sync_len() -
+ precharge_min - preamble) / 2;
+}
+
+static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
+ int send_bytes,
+ u32 aux_clock_divider)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *dev_priv =
+ to_i915(dig_port->base.base.dev);
+ u32 timeout;
+
+ /* Max timeout value on G4x-BDW: 1.6ms */
+ if (IS_BROADWELL(dev_priv))
+ timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
+ else
+ timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
+
+ return DP_AUX_CH_CTL_SEND_BUSY |
+ DP_AUX_CH_CTL_DONE |
+ DP_AUX_CH_CTL_INTERRUPT |
+ DP_AUX_CH_CTL_TIME_OUT_ERROR |
+ timeout |
+ DP_AUX_CH_CTL_RECEIVE_ERROR |
+ (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
+ (g4x_dp_aux_precharge_len() << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
+ (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
+}
+
+static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
+ int send_bytes,
+ u32 unused)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ u32 ret;
+
+ /*
+ * Max timeout values:
+ * SKL-GLK: 1.6ms
+ * ICL+: 4ms
+ */
+ ret = DP_AUX_CH_CTL_SEND_BUSY |
+ DP_AUX_CH_CTL_DONE |
+ DP_AUX_CH_CTL_INTERRUPT |
+ DP_AUX_CH_CTL_TIME_OUT_ERROR |
+ DP_AUX_CH_CTL_TIME_OUT_MAX |
+ DP_AUX_CH_CTL_RECEIVE_ERROR |
+ (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
+ DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(intel_dp_aux_fw_sync_len()) |
+ DP_AUX_CH_CTL_SYNC_PULSE_SKL(intel_dp_aux_sync_len());
+
+ if (intel_tc_port_in_tbt_alt_mode(dig_port))
+ ret |= DP_AUX_CH_CTL_TBT_IO;
+
+ /*
+ * Power request bit is already set during aux power well enable.
+ * Preserve the bit across aux transactions.
+ */
+ if (DISPLAY_VER(i915) >= 14)
+ ret |= XELPDP_DP_AUX_CH_CTL_POWER_REQUEST;
+
+ return ret;
+}
+
+static int
+intel_dp_aux_xfer(struct intel_dp *intel_dp,
+ const u8 *send, int send_bytes,
+ u8 *recv, int recv_size,
+ u32 aux_send_ctl_flags)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *i915 =
+ to_i915(dig_port->base.base.dev);
+ struct intel_uncore *uncore = &i915->uncore;
+ enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
+ bool is_tc_port = intel_phy_is_tc(i915, phy);
+ i915_reg_t ch_ctl, ch_data[5];
+ u32 aux_clock_divider;
+ enum intel_display_power_domain aux_domain;
+ intel_wakeref_t aux_wakeref;
+ intel_wakeref_t pps_wakeref;
+ int i, ret, recv_bytes;
+ int try, clock = 0;
+ u32 status;
+ bool vdd;
+
+ ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
+ for (i = 0; i < ARRAY_SIZE(ch_data); i++)
+ ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);
+
+ if (is_tc_port)
+ intel_tc_port_lock(dig_port);
+
+ aux_domain = intel_aux_power_domain(dig_port);
+
+ aux_wakeref = intel_display_power_get(i915, aux_domain);
+ pps_wakeref = intel_pps_lock(intel_dp);
+
+ /*
+ * We will be called with VDD already enabled for dpcd/edid/oui reads.
+ * In such cases we want to leave VDD enabled and it's up to upper layers
+ * to turn it off. But for eg. i2c-dev access we need to turn it on/off
+ * ourselves.
+ */
+ vdd = intel_pps_vdd_on_unlocked(intel_dp);
+
+ /*
+ * dp aux is extremely sensitive to irq latency, hence request the
+ * lowest possible wakeup latency and so prevent the cpu from going into
+ * deep sleep states.
+ */
+ cpu_latency_qos_update_request(&intel_dp->pm_qos, 0);
+
+ intel_pps_check_power_unlocked(intel_dp);
+
+ /* Try to wait for any previous AUX channel activity */
+ for (try = 0; try < 3; try++) {
+ status = intel_uncore_read_notrace(uncore, ch_ctl);
+ if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
+ break;
+ msleep(1);
+ }
+ /* just trace the final value */
+ trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);
+
+ if (try == 3) {
+ const u32 status = intel_uncore_read(uncore, ch_ctl);
+
+ if (status != intel_dp->aux_busy_last_status) {
+ drm_WARN(&i915->drm, 1,
+ "%s: not started (status 0x%08x)\n",
+ intel_dp->aux.name, status);
+ intel_dp->aux_busy_last_status = status;
+ }
+
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /* Only 5 data registers! */
+ if (drm_WARN_ON(&i915->drm, send_bytes > 20 || recv_size > 20)) {
+ ret = -E2BIG;
+ goto out;
+ }
+
+ while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
+ u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
+ send_bytes,
+ aux_clock_divider);
+
+ send_ctl |= aux_send_ctl_flags;
+
+ /* Must try at least 3 times according to the DP spec; we use 5 for margin */
+ for (try = 0; try < 5; try++) {
+ /* Load the send data into the aux channel data registers */
+ for (i = 0; i < send_bytes; i += 4)
+ intel_uncore_write(uncore,
+ ch_data[i >> 2],
+ intel_dp_aux_pack(send + i,
+ send_bytes - i));
+
+ /* Send the command and wait for it to complete */
+ intel_uncore_write(uncore, ch_ctl, send_ctl);
+
+ status = intel_dp_aux_wait_done(intel_dp);
+
+ /* Clear done status and any errors */
+ intel_uncore_write(uncore,
+ ch_ctl,
+ status |
+ DP_AUX_CH_CTL_DONE |
+ DP_AUX_CH_CTL_TIME_OUT_ERROR |
+ DP_AUX_CH_CTL_RECEIVE_ERROR);
+
+ /*
+ * Per DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2, a 400us delay
+ * is required for errors and timeouts. Timeout errors from the
+ * HW already meet this requirement, so skip to the next
+ * iteration.
+ */
+ if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
+ continue;
+
+ if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
+ usleep_range(400, 500);
+ continue;
+ }
+ if (status & DP_AUX_CH_CTL_DONE)
+ goto done;
+ }
+ }
+
+ if ((status & DP_AUX_CH_CTL_DONE) == 0) {
+ drm_err(&i915->drm, "%s: not done (status 0x%08x)\n",
+ intel_dp->aux.name, status);
+ ret = -EBUSY;
+ goto out;
+ }
+
+done:
+ /*
+ * Check for timeout or receive error. Timeouts occur when the sink is
+ * not connected.
+ */
+ if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
+ drm_err(&i915->drm, "%s: receive error (status 0x%08x)\n",
+ intel_dp->aux.name, status);
+ ret = -EIO;
+ goto out;
+ }
+
+ /*
+ * Timeouts occur when the device isn't connected, so they're "normal"
+ * -- don't fill the kernel log with these
+ */
+ if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
+ drm_dbg_kms(&i915->drm, "%s: timeout (status 0x%08x)\n",
+ intel_dp->aux.name, status);
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ /* Unload any bytes sent back from the other side */
+ recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
+ DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
+
+ /*
+ * By BSpec: "Message sizes of 0 or >20 are not allowed."
+ * We have no idea what happened, so we return -EBUSY so the
+ * drm layer takes care of the necessary retries.
+ */
+ if (recv_bytes == 0 || recv_bytes > 20) {
+ drm_dbg_kms(&i915->drm,
+ "%s: Forbidden recv_bytes = %d on aux transaction\n",
+ intel_dp->aux.name, recv_bytes);
+ ret = -EBUSY;
+ goto out;
+ }
+
+ if (recv_bytes > recv_size)
+ recv_bytes = recv_size;
+
+ for (i = 0; i < recv_bytes; i += 4)
+ intel_dp_aux_unpack(intel_uncore_read(uncore, ch_data[i >> 2]),
+ recv + i, recv_bytes - i);
+
+ ret = recv_bytes;
+out:
+ cpu_latency_qos_update_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);
+
+ if (vdd)
+ intel_pps_vdd_off_unlocked(intel_dp, false);
+
+ intel_pps_unlock(intel_dp, pps_wakeref);
+ intel_display_power_put_async(i915, aux_domain, aux_wakeref);
+
+ if (is_tc_port)
+ intel_tc_port_unlock(dig_port);
+
+ return ret;
+}
+
+#define BARE_ADDRESS_SIZE 3
+#define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
+
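+/*
+ * Build the 4-byte AUX message header: the request nibble and 20-bit
+ * address, followed by the transfer length minus one.
+ */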
+static void
+intel_dp_aux_header(u8 txbuf[HEADER_SIZE],
+ const struct drm_dp_aux_msg *msg)
+{
+ txbuf[0] = (msg->request << 4) | ((msg->address >> 16) & 0xf);
+ txbuf[1] = (msg->address >> 8) & 0xff;
+ txbuf[2] = msg->address & 0xff;
+ txbuf[3] = msg->size - 1;
+}
+
+static u32 intel_dp_aux_xfer_flags(const struct drm_dp_aux_msg *msg)
+{
+ /*
+ * If we're trying to send the HDCP Aksv, we need to set the Aksv
+ * select bit to inform the hardware to send the Aksv after our header
+ * since we can't access that data from software.
+ */
+ if ((msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_NATIVE_WRITE &&
+ msg->address == DP_AUX_HDCP_AKSV)
+ return DP_AUX_CH_CTL_AUX_AKSV_SELECT;
+
+ return 0;
+}
+
+static ssize_t
+intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
+{
+ struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ u8 txbuf[20], rxbuf[20];
+ size_t txsize, rxsize;
+ u32 flags = intel_dp_aux_xfer_flags(msg);
+ int ret;
+
+ intel_dp_aux_header(txbuf, msg);
+
+ switch (msg->request & ~DP_AUX_I2C_MOT) {
+ case DP_AUX_NATIVE_WRITE:
+ case DP_AUX_I2C_WRITE:
+ case DP_AUX_I2C_WRITE_STATUS_UPDATE:
+ txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
+ rxsize = 2; /* 0 or 1 data bytes */
+
+ if (drm_WARN_ON(&i915->drm, txsize > 20))
+ return -E2BIG;
+
+ drm_WARN_ON(&i915->drm, !msg->buffer != !msg->size);
+
+ if (msg->buffer)
+ memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
+
+ ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
+ rxbuf, rxsize, flags);
+ if (ret > 0) {
+ msg->reply = rxbuf[0] >> 4;
+
+ if (ret > 1) {
+ /* Number of bytes written in a short write. */
+ ret = clamp_t(int, rxbuf[1], 0, msg->size);
+ } else {
+ /* Return payload size. */
+ ret = msg->size;
+ }
+ }
+ break;
+
+ case DP_AUX_NATIVE_READ:
+ case DP_AUX_I2C_READ:
+ txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
+ rxsize = msg->size + 1;
+
+ if (drm_WARN_ON(&i915->drm, rxsize > 20))
+ return -E2BIG;
+
+ ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
+ rxbuf, rxsize, flags);
+ if (ret > 0) {
+ msg->reply = rxbuf[0] >> 4;
+ /*
+ * Assume happy day, and copy the data. The caller is
+ * expected to check msg->reply before touching it.
+ *
+ * Return payload size.
+ */
+ ret--;
+ memcpy(msg->buffer, rxbuf + 1, ret);
+ }
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
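+/*
+ * Per-platform AUX channel register lookup. A bogus aux_ch logs a
+ * MISSING_CASE() and falls back to a safe default channel.
+ */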
+static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum aux_ch aux_ch = dig_port->aux_ch;
+
+ switch (aux_ch) {
+ case AUX_CH_B:
+ case AUX_CH_C:
+ case AUX_CH_D:
+ return DP_AUX_CH_CTL(aux_ch);
+ default:
+ MISSING_CASE(aux_ch);
+ return DP_AUX_CH_CTL(AUX_CH_B);
+ }
+}
+
+static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum aux_ch aux_ch = dig_port->aux_ch;
+
+ switch (aux_ch) {
+ case AUX_CH_B:
+ case AUX_CH_C:
+ case AUX_CH_D:
+ return DP_AUX_CH_DATA(aux_ch, index);
+ default:
+ MISSING_CASE(aux_ch);
+ return DP_AUX_CH_DATA(AUX_CH_B, index);
+ }
+}
+
+static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum aux_ch aux_ch = dig_port->aux_ch;
+
+ switch (aux_ch) {
+ case AUX_CH_A:
+ return DP_AUX_CH_CTL(aux_ch);
+ case AUX_CH_B:
+ case AUX_CH_C:
+ case AUX_CH_D:
+ return PCH_DP_AUX_CH_CTL(aux_ch);
+ default:
+ MISSING_CASE(aux_ch);
+ return DP_AUX_CH_CTL(AUX_CH_A);
+ }
+}
+
+static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum aux_ch aux_ch = dig_port->aux_ch;
+
+ switch (aux_ch) {
+ case AUX_CH_A:
+ return DP_AUX_CH_DATA(aux_ch, index);
+ case AUX_CH_B:
+ case AUX_CH_C:
+ case AUX_CH_D:
+ return PCH_DP_AUX_CH_DATA(aux_ch, index);
+ default:
+ MISSING_CASE(aux_ch);
+ return DP_AUX_CH_DATA(AUX_CH_A, index);
+ }
+}
+
+static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum aux_ch aux_ch = dig_port->aux_ch;
+
+ switch (aux_ch) {
+ case AUX_CH_A:
+ case AUX_CH_B:
+ case AUX_CH_C:
+ case AUX_CH_D:
+ case AUX_CH_E:
+ case AUX_CH_F:
+ return DP_AUX_CH_CTL(aux_ch);
+ default:
+ MISSING_CASE(aux_ch);
+ return DP_AUX_CH_CTL(AUX_CH_A);
+ }
+}
+
+static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum aux_ch aux_ch = dig_port->aux_ch;
+
+ switch (aux_ch) {
+ case AUX_CH_A:
+ case AUX_CH_B:
+ case AUX_CH_C:
+ case AUX_CH_D:
+ case AUX_CH_E:
+ case AUX_CH_F:
+ return DP_AUX_CH_DATA(aux_ch, index);
+ default:
+ MISSING_CASE(aux_ch);
+ return DP_AUX_CH_DATA(AUX_CH_A, index);
+ }
+}
+
+static i915_reg_t tgl_aux_ctl_reg(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum aux_ch aux_ch = dig_port->aux_ch;
+
+ switch (aux_ch) {
+ case AUX_CH_A:
+ case AUX_CH_B:
+ case AUX_CH_C:
+ case AUX_CH_USBC1:
+ case AUX_CH_USBC2:
+ case AUX_CH_USBC3:
+ case AUX_CH_USBC4:
+ case AUX_CH_USBC5: /* aka AUX_CH_D_XELPD */
+ case AUX_CH_USBC6: /* aka AUX_CH_E_XELPD */
+ return DP_AUX_CH_CTL(aux_ch);
+ default:
+ MISSING_CASE(aux_ch);
+ return DP_AUX_CH_CTL(AUX_CH_A);
+ }
+}
+
+static i915_reg_t tgl_aux_data_reg(struct intel_dp *intel_dp, int index)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum aux_ch aux_ch = dig_port->aux_ch;
+
+ switch (aux_ch) {
+ case AUX_CH_A:
+ case AUX_CH_B:
+ case AUX_CH_C:
+ case AUX_CH_USBC1:
+ case AUX_CH_USBC2:
+ case AUX_CH_USBC3:
+ case AUX_CH_USBC4:
+ case AUX_CH_USBC5: /* aka AUX_CH_D_XELPD */
+ case AUX_CH_USBC6: /* aka AUX_CH_E_XELPD */
+ return DP_AUX_CH_DATA(aux_ch, index);
+ default:
+ MISSING_CASE(aux_ch);
+ return DP_AUX_CH_DATA(AUX_CH_A, index);
+ }
+}
+
+static i915_reg_t xelpdp_aux_ctl_reg(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum aux_ch aux_ch = dig_port->aux_ch;
+
+ switch (aux_ch) {
+ case AUX_CH_A:
+ case AUX_CH_B:
+ case AUX_CH_USBC1:
+ case AUX_CH_USBC2:
+ case AUX_CH_USBC3:
+ case AUX_CH_USBC4:
+ return XELPDP_DP_AUX_CH_CTL(aux_ch);
+ default:
+ MISSING_CASE(aux_ch);
+ return XELPDP_DP_AUX_CH_CTL(AUX_CH_A);
+ }
+}
+
+static i915_reg_t xelpdp_aux_data_reg(struct intel_dp *intel_dp, int index)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum aux_ch aux_ch = dig_port->aux_ch;
+
+ switch (aux_ch) {
+ case AUX_CH_A:
+ case AUX_CH_B:
+ case AUX_CH_USBC1:
+ case AUX_CH_USBC2:
+ case AUX_CH_USBC3:
+ case AUX_CH_USBC4:
+ return XELPDP_DP_AUX_CH_DATA(aux_ch, index);
+ default:
+ MISSING_CASE(aux_ch);
+ return XELPDP_DP_AUX_CH_DATA(AUX_CH_A, index);
+ }
+}
+
+void intel_dp_aux_fini(struct intel_dp *intel_dp)
+{
+ if (cpu_latency_qos_request_active(&intel_dp->pm_qos))
+ cpu_latency_qos_remove_request(&intel_dp->pm_qos);
+
+ kfree(intel_dp->aux.name);
+}
+
+void intel_dp_aux_init(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct intel_encoder *encoder = &dig_port->base;
+ enum aux_ch aux_ch = dig_port->aux_ch;
+
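+ /* Pick the AUX register, clock divider and send-ctl hooks per platform. */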
+ if (DISPLAY_VER(dev_priv) >= 14) {
+ intel_dp->aux_ch_ctl_reg = xelpdp_aux_ctl_reg;
+ intel_dp->aux_ch_data_reg = xelpdp_aux_data_reg;
+ } else if (DISPLAY_VER(dev_priv) >= 12) {
+ intel_dp->aux_ch_ctl_reg = tgl_aux_ctl_reg;
+ intel_dp->aux_ch_data_reg = tgl_aux_data_reg;
+ } else if (DISPLAY_VER(dev_priv) >= 9) {
+ intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
+ intel_dp->aux_ch_data_reg = skl_aux_data_reg;
+ } else if (HAS_PCH_SPLIT(dev_priv)) {
+ intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg;
+ intel_dp->aux_ch_data_reg = ilk_aux_data_reg;
+ } else {
+ intel_dp->aux_ch_ctl_reg = g4x_aux_ctl_reg;
+ intel_dp->aux_ch_data_reg = g4x_aux_data_reg;
+ }
+
+ if (DISPLAY_VER(dev_priv) >= 9)
+ intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
+ else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
+ else if (HAS_PCH_SPLIT(dev_priv))
+ intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
+ else
+ intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;
+
+ if (DISPLAY_VER(dev_priv) >= 9)
+ intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
+ else
+ intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;
+
+ intel_dp->aux.drm_dev = &dev_priv->drm;
+ drm_dp_aux_init(&intel_dp->aux);
+
+ /* Failure to allocate our preferred name is not critical */
+ if (DISPLAY_VER(dev_priv) >= 13 && aux_ch >= AUX_CH_D_XELPD)
+ intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX %c/%s",
+ aux_ch_name(aux_ch - AUX_CH_D_XELPD + AUX_CH_D),
+ encoder->base.name);
+ else if (DISPLAY_VER(dev_priv) >= 12 && aux_ch >= AUX_CH_USBC1)
+ intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX USBC%c/%s",
+ aux_ch - AUX_CH_USBC1 + '1',
+ encoder->base.name);
+ else
+ intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX %c/%s",
+ aux_ch_name(aux_ch),
+ encoder->base.name);
+
+ intel_dp->aux.transfer = intel_dp_aux_transfer;
+ cpu_latency_qos_add_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.h b/drivers/gpu/drm/i915/display/intel_dp_aux.h
new file mode 100644
index 000000000..738577537
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020-2021 Intel Corporation
+ */
+
+#ifndef __INTEL_DP_AUX_H__
+#define __INTEL_DP_AUX_H__
+
+struct intel_dp;
+
+void intel_dp_aux_fini(struct intel_dp *intel_dp);
+void intel_dp_aux_init(struct intel_dp *intel_dp);
+
+#endif /* __INTEL_DP_AUX_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
new file mode 100644
index 000000000..83af95bce
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
@@ -0,0 +1,519 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+/*
+ * Laptops with Intel GPUs which have panels that support controlling the
+ * backlight through DP AUX can actually use two different interfaces: Intel's
+ * proprietary DP AUX backlight interface, and the standard VESA backlight
+ * interface. Unfortunately, at the time of writing, a lot of laptops will
+ * advertise support for the standard VESA backlight interface when they
+ * don't properly support it. However, on these systems the Intel backlight
+ * interface generally does work properly. Additionally, these systems will
+ * usually just indicate that they use PWM backlight controls in their VBIOS
+ * for some reason.
+ */
+
+#include "i915_drv.h"
+#include "intel_backlight.h"
+#include "intel_display_types.h"
+#include "intel_dp.h"
+#include "intel_dp_aux_backlight.h"
+
+/*
+ * TODO: Implement HDR. Right now we just implement the bare minimum needed
+ * to bring us back into SDR mode so we can make people's backlights work in
+ * the meantime.
+ */
+
+/*
+ * DP AUX registers for Intel's proprietary HDR backlight interface. We define
+ * them here since we'll likely be the only driver to ever use these.
+ */
+#define INTEL_EDP_HDR_TCON_CAP0 0x340
+
+#define INTEL_EDP_HDR_TCON_CAP1 0x341
+# define INTEL_EDP_HDR_TCON_2084_DECODE_CAP BIT(0)
+# define INTEL_EDP_HDR_TCON_2020_GAMUT_CAP BIT(1)
+# define INTEL_EDP_HDR_TCON_TONE_MAPPING_CAP BIT(2)
+# define INTEL_EDP_HDR_TCON_SEGMENTED_BACKLIGHT_CAP BIT(3)
+# define INTEL_EDP_HDR_TCON_BRIGHTNESS_NITS_CAP BIT(4)
+# define INTEL_EDP_HDR_TCON_OPTIMIZATION_CAP BIT(5)
+# define INTEL_EDP_HDR_TCON_SDP_COLORIMETRY_CAP BIT(6)
+# define INTEL_EDP_HDR_TCON_SRGB_TO_PANEL_GAMUT_CONVERSION_CAP BIT(7)
+
+#define INTEL_EDP_HDR_TCON_CAP2 0x342
+# define INTEL_EDP_SDR_TCON_BRIGHTNESS_AUX_CAP BIT(0)
+
+#define INTEL_EDP_HDR_TCON_CAP3 0x343
+
+#define INTEL_EDP_HDR_GETSET_CTRL_PARAMS 0x344
+# define INTEL_EDP_HDR_TCON_2084_DECODE_ENABLE BIT(0)
+# define INTEL_EDP_HDR_TCON_2020_GAMUT_ENABLE BIT(1)
+# define INTEL_EDP_HDR_TCON_TONE_MAPPING_ENABLE BIT(2) /* Pre-TGL+ */
+# define INTEL_EDP_HDR_TCON_SEGMENTED_BACKLIGHT_ENABLE BIT(3)
+# define INTEL_EDP_HDR_TCON_BRIGHTNESS_AUX_ENABLE BIT(4)
+# define INTEL_EDP_HDR_TCON_SRGB_TO_PANEL_GAMUT_ENABLE BIT(5)
+/* Bit 6 is reserved */
+# define INTEL_EDP_HDR_TCON_SDP_COLORIMETRY_ENABLE BIT(7)
+
+#define INTEL_EDP_HDR_CONTENT_LUMINANCE 0x346 /* Pre-TGL+ */
+#define INTEL_EDP_HDR_PANEL_LUMINANCE_OVERRIDE 0x34A
+#define INTEL_EDP_SDR_LUMINANCE_LEVEL 0x352
+#define INTEL_EDP_BRIGHTNESS_NITS_LSB 0x354
+#define INTEL_EDP_BRIGHTNESS_NITS_MSB 0x355
+#define INTEL_EDP_BRIGHTNESS_DELAY_FRAMES 0x356
+#define INTEL_EDP_BRIGHTNESS_PER_FRAME_STEPS 0x357
+
+#define INTEL_EDP_BRIGHTNESS_OPTIMIZATION_0 0x358
+# define INTEL_EDP_TCON_USAGE_MASK GENMASK(3, 0)
+# define INTEL_EDP_TCON_USAGE_UNKNOWN 0x0
+# define INTEL_EDP_TCON_USAGE_DESKTOP 0x1
+# define INTEL_EDP_TCON_USAGE_FULL_SCREEN_MEDIA 0x2
+# define INTEL_EDP_TCON_USAGE_FULL_SCREEN_GAMING 0x3
+# define INTEL_EDP_TCON_POWER_MASK BIT(4)
+# define INTEL_EDP_TCON_POWER_DC (0 << 4)
+# define INTEL_EDP_TCON_POWER_AC (1 << 4)
+# define INTEL_EDP_TCON_OPTIMIZATION_STRENGTH_MASK GENMASK(7, 5)
+
+#define INTEL_EDP_BRIGHTNESS_OPTIMIZATION_1 0x359
+
+enum intel_dp_aux_backlight_modparam {
+ INTEL_DP_AUX_BACKLIGHT_AUTO = -1,
+ INTEL_DP_AUX_BACKLIGHT_OFF = 0,
+ INTEL_DP_AUX_BACKLIGHT_ON = 1,
+ INTEL_DP_AUX_BACKLIGHT_FORCE_VESA = 2,
+ INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL = 3,
+};
+
+/* Intel EDP backlight callbacks */
+static bool
+intel_dp_aux_supports_hdr_backlight(struct intel_connector *connector)
+{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
+ struct drm_dp_aux *aux = &intel_dp->aux;
+ struct intel_panel *panel = &connector->panel;
+ int ret;
+ u8 tcon_cap[4];
+
+ intel_dp_wait_source_oui(intel_dp);
+
+ ret = drm_dp_dpcd_read(aux, INTEL_EDP_HDR_TCON_CAP0, tcon_cap, sizeof(tcon_cap));
+ if (ret != sizeof(tcon_cap))
+ return false;
+
+ if (!(tcon_cap[1] & INTEL_EDP_HDR_TCON_BRIGHTNESS_NITS_CAP))
+ return false;
+
+ if (tcon_cap[0] >= 1) {
+ drm_dbg_kms(&i915->drm, "Detected Intel HDR backlight interface version %d\n",
+ tcon_cap[0]);
+ } else {
+ drm_dbg_kms(&i915->drm, "Detected unsupported HDR backlight interface version %d\n",
+ tcon_cap[0]);
+ return false;
+ }
+
+ /*
+ * If we don't have HDR static metadata, there is no way to detect
+ * at runtime the luminance range used for nits-based control. For
+ * now, do not use the Intel proprietary eDP backlight control if
+ * this data is missing from the panel EDID. If we ever find a
+ * panel which supports only nits-based control but doesn't provide
+ * HDR static metadata, we will need to start maintaining a table
+ * of ranges for such panels.
+ */
+ if (i915->params.enable_dpcd_backlight != INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL &&
+ !(connector->base.hdr_sink_metadata.hdmi_type1.metadata_type &
+ BIT(HDMI_STATIC_METADATA_TYPE1))) {
+ drm_info(&i915->drm,
+ "Panel is missing HDR static metadata. Possible support for Intel HDR backlight interface is not used. If your backlight controls don't work try booting with i915.enable_dpcd_backlight=%d. needs this, please file a _new_ bug report on drm/i915, see " FDO_BUG_URL " for details.\n",
+ INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL);
+ return false;
+ }
+
+ panel->backlight.edp.intel.sdr_uses_aux =
+ tcon_cap[2] & INTEL_EDP_SDR_TCON_BRIGHTNESS_AUX_CAP;
+
+ return true;
+}
+
+static u32
+intel_dp_aux_hdr_get_backlight(struct intel_connector *connector, enum pipe pipe)
+{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
+ u8 tmp;
+ u8 buf[2] = { 0 };
+
+ if (drm_dp_dpcd_readb(&intel_dp->aux, INTEL_EDP_HDR_GETSET_CTRL_PARAMS, &tmp) != 1) {
+ drm_err(&i915->drm, "Failed to read current backlight mode from DPCD\n");
+ return 0;
+ }
+
+ if (!(tmp & INTEL_EDP_HDR_TCON_BRIGHTNESS_AUX_ENABLE)) {
+ if (!panel->backlight.edp.intel.sdr_uses_aux) {
+ u32 pwm_level = panel->backlight.pwm_funcs->get(connector, pipe);
+
+ return intel_backlight_level_from_pwm(connector, pwm_level);
+ }
+
+ /* Assume 100% brightness if backlight controls aren't enabled yet */
+ return panel->backlight.max;
+ }
+
+ if (drm_dp_dpcd_read(&intel_dp->aux, INTEL_EDP_BRIGHTNESS_NITS_LSB, buf,
+ sizeof(buf)) != sizeof(buf)) {
+ drm_err(&i915->drm, "Failed to read brightness from DPCD\n");
+ return 0;
+ }
+
+ return (buf[1] << 8 | buf[0]);
+}
+
+static void
+intel_dp_aux_hdr_set_aux_backlight(const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_device *dev = connector->base.dev;
+ struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
+ u8 buf[4] = { 0 };
+
+ buf[0] = level & 0xFF;
+ buf[1] = (level & 0xFF00) >> 8;
+
+ if (drm_dp_dpcd_write(&intel_dp->aux, INTEL_EDP_BRIGHTNESS_NITS_LSB, buf,
+ sizeof(buf)) != sizeof(buf))
+ drm_err(dev, "Failed to write brightness level to DPCD\n");
+}
+
+static void
+intel_dp_aux_hdr_set_backlight(const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct intel_panel *panel = &connector->panel;
+
+ if (panel->backlight.edp.intel.sdr_uses_aux) {
+ intel_dp_aux_hdr_set_aux_backlight(conn_state, level);
+ } else {
+ const u32 pwm_level = intel_backlight_level_to_pwm(connector, level);
+
+ intel_backlight_set_pwm_level(conn_state, pwm_level);
+ }
+}
+
+static void
+intel_dp_aux_hdr_enable_backlight(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct intel_panel *panel = &connector->panel;
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
+ int ret;
+ u8 old_ctrl, ctrl;
+
+ intel_dp_wait_source_oui(intel_dp);
+
+ ret = drm_dp_dpcd_readb(&intel_dp->aux, INTEL_EDP_HDR_GETSET_CTRL_PARAMS, &old_ctrl);
+ if (ret != 1) {
+ drm_err(&i915->drm, "Failed to read current backlight control mode: %d\n", ret);
+ return;
+ }
+
+ ctrl = old_ctrl;
+ if (panel->backlight.edp.intel.sdr_uses_aux) {
+ ctrl |= INTEL_EDP_HDR_TCON_BRIGHTNESS_AUX_ENABLE;
+ intel_dp_aux_hdr_set_aux_backlight(conn_state, level);
+ } else {
+ u32 pwm_level = intel_backlight_level_to_pwm(connector, level);
+
+ panel->backlight.pwm_funcs->enable(crtc_state, conn_state, pwm_level);
+
+ ctrl &= ~INTEL_EDP_HDR_TCON_BRIGHTNESS_AUX_ENABLE;
+ }
+
+ if (ctrl != old_ctrl)
+ if (drm_dp_dpcd_writeb(&intel_dp->aux, INTEL_EDP_HDR_GETSET_CTRL_PARAMS, ctrl) != 1)
+ drm_err(&i915->drm, "Failed to configure DPCD brightness controls\n");
+}
+
+static void
+intel_dp_aux_hdr_disable_backlight(const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct intel_panel *panel = &connector->panel;
+
+ /* Nothing to do for AUX based backlight controls */
+ if (panel->backlight.edp.intel.sdr_uses_aux)
+ return;
+
+ /* Note we want the actual pwm_level to be 0, regardless of pwm_min */
+ panel->backlight.pwm_funcs->disable(conn_state, intel_backlight_invert_pwm_level(connector, 0));
+}
+
+static int
+intel_dp_aux_hdr_setup_backlight(struct intel_connector *connector, enum pipe pipe)
+{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ struct drm_luminance_range_info *luminance_range =
+ &connector->base.display_info.luminance_range;
+ int ret;
+
+ if (panel->backlight.edp.intel.sdr_uses_aux) {
+ drm_dbg_kms(&i915->drm, "SDR backlight is controlled through DPCD\n");
+ } else {
+ drm_dbg_kms(&i915->drm, "SDR backlight is controlled through PWM\n");
+
+ ret = panel->backlight.pwm_funcs->setup(connector, pipe);
+ if (ret < 0) {
+ drm_err(&i915->drm,
+ "Failed to setup SDR backlight controls through PWM: %d\n", ret);
+ return ret;
+ }
+ }
+
+ if (luminance_range->max_luminance) {
+ panel->backlight.max = luminance_range->max_luminance;
+ panel->backlight.min = luminance_range->min_luminance;
+ } else {
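+ /*
+ * No luminance range in the EDID; fall back to an assumed
+ * conservative maximum of 512 nits.
+ */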
+ panel->backlight.max = 512;
+ panel->backlight.min = 0;
+ }
+
+ drm_dbg_kms(&i915->drm, "Using backlight range %d..%d\n", panel->backlight.min,
+ panel->backlight.max);
+
+ panel->backlight.level = intel_dp_aux_hdr_get_backlight(connector, pipe);
+ panel->backlight.enabled = panel->backlight.level != 0;
+
+ return 0;
+}
+
+/* VESA backlight callbacks */
+static u32 intel_dp_aux_vesa_get_backlight(struct intel_connector *connector, enum pipe unused)
+{
+ return connector->panel.backlight.level;
+}
+
+static void
+intel_dp_aux_vesa_set_backlight(const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct intel_panel *panel = &connector->panel;
+ struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
+
+ if (!panel->backlight.edp.vesa.info.aux_set) {
+ const u32 pwm_level = intel_backlight_level_to_pwm(connector, level);
+
+ intel_backlight_set_pwm_level(conn_state, pwm_level);
+ }
+
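+ /*
+ * drm_edp_backlight_set_level() is a no-op when the panel doesn't
+ * use AUX for brightness control, so calling it unconditionally
+ * here is safe in the PWM case as well.
+ */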
+ drm_edp_backlight_set_level(&intel_dp->aux, &panel->backlight.edp.vesa.info, level);
+}
+
+static void
+intel_dp_aux_vesa_enable_backlight(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct intel_panel *panel = &connector->panel;
+ struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
+
+ if (!panel->backlight.edp.vesa.info.aux_enable) {
+ u32 pwm_level;
+
+ if (!panel->backlight.edp.vesa.info.aux_set)
+ pwm_level = intel_backlight_level_to_pwm(connector, level);
+ else
+ pwm_level = intel_backlight_invert_pwm_level(connector,
+ panel->backlight.pwm_level_max);
+
+ panel->backlight.pwm_funcs->enable(crtc_state, conn_state, pwm_level);
+ }
+
+ drm_edp_backlight_enable(&intel_dp->aux, &panel->backlight.edp.vesa.info, level);
+}
+
+static void intel_dp_aux_vesa_disable_backlight(const struct drm_connector_state *old_conn_state,
+ u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
+ struct intel_panel *panel = &connector->panel;
+ struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
+
+ drm_edp_backlight_disable(&intel_dp->aux, &panel->backlight.edp.vesa.info);
+
+ if (!panel->backlight.edp.vesa.info.aux_enable)
+ panel->backlight.pwm_funcs->disable(old_conn_state,
+ intel_backlight_invert_pwm_level(connector, 0));
+}
+
+static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector, enum pipe pipe)
+{
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct intel_panel *panel = &connector->panel;
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ u16 current_level;
+ u8 current_mode;
+ int ret;
+
+ ret = drm_edp_backlight_init(&intel_dp->aux, &panel->backlight.edp.vesa.info,
+ panel->vbt.backlight.pwm_freq_hz, intel_dp->edp_dpcd,
+ &current_level, &current_mode);
+ if (ret < 0)
+ return ret;
+
+ if (!panel->backlight.edp.vesa.info.aux_set || !panel->backlight.edp.vesa.info.aux_enable) {
+ ret = panel->backlight.pwm_funcs->setup(connector, pipe);
+ if (ret < 0) {
+ drm_err(&i915->drm,
+ "Failed to setup PWM backlight controls for eDP backlight: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ if (panel->backlight.edp.vesa.info.aux_set) {
+ panel->backlight.max = panel->backlight.edp.vesa.info.max;
+ panel->backlight.min = 0;
+ if (current_mode == DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD) {
+ panel->backlight.level = current_level;
+ panel->backlight.enabled = panel->backlight.level != 0;
+ } else {
+ panel->backlight.level = panel->backlight.max;
+ panel->backlight.enabled = false;
+ }
+ } else {
+ panel->backlight.max = panel->backlight.pwm_level_max;
+ panel->backlight.min = panel->backlight.pwm_level_min;
+ if (current_mode == DP_EDP_BACKLIGHT_CONTROL_MODE_PWM) {
+ panel->backlight.level = panel->backlight.pwm_funcs->get(connector, pipe);
+ panel->backlight.enabled = panel->backlight.pwm_enabled;
+ } else {
+ panel->backlight.level = panel->backlight.max;
+ panel->backlight.enabled = false;
+ }
+ }
+
+ return 0;
+}
+
+static bool
+intel_dp_aux_supports_vesa_backlight(struct intel_connector *connector)
+{
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+ if (drm_edp_backlight_supported(intel_dp->edp_dpcd)) {
+ drm_dbg_kms(&i915->drm, "AUX Backlight Control Supported!\n");
+ return true;
+ }
+ return false;
+}
+
+static const struct intel_panel_bl_funcs intel_dp_hdr_bl_funcs = {
+ .setup = intel_dp_aux_hdr_setup_backlight,
+ .enable = intel_dp_aux_hdr_enable_backlight,
+ .disable = intel_dp_aux_hdr_disable_backlight,
+ .set = intel_dp_aux_hdr_set_backlight,
+ .get = intel_dp_aux_hdr_get_backlight,
+};
+
+static const struct intel_panel_bl_funcs intel_dp_vesa_bl_funcs = {
+ .setup = intel_dp_aux_vesa_setup_backlight,
+ .enable = intel_dp_aux_vesa_enable_backlight,
+ .disable = intel_dp_aux_vesa_disable_backlight,
+ .set = intel_dp_aux_vesa_set_backlight,
+ .get = intel_dp_aux_vesa_get_backlight,
+};
+
+int intel_dp_aux_init_backlight_funcs(struct intel_connector *connector)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct intel_panel *panel = &connector->panel;
+ struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ bool try_intel_interface = false, try_vesa_interface = false;
+
+	/*
+	 * Check the VBT and user's module parameters to figure out which
+	 * interfaces to probe.
+	 */
+ switch (i915->params.enable_dpcd_backlight) {
+ case INTEL_DP_AUX_BACKLIGHT_OFF:
+ return -ENODEV;
+ case INTEL_DP_AUX_BACKLIGHT_AUTO:
+ switch (panel->vbt.backlight.type) {
+ case INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE:
+ try_vesa_interface = true;
+ break;
+ case INTEL_BACKLIGHT_DISPLAY_DDI:
+ try_intel_interface = true;
+ break;
+ default:
+ return -ENODEV;
+ }
+ break;
+ case INTEL_DP_AUX_BACKLIGHT_ON:
+ if (panel->vbt.backlight.type != INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE)
+ try_intel_interface = true;
+
+ try_vesa_interface = true;
+ break;
+ case INTEL_DP_AUX_BACKLIGHT_FORCE_VESA:
+ try_vesa_interface = true;
+ break;
+ case INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL:
+ try_intel_interface = true;
+ break;
+ }
+
+ /*
+ * Since Intel has their own backlight control interface, the majority of machines out there
+ * using DPCD backlight controls with Intel GPUs will be using this interface as opposed to
+ * the VESA interface. However, other GPUs (such as Nvidia's) will always use the VESA
+ * interface. This means that there's quite a number of panels out there that will advertise
+ * support for both interfaces, primarily systems with Intel/Nvidia hybrid GPU setups.
+ *
+ * There's a catch to this though: on many panels that advertise support for both
+ * interfaces, the VESA backlight interface will stop working once we've programmed the
+ * panel with Intel's OUI - which is also required for us to be able to detect Intel's
+ * backlight interface at all. This means that the only sensible way for us to detect both
+ * interfaces is to probe for Intel's first, and VESA's second.
+ */
+ if (try_intel_interface && intel_dp_aux_supports_hdr_backlight(connector)) {
+ drm_dbg_kms(dev, "Using Intel proprietary eDP backlight controls\n");
+ panel->backlight.funcs = &intel_dp_hdr_bl_funcs;
+ return 0;
+ }
+
+ if (try_vesa_interface && intel_dp_aux_supports_vesa_backlight(connector)) {
+ drm_dbg_kms(dev, "Using VESA eDP backlight controls\n");
+ panel->backlight.funcs = &intel_dp_vesa_bl_funcs;
+ return 0;
+ }
+
+ return -ENODEV;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.h b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.h
new file mode 100644
index 000000000..ed60c2858
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_DP_AUX_BACKLIGHT_H__
+#define __INTEL_DP_AUX_BACKLIGHT_H__
+
+struct intel_connector;
+
+int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector);
+
+#endif /* __INTEL_DP_AUX_BACKLIGHT_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
new file mode 100644
index 000000000..88689124c
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
@@ -0,0 +1,823 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (C) 2020 Google, Inc.
+ *
+ * Authors:
+ * Sean Paul <seanpaul@chromium.org>
+ */
+
+#include <drm/display/drm_dp_helper.h>
+#include <drm/display/drm_dp_mst_helper.h>
+#include <drm/display/drm_hdcp_helper.h>
+#include <drm/drm_print.h>
+
+#include "intel_ddi.h"
+#include "intel_de.h"
+#include "intel_display_types.h"
+#include "intel_dp.h"
+#include "intel_dp_hdcp.h"
+#include "intel_hdcp.h"
+#include "intel_hdcp_regs.h"
+
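+/*
+ * Map a transcoder to its HDCP_STATUS stream encryption status bit; returns
+ * 0 for transcoders without a stream status bit.
+ */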
+static unsigned int transcoder_to_stream_enc_status(enum transcoder cpu_transcoder)
+{
+ u32 stream_enc_mask;
+
+ switch (cpu_transcoder) {
+ case TRANSCODER_A:
+ stream_enc_mask = HDCP_STATUS_STREAM_A_ENC;
+ break;
+ case TRANSCODER_B:
+ stream_enc_mask = HDCP_STATUS_STREAM_B_ENC;
+ break;
+ case TRANSCODER_C:
+ stream_enc_mask = HDCP_STATUS_STREAM_C_ENC;
+ break;
+ case TRANSCODER_D:
+ stream_enc_mask = HDCP_STATUS_STREAM_D_ENC;
+ break;
+ default:
+ stream_enc_mask = 0;
+ }
+
+ return stream_enc_mask;
+}
+
+static void intel_dp_hdcp_wait_for_cp_irq(struct intel_hdcp *hdcp, int timeout)
+{
+ long ret;
+
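+	/* Wake up once a new CP_IRQ has arrived since cp_irq_count was cached */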
+#define C (hdcp->cp_irq_count_cached != atomic_read(&hdcp->cp_irq_count))
+ ret = wait_event_interruptible_timeout(hdcp->cp_irq_queue, C,
+ msecs_to_jiffies(timeout));
+
+ if (!ret)
+		DRM_DEBUG_KMS("Timed out waiting for CP_IRQ\n");
+}
+
+static
+int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *dig_port,
+ u8 *an)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ u8 aksv[DRM_HDCP_KSV_LEN] = {};
+ ssize_t dpcd_ret;
+
+ /* Output An first, that's easy */
+ dpcd_ret = drm_dp_dpcd_write(&dig_port->dp.aux, DP_AUX_HDCP_AN,
+ an, DRM_HDCP_AN_LEN);
+ if (dpcd_ret != DRM_HDCP_AN_LEN) {
+ drm_dbg_kms(&i915->drm,
+ "Failed to write An over DP/AUX (%zd)\n",
+ dpcd_ret);
+ return dpcd_ret >= 0 ? -EIO : dpcd_ret;
+ }
+
+ /*
+ * Since Aksv is Oh-So-Secret, we can't access it in software. So we
+ * send an empty buffer of the correct length through the DP helpers. On
+ * the other side, in the transfer hook, we'll generate a flag based on
+ * the destination address which will tickle the hardware to output the
+ * Aksv on our behalf after the header is sent.
+ */
+ dpcd_ret = drm_dp_dpcd_write(&dig_port->dp.aux, DP_AUX_HDCP_AKSV,
+ aksv, DRM_HDCP_KSV_LEN);
+ if (dpcd_ret != DRM_HDCP_KSV_LEN) {
+ drm_dbg_kms(&i915->drm,
+ "Failed to write Aksv over DP/AUX (%zd)\n",
+ dpcd_ret);
+ return dpcd_ret >= 0 ? -EIO : dpcd_ret;
+ }
+ return 0;
+}
+
+static int intel_dp_hdcp_read_bksv(struct intel_digital_port *dig_port,
+ u8 *bksv)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ ssize_t ret;
+
+ ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BKSV, bksv,
+ DRM_HDCP_KSV_LEN);
+ if (ret != DRM_HDCP_KSV_LEN) {
+ drm_dbg_kms(&i915->drm,
+ "Read Bksv from DP/AUX failed (%zd)\n", ret);
+ return ret >= 0 ? -EIO : ret;
+ }
+ return 0;
+}
+
+static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *dig_port,
+ u8 *bstatus)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ ssize_t ret;
+
+ /*
+ * For some reason the HDMI and DP HDCP specs call this register
+ * definition by different names. In the HDMI spec, it's called BSTATUS,
+ * but in DP it's called BINFO.
+ */
+ ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BINFO,
+ bstatus, DRM_HDCP_BSTATUS_LEN);
+ if (ret != DRM_HDCP_BSTATUS_LEN) {
+ drm_dbg_kms(&i915->drm,
+ "Read bstatus from DP/AUX failed (%zd)\n", ret);
+ return ret >= 0 ? -EIO : ret;
+ }
+ return 0;
+}
+
+static
+int intel_dp_hdcp_read_bcaps(struct intel_digital_port *dig_port,
+ u8 *bcaps)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ ssize_t ret;
+
+ ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BCAPS,
+ bcaps, 1);
+ if (ret != 1) {
+ drm_dbg_kms(&i915->drm,
+ "Read bcaps from DP/AUX failed (%zd)\n", ret);
+ return ret >= 0 ? -EIO : ret;
+ }
+
+ return 0;
+}
+
+static
+int intel_dp_hdcp_repeater_present(struct intel_digital_port *dig_port,
+ bool *repeater_present)
+{
+ ssize_t ret;
+ u8 bcaps;
+
+ ret = intel_dp_hdcp_read_bcaps(dig_port, &bcaps);
+ if (ret)
+ return ret;
+
+ *repeater_present = bcaps & DP_BCAPS_REPEATER_PRESENT;
+ return 0;
+}
+
+static
+int intel_dp_hdcp_read_ri_prime(struct intel_digital_port *dig_port,
+ u8 *ri_prime)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ ssize_t ret;
+
+ ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_RI_PRIME,
+ ri_prime, DRM_HDCP_RI_LEN);
+ if (ret != DRM_HDCP_RI_LEN) {
+ drm_dbg_kms(&i915->drm, "Read Ri' from DP/AUX failed (%zd)\n",
+ ret);
+ return ret >= 0 ? -EIO : ret;
+ }
+ return 0;
+}
+
+static
+int intel_dp_hdcp_read_ksv_ready(struct intel_digital_port *dig_port,
+ bool *ksv_ready)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ ssize_t ret;
+ u8 bstatus;
+
+ ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
+ &bstatus, 1);
+ if (ret != 1) {
+ drm_dbg_kms(&i915->drm,
+ "Read bstatus from DP/AUX failed (%zd)\n", ret);
+ return ret >= 0 ? -EIO : ret;
+ }
+ *ksv_ready = bstatus & DP_BSTATUS_READY;
+ return 0;
+}
+
+static
+int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *dig_port,
+ int num_downstream, u8 *ksv_fifo)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ ssize_t ret;
+ int i;
+
+ /* KSV list is read via 15 byte window (3 entries @ 5 bytes each) */
+ for (i = 0; i < num_downstream; i += 3) {
+		size_t len = min(num_downstream - i, 3) * DRM_HDCP_KSV_LEN;
+
+		ret = drm_dp_dpcd_read(&dig_port->dp.aux,
+ DP_AUX_HDCP_KSV_FIFO,
+ ksv_fifo + i * DRM_HDCP_KSV_LEN,
+ len);
+ if (ret != len) {
+ drm_dbg_kms(&i915->drm,
+ "Read ksv[%d] from DP/AUX failed (%zd)\n",
+ i, ret);
+ return ret >= 0 ? -EIO : ret;
+ }
+ }
+ return 0;
+}
+
+static
+int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *dig_port,
+ int i, u32 *part)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ ssize_t ret;
+
+ if (i >= DRM_HDCP_V_PRIME_NUM_PARTS)
+ return -EINVAL;
+
+ ret = drm_dp_dpcd_read(&dig_port->dp.aux,
+ DP_AUX_HDCP_V_PRIME(i), part,
+ DRM_HDCP_V_PRIME_PART_LEN);
+ if (ret != DRM_HDCP_V_PRIME_PART_LEN) {
+ drm_dbg_kms(&i915->drm,
+ "Read v'[%d] from DP/AUX failed (%zd)\n", i, ret);
+ return ret >= 0 ? -EIO : ret;
+ }
+ return 0;
+}
+
+static
+int intel_dp_hdcp_toggle_signalling(struct intel_digital_port *dig_port,
+ enum transcoder cpu_transcoder,
+ bool enable)
+{
+ /* Not used for single stream DisplayPort setups */
+ return 0;
+}
+
+static
+bool intel_dp_hdcp_check_link(struct intel_digital_port *dig_port,
+ struct intel_connector *connector)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ ssize_t ret;
+ u8 bstatus;
+
+ ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
+ &bstatus, 1);
+ if (ret != 1) {
+ drm_dbg_kms(&i915->drm,
+ "Read bstatus from DP/AUX failed (%zd)\n", ret);
+ return false;
+ }
+
+ return !(bstatus & (DP_BSTATUS_LINK_FAILURE | DP_BSTATUS_REAUTH_REQ));
+}
+
+static
+int intel_dp_hdcp_capable(struct intel_digital_port *dig_port,
+ bool *hdcp_capable)
+{
+ ssize_t ret;
+ u8 bcaps;
+
+ ret = intel_dp_hdcp_read_bcaps(dig_port, &bcaps);
+ if (ret)
+ return ret;
+
+ *hdcp_capable = bcaps & DP_BCAPS_HDCP_CAPABLE;
+ return 0;
+}
+
+struct hdcp2_dp_errata_stream_type {
+ u8 msg_id;
+ u8 stream_type;
+} __packed;
+
+struct hdcp2_dp_msg_data {
+ u8 msg_id;
+ u32 offset;
+ bool msg_detectable;
+ u32 timeout;
+	u32 timeout2; /* Added for the non-paired case */
+ /* Timeout to read entire msg */
+ u32 msg_read_timeout;
+};
+
+static const struct hdcp2_dp_msg_data hdcp2_dp_msg_data[] = {
+ { HDCP_2_2_AKE_INIT, DP_HDCP_2_2_AKE_INIT_OFFSET, false, 0, 0, 0},
+ { HDCP_2_2_AKE_SEND_CERT, DP_HDCP_2_2_AKE_SEND_CERT_OFFSET,
+ false, HDCP_2_2_CERT_TIMEOUT_MS, 0, HDCP_2_2_DP_CERT_READ_TIMEOUT_MS},
+ { HDCP_2_2_AKE_NO_STORED_KM, DP_HDCP_2_2_AKE_NO_STORED_KM_OFFSET,
+ false, 0, 0, 0 },
+ { HDCP_2_2_AKE_STORED_KM, DP_HDCP_2_2_AKE_STORED_KM_OFFSET,
+ false, 0, 0, 0 },
+ { HDCP_2_2_AKE_SEND_HPRIME, DP_HDCP_2_2_AKE_SEND_HPRIME_OFFSET,
+ true, HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS,
+ HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS, HDCP_2_2_DP_HPRIME_READ_TIMEOUT_MS},
+ { HDCP_2_2_AKE_SEND_PAIRING_INFO,
+ DP_HDCP_2_2_AKE_SEND_PAIRING_INFO_OFFSET, true,
+ HDCP_2_2_PAIRING_TIMEOUT_MS, 0, HDCP_2_2_DP_PAIRING_READ_TIMEOUT_MS },
+ { HDCP_2_2_LC_INIT, DP_HDCP_2_2_LC_INIT_OFFSET, false, 0, 0, 0 },
+ { HDCP_2_2_LC_SEND_LPRIME, DP_HDCP_2_2_LC_SEND_LPRIME_OFFSET,
+ false, HDCP_2_2_DP_LPRIME_TIMEOUT_MS, 0, 0 },
+ { HDCP_2_2_SKE_SEND_EKS, DP_HDCP_2_2_SKE_SEND_EKS_OFFSET, false,
+ 0, 0, 0 },
+ { HDCP_2_2_REP_SEND_RECVID_LIST,
+ DP_HDCP_2_2_REP_SEND_RECVID_LIST_OFFSET, true,
+ HDCP_2_2_RECVID_LIST_TIMEOUT_MS, 0, 0 },
+ { HDCP_2_2_REP_SEND_ACK, DP_HDCP_2_2_REP_SEND_ACK_OFFSET, false,
+ 0, 0, 0 },
+ { HDCP_2_2_REP_STREAM_MANAGE,
+ DP_HDCP_2_2_REP_STREAM_MANAGE_OFFSET, false,
+ 0, 0, 0},
+ { HDCP_2_2_REP_STREAM_READY, DP_HDCP_2_2_REP_STREAM_READY_OFFSET,
+ false, HDCP_2_2_STREAM_READY_TIMEOUT_MS, 0, 0 },
+/* local define to shovel this through the write_2_2 interface */
+#define HDCP_2_2_ERRATA_DP_STREAM_TYPE 50
+ { HDCP_2_2_ERRATA_DP_STREAM_TYPE,
+ DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET, false,
+	  0, 0, 0 },
+};
+
+static int
+intel_dp_hdcp2_read_rx_status(struct intel_digital_port *dig_port,
+ u8 *rx_status)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ ssize_t ret;
+
+ ret = drm_dp_dpcd_read(&dig_port->dp.aux,
+ DP_HDCP_2_2_REG_RXSTATUS_OFFSET, rx_status,
+ HDCP_2_2_DP_RXSTATUS_LEN);
+ if (ret != HDCP_2_2_DP_RXSTATUS_LEN) {
+ drm_dbg_kms(&i915->drm,
+			    "Read rx_status from DP/AUX failed (%zd)\n", ret);
+ return ret >= 0 ? -EIO : ret;
+ }
+
+ return 0;
+}
+
+static
+int hdcp2_detect_msg_availability(struct intel_digital_port *dig_port,
+ u8 msg_id, bool *msg_ready)
+{
+ u8 rx_status;
+ int ret;
+
+ *msg_ready = false;
+ ret = intel_dp_hdcp2_read_rx_status(dig_port, &rx_status);
+ if (ret < 0)
+ return ret;
+
+ switch (msg_id) {
+ case HDCP_2_2_AKE_SEND_HPRIME:
+ if (HDCP_2_2_DP_RXSTATUS_H_PRIME(rx_status))
+ *msg_ready = true;
+ break;
+ case HDCP_2_2_AKE_SEND_PAIRING_INFO:
+ if (HDCP_2_2_DP_RXSTATUS_PAIRING(rx_status))
+ *msg_ready = true;
+ break;
+ case HDCP_2_2_REP_SEND_RECVID_LIST:
+ if (HDCP_2_2_DP_RXSTATUS_READY(rx_status))
+ *msg_ready = true;
+ break;
+ default:
+ DRM_ERROR("Unidentified msg_id: %d\n", msg_id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static ssize_t
+intel_dp_hdcp2_wait_for_msg(struct intel_digital_port *dig_port,
+ const struct hdcp2_dp_msg_data *hdcp2_msg_data)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_dp *dp = &dig_port->dp;
+ struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
+ u8 msg_id = hdcp2_msg_data->msg_id;
+ int ret, timeout;
+ bool msg_ready = false;
+
+ if (msg_id == HDCP_2_2_AKE_SEND_HPRIME && !hdcp->is_paired)
+ timeout = hdcp2_msg_data->timeout2;
+ else
+ timeout = hdcp2_msg_data->timeout;
+
+ /*
+	 * There is no way to detect the availability of CERT, LPRIME and
+	 * STREAM_READY, so wait for the timeout and then read the msg.
+ */
+ if (!hdcp2_msg_data->msg_detectable) {
+ mdelay(timeout);
+ ret = 0;
+ } else {
+ /*
+		 * Since we want to check msg availability at the timeout, the
+		 * timeout from the CP_IRQ wait itself is ignored here.
+ */
+ intel_dp_hdcp_wait_for_cp_irq(hdcp, timeout);
+ ret = hdcp2_detect_msg_availability(dig_port,
+ msg_id, &msg_ready);
+ if (!msg_ready)
+ ret = -ETIMEDOUT;
+ }
+
+ if (ret)
+ drm_dbg_kms(&i915->drm,
+ "msg_id %d, ret %d, timeout(mSec): %d\n",
+ hdcp2_msg_data->msg_id, ret, timeout);
+
+ return ret;
+}
+
+static const struct hdcp2_dp_msg_data *get_hdcp2_dp_msg_data(u8 msg_id)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hdcp2_dp_msg_data); i++)
+ if (hdcp2_dp_msg_data[i].msg_id == msg_id)
+ return &hdcp2_dp_msg_data[i];
+
+ return NULL;
+}
+
+static
+int intel_dp_hdcp2_write_msg(struct intel_digital_port *dig_port,
+ void *buf, size_t size)
+{
+ unsigned int offset;
+ u8 *byte = buf;
+ ssize_t ret, bytes_to_write, len;
+ const struct hdcp2_dp_msg_data *hdcp2_msg_data;
+
+ hdcp2_msg_data = get_hdcp2_dp_msg_data(*byte);
+ if (!hdcp2_msg_data)
+ return -EINVAL;
+
+ offset = hdcp2_msg_data->offset;
+
+ /* No msg_id in DP HDCP2.2 msgs */
+ bytes_to_write = size - 1;
+ byte++;
+
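+	/* A single AUX transfer carries at most DP_AUX_MAX_PAYLOAD_BYTES */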
+ while (bytes_to_write) {
+ len = bytes_to_write > DP_AUX_MAX_PAYLOAD_BYTES ?
+ DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_write;
+
+ ret = drm_dp_dpcd_write(&dig_port->dp.aux,
+ offset, (void *)byte, len);
+ if (ret < 0)
+ return ret;
+
+ bytes_to_write -= ret;
+ byte += ret;
+ offset += ret;
+ }
+
+ return size;
+}
+
+static
+ssize_t get_receiver_id_list_rx_info(struct intel_digital_port *dig_port, u32 *dev_cnt, u8 *byte)
+{
+ ssize_t ret;
+ u8 *rx_info = byte;
+
+ ret = drm_dp_dpcd_read(&dig_port->dp.aux,
+ DP_HDCP_2_2_REG_RXINFO_OFFSET,
+ (void *)rx_info, HDCP_2_2_RXINFO_LEN);
+ if (ret != HDCP_2_2_RXINFO_LEN)
+ return ret >= 0 ? -EIO : ret;
+
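+	/* The device count is split across the two RX_INFO bytes */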
+ *dev_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
+ HDCP_2_2_DEV_COUNT_LO(rx_info[1]));
+
+ if (*dev_cnt > HDCP_2_2_MAX_DEVICE_COUNT)
+ *dev_cnt = HDCP_2_2_MAX_DEVICE_COUNT;
+
+ return ret;
+}
+
+static
+int intel_dp_hdcp2_read_msg(struct intel_digital_port *dig_port,
+ u8 msg_id, void *buf, size_t size)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_dp *dp = &dig_port->dp;
+ struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
+ unsigned int offset;
+ u8 *byte = buf;
+ ssize_t ret, bytes_to_recv, len;
+ const struct hdcp2_dp_msg_data *hdcp2_msg_data;
+ ktime_t msg_end = ktime_set(0, 0);
+ bool msg_expired;
+ u32 dev_cnt;
+
+ hdcp2_msg_data = get_hdcp2_dp_msg_data(msg_id);
+ if (!hdcp2_msg_data)
+ return -EINVAL;
+ offset = hdcp2_msg_data->offset;
+
+ ret = intel_dp_hdcp2_wait_for_msg(dig_port, hdcp2_msg_data);
+ if (ret < 0)
+ return ret;
+
+ hdcp->cp_irq_count_cached = atomic_read(&hdcp->cp_irq_count);
+
+	/* DP adaptation msgs have no msg_id */
+ byte++;
+
+ if (msg_id == HDCP_2_2_REP_SEND_RECVID_LIST) {
+ ret = get_receiver_id_list_rx_info(dig_port, &dev_cnt, byte);
+ if (ret < 0)
+ return ret;
+
+ byte += ret;
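+		/* Shrink the read to the actual length of the receiver ID list */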
+ size = sizeof(struct hdcp2_rep_send_receiverid_list) -
+ HDCP_2_2_RXINFO_LEN - HDCP_2_2_RECEIVER_IDS_MAX_LEN +
+ (dev_cnt * HDCP_2_2_RECEIVER_ID_LEN);
+ offset += HDCP_2_2_RXINFO_LEN;
+ }
+
+ bytes_to_recv = size - 1;
+
+ while (bytes_to_recv) {
+ len = bytes_to_recv > DP_AUX_MAX_PAYLOAD_BYTES ?
+ DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_recv;
+
+		/* Timeout for reading the entire msg, counted from the start of the read */
+ if (bytes_to_recv == size - 1 && hdcp2_msg_data->msg_read_timeout > 0)
+ msg_end = ktime_add_ms(ktime_get_raw(),
+ hdcp2_msg_data->msg_read_timeout);
+
+ ret = drm_dp_dpcd_read(&dig_port->dp.aux, offset,
+ (void *)byte, len);
+ if (ret < 0) {
+ drm_dbg_kms(&i915->drm, "msg_id %d, ret %zd\n",
+ msg_id, ret);
+ return ret;
+ }
+
+ bytes_to_recv -= ret;
+ byte += ret;
+ offset += ret;
+ }
+
+ if (hdcp2_msg_data->msg_read_timeout > 0) {
+ msg_expired = ktime_after(ktime_get_raw(), msg_end);
+ if (msg_expired) {
+ drm_dbg_kms(&i915->drm, "msg_id %d, entire msg read timeout(mSec): %d\n",
+ msg_id, hdcp2_msg_data->msg_read_timeout);
+ return -ETIMEDOUT;
+ }
+ }
+
+ byte = buf;
+ *byte = msg_id;
+
+ return size;
+}
+
+static
+int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *dig_port,
+ bool is_repeater, u8 content_type)
+{
+ int ret;
+ struct hdcp2_dp_errata_stream_type stream_type_msg;
+
+ if (is_repeater)
+ return 0;
+
+ /*
+	 * Errata for DP: since the stream type is used for encryption, the
+	 * receiver must be told the stream type so that it can decrypt the
+	 * content.
+	 * A repeater is told the stream type later, as part of its
+	 * authentication.
+ */
+ stream_type_msg.msg_id = HDCP_2_2_ERRATA_DP_STREAM_TYPE;
+ stream_type_msg.stream_type = content_type;
+
+ ret = intel_dp_hdcp2_write_msg(dig_port, &stream_type_msg,
+ sizeof(stream_type_msg));
+
+	return ret < 0 ? ret : 0;
+}
+
+static
+int intel_dp_hdcp2_check_link(struct intel_digital_port *dig_port,
+ struct intel_connector *connector)
+{
+ u8 rx_status;
+ int ret;
+
+ ret = intel_dp_hdcp2_read_rx_status(dig_port, &rx_status);
+ if (ret)
+ return ret;
+
+ if (HDCP_2_2_DP_RXSTATUS_REAUTH_REQ(rx_status))
+ ret = HDCP_REAUTH_REQUEST;
+ else if (HDCP_2_2_DP_RXSTATUS_LINK_FAILED(rx_status))
+ ret = HDCP_LINK_INTEGRITY_FAILURE;
+ else if (HDCP_2_2_DP_RXSTATUS_READY(rx_status))
+ ret = HDCP_TOPOLOGY_CHANGE;
+
+ return ret;
+}
+
+static
+int intel_dp_hdcp2_capable(struct intel_digital_port *dig_port,
+ bool *capable)
+{
+ u8 rx_caps[3];
+ int ret;
+
+ *capable = false;
+ ret = drm_dp_dpcd_read(&dig_port->dp.aux,
+ DP_HDCP_2_2_REG_RX_CAPS_OFFSET,
+ rx_caps, HDCP_2_2_RXCAPS_LEN);
+ if (ret != HDCP_2_2_RXCAPS_LEN)
+ return ret >= 0 ? -EIO : ret;
+
+ if (rx_caps[0] == HDCP_2_2_RX_CAPS_VERSION_VAL &&
+ HDCP_2_2_DP_HDCP_CAPABLE(rx_caps[2]))
+ *capable = true;
+
+ return 0;
+}
+
+static const struct intel_hdcp_shim intel_dp_hdcp_shim = {
+ .write_an_aksv = intel_dp_hdcp_write_an_aksv,
+ .read_bksv = intel_dp_hdcp_read_bksv,
+ .read_bstatus = intel_dp_hdcp_read_bstatus,
+ .repeater_present = intel_dp_hdcp_repeater_present,
+ .read_ri_prime = intel_dp_hdcp_read_ri_prime,
+ .read_ksv_ready = intel_dp_hdcp_read_ksv_ready,
+ .read_ksv_fifo = intel_dp_hdcp_read_ksv_fifo,
+ .read_v_prime_part = intel_dp_hdcp_read_v_prime_part,
+ .toggle_signalling = intel_dp_hdcp_toggle_signalling,
+ .check_link = intel_dp_hdcp_check_link,
+ .hdcp_capable = intel_dp_hdcp_capable,
+ .write_2_2_msg = intel_dp_hdcp2_write_msg,
+ .read_2_2_msg = intel_dp_hdcp2_read_msg,
+ .config_stream_type = intel_dp_hdcp2_config_stream_type,
+ .check_2_2_link = intel_dp_hdcp2_check_link,
+ .hdcp_2_2_capable = intel_dp_hdcp2_capable,
+ .protocol = HDCP_PROTOCOL_DP,
+};
+
+static int
+intel_dp_mst_toggle_hdcp_stream_select(struct intel_connector *connector,
+ bool enable)
+{
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ int ret;
+
+ ret = intel_ddi_toggle_hdcp_bits(&dig_port->base,
+ hdcp->stream_transcoder, enable,
+ TRANS_DDI_HDCP_SELECT);
+ if (ret)
+ drm_err(&i915->drm, "%s HDCP stream select failed (%d)\n",
+ enable ? "Enable" : "Disable", ret);
+ return ret;
+}
+
+static int
+intel_dp_mst_hdcp_stream_encryption(struct intel_connector *connector,
+ bool enable)
+{
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ enum port port = dig_port->base.port;
+ enum transcoder cpu_transcoder = hdcp->stream_transcoder;
+ u32 stream_enc_status;
+ int ret;
+
+ ret = intel_dp_mst_toggle_hdcp_stream_select(connector, enable);
+ if (ret)
+ return ret;
+
+ stream_enc_status = transcoder_to_stream_enc_status(cpu_transcoder);
+ if (!stream_enc_status)
+ return -EINVAL;
+
+ /* Wait for encryption confirmation */
+ if (intel_de_wait_for_register(i915,
+ HDCP_STATUS(i915, cpu_transcoder, port),
+ stream_enc_status,
+ enable ? stream_enc_status : 0,
+ HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
+ drm_err(&i915->drm, "Timed out waiting for transcoder: %s stream encryption %s\n",
+ transcoder_name(cpu_transcoder), enable ? "enabled" : "disabled");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int
+intel_dp_mst_hdcp2_stream_encryption(struct intel_connector *connector,
+ bool enable)
+{
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ enum transcoder cpu_transcoder = hdcp->stream_transcoder;
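+	/*
+	 * HDCP2_STREAM_STATUS is indexed by pipe; the stream transcoder is
+	 * assumed here to map 1:1 onto a pipe, hence the cast below.
+	 */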
+ enum pipe pipe = (enum pipe)cpu_transcoder;
+ enum port port = dig_port->base.port;
+ int ret;
+
+ drm_WARN_ON(&i915->drm, enable &&
+ !!(intel_de_read(i915, HDCP2_AUTH_STREAM(i915, cpu_transcoder, port))
+ & AUTH_STREAM_TYPE) != data->streams[0].stream_type);
+
+ ret = intel_dp_mst_toggle_hdcp_stream_select(connector, enable);
+ if (ret)
+ return ret;
+
+ /* Wait for encryption confirmation */
+ if (intel_de_wait_for_register(i915,
+ HDCP2_STREAM_STATUS(i915, cpu_transcoder, pipe),
+ STREAM_ENCRYPTION_STATUS,
+ enable ? STREAM_ENCRYPTION_STATUS : 0,
+ HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
+ drm_err(&i915->drm, "Timed out waiting for transcoder: %s stream encryption %s\n",
+ transcoder_name(cpu_transcoder), enable ? "enabled" : "disabled");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static
+int intel_dp_mst_hdcp2_check_link(struct intel_digital_port *dig_port,
+ struct intel_connector *connector)
+{
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ int ret;
+
+ /*
+	 * The link check only needs to be done for the connector that is
+	 * involved in HDCP port authentication and encryption.
+	 * The hdcp->is_repeater flag can be re-used to identify that
+	 * connector.
+ */
+ if (hdcp->is_repeater) {
+ ret = intel_dp_hdcp2_check_link(dig_port, connector);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct intel_hdcp_shim intel_dp_mst_hdcp_shim = {
+ .write_an_aksv = intel_dp_hdcp_write_an_aksv,
+ .read_bksv = intel_dp_hdcp_read_bksv,
+ .read_bstatus = intel_dp_hdcp_read_bstatus,
+ .repeater_present = intel_dp_hdcp_repeater_present,
+ .read_ri_prime = intel_dp_hdcp_read_ri_prime,
+ .read_ksv_ready = intel_dp_hdcp_read_ksv_ready,
+ .read_ksv_fifo = intel_dp_hdcp_read_ksv_fifo,
+ .read_v_prime_part = intel_dp_hdcp_read_v_prime_part,
+ .toggle_signalling = intel_dp_hdcp_toggle_signalling,
+ .stream_encryption = intel_dp_mst_hdcp_stream_encryption,
+ .check_link = intel_dp_hdcp_check_link,
+ .hdcp_capable = intel_dp_hdcp_capable,
+ .write_2_2_msg = intel_dp_hdcp2_write_msg,
+ .read_2_2_msg = intel_dp_hdcp2_read_msg,
+ .config_stream_type = intel_dp_hdcp2_config_stream_type,
+ .stream_2_2_encryption = intel_dp_mst_hdcp2_stream_encryption,
+ .check_2_2_link = intel_dp_mst_hdcp2_check_link,
+ .hdcp_2_2_capable = intel_dp_hdcp2_capable,
+ .protocol = HDCP_PROTOCOL_DP,
+};
+
+int intel_dp_hdcp_init(struct intel_digital_port *dig_port,
+ struct intel_connector *intel_connector)
+{
+ struct drm_device *dev = intel_connector->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_encoder *intel_encoder = &dig_port->base;
+ enum port port = intel_encoder->port;
+ struct intel_dp *intel_dp = &dig_port->dp;
+
+ if (!is_hdcp_supported(dev_priv, port))
+ return 0;
+
+ if (intel_connector->mst_port)
+ return intel_hdcp_init(intel_connector, dig_port,
+ &intel_dp_mst_hdcp_shim);
+ else if (!intel_dp_is_edp(intel_dp))
+ return intel_hdcp_init(intel_connector, dig_port,
+ &intel_dp_hdcp_shim);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_dp_hdcp.h b/drivers/gpu/drm/i915/display/intel_dp_hdcp.h
new file mode 100644
index 000000000..eff5ec5c5
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dp_hdcp.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef __INTEL_DP_HDCP_H__
+#define __INTEL_DP_HDCP_H__
+
+struct intel_connector;
+struct intel_digital_port;
+
+int intel_dp_hdcp_init(struct intel_digital_port *dig_port,
+ struct intel_connector *intel_connector);
+
+#endif /* __INTEL_DP_HDCP_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
new file mode 100644
index 000000000..3d3efcf02
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -0,0 +1,1456 @@
+/*
+ * Copyright © 2008-2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "i915_drv.h"
+#include "intel_display_types.h"
+#include "intel_dp.h"
+#include "intel_dp_link_training.h"
+
+static void intel_dp_reset_lttpr_common_caps(struct intel_dp *intel_dp)
+{
+ memset(intel_dp->lttpr_common_caps, 0, sizeof(intel_dp->lttpr_common_caps));
+}
+
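+/*
+ * Zeroing the cached PHY_REPEATER_CNT field makes drm_dp_lttpr_count()
+ * report no LTTPRs, forcing transparent mode bookkeeping from here on.
+ */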
+static void intel_dp_reset_lttpr_count(struct intel_dp *intel_dp)
+{
+ intel_dp->lttpr_common_caps[DP_PHY_REPEATER_CNT -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV] = 0;
+}
+
+static u8 *intel_dp_lttpr_phy_caps(struct intel_dp *intel_dp,
+ enum drm_dp_phy dp_phy)
+{
+ return intel_dp->lttpr_phy_caps[dp_phy - DP_PHY_LTTPR1];
+}
+
+static void intel_dp_read_lttpr_phy_caps(struct intel_dp *intel_dp,
+ const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ enum drm_dp_phy dp_phy)
+{
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy);
+
+ if (drm_dp_read_lttpr_phy_caps(&intel_dp->aux, dpcd, dp_phy, phy_caps) < 0) {
+ drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
+ "[ENCODER:%d:%s][%s] failed to read the PHY caps\n",
+ encoder->base.base.id, encoder->base.name,
+ drm_dp_phy_name(dp_phy));
+ return;
+ }
+
+ drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
+ "[ENCODER:%d:%s][%s] PHY capabilities: %*ph\n",
+ encoder->base.base.id, encoder->base.name,
+ drm_dp_phy_name(dp_phy),
+ (int)sizeof(intel_dp->lttpr_phy_caps[0]),
+ phy_caps);
+}
+
+static bool intel_dp_read_lttpr_common_caps(struct intel_dp *intel_dp,
+ const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ int ret;
+
+ ret = drm_dp_read_lttpr_common_caps(&intel_dp->aux, dpcd,
+ intel_dp->lttpr_common_caps);
+ if (ret < 0)
+ goto reset_caps;
+
+ drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
+ "[ENCODER:%d:%s] LTTPR common capabilities: %*ph\n",
+ encoder->base.base.id, encoder->base.name,
+ (int)sizeof(intel_dp->lttpr_common_caps),
+ intel_dp->lttpr_common_caps);
+
+ /* The minimum value of LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV is 1.4 */
+ if (intel_dp->lttpr_common_caps[0] < 0x14)
+ goto reset_caps;
+
+ return true;
+
+reset_caps:
+ intel_dp_reset_lttpr_common_caps(intel_dp);
+ return false;
+}
+
+static bool
+intel_dp_set_lttpr_transparent_mode(struct intel_dp *intel_dp, bool enable)
+{
+ u8 val = enable ? DP_PHY_REPEATER_MODE_TRANSPARENT :
+ DP_PHY_REPEATER_MODE_NON_TRANSPARENT;
+
+ return drm_dp_dpcd_write(&intel_dp->aux, DP_PHY_REPEATER_MODE, &val, 1) == 1;
+}
+
+static int intel_dp_init_lttpr(struct intel_dp *intel_dp, const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ int lttpr_count;
+ int i;
+
+ if (!intel_dp_read_lttpr_common_caps(intel_dp, dpcd))
+ return 0;
+
+ lttpr_count = drm_dp_lttpr_count(intel_dp->lttpr_common_caps);
+ /*
+ * Prevent setting LTTPR transparent mode explicitly if no LTTPRs are
+ * detected as this breaks link training at least on the Dell WD19TB
+ * dock.
+ */
+ if (lttpr_count == 0)
+ return 0;
+
+ /*
+ * See DP Standard v2.0 3.6.6.1. about the explicit disabling of
+ * non-transparent mode and the disable->enable non-transparent mode
+ * sequence.
+ */
+ intel_dp_set_lttpr_transparent_mode(intel_dp, true);
+
+ /*
+	 * In case of an unsupported number of LTTPRs, or on failure to switch
+	 * to non-transparent mode, fall back to transparent link training
+	 * mode, still taking into account any LTTPR common lane count/rate
+	 * limits.
+ */
+ if (lttpr_count < 0)
+ return 0;
+
+ if (!intel_dp_set_lttpr_transparent_mode(intel_dp, false)) {
+ drm_dbg_kms(&i915->drm,
+			    "[ENCODER:%d:%s] Switching to LTTPR non-transparent LT mode failed, falling back to transparent mode\n",
+ encoder->base.base.id, encoder->base.name);
+
+ intel_dp_set_lttpr_transparent_mode(intel_dp, true);
+ intel_dp_reset_lttpr_count(intel_dp);
+
+ return 0;
+ }
+
+ for (i = 0; i < lttpr_count; i++)
+ intel_dp_read_lttpr_phy_caps(intel_dp, dpcd, DP_PHY_LTTPR(i));
+
+ return lttpr_count;
+}
+
+/**
+ * intel_dp_init_lttpr_and_dprx_caps - detect LTTPR and DPRX caps, init the LTTPR link training mode
+ * @intel_dp: Intel DP struct
+ *
+ * Read the LTTPR common and DPRX capabilities and switch to non-transparent
+ * link training mode if any is detected and read the PHY capabilities for all
+ * detected LTTPRs. In case of an LTTPR detection error or if the number of
+ * LTTPRs is more than is supported (8), fall back to the no-LTTPR,
+ * transparent link training mode.
+ *
+ * Returns:
+ * >0 if LTTPRs were detected and the non-transparent LT mode was set. The
+ * DPRX capabilities are read out.
+ * 0 if no LTTPRs or more than 8 LTTPRs were detected or in case of a
+ * detection failure and the transparent LT mode was set. The DPRX
+ * capabilities are read out.
+ * <0 Reading out the DPRX capabilities failed.
+ */
+int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ int lttpr_count = 0;
+
+ /*
+ * Detecting LTTPRs must be avoided on platforms with an AUX timeout
+	 * period < 3.2ms (see DP Standard v2.0, 2.11.2, 3.6.6.1).
+ */
+ if (!intel_dp_is_edp(intel_dp) &&
+ (DISPLAY_VER(i915) >= 10 && !IS_GEMINILAKE(i915))) {
+ u8 dpcd[DP_RECEIVER_CAP_SIZE];
+
+ if (drm_dp_dpcd_probe(&intel_dp->aux, DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV))
+ return -EIO;
+
+ if (drm_dp_read_dpcd_caps(&intel_dp->aux, dpcd))
+ return -EIO;
+
+ lttpr_count = intel_dp_init_lttpr(intel_dp, dpcd);
+ }
+
+ /*
+ * The DPTX shall read the DPRX caps after LTTPR detection, so re-read
+ * it here.
+ */
+ if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd)) {
+ intel_dp_reset_lttpr_common_caps(intel_dp);
+ return -EIO;
+ }
+
+ return lttpr_count;
+}
+
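+/*
+ * 8b/10b budgets vswing and pre-emphasis together: the higher the requested
+ * pre-emphasis level, the lower the maximum vswing that may accompany it.
+ */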
+static u8 dp_voltage_max(u8 preemph)
+{
+ switch (preemph & DP_TRAIN_PRE_EMPHASIS_MASK) {
+ case DP_TRAIN_PRE_EMPH_LEVEL_0:
+ return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
+ case DP_TRAIN_PRE_EMPH_LEVEL_1:
+ return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
+ case DP_TRAIN_PRE_EMPH_LEVEL_2:
+ return DP_TRAIN_VOLTAGE_SWING_LEVEL_1;
+ case DP_TRAIN_PRE_EMPH_LEVEL_3:
+ default:
+ return DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
+ }
+}
+
+static u8 intel_dp_lttpr_voltage_max(struct intel_dp *intel_dp,
+ enum drm_dp_phy dp_phy)
+{
+ const u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy);
+
+ if (drm_dp_lttpr_voltage_swing_level_3_supported(phy_caps))
+ return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
+ else
+ return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
+}
+
+static u8 intel_dp_lttpr_preemph_max(struct intel_dp *intel_dp,
+ enum drm_dp_phy dp_phy)
+{
+ const u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy);
+
+ if (drm_dp_lttpr_pre_emphasis_level_3_supported(phy_caps))
+ return DP_TRAIN_PRE_EMPH_LEVEL_3;
+ else
+ return DP_TRAIN_PRE_EMPH_LEVEL_2;
+}
+
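+/*
+ * Whether @dp_phy is driven directly by the source DPTX: true if no LTTPRs
+ * are present, or if @dp_phy is the LTTPR adjacent to the source.
+ */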
+static bool
+intel_dp_phy_is_downstream_of_source(struct intel_dp *intel_dp,
+ enum drm_dp_phy dp_phy)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ int lttpr_count = drm_dp_lttpr_count(intel_dp->lttpr_common_caps);
+
+ drm_WARN_ON_ONCE(&i915->drm, lttpr_count <= 0 && dp_phy != DP_PHY_DPRX);
+
+ return lttpr_count <= 0 || dp_phy == DP_PHY_LTTPR(lttpr_count - 1);
+}
+
+static u8 intel_dp_phy_voltage_max(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ enum drm_dp_phy dp_phy)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ u8 voltage_max;
+
+ /*
+ * Get voltage_max from the DPTX_PHY (source or LTTPR) upstream from
+ * the DPRX_PHY we train.
+ */
+ if (intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy))
+ voltage_max = intel_dp->voltage_max(intel_dp, crtc_state);
+ else
+ voltage_max = intel_dp_lttpr_voltage_max(intel_dp, dp_phy + 1);
+
+ drm_WARN_ON_ONCE(&i915->drm,
+ voltage_max != DP_TRAIN_VOLTAGE_SWING_LEVEL_2 &&
+ voltage_max != DP_TRAIN_VOLTAGE_SWING_LEVEL_3);
+
+ return voltage_max;
+}
+
+static u8 intel_dp_phy_preemph_max(struct intel_dp *intel_dp,
+ enum drm_dp_phy dp_phy)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ u8 preemph_max;
+
+ /*
+ * Get preemph_max from the DPTX_PHY (source or LTTPR) upstream from
+ * the DPRX_PHY we train.
+ */
+ if (intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy))
+ preemph_max = intel_dp->preemph_max(intel_dp);
+ else
+ preemph_max = intel_dp_lttpr_preemph_max(intel_dp, dp_phy + 1);
+
+ drm_WARN_ON_ONCE(&i915->drm,
+ preemph_max != DP_TRAIN_PRE_EMPH_LEVEL_2 &&
+ preemph_max != DP_TRAIN_PRE_EMPH_LEVEL_3);
+
+ return preemph_max;
+}
+
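+/*
+ * LTTPRs always take per-lane drive settings; the source PHY supports them
+ * from display version 11 onwards.
+ */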
+static bool has_per_lane_signal_levels(struct intel_dp *intel_dp,
+ enum drm_dp_phy dp_phy)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+ return !intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy) ||
+ DISPLAY_VER(i915) >= 11;
+}
+
+/* 128b/132b */
+static u8 intel_dp_get_lane_adjust_tx_ffe_preset(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ enum drm_dp_phy dp_phy,
+ const u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane)
+{
+ u8 tx_ffe = 0;
+
+ if (has_per_lane_signal_levels(intel_dp, dp_phy)) {
+ lane = min(lane, crtc_state->lane_count - 1);
+ tx_ffe = drm_dp_get_adjust_tx_ffe_preset(link_status, lane);
+ } else {
+ for (lane = 0; lane < crtc_state->lane_count; lane++)
+ tx_ffe = max(tx_ffe, drm_dp_get_adjust_tx_ffe_preset(link_status, lane));
+ }
+
+ return tx_ffe;
+}
+
+/* 8b/10b */
+static u8 intel_dp_get_lane_adjust_vswing_preemph(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ enum drm_dp_phy dp_phy,
+ const u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane)
+{
+ u8 v = 0;
+ u8 p = 0;
+ u8 voltage_max;
+ u8 preemph_max;
+
+ if (has_per_lane_signal_levels(intel_dp, dp_phy)) {
+ lane = min(lane, crtc_state->lane_count - 1);
+
+ v = drm_dp_get_adjust_request_voltage(link_status, lane);
+ p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
+ } else {
+ for (lane = 0; lane < crtc_state->lane_count; lane++) {
+ v = max(v, drm_dp_get_adjust_request_voltage(link_status, lane));
+ p = max(p, drm_dp_get_adjust_request_pre_emphasis(link_status, lane));
+ }
+ }
+
+ preemph_max = intel_dp_phy_preemph_max(intel_dp, dp_phy);
+ if (p >= preemph_max)
+ p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
+
+ v = min(v, dp_voltage_max(p));
+
+ voltage_max = intel_dp_phy_voltage_max(intel_dp, crtc_state, dp_phy);
+ if (v >= voltage_max)
+ v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
+
+ return v | p;
+}
+
+static u8 intel_dp_get_lane_adjust_train(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ enum drm_dp_phy dp_phy,
+ const u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane)
+{
+ if (intel_dp_is_uhbr(crtc_state))
+ return intel_dp_get_lane_adjust_tx_ffe_preset(intel_dp, crtc_state,
+ dp_phy, link_status, lane);
+ else
+ return intel_dp_get_lane_adjust_vswing_preemph(intel_dp, crtc_state,
+ dp_phy, link_status, lane);
+}
+
+#define TRAIN_REQ_FMT "%d/%d/%d/%d"
+#define _TRAIN_REQ_VSWING_ARGS(link_status, lane) \
+ (drm_dp_get_adjust_request_voltage((link_status), (lane)) >> DP_TRAIN_VOLTAGE_SWING_SHIFT)
+#define TRAIN_REQ_VSWING_ARGS(link_status) \
+ _TRAIN_REQ_VSWING_ARGS(link_status, 0), \
+ _TRAIN_REQ_VSWING_ARGS(link_status, 1), \
+ _TRAIN_REQ_VSWING_ARGS(link_status, 2), \
+ _TRAIN_REQ_VSWING_ARGS(link_status, 3)
+#define _TRAIN_REQ_PREEMPH_ARGS(link_status, lane) \
+ (drm_dp_get_adjust_request_pre_emphasis((link_status), (lane)) >> DP_TRAIN_PRE_EMPHASIS_SHIFT)
+#define TRAIN_REQ_PREEMPH_ARGS(link_status) \
+ _TRAIN_REQ_PREEMPH_ARGS(link_status, 0), \
+ _TRAIN_REQ_PREEMPH_ARGS(link_status, 1), \
+ _TRAIN_REQ_PREEMPH_ARGS(link_status, 2), \
+ _TRAIN_REQ_PREEMPH_ARGS(link_status, 3)
+#define _TRAIN_REQ_TX_FFE_ARGS(link_status, lane) \
+ drm_dp_get_adjust_tx_ffe_preset((link_status), (lane))
+#define TRAIN_REQ_TX_FFE_ARGS(link_status) \
+ _TRAIN_REQ_TX_FFE_ARGS(link_status, 0), \
+ _TRAIN_REQ_TX_FFE_ARGS(link_status, 1), \
+ _TRAIN_REQ_TX_FFE_ARGS(link_status, 2), \
+ _TRAIN_REQ_TX_FFE_ARGS(link_status, 3)
+
+void
+intel_dp_get_adjust_train(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ enum drm_dp_phy dp_phy,
+ const u8 link_status[DP_LINK_STATUS_SIZE])
+{
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ int lane;
+
+ if (intel_dp_is_uhbr(crtc_state)) {
+ drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s][%s] 128b/132b, lanes: %d, "
+ "TX FFE request: " TRAIN_REQ_FMT "\n",
+ encoder->base.base.id, encoder->base.name,
+ drm_dp_phy_name(dp_phy),
+ crtc_state->lane_count,
+ TRAIN_REQ_TX_FFE_ARGS(link_status));
+ } else {
+ drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s][%s] 8b/10b, lanes: %d, "
+ "vswing request: " TRAIN_REQ_FMT ", "
+ "pre-emphasis request: " TRAIN_REQ_FMT "\n",
+ encoder->base.base.id, encoder->base.name,
+ drm_dp_phy_name(dp_phy),
+ crtc_state->lane_count,
+ TRAIN_REQ_VSWING_ARGS(link_status),
+ TRAIN_REQ_PREEMPH_ARGS(link_status));
+ }
+
+ for (lane = 0; lane < 4; lane++)
+ intel_dp->train_set[lane] =
+ intel_dp_get_lane_adjust_train(intel_dp, crtc_state,
+ dp_phy, link_status, lane);
+}
+
+static int intel_dp_training_pattern_set_reg(struct intel_dp *intel_dp,
+ enum drm_dp_phy dp_phy)
+{
+ return dp_phy == DP_PHY_DPRX ?
+ DP_TRAINING_PATTERN_SET :
+ DP_TRAINING_PATTERN_SET_PHY_REPEATER(dp_phy);
+}
+
+static bool
+intel_dp_set_link_train(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ enum drm_dp_phy dp_phy,
+ u8 dp_train_pat)
+{
+ int reg = intel_dp_training_pattern_set_reg(intel_dp, dp_phy);
+ u8 buf[sizeof(intel_dp->train_set) + 1];
+ int len;
+
+ intel_dp_program_link_training_pattern(intel_dp, crtc_state,
+ dp_phy, dp_train_pat);
+
+ buf[0] = dp_train_pat;
+ /* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
+ memcpy(buf + 1, intel_dp->train_set, crtc_state->lane_count);
+ len = crtc_state->lane_count + 1;
+
+ return drm_dp_dpcd_write(&intel_dp->aux, reg, buf, len) == len;
+}
+
+static char dp_training_pattern_name(u8 train_pat)
+{
+ switch (train_pat) {
+ case DP_TRAINING_PATTERN_1:
+ case DP_TRAINING_PATTERN_2:
+ case DP_TRAINING_PATTERN_3:
+ return '0' + train_pat;
+ case DP_TRAINING_PATTERN_4:
+ return '4';
+ default:
+ MISSING_CASE(train_pat);
+ return '?';
+ }
+}
+
+void
+intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ enum drm_dp_phy dp_phy,
+ u8 dp_train_pat)
+{
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ u8 train_pat = intel_dp_training_pattern_symbol(dp_train_pat);
+
+ if (train_pat != DP_TRAINING_PATTERN_DISABLE)
+ drm_dbg_kms(&i915->drm,
+ "[ENCODER:%d:%s][%s] Using DP training pattern TPS%c\n",
+ encoder->base.base.id, encoder->base.name,
+ drm_dp_phy_name(dp_phy),
+ dp_training_pattern_name(train_pat));
+
+ intel_dp->set_link_train(intel_dp, crtc_state, dp_train_pat);
+}
+
+#define TRAIN_SET_FMT "%d%s/%d%s/%d%s/%d%s"
+#define _TRAIN_SET_VSWING_ARGS(train_set) \
+ ((train_set) & DP_TRAIN_VOLTAGE_SWING_MASK) >> DP_TRAIN_VOLTAGE_SWING_SHIFT, \
+ (train_set) & DP_TRAIN_MAX_SWING_REACHED ? "(max)" : ""
+#define TRAIN_SET_VSWING_ARGS(train_set) \
+ _TRAIN_SET_VSWING_ARGS((train_set)[0]), \
+ _TRAIN_SET_VSWING_ARGS((train_set)[1]), \
+ _TRAIN_SET_VSWING_ARGS((train_set)[2]), \
+ _TRAIN_SET_VSWING_ARGS((train_set)[3])
+#define _TRAIN_SET_PREEMPH_ARGS(train_set) \
+ ((train_set) & DP_TRAIN_PRE_EMPHASIS_MASK) >> DP_TRAIN_PRE_EMPHASIS_SHIFT, \
+ (train_set) & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED ? "(max)" : ""
+#define TRAIN_SET_PREEMPH_ARGS(train_set) \
+ _TRAIN_SET_PREEMPH_ARGS((train_set)[0]), \
+ _TRAIN_SET_PREEMPH_ARGS((train_set)[1]), \
+ _TRAIN_SET_PREEMPH_ARGS((train_set)[2]), \
+ _TRAIN_SET_PREEMPH_ARGS((train_set)[3])
+#define _TRAIN_SET_TX_FFE_ARGS(train_set) \
+ ((train_set) & DP_TX_FFE_PRESET_VALUE_MASK), ""
+#define TRAIN_SET_TX_FFE_ARGS(train_set) \
+ _TRAIN_SET_TX_FFE_ARGS((train_set)[0]), \
+ _TRAIN_SET_TX_FFE_ARGS((train_set)[1]), \
+ _TRAIN_SET_TX_FFE_ARGS((train_set)[2]), \
+ _TRAIN_SET_TX_FFE_ARGS((train_set)[3])
+
+void intel_dp_set_signal_levels(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ enum drm_dp_phy dp_phy)
+{
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+ if (intel_dp_is_uhbr(crtc_state)) {
+ drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s][%s] 128b/132b, lanes: %d, "
+ "TX FFE presets: " TRAIN_SET_FMT "\n",
+ encoder->base.base.id, encoder->base.name,
+ drm_dp_phy_name(dp_phy),
+ crtc_state->lane_count,
+ TRAIN_SET_TX_FFE_ARGS(intel_dp->train_set));
+ } else {
+ drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s][%s] 8b/10b, lanes: %d, "
+ "vswing levels: " TRAIN_SET_FMT ", "
+ "pre-emphasis levels: " TRAIN_SET_FMT "\n",
+ encoder->base.base.id, encoder->base.name,
+ drm_dp_phy_name(dp_phy),
+ crtc_state->lane_count,
+ TRAIN_SET_VSWING_ARGS(intel_dp->train_set),
+ TRAIN_SET_PREEMPH_ARGS(intel_dp->train_set));
+ }
+
+ if (intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy))
+ encoder->set_signal_levels(encoder, crtc_state);
+}
+
+static bool
+intel_dp_reset_link_train(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ enum drm_dp_phy dp_phy,
+ u8 dp_train_pat)
+{
+ memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
+ intel_dp_set_signal_levels(intel_dp, crtc_state, dp_phy);
+ return intel_dp_set_link_train(intel_dp, crtc_state, dp_phy, dp_train_pat);
+}
+
+static bool
+intel_dp_update_link_train(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ enum drm_dp_phy dp_phy)
+{
+ int reg = dp_phy == DP_PHY_DPRX ?
+ DP_TRAINING_LANE0_SET :
+ DP_TRAINING_LANE0_SET_PHY_REPEATER(dp_phy);
+ int ret;
+
+ intel_dp_set_signal_levels(intel_dp, crtc_state, dp_phy);
+
+ ret = drm_dp_dpcd_write(&intel_dp->aux, reg,
+ intel_dp->train_set, crtc_state->lane_count);
+
+ return ret == crtc_state->lane_count;
+}
+
+/* 128b/132b */
+static bool intel_dp_lane_max_tx_ffe_reached(u8 train_set_lane)
+{
+ return (train_set_lane & DP_TX_FFE_PRESET_VALUE_MASK) ==
+ DP_TX_FFE_PRESET_VALUE_MASK;
+}
+
+/*
+ * 8b/10b
+ *
+ * FIXME: The DP spec is very confusing here, also the Link CTS spec seems to
+ * have self-contradicting tests around this area.
+ *
+ * In lieu of better ideas let's just stop when we've reached the max supported
+ * vswing with its max pre-emphasis, which is either 2+1 or 3+0 depending on
+ * whether vswing level 3 is supported or not.
+ */
+static bool intel_dp_lane_max_vswing_reached(u8 train_set_lane)
+{
+ u8 v = (train_set_lane & DP_TRAIN_VOLTAGE_SWING_MASK) >>
+ DP_TRAIN_VOLTAGE_SWING_SHIFT;
+ u8 p = (train_set_lane & DP_TRAIN_PRE_EMPHASIS_MASK) >>
+ DP_TRAIN_PRE_EMPHASIS_SHIFT;
+
+ if ((train_set_lane & DP_TRAIN_MAX_SWING_REACHED) == 0)
+ return false;
+
+ if (v + p != 3)
+ return false;
+
+ return true;
+}
+
+static bool intel_dp_link_max_vswing_reached(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ int lane;
+
+ for (lane = 0; lane < crtc_state->lane_count; lane++) {
+ u8 train_set_lane = intel_dp->train_set[lane];
+
+ if (intel_dp_is_uhbr(crtc_state)) {
+ if (!intel_dp_lane_max_tx_ffe_reached(train_set_lane))
+ return false;
+ } else {
+ if (!intel_dp_lane_max_vswing_reached(train_set_lane))
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/*
+ * Prepare link training by configuring the link parameters. On DDI platforms
+ * also enable the port here.
+ */
+static bool
+intel_dp_prepare_link_train(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ u8 link_config[2];
+ u8 link_bw, rate_select;
+
+ if (intel_dp->prepare_link_retrain)
+ intel_dp->prepare_link_retrain(intel_dp, crtc_state);
+
+ intel_dp_compute_rate(intel_dp, crtc_state->port_clock,
+ &link_bw, &rate_select);
+
+ /*
+ * WaEdpLinkRateDataReload
+ *
+	 * Parade PS8461E MUX (used on various TGL+ laptops) needs
+ * to snoop the link rates reported by the sink when we
+ * use LINK_RATE_SET in order to operate in jitter cleaning
+ * mode (as opposed to redriver mode). Unfortunately it
+ * loses track of the snooped link rates when powered down,
+ * so we need to make it re-snoop often. Without this high
+ * link rates are not stable.
+ */
+ if (!link_bw) {
+ struct intel_connector *connector = intel_dp->attached_connector;
+ __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
+
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Reloading eDP link rates\n",
+ connector->base.base.id, connector->base.name);
+
+ drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
+ sink_rates, sizeof(sink_rates));
+ }
+
+ if (link_bw)
+ drm_dbg_kms(&i915->drm,
+ "[ENCODER:%d:%s] Using LINK_BW_SET value %02x\n",
+ encoder->base.base.id, encoder->base.name, link_bw);
+ else
+ drm_dbg_kms(&i915->drm,
+ "[ENCODER:%d:%s] Using LINK_RATE_SET value %02x\n",
+ encoder->base.base.id, encoder->base.name, rate_select);
+
+ /* Write the link configuration data */
+ link_config[0] = link_bw;
+ link_config[1] = crtc_state->lane_count;
+ if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
+ link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+ drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
+
+ /* eDP 1.4 rate select method. */
+ if (!link_bw)
+ drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
+ &rate_select, 1);
+
+ link_config[0] = crtc_state->vrr.enable ? DP_MSA_TIMING_PAR_IGNORE_EN : 0;
+ link_config[1] = intel_dp_is_uhbr(crtc_state) ?
+ DP_SET_ANSI_128B132B : DP_SET_ANSI_8B10B;
+ drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
+
+ return true;
+}
+
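+/*
+ * Whether the sink changed its requested drive settings between two link
+ * status reads; used by clock recovery to count "same voltage" retries.
+ */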
+static bool intel_dp_adjust_request_changed(const struct intel_crtc_state *crtc_state,
+ const u8 old_link_status[DP_LINK_STATUS_SIZE],
+ const u8 new_link_status[DP_LINK_STATUS_SIZE])
+{
+ int lane;
+
+ for (lane = 0; lane < crtc_state->lane_count; lane++) {
+ u8 old, new;
+
+ if (intel_dp_is_uhbr(crtc_state)) {
+ old = drm_dp_get_adjust_tx_ffe_preset(old_link_status, lane);
+ new = drm_dp_get_adjust_tx_ffe_preset(new_link_status, lane);
+ } else {
+ old = drm_dp_get_adjust_request_voltage(old_link_status, lane) |
+ drm_dp_get_adjust_request_pre_emphasis(old_link_status, lane);
+ new = drm_dp_get_adjust_request_voltage(new_link_status, lane) |
+ drm_dp_get_adjust_request_pre_emphasis(new_link_status, lane);
+ }
+
+ if (old != new)
+ return true;
+ }
+
+ return false;
+}
+
+void
+intel_dp_dump_link_status(struct intel_dp *intel_dp, enum drm_dp_phy dp_phy,
+ const u8 link_status[DP_LINK_STATUS_SIZE])
+{
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+ drm_dbg_kms(&i915->drm,
+ "[ENCODER:%d:%s][%s] ln0_1:0x%x ln2_3:0x%x align:0x%x sink:0x%x adj_req0_1:0x%x adj_req2_3:0x%x\n",
+ encoder->base.base.id, encoder->base.name,
+ drm_dp_phy_name(dp_phy),
+ link_status[0], link_status[1], link_status[2],
+ link_status[3], link_status[4], link_status[5]);
+}
+
+/*
+ * Perform the link training clock recovery phase on the given DP PHY using
+ * training pattern 1.
+ */
+static bool
+intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ enum drm_dp_phy dp_phy)
+{
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ u8 old_link_status[DP_LINK_STATUS_SIZE] = {};
+ int voltage_tries, cr_tries, max_cr_tries;
+ u8 link_status[DP_LINK_STATUS_SIZE];
+ bool max_vswing_reached = false;
+ int delay_us;
+
+ delay_us = drm_dp_read_clock_recovery_delay(&intel_dp->aux,
+ intel_dp->dpcd, dp_phy,
+ intel_dp_is_uhbr(crtc_state));
+
+ /* clock recovery */
+ if (!intel_dp_reset_link_train(intel_dp, crtc_state, dp_phy,
+ DP_TRAINING_PATTERN_1 |
+ DP_LINK_SCRAMBLING_DISABLE)) {
+ drm_err(&i915->drm, "[ENCODER:%d:%s][%s] Failed to enable link training\n",
+ encoder->base.base.id, encoder->base.name,
+ drm_dp_phy_name(dp_phy));
+ return false;
+ }
+
+ /*
+ * The DP 1.4 spec defines the max clock recovery retries value
+ * as 10 but for pre-DP 1.4 devices we set a very tolerant
+	 * retry limit of 80 (4 voltage levels x 4 preemphasis levels x
+	 * 5 identical voltage retries). Since the previous specs didn't
+	 * define a limit and created the possibility of an infinite loop,
+	 * we want to prevent any sink from triggering that corner case.
+ */
+ if (intel_dp->dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14)
+ max_cr_tries = 10;
+ else
+ max_cr_tries = 80;
+
+ voltage_tries = 1;
+ for (cr_tries = 0; cr_tries < max_cr_tries; ++cr_tries) {
+ usleep_range(delay_us, 2 * delay_us);
+
+ if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, dp_phy,
+ link_status) < 0) {
+ drm_err(&i915->drm, "[ENCODER:%d:%s][%s] Failed to get link status\n",
+ encoder->base.base.id, encoder->base.name,
+ drm_dp_phy_name(dp_phy));
+ return false;
+ }
+
+ if (drm_dp_clock_recovery_ok(link_status, crtc_state->lane_count)) {
+ drm_dbg_kms(&i915->drm,
+ "[ENCODER:%d:%s][%s] Clock recovery OK\n",
+ encoder->base.base.id, encoder->base.name,
+ drm_dp_phy_name(dp_phy));
+ return true;
+ }
+
+ if (voltage_tries == 5) {
+ intel_dp_dump_link_status(intel_dp, dp_phy, link_status);
+ drm_dbg_kms(&i915->drm,
+ "[ENCODER:%d:%s][%s] Same voltage tried 5 times\n",
+ encoder->base.base.id, encoder->base.name,
+ drm_dp_phy_name(dp_phy));
+ return false;
+ }
+
+ if (max_vswing_reached) {
+ intel_dp_dump_link_status(intel_dp, dp_phy, link_status);
+ drm_dbg_kms(&i915->drm,
+ "[ENCODER:%d:%s][%s] Max Voltage Swing reached\n",
+ encoder->base.base.id, encoder->base.name,
+ drm_dp_phy_name(dp_phy));
+ return false;
+ }
+
+ /* Update training set as requested by target */
+ intel_dp_get_adjust_train(intel_dp, crtc_state, dp_phy,
+ link_status);
+ if (!intel_dp_update_link_train(intel_dp, crtc_state, dp_phy)) {
+ drm_err(&i915->drm,
+ "[ENCODER:%d:%s][%s] Failed to update link training\n",
+ encoder->base.base.id, encoder->base.name,
+ drm_dp_phy_name(dp_phy));
+ return false;
+ }
+
+ if (!intel_dp_adjust_request_changed(crtc_state, old_link_status, link_status))
+ ++voltage_tries;
+ else
+ voltage_tries = 1;
+
+ memcpy(old_link_status, link_status, sizeof(link_status));
+
+ if (intel_dp_link_max_vswing_reached(intel_dp, crtc_state))
+ max_vswing_reached = true;
+ }
+
+ intel_dp_dump_link_status(intel_dp, dp_phy, link_status);
+ drm_err(&i915->drm,
+ "[ENCODER:%d:%s][%s] Failed clock recovery %d times, giving up!\n",
+ encoder->base.base.id, encoder->base.name,
+ drm_dp_phy_name(dp_phy), max_cr_tries);
+
+ return false;
+}
+
+/*
+ * Pick Training Pattern Sequence (TPS) for channel equalization. 128b/132b TPS2
+ * for UHBR+, TPS4 for HBR3 or for 1.4 devices that support it, TPS3 for HBR2 or
+ * 1.2 devices that support it, TPS2 otherwise.
+ */
+static u32 intel_dp_training_pattern(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ enum drm_dp_phy dp_phy)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ bool source_tps3, sink_tps3, source_tps4, sink_tps4;
+
+ /* UHBR+ use separate 128b/132b TPS2 */
+ if (intel_dp_is_uhbr(crtc_state))
+ return DP_TRAINING_PATTERN_2;
+
+ /*
+ * TPS4 support is mandatory for all downstream devices that
+ * support HBR3. There are no known eDP panels that support
+ * TPS4 as of Feb 2018 as per VESA eDP_v1.4b_E1 specification.
+ * LTTPRs must support TPS4.
+ */
+ source_tps4 = intel_dp_source_supports_tps4(i915);
+ sink_tps4 = dp_phy != DP_PHY_DPRX ||
+ drm_dp_tps4_supported(intel_dp->dpcd);
+ if (source_tps4 && sink_tps4) {
+ return DP_TRAINING_PATTERN_4;
+ } else if (crtc_state->port_clock == 810000) {
+ if (!source_tps4)
+ drm_dbg_kms(&i915->drm,
+ "8.1 Gbps link rate without source TPS4 support\n");
+ if (!sink_tps4)
+ drm_dbg_kms(&i915->drm,
+ "8.1 Gbps link rate without sink TPS4 support\n");
+ }
+
+ /*
+ * TPS3 support is mandatory for downstream devices that
+ * support HBR2. However, not all sinks follow the spec.
+ */
+ source_tps3 = intel_dp_source_supports_tps3(i915);
+ sink_tps3 = dp_phy != DP_PHY_DPRX ||
+ drm_dp_tps3_supported(intel_dp->dpcd);
+ if (source_tps3 && sink_tps3) {
+ return DP_TRAINING_PATTERN_3;
+ } else if (crtc_state->port_clock >= 540000) {
+ if (!source_tps3)
+ drm_dbg_kms(&i915->drm,
+ ">=5.4/6.48 Gbps link rate without source TPS3 support\n");
+ if (!sink_tps3)
+ drm_dbg_kms(&i915->drm,
+ ">=5.4/6.48 Gbps link rate without sink TPS3 support\n");
+ }
+
+ return DP_TRAINING_PATTERN_2;
+}
+
+/*
+ * Perform the link training channel equalization phase on the given DP PHY
+ * using one of training pattern 2, 3 or 4 depending on the source and
+ * sink capabilities.
+ */
+static bool
+intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ enum drm_dp_phy dp_phy)
+{
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ int tries;
+ u32 training_pattern;
+ u8 link_status[DP_LINK_STATUS_SIZE];
+ bool channel_eq = false;
+ int delay_us;
+
+ delay_us = drm_dp_read_channel_eq_delay(&intel_dp->aux,
+ intel_dp->dpcd, dp_phy,
+ intel_dp_is_uhbr(crtc_state));
+
+ training_pattern = intel_dp_training_pattern(intel_dp, crtc_state, dp_phy);
+ /* Scrambling is disabled for TPS2/3 and enabled for TPS4 */
+ if (training_pattern != DP_TRAINING_PATTERN_4)
+ training_pattern |= DP_LINK_SCRAMBLING_DISABLE;
+
+ /* channel equalization */
+ if (!intel_dp_set_link_train(intel_dp, crtc_state, dp_phy,
+ training_pattern)) {
+ drm_err(&i915->drm,
+ "[ENCODER:%d:%s][%s] Failed to start channel equalization\n",
+ encoder->base.base.id, encoder->base.name,
+ drm_dp_phy_name(dp_phy));
+ return false;
+ }
+
+ for (tries = 0; tries < 5; tries++) {
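+		/* Wait the channel EQ AUX read interval before polling the link status. */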
+ usleep_range(delay_us, 2 * delay_us);
+
+ if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, dp_phy,
+ link_status) < 0) {
+ drm_err(&i915->drm,
+ "[ENCODER:%d:%s][%s] Failed to get link status\n",
+ encoder->base.base.id, encoder->base.name,
+ drm_dp_phy_name(dp_phy));
+ break;
+ }
+
+ /* Make sure clock is still ok */
+ if (!drm_dp_clock_recovery_ok(link_status,
+ crtc_state->lane_count)) {
+ intel_dp_dump_link_status(intel_dp, dp_phy, link_status);
+ drm_dbg_kms(&i915->drm,
+ "[ENCODER:%d:%s][%s] Clock recovery check failed, cannot "
+ "continue channel equalization\n",
+ encoder->base.base.id, encoder->base.name,
+ drm_dp_phy_name(dp_phy));
+ break;
+ }
+
+ if (drm_dp_channel_eq_ok(link_status,
+ crtc_state->lane_count)) {
+ channel_eq = true;
+ drm_dbg_kms(&i915->drm,
+ "[ENCODER:%d:%s][%s] Channel EQ done. DP Training successful\n",
+ encoder->base.base.id, encoder->base.name,
+ drm_dp_phy_name(dp_phy));
+ break;
+ }
+
+ /* Update training set as requested by target */
+ intel_dp_get_adjust_train(intel_dp, crtc_state, dp_phy,
+ link_status);
+ if (!intel_dp_update_link_train(intel_dp, crtc_state, dp_phy)) {
+ drm_err(&i915->drm,
+ "[ENCODER:%d:%s][%s] Failed to update link training\n",
+ encoder->base.base.id, encoder->base.name,
+ drm_dp_phy_name(dp_phy));
+ break;
+ }
+ }
+
+	/* Tried 5 times without success; fail and let the caller retry at lower BW */
+ if (tries == 5) {
+ intel_dp_dump_link_status(intel_dp, dp_phy, link_status);
+ drm_dbg_kms(&i915->drm,
+ "[ENCODER:%d:%s][%s] Channel equalization failed 5 times\n",
+ encoder->base.base.id, encoder->base.name,
+ drm_dp_phy_name(dp_phy));
+ }
+
+ return channel_eq;
+}
+
+static bool intel_dp_disable_dpcd_training_pattern(struct intel_dp *intel_dp,
+ enum drm_dp_phy dp_phy)
+{
+ int reg = intel_dp_training_pattern_set_reg(intel_dp, dp_phy);
+ u8 val = DP_TRAINING_PATTERN_DISABLE;
+
+ return drm_dp_dpcd_write(&intel_dp->aux, reg, &val, 1) == 1;
+}
+
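+/*
+ * Check whether the sink still signals an intra-hop AUX reply (128b/132b
+ * links). Returns 1 if an intra-hop transaction is in flight, 0 once it is
+ * clear, or a negative error code if the DPCD read fails.
+ */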
+static int
+intel_dp_128b132b_intra_hop(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ u8 sink_status;
+ int ret;
+
+ ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_SINK_STATUS, &sink_status);
+ if (ret != 1) {
+ drm_dbg_kms(&i915->drm, "Failed to read sink status\n");
+ return ret < 0 ? ret : -EIO;
+ }
+
+ return sink_status & DP_INTRA_HOP_AUX_REPLY_INDICATION ? 1 : 0;
+}
+
+/**
+ * intel_dp_stop_link_train - stop link training
+ * @intel_dp: DP struct
+ * @crtc_state: state for CRTC attached to the encoder
+ *
+ * Stop the link training of the @intel_dp port, disabling the training
+ * pattern in the sink's DPCD, and disabling the test pattern symbol
+ * generation on the port.
+ *
+ * What symbols are output on the port after this point is
+ * platform specific: on DDI/VLV/CHV platforms it will be the idle pattern
+ * with the pipe being disabled; on older platforms it is HW specific
+ * whether and how an idle pattern is generated, as the pipe is already
+ * enabled here for those.
+ *
+ * This function must be called after intel_dp_start_link_train().
+ */
+void intel_dp_stop_link_train(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+
+ intel_dp->link_trained = true;
+
+ intel_dp_disable_dpcd_training_pattern(intel_dp, DP_PHY_DPRX);
+ intel_dp_program_link_training_pattern(intel_dp, crtc_state, DP_PHY_DPRX,
+ DP_TRAINING_PATTERN_DISABLE);
+
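+	/* On 128b/132b links, give any pending intra-hop AUX transaction time to clear. */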
+ if (intel_dp_is_uhbr(crtc_state) &&
+ wait_for(intel_dp_128b132b_intra_hop(intel_dp, crtc_state) == 0, 500)) {
+ drm_dbg_kms(&i915->drm,
+ "[ENCODER:%d:%s] 128b/132b intra-hop not clearing\n",
+ encoder->base.base.id, encoder->base.name);
+ }
+}
+
+static bool
+intel_dp_link_train_phy(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ enum drm_dp_phy dp_phy)
+{
+ struct intel_connector *connector = intel_dp->attached_connector;
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ bool ret = false;
+
+ if (!intel_dp_link_training_clock_recovery(intel_dp, crtc_state, dp_phy))
+ goto out;
+
+ if (!intel_dp_link_training_channel_equalization(intel_dp, crtc_state, dp_phy))
+ goto out;
+
+ ret = true;
+
+out:
+ drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
+ "[CONNECTOR:%d:%s][ENCODER:%d:%s][%s] Link Training %s at link rate = %d, lane count = %d\n",
+ connector->base.base.id, connector->base.name,
+ encoder->base.base.id, encoder->base.name,
+ drm_dp_phy_name(dp_phy),
+ ret ? "passed" : "failed",
+ crtc_state->port_clock, crtc_state->lane_count);
+
+ return ret;
+}
+
+static void intel_dp_schedule_fallback_link_training(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_connector *intel_connector = intel_dp->attached_connector;
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+
+ if (intel_dp->hobl_active) {
+ drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
+ "[ENCODER:%d:%s] Link Training failed with HOBL active, "
+ "not enabling it from now on",
+ encoder->base.base.id, encoder->base.name);
+ intel_dp->hobl_failed = true;
+ } else if (intel_dp_get_link_train_fallback_values(intel_dp,
+ crtc_state->port_clock,
+ crtc_state->lane_count)) {
+ return;
+ }
+
+ /* Schedule a Hotplug Uevent to userspace to start modeset */
+ schedule_work(&intel_connector->modeset_retry_work);
+}
+
+/* Perform the link training on all LTTPRs and the DPRX on a link. */
+static bool
+intel_dp_link_train_all_phys(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ int lttpr_count)
+{
+ bool ret = true;
+ int i;
+
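+	/* Train each LTTPR hop in turn, then finally the DPRX itself. */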
+ for (i = lttpr_count - 1; i >= 0; i--) {
+ enum drm_dp_phy dp_phy = DP_PHY_LTTPR(i);
+
+ ret = intel_dp_link_train_phy(intel_dp, crtc_state, dp_phy);
+ intel_dp_disable_dpcd_training_pattern(intel_dp, dp_phy);
+
+ if (!ret)
+ break;
+ }
+
+ if (ret)
+ ret = intel_dp_link_train_phy(intel_dp, crtc_state, DP_PHY_DPRX);
+
+ if (intel_dp->set_idle_link_train)
+ intel_dp->set_idle_link_train(intel_dp, crtc_state);
+
+ return ret;
+}
+
+/*
+ * 128b/132b DP LANEx_EQ_DONE Sequence (DP 2.0 E11 3.5.2.16.1)
+ */
+static bool
+intel_dp_128b132b_lane_eq(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ u8 link_status[DP_LINK_STATUS_SIZE];
+ int delay_us;
+ int try, max_tries = 20;
+ unsigned long deadline;
+ bool timeout = false;
+
+ /*
+ * Reset signal levels. Start transmitting 128b/132b TPS1.
+ *
+ * Put DPRX and LTTPRs (if any) into intra-hop AUX mode by writing TPS1
+ * in DP_TRAINING_PATTERN_SET.
+ */
+ if (!intel_dp_reset_link_train(intel_dp, crtc_state, DP_PHY_DPRX,
+ DP_TRAINING_PATTERN_1)) {
+ drm_err(&i915->drm,
+ "[ENCODER:%d:%s] Failed to start 128b/132b TPS1\n",
+ encoder->base.base.id, encoder->base.name);
+ return false;
+ }
+
+ delay_us = drm_dp_128b132b_read_aux_rd_interval(&intel_dp->aux);
+
+ /* Read the initial TX FFE settings. */
+ if (drm_dp_dpcd_read_link_status(&intel_dp->aux, link_status) < 0) {
+ drm_err(&i915->drm,
+ "[ENCODER:%d:%s] Failed to read TX FFE presets\n",
+ encoder->base.base.id, encoder->base.name);
+ return false;
+ }
+
+ /* Update signal levels and training set as requested. */
+ intel_dp_get_adjust_train(intel_dp, crtc_state, DP_PHY_DPRX, link_status);
+ if (!intel_dp_update_link_train(intel_dp, crtc_state, DP_PHY_DPRX)) {
+ drm_err(&i915->drm,
+ "[ENCODER:%d:%s] Failed to set initial TX FFE settings\n",
+ encoder->base.base.id, encoder->base.name);
+ return false;
+ }
+
+ /* Start transmitting 128b/132b TPS2. */
+ if (!intel_dp_set_link_train(intel_dp, crtc_state, DP_PHY_DPRX,
+ DP_TRAINING_PATTERN_2)) {
+ drm_err(&i915->drm,
+ "[ENCODER:%d:%s] Failed to start 128b/132b TPS2\n",
+ encoder->base.base.id, encoder->base.name);
+ return false;
+ }
+
+ /* Time budget for the LANEx_EQ_DONE Sequence */
+ deadline = jiffies + msecs_to_jiffies_timeout(400);
+
+ for (try = 0; try < max_tries; try++) {
+ usleep_range(delay_us, 2 * delay_us);
+
+ /*
+ * The delay may get updated. The transmitter shall read the
+ * delay before link status during link training.
+ */
+ delay_us = drm_dp_128b132b_read_aux_rd_interval(&intel_dp->aux);
+
+ if (drm_dp_dpcd_read_link_status(&intel_dp->aux, link_status) < 0) {
+ drm_err(&i915->drm,
+ "[ENCODER:%d:%s] Failed to read link status\n",
+ encoder->base.base.id, encoder->base.name);
+ return false;
+ }
+
+ if (drm_dp_128b132b_link_training_failed(link_status)) {
+ intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status);
+ drm_err(&i915->drm,
+ "[ENCODER:%d:%s] Downstream link training failure\n",
+ encoder->base.base.id, encoder->base.name);
+ return false;
+ }
+
+ if (drm_dp_128b132b_lane_channel_eq_done(link_status, crtc_state->lane_count)) {
+ drm_dbg_kms(&i915->drm,
+ "[ENCODER:%d:%s] Lane channel eq done\n",
+ encoder->base.base.id, encoder->base.name);
+ break;
+ }
+
+ if (timeout) {
+ intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status);
+ drm_err(&i915->drm,
+ "[ENCODER:%d:%s] Lane channel eq timeout\n",
+ encoder->base.base.id, encoder->base.name);
+ return false;
+ }
+
+ if (time_after(jiffies, deadline))
+ timeout = true; /* try one last time after deadline */
+
+ /* Update signal levels and training set as requested. */
+ intel_dp_get_adjust_train(intel_dp, crtc_state, DP_PHY_DPRX, link_status);
+ if (!intel_dp_update_link_train(intel_dp, crtc_state, DP_PHY_DPRX)) {
+ drm_err(&i915->drm,
+ "[ENCODER:%d:%s] Failed to update TX FFE settings\n",
+ encoder->base.base.id, encoder->base.name);
+ return false;
+ }
+ }
+
+ if (try == max_tries) {
+ intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status);
+ drm_err(&i915->drm,
+ "[ENCODER:%d:%s] Max loop count reached\n",
+ encoder->base.base.id, encoder->base.name);
+ return false;
+ }
+
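+	/* Per-lane EQ is done; now poll for interlane alignment within the same time budget. */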
+ for (;;) {
+ if (time_after(jiffies, deadline))
+ timeout = true; /* try one last time after deadline */
+
+ if (drm_dp_dpcd_read_link_status(&intel_dp->aux, link_status) < 0) {
+ drm_err(&i915->drm,
+ "[ENCODER:%d:%s] Failed to read link status\n",
+ encoder->base.base.id, encoder->base.name);
+ return false;
+ }
+
+ if (drm_dp_128b132b_link_training_failed(link_status)) {
+ intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status);
+ drm_err(&i915->drm,
+ "[ENCODER:%d:%s] Downstream link training failure\n",
+ encoder->base.base.id, encoder->base.name);
+ return false;
+ }
+
+ if (drm_dp_128b132b_eq_interlane_align_done(link_status)) {
+ drm_dbg_kms(&i915->drm,
+ "[ENCODER:%d:%s] Interlane align done\n",
+ encoder->base.base.id, encoder->base.name);
+ break;
+ }
+
+ if (timeout) {
+ intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status);
+ drm_err(&i915->drm,
+ "[ENCODER:%d:%s] Interlane align timeout\n",
+ encoder->base.base.id, encoder->base.name);
+ return false;
+ }
+
+ usleep_range(2000, 3000);
+ }
+
+ return true;
+}
+
+/*
+ * 128b/132b DP LANEx_CDS_DONE Sequence (DP 2.0 E11 3.5.2.16.2)
+ */
+static bool
+intel_dp_128b132b_lane_cds(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ int lttpr_count)
+{
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ u8 link_status[DP_LINK_STATUS_SIZE];
+ unsigned long deadline;
+
+ if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
+ DP_TRAINING_PATTERN_2_CDS) != 1) {
+ drm_err(&i915->drm,
+ "[ENCODER:%d:%s] Failed to start 128b/132b TPS2 CDS\n",
+ encoder->base.base.id, encoder->base.name);
+ return false;
+ }
+
+ /* Time budget for the LANEx_CDS_DONE Sequence */
+ deadline = jiffies + msecs_to_jiffies_timeout((lttpr_count + 1) * 20);
+
+ for (;;) {
+ bool timeout = false;
+
+ if (time_after(jiffies, deadline))
+ timeout = true; /* try one last time after deadline */
+
+ usleep_range(2000, 3000);
+
+ if (drm_dp_dpcd_read_link_status(&intel_dp->aux, link_status) < 0) {
+ drm_err(&i915->drm,
+ "[ENCODER:%d:%s] Failed to read link status\n",
+ encoder->base.base.id, encoder->base.name);
+ return false;
+ }
+
+ if (drm_dp_128b132b_eq_interlane_align_done(link_status) &&
+ drm_dp_128b132b_cds_interlane_align_done(link_status) &&
+ drm_dp_128b132b_lane_symbol_locked(link_status, crtc_state->lane_count)) {
+ drm_dbg_kms(&i915->drm,
+ "[ENCODER:%d:%s] CDS interlane align done\n",
+ encoder->base.base.id, encoder->base.name);
+ break;
+ }
+
+ if (drm_dp_128b132b_link_training_failed(link_status)) {
+ intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status);
+ drm_err(&i915->drm,
+ "[ENCODER:%d:%s] Downstream link training failure\n",
+ encoder->base.base.id, encoder->base.name);
+ return false;
+ }
+
+ if (timeout) {
+ intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status);
+ drm_err(&i915->drm,
+ "[ENCODER:%d:%s] CDS timeout\n",
+ encoder->base.base.id, encoder->base.name);
+ return false;
+ }
+ }
+
+ /* FIXME: Should DP_TRAINING_PATTERN_DISABLE be written first? */
+ if (intel_dp->set_idle_link_train)
+ intel_dp->set_idle_link_train(intel_dp, crtc_state);
+
+ return true;
+}
+
+/*
+ * 128b/132b link training sequence. (DP 2.0 E11 SCR on link training.)
+ */
+static bool
+intel_dp_128b132b_link_train(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ int lttpr_count)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_connector *connector = intel_dp->attached_connector;
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ bool passed = false;
+
+ if (wait_for(intel_dp_128b132b_intra_hop(intel_dp, crtc_state) == 0, 500)) {
+ drm_err(&i915->drm,
+ "[ENCODER:%d:%s] 128b/132b intra-hop not clear\n",
+ encoder->base.base.id, encoder->base.name);
+ return false;
+ }
+
+ if (intel_dp_128b132b_lane_eq(intel_dp, crtc_state) &&
+ intel_dp_128b132b_lane_cds(intel_dp, crtc_state, lttpr_count))
+ passed = true;
+
+ drm_dbg_kms(&i915->drm,
+ "[CONNECTOR:%d:%s][ENCODER:%d:%s] 128b/132b Link Training %s at link rate = %d, lane count = %d\n",
+ connector->base.base.id, connector->base.name,
+ encoder->base.base.id, encoder->base.name,
+ passed ? "passed" : "failed",
+ crtc_state->port_clock, crtc_state->lane_count);
+
+ return passed;
+}
+
+/**
+ * intel_dp_start_link_train - start link training
+ * @intel_dp: DP struct
+ * @crtc_state: state for CRTC attached to the encoder
+ *
+ * Start the link training of the @intel_dp port, scheduling a fallback
+ * retraining with reduced link rate/lane parameters if the link training
+ * fails.
+ * After calling this function intel_dp_stop_link_train() must be called.
+ */
+void intel_dp_start_link_train(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ bool passed;
+ /*
+ * TODO: Reiniting LTTPRs here won't be needed once proper connector
+ * HW state readout is added.
+ */
+ int lttpr_count = intel_dp_init_lttpr_and_dprx_caps(intel_dp);
+
+ if (lttpr_count < 0)
+ /* Still continue with enabling the port and link training. */
+ lttpr_count = 0;
+
+ intel_dp_prepare_link_train(intel_dp, crtc_state);
+
+ if (intel_dp_is_uhbr(crtc_state))
+ passed = intel_dp_128b132b_link_train(intel_dp, crtc_state, lttpr_count);
+ else
+ passed = intel_dp_link_train_all_phys(intel_dp, crtc_state, lttpr_count);
+
+ if (!passed)
+ intel_dp_schedule_fallback_link_training(intel_dp, crtc_state);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.h b/drivers/gpu/drm/i915/display/intel_dp_link_training.h
new file mode 100644
index 000000000..7fa1c0833
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_DP_LINK_TRAINING_H__
+#define __INTEL_DP_LINK_TRAINING_H__
+
+#include <drm/display/drm_dp_helper.h>
+
+struct intel_crtc_state;
+struct intel_dp;
+
+int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp);
+
+void intel_dp_get_adjust_train(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ enum drm_dp_phy dp_phy,
+ const u8 link_status[DP_LINK_STATUS_SIZE]);
+void intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ enum drm_dp_phy dp_phy,
+ u8 dp_train_pat);
+void intel_dp_set_signal_levels(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ enum drm_dp_phy dp_phy);
+void intel_dp_start_link_train(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state);
+void intel_dp_stop_link_train(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state);
+
+void
+intel_dp_dump_link_status(struct intel_dp *intel_dp, enum drm_dp_phy dp_phy,
+ const u8 link_status[DP_LINK_STATUS_SIZE]);
+
+/* Get the TPSx symbol type of the value programmed to DP_TRAINING_PATTERN_SET */
+static inline u8 intel_dp_training_pattern_symbol(u8 pattern)
+{
+ return pattern & ~DP_LINK_SCRAMBLING_DISABLE;
+}
+
+#endif /* __INTEL_DP_LINK_TRAINING_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
new file mode 100644
index 000000000..eec32f682
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -0,0 +1,1076 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ * 2014 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_probe_helper.h>
+
+#include "i915_drv.h"
+#include "intel_atomic.h"
+#include "intel_audio.h"
+#include "intel_connector.h"
+#include "intel_crtc.h"
+#include "intel_ddi.h"
+#include "intel_de.h"
+#include "intel_display_types.h"
+#include "intel_dp.h"
+#include "intel_dp_hdcp.h"
+#include "intel_dp_mst.h"
+#include "intel_dpio_phy.h"
+#include "intel_hdcp.h"
+#include "intel_hotplug.h"
+#include "skl_scaler.h"
+
+static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ struct link_config_limits *limits)
+{
+ struct drm_atomic_state *state = crtc_state->uapi.state;
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
+ struct intel_dp *intel_dp = &intel_mst->primary->dp;
+ struct drm_dp_mst_topology_state *mst_state;
+ struct intel_connector *connector =
+ to_intel_connector(conn_state->connector);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+ int bpp, slots = -EINVAL;
+
+ mst_state = drm_atomic_get_mst_topology_state(state, &intel_dp->mst_mgr);
+ if (IS_ERR(mst_state))
+ return PTR_ERR(mst_state);
+
+ crtc_state->lane_count = limits->max_lane_count;
+ crtc_state->port_clock = limits->max_rate;
+
+ // TODO: Handle pbn_div changes by adding a new MST helper
+ if (!mst_state->pbn_div) {
+ mst_state->pbn_div = drm_dp_get_vc_payload_bw(&intel_dp->mst_mgr,
+ limits->max_rate,
+ limits->max_lane_count);
+ }
+
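+	/* Step bpp down by 2 bits per component (3 components) until the mode's payload fits in the available time slots. */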
+ for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
+ crtc_state->pipe_bpp = bpp;
+
+ crtc_state->pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock,
+ crtc_state->pipe_bpp,
+ false);
+ slots = drm_dp_atomic_find_time_slots(state, &intel_dp->mst_mgr,
+ connector->port, crtc_state->pbn);
+ if (slots == -EDEADLK)
+ return slots;
+ if (slots >= 0)
+ break;
+ }
+
+ if (slots < 0) {
+ drm_dbg_kms(&i915->drm, "failed finding vcpi slots:%d\n",
+ slots);
+ return slots;
+ }
+
+ intel_link_compute_m_n(crtc_state->pipe_bpp,
+ crtc_state->lane_count,
+ adjusted_mode->crtc_clock,
+ crtc_state->port_clock,
+ &crtc_state->dp_m_n,
+ crtc_state->fec_enable);
+ crtc_state->dp_m_n.tu = slots;
+
+ return 0;
+}
+
+static int intel_dp_mst_update_slots(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
+ struct intel_dp *intel_dp = &intel_mst->primary->dp;
+ struct drm_dp_mst_topology_mgr *mgr = &intel_dp->mst_mgr;
+ struct drm_dp_mst_topology_state *topology_state;
+ u8 link_coding_cap = intel_dp_is_uhbr(crtc_state) ?
+ DP_CAP_ANSI_128B132B : DP_CAP_ANSI_8B10B;
+
+ topology_state = drm_atomic_get_mst_topology_state(conn_state->state, mgr);
+ if (IS_ERR(topology_state)) {
+ drm_dbg_kms(&i915->drm, "slot update failed\n");
+ return PTR_ERR(topology_state);
+ }
+
+ drm_dp_mst_update_slots(topology_state, link_coding_cap);
+
+ return 0;
+}
+
+static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
+ struct intel_dp *intel_dp = &intel_mst->primary->dp;
+ struct intel_connector *connector =
+ to_intel_connector(conn_state->connector);
+ struct intel_digital_connector_state *intel_conn_state =
+ to_intel_digital_connector_state(conn_state);
+ const struct drm_display_mode *adjusted_mode =
+ &pipe_config->hw.adjusted_mode;
+ struct link_config_limits limits;
+ int ret;
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return -EINVAL;
+
+ pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
+ pipe_config->has_pch_encoder = false;
+
+ if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
+ pipe_config->has_audio = connector->port->has_audio;
+ else
+ pipe_config->has_audio =
+ intel_conn_state->force_audio == HDMI_AUDIO_ON;
+
+ /*
+ * for MST we always configure max link bw - the spec doesn't
+ * seem to suggest we should do otherwise.
+ */
+ limits.min_rate =
+ limits.max_rate = intel_dp_max_link_rate(intel_dp);
+
+ limits.min_lane_count =
+ limits.max_lane_count = intel_dp_max_lane_count(intel_dp);
+
+ limits.min_bpp = intel_dp_min_bpp(pipe_config->output_format);
+ /*
+ * FIXME: If all the streams can't fit into the link with
+ * their current pipe_bpp we should reduce pipe_bpp across
+ * the board until things start to fit. Until then we
+ * limit to <= 8bpc since that's what was hardcoded for all
+ * MST streams previously. This hack should be removed once
+ * we have the proper retry logic in place.
+ */
+ limits.max_bpp = min(pipe_config->pipe_bpp, 24);
+
+ intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits);
+
+ ret = intel_dp_mst_compute_link_config(encoder, pipe_config,
+ conn_state, &limits);
+ if (ret)
+ return ret;
+
+ ret = intel_dp_mst_update_slots(encoder, pipe_config, conn_state);
+ if (ret)
+ return ret;
+
+ pipe_config->limited_color_range =
+ intel_dp_limited_color_range(pipe_config, conn_state);
+
+ if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ pipe_config->lane_lat_optim_mask =
+ bxt_ddi_phy_calc_lane_lat_optim_mask(pipe_config->lane_count);
+
+ intel_ddi_compute_min_voltage_level(dev_priv, pipe_config);
+
+ return 0;
+}
+
+/*
+ * Iterate over all connectors and return a mask of
+ * all CPU transcoders streaming over the same DP link.
+ */
+static unsigned int
+intel_dp_mst_transcoder_mask(struct intel_atomic_state *state,
+ struct intel_dp *mst_port)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ const struct intel_digital_connector_state *conn_state;
+ struct intel_connector *connector;
+ u8 transcoders = 0;
+ int i;
+
+ if (DISPLAY_VER(dev_priv) < 12)
+ return 0;
+
+ for_each_new_intel_connector_in_state(state, connector, conn_state, i) {
+ const struct intel_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
+
+ if (connector->mst_port != mst_port || !conn_state->base.crtc)
+ continue;
+
+ crtc = to_intel_crtc(conn_state->base.crtc);
+ crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
+
+ if (!crtc_state->hw.active)
+ continue;
+
+ transcoders |= BIT(crtc_state->cpu_transcoder);
+ }
+
+ return transcoders;
+}
+
+static int intel_dp_mst_compute_config_late(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct intel_atomic_state *state = to_intel_atomic_state(conn_state->state);
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
+ struct intel_dp *intel_dp = &intel_mst->primary->dp;
+
+ /* lowest numbered transcoder will be designated master */
+ crtc_state->mst_master_transcoder =
+ ffs(intel_dp_mst_transcoder_mask(state, intel_dp)) - 1;
+
+ return 0;
+}
+
+/*
+ * If one of the connectors in an MST stream needs a modeset, mark all CRTCs
+ * that share the same MST stream as mode changed;
+ * intel_modeset_pipe_config()+intel_crtc_check_fastset() will take care of
+ * doing a fastset when possible.
+ */
+static int
+intel_dp_mst_atomic_master_trans_check(struct intel_connector *connector,
+ struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct drm_connector_list_iter connector_list_iter;
+ struct intel_connector *connector_iter;
+ int ret = 0;
+
+ if (DISPLAY_VER(dev_priv) < 12)
+ return 0;
+
+ if (!intel_connector_needs_modeset(state, &connector->base))
+ return 0;
+
+ drm_connector_list_iter_begin(&dev_priv->drm, &connector_list_iter);
+ for_each_intel_connector_iter(connector_iter, &connector_list_iter) {
+ struct intel_digital_connector_state *conn_iter_state;
+ struct intel_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
+
+ if (connector_iter->mst_port != connector->mst_port ||
+ connector_iter == connector)
+ continue;
+
+ conn_iter_state = intel_atomic_get_digital_connector_state(state,
+ connector_iter);
+ if (IS_ERR(conn_iter_state)) {
+ ret = PTR_ERR(conn_iter_state);
+ break;
+ }
+
+ if (!conn_iter_state->base.crtc)
+ continue;
+
+ crtc = to_intel_crtc(conn_iter_state->base.crtc);
+ crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
+ if (IS_ERR(crtc_state)) {
+ ret = PTR_ERR(crtc_state);
+ break;
+ }
+
+ ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
+ if (ret)
+ break;
+ crtc_state->uapi.mode_changed = true;
+ }
+ drm_connector_list_iter_end(&connector_list_iter);
+
+ return ret;
+}
+
+static int
+intel_dp_mst_atomic_check(struct drm_connector *connector,
+ struct drm_atomic_state *_state)
+{
+ struct intel_atomic_state *state = to_intel_atomic_state(_state);
+ struct intel_connector *intel_connector =
+ to_intel_connector(connector);
+ int ret;
+
+ ret = intel_digital_connector_atomic_check(connector, &state->base);
+ if (ret)
+ return ret;
+
+ ret = intel_dp_mst_atomic_master_trans_check(intel_connector, state);
+ if (ret)
+ return ret;
+
+ return drm_dp_atomic_release_time_slots(&state->base,
+ &intel_connector->mst_port->mst_mgr,
+ intel_connector->port);
+}
+
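+/*
+ * ACT = Allocation Change Trigger. The ACT-sent status bit in DP_TP_STATUS
+ * is sticky and is cleared by writing it back, which must be done before
+ * triggering a new payload table update.
+ */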
+static void clear_act_sent(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+ intel_de_write(i915, dp_tp_status_reg(encoder, crtc_state),
+ DP_TP_STATUS_ACT_SENT);
+}
+
+static void wait_for_act_sent(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
+ struct intel_dp *intel_dp = &intel_mst->primary->dp;
+
+ if (intel_de_wait_for_set(i915, dp_tp_status_reg(encoder, crtc_state),
+ DP_TP_STATUS_ACT_SENT, 1))
+ drm_err(&i915->drm, "Timed out waiting for ACT sent\n");
+
+ drm_dp_check_act_status(&intel_dp->mst_mgr);
+}
+
+static void intel_mst_disable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
+ struct intel_digital_port *dig_port = intel_mst->primary;
+ struct intel_dp *intel_dp = &dig_port->dp;
+ struct intel_connector *connector =
+ to_intel_connector(old_conn_state->connector);
+ struct drm_dp_mst_topology_state *old_mst_state =
+ drm_atomic_get_old_mst_topology_state(&state->base, &intel_dp->mst_mgr);
+ struct drm_dp_mst_topology_state *new_mst_state =
+ drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst_mgr);
+ const struct drm_dp_mst_atomic_payload *old_payload =
+ drm_atomic_get_mst_payload_state(old_mst_state, connector->port);
+ struct drm_dp_mst_atomic_payload *new_payload =
+ drm_atomic_get_mst_payload_state(new_mst_state, connector->port);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+
+ drm_dbg_kms(&i915->drm, "active links %d\n",
+ intel_dp->active_mst_links);
+
+ intel_hdcp_disable(intel_mst->connector);
+
+ drm_dp_remove_payload(&intel_dp->mst_mgr, new_mst_state,
+ old_payload, new_payload);
+
+ intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state);
+}
+
+static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
+ struct intel_digital_port *dig_port = intel_mst->primary;
+ struct intel_dp *intel_dp = &dig_port->dp;
+ struct intel_connector *connector =
+ to_intel_connector(old_conn_state->connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ bool last_mst_stream;
+
+ intel_dp->active_mst_links--;
+ last_mst_stream = intel_dp->active_mst_links == 0;
+ drm_WARN_ON(&dev_priv->drm,
+ DISPLAY_VER(dev_priv) >= 12 && last_mst_stream &&
+ !intel_dp_mst_is_master_trans(old_crtc_state));
+
+ intel_crtc_vblank_off(old_crtc_state);
+
+ intel_disable_transcoder(old_crtc_state);
+
+ clear_act_sent(encoder, old_crtc_state);
+
+ intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL(old_crtc_state->cpu_transcoder),
+ TRANS_DDI_DP_VC_PAYLOAD_ALLOC, 0);
+
+ wait_for_act_sent(encoder, old_crtc_state);
+
+ intel_ddi_disable_transcoder_func(old_crtc_state);
+
+ if (DISPLAY_VER(dev_priv) >= 9)
+ skl_scaler_disable(old_crtc_state);
+ else
+ ilk_pfit_disable(old_crtc_state);
+
+ /*
+	 * Power down the MST path before disabling the port, otherwise we end
+ * up getting interrupts from the sink upon detecting link loss.
+ */
+ drm_dp_send_power_updown_phy(&intel_dp->mst_mgr, connector->port,
+ false);
+
+ /*
+ * BSpec 4287: disable DIP after the transcoder is disabled and before
+ * the transcoder clock select is set to none.
+ */
+ if (last_mst_stream)
+ intel_dp_set_infoframes(&dig_port->base, false,
+ old_crtc_state, NULL);
+ /*
+ * From TGL spec: "If multi-stream slave transcoder: Configure
+ * Transcoder Clock Select to direct no clock to the transcoder"
+ *
+ * From older GENs spec: "Configure Transcoder Clock Select to direct
+ * no clock to the transcoder"
+ */
+ if (DISPLAY_VER(dev_priv) < 12 || !last_mst_stream)
+ intel_ddi_disable_pipe_clock(old_crtc_state);
+
+ intel_mst->connector = NULL;
+ if (last_mst_stream)
+ dig_port->base.post_disable(state, &dig_port->base,
+ old_crtc_state, NULL);
+
+ drm_dbg_kms(&dev_priv->drm, "active links %d\n",
+ intel_dp->active_mst_links);
+}
+
+static void intel_mst_pre_pll_enable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
+ struct intel_digital_port *dig_port = intel_mst->primary;
+ struct intel_dp *intel_dp = &dig_port->dp;
+
+ if (intel_dp->active_mst_links == 0)
+ dig_port->base.pre_pll_enable(state, &dig_port->base,
+ pipe_config, NULL);
+}
+
+static void intel_mst_pre_enable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
+ struct intel_digital_port *dig_port = intel_mst->primary;
+ struct intel_dp *intel_dp = &dig_port->dp;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_connector *connector =
+ to_intel_connector(conn_state->connector);
+ struct drm_dp_mst_topology_state *mst_state =
+ drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst_mgr);
+ int ret;
+ bool first_mst_stream;
+
+ /* MST encoders are bound to a crtc, not to a connector,
+ * force the mapping here for get_hw_state.
+ */
+ connector->encoder = encoder;
+ intel_mst->connector = connector;
+ first_mst_stream = intel_dp->active_mst_links == 0;
+ drm_WARN_ON(&dev_priv->drm,
+ DISPLAY_VER(dev_priv) >= 12 && first_mst_stream &&
+ !intel_dp_mst_is_master_trans(pipe_config));
+
+ drm_dbg_kms(&dev_priv->drm, "active links %d\n",
+ intel_dp->active_mst_links);
+
+ if (first_mst_stream)
+ intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
+
+ drm_dp_send_power_updown_phy(&intel_dp->mst_mgr, connector->port, true);
+
+ if (first_mst_stream)
+ dig_port->base.pre_enable(state, &dig_port->base,
+ pipe_config, NULL);
+
+ intel_dp->active_mst_links++;
+
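+	/* First half of the payload setup; completed in intel_mst_enable_dp() once the ACT has been sent. */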
+ ret = drm_dp_add_payload_part1(&intel_dp->mst_mgr, mst_state,
+ drm_atomic_get_mst_payload_state(mst_state, connector->port));
+ if (ret < 0)
+ drm_err(&dev_priv->drm, "Failed to create MST payload for %s: %d\n",
+ connector->base.name, ret);
+
+ /*
+ * Before Gen 12 this is not done as part of
+ * dig_port->base.pre_enable() and should be done here. For
+ * Gen 12+ the step in which this should be done is different for the
+ * first MST stream, so it's done on the DDI for the first stream and
+ * here for the following ones.
+ */
+ if (DISPLAY_VER(dev_priv) < 12 || !first_mst_stream)
+ intel_ddi_enable_pipe_clock(encoder, pipe_config);
+
+ intel_ddi_set_dp_msa(pipe_config, conn_state);
+}
+
+static void intel_mst_enable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
+ struct intel_digital_port *dig_port = intel_mst->primary;
+ struct intel_dp *intel_dp = &dig_port->dp;
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct drm_dp_mst_topology_state *mst_state =
+ drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst_mgr);
+ enum transcoder trans = pipe_config->cpu_transcoder;
+
+ drm_WARN_ON(&dev_priv->drm, pipe_config->has_pch_encoder);
+
+ clear_act_sent(encoder, pipe_config);
+
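+	/* 128b/132b transport needs the pixel rate programmed into the transcoder. */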
+ if (intel_dp_is_uhbr(pipe_config)) {
+ const struct drm_display_mode *adjusted_mode =
+ &pipe_config->hw.adjusted_mode;
+ u64 crtc_clock_hz = KHz(adjusted_mode->crtc_clock);
+
+ intel_de_write(dev_priv, TRANS_DP2_VFREQHIGH(pipe_config->cpu_transcoder),
+ TRANS_DP2_VFREQ_PIXEL_CLOCK(crtc_clock_hz >> 24));
+ intel_de_write(dev_priv, TRANS_DP2_VFREQLOW(pipe_config->cpu_transcoder),
+ TRANS_DP2_VFREQ_PIXEL_CLOCK(crtc_clock_hz & 0xffffff));
+ }
+
+ intel_ddi_enable_transcoder_func(encoder, pipe_config);
+
+ intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL(trans), 0,
+ TRANS_DDI_DP_VC_PAYLOAD_ALLOC);
+
+ drm_dbg_kms(&dev_priv->drm, "active links %d\n",
+ intel_dp->active_mst_links);
+
+ wait_for_act_sent(encoder, pipe_config);
+
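+	/* ACT has been seen, so the payload allocation can now be completed. */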
+ drm_dp_add_payload_part2(&intel_dp->mst_mgr, &state->base,
+ drm_atomic_get_mst_payload_state(mst_state, connector->port));
+
+ if (DISPLAY_VER(dev_priv) >= 14 && pipe_config->fec_enable)
+ intel_de_rmw(dev_priv, MTL_CHICKEN_TRANS(trans), 0,
+ FECSTALL_DIS_DPTSTREAM_DPTTG);
+ else if (DISPLAY_VER(dev_priv) >= 12 && pipe_config->fec_enable)
+ intel_de_rmw(dev_priv, CHICKEN_TRANS(trans), 0,
+ FECSTALL_DIS_DPTSTREAM_DPTTG);
+
+ intel_enable_transcoder(pipe_config);
+
+ intel_crtc_vblank_on(pipe_config);
+
+ intel_audio_codec_enable(encoder, pipe_config, conn_state);
+
+ /* Enable hdcp if it's desired */
+ if (conn_state->content_protection ==
+ DRM_MODE_CONTENT_PROTECTION_DESIRED)
+ intel_hdcp_enable(to_intel_connector(conn_state->connector),
+ pipe_config,
+ (u8)conn_state->hdcp_content_type);
+}
+
+static bool intel_dp_mst_enc_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe)
+{
+	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
+
+	*pipe = intel_mst->pipe;
+
+	return intel_mst->connector != NULL;
+}
+
+static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config)
+{
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
+ struct intel_digital_port *dig_port = intel_mst->primary;
+
+ dig_port->base.get_config(&dig_port->base, pipe_config);
+}
+
+static bool intel_dp_mst_initial_fastset_check(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
+{
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
+ struct intel_digital_port *dig_port = intel_mst->primary;
+
+ return intel_dp_initial_fastset_check(&dig_port->base, crtc_state);
+}
+
+static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_dp *intel_dp = intel_connector->mst_port;
+ struct edid *edid;
+ int ret;
+
+ if (drm_connector_is_unregistered(connector))
+ return intel_connector_update_modes(connector, NULL);
+
+ edid = drm_dp_mst_get_edid(connector, &intel_dp->mst_mgr, intel_connector->port);
+ ret = intel_connector_update_modes(connector, edid);
+ kfree(edid);
+
+ return ret;
+}
+
+static int
+intel_dp_mst_connector_late_register(struct drm_connector *connector)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ int ret;
+
+ ret = drm_dp_mst_connector_late_register(connector,
+ intel_connector->port);
+ if (ret < 0)
+ return ret;
+
+ ret = intel_connector_register(connector);
+ if (ret < 0)
+ drm_dp_mst_connector_early_unregister(connector,
+ intel_connector->port);
+
+ return ret;
+}
+
+static void
+intel_dp_mst_connector_early_unregister(struct drm_connector *connector)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+
+ intel_connector_unregister(connector);
+ drm_dp_mst_connector_early_unregister(connector,
+ intel_connector->port);
+}
+
+static const struct drm_connector_funcs intel_dp_mst_connector_funcs = {
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .atomic_get_property = intel_digital_connector_atomic_get_property,
+ .atomic_set_property = intel_digital_connector_atomic_set_property,
+ .late_register = intel_dp_mst_connector_late_register,
+ .early_unregister = intel_dp_mst_connector_early_unregister,
+ .destroy = intel_connector_destroy,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_duplicate_state = intel_digital_connector_duplicate_state,
+};
+
+static int intel_dp_mst_get_modes(struct drm_connector *connector)
+{
+ return intel_dp_mst_get_ddc_modes(connector);
+}
+
+static int
+intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
+ struct drm_display_mode *mode,
+ struct drm_modeset_acquire_ctx *ctx,
+ enum drm_mode_status *status)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_dp *intel_dp = intel_connector->mst_port;
+ struct drm_dp_mst_topology_mgr *mgr = &intel_dp->mst_mgr;
+ struct drm_dp_mst_port *port = intel_connector->port;
+ const int min_bpp = 18;
+ int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
+ int max_rate, mode_rate, max_lanes, max_link_clock;
+ int ret;
+
+ if (drm_connector_is_unregistered(connector)) {
+ *status = MODE_ERROR;
+ return 0;
+ }
+
+ *status = intel_cpu_transcoder_mode_valid(dev_priv, mode);
+ if (*status != MODE_OK)
+ return 0;
+
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN) {
+ *status = MODE_NO_DBLESCAN;
+ return 0;
+ }
+
+ max_link_clock = intel_dp_max_link_rate(intel_dp);
+ max_lanes = intel_dp_max_lane_count(intel_dp);
+
+ max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
+ mode_rate = intel_dp_link_required(mode->clock, min_bpp);
+
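+	/* mgr->base.lock guards port->full_pbn against concurrent topology updates. */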
+ ret = drm_modeset_lock(&mgr->base.lock, ctx);
+ if (ret)
+ return ret;
+
+ if (mode_rate > max_rate || mode->clock > max_dotclk ||
+ drm_dp_calc_pbn_mode(mode->clock, min_bpp, false) > port->full_pbn) {
+ *status = MODE_CLOCK_HIGH;
+ return 0;
+ }
+
+ if (mode->clock < 10000) {
+ *status = MODE_CLOCK_LOW;
+ return 0;
+ }
+
+ if (mode->flags & DRM_MODE_FLAG_DBLCLK) {
+ *status = MODE_H_ILLEGAL;
+ return 0;
+ }
+
+ *status = intel_mode_valid_max_plane_size(dev_priv, mode, false);
+ return 0;
+}
+
+static struct drm_encoder *intel_mst_atomic_best_encoder(struct drm_connector *connector,
+ struct drm_atomic_state *state)
+{
+ struct drm_connector_state *connector_state = drm_atomic_get_new_connector_state(state,
+ connector);
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_dp *intel_dp = intel_connector->mst_port;
+ struct intel_crtc *crtc = to_intel_crtc(connector_state->crtc);
+
+ return &intel_dp->mst_encoders[crtc->pipe]->base.base;
+}
+
+static int
+intel_dp_mst_detect(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx, bool force)
+{
+ struct drm_i915_private *i915 = to_i915(connector->dev);
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_dp *intel_dp = intel_connector->mst_port;
+
+ if (!INTEL_DISPLAY_ENABLED(i915))
+ return connector_status_disconnected;
+
+ if (drm_connector_is_unregistered(connector))
+ return connector_status_disconnected;
+
+ return drm_dp_mst_detect_port(connector, ctx, &intel_dp->mst_mgr,
+ intel_connector->port);
+}
+
+static const struct drm_connector_helper_funcs intel_dp_mst_connector_helper_funcs = {
+ .get_modes = intel_dp_mst_get_modes,
+ .mode_valid_ctx = intel_dp_mst_mode_valid_ctx,
+ .atomic_best_encoder = intel_mst_atomic_best_encoder,
+ .atomic_check = intel_dp_mst_atomic_check,
+ .detect_ctx = intel_dp_mst_detect,
+};
+
+static void intel_dp_mst_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(to_intel_encoder(encoder));
+
+ drm_encoder_cleanup(encoder);
+ kfree(intel_mst);
+}
+
+static const struct drm_encoder_funcs intel_dp_mst_enc_funcs = {
+ .destroy = intel_dp_mst_encoder_destroy,
+};
+
+static bool intel_dp_mst_get_hw_state(struct intel_connector *connector)
+{
+	struct intel_encoder *encoder = intel_attached_encoder(connector);
+	enum pipe pipe;
+
+	if (!encoder || !connector->base.state->crtc)
+		return false;
+
+	return encoder->get_hw_state(encoder, &pipe);
+}
+
+static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *pathprop)
+{
+ struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_connector *intel_connector;
+ struct drm_connector *connector;
+ enum pipe pipe;
+ int ret;
+
+ intel_connector = intel_connector_alloc();
+ if (!intel_connector)
+ return NULL;
+
+ intel_connector->get_hw_state = intel_dp_mst_get_hw_state;
+ intel_connector->mst_port = intel_dp;
+ intel_connector->port = port;
+ drm_dp_mst_get_port_malloc(port);
+
+ connector = &intel_connector->base;
+ ret = drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs,
+ DRM_MODE_CONNECTOR_DisplayPort);
+ if (ret) {
+ drm_dp_mst_put_port_malloc(port);
+ intel_connector_free(intel_connector);
+ return NULL;
+ }
+
+ drm_connector_helper_add(connector, &intel_dp_mst_connector_helper_funcs);
+
+ for_each_pipe(dev_priv, pipe) {
+ struct drm_encoder *enc =
+ &intel_dp->mst_encoders[pipe]->base.base;
+
+ ret = drm_connector_attach_encoder(&intel_connector->base, enc);
+ if (ret)
+ goto err;
+ }
+
+ drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0);
+ drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0);
+
+ ret = drm_connector_set_path_property(connector, pathprop);
+ if (ret)
+ goto err;
+
+ intel_attach_force_audio_property(connector);
+ intel_attach_broadcast_rgb_property(connector);
+
+ ret = intel_dp_hdcp_init(dig_port, intel_connector);
+ if (ret)
+ drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP MST init failed, skipping.\n",
+ connector->name, connector->base.id);
+ /*
+ * Reuse the prop from the SST connector because we're
+ * not allowed to create new props after device registration.
+ */
+ connector->max_bpc_property =
+ intel_dp->attached_connector->base.max_bpc_property;
+ if (connector->max_bpc_property)
+ drm_connector_attach_max_bpc_property(connector, 6, 12);
+
+ return connector;
+
+err:
+ drm_connector_cleanup(connector);
+ return NULL;
+}
+
+static void
+intel_dp_mst_poll_hpd_irq(struct drm_dp_mst_topology_mgr *mgr)
+{
+ struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr);
+
+ intel_hpd_trigger_irq(dp_to_dig_port(intel_dp));
+}
+
+static const struct drm_dp_mst_topology_cbs mst_cbs = {
+ .add_connector = intel_dp_add_mst_connector,
+ .poll_hpd_irq = intel_dp_mst_poll_hpd_irq,
+};
+
+static struct intel_dp_mst_encoder *
+intel_dp_create_fake_mst_encoder(struct intel_digital_port *dig_port, enum pipe pipe)
+{
+ struct intel_dp_mst_encoder *intel_mst;
+ struct intel_encoder *intel_encoder;
+ struct drm_device *dev = dig_port->base.base.dev;
+
+ intel_mst = kzalloc(sizeof(*intel_mst), GFP_KERNEL);
+
+ if (!intel_mst)
+ return NULL;
+
+ intel_mst->pipe = pipe;
+ intel_encoder = &intel_mst->base;
+ intel_mst->primary = dig_port;
+
+ drm_encoder_init(dev, &intel_encoder->base, &intel_dp_mst_enc_funcs,
+ DRM_MODE_ENCODER_DPMST, "DP-MST %c", pipe_name(pipe));
+
+ intel_encoder->type = INTEL_OUTPUT_DP_MST;
+ intel_encoder->power_domain = dig_port->base.power_domain;
+ intel_encoder->port = dig_port->base.port;
+ intel_encoder->cloneable = 0;
+ /*
+ * This is wrong, but broken userspace uses the intersection
+ * of possible_crtcs of all the encoders of a given connector
+ * to figure out which crtcs can drive said connector. What
+ * should be used instead is the union of possible_crtcs.
+ * To keep such userspace functioning we must misconfigure
+ * this to make sure the intersection is not empty :(
+ */
+ intel_encoder->pipe_mask = ~0;
+
+ intel_encoder->compute_config = intel_dp_mst_compute_config;
+ intel_encoder->compute_config_late = intel_dp_mst_compute_config_late;
+ intel_encoder->disable = intel_mst_disable_dp;
+ intel_encoder->post_disable = intel_mst_post_disable_dp;
+ intel_encoder->update_pipe = intel_ddi_update_pipe;
+ intel_encoder->pre_pll_enable = intel_mst_pre_pll_enable_dp;
+ intel_encoder->pre_enable = intel_mst_pre_enable_dp;
+ intel_encoder->enable = intel_mst_enable_dp;
+ intel_encoder->get_hw_state = intel_dp_mst_enc_get_hw_state;
+ intel_encoder->get_config = intel_dp_mst_enc_get_config;
+ intel_encoder->initial_fastset_check = intel_dp_mst_initial_fastset_check;
+
+ return intel_mst;
+}
+
+static bool
+intel_dp_create_fake_mst_encoders(struct intel_digital_port *dig_port)
+{
+ struct intel_dp *intel_dp = &dig_port->dp;
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+ enum pipe pipe;
+
+ for_each_pipe(dev_priv, pipe)
+ intel_dp->mst_encoders[pipe] = intel_dp_create_fake_mst_encoder(dig_port, pipe);
+ return true;
+}
+
+int
+intel_dp_mst_encoder_active_links(struct intel_digital_port *dig_port)
+{
+ return dig_port->dp.active_mst_links;
+}
+
+int
+intel_dp_mst_encoder_init(struct intel_digital_port *dig_port, int conn_base_id)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_dp *intel_dp = &dig_port->dp;
+ enum port port = dig_port->base.port;
+ int ret;
+
+ if (!HAS_DP_MST(i915) || intel_dp_is_edp(intel_dp))
+ return 0;
+
+ if (DISPLAY_VER(i915) < 12 && port == PORT_A)
+ return 0;
+
+ if (DISPLAY_VER(i915) < 11 && port == PORT_E)
+ return 0;
+
+ intel_dp->mst_mgr.cbs = &mst_cbs;
+
+ /* create encoders */
+ intel_dp_create_fake_mst_encoders(dig_port);
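+	/* AUX transactions of up to 16 bytes, at most 3 concurrent payloads. */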
+ ret = drm_dp_mst_topology_mgr_init(&intel_dp->mst_mgr, &i915->drm,
+ &intel_dp->aux, 16, 3, conn_base_id);
+ if (ret) {
+ intel_dp->mst_mgr.cbs = NULL;
+ return ret;
+ }
+
+ return 0;
+}
+
+bool intel_dp_mst_source_support(struct intel_dp *intel_dp)
+{
+ return intel_dp->mst_mgr.cbs;
+}
+
+void
+intel_dp_mst_encoder_cleanup(struct intel_digital_port *dig_port)
+{
+ struct intel_dp *intel_dp = &dig_port->dp;
+
+ if (!intel_dp_mst_source_support(intel_dp))
+ return;
+
+ drm_dp_mst_topology_mgr_destroy(&intel_dp->mst_mgr);
+ /* encoders will get killed by normal cleanup */
+
+ intel_dp->mst_mgr.cbs = NULL;
+}
+
+bool intel_dp_mst_is_master_trans(const struct intel_crtc_state *crtc_state)
+{
+ return crtc_state->mst_master_transcoder == crtc_state->cpu_transcoder;
+}
+
+bool intel_dp_mst_is_slave_trans(const struct intel_crtc_state *crtc_state)
+{
+ return crtc_state->mst_master_transcoder != INVALID_TRANSCODER &&
+ crtc_state->mst_master_transcoder != crtc_state->cpu_transcoder;
+}
+
+/**
+ * intel_dp_mst_add_topology_state_for_connector - add MST topology state for a connector
+ * @state: atomic state
+ * @connector: connector to add the state for
+ * @crtc: the CRTC @connector is attached to
+ *
+ * Add the MST topology state for @connector to @state.
+ *
+ * Returns 0 on success, negative error code on failure.
+ */
+static int
+intel_dp_mst_add_topology_state_for_connector(struct intel_atomic_state *state,
+ struct intel_connector *connector,
+ struct intel_crtc *crtc)
+{
+ struct drm_dp_mst_topology_state *mst_state;
+
+ if (!connector->mst_port)
+ return 0;
+
+ mst_state = drm_atomic_get_mst_topology_state(&state->base,
+ &connector->mst_port->mst_mgr);
+ if (IS_ERR(mst_state))
+ return PTR_ERR(mst_state);
+
+ mst_state->pending_crtc_mask |= drm_crtc_mask(&crtc->base);
+
+ return 0;
+}
+
+/**
+ * intel_dp_mst_add_topology_state_for_crtc - add MST topology state for a CRTC
+ * @state: atomic state
+ * @crtc: CRTC to add the state for
+ *
+ * Add the MST topology state for @crtc to @state.
+ *
+ * Returns 0 on success, negative error code on failure.
+ */
+int intel_dp_mst_add_topology_state_for_crtc(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_connector *_connector;
+ struct drm_connector_state *conn_state;
+ int i;
+
+ for_each_new_connector_in_state(&state->base, _connector, conn_state, i) {
+ struct intel_connector *connector = to_intel_connector(_connector);
+ int ret;
+
+ if (conn_state->crtc != &crtc->base)
+ continue;
+
+ ret = intel_dp_mst_add_topology_state_for_connector(state, connector, crtc);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.h b/drivers/gpu/drm/i915/display/intel_dp_mst.h
new file mode 100644
index 000000000..f1815bb72
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_DP_MST_H__
+#define __INTEL_DP_MST_H__
+
+#include <linux/types.h>
+
+struct intel_atomic_state;
+struct intel_crtc;
+struct intel_crtc_state;
+struct intel_digital_port;
+struct intel_dp;
+
+int intel_dp_mst_encoder_init(struct intel_digital_port *dig_port, int conn_id);
+void intel_dp_mst_encoder_cleanup(struct intel_digital_port *dig_port);
+int intel_dp_mst_encoder_active_links(struct intel_digital_port *dig_port);
+bool intel_dp_mst_is_master_trans(const struct intel_crtc_state *crtc_state);
+bool intel_dp_mst_is_slave_trans(const struct intel_crtc_state *crtc_state);
+bool intel_dp_mst_source_support(struct intel_dp *intel_dp);
+int intel_dp_mst_add_topology_state_for_crtc(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+
+#endif /* __INTEL_DP_MST_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.c b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
new file mode 100644
index 000000000..8732b8722
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
@@ -0,0 +1,1106 @@
+/*
+ * Copyright © 2014-2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "intel_ddi.h"
+#include "intel_ddi_buf_trans.h"
+#include "intel_de.h"
+#include "intel_display_power_well.h"
+#include "intel_display_types.h"
+#include "intel_dp.h"
+#include "intel_dpio_phy.h"
+#include "vlv_sideband.h"
+
+/**
+ * DOC: DPIO
+ *
+ * VLV, CHV and BXT have slightly peculiar display PHYs for driving DP/HDMI
+ * ports. DPIO is the name given to such a display PHY. These PHYs
+ * don't follow the standard programming model using direct MMIO
+ * registers, and instead their registers must be accessed trough IOSF
+ * sideband. VLV has one such PHY for driving ports B and C, and CHV
+ * adds another PHY for driving port D. Each PHY responds to specific
+ * IOSF-SB port.
+ *
+ * Each display PHY is made up of one or two channels. Each channel
+ * houses a common lane part which contains the PLL and other common
+ * logic. CH0 common lane also contains the IOSF-SB logic for the
+ * Common Register Interface (CRI), i.e. the DPIO registers. CRI clock
+ * must be running when any DPIO registers are accessed.
+ *
+ * In addition to having their own registers, the PHYs are also
+ * controlled through some dedicated signals from the display
+ * controller. These include PLL reference clock enable, PLL enable,
+ * and CRI clock selection, for example.
+ *
+ * Each channel also has two splines (also called data lanes), and
+ * each spline is made up of one Physical Access Coding Sub-Layer
+ * (PCS) block and two TX lanes. So each channel has two PCS blocks
+ * and four TX lanes. The TX lanes are used as DP lanes or TMDS
+ * data/clock pairs depending on the output type.
+ *
+ * Additionally the PHY also contains an AUX lane with AUX blocks
+ * for each channel. This is used for DP AUX communication, but
+ * this fact isn't really relevant for the driver since AUX is
+ * controlled from the display controller side. No DPIO registers
+ * need to be accessed during AUX communication.
+ *
+ * Generally on VLV/CHV the common lane corresponds to the pipe and
+ * the spline (PCS/TX) corresponds to the port.
+ *
+ * For dual channel PHY (VLV/CHV):
+ *
+ * pipe A == CMN/PLL/REF CH0
+ *
+ * pipe B == CMN/PLL/REF CH1
+ *
+ * port B == PCS/TX CH0
+ *
+ * port C == PCS/TX CH1
+ *
+ * This is especially important when we cross the streams
+ * ie. drive port B with pipe B, or port C with pipe A.
+ *
+ * For single channel PHY (CHV):
+ *
+ * pipe C == CMN/PLL/REF CH0
+ *
+ * port D == PCS/TX CH0
+ *
+ * On BXT the entire PHY channel corresponds to the port. That means
+ * the PLL is also now associated with the port rather than the pipe,
+ * and so the clock needs to be routed to the appropriate transcoder.
+ * Port A PLL is directly connected to transcoder EDP and port B/C
+ * PLLs can be routed to any transcoder A/B/C.
+ *
+ * Note: DDI0 is digital port B, DDI1 is digital port C, and DDI2 is
+ * digital port D (CHV) or port A (BXT). ::
+ *
+ *
+ * Dual channel PHY (VLV/CHV/BXT)
+ * ---------------------------------
+ * | CH0 | CH1 |
+ * | CMN/PLL/REF | CMN/PLL/REF |
+ * |---------------|---------------| Display PHY
+ * | PCS01 | PCS23 | PCS01 | PCS23 |
+ * |-------|-------|-------|-------|
+ * |TX0|TX1|TX2|TX3|TX0|TX1|TX2|TX3|
+ * ---------------------------------
+ * | DDI0 | DDI1 | DP/HDMI ports
+ * ---------------------------------
+ *
+ * Single channel PHY (CHV/BXT)
+ * -----------------
+ * | CH0 |
+ * | CMN/PLL/REF |
+ * |---------------| Display PHY
+ * | PCS01 | PCS23 |
+ * |-------|-------|
+ * |TX0|TX1|TX2|TX3|
+ * -----------------
+ * | DDI2 | DP/HDMI port
+ * -----------------
+ */
+
+/**
+ * struct bxt_ddi_phy_info - Hold info for a broxton DDI phy
+ */
+struct bxt_ddi_phy_info {
+ /**
+ * @dual_channel: true if this phy has a second channel.
+ */
+ bool dual_channel;
+
+ /**
+ * @rcomp_phy: If -1, indicates this phy has its own rcomp resistor.
+ * Otherwise the GRC value will be copied from the phy indicated by
+ * this field.
+ */
+ enum dpio_phy rcomp_phy;
+
+ /**
+ * @reset_delay: delay in us to wait before setting the common reset
+ * bit in BXT_PHY_CTL_FAMILY, which effectively enables the phy.
+ */
+ int reset_delay;
+
+ /**
+ * @pwron_mask: Mask with the appropriate bit set that would cause the
+ * punit to power this phy if written to BXT_P_CR_GT_DISP_PWRON.
+ */
+ u32 pwron_mask;
+
+ /**
+ * @channel: struct containing per channel information.
+ */
+ struct {
+ /**
+ * @channel.port: which port maps to this channel.
+ */
+ enum port port;
+ } channel[2];
+};
+
+static const struct bxt_ddi_phy_info bxt_ddi_phy_info[] = {
+ [DPIO_PHY0] = {
+ .dual_channel = true,
+ .rcomp_phy = DPIO_PHY1,
+ .pwron_mask = BIT(0),
+
+ .channel = {
+ [DPIO_CH0] = { .port = PORT_B },
+ [DPIO_CH1] = { .port = PORT_C },
+ }
+ },
+ [DPIO_PHY1] = {
+ .dual_channel = false,
+ .rcomp_phy = -1,
+ .pwron_mask = BIT(1),
+
+ .channel = {
+ [DPIO_CH0] = { .port = PORT_A },
+ }
+ },
+};
+
+static const struct bxt_ddi_phy_info glk_ddi_phy_info[] = {
+ [DPIO_PHY0] = {
+ .dual_channel = false,
+ .rcomp_phy = DPIO_PHY1,
+ .pwron_mask = BIT(0),
+ .reset_delay = 20,
+
+ .channel = {
+ [DPIO_CH0] = { .port = PORT_B },
+ }
+ },
+ [DPIO_PHY1] = {
+ .dual_channel = false,
+ .rcomp_phy = -1,
+ .pwron_mask = BIT(3),
+ .reset_delay = 20,
+
+ .channel = {
+ [DPIO_CH0] = { .port = PORT_A },
+ }
+ },
+ [DPIO_PHY2] = {
+ .dual_channel = false,
+ .rcomp_phy = DPIO_PHY1,
+ .pwron_mask = BIT(1),
+ .reset_delay = 20,
+
+ .channel = {
+ [DPIO_CH0] = { .port = PORT_C },
+ }
+ },
+};
+
+static const struct bxt_ddi_phy_info *
+bxt_get_phy_list(struct drm_i915_private *dev_priv, int *count)
+{
+ if (IS_GEMINILAKE(dev_priv)) {
+ *count = ARRAY_SIZE(glk_ddi_phy_info);
+ return glk_ddi_phy_info;
+ } else {
+ *count = ARRAY_SIZE(bxt_ddi_phy_info);
+ return bxt_ddi_phy_info;
+ }
+}
+
+static const struct bxt_ddi_phy_info *
+bxt_get_phy_info(struct drm_i915_private *dev_priv, enum dpio_phy phy)
+{
+ int count;
+ const struct bxt_ddi_phy_info *phy_list =
+ bxt_get_phy_list(dev_priv, &count);
+
+ return &phy_list[phy];
+}
+
+void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port,
+ enum dpio_phy *phy, enum dpio_channel *ch)
+{
+ const struct bxt_ddi_phy_info *phy_info, *phys;
+ int i, count;
+
+ phys = bxt_get_phy_list(dev_priv, &count);
+
+ for (i = 0; i < count; i++) {
+ phy_info = &phys[i];
+
+ if (port == phy_info->channel[DPIO_CH0].port) {
+ *phy = i;
+ *ch = DPIO_CH0;
+ return;
+ }
+
+ if (phy_info->dual_channel &&
+ port == phy_info->channel[DPIO_CH1].port) {
+ *phy = i;
+ *ch = DPIO_CH1;
+ return;
+ }
+ }
+
+ drm_WARN(&dev_priv->drm, 1, "PHY not found for PORT %c",
+ port_name(port));
+ *phy = DPIO_PHY0;
+ *ch = DPIO_CH0;
+}
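+
+/*
+ * Illustrative usage (a sketch, not a call site from this patch): callers
+ * resolve the PHY/channel pair once before touching any per-channel
+ * registers, e.g.:
+ *
+ *	enum dpio_phy phy;
+ *	enum dpio_channel ch;
+ *
+ *	bxt_port_to_phy_channel(dev_priv, PORT_B, &phy, &ch);
+ *
+ * which yields phy == DPIO_PHY0 and ch == DPIO_CH0 per the
+ * bxt_ddi_phy_info[] and glk_ddi_phy_info[] tables above.
+ */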
+
+void bxt_ddi_phy_set_signal_levels(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ int level = intel_ddi_level(encoder, crtc_state, 0);
+ const struct intel_ddi_buf_trans *trans;
+ enum dpio_channel ch;
+ enum dpio_phy phy;
+ int n_entries;
+ u32 val;
+
+ trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans))
+ return;
+
+ bxt_port_to_phy_channel(dev_priv, encoder->port, &phy, &ch);
+
+ /*
+	 * While we write to the group register to program all lanes at once, we
+	 * can only read from individual lane registers, and we pick lanes 0/1
+	 * for that.
+ */
+ val = intel_de_read(dev_priv, BXT_PORT_PCS_DW10_LN01(phy, ch));
+ val &= ~(TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT);
+ intel_de_write(dev_priv, BXT_PORT_PCS_DW10_GRP(phy, ch), val);
+
+ val = intel_de_read(dev_priv, BXT_PORT_TX_DW2_LN0(phy, ch));
+ val &= ~(MARGIN_000 | UNIQ_TRANS_SCALE);
+ val |= trans->entries[level].bxt.margin << MARGIN_000_SHIFT |
+ trans->entries[level].bxt.scale << UNIQ_TRANS_SCALE_SHIFT;
+ intel_de_write(dev_priv, BXT_PORT_TX_DW2_GRP(phy, ch), val);
+
+ val = intel_de_read(dev_priv, BXT_PORT_TX_DW3_LN0(phy, ch));
+ val &= ~SCALE_DCOMP_METHOD;
+ if (trans->entries[level].bxt.enable)
+ val |= SCALE_DCOMP_METHOD;
+
+ if ((val & UNIQUE_TRANGE_EN_METHOD) && !(val & SCALE_DCOMP_METHOD))
+ drm_err(&dev_priv->drm,
+			"Disabled scaling while UNIQUE_TRANGE_EN_METHOD was set");
+
+ intel_de_write(dev_priv, BXT_PORT_TX_DW3_GRP(phy, ch), val);
+
+ val = intel_de_read(dev_priv, BXT_PORT_TX_DW4_LN0(phy, ch));
+ val &= ~DE_EMPHASIS;
+ val |= trans->entries[level].bxt.deemphasis << DEEMPH_SHIFT;
+ intel_de_write(dev_priv, BXT_PORT_TX_DW4_GRP(phy, ch), val);
+
+ val = intel_de_read(dev_priv, BXT_PORT_PCS_DW10_LN01(phy, ch));
+ val |= TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT;
+ intel_de_write(dev_priv, BXT_PORT_PCS_DW10_GRP(phy, ch), val);
+}
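+
+/*
+ * Note: the per-level programming above is bracketed by the swing calc
+ * init bits: TX1/TX2_SWING_CALC_INIT are cleared first, the new margin,
+ * scale, dcomp method and de-emphasis values are written, and the bits
+ * are set again at the end, which kicks off a swing calculation with the
+ * new values.
+ */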
+
+bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
+ enum dpio_phy phy)
+{
+ const struct bxt_ddi_phy_info *phy_info;
+
+ phy_info = bxt_get_phy_info(dev_priv, phy);
+
+ if (!(intel_de_read(dev_priv, BXT_P_CR_GT_DISP_PWRON) & phy_info->pwron_mask))
+ return false;
+
+ if ((intel_de_read(dev_priv, BXT_PORT_CL1CM_DW0(phy)) &
+ (PHY_POWER_GOOD | PHY_RESERVED)) != PHY_POWER_GOOD) {
+ drm_dbg(&dev_priv->drm,
+ "DDI PHY %d powered, but power hasn't settled\n", phy);
+
+ return false;
+ }
+
+ if (!(intel_de_read(dev_priv, BXT_PHY_CTL_FAMILY(phy)) & COMMON_RESET_DIS)) {
+ drm_dbg(&dev_priv->drm,
+ "DDI PHY %d powered, but still in reset\n", phy);
+
+ return false;
+ }
+
+ return true;
+}
+
+static u32 bxt_get_grc(struct drm_i915_private *dev_priv, enum dpio_phy phy)
+{
+ u32 val = intel_de_read(dev_priv, BXT_PORT_REF_DW6(phy));
+
+ return (val & GRC_CODE_MASK) >> GRC_CODE_SHIFT;
+}
+
+static void bxt_phy_wait_grc_done(struct drm_i915_private *dev_priv,
+ enum dpio_phy phy)
+{
+ if (intel_de_wait_for_set(dev_priv, BXT_PORT_REF_DW3(phy),
+ GRC_DONE, 10))
+ drm_err(&dev_priv->drm, "timeout waiting for PHY%d GRC\n",
+ phy);
+}
+
+static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
+ enum dpio_phy phy)
+{
+ const struct bxt_ddi_phy_info *phy_info;
+ u32 val;
+
+ phy_info = bxt_get_phy_info(dev_priv, phy);
+
+ if (bxt_ddi_phy_is_enabled(dev_priv, phy)) {
+ /* Still read out the GRC value for state verification */
+ if (phy_info->rcomp_phy != -1)
+ dev_priv->bxt_phy_grc = bxt_get_grc(dev_priv, phy);
+
+ if (bxt_ddi_phy_verify_state(dev_priv, phy)) {
+ drm_dbg(&dev_priv->drm, "DDI PHY %d already enabled, "
+ "won't reprogram it\n", phy);
+ return;
+ }
+
+ drm_dbg(&dev_priv->drm,
+ "DDI PHY %d enabled with invalid state, "
+ "force reprogramming it\n", phy);
+ }
+
+ val = intel_de_read(dev_priv, BXT_P_CR_GT_DISP_PWRON);
+ val |= phy_info->pwron_mask;
+ intel_de_write(dev_priv, BXT_P_CR_GT_DISP_PWRON, val);
+
+ /*
+ * The PHY registers start out inaccessible and respond to reads with
+ * all 1s. Eventually they become accessible as they power up, then
+ * the reserved bit will give the default 0. Poll on the reserved bit
+ * becoming 0 to find when the PHY is accessible.
+	 * The flag should get set in 100us according to the HW team, but we
+	 * use 1ms due to occasional timeouts observed with that value.
+ */
+ if (intel_wait_for_register_fw(&dev_priv->uncore,
+ BXT_PORT_CL1CM_DW0(phy),
+ PHY_RESERVED | PHY_POWER_GOOD,
+ PHY_POWER_GOOD,
+ 1))
+ drm_err(&dev_priv->drm, "timeout during PHY%d power on\n",
+ phy);
+
+ /* Program PLL Rcomp code offset */
+ val = intel_de_read(dev_priv, BXT_PORT_CL1CM_DW9(phy));
+ val &= ~IREF0RC_OFFSET_MASK;
+ val |= 0xE4 << IREF0RC_OFFSET_SHIFT;
+ intel_de_write(dev_priv, BXT_PORT_CL1CM_DW9(phy), val);
+
+ val = intel_de_read(dev_priv, BXT_PORT_CL1CM_DW10(phy));
+ val &= ~IREF1RC_OFFSET_MASK;
+ val |= 0xE4 << IREF1RC_OFFSET_SHIFT;
+ intel_de_write(dev_priv, BXT_PORT_CL1CM_DW10(phy), val);
+
+ /* Program power gating */
+ val = intel_de_read(dev_priv, BXT_PORT_CL1CM_DW28(phy));
+ val |= OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN |
+ SUS_CLK_CONFIG;
+ intel_de_write(dev_priv, BXT_PORT_CL1CM_DW28(phy), val);
+
+ if (phy_info->dual_channel) {
+ val = intel_de_read(dev_priv, BXT_PORT_CL2CM_DW6(phy));
+ val |= DW6_OLDO_DYN_PWR_DOWN_EN;
+ intel_de_write(dev_priv, BXT_PORT_CL2CM_DW6(phy), val);
+ }
+
+ if (phy_info->rcomp_phy != -1) {
+ u32 grc_code;
+
+ bxt_phy_wait_grc_done(dev_priv, phy_info->rcomp_phy);
+
+ /*
+ * PHY0 isn't connected to an RCOMP resistor so copy over
+ * the corresponding calibrated value from PHY1, and disable
+ * the automatic calibration on PHY0.
+ */
+ val = dev_priv->bxt_phy_grc = bxt_get_grc(dev_priv,
+ phy_info->rcomp_phy);
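+		/*
+		 * Replicate the single calibrated value into the fast, slow
+		 * and nominal GRC code fields of BXT_PORT_REF_DW6.
+		 */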
+ grc_code = val << GRC_CODE_FAST_SHIFT |
+ val << GRC_CODE_SLOW_SHIFT |
+ val;
+ intel_de_write(dev_priv, BXT_PORT_REF_DW6(phy), grc_code);
+
+ val = intel_de_read(dev_priv, BXT_PORT_REF_DW8(phy));
+ val |= GRC_DIS | GRC_RDY_OVRD;
+ intel_de_write(dev_priv, BXT_PORT_REF_DW8(phy), val);
+ }
+
+ if (phy_info->reset_delay)
+ udelay(phy_info->reset_delay);
+
+ val = intel_de_read(dev_priv, BXT_PHY_CTL_FAMILY(phy));
+ val |= COMMON_RESET_DIS;
+ intel_de_write(dev_priv, BXT_PHY_CTL_FAMILY(phy), val);
+}
+
+void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy)
+{
+ const struct bxt_ddi_phy_info *phy_info;
+ u32 val;
+
+ phy_info = bxt_get_phy_info(dev_priv, phy);
+
+ val = intel_de_read(dev_priv, BXT_PHY_CTL_FAMILY(phy));
+ val &= ~COMMON_RESET_DIS;
+ intel_de_write(dev_priv, BXT_PHY_CTL_FAMILY(phy), val);
+
+ val = intel_de_read(dev_priv, BXT_P_CR_GT_DISP_PWRON);
+ val &= ~phy_info->pwron_mask;
+ intel_de_write(dev_priv, BXT_P_CR_GT_DISP_PWRON, val);
+}
+
+void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy)
+{
+ const struct bxt_ddi_phy_info *phy_info =
+ bxt_get_phy_info(dev_priv, phy);
+ enum dpio_phy rcomp_phy = phy_info->rcomp_phy;
+ bool was_enabled;
+
+ lockdep_assert_held(&dev_priv->display.power.domains.lock);
+
+ was_enabled = true;
+ if (rcomp_phy != -1)
+ was_enabled = bxt_ddi_phy_is_enabled(dev_priv, rcomp_phy);
+
+ /*
+ * We need to copy the GRC calibration value from rcomp_phy,
+ * so make sure it's powered up.
+ */
+ if (!was_enabled)
+ _bxt_ddi_phy_init(dev_priv, rcomp_phy);
+
+ _bxt_ddi_phy_init(dev_priv, phy);
+
+ if (!was_enabled)
+ bxt_ddi_phy_uninit(dev_priv, rcomp_phy);
+}
+
+static bool __printf(6, 7)
+__phy_reg_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy,
+ i915_reg_t reg, u32 mask, u32 expected,
+ const char *reg_fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+ u32 val;
+
+ val = intel_de_read(dev_priv, reg);
+ if ((val & mask) == expected)
+ return true;
+
+ va_start(args, reg_fmt);
+ vaf.fmt = reg_fmt;
+ vaf.va = &args;
+
+ drm_dbg(&dev_priv->drm, "DDI PHY %d reg %pV [%08x] state mismatch: "
+ "current %08x, expected %08x (mask %08x)\n",
+ phy, &vaf, reg.reg, val, (val & ~mask) | expected,
+ mask);
+
+ va_end(args);
+
+ return false;
+}
+
+bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
+ enum dpio_phy phy)
+{
+ const struct bxt_ddi_phy_info *phy_info;
+ u32 mask;
+ bool ok;
+
+ phy_info = bxt_get_phy_info(dev_priv, phy);
+
+#define _CHK(reg, mask, exp, fmt, ...) \
+ __phy_reg_verify_state(dev_priv, phy, reg, mask, exp, fmt, \
+ ## __VA_ARGS__)
+
+ if (!bxt_ddi_phy_is_enabled(dev_priv, phy))
+ return false;
+
+ ok = true;
+
+ /* PLL Rcomp code offset */
+ ok &= _CHK(BXT_PORT_CL1CM_DW9(phy),
+ IREF0RC_OFFSET_MASK, 0xe4 << IREF0RC_OFFSET_SHIFT,
+ "BXT_PORT_CL1CM_DW9(%d)", phy);
+ ok &= _CHK(BXT_PORT_CL1CM_DW10(phy),
+ IREF1RC_OFFSET_MASK, 0xe4 << IREF1RC_OFFSET_SHIFT,
+ "BXT_PORT_CL1CM_DW10(%d)", phy);
+
+ /* Power gating */
+ mask = OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN | SUS_CLK_CONFIG;
+ ok &= _CHK(BXT_PORT_CL1CM_DW28(phy), mask, mask,
+ "BXT_PORT_CL1CM_DW28(%d)", phy);
+
+ if (phy_info->dual_channel)
+ ok &= _CHK(BXT_PORT_CL2CM_DW6(phy),
+ DW6_OLDO_DYN_PWR_DOWN_EN, DW6_OLDO_DYN_PWR_DOWN_EN,
+ "BXT_PORT_CL2CM_DW6(%d)", phy);
+
+ if (phy_info->rcomp_phy != -1) {
+ u32 grc_code = dev_priv->bxt_phy_grc;
+
+ grc_code = grc_code << GRC_CODE_FAST_SHIFT |
+ grc_code << GRC_CODE_SLOW_SHIFT |
+ grc_code;
+ mask = GRC_CODE_FAST_MASK | GRC_CODE_SLOW_MASK |
+ GRC_CODE_NOM_MASK;
+ ok &= _CHK(BXT_PORT_REF_DW6(phy), mask, grc_code,
+ "BXT_PORT_REF_DW6(%d)", phy);
+
+ mask = GRC_DIS | GRC_RDY_OVRD;
+ ok &= _CHK(BXT_PORT_REF_DW8(phy), mask, mask,
+ "BXT_PORT_REF_DW8(%d)", phy);
+ }
+
+ return ok;
+#undef _CHK
+}
+
+u8
+bxt_ddi_phy_calc_lane_lat_optim_mask(u8 lane_count)
+{
+ switch (lane_count) {
+ case 1:
+ return 0;
+ case 2:
+ return BIT(2) | BIT(0);
+ case 4:
+ return BIT(3) | BIT(2) | BIT(0);
+ default:
+ MISSING_CASE(lane_count);
+
+ return 0;
+ }
+}
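+
+/*
+ * Illustrative pairing (a sketch; the real call sites live elsewhere in
+ * the driver): the computed mask feeds the setter below, e.g. for a x4
+ * link:
+ *
+ *	u8 mask = bxt_ddi_phy_calc_lane_lat_optim_mask(4);
+ *	bxt_ddi_phy_set_lane_optim_mask(encoder, mask);
+ *
+ * which enables latency optimization on TX lanes 0, 2 and 3.
+ */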
+
+void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
+ u8 lane_lat_optim_mask)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+ enum dpio_phy phy;
+ enum dpio_channel ch;
+ int lane;
+
+ bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
+
+ for (lane = 0; lane < 4; lane++) {
+ u32 val = intel_de_read(dev_priv,
+ BXT_PORT_TX_DW14_LN(phy, ch, lane));
+
+ /*
+ * Note that on CHV this flag is called UPAR, but has
+ * the same function.
+ */
+ val &= ~LATENCY_OPTIM;
+ if (lane_lat_optim_mask & BIT(lane))
+ val |= LATENCY_OPTIM;
+
+ intel_de_write(dev_priv, BXT_PORT_TX_DW14_LN(phy, ch, lane),
+ val);
+ }
+}
+
+u8
+bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+ enum dpio_phy phy;
+ enum dpio_channel ch;
+ int lane;
+ u8 mask;
+
+ bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
+
+ mask = 0;
+ for (lane = 0; lane < 4; lane++) {
+ u32 val = intel_de_read(dev_priv,
+ BXT_PORT_TX_DW14_LN(phy, ch, lane));
+
+ if (val & LATENCY_OPTIM)
+ mask |= BIT(lane);
+ }
+
+ return mask;
+}
+
+void chv_set_phy_signal_level(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ u32 deemph_reg_value, u32 margin_reg_value,
+ bool uniq_trans_scale)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
+ enum pipe pipe = crtc->pipe;
+ u32 val;
+ int i;
+
+ vlv_dpio_get(dev_priv);
+
+ /* Clear calc init */
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
+ val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
+ val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
+ val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
+
+ if (crtc_state->lane_count > 2) {
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
+ val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
+ val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
+ val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
+ }
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
+ val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
+ val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
+
+ if (crtc_state->lane_count > 2) {
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
+ val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
+ val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
+ }
+
+ /* Program swing deemph */
+ for (i = 0; i < crtc_state->lane_count; i++) {
+ val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
+ val &= ~DPIO_SWING_DEEMPH9P5_MASK;
+ val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
+ vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
+ }
+
+ /* Program swing margin */
+ for (i = 0; i < crtc_state->lane_count; i++) {
+ val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
+
+ val &= ~DPIO_SWING_MARGIN000_MASK;
+ val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
+
+ /*
+ * Supposedly this value shouldn't matter when unique transition
+ * scale is disabled, but in fact it does matter. Let's just
+ * always program the same value and hope it's OK.
+ */
+ val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
+ val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
+
+ vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
+ }
+
+ /*
+	 * The document says to set bit 27 for ch0 and bit 26 for ch1,
+	 * which might be a typo in the doc. For now, set bit 27 for both
+	 * ch0 and ch1 for this unique transition scale selection.
+ */
+ for (i = 0; i < crtc_state->lane_count; i++) {
+ val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
+ if (uniq_trans_scale)
+ val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
+ else
+ val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
+ vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
+ }
+
+ /* Start swing calculation */
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
+ val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
+
+ if (crtc_state->lane_count > 2) {
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
+ val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
+ }
+
+ vlv_dpio_put(dev_priv);
+}
+
+void chv_data_lane_soft_reset(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ bool reset)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum dpio_channel ch = vlv_dig_port_to_channel(enc_to_dig_port(encoder));
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ enum pipe pipe = crtc->pipe;
+ u32 val;
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
+ if (reset)
+ val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
+ else
+ val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
+
+ if (crtc_state->lane_count > 2) {
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
+ if (reset)
+ val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
+ else
+ val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
+ }
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
+ val |= CHV_PCS_REQ_SOFTRESET_EN;
+ if (reset)
+ val &= ~DPIO_PCS_CLK_SOFT_RESET;
+ else
+ val |= DPIO_PCS_CLK_SOFT_RESET;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
+
+ if (crtc_state->lane_count > 2) {
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
+ val |= CHV_PCS_REQ_SOFTRESET_EN;
+ if (reset)
+ val &= ~DPIO_PCS_CLK_SOFT_RESET;
+ else
+ val |= DPIO_PCS_CLK_SOFT_RESET;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
+ }
+}
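+
+/*
+ * Note: the soft reset helper above is used in pairs in this file:
+ * chv_phy_pre_pll_enable() asserts the data lane reset (reset == true)
+ * before the PLL is brought up, and chv_phy_pre_encoder_enable()
+ * deasserts it (reset == false) once the lanes are fully programmed.
+ */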
+
+void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
+ enum pipe pipe = crtc->pipe;
+ unsigned int lane_mask =
+ intel_dp_unused_lane_mask(crtc_state->lane_count);
+ u32 val;
+
+ /*
+ * Must trick the second common lane into life.
+ * Otherwise we can't even access the PLL.
+ */
+ if (ch == DPIO_CH0 && pipe == PIPE_B)
+ dig_port->release_cl2_override =
+ !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
+
+ chv_phy_powergate_lanes(encoder, true, lane_mask);
+
+ vlv_dpio_get(dev_priv);
+
+ /* Assert data lane reset */
+ chv_data_lane_soft_reset(encoder, crtc_state, true);
+
+ /* program left/right clock distribution */
+ if (pipe != PIPE_B) {
+ val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
+ val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
+ if (ch == DPIO_CH0)
+ val |= CHV_BUFLEFTENA1_FORCE;
+ if (ch == DPIO_CH1)
+ val |= CHV_BUFRIGHTENA1_FORCE;
+ vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
+ } else {
+ val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
+ val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
+ if (ch == DPIO_CH0)
+ val |= CHV_BUFLEFTENA2_FORCE;
+ if (ch == DPIO_CH1)
+ val |= CHV_BUFRIGHTENA2_FORCE;
+ vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
+ }
+
+ /* program clock channel usage */
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
+ val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
+ if (pipe != PIPE_B)
+ val &= ~CHV_PCS_USEDCLKCHANNEL;
+ else
+ val |= CHV_PCS_USEDCLKCHANNEL;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
+
+ if (crtc_state->lane_count > 2) {
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
+ val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
+ if (pipe != PIPE_B)
+ val &= ~CHV_PCS_USEDCLKCHANNEL;
+ else
+ val |= CHV_PCS_USEDCLKCHANNEL;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
+ }
+
+ /*
+	 * This is a bit weird since generally CL
+ * matches the pipe, but here we need to
+ * pick the CL based on the port.
+ */
+ val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
+ if (pipe != PIPE_B)
+ val &= ~CHV_CMN_USEDCLKCHANNEL;
+ else
+ val |= CHV_CMN_USEDCLKCHANNEL;
+ vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
+
+ vlv_dpio_put(dev_priv);
+}
+
+void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
+ enum pipe pipe = crtc->pipe;
+ int data, i, stagger;
+ u32 val;
+
+ vlv_dpio_get(dev_priv);
+
+ /* allow hardware to manage TX FIFO reset source */
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
+ val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
+
+ if (crtc_state->lane_count > 2) {
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
+ val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
+ }
+
+	/* Program Tx lane latency optimal setting */
+ for (i = 0; i < crtc_state->lane_count; i++) {
+ /* Set the upar bit */
+ if (crtc_state->lane_count == 1)
+ data = 0x0;
+ else
+ data = (i == 1) ? 0x0 : 0x1;
+ vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
+ data << DPIO_UPAR_SHIFT);
+ }
+
+ /* Data lane stagger programming */
+ if (crtc_state->port_clock > 270000)
+ stagger = 0x18;
+ else if (crtc_state->port_clock > 135000)
+ stagger = 0xd;
+ else if (crtc_state->port_clock > 67500)
+ stagger = 0x7;
+ else if (crtc_state->port_clock > 33750)
+ stagger = 0x4;
+ else
+ stagger = 0x2;
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
+ val |= DPIO_TX2_STAGGER_MASK(0x1f);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
+
+ if (crtc_state->lane_count > 2) {
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
+ val |= DPIO_TX2_STAGGER_MASK(0x1f);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
+ }
+
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
+ DPIO_LANESTAGGER_STRAP(stagger) |
+ DPIO_LANESTAGGER_STRAP_OVRD |
+ DPIO_TX1_STAGGER_MASK(0x1f) |
+ DPIO_TX1_STAGGER_MULT(6) |
+ DPIO_TX2_STAGGER_MULT(0));
+
+ if (crtc_state->lane_count > 2) {
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
+ DPIO_LANESTAGGER_STRAP(stagger) |
+ DPIO_LANESTAGGER_STRAP_OVRD |
+ DPIO_TX1_STAGGER_MASK(0x1f) |
+ DPIO_TX1_STAGGER_MULT(7) |
+ DPIO_TX2_STAGGER_MULT(5));
+ }
+
+ /* Deassert data lane reset */
+ chv_data_lane_soft_reset(encoder, crtc_state, false);
+
+ vlv_dpio_put(dev_priv);
+}
+
+void chv_phy_release_cl2_override(struct intel_encoder *encoder)
+{
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
+ if (dig_port->release_cl2_override) {
+ chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
+ dig_port->release_cl2_override = false;
+ }
+}
+
+void chv_phy_post_pll_disable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum pipe pipe = to_intel_crtc(old_crtc_state->uapi.crtc)->pipe;
+ u32 val;
+
+ vlv_dpio_get(dev_priv);
+
+ /* disable left/right clock distribution */
+ if (pipe != PIPE_B) {
+ val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
+ val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
+ vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
+ } else {
+ val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
+ val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
+ vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
+ }
+
+ vlv_dpio_put(dev_priv);
+
+ /*
+ * Leave the power down bit cleared for at least one
+	 * lane so that chv_phy_powergate_ch() will power
+ * on something when the channel is otherwise unused.
+ * When the port is off and the override is removed
+ * the lanes power down anyway, so otherwise it doesn't
+ * really matter what the state of power down bits is
+ * after this.
+ */
+ chv_phy_powergate_lanes(encoder, false, 0x0);
+}
+
+void vlv_set_phy_signal_level(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ u32 demph_reg_value, u32 preemph_reg_value,
+ u32 uniqtranscale_reg_value, u32 tx3_demph)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ enum dpio_channel port = vlv_dig_port_to_channel(dig_port);
+ enum pipe pipe = crtc->pipe;
+
+ vlv_dpio_get(dev_priv);
+
+ vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
+ vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
+ vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
+ uniqtranscale_reg_value);
+ vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
+
+ if (tx3_demph)
+ vlv_dpio_write(dev_priv, pipe, VLV_TX3_DW4(port), tx3_demph);
+
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
+ vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN);
+
+ vlv_dpio_put(dev_priv);
+}
+
+void vlv_phy_pre_pll_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ enum dpio_channel port = vlv_dig_port_to_channel(dig_port);
+ enum pipe pipe = crtc->pipe;
+
+ /* Program Tx lane resets to default */
+ vlv_dpio_get(dev_priv);
+
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
+ DPIO_PCS_TX_LANE2_RESET |
+ DPIO_PCS_TX_LANE1_RESET);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
+ DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
+ DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
+ (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
+ DPIO_PCS_CLK_SOFT_RESET);
+
+ /* Fix up inter-pair skew failure */
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
+ vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
+ vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
+
+ vlv_dpio_put(dev_priv);
+}
+
+void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ enum dpio_channel port = vlv_dig_port_to_channel(dig_port);
+ enum pipe pipe = crtc->pipe;
+ u32 val;
+
+ vlv_dpio_get(dev_priv);
+
+ /* Enable clock channels for this port */
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
+ val = 0;
+ if (pipe)
+ val |= (1<<21);
+ else
+ val &= ~(1<<21);
+ val |= 0x001000c4;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
+
+ /* Program lane clock */
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
+
+ vlv_dpio_put(dev_priv);
+}
+
+void vlv_phy_reset_lanes(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state)
+{
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
+ enum dpio_channel port = vlv_dig_port_to_channel(dig_port);
+ enum pipe pipe = crtc->pipe;
+
+ vlv_dpio_get(dev_priv);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port), 0x00000000);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port), 0x00e00060);
+ vlv_dpio_put(dev_priv);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.h b/drivers/gpu/drm/i915/display/intel_dpio_phy.h
new file mode 100644
index 000000000..9c3d008e8
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_DPIO_PHY_H__
+#define __INTEL_DPIO_PHY_H__
+
+#include <linux/types.h>
+
+enum dpio_channel;
+enum dpio_phy;
+enum port;
+struct drm_i915_private;
+struct intel_crtc_state;
+struct intel_encoder;
+
+void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port,
+ enum dpio_phy *phy, enum dpio_channel *ch);
+void bxt_ddi_phy_set_signal_levels(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy);
+void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy);
+bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
+ enum dpio_phy phy);
+bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
+ enum dpio_phy phy);
+u8 bxt_ddi_phy_calc_lane_lat_optim_mask(u8 lane_count);
+void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
+ u8 lane_lat_optim_mask);
+u8 bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder);
+
+void chv_set_phy_signal_level(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ u32 deemph_reg_value, u32 margin_reg_value,
+ bool uniq_trans_scale);
+void chv_data_lane_soft_reset(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ bool reset);
+void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+void chv_phy_release_cl2_override(struct intel_encoder *encoder);
+void chv_phy_post_pll_disable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state);
+
+void vlv_set_phy_signal_level(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ u32 demph_reg_value, u32 preemph_reg_value,
+ u32 uniqtranscale_reg_value, u32 tx3_demph);
+void vlv_phy_pre_pll_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+void vlv_phy_reset_lanes(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state);
+
+#endif /* __INTEL_DPIO_PHY_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.c b/drivers/gpu/drm/i915/display/intel_dpll.c
new file mode 100644
index 000000000..b15ba78d6
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dpll.c
@@ -0,0 +1,2061 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <linux/kernel.h>
+#include <linux/string_helpers.h>
+
+#include "intel_crtc.h"
+#include "intel_de.h"
+#include "intel_display.h"
+#include "intel_display_types.h"
+#include "intel_dpll.h"
+#include "intel_lvds.h"
+#include "intel_panel.h"
+#include "intel_pps.h"
+#include "intel_snps_phy.h"
+#include "vlv_sideband.h"
+
+struct intel_dpll_funcs {
+ int (*crtc_compute_clock)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+ int (*crtc_get_shared_dpll)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+};
+
+struct intel_limit {
+ struct {
+ int min, max;
+ } dot, vco, n, m, m1, m2, p, p1;
+
+ struct {
+ int dot_limit;
+ int p2_slow, p2_fast;
+ } p2;
+};
+
+static const struct intel_limit intel_limits_i8xx_dac = {
+ .dot = { .min = 25000, .max = 350000 },
+ .vco = { .min = 908000, .max = 1512000 },
+ .n = { .min = 2, .max = 16 },
+ .m = { .min = 96, .max = 140 },
+ .m1 = { .min = 18, .max = 26 },
+ .m2 = { .min = 6, .max = 16 },
+ .p = { .min = 4, .max = 128 },
+ .p1 = { .min = 2, .max = 33 },
+ .p2 = { .dot_limit = 165000,
+ .p2_slow = 4, .p2_fast = 2 },
+};
+
+static const struct intel_limit intel_limits_i8xx_dvo = {
+ .dot = { .min = 25000, .max = 350000 },
+ .vco = { .min = 908000, .max = 1512000 },
+ .n = { .min = 2, .max = 16 },
+ .m = { .min = 96, .max = 140 },
+ .m1 = { .min = 18, .max = 26 },
+ .m2 = { .min = 6, .max = 16 },
+ .p = { .min = 4, .max = 128 },
+ .p1 = { .min = 2, .max = 33 },
+ .p2 = { .dot_limit = 165000,
+ .p2_slow = 4, .p2_fast = 4 },
+};
+
+static const struct intel_limit intel_limits_i8xx_lvds = {
+ .dot = { .min = 25000, .max = 350000 },
+ .vco = { .min = 908000, .max = 1512000 },
+ .n = { .min = 2, .max = 16 },
+ .m = { .min = 96, .max = 140 },
+ .m1 = { .min = 18, .max = 26 },
+ .m2 = { .min = 6, .max = 16 },
+ .p = { .min = 4, .max = 128 },
+ .p1 = { .min = 1, .max = 6 },
+ .p2 = { .dot_limit = 165000,
+ .p2_slow = 14, .p2_fast = 7 },
+};
+
+static const struct intel_limit intel_limits_i9xx_sdvo = {
+ .dot = { .min = 20000, .max = 400000 },
+ .vco = { .min = 1400000, .max = 2800000 },
+ .n = { .min = 1, .max = 6 },
+ .m = { .min = 70, .max = 120 },
+ .m1 = { .min = 8, .max = 18 },
+ .m2 = { .min = 3, .max = 7 },
+ .p = { .min = 5, .max = 80 },
+ .p1 = { .min = 1, .max = 8 },
+ .p2 = { .dot_limit = 200000,
+ .p2_slow = 10, .p2_fast = 5 },
+};
+
+static const struct intel_limit intel_limits_i9xx_lvds = {
+ .dot = { .min = 20000, .max = 400000 },
+ .vco = { .min = 1400000, .max = 2800000 },
+ .n = { .min = 1, .max = 6 },
+ .m = { .min = 70, .max = 120 },
+ .m1 = { .min = 8, .max = 18 },
+ .m2 = { .min = 3, .max = 7 },
+ .p = { .min = 7, .max = 98 },
+ .p1 = { .min = 1, .max = 8 },
+ .p2 = { .dot_limit = 112000,
+ .p2_slow = 14, .p2_fast = 7 },
+};
+
+static const struct intel_limit intel_limits_g4x_sdvo = {
+ .dot = { .min = 25000, .max = 270000 },
+ .vco = { .min = 1750000, .max = 3500000},
+ .n = { .min = 1, .max = 4 },
+ .m = { .min = 104, .max = 138 },
+ .m1 = { .min = 17, .max = 23 },
+ .m2 = { .min = 5, .max = 11 },
+ .p = { .min = 10, .max = 30 },
+ .p1 = { .min = 1, .max = 3},
+ .p2 = { .dot_limit = 270000,
+ .p2_slow = 10,
+ .p2_fast = 10
+ },
+};
+
+static const struct intel_limit intel_limits_g4x_hdmi = {
+ .dot = { .min = 22000, .max = 400000 },
+ .vco = { .min = 1750000, .max = 3500000},
+ .n = { .min = 1, .max = 4 },
+ .m = { .min = 104, .max = 138 },
+ .m1 = { .min = 16, .max = 23 },
+ .m2 = { .min = 5, .max = 11 },
+ .p = { .min = 5, .max = 80 },
+ .p1 = { .min = 1, .max = 8},
+ .p2 = { .dot_limit = 165000,
+ .p2_slow = 10, .p2_fast = 5 },
+};
+
+static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
+ .dot = { .min = 20000, .max = 115000 },
+ .vco = { .min = 1750000, .max = 3500000 },
+ .n = { .min = 1, .max = 3 },
+ .m = { .min = 104, .max = 138 },
+ .m1 = { .min = 17, .max = 23 },
+ .m2 = { .min = 5, .max = 11 },
+ .p = { .min = 28, .max = 112 },
+ .p1 = { .min = 2, .max = 8 },
+ .p2 = { .dot_limit = 0,
+ .p2_slow = 14, .p2_fast = 14
+ },
+};
+
+static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
+ .dot = { .min = 80000, .max = 224000 },
+ .vco = { .min = 1750000, .max = 3500000 },
+ .n = { .min = 1, .max = 3 },
+ .m = { .min = 104, .max = 138 },
+ .m1 = { .min = 17, .max = 23 },
+ .m2 = { .min = 5, .max = 11 },
+ .p = { .min = 14, .max = 42 },
+ .p1 = { .min = 2, .max = 6 },
+ .p2 = { .dot_limit = 0,
+ .p2_slow = 7, .p2_fast = 7
+ },
+};
+
+static const struct intel_limit pnv_limits_sdvo = {
+ .dot = { .min = 20000, .max = 400000},
+ .vco = { .min = 1700000, .max = 3500000 },
+ /* Pineview's Ncounter is a ring counter */
+ .n = { .min = 3, .max = 6 },
+ .m = { .min = 2, .max = 256 },
+ /* Pineview only has one combined m divider, which we treat as m2. */
+ .m1 = { .min = 0, .max = 0 },
+ .m2 = { .min = 0, .max = 254 },
+ .p = { .min = 5, .max = 80 },
+ .p1 = { .min = 1, .max = 8 },
+ .p2 = { .dot_limit = 200000,
+ .p2_slow = 10, .p2_fast = 5 },
+};
+
+static const struct intel_limit pnv_limits_lvds = {
+ .dot = { .min = 20000, .max = 400000 },
+ .vco = { .min = 1700000, .max = 3500000 },
+ .n = { .min = 3, .max = 6 },
+ .m = { .min = 2, .max = 256 },
+ .m1 = { .min = 0, .max = 0 },
+ .m2 = { .min = 0, .max = 254 },
+ .p = { .min = 7, .max = 112 },
+ .p1 = { .min = 1, .max = 8 },
+ .p2 = { .dot_limit = 112000,
+ .p2_slow = 14, .p2_fast = 14 },
+};
+
+/* Ironlake / Sandybridge
+ *
+ * We calculate clock using (register_value + 2) for N/M1/M2, so here
+ * the range value for them is (actual_value - 2).
+ */
+static const struct intel_limit ilk_limits_dac = {
+ .dot = { .min = 25000, .max = 350000 },
+ .vco = { .min = 1760000, .max = 3510000 },
+ .n = { .min = 1, .max = 5 },
+ .m = { .min = 79, .max = 127 },
+ .m1 = { .min = 12, .max = 22 },
+ .m2 = { .min = 5, .max = 9 },
+ .p = { .min = 5, .max = 80 },
+ .p1 = { .min = 1, .max = 8 },
+ .p2 = { .dot_limit = 225000,
+ .p2_slow = 10, .p2_fast = 5 },
+};
+
+static const struct intel_limit ilk_limits_single_lvds = {
+ .dot = { .min = 25000, .max = 350000 },
+ .vco = { .min = 1760000, .max = 3510000 },
+ .n = { .min = 1, .max = 3 },
+ .m = { .min = 79, .max = 118 },
+ .m1 = { .min = 12, .max = 22 },
+ .m2 = { .min = 5, .max = 9 },
+ .p = { .min = 28, .max = 112 },
+ .p1 = { .min = 2, .max = 8 },
+ .p2 = { .dot_limit = 225000,
+ .p2_slow = 14, .p2_fast = 14 },
+};
+
+static const struct intel_limit ilk_limits_dual_lvds = {
+ .dot = { .min = 25000, .max = 350000 },
+ .vco = { .min = 1760000, .max = 3510000 },
+ .n = { .min = 1, .max = 3 },
+ .m = { .min = 79, .max = 127 },
+ .m1 = { .min = 12, .max = 22 },
+ .m2 = { .min = 5, .max = 9 },
+ .p = { .min = 14, .max = 56 },
+ .p1 = { .min = 2, .max = 8 },
+ .p2 = { .dot_limit = 225000,
+ .p2_slow = 7, .p2_fast = 7 },
+};
+
+/* LVDS 100 MHz refclk limits. */
+static const struct intel_limit ilk_limits_single_lvds_100m = {
+ .dot = { .min = 25000, .max = 350000 },
+ .vco = { .min = 1760000, .max = 3510000 },
+ .n = { .min = 1, .max = 2 },
+ .m = { .min = 79, .max = 126 },
+ .m1 = { .min = 12, .max = 22 },
+ .m2 = { .min = 5, .max = 9 },
+ .p = { .min = 28, .max = 112 },
+ .p1 = { .min = 2, .max = 8 },
+ .p2 = { .dot_limit = 225000,
+ .p2_slow = 14, .p2_fast = 14 },
+};
+
+static const struct intel_limit ilk_limits_dual_lvds_100m = {
+ .dot = { .min = 25000, .max = 350000 },
+ .vco = { .min = 1760000, .max = 3510000 },
+ .n = { .min = 1, .max = 3 },
+ .m = { .min = 79, .max = 126 },
+ .m1 = { .min = 12, .max = 22 },
+ .m2 = { .min = 5, .max = 9 },
+ .p = { .min = 14, .max = 42 },
+ .p1 = { .min = 2, .max = 6 },
+ .p2 = { .dot_limit = 225000,
+ .p2_slow = 7, .p2_fast = 7 },
+};
+
+static const struct intel_limit intel_limits_vlv = {
+ /*
+ * These are based on the data rate limits (measured in fast clocks)
+ * since those are the strictest limits we have. The fast
+ * clock and actual rate limits are more relaxed, so checking
+ * them would make no difference.
+ */
+ .dot = { .min = 25000, .max = 270000 },
+ .vco = { .min = 4000000, .max = 6000000 },
+ .n = { .min = 1, .max = 7 },
+ .m1 = { .min = 2, .max = 3 },
+ .m2 = { .min = 11, .max = 156 },
+ .p1 = { .min = 2, .max = 3 },
+ .p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
+};
+
+static const struct intel_limit intel_limits_chv = {
+ /*
+ * These are based on the data rate limits (measured in fast clocks)
+ * since those are the strictest limits we have. The fast
+ * clock and actual rate limits are more relaxed, so checking
+ * them would make no difference.
+ */
+ .dot = { .min = 25000, .max = 540000 },
+ .vco = { .min = 4800000, .max = 6480000 },
+ .n = { .min = 1, .max = 1 },
+ .m1 = { .min = 2, .max = 2 },
+ .m2 = { .min = 24 << 22, .max = 175 << 22 },
+ .p1 = { .min = 2, .max = 4 },
+ .p2 = { .p2_slow = 1, .p2_fast = 14 },
+};
+
+static const struct intel_limit intel_limits_bxt = {
+ .dot = { .min = 25000, .max = 594000 },
+ .vco = { .min = 4800000, .max = 6700000 },
+ .n = { .min = 1, .max = 1 },
+ .m1 = { .min = 2, .max = 2 },
+ /* FIXME: find real m2 limits */
+ .m2 = { .min = 2 << 22, .max = 255 << 22 },
+ .p1 = { .min = 2, .max = 4 },
+ .p2 = { .p2_slow = 1, .p2_fast = 20 },
+};
+
+/*
+ * Platform specific helpers to calculate the port PLL loopback (clock.m)
+ * and post-divider (clock.p) values, and the pre- (clock.vco) and
+ * post-divided fast (clock.dot) clock rates. This fast dot clock is fed to
+ * the port's IO logic. The helpers' return value is the rate of the clock
+ * that is fed to the display engine's pipe, which can be the above fast
+ * dot clock rate or a divided-down version of it.
+ */
+/* m1 is reserved as 0 in Pineview, n is a ring counter */
+int pnv_calc_dpll_params(int refclk, struct dpll *clock)
+{
+ clock->m = clock->m2 + 2;
+ clock->p = clock->p1 * clock->p2;
+ if (WARN_ON(clock->n == 0 || clock->p == 0))
+ return 0;
+ clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
+ clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
+
+ return clock->dot;
+}
+
+static u32 i9xx_dpll_compute_m(const struct dpll *dpll)
+{
+ return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
+}
+
+int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
+{
+ clock->m = i9xx_dpll_compute_m(clock);
+ clock->p = clock->p1 * clock->p2;
+ if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
+ return 0;
+ clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
+ clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
+
+ return clock->dot;
+}
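+
+/*
+ * Worked example (illustrative numbers, not from this patch): with a
+ * 96 MHz refclk and n = 1, m1 = 12, m2 = 6, p1 = 2, p2 = 10 this gives
+ * m = 5 * (12 + 2) + (6 + 2) = 78,
+ * vco = 96000 * 78 / (1 + 2) = 2496000 kHz and
+ * dot = 2496000 / (2 * 10) = 124800 kHz.
+ */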
+
+int vlv_calc_dpll_params(int refclk, struct dpll *clock)
+{
+ clock->m = clock->m1 * clock->m2;
+ clock->p = clock->p1 * clock->p2 * 5;
+ if (WARN_ON(clock->n == 0 || clock->p == 0))
+ return 0;
+ clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
+ clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
+
+ return clock->dot;
+}
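+
+/*
+ * Worked example (illustrative numbers, assuming the 100 MHz refclk
+ * typically used on VLV): n = 1, m1 = 2, m2 = 27, p1 = 2, p2 = 2 gives
+ * m = 54, p = 2 * 2 * 5 = 20, vco = 100000 * 54 / 1 = 5400000 kHz and
+ * dot = 5400000 / 20 = 270000 kHz, i.e. the port clock of an HBR DP link.
+ */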
+
+int chv_calc_dpll_params(int refclk, struct dpll *clock)
+{
+ clock->m = clock->m1 * clock->m2;
+ clock->p = clock->p1 * clock->p2 * 5;
+ if (WARN_ON(clock->n == 0 || clock->p == 0))
+ return 0;
+ clock->vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock->m),
+ clock->n << 22);
+ clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
+
+ return clock->dot;
+}
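+
+/*
+ * Note: on CHV, m2 is a .22 fixed point value (see the intel_limits_chv
+ * table above), hence the "clock->n << 22" in the VCO divide, which
+ * cancels the fixed point scaling.
+ */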
+
+/*
+ * Returns whether the given set of divisors is valid for a given refclk with
+ * the given connectors.
+ */
+static bool intel_pll_is_valid(struct drm_i915_private *dev_priv,
+ const struct intel_limit *limit,
+ const struct dpll *clock)
+{
+ if (clock->n < limit->n.min || limit->n.max < clock->n)
+ return false;
+ if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
+ return false;
+ if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
+ return false;
+ if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
+ return false;
+
+ if (!IS_PINEVIEW(dev_priv) && !IS_LP(dev_priv))
+ if (clock->m1 <= clock->m2)
+ return false;
+
+ if (!IS_LP(dev_priv)) {
+ if (clock->p < limit->p.min || limit->p.max < clock->p)
+ return false;
+ if (clock->m < limit->m.min || limit->m.max < clock->m)
+ return false;
+ }
+
+ if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
+ return false;
+ /* XXX: We may need to be checking "Dot clock" depending on the multiplier,
+ * connector, etc., rather than just a single range.
+ */
+ if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
+ return false;
+
+ return true;
+}
+
+static int
+i9xx_select_p2_div(const struct intel_limit *limit,
+ const struct intel_crtc_state *crtc_state,
+ int target)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+ /*
+ * For LVDS just rely on its current settings for dual-channel.
+ * We haven't figured out how to reliably set up different
+ * single/dual channel state, if we even can.
+ */
+ if (intel_is_dual_link_lvds(dev_priv))
+ return limit->p2.p2_fast;
+ else
+ return limit->p2.p2_slow;
+ } else {
+ if (target < limit->p2.dot_limit)
+ return limit->p2.p2_slow;
+ else
+ return limit->p2.p2_fast;
+ }
+}
+
+/*
+ * Returns a set of divisors for the desired target clock with the given
+ * refclk, or false.
+ *
+ * Target and reference clocks are specified in kHz.
+ *
+ * If match_clock is provided, then best_clock P divider must match the P
+ * divider from @match_clock used for LVDS downclocking.
+ */
+static bool
+i9xx_find_best_dpll(const struct intel_limit *limit,
+ struct intel_crtc_state *crtc_state,
+ int target, int refclk,
+ const struct dpll *match_clock,
+ struct dpll *best_clock)
+{
+ struct drm_device *dev = crtc_state->uapi.crtc->dev;
+ struct dpll clock;
+ int err = target;
+
+ memset(best_clock, 0, sizeof(*best_clock));
+
+ clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
+
+ for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
+ clock.m1++) {
+ for (clock.m2 = limit->m2.min;
+ clock.m2 <= limit->m2.max; clock.m2++) {
+ if (clock.m2 >= clock.m1)
+ break;
+ for (clock.n = limit->n.min;
+ clock.n <= limit->n.max; clock.n++) {
+ for (clock.p1 = limit->p1.min;
+ clock.p1 <= limit->p1.max; clock.p1++) {
+ int this_err;
+
+ i9xx_calc_dpll_params(refclk, &clock);
+ if (!intel_pll_is_valid(to_i915(dev),
+ limit,
+ &clock))
+ continue;
+ if (match_clock &&
+ clock.p != match_clock->p)
+ continue;
+
+ this_err = abs(clock.dot - target);
+ if (this_err < err) {
+ *best_clock = clock;
+ err = this_err;
+ }
+ }
+ }
+ }
+ }
+
+ return (err != target);
+}
+
+/*
+ * Returns a set of divisors for the desired target clock with the given
+ * refclk, or false.
+ *
+ * Target and reference clocks are specified in kHz.
+ *
+ * If match_clock is provided, then best_clock P divider must match the P
+ * divider from @match_clock used for LVDS downclocking.
+ */
+static bool
+pnv_find_best_dpll(const struct intel_limit *limit,
+ struct intel_crtc_state *crtc_state,
+ int target, int refclk,
+ const struct dpll *match_clock,
+ struct dpll *best_clock)
+{
+ struct drm_device *dev = crtc_state->uapi.crtc->dev;
+ struct dpll clock;
+ int err = target;
+
+ memset(best_clock, 0, sizeof(*best_clock));
+
+ clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
+
+ for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
+ clock.m1++) {
+ for (clock.m2 = limit->m2.min;
+ clock.m2 <= limit->m2.max; clock.m2++) {
+ for (clock.n = limit->n.min;
+ clock.n <= limit->n.max; clock.n++) {
+ for (clock.p1 = limit->p1.min;
+ clock.p1 <= limit->p1.max; clock.p1++) {
+ int this_err;
+
+ pnv_calc_dpll_params(refclk, &clock);
+ if (!intel_pll_is_valid(to_i915(dev),
+ limit,
+ &clock))
+ continue;
+ if (match_clock &&
+ clock.p != match_clock->p)
+ continue;
+
+ this_err = abs(clock.dot - target);
+ if (this_err < err) {
+ *best_clock = clock;
+ err = this_err;
+ }
+ }
+ }
+ }
+ }
+
+ return (err != target);
+}
+
+/*
+ * Returns a set of divisors for the desired target clock with the given
+ * refclk, or false.
+ *
+ * Target and reference clocks are specified in kHz.
+ *
+ * If match_clock is provided, then best_clock P divider must match the P
+ * divider from @match_clock used for LVDS downclocking.
+ */
+static bool
+g4x_find_best_dpll(const struct intel_limit *limit,
+ struct intel_crtc_state *crtc_state,
+ int target, int refclk,
+ const struct dpll *match_clock,
+ struct dpll *best_clock)
+{
+ struct drm_device *dev = crtc_state->uapi.crtc->dev;
+ struct dpll clock;
+ int max_n;
+ bool found = false;
+ /* approximately equals target * 0.00585 */
+ int err_most = (target >> 8) + (target >> 9);
+
+ memset(best_clock, 0, sizeof(*best_clock));
+
+ clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
+
+ max_n = limit->n.max;
+	/* based on hardware requirement, prefer smaller n for precision */
+ for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
+		/* based on hardware requirement, prefer larger m1,m2 */
+ for (clock.m1 = limit->m1.max;
+ clock.m1 >= limit->m1.min; clock.m1--) {
+ for (clock.m2 = limit->m2.max;
+ clock.m2 >= limit->m2.min; clock.m2--) {
+ for (clock.p1 = limit->p1.max;
+ clock.p1 >= limit->p1.min; clock.p1--) {
+ int this_err;
+
+ i9xx_calc_dpll_params(refclk, &clock);
+ if (!intel_pll_is_valid(to_i915(dev),
+ limit,
+ &clock))
+ continue;
+
+ this_err = abs(clock.dot - target);
+ if (this_err < err_most) {
+ *best_clock = clock;
+ err_most = this_err;
+ max_n = clock.n;
+ found = true;
+ }
+ }
+ }
+ }
+ }
+ return found;
+}
+
+/*
+ * Check if the calculated PLL configuration is more optimal compared to the
+ * best configuration and error found so far. Return the calculated error.
+ */
+static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
+ const struct dpll *calculated_clock,
+ const struct dpll *best_clock,
+ unsigned int best_error_ppm,
+ unsigned int *error_ppm)
+{
+ /*
+ * For CHV ignore the error and consider only the P value.
+ * Prefer a bigger P value based on HW requirements.
+ */
+ if (IS_CHERRYVIEW(to_i915(dev))) {
+ *error_ppm = 0;
+
+ return calculated_clock->p > best_clock->p;
+ }
+
+ if (drm_WARN_ON_ONCE(dev, !target_freq))
+ return false;
+
+ *error_ppm = div_u64(1000000ULL *
+ abs(target_freq - calculated_clock->dot),
+ target_freq);
+ /*
+ * Prefer a better P value over a better (smaller) error if the error
+ * is small. Ensure this preference for future configurations too by
+ * setting the error to 0.
+ */
+ if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
+ *error_ppm = 0;
+
+ return true;
+ }
+
+ return *error_ppm + 10 < best_error_ppm;
+}
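+
+/*
+ * For example (illustrative numbers): with target = 270000 kHz and a
+ * candidate dot = 270027 kHz, error_ppm = 1000000 * 27 / 270000 = 100,
+ * which just misses the "*error_ppm < 100" prefer-bigger-P fast path
+ * above.
+ */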
+
+/*
+ * Returns a set of divisors for the desired target clock with the given
+ * refclk, or false.
+ */
+static bool
+vlv_find_best_dpll(const struct intel_limit *limit,
+ struct intel_crtc_state *crtc_state,
+ int target, int refclk,
+ const struct dpll *match_clock,
+ struct dpll *best_clock)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_device *dev = crtc->base.dev;
+ struct dpll clock;
+ unsigned int bestppm = 1000000;
+ /* min update 19.2 MHz */
+ int max_n = min(limit->n.max, refclk / 19200);
+ bool found = false;
+
+ memset(best_clock, 0, sizeof(*best_clock));
+
+	/* based on hardware requirement, prefer smaller n for precision */
+ for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
+ for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
+ for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
+ clock.p2 -= clock.p2 > 10 ? 2 : 1) {
+ clock.p = clock.p1 * clock.p2 * 5;
+ /* based on hardware requirement, prefer bigger m1,m2 values */
+ for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
+ unsigned int ppm;
+
+ clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
+ refclk * clock.m1);
+
+ vlv_calc_dpll_params(refclk, &clock);
+
+ if (!intel_pll_is_valid(to_i915(dev),
+ limit,
+ &clock))
+ continue;
+
+ if (!vlv_PLL_is_optimal(dev, target,
+ &clock,
+ best_clock,
+ bestppm, &ppm))
+ continue;
+
+ *best_clock = clock;
+ bestppm = ppm;
+ found = true;
+ }
+ }
+ }
+ }
+
+ return found;
+}
+
+/*
+ * Returns a set of divisors for the desired target clock with the given
+ * refclk, or false.
+ */
+static bool
+chv_find_best_dpll(const struct intel_limit *limit,
+ struct intel_crtc_state *crtc_state,
+ int target, int refclk,
+ const struct dpll *match_clock,
+ struct dpll *best_clock)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_device *dev = crtc->base.dev;
+ unsigned int best_error_ppm;
+ struct dpll clock;
+ u64 m2;
+	bool found = false;
+
+ memset(best_clock, 0, sizeof(*best_clock));
+ best_error_ppm = 1000000;
+
+ /*
+	 * Based on the hardware doc, n is always set to 1 and m1 is always
+	 * set to 2. If we ever need to support a 200 MHz refclk, we will
+	 * have to revisit this because n may no longer be 1.
+ */
+ clock.n = 1;
+ clock.m1 = 2;
+
+ for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
+ for (clock.p2 = limit->p2.p2_fast;
+ clock.p2 >= limit->p2.p2_slow;
+ clock.p2 -= clock.p2 > 10 ? 2 : 1) {
+ unsigned int error_ppm;
+
+ clock.p = clock.p1 * clock.p2 * 5;
+
+ m2 = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(target, clock.p * clock.n) << 22,
+ refclk * clock.m1);
+
+ if (m2 > INT_MAX/clock.m1)
+ continue;
+
+ clock.m2 = m2;
+
+ chv_calc_dpll_params(refclk, &clock);
+
+ if (!intel_pll_is_valid(to_i915(dev), limit, &clock))
+ continue;
+
+ if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
+ best_error_ppm, &error_ppm))
+ continue;
+
+ *best_clock = clock;
+ best_error_ppm = error_ppm;
+ found = true;
+ }
+ }
+
+ return found;
+}
+
+bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
+ struct dpll *best_clock)
+{
+ const struct intel_limit *limit = &intel_limits_bxt;
+ int refclk = 100000;
+
+ return chv_find_best_dpll(limit, crtc_state,
+ crtc_state->port_clock, refclk,
+ NULL, best_clock);
+}
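+
+/*
+ * Note: BXT reuses the CHV search above since its port PLL has the same
+ * divider structure (n, m1, and m2 as a .22 fixed point value); only the
+ * limits table (intel_limits_bxt) and the fixed 100 MHz refclk differ.
+ */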
+
+u32 i9xx_dpll_compute_fp(const struct dpll *dpll)
+{
+ return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
+}
+
+static u32 pnv_dpll_compute_fp(const struct dpll *dpll)
+{
+ return (1 << dpll->n) << 16 | dpll->m2;
+}
+
+static void i9xx_update_pll_dividers(struct intel_crtc_state *crtc_state,
+ const struct dpll *clock,
+ const struct dpll *reduced_clock)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 fp, fp2;
+
+ if (IS_PINEVIEW(dev_priv)) {
+ fp = pnv_dpll_compute_fp(clock);
+ fp2 = pnv_dpll_compute_fp(reduced_clock);
+ } else {
+ fp = i9xx_dpll_compute_fp(clock);
+ fp2 = i9xx_dpll_compute_fp(reduced_clock);
+ }
+
+ crtc_state->dpll_hw_state.fp0 = fp;
+ crtc_state->dpll_hw_state.fp1 = fp2;
+}
+
+static void i9xx_compute_dpll(struct intel_crtc_state *crtc_state,
+ const struct dpll *clock,
+ const struct dpll *reduced_clock)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 dpll;
+
+ i9xx_update_pll_dividers(crtc_state, clock, reduced_clock);
+
+ dpll = DPLL_VGA_MODE_DIS;
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
+ dpll |= DPLLB_MODE_LVDS;
+ else
+ dpll |= DPLLB_MODE_DAC_SERIAL;
+
+ if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
+ IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
+ dpll |= (crtc_state->pixel_multiplier - 1)
+ << SDVO_MULTIPLIER_SHIFT_HIRES;
+ }
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ dpll |= DPLL_SDVO_HIGH_SPEED;
+
+ if (intel_crtc_has_dp_encoder(crtc_state))
+ dpll |= DPLL_SDVO_HIGH_SPEED;
+
+ /* compute bitmask from p1 value */
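+	/* e.g. p1 == 3 becomes the one-hot value 0b100 in the P1 field */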
+ if (IS_G4X(dev_priv)) {
+ dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+ dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
+ } else if (IS_PINEVIEW(dev_priv)) {
+ dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
+ WARN_ON(reduced_clock->p1 != clock->p1);
+ } else {
+ dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+ WARN_ON(reduced_clock->p1 != clock->p1);
+ }
+
+ switch (clock->p2) {
+ case 5:
+ dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
+ break;
+ case 7:
+ dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
+ break;
+ case 10:
+ dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
+ break;
+ case 14:
+ dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
+ break;
+ }
+ WARN_ON(reduced_clock->p2 != clock->p2);
+
+ if (DISPLAY_VER(dev_priv) >= 4)
+ dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
+
+ if (crtc_state->sdvo_tv_clock)
+ dpll |= PLL_REF_INPUT_TVCLKINBC;
+ else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
+ intel_panel_use_ssc(dev_priv))
+ dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
+ else
+ dpll |= PLL_REF_INPUT_DREFCLK;
+
+ dpll |= DPLL_VCO_ENABLE;
+ crtc_state->dpll_hw_state.dpll = dpll;
+
+ if (DISPLAY_VER(dev_priv) >= 4) {
+ u32 dpll_md = (crtc_state->pixel_multiplier - 1)
+ << DPLL_MD_UDI_MULTIPLIER_SHIFT;
+ crtc_state->dpll_hw_state.dpll_md = dpll_md;
+ }
+}
+
+static void i8xx_compute_dpll(struct intel_crtc_state *crtc_state,
+ const struct dpll *clock,
+ const struct dpll *reduced_clock)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 dpll;
+
+ i9xx_update_pll_dividers(crtc_state, clock, reduced_clock);
+
+ dpll = DPLL_VGA_MODE_DIS;
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+ dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+ } else {
+ if (clock->p1 == 2)
+ dpll |= PLL_P1_DIVIDE_BY_TWO;
+ else
+ dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+ if (clock->p2 == 4)
+ dpll |= PLL_P2_DIVIDE_BY_4;
+ }
+ WARN_ON(reduced_clock->p1 != clock->p1);
+ WARN_ON(reduced_clock->p2 != clock->p2);
+
+	/*
+	 * Bspec:
+	 * "[Almador Errata]: For the correct operation of the muxed DVO pins
+	 * (GDEVSELB/I2Cdata, GIRDBY/I2CClk) and (GFRAMEB/DVI_Data,
+	 * GTRDYB/DVI_Clk): Bit 31 (DPLL VCO Enable) and Bit 30 (2X Clock
+	 * Enable) must be set to “1” in both the DPLL A Control Register
+	 * (06014h-06017h) and DPLL B Control Register (06018h-0601Bh)."
+	 *
+	 * For simplicity, we keep both bits always enabled in both DPLLs.
+	 * The spec says we should disable the DVO 2X clock when not needed,
+	 * but this seems to work fine in practice.
+	 */
+ if (IS_I830(dev_priv) ||
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
+ dpll |= DPLL_DVO_2X_MODE;
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
+ intel_panel_use_ssc(dev_priv))
+ dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
+ else
+ dpll |= PLL_REF_INPUT_DREFCLK;
+
+ dpll |= DPLL_VCO_ENABLE;
+ crtc_state->dpll_hw_state.dpll = dpll;
+}
+
+static int hsw_crtc_compute_clock(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct intel_encoder *encoder =
+ intel_get_crtc_new_encoder(state, crtc_state);
+ int ret;
+
+ if (DISPLAY_VER(dev_priv) < 11 &&
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
+ return 0;
+
+ ret = intel_compute_shared_dplls(state, crtc, encoder);
+ if (ret)
+ return ret;
+
+ /* FIXME this is a mess */
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
+ return 0;
+
+ /* CRT dotclock is determined via other means */
+ if (!crtc_state->has_pch_encoder)
+ crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
+
+ return 0;
+}
+
+static int hsw_crtc_get_shared_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct intel_encoder *encoder =
+ intel_get_crtc_new_encoder(state, crtc_state);
+
+ if (DISPLAY_VER(dev_priv) < 11 &&
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
+ return 0;
+
+ return intel_reserve_shared_dplls(state, crtc, encoder);
+}
+
+static int dg2_crtc_compute_clock(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct intel_encoder *encoder =
+ intel_get_crtc_new_encoder(state, crtc_state);
+ int ret;
+
+ ret = intel_mpllb_calc_state(crtc_state, encoder);
+ if (ret)
+ return ret;
+
+ crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
+
+ return 0;
+}
+
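+/*
+ * FP_CB_TUNE is needed when the feedback ratio m/n falls below the
+ * per-output factor chosen in ilk_update_pll_dividers().
+ */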
+static bool ilk_needs_fb_cb_tune(const struct dpll *dpll, int factor)
+{
+ return dpll->m < factor * dpll->n;
+}
+
+static void ilk_update_pll_dividers(struct intel_crtc_state *crtc_state,
+ const struct dpll *clock,
+ const struct dpll *reduced_clock)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 fp, fp2;
+ int factor;
+
+ /* Enable autotuning of the PLL clock (if permissible) */
+ factor = 21;
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+ if ((intel_panel_use_ssc(dev_priv) &&
+ dev_priv->display.vbt.lvds_ssc_freq == 100000) ||
+ (HAS_PCH_IBX(dev_priv) &&
+ intel_is_dual_link_lvds(dev_priv)))
+ factor = 25;
+ } else if (crtc_state->sdvo_tv_clock) {
+ factor = 20;
+ }
+
+ fp = i9xx_dpll_compute_fp(clock);
+ if (ilk_needs_fb_cb_tune(clock, factor))
+ fp |= FP_CB_TUNE;
+
+ fp2 = i9xx_dpll_compute_fp(reduced_clock);
+ if (ilk_needs_fb_cb_tune(reduced_clock, factor))
+ fp2 |= FP_CB_TUNE;
+
+ crtc_state->dpll_hw_state.fp0 = fp;
+ crtc_state->dpll_hw_state.fp1 = fp2;
+}
+
+static void ilk_compute_dpll(struct intel_crtc_state *crtc_state,
+ const struct dpll *clock,
+ const struct dpll *reduced_clock)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 dpll;
+
+ ilk_update_pll_dividers(crtc_state, clock, reduced_clock);
+
+ dpll = 0;
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
+ dpll |= DPLLB_MODE_LVDS;
+ else
+ dpll |= DPLLB_MODE_DAC_SERIAL;
+
+ dpll |= (crtc_state->pixel_multiplier - 1)
+ << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ dpll |= DPLL_SDVO_HIGH_SPEED;
+
+ if (intel_crtc_has_dp_encoder(crtc_state))
+ dpll |= DPLL_SDVO_HIGH_SPEED;
+
+ /*
+ * The high speed IO clock is only really required for
+ * SDVO/HDMI/DP, but we also enable it for CRT to make it
+ * possible to share the DPLL between CRT and HDMI. Enabling
+ * the clock needlessly does no real harm, except use up a
+ * bit of power potentially.
+ *
+ * We'll limit this to IVB with 3 pipes, since it has only two
+ * DPLLs and so DPLL sharing is the only way to get three pipes
+ * driving PCH ports at the same time. On SNB we could do this,
+ * and potentially avoid enabling the second DPLL, but it's not
+	 * clear if it's a win or a loss power-wise. No point in doing
+ * this on ILK at all since it has a fixed DPLL<->pipe mapping.
+ */
+ if (INTEL_NUM_PIPES(dev_priv) == 3 &&
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
+ dpll |= DPLL_SDVO_HIGH_SPEED;
+
+ /* compute bitmask from p1 value */
+ dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+ /* also FPA1 */
+ dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
+
+ switch (clock->p2) {
+ case 5:
+ dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
+ break;
+ case 7:
+ dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
+ break;
+ case 10:
+ dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
+ break;
+ case 14:
+ dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
+ break;
+ }
+ WARN_ON(reduced_clock->p2 != clock->p2);
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
+ intel_panel_use_ssc(dev_priv))
+ dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
+ else
+ dpll |= PLL_REF_INPUT_DREFCLK;
+
+ dpll |= DPLL_VCO_ENABLE;
+
+ crtc_state->dpll_hw_state.dpll = dpll;
+}
+
+static int ilk_crtc_compute_clock(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct intel_limit *limit;
+ int refclk = 120000;
+ int ret;
+
+ /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
+ if (!crtc_state->has_pch_encoder)
+ return 0;
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+ if (intel_panel_use_ssc(dev_priv)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "using SSC reference clock of %d kHz\n",
+ dev_priv->display.vbt.lvds_ssc_freq);
+ refclk = dev_priv->display.vbt.lvds_ssc_freq;
+ }
+
+ if (intel_is_dual_link_lvds(dev_priv)) {
+ if (refclk == 100000)
+ limit = &ilk_limits_dual_lvds_100m;
+ else
+ limit = &ilk_limits_dual_lvds;
+ } else {
+ if (refclk == 100000)
+ limit = &ilk_limits_single_lvds_100m;
+ else
+ limit = &ilk_limits_single_lvds;
+ }
+ } else {
+ limit = &ilk_limits_dac;
+ }
+
+ if (!crtc_state->clock_set &&
+ !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
+ refclk, NULL, &crtc_state->dpll))
+ return -EINVAL;
+
+ ilk_compute_dpll(crtc_state, &crtc_state->dpll,
+ &crtc_state->dpll);
+
+ ret = intel_compute_shared_dplls(state, crtc, NULL);
+ if (ret)
+ return ret;
+
+ crtc_state->port_clock = crtc_state->dpll.dot;
+ crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
+
+ return ret;
+}
+
+static int ilk_crtc_get_shared_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
+ if (!crtc_state->has_pch_encoder)
+ return 0;
+
+ return intel_reserve_shared_dplls(state, crtc, NULL);
+}
+
+void vlv_compute_dpll(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ crtc_state->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
+ DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
+ if (crtc->pipe != PIPE_A)
+ crtc_state->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
+
+ /* DPLL not used with DSI, but still need the rest set up */
+ if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
+ crtc_state->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
+ DPLL_EXT_BUFFER_ENABLE_VLV;
+
+ crtc_state->dpll_hw_state.dpll_md =
+ (crtc_state->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
+}
+
+void chv_compute_dpll(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ crtc_state->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
+ DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
+ if (crtc->pipe != PIPE_A)
+ crtc_state->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
+
+ /* DPLL not used with DSI, but still need the rest set up */
+ if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
+ crtc_state->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
+
+ crtc_state->dpll_hw_state.dpll_md =
+ (crtc_state->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
+}
+
+static int chv_crtc_compute_clock(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct intel_limit *limit = &intel_limits_chv;
+ int refclk = 100000;
+
+ if (!crtc_state->clock_set &&
+ !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
+ refclk, NULL, &crtc_state->dpll))
+ return -EINVAL;
+
+ chv_compute_dpll(crtc_state);
+
+ /* FIXME this is a mess */
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
+ return 0;
+
+ crtc_state->port_clock = crtc_state->dpll.dot;
+ crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
+
+ return 0;
+}
+
+static int vlv_crtc_compute_clock(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct intel_limit *limit = &intel_limits_vlv;
+ int refclk = 100000;
+
+ if (!crtc_state->clock_set &&
+ !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
+ refclk, NULL, &crtc_state->dpll)) {
+ return -EINVAL;
+ }
+
+ vlv_compute_dpll(crtc_state);
+
+ /* FIXME this is a mess */
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
+ return 0;
+
+ crtc_state->port_clock = crtc_state->dpll.dot;
+ crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
+
+ return 0;
+}
+
+static int g4x_crtc_compute_clock(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct intel_limit *limit;
+ int refclk = 96000;
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+ if (intel_panel_use_ssc(dev_priv)) {
+ refclk = dev_priv->display.vbt.lvds_ssc_freq;
+ drm_dbg_kms(&dev_priv->drm,
+ "using SSC reference clock of %d kHz\n",
+ refclk);
+ }
+
+ if (intel_is_dual_link_lvds(dev_priv))
+ limit = &intel_limits_g4x_dual_channel_lvds;
+ else
+ limit = &intel_limits_g4x_single_channel_lvds;
+ } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
+ limit = &intel_limits_g4x_hdmi;
+ } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
+ limit = &intel_limits_g4x_sdvo;
+ } else {
+		/* Fallback for any other output type */
+ limit = &intel_limits_i9xx_sdvo;
+ }
+
+ if (!crtc_state->clock_set &&
+ !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
+ refclk, NULL, &crtc_state->dpll))
+ return -EINVAL;
+
+ i9xx_compute_dpll(crtc_state, &crtc_state->dpll,
+ &crtc_state->dpll);
+
+ crtc_state->port_clock = crtc_state->dpll.dot;
+ /* FIXME this is a mess */
+ if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_TVOUT))
+ crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
+
+ return 0;
+}
+
+static int pnv_crtc_compute_clock(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct intel_limit *limit;
+ int refclk = 96000;
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+ if (intel_panel_use_ssc(dev_priv)) {
+ refclk = dev_priv->display.vbt.lvds_ssc_freq;
+ drm_dbg_kms(&dev_priv->drm,
+ "using SSC reference clock of %d kHz\n",
+ refclk);
+ }
+
+ limit = &pnv_limits_lvds;
+ } else {
+ limit = &pnv_limits_sdvo;
+ }
+
+ if (!crtc_state->clock_set &&
+ !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
+ refclk, NULL, &crtc_state->dpll))
+ return -EINVAL;
+
+ i9xx_compute_dpll(crtc_state, &crtc_state->dpll,
+ &crtc_state->dpll);
+
+ crtc_state->port_clock = crtc_state->dpll.dot;
+ crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
+
+ return 0;
+}
+
+static int i9xx_crtc_compute_clock(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct intel_limit *limit;
+ int refclk = 96000;
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+ if (intel_panel_use_ssc(dev_priv)) {
+ refclk = dev_priv->display.vbt.lvds_ssc_freq;
+ drm_dbg_kms(&dev_priv->drm,
+ "using SSC reference clock of %d kHz\n",
+ refclk);
+ }
+
+ limit = &intel_limits_i9xx_lvds;
+ } else {
+ limit = &intel_limits_i9xx_sdvo;
+ }
+
+ if (!crtc_state->clock_set &&
+ !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
+ refclk, NULL, &crtc_state->dpll))
+ return -EINVAL;
+
+ i9xx_compute_dpll(crtc_state, &crtc_state->dpll,
+ &crtc_state->dpll);
+
+ crtc_state->port_clock = crtc_state->dpll.dot;
+ /* FIXME this is a mess */
+ if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_TVOUT))
+ crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
+
+ return 0;
+}
+
+static int i8xx_crtc_compute_clock(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct intel_limit *limit;
+ int refclk = 48000;
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+ if (intel_panel_use_ssc(dev_priv)) {
+ refclk = dev_priv->display.vbt.lvds_ssc_freq;
+ drm_dbg_kms(&dev_priv->drm,
+ "using SSC reference clock of %d kHz\n",
+ refclk);
+ }
+
+ limit = &intel_limits_i8xx_lvds;
+ } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
+ limit = &intel_limits_i8xx_dvo;
+ } else {
+ limit = &intel_limits_i8xx_dac;
+ }
+
+ if (!crtc_state->clock_set &&
+ !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
+ refclk, NULL, &crtc_state->dpll))
+ return -EINVAL;
+
+ i8xx_compute_dpll(crtc_state, &crtc_state->dpll,
+ &crtc_state->dpll);
+
+ crtc_state->port_clock = crtc_state->dpll.dot;
+ crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
+
+ return 0;
+}
+
+static const struct intel_dpll_funcs dg2_dpll_funcs = {
+ .crtc_compute_clock = dg2_crtc_compute_clock,
+};
+
+static const struct intel_dpll_funcs hsw_dpll_funcs = {
+ .crtc_compute_clock = hsw_crtc_compute_clock,
+ .crtc_get_shared_dpll = hsw_crtc_get_shared_dpll,
+};
+
+static const struct intel_dpll_funcs ilk_dpll_funcs = {
+ .crtc_compute_clock = ilk_crtc_compute_clock,
+ .crtc_get_shared_dpll = ilk_crtc_get_shared_dpll,
+};
+
+static const struct intel_dpll_funcs chv_dpll_funcs = {
+ .crtc_compute_clock = chv_crtc_compute_clock,
+};
+
+static const struct intel_dpll_funcs vlv_dpll_funcs = {
+ .crtc_compute_clock = vlv_crtc_compute_clock,
+};
+
+static const struct intel_dpll_funcs g4x_dpll_funcs = {
+ .crtc_compute_clock = g4x_crtc_compute_clock,
+};
+
+static const struct intel_dpll_funcs pnv_dpll_funcs = {
+ .crtc_compute_clock = pnv_crtc_compute_clock,
+};
+
+static const struct intel_dpll_funcs i9xx_dpll_funcs = {
+ .crtc_compute_clock = i9xx_crtc_compute_clock,
+};
+
+static const struct intel_dpll_funcs i8xx_dpll_funcs = {
+ .crtc_compute_clock = i8xx_crtc_compute_clock,
+};
+
+int intel_dpll_crtc_compute_clock(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ int ret;
+
+ drm_WARN_ON(&i915->drm, !intel_crtc_needs_modeset(crtc_state));
+
+ memset(&crtc_state->dpll_hw_state, 0,
+ sizeof(crtc_state->dpll_hw_state));
+
+ if (!crtc_state->hw.enable)
+ return 0;
+
+ ret = i915->display.funcs.dpll->crtc_compute_clock(state, crtc);
+ if (ret) {
+ drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] Couldn't calculate DPLL settings\n",
+ crtc->base.base.id, crtc->base.name);
+ return ret;
+ }
+
+ return 0;
+}
+
+int intel_dpll_crtc_get_shared_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ int ret;
+
+ drm_WARN_ON(&i915->drm, !intel_crtc_needs_modeset(crtc_state));
+ drm_WARN_ON(&i915->drm, !crtc_state->hw.enable && crtc_state->shared_dpll);
+
+ if (!crtc_state->hw.enable || crtc_state->shared_dpll)
+ return 0;
+
+ if (!i915->display.funcs.dpll->crtc_get_shared_dpll)
+ return 0;
+
+ ret = i915->display.funcs.dpll->crtc_get_shared_dpll(state, crtc);
+ if (ret) {
+ drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] Couldn't get a shared DPLL\n",
+ crtc->base.base.id, crtc->base.name);
+ return ret;
+ }
+
+ return 0;
+}
+
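+/*
+ * Select the platform's clock computation hooks. Ordering matters here:
+ * the most specific platform checks must come first (e.g. DG2 would
+ * otherwise also match the DISPLAY_VER >= 9 DDI branch).
+ */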
+void
+intel_dpll_init_clock_hook(struct drm_i915_private *dev_priv)
+{
+ if (IS_DG2(dev_priv))
+ dev_priv->display.funcs.dpll = &dg2_dpll_funcs;
+ else if (DISPLAY_VER(dev_priv) >= 9 || HAS_DDI(dev_priv))
+ dev_priv->display.funcs.dpll = &hsw_dpll_funcs;
+ else if (HAS_PCH_SPLIT(dev_priv))
+ dev_priv->display.funcs.dpll = &ilk_dpll_funcs;
+ else if (IS_CHERRYVIEW(dev_priv))
+ dev_priv->display.funcs.dpll = &chv_dpll_funcs;
+ else if (IS_VALLEYVIEW(dev_priv))
+ dev_priv->display.funcs.dpll = &vlv_dpll_funcs;
+ else if (IS_G4X(dev_priv))
+ dev_priv->display.funcs.dpll = &g4x_dpll_funcs;
+ else if (IS_PINEVIEW(dev_priv))
+ dev_priv->display.funcs.dpll = &pnv_dpll_funcs;
+ else if (DISPLAY_VER(dev_priv) != 2)
+ dev_priv->display.funcs.dpll = &i9xx_dpll_funcs;
+ else
+ dev_priv->display.funcs.dpll = &i8xx_dpll_funcs;
+}
+
+static bool i9xx_has_pps(struct drm_i915_private *dev_priv)
+{
+ if (IS_I830(dev_priv))
+ return false;
+
+ return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
+}
+
+void i9xx_enable_pll(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 dpll = crtc_state->dpll_hw_state.dpll;
+ enum pipe pipe = crtc->pipe;
+ int i;
+
+ assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder);
+
+ /* PLL is protected by panel, make sure we can write it */
+ if (i9xx_has_pps(dev_priv))
+ assert_pps_unlocked(dev_priv, pipe);
+
+ intel_de_write(dev_priv, FP0(pipe), crtc_state->dpll_hw_state.fp0);
+ intel_de_write(dev_priv, FP1(pipe), crtc_state->dpll_hw_state.fp1);
+
+ /*
+ * Apparently we need to have VGA mode enabled prior to changing
+ * the P1/P2 dividers. Otherwise the DPLL will keep using the old
+ * dividers, even though the register value does change.
+ */
+ intel_de_write(dev_priv, DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
+ intel_de_write(dev_priv, DPLL(pipe), dpll);
+
+ /* Wait for the clocks to stabilize. */
+ intel_de_posting_read(dev_priv, DPLL(pipe));
+ udelay(150);
+
+ if (DISPLAY_VER(dev_priv) >= 4) {
+ intel_de_write(dev_priv, DPLL_MD(pipe),
+ crtc_state->dpll_hw_state.dpll_md);
+ } else {
+		/*
+		 * The pixel multiplier can only be updated once the
+		 * DPLL is enabled and the clocks are stable.
+		 *
+		 * So write it again.
+		 */
+ intel_de_write(dev_priv, DPLL(pipe), dpll);
+ }
+
+ /* We do this three times for luck */
+ for (i = 0; i < 3; i++) {
+ intel_de_write(dev_priv, DPLL(pipe), dpll);
+ intel_de_posting_read(dev_priv, DPLL(pipe));
+ udelay(150); /* wait for warmup */
+ }
+}
+
+static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ u32 reg_val;
+
+ /*
+	 * The PLLB opamp always calibrates to the max value of 0x3f, so
+	 * force-enable it and set it to a reasonable value instead.
+ */
+ reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
+ reg_val &= 0xffffff00;
+ reg_val |= 0x00000030;
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
+
+ reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
+ reg_val &= 0x00ffffff;
+ reg_val |= 0x8c000000;
+ vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
+
+ reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
+ reg_val &= 0xffffff00;
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
+
+ reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
+ reg_val &= 0x00ffffff;
+ reg_val |= 0xb0000000;
+ vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
+}
+
+static void vlv_prepare_pll(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ u32 mdiv;
+ u32 bestn, bestm1, bestm2, bestp1, bestp2;
+ u32 coreclk, reg_val;
+
+ vlv_dpio_get(dev_priv);
+
+ bestn = crtc_state->dpll.n;
+ bestm1 = crtc_state->dpll.m1;
+ bestm2 = crtc_state->dpll.m2;
+ bestp1 = crtc_state->dpll.p1;
+ bestp2 = crtc_state->dpll.p2;
+
+ /* See eDP HDMI DPIO driver vbios notes doc */
+
+ /* PLL B needs special handling */
+ if (pipe == PIPE_B)
+ vlv_pllb_recal_opamp(dev_priv, pipe);
+
+ /* Set up Tx target for periodic Rcomp update */
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);
+
+ /* Disable target IRef on PLL */
+ reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
+ reg_val &= 0x00ffffff;
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);
+
+ /* Disable fast lock */
+ vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);
+
+ /* Set idtafcrecal before PLL is enabled */
+ mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
+ mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
+ mdiv |= ((bestn << DPIO_N_SHIFT));
+ mdiv |= (1 << DPIO_K_SHIFT);
+
+ /*
+ * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
+ * but we don't support that).
+ * Note: don't use the DAC post divider as it seems unstable.
+ */
+ mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
+
+ mdiv |= DPIO_ENABLE_CALIBRATION;
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
+
+ /* Set HBR and RBR LPF coefficients */
+ if (crtc_state->port_clock == 162000 ||
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG) ||
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
+ 0x009f0003);
+ else
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
+ 0x00d0000f);
+
+ if (intel_crtc_has_dp_encoder(crtc_state)) {
+ /* Use SSC source */
+ if (pipe == PIPE_A)
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
+ 0x0df40000);
+ else
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
+ 0x0df70000);
+ } else { /* HDMI or VGA */
+ /* Use bend source */
+ if (pipe == PIPE_A)
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
+ 0x0df70000);
+ else
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
+ 0x0df40000);
+ }
+
+ coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
+ coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
+ if (intel_crtc_has_dp_encoder(crtc_state))
+ coreclk |= 0x01000000;
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
+
+ vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
+
+ vlv_dpio_put(dev_priv);
+}
+
+static void _vlv_enable_pll(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+
+ intel_de_write(dev_priv, DPLL(pipe), crtc_state->dpll_hw_state.dpll);
+ intel_de_posting_read(dev_priv, DPLL(pipe));
+ udelay(150);
+
+ if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
+ drm_err(&dev_priv->drm, "DPLL %d failed to lock\n", pipe);
+}
+
+void vlv_enable_pll(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+
+ assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder);
+
+ /* PLL is protected by panel, make sure we can write it */
+ assert_pps_unlocked(dev_priv, pipe);
+
+ /* Enable Refclk */
+ intel_de_write(dev_priv, DPLL(pipe),
+ crtc_state->dpll_hw_state.dpll &
+ ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));
+
+ if (crtc_state->dpll_hw_state.dpll & DPLL_VCO_ENABLE) {
+ vlv_prepare_pll(crtc_state);
+ _vlv_enable_pll(crtc_state);
+ }
+
+ intel_de_write(dev_priv, DPLL_MD(pipe),
+ crtc_state->dpll_hw_state.dpll_md);
+ intel_de_posting_read(dev_priv, DPLL_MD(pipe));
+}
+
+static void chv_prepare_pll(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ enum dpio_channel port = vlv_pipe_to_channel(pipe);
+ u32 loopfilter, tribuf_calcntr;
+ u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
+ u32 dpio_val;
+ int vco;
+
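+	/*
+	 * m2 was computed as a fixed-point value with 22 fractional bits
+	 * (see chv_find_best_dpll()), so split it back into its integer
+	 * and fractional parts for the PLL_DW0/PLL_DW2 writes below.
+	 */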
+ bestn = crtc_state->dpll.n;
+ bestm2_frac = crtc_state->dpll.m2 & 0x3fffff;
+ bestm1 = crtc_state->dpll.m1;
+ bestm2 = crtc_state->dpll.m2 >> 22;
+ bestp1 = crtc_state->dpll.p1;
+ bestp2 = crtc_state->dpll.p2;
+ vco = crtc_state->dpll.vco;
+ dpio_val = 0;
+ loopfilter = 0;
+
+ vlv_dpio_get(dev_priv);
+
+ /* p1 and p2 divider */
+ vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
+ 5 << DPIO_CHV_S1_DIV_SHIFT |
+ bestp1 << DPIO_CHV_P1_DIV_SHIFT |
+ bestp2 << DPIO_CHV_P2_DIV_SHIFT |
+ 1 << DPIO_CHV_K_DIV_SHIFT);
+
+ /* Feedback post-divider - m2 */
+ vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);
+
+ /* Feedback refclk divider - n and m1 */
+ vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
+ DPIO_CHV_M1_DIV_BY_2 |
+ 1 << DPIO_CHV_N_DIV_SHIFT);
+
+ /* M2 fraction division */
+ vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);
+
+ /* M2 fraction division enable */
+ dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
+ dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
+ dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
+ if (bestm2_frac)
+ dpio_val |= DPIO_CHV_FRAC_DIV_EN;
+ vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);
+
+ /* Program digital lock detect threshold */
+ dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
+ dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
+ DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
+ dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
+ if (!bestm2_frac)
+ dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
+ vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);
+
+ /* Loop filter */
+ if (vco == 5400000) {
+ loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
+ loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
+ loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
+ tribuf_calcntr = 0x9;
+ } else if (vco <= 6200000) {
+ loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
+ loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
+ loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
+ tribuf_calcntr = 0x9;
+ } else if (vco <= 6480000) {
+ loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
+ loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
+ loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
+ tribuf_calcntr = 0x8;
+ } else {
+ /* Not supported. Apply the same limits as in the max case */
+ loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
+ loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
+ loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
+ tribuf_calcntr = 0;
+ }
+ vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);
+
+ dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
+ dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
+ dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
+ vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);
+
+ /* AFC Recal */
+ vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
+ vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
+ DPIO_AFC_RECAL);
+
+ vlv_dpio_put(dev_priv);
+}
+
+static void _chv_enable_pll(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ enum dpio_channel port = vlv_pipe_to_channel(pipe);
+ u32 tmp;
+
+ vlv_dpio_get(dev_priv);
+
+ /* Enable back the 10bit clock to display controller */
+ tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
+ tmp |= DPIO_DCLKP_EN;
+ vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);
+
+ vlv_dpio_put(dev_priv);
+
+	/*
+	 * Need to wait > 100ns between setting the dclkp clock enable bit
+	 * and enabling the PLL.
+	 */
+ udelay(1);
+
+ /* Enable PLL */
+ intel_de_write(dev_priv, DPLL(pipe), crtc_state->dpll_hw_state.dpll);
+
+ /* Check PLL is locked */
+ if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
+ drm_err(&dev_priv->drm, "PLL %d failed to lock\n", pipe);
+}
+
+void chv_enable_pll(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+
+ assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder);
+
+ /* PLL is protected by panel, make sure we can write it */
+ assert_pps_unlocked(dev_priv, pipe);
+
+ /* Enable Refclk and SSC */
+ intel_de_write(dev_priv, DPLL(pipe),
+ crtc_state->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
+
+ if (crtc_state->dpll_hw_state.dpll & DPLL_VCO_ENABLE) {
+ chv_prepare_pll(crtc_state);
+ _chv_enable_pll(crtc_state);
+ }
+
+ if (pipe != PIPE_A) {
+ /*
+ * WaPixelRepeatModeFixForC0:chv
+ *
+ * DPLLCMD is AWOL. Use chicken bits to propagate
+ * the value from DPLLBMD to either pipe B or C.
+ */
+ intel_de_write(dev_priv, CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
+ intel_de_write(dev_priv, DPLL_MD(PIPE_B),
+ crtc_state->dpll_hw_state.dpll_md);
+ intel_de_write(dev_priv, CBR4_VLV, 0);
+ dev_priv->chv_dpll_md[pipe] = crtc_state->dpll_hw_state.dpll_md;
+
+ /*
+ * DPLLB VGA mode also seems to cause problems.
+ * We should always have it disabled.
+ */
+ drm_WARN_ON(&dev_priv->drm,
+ (intel_de_read(dev_priv, DPLL(PIPE_B)) &
+ DPLL_VGA_MODE_DIS) == 0);
+ } else {
+ intel_de_write(dev_priv, DPLL_MD(pipe),
+ crtc_state->dpll_hw_state.dpll_md);
+ intel_de_posting_read(dev_priv, DPLL_MD(pipe));
+ }
+}
+
+/**
+ * vlv_force_pll_on - forcibly enable just the PLL
+ * @dev_priv: i915 private structure
+ * @pipe: pipe PLL to enable
+ * @dpll: PLL configuration
+ *
+ * Enable the PLL for @pipe using the supplied @dpll config. To be used
+ * in cases where we need the PLL enabled even when @pipe is not going to
+ * be enabled.
+ *
+ * Returns: 0 on success, a negative error code on failure.
+ */
+int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
+ const struct dpll *dpll)
+{
+ struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
+ struct intel_crtc_state *crtc_state;
+
+ crtc_state = intel_crtc_state_alloc(crtc);
+ if (!crtc_state)
+ return -ENOMEM;
+
+ crtc_state->cpu_transcoder = (enum transcoder)pipe;
+ crtc_state->pixel_multiplier = 1;
+ crtc_state->dpll = *dpll;
+ crtc_state->output_types = BIT(INTEL_OUTPUT_EDP);
+
+ if (IS_CHERRYVIEW(dev_priv)) {
+ chv_compute_dpll(crtc_state);
+ chv_enable_pll(crtc_state);
+ } else {
+ vlv_compute_dpll(crtc_state);
+ vlv_enable_pll(crtc_state);
+ }
+
+ kfree(crtc_state);
+
+ return 0;
+}
+
+void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+ u32 val;
+
+ /* Make sure the pipe isn't still relying on us */
+ assert_transcoder_disabled(dev_priv, (enum transcoder)pipe);
+
+ val = DPLL_INTEGRATED_REF_CLK_VLV |
+ DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
+ if (pipe != PIPE_A)
+ val |= DPLL_INTEGRATED_CRI_CLK_VLV;
+
+ intel_de_write(dev_priv, DPLL(pipe), val);
+ intel_de_posting_read(dev_priv, DPLL(pipe));
+}
+
+void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+ enum dpio_channel port = vlv_pipe_to_channel(pipe);
+ u32 val;
+
+ /* Make sure the pipe isn't still relying on us */
+ assert_transcoder_disabled(dev_priv, (enum transcoder)pipe);
+
+ val = DPLL_SSC_REF_CLK_CHV |
+ DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
+ if (pipe != PIPE_A)
+ val |= DPLL_INTEGRATED_CRI_CLK_VLV;
+
+ intel_de_write(dev_priv, DPLL(pipe), val);
+ intel_de_posting_read(dev_priv, DPLL(pipe));
+
+ vlv_dpio_get(dev_priv);
+
+ /* Disable 10bit clock to display controller */
+ val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
+ val &= ~DPIO_DCLKP_EN;
+ vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);
+
+ vlv_dpio_put(dev_priv);
+}
+
+void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+
+	/* The i830 always needs its DPLLs enabled, so never disable them */
+ if (IS_I830(dev_priv))
+ return;
+
+ /* Make sure the pipe isn't still relying on us */
+ assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder);
+
+ intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS);
+ intel_de_posting_read(dev_priv, DPLL(pipe));
+}
+
+/**
+ * vlv_force_pll_off - forcibly disable just the PLL
+ * @dev_priv: i915 private structure
+ * @pipe: pipe PLL to disable
+ *
+ * Disable the PLL for @pipe. To be used to undo a previous
+ * vlv_force_pll_on(), i.e. when the PLL was enabled even though @pipe
+ * was not going to be enabled.
+ */
+void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+ if (IS_CHERRYVIEW(dev_priv))
+ chv_disable_pll(dev_priv, pipe);
+ else
+ vlv_disable_pll(dev_priv, pipe);
+}
+
+/* Only for pre-ILK configs */
+static void assert_pll(struct drm_i915_private *dev_priv,
+ enum pipe pipe, bool state)
+{
+ bool cur_state;
+
+ cur_state = intel_de_read(dev_priv, DPLL(pipe)) & DPLL_VCO_ENABLE;
+ I915_STATE_WARN(cur_state != state,
+ "PLL state assertion failure (expected %s, current %s)\n",
+ str_on_off(state), str_on_off(cur_state));
+}
+
+void assert_pll_enabled(struct drm_i915_private *i915, enum pipe pipe)
+{
+ assert_pll(i915, pipe, true);
+}
+
+void assert_pll_disabled(struct drm_i915_private *i915, enum pipe pipe)
+{
+ assert_pll(i915, pipe, false);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.h b/drivers/gpu/drm/i915/display/intel_dpll.h
new file mode 100644
index 000000000..bbc30542f
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dpll.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef _INTEL_DPLL_H_
+#define _INTEL_DPLL_H_
+
+#include <linux/types.h>
+
+struct dpll;
+struct drm_i915_private;
+struct intel_atomic_state;
+struct intel_crtc;
+struct intel_crtc_state;
+enum pipe;
+
+void intel_dpll_init_clock_hook(struct drm_i915_private *dev_priv);
+int intel_dpll_crtc_compute_clock(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+int intel_dpll_crtc_get_shared_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+int vlv_calc_dpll_params(int refclk, struct dpll *clock);
+int pnv_calc_dpll_params(int refclk, struct dpll *clock);
+int i9xx_calc_dpll_params(int refclk, struct dpll *clock);
+u32 i9xx_dpll_compute_fp(const struct dpll *dpll);
+void vlv_compute_dpll(struct intel_crtc_state *crtc_state);
+void chv_compute_dpll(struct intel_crtc_state *crtc_state);
+
+int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
+ const struct dpll *dpll);
+void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe);
+
+void chv_enable_pll(const struct intel_crtc_state *crtc_state);
+void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe);
+void vlv_enable_pll(const struct intel_crtc_state *crtc_state);
+void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe);
+void i9xx_enable_pll(const struct intel_crtc_state *crtc_state);
+void i9xx_disable_pll(const struct intel_crtc_state *crtc_state);
+bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
+ struct dpll *best_clock);
+int chv_calc_dpll_params(int refclk, struct dpll *pll_clock);
+
+void assert_pll_enabled(struct drm_i915_private *i915, enum pipe pipe);
+void assert_pll_disabled(struct drm_i915_private *i915, enum pipe pipe);
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
new file mode 100644
index 000000000..64dd603dc
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -0,0 +1,4564 @@
+/*
+ * Copyright © 2006-2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/string_helpers.h>
+
+#include "intel_de.h"
+#include "intel_display_types.h"
+#include "intel_dkl_phy.h"
+#include "intel_dpio_phy.h"
+#include "intel_dpll.h"
+#include "intel_dpll_mgr.h"
+#include "intel_pch_refclk.h"
+#include "intel_tc.h"
+#include "intel_tc_phy_regs.h"
+
+/**
+ * DOC: Display PLLs
+ *
+ * Display PLLs used for driving outputs vary by platform. While some have
+ * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL
+ * from a pool. In the latter scenario, it is possible that multiple pipes
+ * share a PLL if their configurations match.
+ *
+ * This file provides an abstraction over display PLLs. The function
+ * intel_shared_dpll_init() initializes the PLLs for the given platform. The
+ * users of a PLL are tracked and that tracking is integrated with the atomic
+ * modeset interface. During an atomic operation, required PLLs can be reserved
+ * for a given CRTC and encoder configuration by calling
+ * intel_reserve_shared_dplls() and previously reserved PLLs can be released
+ * with intel_release_shared_dplls().
+ * Changes to the users are first staged in the atomic state, and then made
+ * effective by calling intel_shared_dpll_swap_state() during the atomic
+ * commit phase.
+ */
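+
+/*
+ * A rough, simplified sketch of the per-CRTC flow (illustrative only):
+ *
+ *   intel_dpll_crtc_compute_clock()  - compute the divider/hw state
+ *   intel_reserve_shared_dplls()     - stage a reservation in the state
+ *   intel_shared_dpll_swap_state()   - commit: make the staged state live
+ *   intel_enable_shared_dpll()       - enable the hw if not already on
+ */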
+
+/* platform specific hooks for managing DPLLs */
+struct intel_shared_dpll_funcs {
+ /*
+ * Hook for enabling the pll, called from intel_enable_shared_dpll() if
+ * the pll is not already enabled.
+ */
+ void (*enable)(struct drm_i915_private *i915,
+ struct intel_shared_dpll *pll);
+
+ /*
+ * Hook for disabling the pll, called from intel_disable_shared_dpll()
+ * only when it is safe to disable the pll, i.e., there are no more
+ * tracked users for it.
+ */
+ void (*disable)(struct drm_i915_private *i915,
+ struct intel_shared_dpll *pll);
+
+ /*
+ * Hook for reading the values currently programmed to the DPLL
+ * registers. This is used for initial hw state readout and state
+ * verification after a mode set.
+ */
+ bool (*get_hw_state)(struct drm_i915_private *i915,
+ struct intel_shared_dpll *pll,
+ struct intel_dpll_hw_state *hw_state);
+
+ /*
+ * Hook for calculating the pll's output frequency based on its passed
+ * in state.
+ */
+ int (*get_freq)(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll,
+ const struct intel_dpll_hw_state *pll_state);
+};
+
+struct intel_dpll_mgr {
+ const struct dpll_info *dpll_info;
+
+ int (*compute_dplls)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder);
+ int (*get_dplls)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder);
+ void (*put_dplls)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+ void (*update_active_dpll)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder);
+ void (*update_ref_clks)(struct drm_i915_private *i915);
+ void (*dump_hw_state)(struct drm_i915_private *dev_priv,
+ const struct intel_dpll_hw_state *hw_state);
+};
+
+static void
+intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll_state *shared_dpll)
+{
+ enum intel_dpll_id i;
+
+ /* Copy shared dpll state */
+ for (i = 0; i < dev_priv->display.dpll.num_shared_dpll; i++) {
+ struct intel_shared_dpll *pll = &dev_priv->display.dpll.shared_dplls[i];
+
+ shared_dpll[i] = pll->state;
+ }
+}
+
+static struct intel_shared_dpll_state *
+intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
+{
+ struct intel_atomic_state *state = to_intel_atomic_state(s);
+
+ drm_WARN_ON(s->dev, !drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
+
+ if (!state->dpll_set) {
+ state->dpll_set = true;
+
+ intel_atomic_duplicate_dpll_state(to_i915(s->dev),
+ state->shared_dpll);
+ }
+
+ return state->shared_dpll;
+}
+
+/**
+ * intel_get_shared_dpll_by_id - get a DPLL given its id
+ * @dev_priv: i915 device instance
+ * @id: pll id
+ *
+ * Returns:
+ * A pointer to the DPLL with @id
+ */
+struct intel_shared_dpll *
+intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
+ enum intel_dpll_id id)
+{
+ return &dev_priv->display.dpll.shared_dplls[id];
+}
+
+/**
+ * intel_get_shared_dpll_id - get the id of a DPLL
+ * @dev_priv: i915 device instance
+ * @pll: the DPLL
+ *
+ * Returns:
+ * The id of @pll
+ */
+enum intel_dpll_id
+intel_get_shared_dpll_id(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ long pll_idx = pll - dev_priv->display.dpll.shared_dplls;
+
+ if (drm_WARN_ON(&dev_priv->drm,
+ pll_idx < 0 ||
+ pll_idx >= dev_priv->display.dpll.num_shared_dpll))
+ return -1;
+
+ return pll_idx;
+}
+
+/* For ILK+ */
+void assert_shared_dpll(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ bool state)
+{
+ bool cur_state;
+ struct intel_dpll_hw_state hw_state;
+
+ if (drm_WARN(&dev_priv->drm, !pll,
+ "asserting DPLL %s with no DPLL\n", str_on_off(state)))
+ return;
+
+ cur_state = intel_dpll_get_hw_state(dev_priv, pll, &hw_state);
+ I915_STATE_WARN(cur_state != state,
+ "%s assertion failure (expected %s, current %s)\n",
+ pll->info->name, str_on_off(state),
+ str_on_off(cur_state));
+}
+
+static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
+{
+ return TC_PORT_1 + id - DPLL_ID_ICL_MGPLL1;
+}
+
+enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
+{
+ return tc_port - TC_PORT_1 + DPLL_ID_ICL_MGPLL1;
+}
+
+static i915_reg_t
+intel_combo_pll_enable_reg(struct drm_i915_private *i915,
+ struct intel_shared_dpll *pll)
+{
+ if (IS_DG1(i915))
+ return DG1_DPLL_ENABLE(pll->info->id);
+ else if (IS_JSL_EHL(i915) && (pll->info->id == DPLL_ID_EHL_DPLL4))
+ return MG_PLL_ENABLE(0);
+
+ return ICL_DPLL_ENABLE(pll->info->id);
+}
+
+static i915_reg_t
+intel_tc_pll_enable_reg(struct drm_i915_private *i915,
+ struct intel_shared_dpll *pll)
+{
+ const enum intel_dpll_id id = pll->info->id;
+ enum tc_port tc_port = icl_pll_id_to_tc_port(id);
+
+ if (IS_ALDERLAKE_P(i915))
+ return ADLP_PORTTC_PLL_ENABLE(tc_port);
+
+ return MG_PLL_ENABLE(tc_port);
+}
+
+/**
+ * intel_enable_shared_dpll - enable a CRTC's shared DPLL
+ * @crtc_state: CRTC, and its state, which has a shared DPLL
+ *
+ * Enable the shared DPLL used by @crtc.
+ */
+void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ unsigned int pipe_mask = BIT(crtc->pipe);
+ unsigned int old_mask;
+
+ if (drm_WARN_ON(&dev_priv->drm, pll == NULL))
+ return;
+
+ mutex_lock(&dev_priv->display.dpll.lock);
+ old_mask = pll->active_mask;
+
+ if (drm_WARN_ON(&dev_priv->drm, !(pll->state.pipe_mask & pipe_mask)) ||
+ drm_WARN_ON(&dev_priv->drm, pll->active_mask & pipe_mask))
+ goto out;
+
+ pll->active_mask |= pipe_mask;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "enable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
+ pll->info->name, pll->active_mask, pll->on,
+ crtc->base.base.id, crtc->base.name);
+
+ if (old_mask) {
+ drm_WARN_ON(&dev_priv->drm, !pll->on);
+ assert_shared_dpll_enabled(dev_priv, pll);
+ goto out;
+ }
+ drm_WARN_ON(&dev_priv->drm, pll->on);
+
+ drm_dbg_kms(&dev_priv->drm, "enabling %s\n", pll->info->name);
+ pll->info->funcs->enable(dev_priv, pll);
+ pll->on = true;
+
+out:
+ mutex_unlock(&dev_priv->display.dpll.lock);
+}
+
+/**
+ * intel_disable_shared_dpll - disable a CRTC's shared DPLL
+ * @crtc_state: CRTC, and its state, which has a shared DPLL
+ *
+ * Disable the shared DPLL used by @crtc.
+ */
+void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ unsigned int pipe_mask = BIT(crtc->pipe);
+
+ /* PCH only available on ILK+ */
+ if (DISPLAY_VER(dev_priv) < 5)
+ return;
+
+ if (pll == NULL)
+ return;
+
+ mutex_lock(&dev_priv->display.dpll.lock);
+ if (drm_WARN(&dev_priv->drm, !(pll->active_mask & pipe_mask),
+ "%s not used by [CRTC:%d:%s]\n", pll->info->name,
+ crtc->base.base.id, crtc->base.name))
+ goto out;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "disable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
+ pll->info->name, pll->active_mask, pll->on,
+ crtc->base.base.id, crtc->base.name);
+
+ assert_shared_dpll_enabled(dev_priv, pll);
+ drm_WARN_ON(&dev_priv->drm, !pll->on);
+
+ pll->active_mask &= ~pipe_mask;
+ if (pll->active_mask)
+ goto out;
+
+ drm_dbg_kms(&dev_priv->drm, "disabling %s\n", pll->info->name);
+ pll->info->funcs->disable(dev_priv, pll);
+ pll->on = false;
+
+out:
+ mutex_unlock(&dev_priv->display.dpll.lock);
+}
+
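+/*
+ * Find a shared DPLL whose staged hw state matches @pll_state bit for bit
+ * (so the PLL can be shared), falling back to any currently unused PLL
+ * from @dpll_mask.
+ */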
+static struct intel_shared_dpll *
+intel_find_shared_dpll(struct intel_atomic_state *state,
+ const struct intel_crtc *crtc,
+ const struct intel_dpll_hw_state *pll_state,
+ unsigned long dpll_mask)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_shared_dpll *pll, *unused_pll = NULL;
+ struct intel_shared_dpll_state *shared_dpll;
+ enum intel_dpll_id i;
+
+ shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
+
+ drm_WARN_ON(&dev_priv->drm, dpll_mask & ~(BIT(I915_NUM_PLLS) - 1));
+
+ for_each_set_bit(i, &dpll_mask, I915_NUM_PLLS) {
+ pll = &dev_priv->display.dpll.shared_dplls[i];
+
+		/* Check in-use PLLs first; remember an unused one as a fallback */
+ if (shared_dpll[i].pipe_mask == 0) {
+ if (!unused_pll)
+ unused_pll = pll;
+ continue;
+ }
+
+ if (memcmp(pll_state,
+ &shared_dpll[i].hw_state,
+ sizeof(*pll_state)) == 0) {
+ drm_dbg_kms(&dev_priv->drm,
+ "[CRTC:%d:%s] sharing existing %s (pipe mask 0x%x, active 0x%x)\n",
+ crtc->base.base.id, crtc->base.name,
+ pll->info->name,
+ shared_dpll[i].pipe_mask,
+ pll->active_mask);
+ return pll;
+ }
+ }
+
+ /* Ok no matching timings, maybe there's a free one? */
+ if (unused_pll) {
+ drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] allocated %s\n",
+ crtc->base.base.id, crtc->base.name,
+ unused_pll->info->name);
+ return unused_pll;
+ }
+
+ return NULL;
+}
+
+static void
+intel_reference_shared_dpll(struct intel_atomic_state *state,
+ const struct intel_crtc *crtc,
+ const struct intel_shared_dpll *pll,
+ const struct intel_dpll_hw_state *pll_state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_shared_dpll_state *shared_dpll;
+ const enum intel_dpll_id id = pll->info->id;
+
+ shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
+
+ if (shared_dpll[id].pipe_mask == 0)
+ shared_dpll[id].hw_state = *pll_state;
+
+ drm_dbg(&i915->drm, "using %s for pipe %c\n", pll->info->name,
+ pipe_name(crtc->pipe));
+
+ shared_dpll[id].pipe_mask |= BIT(crtc->pipe);
+}
+
+static void intel_unreference_shared_dpll(struct intel_atomic_state *state,
+ const struct intel_crtc *crtc,
+ const struct intel_shared_dpll *pll)
+{
+ struct intel_shared_dpll_state *shared_dpll;
+
+ shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
+ shared_dpll[pll->info->id].pipe_mask &= ~BIT(crtc->pipe);
+}
+
+static void intel_put_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ new_crtc_state->shared_dpll = NULL;
+
+ if (!old_crtc_state->shared_dpll)
+ return;
+
+ intel_unreference_shared_dpll(state, crtc, old_crtc_state->shared_dpll);
+}
+
+/**
+ * intel_shared_dpll_swap_state - make atomic DPLL configuration effective
+ * @state: atomic state
+ *
+ * This is the dpll version of drm_atomic_helper_swap_state() since the
+ * helper does not handle driver-specific global state.
+ *
+ * For consistency with atomic helpers this function does a complete swap,
+ * i.e. it also puts the current state into @state, even though there is no
+ * need for that at this moment.
+ */
+void intel_shared_dpll_swap_state(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_shared_dpll_state *shared_dpll = state->shared_dpll;
+ enum intel_dpll_id i;
+
+ if (!state->dpll_set)
+ return;
+
+ for (i = 0; i < dev_priv->display.dpll.num_shared_dpll; i++) {
+ struct intel_shared_dpll *pll =
+ &dev_priv->display.dpll.shared_dplls[i];
+
+ swap(pll->state, shared_dpll[i]);
+ }
+}
+
+static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ struct intel_dpll_hw_state *hw_state)
+{
+ const enum intel_dpll_id id = pll->info->id;
+ intel_wakeref_t wakeref;
+ u32 val;
+
+ wakeref = intel_display_power_get_if_enabled(dev_priv,
+ POWER_DOMAIN_DISPLAY_CORE);
+ if (!wakeref)
+ return false;
+
+ val = intel_de_read(dev_priv, PCH_DPLL(id));
+ hw_state->dpll = val;
+ hw_state->fp0 = intel_de_read(dev_priv, PCH_FP0(id));
+ hw_state->fp1 = intel_de_read(dev_priv, PCH_FP1(id));
+
+ intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
+
+ return val & DPLL_VCO_ENABLE;
+}
+
+static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
+{
+ u32 val;
+ bool enabled;
+
+ I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
+
+ val = intel_de_read(dev_priv, PCH_DREF_CONTROL);
+ enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
+ DREF_SUPERSPREAD_SOURCE_MASK));
+ I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
+}
+
+static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ const enum intel_dpll_id id = pll->info->id;
+
+ /* PCH refclock must be enabled first */
+ ibx_assert_pch_refclk_enabled(dev_priv);
+
+ intel_de_write(dev_priv, PCH_FP0(id), pll->state.hw_state.fp0);
+ intel_de_write(dev_priv, PCH_FP1(id), pll->state.hw_state.fp1);
+
+ intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);
+
+ /* Wait for the clocks to stabilize. */
+ intel_de_posting_read(dev_priv, PCH_DPLL(id));
+ udelay(150);
+
+	/*
+	 * The pixel multiplier can only be updated once the
+	 * DPLL is enabled and the clocks are stable.
+	 *
+	 * So write it again.
+	 */
+ intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);
+ intel_de_posting_read(dev_priv, PCH_DPLL(id));
+ udelay(200);
+}
+
+static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ const enum intel_dpll_id id = pll->info->id;
+
+ intel_de_write(dev_priv, PCH_DPLL(id), 0);
+ intel_de_posting_read(dev_priv, PCH_DPLL(id));
+ udelay(200);
+}
+
+static int ibx_compute_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
+{
+ return 0;
+}
+
+static int ibx_get_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
+{
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_shared_dpll *pll;
+ enum intel_dpll_id i;
+
+ if (HAS_PCH_IBX(dev_priv)) {
+ /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
+ i = (enum intel_dpll_id) crtc->pipe;
+ pll = &dev_priv->display.dpll.shared_dplls[i];
+
+ drm_dbg_kms(&dev_priv->drm,
+ "[CRTC:%d:%s] using pre-allocated %s\n",
+ crtc->base.base.id, crtc->base.name,
+ pll->info->name);
+ } else {
+ pll = intel_find_shared_dpll(state, crtc,
+ &crtc_state->dpll_hw_state,
+ BIT(DPLL_ID_PCH_PLL_B) |
+ BIT(DPLL_ID_PCH_PLL_A));
+ }
+
+ if (!pll)
+ return -EINVAL;
+
+ /* reference the pll */
+ intel_reference_shared_dpll(state, crtc,
+ pll, &crtc_state->dpll_hw_state);
+
+ crtc_state->shared_dpll = pll;
+
+ return 0;
+}
+
+static void ibx_dump_hw_state(struct drm_i915_private *dev_priv,
+ const struct intel_dpll_hw_state *hw_state)
+{
+ drm_dbg_kms(&dev_priv->drm,
+ "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
+ "fp0: 0x%x, fp1: 0x%x\n",
+ hw_state->dpll,
+ hw_state->dpll_md,
+ hw_state->fp0,
+ hw_state->fp1);
+}
+
+static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
+ .enable = ibx_pch_dpll_enable,
+ .disable = ibx_pch_dpll_disable,
+ .get_hw_state = ibx_pch_dpll_get_hw_state,
+};
+
+static const struct dpll_info pch_plls[] = {
+ { "PCH DPLL A", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_A, 0 },
+ { "PCH DPLL B", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_B, 0 },
+ { },
+};
+
+static const struct intel_dpll_mgr pch_pll_mgr = {
+ .dpll_info = pch_plls,
+ .compute_dplls = ibx_compute_dpll,
+ .get_dplls = ibx_get_dpll,
+ .put_dplls = intel_put_dpll,
+ .dump_hw_state = ibx_dump_hw_state,
+};
+
+static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ const enum intel_dpll_id id = pll->info->id;
+
+ intel_de_write(dev_priv, WRPLL_CTL(id), pll->state.hw_state.wrpll);
+ intel_de_posting_read(dev_priv, WRPLL_CTL(id));
+ udelay(20);
+}
+
+static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ intel_de_write(dev_priv, SPLL_CTL, pll->state.hw_state.spll);
+ intel_de_posting_read(dev_priv, SPLL_CTL);
+ udelay(20);
+}
+
+static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ const enum intel_dpll_id id = pll->info->id;
+ u32 val;
+
+ val = intel_de_read(dev_priv, WRPLL_CTL(id));
+ intel_de_write(dev_priv, WRPLL_CTL(id), val & ~WRPLL_PLL_ENABLE);
+ intel_de_posting_read(dev_priv, WRPLL_CTL(id));
+
+ /*
+ * Try to set up the PCH reference clock once all DPLLs
+ * that depend on it have been shut down.
+ */
+ if (dev_priv->pch_ssc_use & BIT(id))
+ intel_init_pch_refclk(dev_priv);
+}
+
+static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ enum intel_dpll_id id = pll->info->id;
+ u32 val;
+
+ val = intel_de_read(dev_priv, SPLL_CTL);
+ intel_de_write(dev_priv, SPLL_CTL, val & ~SPLL_PLL_ENABLE);
+ intel_de_posting_read(dev_priv, SPLL_CTL);
+
+ /*
+ * Try to set up the PCH reference clock once all DPLLs
+ * that depend on it have been shut down.
+ */
+ if (dev_priv->pch_ssc_use & BIT(id))
+ intel_init_pch_refclk(dev_priv);
+}
+
+static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ struct intel_dpll_hw_state *hw_state)
+{
+ const enum intel_dpll_id id = pll->info->id;
+ intel_wakeref_t wakeref;
+ u32 val;
+
+ wakeref = intel_display_power_get_if_enabled(dev_priv,
+ POWER_DOMAIN_DISPLAY_CORE);
+ if (!wakeref)
+ return false;
+
+ val = intel_de_read(dev_priv, WRPLL_CTL(id));
+ hw_state->wrpll = val;
+
+ intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
+
+ return val & WRPLL_PLL_ENABLE;
+}
+
+static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ struct intel_dpll_hw_state *hw_state)
+{
+ intel_wakeref_t wakeref;
+ u32 val;
+
+ wakeref = intel_display_power_get_if_enabled(dev_priv,
+ POWER_DOMAIN_DISPLAY_CORE);
+ if (!wakeref)
+ return false;
+
+ val = intel_de_read(dev_priv, SPLL_CTL);
+ hw_state->spll = val;
+
+ intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
+
+ return val & SPLL_PLL_ENABLE;
+}
+
+#define LC_FREQ 2700
+#define LC_FREQ_2K U64_C(LC_FREQ * 2000)
+
+#define P_MIN 2
+#define P_MAX 64
+#define P_INC 2
+
+/* Constraints for PLL good behavior */
+#define REF_MIN 48
+#define REF_MAX 400
+#define VCO_MIN 2400
+#define VCO_MAX 4800
+
+struct hsw_wrpll_rnp {
+ unsigned p, n2, r2;
+};
+
+static unsigned hsw_wrpll_get_budget_for_freq(int clock)
+{
+ unsigned budget;
+
+ switch (clock) {
+ case 25175000:
+ case 25200000:
+ case 27000000:
+ case 27027000:
+ case 37762500:
+ case 37800000:
+ case 40500000:
+ case 40541000:
+ case 54000000:
+ case 54054000:
+ case 59341000:
+ case 59400000:
+ case 72000000:
+ case 74176000:
+ case 74250000:
+ case 81000000:
+ case 81081000:
+ case 89012000:
+ case 89100000:
+ case 108000000:
+ case 108108000:
+ case 111264000:
+ case 111375000:
+ case 148352000:
+ case 148500000:
+ case 162000000:
+ case 162162000:
+ case 222525000:
+ case 222750000:
+ case 296703000:
+ case 297000000:
+ budget = 0;
+ break;
+ case 233500000:
+ case 245250000:
+ case 247750000:
+ case 253250000:
+ case 298000000:
+ budget = 1500;
+ break;
+ case 169128000:
+ case 169500000:
+ case 179500000:
+ case 202000000:
+ budget = 2000;
+ break;
+ case 256250000:
+ case 262500000:
+ case 270000000:
+ case 272500000:
+ case 273750000:
+ case 280750000:
+ case 281250000:
+ case 286000000:
+ case 291750000:
+ budget = 4000;
+ break;
+ case 267250000:
+ case 268500000:
+ budget = 5000;
+ break;
+ default:
+ budget = 1000;
+ break;
+ }
+
+ return budget;
+}
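+
+/*
+ * Note: per the delta formula in hsw_wrpll_update_rnp() below, the budget
+ * is expressed in ppm of the target frequency: e.g. budget = 1000 allows
+ * the synthesized clock to be off by up to 0.1%, while budget = 0 forces
+ * a pure closest-match search.
+ */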
+
+static void hsw_wrpll_update_rnp(u64 freq2k, unsigned int budget,
+ unsigned int r2, unsigned int n2,
+ unsigned int p,
+ struct hsw_wrpll_rnp *best)
+{
+ u64 a, b, c, d, diff, diff_best;
+
+ /* No best (r,n,p) yet */
+ if (best->p == 0) {
+ best->p = p;
+ best->n2 = n2;
+ best->r2 = r2;
+ return;
+ }
+
+ /*
+ * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
+ * freq2k.
+ *
+ * delta = 1e6 *
+ * abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
+ * freq2k;
+ *
+ * and we would like delta <= budget.
+ *
+ * If the discrepancy is above the PPM-based budget, always prefer to
+ * improve upon the previous solution. However, if you're within the
+ * budget, try to maximize Ref * VCO, that is N / (P * R^2).
+ */
+ a = freq2k * budget * p * r2;
+ b = freq2k * budget * best->p * best->r2;
+ diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
+ diff_best = abs_diff(freq2k * best->p * best->r2,
+ LC_FREQ_2K * best->n2);
+ c = 1000000 * diff;
+ d = 1000000 * diff_best;
+
+ if (a < c && b < d) {
+ /* If both are above the budget, pick the closer */
+ if (best->p * best->r2 * diff < p * r2 * diff_best) {
+ best->p = p;
+ best->n2 = n2;
+ best->r2 = r2;
+ }
+ } else if (a >= c && b < d) {
+ /* A is within the budget but B is not: take the new candidate. */
+ best->p = p;
+ best->n2 = n2;
+ best->r2 = r2;
+ } else if (a >= c && b >= d) {
+ /* Both are below the limit, so pick the higher n2/(r2*r2) */
+ if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
+ best->p = p;
+ best->n2 = n2;
+ best->r2 = r2;
+ }
+ }
+ /* Otherwise a < c && b >= d, do nothing */
+}
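+
+/*
+ * A worked reading of the comparisons above (derived from the code, not
+ * bspec): since the ppm error of a candidate is
+ * 1e6 * diff / (freq2k * p * r2), the test "a < c", i.e.
+ * freq2k * budget * p * r2 < 1e6 * diff, is simply "error > budget"
+ * rewritten to avoid divisions.
+ */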
+
+static void
+hsw_ddi_calculate_wrpll(int clock /* in Hz */,
+ unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
+{
+ u64 freq2k;
+ unsigned p, n2, r2;
+ struct hsw_wrpll_rnp best = {};
+ unsigned budget;
+
+ freq2k = clock / 100;
+
+ budget = hsw_wrpll_get_budget_for_freq(clock);
+
+ /*
+ * Special case handling for the 540 MHz pixel clock: bypass the WR PLL
+ * entirely and use the LC PLL directly.
+ */
+ if (freq2k == 5400000) {
+ *n2_out = 2;
+ *p_out = 1;
+ *r2_out = 2;
+ return;
+ }
+
+ /*
+ * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
+ * the WR PLL.
+ *
+ * We want R so that REF_MIN <= Ref <= REF_MAX.
+ * Injecting R2 = 2 * R gives:
+ * REF_MAX * r2 > LC_FREQ * 2 and
+ * REF_MIN * r2 < LC_FREQ * 2
+ *
+ * Which means the desired boundaries for r2 are:
+ * LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
+ */
+ for (r2 = LC_FREQ * 2 / REF_MAX + 1;
+ r2 <= LC_FREQ * 2 / REF_MIN;
+ r2++) {
+
+ /*
+ * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
+ *
+ * Once again we want VCO_MIN <= VCO <= VCO_MAX.
+ * Injecting R2 = 2 * R and N2 = 2 * N, we get:
+ * VCO_MAX * r2 > n2 * LC_FREQ and
+ * VCO_MIN * r2 < n2 * LC_FREQ
+ *
+ * Which means the desired boundaries for n2 are:
+ * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
+ */
+ for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
+ n2 <= VCO_MAX * r2 / LC_FREQ;
+ n2++) {
+
+ for (p = P_MIN; p <= P_MAX; p += P_INC)
+ hsw_wrpll_update_rnp(freq2k, budget,
+ r2, n2, p, &best);
+ }
+ }
+
+ *n2_out = best.n2;
+ *p_out = best.p;
+ *r2_out = best.r2;
+}
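+
+/*
+ * Illustrative bounds for the search above (derived from the constants,
+ * not bspec): with LC_FREQ = 2700, REF_MIN/REF_MAX = 48/400 and
+ * VCO_MIN/VCO_MAX = 2400/4800, r2 runs from 5400 / 400 + 1 = 14 to
+ * 5400 / 48 = 112, and e.g. for r2 = 14 the n2 loop scans 13..24 while p
+ * steps over the even values 2..64.
+ */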
+
+static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
+ const struct intel_shared_dpll *pll,
+ const struct intel_dpll_hw_state *pll_state)
+{
+ int refclk;
+ int n, p, r;
+ u32 wrpll = pll_state->wrpll;
+
+ switch (wrpll & WRPLL_REF_MASK) {
+ case WRPLL_REF_SPECIAL_HSW:
+ /* Muxed-SSC for BDW, non-SSC for non-ULT HSW. */
+ if (IS_HASWELL(dev_priv) && !IS_HSW_ULT(dev_priv)) {
+ refclk = dev_priv->display.dpll.ref_clks.nssc;
+ break;
+ }
+ fallthrough;
+ case WRPLL_REF_PCH_SSC:
+ /*
+ * We could calculate spread here, but our checking
+ * code only cares about 5% accuracy, and spread is a max of
+ * 0.5% downspread.
+ */
+ refclk = dev_priv->display.dpll.ref_clks.ssc;
+ break;
+ case WRPLL_REF_LCPLL:
+ refclk = 2700000;
+ break;
+ default:
+ MISSING_CASE(wrpll);
+ return 0;
+ }
+
+ r = wrpll & WRPLL_DIVIDER_REF_MASK;
+ p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
+ n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;
+
+ /* Convert to KHz, p & r have a fixed point portion */
+ return (refclk * n / 10) / (p * r) * 2;
+}
+
+static int
+hsw_ddi_wrpll_compute_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ unsigned int p, n2, r2;
+
+ hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);
+
+ crtc_state->dpll_hw_state.wrpll =
+ WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
+ WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
+ WRPLL_DIVIDER_POST(p);
+
+ crtc_state->port_clock = hsw_ddi_wrpll_get_freq(i915, NULL,
+ &crtc_state->dpll_hw_state);
+
+ return 0;
+}
+
+static struct intel_shared_dpll *
+hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ return intel_find_shared_dpll(state, crtc,
+ &crtc_state->dpll_hw_state,
+ BIT(DPLL_ID_WRPLL2) |
+ BIT(DPLL_ID_WRPLL1));
+}
+
+static int
+hsw_ddi_lcpll_compute_dpll(struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ int clock = crtc_state->port_clock;
+
+ switch (clock / 2) {
+ case 81000:
+ case 135000:
+ case 270000:
+ return 0;
+ default:
+ drm_dbg_kms(&dev_priv->drm, "Invalid clock for DP: %d\n",
+ clock);
+ return -EINVAL;
+ }
+}
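+
+/*
+ * Note: clock / 2 above selects among the three fixed LCPLL link clocks
+ * (810, 1350 and 2700 MHz), mirroring hsw_ddi_lcpll_get_freq() below
+ * which returns link_clock * 2; e.g. a 270000 kHz port clock maps to
+ * DPLL_ID_LCPLL_1350 in hsw_ddi_lcpll_get_dpll().
+ */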
+
+static struct intel_shared_dpll *
+hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_shared_dpll *pll;
+ enum intel_dpll_id pll_id;
+ int clock = crtc_state->port_clock;
+
+ switch (clock / 2) {
+ case 81000:
+ pll_id = DPLL_ID_LCPLL_810;
+ break;
+ case 135000:
+ pll_id = DPLL_ID_LCPLL_1350;
+ break;
+ case 270000:
+ pll_id = DPLL_ID_LCPLL_2700;
+ break;
+ default:
+ MISSING_CASE(clock / 2);
+ return NULL;
+ }
+
+ pll = intel_get_shared_dpll_by_id(dev_priv, pll_id);
+
+ if (!pll)
+ return NULL;
+
+ return pll;
+}
+
+static int hsw_ddi_lcpll_get_freq(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll,
+ const struct intel_dpll_hw_state *pll_state)
+{
+ int link_clock = 0;
+
+ switch (pll->info->id) {
+ case DPLL_ID_LCPLL_810:
+ link_clock = 81000;
+ break;
+ case DPLL_ID_LCPLL_1350:
+ link_clock = 135000;
+ break;
+ case DPLL_ID_LCPLL_2700:
+ link_clock = 270000;
+ break;
+ default:
+ drm_WARN(&i915->drm, 1, "bad port clock sel\n");
+ break;
+ }
+
+ return link_clock * 2;
+}
+
+static int
+hsw_ddi_spll_compute_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ if (drm_WARN_ON(crtc->base.dev, crtc_state->port_clock / 2 != 135000))
+ return -EINVAL;
+
+ crtc_state->dpll_hw_state.spll =
+ SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz | SPLL_REF_MUXED_SSC;
+
+ return 0;
+}
+
+static struct intel_shared_dpll *
+hsw_ddi_spll_get_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ return intel_find_shared_dpll(state, crtc, &crtc_state->dpll_hw_state,
+ BIT(DPLL_ID_SPLL));
+}
+
+static int hsw_ddi_spll_get_freq(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll,
+ const struct intel_dpll_hw_state *pll_state)
+{
+ int link_clock = 0;
+
+ switch (pll_state->spll & SPLL_FREQ_MASK) {
+ case SPLL_FREQ_810MHz:
+ link_clock = 81000;
+ break;
+ case SPLL_FREQ_1350MHz:
+ link_clock = 135000;
+ break;
+ case SPLL_FREQ_2700MHz:
+ link_clock = 270000;
+ break;
+ default:
+ drm_WARN(&i915->drm, 1, "bad spll freq\n");
+ break;
+ }
+
+ return link_clock * 2;
+}
+
+static int hsw_compute_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
+{
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ return hsw_ddi_wrpll_compute_dpll(state, crtc);
+ else if (intel_crtc_has_dp_encoder(crtc_state))
+ return hsw_ddi_lcpll_compute_dpll(crtc_state);
+ else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
+ return hsw_ddi_spll_compute_dpll(state, crtc);
+ else
+ return -EINVAL;
+}
+
+static int hsw_get_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
+{
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct intel_shared_dpll *pll = NULL;
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ pll = hsw_ddi_wrpll_get_dpll(state, crtc);
+ else if (intel_crtc_has_dp_encoder(crtc_state))
+ pll = hsw_ddi_lcpll_get_dpll(crtc_state);
+ else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
+ pll = hsw_ddi_spll_get_dpll(state, crtc);
+
+ if (!pll)
+ return -EINVAL;
+
+ intel_reference_shared_dpll(state, crtc,
+ pll, &crtc_state->dpll_hw_state);
+
+ crtc_state->shared_dpll = pll;
+
+ return 0;
+}
+
+static void hsw_update_dpll_ref_clks(struct drm_i915_private *i915)
+{
+ i915->display.dpll.ref_clks.ssc = 135000;
+ /* Non-SSC is only used on non-ULT HSW. */
+ if (intel_de_read(i915, FUSE_STRAP3) & HSW_REF_CLK_SELECT)
+ i915->display.dpll.ref_clks.nssc = 24000;
+ else
+ i915->display.dpll.ref_clks.nssc = 135000;
+}
+
+static void hsw_dump_hw_state(struct drm_i915_private *dev_priv,
+ const struct intel_dpll_hw_state *hw_state)
+{
+ drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
+ hw_state->wrpll, hw_state->spll);
+}
+
+static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
+ .enable = hsw_ddi_wrpll_enable,
+ .disable = hsw_ddi_wrpll_disable,
+ .get_hw_state = hsw_ddi_wrpll_get_hw_state,
+ .get_freq = hsw_ddi_wrpll_get_freq,
+};
+
+static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
+ .enable = hsw_ddi_spll_enable,
+ .disable = hsw_ddi_spll_disable,
+ .get_hw_state = hsw_ddi_spll_get_hw_state,
+ .get_freq = hsw_ddi_spll_get_freq,
+};
+
+static void hsw_ddi_lcpll_enable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+}
+
+static void hsw_ddi_lcpll_disable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+}
+
+static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ struct intel_dpll_hw_state *hw_state)
+{
+ return true;
+}
+
+static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
+ .enable = hsw_ddi_lcpll_enable,
+ .disable = hsw_ddi_lcpll_disable,
+ .get_hw_state = hsw_ddi_lcpll_get_hw_state,
+ .get_freq = hsw_ddi_lcpll_get_freq,
+};
+
+static const struct dpll_info hsw_plls[] = {
+ { "WRPLL 1", &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL1, 0 },
+ { "WRPLL 2", &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL2, 0 },
+ { "SPLL", &hsw_ddi_spll_funcs, DPLL_ID_SPLL, 0 },
+ { "LCPLL 810", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_810, INTEL_DPLL_ALWAYS_ON },
+ { "LCPLL 1350", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_1350, INTEL_DPLL_ALWAYS_ON },
+ { "LCPLL 2700", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_2700, INTEL_DPLL_ALWAYS_ON },
+ { },
+};
+
+static const struct intel_dpll_mgr hsw_pll_mgr = {
+ .dpll_info = hsw_plls,
+ .compute_dplls = hsw_compute_dpll,
+ .get_dplls = hsw_get_dpll,
+ .put_dplls = intel_put_dpll,
+ .update_ref_clks = hsw_update_dpll_ref_clks,
+ .dump_hw_state = hsw_dump_hw_state,
+};
+
+struct skl_dpll_regs {
+ i915_reg_t ctl, cfgcr1, cfgcr2;
+};
+
+/* this array is indexed by the *shared* pll id */
+static const struct skl_dpll_regs skl_dpll_regs[4] = {
+ {
+ /* DPLL 0 */
+ .ctl = LCPLL1_CTL,
+ /* DPLL 0 doesn't support HDMI mode */
+ },
+ {
+ /* DPLL 1 */
+ .ctl = LCPLL2_CTL,
+ .cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
+ .cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
+ },
+ {
+ /* DPLL 2 */
+ .ctl = WRPLL_CTL(0),
+ .cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
+ .cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
+ },
+ {
+ /* DPLL 3 */
+ .ctl = WRPLL_CTL(1),
+ .cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
+ .cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
+ },
+};
+
+static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ const enum intel_dpll_id id = pll->info->id;
+ u32 val;
+
+ val = intel_de_read(dev_priv, DPLL_CTRL1);
+
+ val &= ~(DPLL_CTRL1_HDMI_MODE(id) |
+ DPLL_CTRL1_SSC(id) |
+ DPLL_CTRL1_LINK_RATE_MASK(id));
+ val |= pll->state.hw_state.ctrl1 << (id * 6);
+
+ intel_de_write(dev_priv, DPLL_CTRL1, val);
+ intel_de_posting_read(dev_priv, DPLL_CTRL1);
+}
+
+static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ const struct skl_dpll_regs *regs = skl_dpll_regs;
+ const enum intel_dpll_id id = pll->info->id;
+
+ skl_ddi_pll_write_ctrl1(dev_priv, pll);
+
+ intel_de_write(dev_priv, regs[id].cfgcr1, pll->state.hw_state.cfgcr1);
+ intel_de_write(dev_priv, regs[id].cfgcr2, pll->state.hw_state.cfgcr2);
+ intel_de_posting_read(dev_priv, regs[id].cfgcr1);
+ intel_de_posting_read(dev_priv, regs[id].cfgcr2);
+
+ /* the enable bit is always bit 31 */
+ intel_de_write(dev_priv, regs[id].ctl,
+ intel_de_read(dev_priv, regs[id].ctl) | LCPLL_PLL_ENABLE);
+
+ if (intel_de_wait_for_set(dev_priv, DPLL_STATUS, DPLL_LOCK(id), 5))
+ drm_err(&dev_priv->drm, "DPLL %d not locked\n", id);
+}
+
+static void skl_ddi_dpll0_enable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ skl_ddi_pll_write_ctrl1(dev_priv, pll);
+}
+
+static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ const struct skl_dpll_regs *regs = skl_dpll_regs;
+ const enum intel_dpll_id id = pll->info->id;
+
+ /* the enable bit is always bit 31 */
+ intel_de_write(dev_priv, regs[id].ctl,
+ intel_de_read(dev_priv, regs[id].ctl) & ~LCPLL_PLL_ENABLE);
+ intel_de_posting_read(dev_priv, regs[id].ctl);
+}
+
+static void skl_ddi_dpll0_disable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+}
+
+static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ struct intel_dpll_hw_state *hw_state)
+{
+ u32 val;
+ const struct skl_dpll_regs *regs = skl_dpll_regs;
+ const enum intel_dpll_id id = pll->info->id;
+ intel_wakeref_t wakeref;
+ bool ret;
+
+ wakeref = intel_display_power_get_if_enabled(dev_priv,
+ POWER_DOMAIN_DISPLAY_CORE);
+ if (!wakeref)
+ return false;
+
+ ret = false;
+
+ val = intel_de_read(dev_priv, regs[id].ctl);
+ if (!(val & LCPLL_PLL_ENABLE))
+ goto out;
+
+ val = intel_de_read(dev_priv, DPLL_CTRL1);
+ hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
+
+ /* avoid reading back stale values if HDMI mode is not enabled */
+ if (val & DPLL_CTRL1_HDMI_MODE(id)) {
+ hw_state->cfgcr1 = intel_de_read(dev_priv, regs[id].cfgcr1);
+ hw_state->cfgcr2 = intel_de_read(dev_priv, regs[id].cfgcr2);
+ }
+ ret = true;
+
+out:
+ intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
+
+ return ret;
+}
+
+static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ struct intel_dpll_hw_state *hw_state)
+{
+ const struct skl_dpll_regs *regs = skl_dpll_regs;
+ const enum intel_dpll_id id = pll->info->id;
+ intel_wakeref_t wakeref;
+ u32 val;
+ bool ret;
+
+ wakeref = intel_display_power_get_if_enabled(dev_priv,
+ POWER_DOMAIN_DISPLAY_CORE);
+ if (!wakeref)
+ return false;
+
+ ret = false;
+
+ /* DPLL0 is always enabled since it drives CDCLK */
+ val = intel_de_read(dev_priv, regs[id].ctl);
+ if (drm_WARN_ON(&dev_priv->drm, !(val & LCPLL_PLL_ENABLE)))
+ goto out;
+
+ val = intel_de_read(dev_priv, DPLL_CTRL1);
+ hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
+
+ ret = true;
+
+out:
+ intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
+
+ return ret;
+}
+
+struct skl_wrpll_context {
+ u64 min_deviation; /* current minimal deviation */
+ u64 central_freq; /* chosen central freq */
+ u64 dco_freq; /* chosen dco freq */
+ unsigned int p; /* chosen divider */
+};
+
+/* DCO freq must be within +1%/-6% of the DCO central freq */
+#define SKL_DCO_MAX_PDEVIATION 100
+#define SKL_DCO_MAX_NDEVIATION 600
+
+static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
+ u64 central_freq,
+ u64 dco_freq,
+ unsigned int divider)
+{
+ u64 deviation;
+
+ deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
+ central_freq);
+
+ /* positive deviation */
+ if (dco_freq >= central_freq) {
+ if (deviation < SKL_DCO_MAX_PDEVIATION &&
+ deviation < ctx->min_deviation) {
+ ctx->min_deviation = deviation;
+ ctx->central_freq = central_freq;
+ ctx->dco_freq = dco_freq;
+ ctx->p = divider;
+ }
+ /* negative deviation */
+ } else if (deviation < SKL_DCO_MAX_NDEVIATION &&
+ deviation < ctx->min_deviation) {
+ ctx->min_deviation = deviation;
+ ctx->central_freq = central_freq;
+ ctx->dco_freq = dco_freq;
+ ctx->p = divider;
+ }
+}
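+
+/*
+ * Note: the deviation above is computed in units of 0.01%, so the
+ * SKL_DCO_MAX_PDEVIATION/SKL_DCO_MAX_NDEVIATION limits of 100 and 600
+ * correspond to the +1%/-6% window mentioned above.
+ */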
+
+static void skl_wrpll_get_multipliers(unsigned int p,
+ unsigned int *p0 /* out */,
+ unsigned int *p1 /* out */,
+ unsigned int *p2 /* out */)
+{
+ /* even dividers */
+ if (p % 2 == 0) {
+ unsigned int half = p / 2;
+
+ if (half == 1 || half == 2 || half == 3 || half == 5) {
+ *p0 = 2;
+ *p1 = 1;
+ *p2 = half;
+ } else if (half % 2 == 0) {
+ *p0 = 2;
+ *p1 = half / 2;
+ *p2 = 2;
+ } else if (half % 3 == 0) {
+ *p0 = 3;
+ *p1 = half / 3;
+ *p2 = 2;
+ } else if (half % 7 == 0) {
+ *p0 = 7;
+ *p1 = half / 7;
+ *p2 = 2;
+ }
+ } else if (p == 3 || p == 9) { /* 3, 5, 7, 9, 15, 21, 35 */
+ *p0 = 3;
+ *p1 = 1;
+ *p2 = p / 3;
+ } else if (p == 5 || p == 7) {
+ *p0 = p;
+ *p1 = 1;
+ *p2 = 1;
+ } else if (p == 15) {
+ *p0 = 3;
+ *p1 = 1;
+ *p2 = 5;
+ } else if (p == 21) {
+ *p0 = 7;
+ *p1 = 1;
+ *p2 = 3;
+ } else if (p == 35) {
+ *p0 = 7;
+ *p1 = 1;
+ *p2 = 5;
+ }
+}
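+
+/*
+ * Worked example (arithmetic only, not bspec): p = 20 is even with
+ * half = 10, which is itself even, giving p0 = 2, p1 = 10 / 2 = 5 and
+ * p2 = 2; the product p0 * p1 * p2 = 20 reproduces the requested divider.
+ */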
+
+struct skl_wrpll_params {
+ u32 dco_fraction;
+ u32 dco_integer;
+ u32 qdiv_ratio;
+ u32 qdiv_mode;
+ u32 kdiv;
+ u32 pdiv;
+ u32 central_freq;
+};
+
+static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
+ u64 afe_clock,
+ int ref_clock,
+ u64 central_freq,
+ u32 p0, u32 p1, u32 p2)
+{
+ u64 dco_freq;
+
+ switch (central_freq) {
+ case 9600000000ULL:
+ params->central_freq = 0;
+ break;
+ case 9000000000ULL:
+ params->central_freq = 1;
+ break;
+ case 8400000000ULL:
+ params->central_freq = 3;
+ }
+
+ switch (p0) {
+ case 1:
+ params->pdiv = 0;
+ break;
+ case 2:
+ params->pdiv = 1;
+ break;
+ case 3:
+ params->pdiv = 2;
+ break;
+ case 7:
+ params->pdiv = 4;
+ break;
+ default:
+ WARN(1, "Incorrect PDiv\n");
+ }
+
+ switch (p2) {
+ case 5:
+ params->kdiv = 0;
+ break;
+ case 2:
+ params->kdiv = 1;
+ break;
+ case 3:
+ params->kdiv = 2;
+ break;
+ case 1:
+ params->kdiv = 3;
+ break;
+ default:
+ WARN(1, "Incorrect KDiv\n");
+ }
+
+ params->qdiv_ratio = p1;
+ params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;
+
+ dco_freq = p0 * p1 * p2 * afe_clock;
+
+ /*
+ * Intermediate values are in Hz.
+ * Divide by MHz to match bspec.
+ */
+ params->dco_integer = div_u64(dco_freq, ref_clock * KHz(1));
+ params->dco_fraction =
+ div_u64((div_u64(dco_freq, ref_clock / KHz(1)) -
+ params->dco_integer * MHz(1)) * 0x8000, MHz(1));
+}
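+
+/*
+ * Worked example of the fixed-point split above (illustrative): an
+ * 8.91 GHz DCO with a 24 MHz (24000 kHz) reference gives
+ * dco_freq / ref = 371.25, i.e. dco_integer = 371 and
+ * dco_fraction = 0.25 * 0x8000 = 0x2000.
+ */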
+
+static int
+skl_ddi_calculate_wrpll(int clock /* in Hz */,
+ int ref_clock,
+ struct skl_wrpll_params *wrpll_params)
+{
+ static const u64 dco_central_freq[3] = { 8400000000ULL,
+ 9000000000ULL,
+ 9600000000ULL };
+ static const u8 even_dividers[] = { 4, 6, 8, 10, 12, 14, 16, 18, 20,
+ 24, 28, 30, 32, 36, 40, 42, 44,
+ 48, 52, 54, 56, 60, 64, 66, 68,
+ 70, 72, 76, 78, 80, 84, 88, 90,
+ 92, 96, 98 };
+ static const u8 odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
+ static const struct {
+ const u8 *list;
+ int n_dividers;
+ } dividers[] = {
+ { even_dividers, ARRAY_SIZE(even_dividers) },
+ { odd_dividers, ARRAY_SIZE(odd_dividers) },
+ };
+ struct skl_wrpll_context ctx = {
+ .min_deviation = U64_MAX,
+ };
+ unsigned int dco, d, i;
+ unsigned int p0, p1, p2;
+ u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
+
+ for (d = 0; d < ARRAY_SIZE(dividers); d++) {
+ for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
+ for (i = 0; i < dividers[d].n_dividers; i++) {
+ unsigned int p = dividers[d].list[i];
+ u64 dco_freq = p * afe_clock;
+
+ skl_wrpll_try_divider(&ctx,
+ dco_central_freq[dco],
+ dco_freq,
+ p);
+ /*
+ * Skip the remaining dividers once we're sure to
+ * have found the definitive divider; we can't
+ * improve on a 0 deviation.
+ */
+ if (ctx.min_deviation == 0)
+ goto skip_remaining_dividers;
+ }
+ }
+
+skip_remaining_dividers:
+ /*
+ * If a solution is found with an even divider, prefer
+ * this one.
+ */
+ if (d == 0 && ctx.p)
+ break;
+ }
+
+ if (!ctx.p)
+ return -EINVAL;
+
+ /*
+ * gcc incorrectly warns that these may be used uninitialized.
+ * To be fair, that's hard for it to prove.
+ */
+ p0 = p1 = p2 = 0;
+ skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
+ skl_wrpll_params_populate(wrpll_params, afe_clock, ref_clock,
+ ctx.central_freq, p0, p1, p2);
+
+ return 0;
+}
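+
+/*
+ * Worked example (illustrative, not bspec): for a 148.5 MHz HDMI clock
+ * the AFE clock is 742.5 MHz; the only candidate divider that puts the
+ * DCO inside a deviation window is p = 12 (dco = 8.91 GHz, -1% off the
+ * 9 GHz central frequency), and skl_wrpll_get_multipliers(12) then
+ * yields p0/p1/p2 = 2/3/2.
+ */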
+
+static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll,
+ const struct intel_dpll_hw_state *pll_state)
+{
+ int ref_clock = i915->display.dpll.ref_clks.nssc;
+ u32 p0, p1, p2, dco_freq;
+
+ p0 = pll_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
+ p2 = pll_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK;
+
+ if (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_MODE(1))
+ p1 = (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
+ else
+ p1 = 1;
+
+ switch (p0) {
+ case DPLL_CFGCR2_PDIV_1:
+ p0 = 1;
+ break;
+ case DPLL_CFGCR2_PDIV_2:
+ p0 = 2;
+ break;
+ case DPLL_CFGCR2_PDIV_3:
+ p0 = 3;
+ break;
+ case DPLL_CFGCR2_PDIV_7_INVALID:
+ /*
+ * Incorrect ASUS-Z170M BIOS setting; the HW seems to ignore bit #0,
+ * so handle this the same way as PDIV_7.
+ */
+ drm_dbg_kms(&i915->drm, "Invalid WRPLL PDIV divider value, fixing it.\n");
+ fallthrough;
+ case DPLL_CFGCR2_PDIV_7:
+ p0 = 7;
+ break;
+ default:
+ MISSING_CASE(p0);
+ return 0;
+ }
+
+ switch (p2) {
+ case DPLL_CFGCR2_KDIV_5:
+ p2 = 5;
+ break;
+ case DPLL_CFGCR2_KDIV_2:
+ p2 = 2;
+ break;
+ case DPLL_CFGCR2_KDIV_3:
+ p2 = 3;
+ break;
+ case DPLL_CFGCR2_KDIV_1:
+ p2 = 1;
+ break;
+ default:
+ MISSING_CASE(p2);
+ return 0;
+ }
+
+ dco_freq = (pll_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK) *
+ ref_clock;
+
+ dco_freq += ((pll_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) *
+ ref_clock / 0x8000;
+
+ if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
+ return 0;
+
+ return dco_freq / (p0 * p1 * p2 * 5);
+}
+
+static int skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct skl_wrpll_params wrpll_params = {};
+ u32 ctrl1, cfgcr1, cfgcr2;
+ int ret;
+
+ /*
+ * See comment in intel_dpll_hw_state to understand why we always use 0
+ * as the DPLL id in this function.
+ */
+ ctrl1 = DPLL_CTRL1_OVERRIDE(0);
+
+ ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
+
+ ret = skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
+ i915->display.dpll.ref_clks.nssc, &wrpll_params);
+ if (ret)
+ return ret;
+
+ cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
+ DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
+ wrpll_params.dco_integer;
+
+ cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
+ DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
+ DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
+ DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
+ wrpll_params.central_freq;
+
+ crtc_state->dpll_hw_state.ctrl1 = ctrl1;
+ crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
+ crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
+
+ crtc_state->port_clock = skl_ddi_wrpll_get_freq(i915, NULL,
+ &crtc_state->dpll_hw_state);
+
+ return 0;
+}
+
+static int
+skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
+{
+ u32 ctrl1;
+
+ /*
+ * See comment in intel_dpll_hw_state to understand why we always use 0
+ * as the DPLL id in this function.
+ */
+ ctrl1 = DPLL_CTRL1_OVERRIDE(0);
+ switch (crtc_state->port_clock / 2) {
+ case 81000:
+ ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
+ break;
+ case 135000:
+ ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
+ break;
+ case 270000:
+ ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
+ break;
+ /* eDP 1.4 rates */
+ case 162000:
+ ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
+ break;
+ case 108000:
+ ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
+ break;
+ case 216000:
+ ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
+ break;
+ }
+
+ crtc_state->dpll_hw_state.ctrl1 = ctrl1;
+
+ return 0;
+}
+
+static int skl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll,
+ const struct intel_dpll_hw_state *pll_state)
+{
+ int link_clock = 0;
+
+ switch ((pll_state->ctrl1 & DPLL_CTRL1_LINK_RATE_MASK(0)) >>
+ DPLL_CTRL1_LINK_RATE_SHIFT(0)) {
+ case DPLL_CTRL1_LINK_RATE_810:
+ link_clock = 81000;
+ break;
+ case DPLL_CTRL1_LINK_RATE_1080:
+ link_clock = 108000;
+ break;
+ case DPLL_CTRL1_LINK_RATE_1350:
+ link_clock = 135000;
+ break;
+ case DPLL_CTRL1_LINK_RATE_1620:
+ link_clock = 162000;
+ break;
+ case DPLL_CTRL1_LINK_RATE_2160:
+ link_clock = 216000;
+ break;
+ case DPLL_CTRL1_LINK_RATE_2700:
+ link_clock = 270000;
+ break;
+ default:
+ drm_WARN(&i915->drm, 1, "Unsupported link rate\n");
+ break;
+ }
+
+ return link_clock * 2;
+}
+
+static int skl_compute_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
+{
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ return skl_ddi_hdmi_pll_dividers(crtc_state);
+ else if (intel_crtc_has_dp_encoder(crtc_state))
+ return skl_ddi_dp_set_dpll_hw_state(crtc_state);
+ else
+ return -EINVAL;
+}
+
+static int skl_get_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
+{
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct intel_shared_dpll *pll;
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
+ pll = intel_find_shared_dpll(state, crtc,
+ &crtc_state->dpll_hw_state,
+ BIT(DPLL_ID_SKL_DPLL0));
+ else
+ pll = intel_find_shared_dpll(state, crtc,
+ &crtc_state->dpll_hw_state,
+ BIT(DPLL_ID_SKL_DPLL3) |
+ BIT(DPLL_ID_SKL_DPLL2) |
+ BIT(DPLL_ID_SKL_DPLL1));
+ if (!pll)
+ return -EINVAL;
+
+ intel_reference_shared_dpll(state, crtc,
+ pll, &crtc_state->dpll_hw_state);
+
+ crtc_state->shared_dpll = pll;
+
+ return 0;
+}
+
+static int skl_ddi_pll_get_freq(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll,
+ const struct intel_dpll_hw_state *pll_state)
+{
+ /*
+ * The stored ctrl1 value is already shifted down for each PLL, so just
+ * use id 0 to get the internal shift of each field.
+ */
+ if (pll_state->ctrl1 & DPLL_CTRL1_HDMI_MODE(0))
+ return skl_ddi_wrpll_get_freq(i915, pll, pll_state);
+ else
+ return skl_ddi_lcpll_get_freq(i915, pll, pll_state);
+}
+
+static void skl_update_dpll_ref_clks(struct drm_i915_private *i915)
+{
+ /* No SSC ref */
+ i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref;
+}
+
+static void skl_dump_hw_state(struct drm_i915_private *dev_priv,
+ const struct intel_dpll_hw_state *hw_state)
+{
+ drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: "
+ "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
+ hw_state->ctrl1,
+ hw_state->cfgcr1,
+ hw_state->cfgcr2);
+}
+
+static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
+ .enable = skl_ddi_pll_enable,
+ .disable = skl_ddi_pll_disable,
+ .get_hw_state = skl_ddi_pll_get_hw_state,
+ .get_freq = skl_ddi_pll_get_freq,
+};
+
+static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
+ .enable = skl_ddi_dpll0_enable,
+ .disable = skl_ddi_dpll0_disable,
+ .get_hw_state = skl_ddi_dpll0_get_hw_state,
+ .get_freq = skl_ddi_pll_get_freq,
+};
+
+static const struct dpll_info skl_plls[] = {
+ { "DPLL 0", &skl_ddi_dpll0_funcs, DPLL_ID_SKL_DPLL0, INTEL_DPLL_ALWAYS_ON },
+ { "DPLL 1", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
+ { "DPLL 2", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
+ { "DPLL 3", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL3, 0 },
+ { },
+};
+
+static const struct intel_dpll_mgr skl_pll_mgr = {
+ .dpll_info = skl_plls,
+ .compute_dplls = skl_compute_dpll,
+ .get_dplls = skl_get_dpll,
+ .put_dplls = intel_put_dpll,
+ .update_ref_clks = skl_update_dpll_ref_clks,
+ .dump_hw_state = skl_dump_hw_state,
+};
+
+static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ u32 temp;
+ enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
+ enum dpio_phy phy;
+ enum dpio_channel ch;
+
+ bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
+
+ /* Non-SSC reference */
+ temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
+ temp |= PORT_PLL_REF_SEL;
+ intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
+
+ if (IS_GEMINILAKE(dev_priv)) {
+ temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
+ temp |= PORT_PLL_POWER_ENABLE;
+ intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
+
+ if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
+ PORT_PLL_POWER_STATE), 200))
+ drm_err(&dev_priv->drm,
+ "Power state not set for PLL:%d\n", port);
+ }
+
+ /* Disable 10 bit clock */
+ temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
+ temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
+ intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);
+
+ /* Write P1 & P2 */
+ temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch));
+ temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK);
+ temp |= pll->state.hw_state.ebb0;
+ intel_de_write(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch), temp);
+
+ /* Write M2 integer */
+ temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0));
+ temp &= ~PORT_PLL_M2_INT_MASK;
+ temp |= pll->state.hw_state.pll0;
+ intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 0), temp);
+
+ /* Write N */
+ temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1));
+ temp &= ~PORT_PLL_N_MASK;
+ temp |= pll->state.hw_state.pll1;
+ intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 1), temp);
+
+ /* Write M2 fraction */
+ temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2));
+ temp &= ~PORT_PLL_M2_FRAC_MASK;
+ temp |= pll->state.hw_state.pll2;
+ intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 2), temp);
+
+ /* Write M2 fraction enable */
+ temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3));
+ temp &= ~PORT_PLL_M2_FRAC_ENABLE;
+ temp |= pll->state.hw_state.pll3;
+ intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 3), temp);
+
+ /* Write coeff */
+ temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
+ temp &= ~PORT_PLL_PROP_COEFF_MASK;
+ temp &= ~PORT_PLL_INT_COEFF_MASK;
+ temp &= ~PORT_PLL_GAIN_CTL_MASK;
+ temp |= pll->state.hw_state.pll6;
+ intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 6), temp);
+
+ /* Write calibration val */
+ temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8));
+ temp &= ~PORT_PLL_TARGET_CNT_MASK;
+ temp |= pll->state.hw_state.pll8;
+ intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 8), temp);
+
+ temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9));
+ temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
+ temp |= pll->state.hw_state.pll9;
+ intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 9), temp);
+
+ temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
+ temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
+ temp &= ~PORT_PLL_DCO_AMP_MASK;
+ temp |= pll->state.hw_state.pll10;
+ intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 10), temp);
+
+ /* Recalibrate with new settings */
+ temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
+ temp |= PORT_PLL_RECALIBRATE;
+ intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);
+ temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
+ temp |= pll->state.hw_state.ebb4;
+ intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);
+
+ /* Enable PLL */
+ temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
+ temp |= PORT_PLL_ENABLE;
+ intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
+ intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
+
+ if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
+ 200))
+ drm_err(&dev_priv->drm, "PLL %d not locked\n", port);
+
+ if (IS_GEMINILAKE(dev_priv)) {
+ temp = intel_de_read(dev_priv, BXT_PORT_TX_DW5_LN0(phy, ch));
+ temp |= DCC_DELAY_RANGE_2;
+ intel_de_write(dev_priv, BXT_PORT_TX_DW5_GRP(phy, ch), temp);
+ }
+
+ /*
+ * While we write to the group register to program all lanes at once, we
+ * can only read back individual lane registers, so we pick lanes 0/1
+ * for that.
+ */
+ temp = intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN01(phy, ch));
+ temp &= ~LANE_STAGGER_MASK;
+ temp &= ~LANESTAGGER_STRAP_OVRD;
+ temp |= pll->state.hw_state.pcsdw12;
+ intel_de_write(dev_priv, BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
+}
+
+static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
+ u32 temp;
+
+ temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
+ temp &= ~PORT_PLL_ENABLE;
+ intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
+ intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
+
+ if (IS_GEMINILAKE(dev_priv)) {
+ temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
+ temp &= ~PORT_PLL_POWER_ENABLE;
+ intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
+
+ if (wait_for_us(!(intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
+ PORT_PLL_POWER_STATE), 200))
+ drm_err(&dev_priv->drm,
+ "Power state not reset for PLL:%d\n", port);
+ }
+}
+
+static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ struct intel_dpll_hw_state *hw_state)
+{
+ enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
+ intel_wakeref_t wakeref;
+ enum dpio_phy phy;
+ enum dpio_channel ch;
+ u32 val;
+ bool ret;
+
+ bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
+
+ wakeref = intel_display_power_get_if_enabled(dev_priv,
+ POWER_DOMAIN_DISPLAY_CORE);
+ if (!wakeref)
+ return false;
+
+ ret = false;
+
+ val = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
+ if (!(val & PORT_PLL_ENABLE))
+ goto out;
+
+ hw_state->ebb0 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch));
+ hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;
+
+ hw_state->ebb4 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
+ hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;
+
+ hw_state->pll0 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0));
+ hw_state->pll0 &= PORT_PLL_M2_INT_MASK;
+
+ hw_state->pll1 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1));
+ hw_state->pll1 &= PORT_PLL_N_MASK;
+
+ hw_state->pll2 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2));
+ hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;
+
+ hw_state->pll3 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3));
+ hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;
+
+ hw_state->pll6 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
+ hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
+ PORT_PLL_INT_COEFF_MASK |
+ PORT_PLL_GAIN_CTL_MASK;
+
+ hw_state->pll8 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8));
+ hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;
+
+ hw_state->pll9 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9));
+ hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;
+
+ hw_state->pll10 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
+ hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
+ PORT_PLL_DCO_AMP_MASK;
+
+ /*
+ * While we write to the group register to program all lanes at once, we
+ * can only read back individual lane registers. We configure all lanes
+ * the same way, so just read out lanes 0/1 here and log a note if lanes
+ * 2/3 differ.
+ */
+ hw_state->pcsdw12 = intel_de_read(dev_priv,
+ BXT_PORT_PCS_DW12_LN01(phy, ch));
+ if (intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
+ drm_dbg(&dev_priv->drm,
+ "lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
+ hw_state->pcsdw12,
+ intel_de_read(dev_priv,
+ BXT_PORT_PCS_DW12_LN23(phy, ch)));
+ hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;
+
+ ret = true;
+
+out:
+ intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
+
+ return ret;
+}
+
+/* pre-calculated values for DP link rates */
+static const struct dpll bxt_dp_clk_val[] = {
+ /* m2 is .22 binary fixed point */
+ { .dot = 162000, .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
+ { .dot = 270000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
+ { .dot = 540000, .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
+ { .dot = 216000, .p1 = 3, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
+ { .dot = 243000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6133333 /* 24.3 */ },
+ { .dot = 324000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
+ { .dot = 432000, .p1 = 3, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
+};
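+
+/*
+ * Example of the .22 fixed-point encoding above: 0x819999a / (1 << 22) =
+ * 32.4, i.e. the bits above bit 21 hold the integer part (32) and the
+ * low 22 bits hold the fraction (0x19999a / 0x400000 ~= 0.4).
+ */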
+
+static int
+bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
+ struct dpll *clk_div)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+
+ /*
+ * Calculate the HDMI dividers.
+ *
+ * FIXME: tie the following calculation into
+ * i9xx_crtc_compute_clock
+ */
+ if (!bxt_find_best_dpll(crtc_state, clk_div))
+ return -EINVAL;
+
+ drm_WARN_ON(&i915->drm, clk_div->m1 != 2);
+
+ return 0;
+}
+
+static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
+ struct dpll *clk_div)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ int i;
+
+ *clk_div = bxt_dp_clk_val[0];
+ for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
+ if (crtc_state->port_clock == bxt_dp_clk_val[i].dot) {
+ *clk_div = bxt_dp_clk_val[i];
+ break;
+ }
+ }
+
+ chv_calc_dpll_params(i915->display.dpll.ref_clks.nssc, clk_div);
+
+ drm_WARN_ON(&i915->drm, clk_div->vco == 0 ||
+ clk_div->dot != crtc_state->port_clock);
+}
+
+static int bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
+ const struct dpll *clk_div)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_dpll_hw_state *dpll_hw_state = &crtc_state->dpll_hw_state;
+ int clock = crtc_state->port_clock;
+ int vco = clk_div->vco;
+ u32 prop_coef, int_coef, gain_ctl, targ_cnt;
+ u32 lanestagger;
+
+ if (vco >= 6200000 && vco <= 6700000) {
+ prop_coef = 4;
+ int_coef = 9;
+ gain_ctl = 3;
+ targ_cnt = 8;
+ } else if ((vco > 5400000 && vco < 6200000) ||
+ (vco >= 4800000 && vco < 5400000)) {
+ prop_coef = 5;
+ int_coef = 11;
+ gain_ctl = 3;
+ targ_cnt = 9;
+ } else if (vco == 5400000) {
+ prop_coef = 3;
+ int_coef = 8;
+ gain_ctl = 1;
+ targ_cnt = 9;
+ } else {
+ drm_err(&i915->drm, "Invalid VCO\n");
+ return -EINVAL;
+ }
+
+ if (clock > 270000)
+ lanestagger = 0x18;
+ else if (clock > 135000)
+ lanestagger = 0x0d;
+ else if (clock > 67000)
+ lanestagger = 0x07;
+ else if (clock > 33000)
+ lanestagger = 0x04;
+ else
+ lanestagger = 0x02;
+
+ dpll_hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
+ dpll_hw_state->pll0 = PORT_PLL_M2_INT(clk_div->m2 >> 22);
+ dpll_hw_state->pll1 = PORT_PLL_N(clk_div->n);
+ dpll_hw_state->pll2 = PORT_PLL_M2_FRAC(clk_div->m2 & 0x3fffff);
+
+ if (clk_div->m2 & 0x3fffff)
+ dpll_hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;
+
+ dpll_hw_state->pll6 = PORT_PLL_PROP_COEFF(prop_coef) |
+ PORT_PLL_INT_COEFF(int_coef) |
+ PORT_PLL_GAIN_CTL(gain_ctl);
+
+ dpll_hw_state->pll8 = PORT_PLL_TARGET_CNT(targ_cnt);
+
+ dpll_hw_state->pll9 = PORT_PLL_LOCK_THRESHOLD(5);
+
+ dpll_hw_state->pll10 = PORT_PLL_DCO_AMP(15) |
+ PORT_PLL_DCO_AMP_OVR_EN_H;
+
+ dpll_hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;
+
+ dpll_hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;
+
+ return 0;
+}
+
+static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll,
+ const struct intel_dpll_hw_state *pll_state)
+{
+ struct dpll clock;
+
+ clock.m1 = 2;
+ clock.m2 = REG_FIELD_GET(PORT_PLL_M2_INT_MASK, pll_state->pll0) << 22;
+ if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
+ clock.m2 |= REG_FIELD_GET(PORT_PLL_M2_FRAC_MASK, pll_state->pll2);
+ clock.n = REG_FIELD_GET(PORT_PLL_N_MASK, pll_state->pll1);
+ clock.p1 = REG_FIELD_GET(PORT_PLL_P1_MASK, pll_state->ebb0);
+ clock.p2 = REG_FIELD_GET(PORT_PLL_P2_MASK, pll_state->ebb0);
+
+ return chv_calc_dpll_params(i915->display.dpll.ref_clks.nssc, &clock);
+}
+
+static int
+bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
+{
+ struct dpll clk_div = {};
+
+ bxt_ddi_dp_pll_dividers(crtc_state, &clk_div);
+
+ return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
+}
+
+static int
+bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct dpll clk_div = {};
+ int ret;
+
+ bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div);
+
+ ret = bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
+ if (ret)
+ return ret;
+
+ crtc_state->port_clock = bxt_ddi_pll_get_freq(i915, NULL,
+ &crtc_state->dpll_hw_state);
+
+ return 0;
+}
+
+static int bxt_compute_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
+{
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ return bxt_ddi_hdmi_set_dpll_hw_state(crtc_state);
+ else if (intel_crtc_has_dp_encoder(crtc_state))
+ return bxt_ddi_dp_set_dpll_hw_state(crtc_state);
+ else
+ return -EINVAL;
+}
+
+static int bxt_get_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
+{
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_shared_dpll *pll;
+ enum intel_dpll_id id;
+
+ /* 1:1 mapping between ports and PLLs */
+ id = (enum intel_dpll_id) encoder->port;
+ pll = intel_get_shared_dpll_by_id(dev_priv, id);
+
+ drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] using pre-allocated %s\n",
+ crtc->base.base.id, crtc->base.name, pll->info->name);
+
+ intel_reference_shared_dpll(state, crtc,
+ pll, &crtc_state->dpll_hw_state);
+
+ crtc_state->shared_dpll = pll;
+
+ return 0;
+}
+
+static void bxt_update_dpll_ref_clks(struct drm_i915_private *i915)
+{
+ i915->display.dpll.ref_clks.ssc = 100000;
+ i915->display.dpll.ref_clks.nssc = 100000;
+ /* DSI non-SSC ref 19.2 MHz */
+}
+
+static void bxt_dump_hw_state(struct drm_i915_private *dev_priv,
+ const struct intel_dpll_hw_state *hw_state)
+{
+ drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
+ "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
+ "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
+ hw_state->ebb0,
+ hw_state->ebb4,
+ hw_state->pll0,
+ hw_state->pll1,
+ hw_state->pll2,
+ hw_state->pll3,
+ hw_state->pll6,
+ hw_state->pll8,
+ hw_state->pll9,
+ hw_state->pll10,
+ hw_state->pcsdw12);
+}
+
+static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
+ .enable = bxt_ddi_pll_enable,
+ .disable = bxt_ddi_pll_disable,
+ .get_hw_state = bxt_ddi_pll_get_hw_state,
+ .get_freq = bxt_ddi_pll_get_freq,
+};
+
+static const struct dpll_info bxt_plls[] = {
+ { "PORT PLL A", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
+ { "PORT PLL B", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
+ { "PORT PLL C", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
+ { },
+};
+
+static const struct intel_dpll_mgr bxt_pll_mgr = {
+ .dpll_info = bxt_plls,
+ .compute_dplls = bxt_compute_dpll,
+ .get_dplls = bxt_get_dpll,
+ .put_dplls = intel_put_dpll,
+ .update_ref_clks = bxt_update_dpll_ref_clks,
+ .dump_hw_state = bxt_dump_hw_state,
+};
+
+static void icl_wrpll_get_multipliers(int bestdiv, int *pdiv,
+ int *qdiv, int *kdiv)
+{
+ /* even dividers */
+ if (bestdiv % 2 == 0) {
+ if (bestdiv == 2) {
+ *pdiv = 2;
+ *qdiv = 1;
+ *kdiv = 1;
+ } else if (bestdiv % 4 == 0) {
+ *pdiv = 2;
+ *qdiv = bestdiv / 4;
+ *kdiv = 2;
+ } else if (bestdiv % 6 == 0) {
+ *pdiv = 3;
+ *qdiv = bestdiv / 6;
+ *kdiv = 2;
+ } else if (bestdiv % 5 == 0) {
+ *pdiv = 5;
+ *qdiv = bestdiv / 10;
+ *kdiv = 2;
+ } else if (bestdiv % 14 == 0) {
+ *pdiv = 7;
+ *qdiv = bestdiv / 14;
+ *kdiv = 2;
+ }
+ } else {
+ if (bestdiv == 3 || bestdiv == 5 || bestdiv == 7) {
+ *pdiv = bestdiv;
+ *qdiv = 1;
+ *kdiv = 1;
+ } else { /* 9, 15, 21 */
+ *pdiv = bestdiv / 3;
+ *qdiv = 1;
+ *kdiv = 3;
+ }
+ }
+}
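+
+/*
+ * Worked example (arithmetic only, not bspec): bestdiv = 14 is even and
+ * not divisible by 4 or 6 (nor is 14 % 5 == 0), but 14 % 14 == 0, so
+ * pdiv = 7, qdiv = 1 and kdiv = 2; pdiv * qdiv * kdiv = 14 reproduces
+ * the divider.
+ */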
+
+static void icl_wrpll_params_populate(struct skl_wrpll_params *params,
+ u32 dco_freq, u32 ref_freq,
+ int pdiv, int qdiv, int kdiv)
+{
+ u32 dco;
+
+ switch (kdiv) {
+ case 1:
+ params->kdiv = 1;
+ break;
+ case 2:
+ params->kdiv = 2;
+ break;
+ case 3:
+ params->kdiv = 4;
+ break;
+ default:
+ WARN(1, "Incorrect KDiv\n");
+ }
+
+ switch (pdiv) {
+ case 2:
+ params->pdiv = 1;
+ break;
+ case 3:
+ params->pdiv = 2;
+ break;
+ case 5:
+ params->pdiv = 4;
+ break;
+ case 7:
+ params->pdiv = 8;
+ break;
+ default:
+ WARN(1, "Incorrect PDiv\n");
+ }
+
+ WARN_ON(kdiv != 2 && qdiv != 1);
+
+ params->qdiv_ratio = qdiv;
+ params->qdiv_mode = (qdiv == 1) ? 0 : 1;
+
+ dco = div_u64((u64)dco_freq << 15, ref_freq);
+
+ params->dco_integer = dco >> 15;
+ params->dco_fraction = dco & 0x7fff;
+}
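+
+/*
+ * Example of the 15-bit fractional split above (illustrative):
+ * dco_freq = 8910000 kHz with ref_freq = 19200 kHz gives
+ * dco_freq / ref_freq = 464.0625, i.e. dco_integer = 464 and
+ * dco_fraction = 0.0625 * 0x8000 = 0x800.
+ */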
+
+/*
+ * Display WA #22010492432: ehl, tgl, adl-s, adl-p
+ * Program half of the nominal DCO divider fraction value.
+ */
+static bool
+ehl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915)
+{
+ return ((IS_PLATFORM(i915, INTEL_ELKHARTLAKE) &&
+ IS_JSL_EHL_DISPLAY_STEP(i915, STEP_B0, STEP_FOREVER)) ||
+ IS_TIGERLAKE(i915) || IS_ALDERLAKE_S(i915) || IS_ALDERLAKE_P(i915)) &&
+ i915->display.dpll.ref_clks.nssc == 38400;
+}
+
+struct icl_combo_pll_params {
+ int clock;
+ struct skl_wrpll_params wrpll;
+};
+
+/*
+ * These values are already adjusted: they're the bits we write to the
+ * registers, not the logical values.
+ */
+static const struct icl_combo_pll_params icl_dp_combo_pll_24MHz_values[] = {
+ { 540000,
+ { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [0]: 5.4 */
+ .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
+ { 270000,
+ { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [1]: 2.7 */
+ .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
+ { 162000,
+ { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [2]: 1.62 */
+ .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
+ { 324000,
+ { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [3]: 3.24 */
+ .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
+ { 216000,
+ { .dco_integer = 0x168, .dco_fraction = 0x0000, /* [4]: 2.16 */
+ .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
+ { 432000,
+ { .dco_integer = 0x168, .dco_fraction = 0x0000, /* [5]: 4.32 */
+ .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
+ { 648000,
+ { .dco_integer = 0x195, .dco_fraction = 0x0000, /* [6]: 6.48 */
+ .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
+ { 810000,
+ { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [7]: 8.1 */
+ .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
+};
+
+/* Also used for 38.4 MHz values. */
+static const struct icl_combo_pll_params icl_dp_combo_pll_19_2MHz_values[] = {
+ { 540000,
+ { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [0]: 5.4 */
+ .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
+ { 270000,
+ { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [1]: 2.7 */
+ .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
+ { 162000,
+ { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [2]: 1.62 */
+ .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
+ { 324000,
+ { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [3]: 3.24 */
+ .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
+ { 216000,
+ { .dco_integer = 0x1C2, .dco_fraction = 0x0000, /* [4]: 2.16 */
+ .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
+ { 432000,
+ { .dco_integer = 0x1C2, .dco_fraction = 0x0000, /* [5]: 4.32 */
+ .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
+ { 648000,
+ { .dco_integer = 0x1FA, .dco_fraction = 0x2000, /* [6]: 6.48 */
+ .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
+ { 810000,
+ { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [7]: 8.1 */
+ .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
+};
+
+static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = {
+ .dco_integer = 0x151, .dco_fraction = 0x4000,
+ .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
+};
+
+static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
+ .dco_integer = 0x1A5, .dco_fraction = 0x7000,
+ .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
+};
+
+static const struct skl_wrpll_params tgl_tbt_pll_19_2MHz_values = {
+ .dco_integer = 0x54, .dco_fraction = 0x3000,
+ /* the following params are unused */
+ .pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
+};
+
+static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = {
+ .dco_integer = 0x43, .dco_fraction = 0x4000,
+ /* the following params are unused */
+};
+
+static int icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
+ struct skl_wrpll_params *pll_params)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ const struct icl_combo_pll_params *params =
+ dev_priv->display.dpll.ref_clks.nssc == 24000 ?
+ icl_dp_combo_pll_24MHz_values :
+ icl_dp_combo_pll_19_2MHz_values;
+ int clock = crtc_state->port_clock;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) {
+ if (clock == params[i].clock) {
+ *pll_params = params[i].wrpll;
+ return 0;
+ }
+ }
+
+ MISSING_CASE(clock);
+ return -EINVAL;
+}
+
+static int icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
+ struct skl_wrpll_params *pll_params)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+
+ if (DISPLAY_VER(dev_priv) >= 12) {
+ switch (dev_priv->display.dpll.ref_clks.nssc) {
+ default:
+ MISSING_CASE(dev_priv->display.dpll.ref_clks.nssc);
+ fallthrough;
+ case 19200:
+ case 38400:
+ *pll_params = tgl_tbt_pll_19_2MHz_values;
+ break;
+ case 24000:
+ *pll_params = tgl_tbt_pll_24MHz_values;
+ break;
+ }
+ } else {
+ switch (dev_priv->display.dpll.ref_clks.nssc) {
+ default:
+ MISSING_CASE(dev_priv->display.dpll.ref_clks.nssc);
+ fallthrough;
+ case 19200:
+ case 38400:
+ *pll_params = icl_tbt_pll_19_2MHz_values;
+ break;
+ case 24000:
+ *pll_params = icl_tbt_pll_24MHz_values;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int icl_ddi_tbt_pll_get_freq(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll,
+ const struct intel_dpll_hw_state *pll_state)
+{
+ /*
+ * The PLL outputs multiple frequencies at the same time; the selection
+ * is made at the DDI clock mux level.
+ */
+ drm_WARN_ON(&i915->drm, 1);
+
+ return 0;
+}
+
+static int icl_wrpll_ref_clock(struct drm_i915_private *i915)
+{
+ int ref_clock = i915->display.dpll.ref_clks.nssc;
+
+ /*
+ * For ICL+, the spec states: if the reference frequency is 38.4 MHz,
+ * use 19.2 MHz, because the DPLL automatically divides that by 2.
+ */
+ if (ref_clock == 38400)
+ ref_clock = 19200;
+
+ return ref_clock;
+}
+
+static int
+icl_calc_wrpll(struct intel_crtc_state *crtc_state,
+ struct skl_wrpll_params *wrpll_params)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ int ref_clock = icl_wrpll_ref_clock(i915);
+ u32 afe_clock = crtc_state->port_clock * 5;
+ u32 dco_min = 7998000;
+ u32 dco_max = 10000000;
+ u32 dco_mid = (dco_min + dco_max) / 2;
+ static const int dividers[] = { 2, 4, 6, 8, 10, 12, 14, 16,
+ 18, 20, 24, 28, 30, 32, 36, 40,
+ 42, 44, 48, 50, 52, 54, 56, 60,
+ 64, 66, 68, 70, 72, 76, 78, 80,
+ 84, 88, 90, 92, 96, 98, 100, 102,
+ 3, 5, 7, 9, 15, 21 };
+ u32 dco, best_dco = 0, dco_centrality = 0;
+	u32 best_dco_centrality = U32_MAX; /* the spec's 999999 MHz, i.e. "infinity" */
+ int d, best_div = 0, pdiv = 0, qdiv = 0, kdiv = 0;
+
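+	/*
+	 * For illustration: a 594000 kHz HDMI port clock gives
+	 * afe_clock = 2970000 kHz, and only divider 3 puts the DCO in
+	 * range (dco = 8910000 kHz), so best_div = 3, which
+	 * icl_wrpll_get_multipliers() splits into pdiv = 3, qdiv = 1,
+	 * kdiv = 1.
+	 */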
+ for (d = 0; d < ARRAY_SIZE(dividers); d++) {
+ dco = afe_clock * dividers[d];
+
+ if (dco <= dco_max && dco >= dco_min) {
+ dco_centrality = abs(dco - dco_mid);
+
+ if (dco_centrality < best_dco_centrality) {
+ best_dco_centrality = dco_centrality;
+ best_div = dividers[d];
+ best_dco = dco;
+ }
+ }
+ }
+
+ if (best_div == 0)
+ return -EINVAL;
+
+ icl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
+ icl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
+ pdiv, qdiv, kdiv);
+
+ return 0;
+}
+
+static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll,
+ const struct intel_dpll_hw_state *pll_state)
+{
+ int ref_clock = icl_wrpll_ref_clock(i915);
+ u32 dco_fraction;
+ u32 p0, p1, p2, dco_freq;
+
+ p0 = pll_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
+ p2 = pll_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK;
+
+ if (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
+ p1 = (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
+ DPLL_CFGCR1_QDIV_RATIO_SHIFT;
+ else
+ p1 = 1;
+
+ switch (p0) {
+ case DPLL_CFGCR1_PDIV_2:
+ p0 = 2;
+ break;
+ case DPLL_CFGCR1_PDIV_3:
+ p0 = 3;
+ break;
+ case DPLL_CFGCR1_PDIV_5:
+ p0 = 5;
+ break;
+ case DPLL_CFGCR1_PDIV_7:
+ p0 = 7;
+ break;
+ }
+
+ switch (p2) {
+ case DPLL_CFGCR1_KDIV_1:
+ p2 = 1;
+ break;
+ case DPLL_CFGCR1_KDIV_2:
+ p2 = 2;
+ break;
+ case DPLL_CFGCR1_KDIV_3:
+ p2 = 3;
+ break;
+ }
+
+ dco_freq = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) *
+ ref_clock;
+
+ dco_fraction = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
+ DPLL_CFGCR0_DCO_FRACTION_SHIFT;
+
+ if (ehl_combo_pll_div_frac_wa_needed(i915))
+ dco_fraction *= 2;
+
+ dco_freq += (dco_fraction * ref_clock) / 0x8000;
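+	/*
+	 * E.g. the 810000 kHz entry of icl_dp_combo_pll_19_2MHz_values
+	 * (dco_integer = 0x1A5 = 421, dco_fraction = 0x7000 = 28672):
+	 * dco_freq = 421 * 19200 + 28672 * 19200 / 0x8000 = 8100000 kHz,
+	 * and with p0 = 2, p1 = 1, p2 = 1 the division below returns
+	 * 8100000 / (2 * 1 * 1 * 5) = 810000 kHz, matching the table.
+	 */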
+
+ if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
+ return 0;
+
+ return dco_freq / (p0 * p1 * p2 * 5);
+}
+
+static void icl_calc_dpll_state(struct drm_i915_private *i915,
+ const struct skl_wrpll_params *pll_params,
+ struct intel_dpll_hw_state *pll_state)
+{
+ u32 dco_fraction = pll_params->dco_fraction;
+
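+	/*
+	 * This halving is undone by the corresponding doubling in
+	 * icl_ddi_combo_pll_get_freq(): on the affected platforms the
+	 * fractional divider is programmed at half its nominal value.
+	 */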
+ if (ehl_combo_pll_div_frac_wa_needed(i915))
+ dco_fraction = DIV_ROUND_CLOSEST(dco_fraction, 2);
+
+ pll_state->cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(dco_fraction) |
+ pll_params->dco_integer;
+
+ pll_state->cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params->qdiv_ratio) |
+ DPLL_CFGCR1_QDIV_MODE(pll_params->qdiv_mode) |
+ DPLL_CFGCR1_KDIV(pll_params->kdiv) |
+ DPLL_CFGCR1_PDIV(pll_params->pdiv);
+
+ if (DISPLAY_VER(i915) >= 12)
+ pll_state->cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
+ else
+ pll_state->cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;
+
+ if (i915->display.vbt.override_afc_startup)
+ pll_state->div0 = TGL_DPLL0_DIV0_AFC_STARTUP(i915->display.vbt.override_afc_startup_val);
+}
+
+static int icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
+ u32 *target_dco_khz,
+ struct intel_dpll_hw_state *state,
+ bool is_dkl)
+{
+ static const u8 div1_vals[] = { 7, 5, 3, 2 };
+ u32 dco_min_freq, dco_max_freq;
+ unsigned int i;
+ int div2;
+
+ dco_min_freq = is_dp ? 8100000 : use_ssc ? 8000000 : 7992000;
+ dco_max_freq = is_dp ? 8100000 : 10000000;
+
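+	/*
+	 * For DP the DCO must land exactly on 8100000 kHz: e.g. a
+	 * 540000 kHz link clock needs div1 * div2 = 3, which the search
+	 * below resolves to div1 = 3, div2 = 1 (7 and 5 leave no valid
+	 * div2).
+	 */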
+ for (i = 0; i < ARRAY_SIZE(div1_vals); i++) {
+ int div1 = div1_vals[i];
+
+ for (div2 = 10; div2 > 0; div2--) {
+ int dco = div1 * div2 * clock_khz * 5;
+ int a_divratio, tlinedrv, inputsel;
+ u32 hsdiv;
+
+ if (dco < dco_min_freq || dco > dco_max_freq)
+ continue;
+
+ if (div2 >= 2) {
+ /*
+ * Note: a_divratio not matching TGL BSpec
+ * algorithm but matching hardcoded values and
+ * working on HW for DP alt-mode at least
+ */
+ a_divratio = is_dp ? 10 : 5;
+ tlinedrv = is_dkl ? 1 : 2;
+ } else {
+ a_divratio = 5;
+ tlinedrv = 0;
+ }
+ inputsel = is_dp ? 0 : 1;
+
+ switch (div1) {
+ default:
+ MISSING_CASE(div1);
+ fallthrough;
+ case 2:
+ hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2;
+ break;
+ case 3:
+ hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3;
+ break;
+ case 5:
+ hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5;
+ break;
+ case 7:
+ hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7;
+ break;
+ }
+
+ *target_dco_khz = dco;
+
+ state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1);
+
+ state->mg_clktop2_coreclkctl1 =
+ MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(a_divratio);
+
+ state->mg_clktop2_hsclkctl =
+ MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) |
+ MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) |
+ hsdiv |
+ MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2);
+
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+/*
+ * The specification for this function uses real numbers, so the math had to be
+ * adapted to integer-only calculation, which is why it looks so different.
+ */
+static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
+ struct intel_dpll_hw_state *pll_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ int refclk_khz = dev_priv->display.dpll.ref_clks.nssc;
+ int clock = crtc_state->port_clock;
+ u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
+ u32 iref_ndiv, iref_trim, iref_pulse_w;
+ u32 prop_coeff, int_coeff;
+ u32 tdc_targetcnt, feedfwgain;
+ u64 ssc_stepsize, ssc_steplen, ssc_steplog;
+ u64 tmp;
+ bool use_ssc = false;
+ bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
+ bool is_dkl = DISPLAY_VER(dev_priv) >= 12;
+ int ret;
+
+ ret = icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
+ pll_state, is_dkl);
+ if (ret)
+ return ret;
+
+ m1div = 2;
+ m2div_int = dco_khz / (refclk_khz * m1div);
+ if (m2div_int > 255) {
+ if (!is_dkl) {
+ m1div = 4;
+ m2div_int = dco_khz / (refclk_khz * m1div);
+ }
+
+ if (m2div_int > 255)
+ return -EINVAL;
+ }
+ m2div_rem = dco_khz % (refclk_khz * m1div);
+
+ tmp = (u64)m2div_rem * (1 << 22);
+ do_div(tmp, refclk_khz * m1div);
+ m2div_frac = tmp;
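+	/*
+	 * E.g. dco_khz = 8100000 with refclk_khz = 38400 and m1div = 2:
+	 * m2div_int = 8100000 / 76800 = 105, m2div_rem = 36000 and
+	 * m2div_frac = 36000 * 2^22 / 76800 = 1966080.
+	 */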
+
+ switch (refclk_khz) {
+ case 19200:
+ iref_ndiv = 1;
+ iref_trim = 28;
+ iref_pulse_w = 1;
+ break;
+ case 24000:
+ iref_ndiv = 1;
+ iref_trim = 25;
+ iref_pulse_w = 2;
+ break;
+ case 38400:
+ iref_ndiv = 2;
+ iref_trim = 28;
+ iref_pulse_w = 1;
+ break;
+ default:
+ MISSING_CASE(refclk_khz);
+ return -EINVAL;
+ }
+
+ /*
+ * tdc_res = 0.000003
+ * tdc_targetcnt = int(2 / (tdc_res * 8 * 50 * 1.1) / refclk_mhz + 0.5)
+ *
+	 * The multiplication by 1000 comes from the refclk MHz to kHz
+	 * conversion. It was supposed to be a division, but we rearranged the
+	 * operations of the formula to avoid early divisions and thus avoid
+	 * amplifying the rounding errors.
+ *
+ * 0.000003 * 8 * 50 * 1.1 = 0.00132, also known as 132 / 100000, which
+ * we also rearrange to work with integers.
+ *
+ * The 0.5 transformed to 5 results in a multiplication by 10 and the
+ * last division by 10.
+ */
+ tdc_targetcnt = (2 * 1000 * 100000 * 10 / (132 * refclk_khz) + 5) / 10;
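+	/*
+	 * E.g. refclk_khz = 38400: 2000000000 / (132 * 38400) = 394, so
+	 * tdc_targetcnt = (394 + 5) / 10 = 39.
+	 */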
+
+ /*
+ * Here we divide dco_khz by 10 in order to allow the dividend to fit in
+ * 32 bits. That's not a problem since we round the division down
+ * anyway.
+ */
+ feedfwgain = (use_ssc || m2div_rem > 0) ?
+ m1div * 1000000 * 100 / (dco_khz * 3 / 10) : 0;
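+	/*
+	 * E.g. m1div = 2 and dco_khz = 8100000 give feedfwgain =
+	 * 200000000 / 2430000 = 82.
+	 */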
+
+ if (dco_khz >= 9000000) {
+ prop_coeff = 5;
+ int_coeff = 10;
+ } else {
+ prop_coeff = 4;
+ int_coeff = 8;
+ }
+
+ if (use_ssc) {
+ tmp = mul_u32_u32(dco_khz, 47 * 32);
+ do_div(tmp, refclk_khz * m1div * 10000);
+ ssc_stepsize = tmp;
+
+ tmp = mul_u32_u32(dco_khz, 1000);
+ ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32);
+ } else {
+ ssc_stepsize = 0;
+ ssc_steplen = 0;
+ }
+ ssc_steplog = 4;
+
+ /* write pll_state calculations */
+ if (is_dkl) {
+ pll_state->mg_pll_div0 = DKL_PLL_DIV0_INTEG_COEFF(int_coeff) |
+ DKL_PLL_DIV0_PROP_COEFF(prop_coeff) |
+ DKL_PLL_DIV0_FBPREDIV(m1div) |
+ DKL_PLL_DIV0_FBDIV_INT(m2div_int);
+ if (dev_priv->display.vbt.override_afc_startup) {
+ u8 val = dev_priv->display.vbt.override_afc_startup_val;
+
+ pll_state->mg_pll_div0 |= DKL_PLL_DIV0_AFC_STARTUP(val);
+ }
+
+ pll_state->mg_pll_div1 = DKL_PLL_DIV1_IREF_TRIM(iref_trim) |
+ DKL_PLL_DIV1_TDC_TARGET_CNT(tdc_targetcnt);
+
+ pll_state->mg_pll_ssc = DKL_PLL_SSC_IREF_NDIV_RATIO(iref_ndiv) |
+ DKL_PLL_SSC_STEP_LEN(ssc_steplen) |
+ DKL_PLL_SSC_STEP_NUM(ssc_steplog) |
+ (use_ssc ? DKL_PLL_SSC_EN : 0);
+
+ pll_state->mg_pll_bias = (m2div_frac ? DKL_PLL_BIAS_FRAC_EN_H : 0) |
+ DKL_PLL_BIAS_FBDIV_FRAC(m2div_frac);
+
+ pll_state->mg_pll_tdc_coldst_bias =
+ DKL_PLL_TDC_SSC_STEP_SIZE(ssc_stepsize) |
+ DKL_PLL_TDC_FEED_FWD_GAIN(feedfwgain);
+
+ } else {
+ pll_state->mg_pll_div0 =
+ (m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
+ MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
+ MG_PLL_DIV0_FBDIV_INT(m2div_int);
+
+ pll_state->mg_pll_div1 =
+ MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
+ MG_PLL_DIV1_DITHER_DIV_2 |
+ MG_PLL_DIV1_NDIVRATIO(1) |
+ MG_PLL_DIV1_FBPREDIV(m1div);
+
+ pll_state->mg_pll_lf =
+ MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
+ MG_PLL_LF_AFCCNTSEL_512 |
+ MG_PLL_LF_GAINCTRL(1) |
+ MG_PLL_LF_INT_COEFF(int_coeff) |
+ MG_PLL_LF_PROP_COEFF(prop_coeff);
+
+ pll_state->mg_pll_frac_lock =
+ MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
+ MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
+ MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
+ MG_PLL_FRAC_LOCK_DCODITHEREN |
+ MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
+ if (use_ssc || m2div_rem > 0)
+ pll_state->mg_pll_frac_lock |=
+ MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;
+
+ pll_state->mg_pll_ssc =
+ (use_ssc ? MG_PLL_SSC_EN : 0) |
+ MG_PLL_SSC_TYPE(2) |
+ MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
+ MG_PLL_SSC_STEPNUM(ssc_steplog) |
+ MG_PLL_SSC_FLLEN |
+ MG_PLL_SSC_STEPSIZE(ssc_stepsize);
+
+ pll_state->mg_pll_tdc_coldst_bias =
+ MG_PLL_TDC_COLDST_COLDSTART |
+ MG_PLL_TDC_COLDST_IREFINT_EN |
+ MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
+ MG_PLL_TDC_TDCOVCCORR_EN |
+ MG_PLL_TDC_TDCSEL(3);
+
+ pll_state->mg_pll_bias =
+ MG_PLL_BIAS_BIAS_GB_SEL(3) |
+ MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
+ MG_PLL_BIAS_BIAS_BONUS(10) |
+ MG_PLL_BIAS_BIASCAL_EN |
+ MG_PLL_BIAS_CTRIM(12) |
+ MG_PLL_BIAS_VREF_RDAC(4) |
+ MG_PLL_BIAS_IREFTRIM(iref_trim);
+
+ if (refclk_khz == 38400) {
+ pll_state->mg_pll_tdc_coldst_bias_mask =
+ MG_PLL_TDC_COLDST_COLDSTART;
+ pll_state->mg_pll_bias_mask = 0;
+ } else {
+ pll_state->mg_pll_tdc_coldst_bias_mask = -1U;
+ pll_state->mg_pll_bias_mask = -1U;
+ }
+
+ pll_state->mg_pll_tdc_coldst_bias &=
+ pll_state->mg_pll_tdc_coldst_bias_mask;
+ pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask;
+ }
+
+ return 0;
+}
+
+static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *dev_priv,
+ const struct intel_shared_dpll *pll,
+ const struct intel_dpll_hw_state *pll_state)
+{
+ u32 m1, m2_int, m2_frac, div1, div2, ref_clock;
+ u64 tmp;
+
+ ref_clock = dev_priv->display.dpll.ref_clks.nssc;
+
+ if (DISPLAY_VER(dev_priv) >= 12) {
+ m1 = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
+ m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT;
+ m2_int = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK;
+
+ if (pll_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) {
+ m2_frac = pll_state->mg_pll_bias &
+ DKL_PLL_BIAS_FBDIV_FRAC_MASK;
+ m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT;
+ } else {
+ m2_frac = 0;
+ }
+ } else {
+ m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
+ m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;
+
+ if (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) {
+ m2_frac = pll_state->mg_pll_div0 &
+ MG_PLL_DIV0_FBDIV_FRAC_MASK;
+ m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT;
+ } else {
+ m2_frac = 0;
+ }
+ }
+
+ switch (pll_state->mg_clktop2_hsclkctl &
+ MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
+ case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2:
+ div1 = 2;
+ break;
+ case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3:
+ div1 = 3;
+ break;
+ case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5:
+ div1 = 5;
+ break;
+ case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7:
+ div1 = 7;
+ break;
+ default:
+ MISSING_CASE(pll_state->mg_clktop2_hsclkctl);
+ return 0;
+ }
+
+ div2 = (pll_state->mg_clktop2_hsclkctl &
+ MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
+ MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT;
+
+	/* a div2 value of 0 is the same as 1, i.e. no division */
+ if (div2 == 0)
+ div2 = 1;
+
+ /*
+ * Adjust the original formula to delay the division by 2^22 in order to
+ * minimize possible rounding errors.
+ */
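+	/*
+	 * E.g. m1 = 2, m2_int = 105, m2_frac = 1966080, ref_clock = 38400,
+	 * div1 = 3, div2 = 1 (the icl_calc_mg_pll_state() example above):
+	 * tmp = 8064000 + 36000 = 8100000, and dividing by 5 * 3 * 1
+	 * recovers the 540000 kHz port clock.
+	 */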
+ tmp = (u64)m1 * m2_int * ref_clock +
+ (((u64)m1 * m2_frac * ref_clock) >> 22);
+ tmp = div_u64(tmp, 5 * div1 * div2);
+
+ return tmp;
+}
+
+/**
+ * icl_set_active_port_dpll - select the active port DPLL for a given CRTC
+ * @crtc_state: state for the CRTC to select the DPLL for
+ * @port_dpll_id: the active @port_dpll_id to select
+ *
+ * Select the given @port_dpll_id instance from the DPLLs reserved for the
+ * CRTC.
+ */
+void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
+ enum icl_port_dpll_id port_dpll_id)
+{
+ struct icl_port_dpll *port_dpll =
+ &crtc_state->icl_port_dplls[port_dpll_id];
+
+ crtc_state->shared_dpll = port_dpll->pll;
+ crtc_state->dpll_hw_state = port_dpll->hw_state;
+}
+
+static void icl_update_active_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
+{
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct intel_digital_port *primary_port;
+ enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
+
+ primary_port = encoder->type == INTEL_OUTPUT_DP_MST ?
+ enc_to_mst(encoder)->primary :
+ enc_to_dig_port(encoder);
+
+ if (primary_port &&
+ (intel_tc_port_in_dp_alt_mode(primary_port) ||
+ intel_tc_port_in_legacy_mode(primary_port)))
+ port_dpll_id = ICL_PORT_DPLL_MG_PHY;
+
+ icl_set_active_port_dpll(crtc_state, port_dpll_id);
+}
+
+static u32 intel_get_hti_plls(struct drm_i915_private *i915)
+{
+ if (!(i915->hti_state & HDPORT_ENABLED))
+ return 0;
+
+ return REG_FIELD_GET(HDPORT_DPLL_USED_MASK, i915->hti_state);
+}
+
+static int icl_compute_combo_phy_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct icl_port_dpll *port_dpll =
+ &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
+ struct skl_wrpll_params pll_params = {};
+ int ret;
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
+ ret = icl_calc_wrpll(crtc_state, &pll_params);
+ else
+ ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);
+
+ if (ret)
+ return ret;
+
+ icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);
+
+ /* this is mainly for the fastset check */
+ icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_DEFAULT);
+
+ crtc_state->port_clock = icl_ddi_combo_pll_get_freq(dev_priv, NULL,
+ &port_dpll->hw_state);
+
+ return 0;
+}
+
+static int icl_get_combo_phy_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct icl_port_dpll *port_dpll =
+ &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
+ enum port port = encoder->port;
+ unsigned long dpll_mask;
+
+ if (IS_ALDERLAKE_S(dev_priv)) {
+ dpll_mask =
+ BIT(DPLL_ID_DG1_DPLL3) |
+ BIT(DPLL_ID_DG1_DPLL2) |
+ BIT(DPLL_ID_ICL_DPLL1) |
+ BIT(DPLL_ID_ICL_DPLL0);
+ } else if (IS_DG1(dev_priv)) {
+ if (port == PORT_D || port == PORT_E) {
+ dpll_mask =
+ BIT(DPLL_ID_DG1_DPLL2) |
+ BIT(DPLL_ID_DG1_DPLL3);
+ } else {
+ dpll_mask =
+ BIT(DPLL_ID_DG1_DPLL0) |
+ BIT(DPLL_ID_DG1_DPLL1);
+ }
+ } else if (IS_ROCKETLAKE(dev_priv)) {
+ dpll_mask =
+ BIT(DPLL_ID_EHL_DPLL4) |
+ BIT(DPLL_ID_ICL_DPLL1) |
+ BIT(DPLL_ID_ICL_DPLL0);
+ } else if (IS_JSL_EHL(dev_priv) && port != PORT_A) {
+ dpll_mask =
+ BIT(DPLL_ID_EHL_DPLL4) |
+ BIT(DPLL_ID_ICL_DPLL1) |
+ BIT(DPLL_ID_ICL_DPLL0);
+ } else {
+ dpll_mask = BIT(DPLL_ID_ICL_DPLL1) | BIT(DPLL_ID_ICL_DPLL0);
+ }
+
+ /* Eliminate DPLLs from consideration if reserved by HTI */
+ dpll_mask &= ~intel_get_hti_plls(dev_priv);
+
+ port_dpll->pll = intel_find_shared_dpll(state, crtc,
+ &port_dpll->hw_state,
+ dpll_mask);
+ if (!port_dpll->pll)
+ return -EINVAL;
+
+ intel_reference_shared_dpll(state, crtc,
+ port_dpll->pll, &port_dpll->hw_state);
+
+ icl_update_active_dpll(state, crtc, encoder);
+
+ return 0;
+}
+
+static int icl_compute_tc_phy_dplls(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct icl_port_dpll *port_dpll =
+ &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
+ struct skl_wrpll_params pll_params = {};
+ int ret;
+
+ ret = icl_calc_tbt_pll(crtc_state, &pll_params);
+ if (ret)
+ return ret;
+
+ icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);
+
+ port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
+ ret = icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state);
+ if (ret)
+ return ret;
+
+ /* this is mainly for the fastset check */
+ icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_MG_PHY);
+
+ crtc_state->port_clock = icl_ddi_mg_pll_get_freq(dev_priv, NULL,
+ &port_dpll->hw_state);
+
+ return 0;
+}
+
+static int icl_get_tc_phy_dplls(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct icl_port_dpll *port_dpll =
+ &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
+ enum intel_dpll_id dpll_id;
+ int ret;
+
+ port_dpll->pll = intel_find_shared_dpll(state, crtc,
+ &port_dpll->hw_state,
+ BIT(DPLL_ID_ICL_TBTPLL));
+ if (!port_dpll->pll)
+ return -EINVAL;
+ intel_reference_shared_dpll(state, crtc,
+ port_dpll->pll, &port_dpll->hw_state);
+
+ port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
+ dpll_id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
+ encoder->port));
+ port_dpll->pll = intel_find_shared_dpll(state, crtc,
+ &port_dpll->hw_state,
+ BIT(dpll_id));
+ if (!port_dpll->pll) {
+ ret = -EINVAL;
+ goto err_unreference_tbt_pll;
+ }
+ intel_reference_shared_dpll(state, crtc,
+ port_dpll->pll, &port_dpll->hw_state);
+
+ icl_update_active_dpll(state, crtc, encoder);
+
+ return 0;
+
+err_unreference_tbt_pll:
+ port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
+ intel_unreference_shared_dpll(state, crtc, port_dpll->pll);
+
+ return ret;
+}
+
+static int icl_compute_dplls(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
+
+ if (intel_phy_is_combo(dev_priv, phy))
+ return icl_compute_combo_phy_dpll(state, crtc);
+ else if (intel_phy_is_tc(dev_priv, phy))
+ return icl_compute_tc_phy_dplls(state, crtc);
+
+ MISSING_CASE(phy);
+
+ return 0;
+}
+
+static int icl_get_dplls(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
+
+ if (intel_phy_is_combo(dev_priv, phy))
+ return icl_get_combo_phy_dpll(state, crtc, encoder);
+ else if (intel_phy_is_tc(dev_priv, phy))
+ return icl_get_tc_phy_dplls(state, crtc, encoder);
+
+ MISSING_CASE(phy);
+
+ return -EINVAL;
+}
+
+static void icl_put_dplls(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ enum icl_port_dpll_id id;
+
+ new_crtc_state->shared_dpll = NULL;
+
+ for (id = ICL_PORT_DPLL_DEFAULT; id < ICL_PORT_DPLL_COUNT; id++) {
+ const struct icl_port_dpll *old_port_dpll =
+ &old_crtc_state->icl_port_dplls[id];
+ struct icl_port_dpll *new_port_dpll =
+ &new_crtc_state->icl_port_dplls[id];
+
+ new_port_dpll->pll = NULL;
+
+ if (!old_port_dpll->pll)
+ continue;
+
+ intel_unreference_shared_dpll(state, crtc, old_port_dpll->pll);
+ }
+}
+
+static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ struct intel_dpll_hw_state *hw_state)
+{
+ const enum intel_dpll_id id = pll->info->id;
+ enum tc_port tc_port = icl_pll_id_to_tc_port(id);
+ intel_wakeref_t wakeref;
+ bool ret = false;
+ u32 val;
+
+ i915_reg_t enable_reg = intel_tc_pll_enable_reg(dev_priv, pll);
+
+ wakeref = intel_display_power_get_if_enabled(dev_priv,
+ POWER_DOMAIN_DISPLAY_CORE);
+ if (!wakeref)
+ return false;
+
+ val = intel_de_read(dev_priv, enable_reg);
+ if (!(val & PLL_ENABLE))
+ goto out;
+
+ hw_state->mg_refclkin_ctl = intel_de_read(dev_priv,
+ MG_REFCLKIN_CTL(tc_port));
+ hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
+
+ hw_state->mg_clktop2_coreclkctl1 =
+ intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port));
+ hw_state->mg_clktop2_coreclkctl1 &=
+ MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
+
+ hw_state->mg_clktop2_hsclkctl =
+ intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port));
+ hw_state->mg_clktop2_hsclkctl &=
+ MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
+ MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
+ MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
+ MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
+
+ hw_state->mg_pll_div0 = intel_de_read(dev_priv, MG_PLL_DIV0(tc_port));
+ hw_state->mg_pll_div1 = intel_de_read(dev_priv, MG_PLL_DIV1(tc_port));
+ hw_state->mg_pll_lf = intel_de_read(dev_priv, MG_PLL_LF(tc_port));
+ hw_state->mg_pll_frac_lock = intel_de_read(dev_priv,
+ MG_PLL_FRAC_LOCK(tc_port));
+ hw_state->mg_pll_ssc = intel_de_read(dev_priv, MG_PLL_SSC(tc_port));
+
+ hw_state->mg_pll_bias = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port));
+ hw_state->mg_pll_tdc_coldst_bias =
+ intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
+
+ if (dev_priv->display.dpll.ref_clks.nssc == 38400) {
+ hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
+ hw_state->mg_pll_bias_mask = 0;
+ } else {
+ hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
+ hw_state->mg_pll_bias_mask = -1U;
+ }
+
+ hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
+ hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;
+
+ ret = true;
+out:
+ intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
+ return ret;
+}
+
+static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ struct intel_dpll_hw_state *hw_state)
+{
+ const enum intel_dpll_id id = pll->info->id;
+ enum tc_port tc_port = icl_pll_id_to_tc_port(id);
+ intel_wakeref_t wakeref;
+ bool ret = false;
+ u32 val;
+
+ wakeref = intel_display_power_get_if_enabled(dev_priv,
+ POWER_DOMAIN_DISPLAY_CORE);
+ if (!wakeref)
+ return false;
+
+ val = intel_de_read(dev_priv, intel_tc_pll_enable_reg(dev_priv, pll));
+ if (!(val & PLL_ENABLE))
+ goto out;
+
+ /*
+ * All registers read here have the same HIP_INDEX_REG even though
+ * they are on different building blocks
+ */
+ hw_state->mg_refclkin_ctl = intel_dkl_phy_read(dev_priv,
+ DKL_REFCLKIN_CTL(tc_port), 2);
+ hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
+
+ hw_state->mg_clktop2_hsclkctl =
+ intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port), 2);
+ hw_state->mg_clktop2_hsclkctl &=
+ MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
+ MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
+ MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
+ MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
+
+ hw_state->mg_clktop2_coreclkctl1 =
+ intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port), 2);
+ hw_state->mg_clktop2_coreclkctl1 &=
+ MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
+
+ hw_state->mg_pll_div0 = intel_dkl_phy_read(dev_priv, DKL_PLL_DIV0(tc_port), 2);
+ val = DKL_PLL_DIV0_MASK;
+ if (dev_priv->display.vbt.override_afc_startup)
+ val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
+ hw_state->mg_pll_div0 &= val;
+
+ hw_state->mg_pll_div1 = intel_dkl_phy_read(dev_priv, DKL_PLL_DIV1(tc_port), 2);
+ hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK |
+ DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
+
+ hw_state->mg_pll_ssc = intel_dkl_phy_read(dev_priv, DKL_PLL_SSC(tc_port), 2);
+ hw_state->mg_pll_ssc &= (DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
+ DKL_PLL_SSC_STEP_LEN_MASK |
+ DKL_PLL_SSC_STEP_NUM_MASK |
+ DKL_PLL_SSC_EN);
+
+ hw_state->mg_pll_bias = intel_dkl_phy_read(dev_priv, DKL_PLL_BIAS(tc_port), 2);
+ hw_state->mg_pll_bias &= (DKL_PLL_BIAS_FRAC_EN_H |
+ DKL_PLL_BIAS_FBDIV_FRAC_MASK);
+
+ hw_state->mg_pll_tdc_coldst_bias =
+ intel_dkl_phy_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port), 2);
+ hw_state->mg_pll_tdc_coldst_bias &= (DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
+ DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
+
+ ret = true;
+out:
+ intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
+ return ret;
+}
+
+static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ struct intel_dpll_hw_state *hw_state,
+ i915_reg_t enable_reg)
+{
+ const enum intel_dpll_id id = pll->info->id;
+ intel_wakeref_t wakeref;
+ bool ret = false;
+ u32 val;
+
+ wakeref = intel_display_power_get_if_enabled(dev_priv,
+ POWER_DOMAIN_DISPLAY_CORE);
+ if (!wakeref)
+ return false;
+
+ val = intel_de_read(dev_priv, enable_reg);
+ if (!(val & PLL_ENABLE))
+ goto out;
+
+ if (IS_ALDERLAKE_S(dev_priv)) {
+ hw_state->cfgcr0 = intel_de_read(dev_priv, ADLS_DPLL_CFGCR0(id));
+ hw_state->cfgcr1 = intel_de_read(dev_priv, ADLS_DPLL_CFGCR1(id));
+ } else if (IS_DG1(dev_priv)) {
+ hw_state->cfgcr0 = intel_de_read(dev_priv, DG1_DPLL_CFGCR0(id));
+ hw_state->cfgcr1 = intel_de_read(dev_priv, DG1_DPLL_CFGCR1(id));
+ } else if (IS_ROCKETLAKE(dev_priv)) {
+ hw_state->cfgcr0 = intel_de_read(dev_priv,
+ RKL_DPLL_CFGCR0(id));
+ hw_state->cfgcr1 = intel_de_read(dev_priv,
+ RKL_DPLL_CFGCR1(id));
+ } else if (DISPLAY_VER(dev_priv) >= 12) {
+ hw_state->cfgcr0 = intel_de_read(dev_priv,
+ TGL_DPLL_CFGCR0(id));
+ hw_state->cfgcr1 = intel_de_read(dev_priv,
+ TGL_DPLL_CFGCR1(id));
+ if (dev_priv->display.vbt.override_afc_startup) {
+ hw_state->div0 = intel_de_read(dev_priv, TGL_DPLL0_DIV0(id));
+ hw_state->div0 &= TGL_DPLL0_DIV0_AFC_STARTUP_MASK;
+ }
+ } else {
+ if (IS_JSL_EHL(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
+ hw_state->cfgcr0 = intel_de_read(dev_priv,
+ ICL_DPLL_CFGCR0(4));
+ hw_state->cfgcr1 = intel_de_read(dev_priv,
+ ICL_DPLL_CFGCR1(4));
+ } else {
+ hw_state->cfgcr0 = intel_de_read(dev_priv,
+ ICL_DPLL_CFGCR0(id));
+ hw_state->cfgcr1 = intel_de_read(dev_priv,
+ ICL_DPLL_CFGCR1(id));
+ }
+ }
+
+ ret = true;
+out:
+ intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
+ return ret;
+}
+
+static bool combo_pll_get_hw_state(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ struct intel_dpll_hw_state *hw_state)
+{
+ i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);
+
+ return icl_pll_get_hw_state(dev_priv, pll, hw_state, enable_reg);
+}
+
+static bool tbt_pll_get_hw_state(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ struct intel_dpll_hw_state *hw_state)
+{
+ return icl_pll_get_hw_state(dev_priv, pll, hw_state, TBT_PLL_ENABLE);
+}
+
+static void icl_dpll_write(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
+ const enum intel_dpll_id id = pll->info->id;
+ i915_reg_t cfgcr0_reg, cfgcr1_reg, div0_reg = INVALID_MMIO_REG;
+
+ if (IS_ALDERLAKE_S(dev_priv)) {
+ cfgcr0_reg = ADLS_DPLL_CFGCR0(id);
+ cfgcr1_reg = ADLS_DPLL_CFGCR1(id);
+ } else if (IS_DG1(dev_priv)) {
+ cfgcr0_reg = DG1_DPLL_CFGCR0(id);
+ cfgcr1_reg = DG1_DPLL_CFGCR1(id);
+ } else if (IS_ROCKETLAKE(dev_priv)) {
+ cfgcr0_reg = RKL_DPLL_CFGCR0(id);
+ cfgcr1_reg = RKL_DPLL_CFGCR1(id);
+ } else if (DISPLAY_VER(dev_priv) >= 12) {
+ cfgcr0_reg = TGL_DPLL_CFGCR0(id);
+ cfgcr1_reg = TGL_DPLL_CFGCR1(id);
+ div0_reg = TGL_DPLL0_DIV0(id);
+ } else {
+ if (IS_JSL_EHL(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
+ cfgcr0_reg = ICL_DPLL_CFGCR0(4);
+ cfgcr1_reg = ICL_DPLL_CFGCR1(4);
+ } else {
+ cfgcr0_reg = ICL_DPLL_CFGCR0(id);
+ cfgcr1_reg = ICL_DPLL_CFGCR1(id);
+ }
+ }
+
+ intel_de_write(dev_priv, cfgcr0_reg, hw_state->cfgcr0);
+ intel_de_write(dev_priv, cfgcr1_reg, hw_state->cfgcr1);
+ drm_WARN_ON_ONCE(&dev_priv->drm, dev_priv->display.vbt.override_afc_startup &&
+ !i915_mmio_reg_valid(div0_reg));
+ if (dev_priv->display.vbt.override_afc_startup &&
+ i915_mmio_reg_valid(div0_reg))
+ intel_de_rmw(dev_priv, div0_reg, TGL_DPLL0_DIV0_AFC_STARTUP_MASK,
+ hw_state->div0);
+ intel_de_posting_read(dev_priv, cfgcr1_reg);
+}
+
+static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
+ enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
+ u32 val;
+
+ /*
+ * Some of the following registers have reserved fields, so program
+ * these with RMW based on a mask. The mask can be fixed or generated
+ * during the calc/readout phase if the mask depends on some other HW
+ * state like refclk, see icl_calc_mg_pll_state().
+ */
+ val = intel_de_read(dev_priv, MG_REFCLKIN_CTL(tc_port));
+ val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
+ val |= hw_state->mg_refclkin_ctl;
+ intel_de_write(dev_priv, MG_REFCLKIN_CTL(tc_port), val);
+
+ val = intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port));
+ val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
+ val |= hw_state->mg_clktop2_coreclkctl1;
+ intel_de_write(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port), val);
+
+ val = intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port));
+ val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
+ MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
+ MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
+ MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
+ val |= hw_state->mg_clktop2_hsclkctl;
+ intel_de_write(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port), val);
+
+ intel_de_write(dev_priv, MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
+ intel_de_write(dev_priv, MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
+ intel_de_write(dev_priv, MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
+ intel_de_write(dev_priv, MG_PLL_FRAC_LOCK(tc_port),
+ hw_state->mg_pll_frac_lock);
+ intel_de_write(dev_priv, MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);
+
+ val = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port));
+ val &= ~hw_state->mg_pll_bias_mask;
+ val |= hw_state->mg_pll_bias;
+ intel_de_write(dev_priv, MG_PLL_BIAS(tc_port), val);
+
+ val = intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
+ val &= ~hw_state->mg_pll_tdc_coldst_bias_mask;
+ val |= hw_state->mg_pll_tdc_coldst_bias;
+ intel_de_write(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port), val);
+
+ intel_de_posting_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
+}
+
+static void dkl_pll_write(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
+ enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
+ u32 val;
+
+ /*
+	 * All registers programmed here have the same HIP_INDEX_REG even
+	 * though they are on different building blocks.
+ */
+ /* All the registers are RMW */
+ val = intel_dkl_phy_read(dev_priv, DKL_REFCLKIN_CTL(tc_port), 2);
+ val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
+ val |= hw_state->mg_refclkin_ctl;
+ intel_dkl_phy_write(dev_priv, DKL_REFCLKIN_CTL(tc_port), 2, val);
+
+ val = intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port), 2);
+ val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
+ val |= hw_state->mg_clktop2_coreclkctl1;
+ intel_dkl_phy_write(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port), 2, val);
+
+ val = intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port), 2);
+ val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
+ MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
+ MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
+ MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
+ val |= hw_state->mg_clktop2_hsclkctl;
+ intel_dkl_phy_write(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port), 2, val);
+
+ val = DKL_PLL_DIV0_MASK;
+ if (dev_priv->display.vbt.override_afc_startup)
+ val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
+ intel_dkl_phy_rmw(dev_priv, DKL_PLL_DIV0(tc_port), 2, val,
+ hw_state->mg_pll_div0);
+
+ val = intel_dkl_phy_read(dev_priv, DKL_PLL_DIV1(tc_port), 2);
+ val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK |
+ DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
+ val |= hw_state->mg_pll_div1;
+ intel_dkl_phy_write(dev_priv, DKL_PLL_DIV1(tc_port), 2, val);
+
+ val = intel_dkl_phy_read(dev_priv, DKL_PLL_SSC(tc_port), 2);
+ val &= ~(DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
+ DKL_PLL_SSC_STEP_LEN_MASK |
+ DKL_PLL_SSC_STEP_NUM_MASK |
+ DKL_PLL_SSC_EN);
+ val |= hw_state->mg_pll_ssc;
+ intel_dkl_phy_write(dev_priv, DKL_PLL_SSC(tc_port), 2, val);
+
+ val = intel_dkl_phy_read(dev_priv, DKL_PLL_BIAS(tc_port), 2);
+ val &= ~(DKL_PLL_BIAS_FRAC_EN_H |
+ DKL_PLL_BIAS_FBDIV_FRAC_MASK);
+ val |= hw_state->mg_pll_bias;
+ intel_dkl_phy_write(dev_priv, DKL_PLL_BIAS(tc_port), 2, val);
+
+ val = intel_dkl_phy_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port), 2);
+ val &= ~(DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
+ DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
+ val |= hw_state->mg_pll_tdc_coldst_bias;
+ intel_dkl_phy_write(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port), 2, val);
+
+ intel_dkl_phy_posting_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port), 2);
+}
+
+static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ i915_reg_t enable_reg)
+{
+ u32 val;
+
+ val = intel_de_read(dev_priv, enable_reg);
+ val |= PLL_POWER_ENABLE;
+ intel_de_write(dev_priv, enable_reg, val);
+
+ /*
+ * The spec says we need to "wait" but it also says it should be
+ * immediate.
+ */
+ if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_POWER_STATE, 1))
+ drm_err(&dev_priv->drm, "PLL %d Power not enabled\n",
+ pll->info->id);
+}
+
+static void icl_pll_enable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ i915_reg_t enable_reg)
+{
+ u32 val;
+
+ val = intel_de_read(dev_priv, enable_reg);
+ val |= PLL_ENABLE;
+ intel_de_write(dev_priv, enable_reg, val);
+
+ /* Timeout is actually 600us. */
+ if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_LOCK, 1))
+ drm_err(&dev_priv->drm, "PLL %d not locked\n", pll->info->id);
+}
+
+static void adlp_cmtg_clock_gating_wa(struct drm_i915_private *i915, struct intel_shared_dpll *pll)
+{
+ u32 val;
+
+ if (!IS_ADLP_DISPLAY_STEP(i915, STEP_A0, STEP_B0) ||
+ pll->info->id != DPLL_ID_ICL_DPLL0)
+ return;
+ /*
+ * Wa_16011069516:adl-p[a0]
+ *
+ * All CMTG regs are unreliable until CMTG clock gating is disabled,
+ * so we can only assume the default TRANS_CMTG_CHICKEN reg value and
+ * sanity check this assumption with a double read, which presumably
+ * returns the correct value even with clock gating on.
+ *
+ * Instead of the usual place for workarounds we apply this one here,
+ * since TRANS_CMTG_CHICKEN is only accessible while DPLL0 is enabled.
+ */
+ val = intel_de_read(i915, TRANS_CMTG_CHICKEN);
+ val = intel_de_read(i915, TRANS_CMTG_CHICKEN);
+ intel_de_write(i915, TRANS_CMTG_CHICKEN, DISABLE_DPT_CLK_GATING);
+ if (drm_WARN_ON(&i915->drm, val & ~DISABLE_DPT_CLK_GATING))
+ drm_dbg_kms(&i915->drm, "Unexpected flags in TRANS_CMTG_CHICKEN: %08x\n", val);
+}
+
+static void combo_pll_enable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);
+
+ if (IS_JSL_EHL(dev_priv) &&
+ pll->info->id == DPLL_ID_EHL_DPLL4) {
+ /*
+ * We need to disable DC states when this DPLL is enabled.
+ * This can be done by taking a reference on DPLL4 power
+ * domain.
+ */
+ pll->wakeref = intel_display_power_get(dev_priv,
+ POWER_DOMAIN_DC_OFF);
+ }
+
+ icl_pll_power_enable(dev_priv, pll, enable_reg);
+
+ icl_dpll_write(dev_priv, pll);
+
+ /*
+ * DVFS pre sequence would be here, but in our driver the cdclk code
+ * paths should already be setting the appropriate voltage, hence we do
+ * nothing here.
+ */
+
+ icl_pll_enable(dev_priv, pll, enable_reg);
+
+ adlp_cmtg_clock_gating_wa(dev_priv, pll);
+
+ /* DVFS post sequence would be here. See the comment above. */
+}
+
+static void tbt_pll_enable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ icl_pll_power_enable(dev_priv, pll, TBT_PLL_ENABLE);
+
+ icl_dpll_write(dev_priv, pll);
+
+ /*
+ * DVFS pre sequence would be here, but in our driver the cdclk code
+ * paths should already be setting the appropriate voltage, hence we do
+ * nothing here.
+ */
+
+ icl_pll_enable(dev_priv, pll, TBT_PLL_ENABLE);
+
+ /* DVFS post sequence would be here. See the comment above. */
+}
+
+static void mg_pll_enable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ i915_reg_t enable_reg = intel_tc_pll_enable_reg(dev_priv, pll);
+
+ icl_pll_power_enable(dev_priv, pll, enable_reg);
+
+ if (DISPLAY_VER(dev_priv) >= 12)
+ dkl_pll_write(dev_priv, pll);
+ else
+ icl_mg_pll_write(dev_priv, pll);
+
+ /*
+ * DVFS pre sequence would be here, but in our driver the cdclk code
+ * paths should already be setting the appropriate voltage, hence we do
+ * nothing here.
+ */
+
+ icl_pll_enable(dev_priv, pll, enable_reg);
+
+ /* DVFS post sequence would be here. See the comment above. */
+}
+
+static void icl_pll_disable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ i915_reg_t enable_reg)
+{
+ u32 val;
+
+ /* The first steps are done by intel_ddi_post_disable(). */
+
+ /*
+ * DVFS pre sequence would be here, but in our driver the cdclk code
+ * paths should already be setting the appropriate voltage, hence we do
+ * nothing here.
+ */
+
+ val = intel_de_read(dev_priv, enable_reg);
+ val &= ~PLL_ENABLE;
+ intel_de_write(dev_priv, enable_reg, val);
+
+ /* Timeout is actually 1us. */
+ if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_LOCK, 1))
+ drm_err(&dev_priv->drm, "PLL %d locked\n", pll->info->id);
+
+ /* DVFS post sequence would be here. See the comment above. */
+
+ val = intel_de_read(dev_priv, enable_reg);
+ val &= ~PLL_POWER_ENABLE;
+ intel_de_write(dev_priv, enable_reg, val);
+
+ /*
+ * The spec says we need to "wait" but it also says it should be
+ * immediate.
+ */
+ if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_POWER_STATE, 1))
+ drm_err(&dev_priv->drm, "PLL %d Power not disabled\n",
+ pll->info->id);
+}
+
+static void combo_pll_disable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);
+
+ icl_pll_disable(dev_priv, pll, enable_reg);
+
+ if (IS_JSL_EHL(dev_priv) &&
+ pll->info->id == DPLL_ID_EHL_DPLL4)
+ intel_display_power_put(dev_priv, POWER_DOMAIN_DC_OFF,
+ pll->wakeref);
+}
+
+static void tbt_pll_disable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ icl_pll_disable(dev_priv, pll, TBT_PLL_ENABLE);
+}
+
+static void mg_pll_disable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ i915_reg_t enable_reg = intel_tc_pll_enable_reg(dev_priv, pll);
+
+ icl_pll_disable(dev_priv, pll, enable_reg);
+}
+
+static void icl_update_dpll_ref_clks(struct drm_i915_private *i915)
+{
+ /* No SSC ref */
+ i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref;
+}
+
+static void icl_dump_hw_state(struct drm_i915_private *dev_priv,
+ const struct intel_dpll_hw_state *hw_state)
+{
+ drm_dbg_kms(&dev_priv->drm,
+ "dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, div0: 0x%x, "
+ "mg_refclkin_ctl: 0x%x, hg_clktop2_coreclkctl1: 0x%x, "
+ "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
+ "mg_pll_div2: 0x%x, mg_pll_lf: 0x%x, "
+ "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
+ "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
+ hw_state->cfgcr0, hw_state->cfgcr1,
+ hw_state->div0,
+ hw_state->mg_refclkin_ctl,
+ hw_state->mg_clktop2_coreclkctl1,
+ hw_state->mg_clktop2_hsclkctl,
+ hw_state->mg_pll_div0,
+ hw_state->mg_pll_div1,
+ hw_state->mg_pll_lf,
+ hw_state->mg_pll_frac_lock,
+ hw_state->mg_pll_ssc,
+ hw_state->mg_pll_bias,
+ hw_state->mg_pll_tdc_coldst_bias);
+}
+
+static const struct intel_shared_dpll_funcs combo_pll_funcs = {
+ .enable = combo_pll_enable,
+ .disable = combo_pll_disable,
+ .get_hw_state = combo_pll_get_hw_state,
+ .get_freq = icl_ddi_combo_pll_get_freq,
+};
+
+static const struct intel_shared_dpll_funcs tbt_pll_funcs = {
+ .enable = tbt_pll_enable,
+ .disable = tbt_pll_disable,
+ .get_hw_state = tbt_pll_get_hw_state,
+ .get_freq = icl_ddi_tbt_pll_get_freq,
+};
+
+static const struct intel_shared_dpll_funcs mg_pll_funcs = {
+ .enable = mg_pll_enable,
+ .disable = mg_pll_disable,
+ .get_hw_state = mg_pll_get_hw_state,
+ .get_freq = icl_ddi_mg_pll_get_freq,
+};
+
+static const struct dpll_info icl_plls[] = {
+ { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
+ { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
+ { "TBT PLL", &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
+ { "MG PLL 1", &mg_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
+ { "MG PLL 2", &mg_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
+ { "MG PLL 3", &mg_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
+ { "MG PLL 4", &mg_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
+ { },
+};
+
+static const struct intel_dpll_mgr icl_pll_mgr = {
+ .dpll_info = icl_plls,
+ .compute_dplls = icl_compute_dplls,
+ .get_dplls = icl_get_dplls,
+ .put_dplls = icl_put_dplls,
+ .update_active_dpll = icl_update_active_dpll,
+ .update_ref_clks = icl_update_dpll_ref_clks,
+ .dump_hw_state = icl_dump_hw_state,
+};
+
+static const struct dpll_info ehl_plls[] = {
+ { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
+ { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
+ { "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
+ { },
+};
+
+static const struct intel_dpll_mgr ehl_pll_mgr = {
+ .dpll_info = ehl_plls,
+ .compute_dplls = icl_compute_dplls,
+ .get_dplls = icl_get_dplls,
+ .put_dplls = icl_put_dplls,
+ .update_ref_clks = icl_update_dpll_ref_clks,
+ .dump_hw_state = icl_dump_hw_state,
+};
+
+static const struct intel_shared_dpll_funcs dkl_pll_funcs = {
+ .enable = mg_pll_enable,
+ .disable = mg_pll_disable,
+ .get_hw_state = dkl_pll_get_hw_state,
+ .get_freq = icl_ddi_mg_pll_get_freq,
+};
+
+static const struct dpll_info tgl_plls[] = {
+ { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
+ { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
+ { "TBT PLL", &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
+ { "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
+ { "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
+ { "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
+ { "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
+ { "TC PLL 5", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL5, 0 },
+ { "TC PLL 6", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL6, 0 },
+ { },
+};
+
+static const struct intel_dpll_mgr tgl_pll_mgr = {
+ .dpll_info = tgl_plls,
+ .compute_dplls = icl_compute_dplls,
+ .get_dplls = icl_get_dplls,
+ .put_dplls = icl_put_dplls,
+ .update_active_dpll = icl_update_active_dpll,
+ .update_ref_clks = icl_update_dpll_ref_clks,
+ .dump_hw_state = icl_dump_hw_state,
+};
+
+static const struct dpll_info rkl_plls[] = {
+ { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
+ { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
+ { "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
+ { },
+};
+
+static const struct intel_dpll_mgr rkl_pll_mgr = {
+ .dpll_info = rkl_plls,
+ .compute_dplls = icl_compute_dplls,
+ .get_dplls = icl_get_dplls,
+ .put_dplls = icl_put_dplls,
+ .update_ref_clks = icl_update_dpll_ref_clks,
+ .dump_hw_state = icl_dump_hw_state,
+};
+
+static const struct dpll_info dg1_plls[] = {
+ { "DPLL 0", &combo_pll_funcs, DPLL_ID_DG1_DPLL0, 0 },
+ { "DPLL 1", &combo_pll_funcs, DPLL_ID_DG1_DPLL1, 0 },
+ { "DPLL 2", &combo_pll_funcs, DPLL_ID_DG1_DPLL2, 0 },
+ { "DPLL 3", &combo_pll_funcs, DPLL_ID_DG1_DPLL3, 0 },
+ { },
+};
+
+static const struct intel_dpll_mgr dg1_pll_mgr = {
+ .dpll_info = dg1_plls,
+ .compute_dplls = icl_compute_dplls,
+ .get_dplls = icl_get_dplls,
+ .put_dplls = icl_put_dplls,
+ .update_ref_clks = icl_update_dpll_ref_clks,
+ .dump_hw_state = icl_dump_hw_state,
+};
+
+static const struct dpll_info adls_plls[] = {
+ { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
+ { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
+ { "DPLL 2", &combo_pll_funcs, DPLL_ID_DG1_DPLL2, 0 },
+ { "DPLL 3", &combo_pll_funcs, DPLL_ID_DG1_DPLL3, 0 },
+ { },
+};
+
+static const struct intel_dpll_mgr adls_pll_mgr = {
+ .dpll_info = adls_plls,
+ .compute_dplls = icl_compute_dplls,
+ .get_dplls = icl_get_dplls,
+ .put_dplls = icl_put_dplls,
+ .update_ref_clks = icl_update_dpll_ref_clks,
+ .dump_hw_state = icl_dump_hw_state,
+};
+
+static const struct dpll_info adlp_plls[] = {
+ { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
+ { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
+ { "TBT PLL", &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
+ { "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
+ { "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
+ { "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
+ { "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
+ { },
+};
+
+static const struct intel_dpll_mgr adlp_pll_mgr = {
+ .dpll_info = adlp_plls,
+ .compute_dplls = icl_compute_dplls,
+ .get_dplls = icl_get_dplls,
+ .put_dplls = icl_put_dplls,
+ .update_active_dpll = icl_update_active_dpll,
+ .update_ref_clks = icl_update_dpll_ref_clks,
+ .dump_hw_state = icl_dump_hw_state,
+};
+
+/**
+ * intel_shared_dpll_init - Initialize shared DPLLs
+ * @dev_priv: i915 device
+ *
+ * Initialize shared DPLLs for @dev_priv.
+ */
+void intel_shared_dpll_init(struct drm_i915_private *dev_priv)
+{
+ const struct intel_dpll_mgr *dpll_mgr = NULL;
+ const struct dpll_info *dpll_info;
+ int i;
+
+ if (IS_DG2(dev_priv))
+ /* No shared DPLLs on DG2; port PLLs are part of the PHY */
+ dpll_mgr = NULL;
+ else if (IS_ALDERLAKE_P(dev_priv))
+ dpll_mgr = &adlp_pll_mgr;
+ else if (IS_ALDERLAKE_S(dev_priv))
+ dpll_mgr = &adls_pll_mgr;
+ else if (IS_DG1(dev_priv))
+ dpll_mgr = &dg1_pll_mgr;
+ else if (IS_ROCKETLAKE(dev_priv))
+ dpll_mgr = &rkl_pll_mgr;
+ else if (DISPLAY_VER(dev_priv) >= 12)
+ dpll_mgr = &tgl_pll_mgr;
+ else if (IS_JSL_EHL(dev_priv))
+ dpll_mgr = &ehl_pll_mgr;
+ else if (DISPLAY_VER(dev_priv) >= 11)
+ dpll_mgr = &icl_pll_mgr;
+ else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ dpll_mgr = &bxt_pll_mgr;
+ else if (DISPLAY_VER(dev_priv) == 9)
+ dpll_mgr = &skl_pll_mgr;
+ else if (HAS_DDI(dev_priv))
+ dpll_mgr = &hsw_pll_mgr;
+ else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
+ dpll_mgr = &pch_pll_mgr;
+
+ if (!dpll_mgr) {
+ dev_priv->display.dpll.num_shared_dpll = 0;
+ return;
+ }
+
+ dpll_info = dpll_mgr->dpll_info;
+
+ for (i = 0; dpll_info[i].name; i++) {
+ if (drm_WARN_ON(&dev_priv->drm,
+ i >= ARRAY_SIZE(dev_priv->display.dpll.shared_dplls)))
+ break;
+
+ drm_WARN_ON(&dev_priv->drm, i != dpll_info[i].id);
+ dev_priv->display.dpll.shared_dplls[i].info = &dpll_info[i];
+ }
+
+ dev_priv->display.dpll.mgr = dpll_mgr;
+ dev_priv->display.dpll.num_shared_dpll = i;
+ mutex_init(&dev_priv->display.dpll.lock);
+}
+
+/**
+ * intel_compute_shared_dplls - compute DPLL state for a CRTC and encoder combination
+ * @state: atomic state
+ * @crtc: CRTC to compute DPLLs for
+ * @encoder: encoder
+ *
+ * This function computes the DPLL state for the given CRTC and encoder.
+ *
+ * The new configuration in the atomic commit @state is made effective by
+ * calling intel_shared_dpll_swap_state().
+ *
+ * Returns:
+ * 0 on success, negative error code on failure.
+ */
+int intel_compute_shared_dplls(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr;
+
+ if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
+ return -EINVAL;
+
+ return dpll_mgr->compute_dplls(state, crtc, encoder);
+}
+
+/**
+ * intel_reserve_shared_dplls - reserve DPLLs for CRTC and encoder combination
+ * @state: atomic state
+ * @crtc: CRTC to reserve DPLLs for
+ * @encoder: encoder
+ *
+ * This function reserves all required DPLLs for the given CRTC and encoder
+ * combination in the current atomic commit @state and the new @crtc atomic
+ * state.
+ *
+ * The new configuration in the atomic commit @state is made effective by
+ * calling intel_shared_dpll_swap_state().
+ *
+ * The reserved DPLLs should be released by calling
+ * intel_release_shared_dplls().
+ *
+ * Returns:
+ * 0 if all required DPLLs were successfully reserved,
+ * negative error code otherwise.
+ */
+int intel_reserve_shared_dplls(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr;
+
+ if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
+ return -EINVAL;
+
+ return dpll_mgr->get_dplls(state, crtc, encoder);
+}
+
+/**
+ * intel_release_shared_dplls - end use of DPLLs by CRTC in atomic state
+ * @state: atomic state
+ * @crtc: crtc from which the DPLLs are to be released
+ *
+ * This function releases all DPLLs reserved by intel_reserve_shared_dplls()
+ * from the current atomic commit @state and the old @crtc atomic state.
+ *
+ * The new configuration in the atomic commit @state is made effective by
+ * calling intel_shared_dpll_swap_state().
+ */
+void intel_release_shared_dplls(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr;
+
+ /*
+ * FIXME: this function is called for every platform having a
+ * compute_clock hook, even though the platform doesn't yet support
+ * the shared DPLL framework and intel_reserve_shared_dplls() is not
+ * called on those.
+ */
+ if (!dpll_mgr)
+ return;
+
+ dpll_mgr->put_dplls(state, crtc);
+}
+
+/**
+ * intel_update_active_dpll - update the active DPLL for a CRTC/encoder
+ * @state: atomic state
+ * @crtc: the CRTC for which to update the active DPLL
+ * @encoder: encoder determining the type of port DPLL
+ *
+ * Update the active DPLL for the given @crtc/@encoder in @crtc's atomic state,
+ * from the port DPLLs reserved previously by intel_reserve_shared_dplls(). The
+ * DPLL selected will be based on the current mode of the encoder's port.
+ */
+void intel_update_active_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr;
+
+ if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
+ return;
+
+ dpll_mgr->update_active_dpll(state, crtc, encoder);
+}
+
+/**
+ * intel_dpll_get_freq - calculate the DPLL's output frequency
+ * @i915: i915 device
+ * @pll: DPLL for which to calculate the output frequency
+ * @pll_state: DPLL state from which to calculate the output frequency
+ *
+ * Return the output frequency corresponding to @pll's passed in @pll_state.
+ */
+int intel_dpll_get_freq(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll,
+ const struct intel_dpll_hw_state *pll_state)
+{
+ if (drm_WARN_ON(&i915->drm, !pll->info->funcs->get_freq))
+ return 0;
+
+ return pll->info->funcs->get_freq(i915, pll, pll_state);
+}
+
+/**
+ * intel_dpll_get_hw_state - read out the DPLL's hardware state
+ * @i915: i915 device
+ * @pll: DPLL for which to read out the hardware state
+ * @hw_state: DPLL's hardware state
+ *
+ * Read out @pll's hardware state into @hw_state.
+ */
+bool intel_dpll_get_hw_state(struct drm_i915_private *i915,
+ struct intel_shared_dpll *pll,
+ struct intel_dpll_hw_state *hw_state)
+{
+ return pll->info->funcs->get_hw_state(i915, pll, hw_state);
+}
+
+static void readout_dpll_hw_state(struct drm_i915_private *i915,
+ struct intel_shared_dpll *pll)
+{
+ struct intel_crtc *crtc;
+
+ pll->on = intel_dpll_get_hw_state(i915, pll, &pll->state.hw_state);
+
+ if (IS_JSL_EHL(i915) && pll->on &&
+ pll->info->id == DPLL_ID_EHL_DPLL4) {
+ pll->wakeref = intel_display_power_get(i915,
+ POWER_DOMAIN_DC_OFF);
+ }
+
+ pll->state.pipe_mask = 0;
+ for_each_intel_crtc(&i915->drm, crtc) {
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+
+ if (crtc_state->hw.active && crtc_state->shared_dpll == pll)
+ pll->state.pipe_mask |= BIT(crtc->pipe);
+ }
+ pll->active_mask = pll->state.pipe_mask;
+
+ drm_dbg_kms(&i915->drm,
+ "%s hw state readout: pipe_mask 0x%x, on %i\n",
+ pll->info->name, pll->state.pipe_mask, pll->on);
+}
+
+void intel_dpll_update_ref_clks(struct drm_i915_private *i915)
+{
+ if (i915->display.dpll.mgr && i915->display.dpll.mgr->update_ref_clks)
+ i915->display.dpll.mgr->update_ref_clks(i915);
+}
+
+void intel_dpll_readout_hw_state(struct drm_i915_private *i915)
+{
+ int i;
+
+ for (i = 0; i < i915->display.dpll.num_shared_dpll; i++)
+ readout_dpll_hw_state(i915, &i915->display.dpll.shared_dplls[i]);
+}
+
+static void sanitize_dpll_state(struct drm_i915_private *i915,
+ struct intel_shared_dpll *pll)
+{
+ if (!pll->on)
+ return;
+
+ adlp_cmtg_clock_gating_wa(i915, pll);
+
+ if (pll->active_mask)
+ return;
+
+ drm_dbg_kms(&i915->drm,
+ "%s enabled but not in use, disabling\n",
+ pll->info->name);
+
+ pll->info->funcs->disable(i915, pll);
+ pll->on = false;
+}
+
+void intel_dpll_sanitize_state(struct drm_i915_private *i915)
+{
+ int i;
+
+ for (i = 0; i < i915->display.dpll.num_shared_dpll; i++)
+ sanitize_dpll_state(i915, &i915->display.dpll.shared_dplls[i]);
+}
+
+/**
+ * intel_dpll_dump_hw_state - write hw_state to dmesg
+ * @dev_priv: i915 drm device
+ * @hw_state: hw state to be written to the log
+ *
+ * Write the relevant values in @hw_state to dmesg using drm_dbg_kms.
+ */
+void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
+ const struct intel_dpll_hw_state *hw_state)
+{
+ if (dev_priv->display.dpll.mgr) {
+ dev_priv->display.dpll.mgr->dump_hw_state(dev_priv, hw_state);
+ } else {
+		/*
+		 * Fallback for platforms that don't use the shared dpll
+		 * infrastructure.
+		 */
+ drm_dbg_kms(&dev_priv->drm,
+ "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
+ "fp0: 0x%x, fp1: 0x%x\n",
+ hw_state->dpll,
+ hw_state->dpll_md,
+ hw_state->fp0,
+ hw_state->fp1);
+ }
+}
+
+static void
+verify_single_dpll_state(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ struct intel_crtc *crtc,
+ struct intel_crtc_state *new_crtc_state)
+{
+ struct intel_dpll_hw_state dpll_hw_state;
+ u8 pipe_mask;
+ bool active;
+
+ memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
+
+ drm_dbg_kms(&dev_priv->drm, "%s\n", pll->info->name);
+
+ active = intel_dpll_get_hw_state(dev_priv, pll, &dpll_hw_state);
+
+ if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
+ I915_STATE_WARN(!pll->on && pll->active_mask,
+ "pll in active use but not on in sw tracking\n");
+ I915_STATE_WARN(pll->on && !pll->active_mask,
+ "pll is on but not used by any active pipe\n");
+ I915_STATE_WARN(pll->on != active,
+ "pll on state mismatch (expected %i, found %i)\n",
+ pll->on, active);
+ }
+
+ if (!crtc) {
+ I915_STATE_WARN(pll->active_mask & ~pll->state.pipe_mask,
+ "more active pll users than references: 0x%x vs 0x%x\n",
+ pll->active_mask, pll->state.pipe_mask);
+
+ return;
+ }
+
+ pipe_mask = BIT(crtc->pipe);
+
+ if (new_crtc_state->hw.active)
+ I915_STATE_WARN(!(pll->active_mask & pipe_mask),
+ "pll active mismatch (expected pipe %c in active mask 0x%x)\n",
+ pipe_name(crtc->pipe), pll->active_mask);
+ else
+ I915_STATE_WARN(pll->active_mask & pipe_mask,
+ "pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n",
+ pipe_name(crtc->pipe), pll->active_mask);
+
+ I915_STATE_WARN(!(pll->state.pipe_mask & pipe_mask),
+ "pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n",
+ pipe_mask, pll->state.pipe_mask);
+
+ I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
+ &dpll_hw_state,
+ sizeof(dpll_hw_state)),
+ "pll hw state mismatch\n");
+}
+
+void intel_shared_dpll_state_verify(struct intel_crtc *crtc,
+ struct intel_crtc_state *old_crtc_state,
+ struct intel_crtc_state *new_crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+ if (new_crtc_state->shared_dpll)
+ verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll,
+ crtc, new_crtc_state);
+
+ if (old_crtc_state->shared_dpll &&
+ old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
+ u8 pipe_mask = BIT(crtc->pipe);
+ struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
+
+ I915_STATE_WARN(pll->active_mask & pipe_mask,
+ "pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n",
+ pipe_name(crtc->pipe), pll->active_mask);
+ I915_STATE_WARN(pll->state.pipe_mask & pipe_mask,
+ "pll enabled crtcs mismatch (found %x in enabled mask (0x%x))\n",
+ pipe_name(crtc->pipe), pll->state.pipe_mask);
+ }
+}
+
+void intel_shared_dpll_verify_disabled(struct drm_i915_private *i915)
+{
+ int i;
+
+ for (i = 0; i < i915->display.dpll.num_shared_dpll; i++)
+ verify_single_dpll_state(i915, &i915->display.dpll.shared_dplls[i],
+ NULL, NULL);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
new file mode 100644
index 000000000..3247dc300
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
@@ -0,0 +1,376 @@
+/*
+ * Copyright © 2012-2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _INTEL_DPLL_MGR_H_
+#define _INTEL_DPLL_MGR_H_
+
+#include <linux/types.h>
+
+#include "intel_wakeref.h"
+
+/* FIXME: Move this to a more appropriate place. */
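+/*
+ * abs_diff(a, b) evaluates to the non-negative difference of two values
+ * of the same type, e.g. abs_diff(3, 7) == abs_diff(7, 3) == 4. The
+ * (void)(&__a == &__b) comparison exists only to trigger a compile-time
+ * warning if the two arguments have incompatible types.
+ */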
+#define abs_diff(a, b) ({ \
+ typeof(a) __a = (a); \
+ typeof(b) __b = (b); \
+ (void) (&__a == &__b); \
+ __a > __b ? (__a - __b) : (__b - __a); })
+
+enum tc_port;
+struct drm_i915_private;
+struct intel_atomic_state;
+struct intel_crtc;
+struct intel_crtc_state;
+struct intel_encoder;
+struct intel_shared_dpll;
+struct intel_shared_dpll_funcs;
+
+/**
+ * enum intel_dpll_id - possible DPLL ids
+ *
+ * Enumeration of possible IDs for a DPLL. Real shared dpll ids must be >= 0.
+ */
+enum intel_dpll_id {
+ /**
+ * @DPLL_ID_PRIVATE: non-shared dpll in use
+ */
+ DPLL_ID_PRIVATE = -1,
+
+ /**
+ * @DPLL_ID_PCH_PLL_A: DPLL A in ILK, SNB and IVB
+ */
+ DPLL_ID_PCH_PLL_A = 0,
+ /**
+ * @DPLL_ID_PCH_PLL_B: DPLL B in ILK, SNB and IVB
+ */
+ DPLL_ID_PCH_PLL_B = 1,
+
+
+ /**
+ * @DPLL_ID_WRPLL1: HSW and BDW WRPLL1
+ */
+ DPLL_ID_WRPLL1 = 0,
+ /**
+ * @DPLL_ID_WRPLL2: HSW and BDW WRPLL2
+ */
+ DPLL_ID_WRPLL2 = 1,
+ /**
+ * @DPLL_ID_SPLL: HSW and BDW SPLL
+ */
+ DPLL_ID_SPLL = 2,
+ /**
+ * @DPLL_ID_LCPLL_810: HSW and BDW 0.81 GHz LCPLL
+ */
+ DPLL_ID_LCPLL_810 = 3,
+ /**
+ * @DPLL_ID_LCPLL_1350: HSW and BDW 1.35 GHz LCPLL
+ */
+ DPLL_ID_LCPLL_1350 = 4,
+ /**
+ * @DPLL_ID_LCPLL_2700: HSW and BDW 2.7 GHz LCPLL
+ */
+ DPLL_ID_LCPLL_2700 = 5,
+
+
+ /**
+ * @DPLL_ID_SKL_DPLL0: SKL and later DPLL0
+ */
+ DPLL_ID_SKL_DPLL0 = 0,
+ /**
+ * @DPLL_ID_SKL_DPLL1: SKL and later DPLL1
+ */
+ DPLL_ID_SKL_DPLL1 = 1,
+ /**
+ * @DPLL_ID_SKL_DPLL2: SKL and later DPLL2
+ */
+ DPLL_ID_SKL_DPLL2 = 2,
+ /**
+ * @DPLL_ID_SKL_DPLL3: SKL and later DPLL3
+ */
+ DPLL_ID_SKL_DPLL3 = 3,
+
+
+ /**
+ * @DPLL_ID_ICL_DPLL0: ICL/TGL combo PHY DPLL0
+ */
+ DPLL_ID_ICL_DPLL0 = 0,
+ /**
+ * @DPLL_ID_ICL_DPLL1: ICL/TGL combo PHY DPLL1
+ */
+ DPLL_ID_ICL_DPLL1 = 1,
+ /**
+ * @DPLL_ID_EHL_DPLL4: EHL combo PHY DPLL4
+ */
+ DPLL_ID_EHL_DPLL4 = 2,
+ /**
+ * @DPLL_ID_ICL_TBTPLL: ICL/TGL TBT PLL
+ */
+ DPLL_ID_ICL_TBTPLL = 2,
+ /**
+ * @DPLL_ID_ICL_MGPLL1: ICL MG PLL 1 port 1 (C),
+ * TGL TC PLL 1 port 1 (TC1)
+ */
+ DPLL_ID_ICL_MGPLL1 = 3,
+ /**
+ * @DPLL_ID_ICL_MGPLL2: ICL MG PLL 1 port 2 (D)
+ * TGL TC PLL 1 port 2 (TC2)
+ */
+ DPLL_ID_ICL_MGPLL2 = 4,
+ /**
+ * @DPLL_ID_ICL_MGPLL3: ICL MG PLL 1 port 3 (E)
+ * TGL TC PLL 1 port 3 (TC3)
+ */
+ DPLL_ID_ICL_MGPLL3 = 5,
+ /**
+ * @DPLL_ID_ICL_MGPLL4: ICL MG PLL 1 port 4 (F)
+ * TGL TC PLL 1 port 4 (TC4)
+ */
+ DPLL_ID_ICL_MGPLL4 = 6,
+ /**
+ * @DPLL_ID_TGL_MGPLL5: TGL TC PLL port 5 (TC5)
+ */
+ DPLL_ID_TGL_MGPLL5 = 7,
+ /**
+ * @DPLL_ID_TGL_MGPLL6: TGL TC PLL port 6 (TC6)
+ */
+ DPLL_ID_TGL_MGPLL6 = 8,
+
+ /**
+ * @DPLL_ID_DG1_DPLL0: DG1 combo PHY DPLL0
+ */
+ DPLL_ID_DG1_DPLL0 = 0,
+ /**
+ * @DPLL_ID_DG1_DPLL1: DG1 combo PHY DPLL1
+ */
+ DPLL_ID_DG1_DPLL1 = 1,
+ /**
+ * @DPLL_ID_DG1_DPLL2: DG1 combo PHY DPLL2
+ */
+ DPLL_ID_DG1_DPLL2 = 2,
+ /**
+ * @DPLL_ID_DG1_DPLL3: DG1 combo PHY DPLL3
+ */
+ DPLL_ID_DG1_DPLL3 = 3,
+};
+
+#define I915_NUM_PLLS 9
+
+enum icl_port_dpll_id {
+ ICL_PORT_DPLL_DEFAULT,
+ ICL_PORT_DPLL_MG_PHY,
+
+ ICL_PORT_DPLL_COUNT,
+};
+
+struct intel_dpll_hw_state {
+ /* i9xx, pch plls */
+ u32 dpll;
+ u32 dpll_md;
+ u32 fp0;
+ u32 fp1;
+
+ /* hsw, bdw */
+ u32 wrpll;
+ u32 spll;
+
+ /* skl */
+ /*
+	 * DPLL_CTRL1 has 6 bits for each DPLL. We store those in the
+ * lower part of ctrl1 and they get shifted into position when writing
+ * the register. This allows us to easily compare the state to share
+ * the DPLL.
+ */
+ u32 ctrl1;
+ /* HDMI only, 0 when used for DP */
+ u32 cfgcr1, cfgcr2;
+
+ /* icl */
+ u32 cfgcr0;
+
+ /* tgl */
+ u32 div0;
+
+ /* bxt */
+ u32 ebb0, ebb4, pll0, pll1, pll2, pll3, pll6, pll8, pll9, pll10, pcsdw12;
+
+ /*
+ * ICL uses the following, already defined:
+ * u32 cfgcr0, cfgcr1;
+ */
+ u32 mg_refclkin_ctl;
+ u32 mg_clktop2_coreclkctl1;
+ u32 mg_clktop2_hsclkctl;
+ u32 mg_pll_div0;
+ u32 mg_pll_div1;
+ u32 mg_pll_lf;
+ u32 mg_pll_frac_lock;
+ u32 mg_pll_ssc;
+ u32 mg_pll_bias;
+ u32 mg_pll_tdc_coldst_bias;
+ u32 mg_pll_bias_mask;
+ u32 mg_pll_tdc_coldst_bias_mask;
+};
+
+/**
+ * struct intel_shared_dpll_state - hold the DPLL atomic state
+ *
+ * This structure holds an atomic state for the DPLL, that can represent
+ * either its current state (in struct &intel_shared_dpll) or a desired
+ * future state which would be applied by an atomic mode set (stored in
+ * a struct &intel_atomic_state).
+ *
+ * See also intel_reserve_shared_dplls() and intel_release_shared_dplls().
+ */
+struct intel_shared_dpll_state {
+ /**
+ * @pipe_mask: mask of pipes using this DPLL, active or not
+ */
+ u8 pipe_mask;
+
+ /**
+ * @hw_state: hardware configuration for the DPLL stored in
+ * struct &intel_dpll_hw_state.
+ */
+ struct intel_dpll_hw_state hw_state;
+};
+
+/**
+ * struct dpll_info - display PLL platform specific info
+ */
+struct dpll_info {
+ /**
+ * @name: DPLL name; used for logging
+ */
+ const char *name;
+
+ /**
+ * @funcs: platform specific hooks
+ */
+ const struct intel_shared_dpll_funcs *funcs;
+
+ /**
+	 * @id: unique identifier for this DPLL; should match the index in the
+	 * dev_priv->shared_dplls array
+ */
+ enum intel_dpll_id id;
+
+#define INTEL_DPLL_ALWAYS_ON (1 << 0)
+ /**
+ * @flags:
+ *
+ * INTEL_DPLL_ALWAYS_ON
+ * Inform the state checker that the DPLL is kept enabled even if
+ * not in use by any CRTC.
+ */
+ u32 flags;
+};
+
+/**
+ * struct intel_shared_dpll - display PLL with tracked state and users
+ */
+struct intel_shared_dpll {
+ /**
+ * @state:
+ *
+ * Store the state for the pll, including its hw state
+ * and CRTCs using it.
+ */
+ struct intel_shared_dpll_state state;
+
+ /**
+ * @active_mask: mask of active pipes (i.e. DPMS on) using this DPLL
+ */
+ u8 active_mask;
+
+ /**
+ * @on: is the PLL actually active? Disabled during modeset
+ */
+ bool on;
+
+ /**
+ * @info: platform specific info
+ */
+ const struct dpll_info *info;
+
+ /**
+	 * @wakeref: On some platforms a device-level runtime pm reference may
+ * need to be grabbed to disable DC states while this DPLL is enabled
+ */
+ intel_wakeref_t wakeref;
+};
+
+#define SKL_DPLL0 0
+#define SKL_DPLL1 1
+#define SKL_DPLL2 2
+#define SKL_DPLL3 3
+
+/* shared dpll functions */
+struct intel_shared_dpll *
+intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
+ enum intel_dpll_id id);
+enum intel_dpll_id
+intel_get_shared_dpll_id(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll);
+void assert_shared_dpll(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ bool state);
+#define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true)
+#define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false)
+int intel_compute_shared_dplls(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder);
+int intel_reserve_shared_dplls(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder);
+void intel_release_shared_dplls(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
+ enum icl_port_dpll_id port_dpll_id);
+void intel_update_active_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder);
+int intel_dpll_get_freq(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll,
+ const struct intel_dpll_hw_state *pll_state);
+bool intel_dpll_get_hw_state(struct drm_i915_private *i915,
+ struct intel_shared_dpll *pll,
+ struct intel_dpll_hw_state *hw_state);
+void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state);
+void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state);
+void intel_shared_dpll_swap_state(struct intel_atomic_state *state);
+void intel_shared_dpll_init(struct drm_i915_private *dev_priv);
+void intel_dpll_update_ref_clks(struct drm_i915_private *dev_priv);
+void intel_dpll_readout_hw_state(struct drm_i915_private *dev_priv);
+void intel_dpll_sanitize_state(struct drm_i915_private *dev_priv);
+
+void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
+ const struct intel_dpll_hw_state *hw_state);
+enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port);
+bool intel_dpll_is_combophy(enum intel_dpll_id id);
+
+void intel_shared_dpll_state_verify(struct intel_crtc *crtc,
+ struct intel_crtc_state *old_crtc_state,
+ struct intel_crtc_state *new_crtc_state);
+void intel_shared_dpll_verify_disabled(struct drm_i915_private *i915);
+
+#endif /* _INTEL_DPLL_MGR_H_ */
diff --git a/drivers/gpu/drm/i915/display/intel_dpt.c b/drivers/gpu/drm/i915/display/intel_dpt.c
new file mode 100644
index 000000000..ea8a08b9c
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dpt.c
@@ -0,0 +1,316 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include "gem/i915_gem_domain.h"
+#include "gem/i915_gem_internal.h"
+#include "gt/gen8_ppgtt.h"
+
+#include "i915_drv.h"
+#include "intel_display_types.h"
+#include "intel_dpt.h"
+#include "intel_fb.h"
+
+struct i915_dpt {
+ struct i915_address_space vm;
+
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ void __iomem *iomem;
+};
+
+#define i915_is_dpt(vm) ((vm)->is_dpt)
+
+static inline struct i915_dpt *
+i915_vm_to_dpt(struct i915_address_space *vm)
+{
+ BUILD_BUG_ON(offsetof(struct i915_dpt, vm));
+ GEM_BUG_ON(!i915_is_dpt(vm));
+ return container_of(vm, struct i915_dpt, vm);
+}
+
+#define dpt_total_entries(dpt) ((dpt)->vm.total >> PAGE_SHIFT)
+
+static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
+{
+ writeq(pte, addr);
+}
+
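+/*
+ * The DPT itself is a flat array of gen8 PTEs, so the slot for a
+ * page-aligned offset is simply offset / I915_GTT_PAGE_SIZE.
+ */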
+static void dpt_insert_page(struct i915_address_space *vm,
+ dma_addr_t addr,
+ u64 offset,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ struct i915_dpt *dpt = i915_vm_to_dpt(vm);
+ gen8_pte_t __iomem *base = dpt->iomem;
+
+ gen8_set_pte(base + offset / I915_GTT_PAGE_SIZE,
+ vm->pte_encode(addr, level, flags));
+}
+
+static void dpt_insert_entries(struct i915_address_space *vm,
+ struct i915_vma_resource *vma_res,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ struct i915_dpt *dpt = i915_vm_to_dpt(vm);
+ gen8_pte_t __iomem *base = dpt->iomem;
+ const gen8_pte_t pte_encode = vm->pte_encode(0, level, flags);
+ struct sgt_iter sgt_iter;
+ dma_addr_t addr;
+ int i;
+
+ /*
+ * Note that we ignore PTE_READ_ONLY here. The caller must be careful
+ * not to allow the user to override access to a read only page.
+ */
+
+ i = vma_res->start / I915_GTT_PAGE_SIZE;
+ for_each_sgt_daddr(addr, sgt_iter, vma_res->bi.pages)
+ gen8_set_pte(&base[i++], pte_encode | addr);
+}
+
+static void dpt_clear_range(struct i915_address_space *vm,
+ u64 start, u64 length)
+{
+}
+
+static void dpt_bind_vma(struct i915_address_space *vm,
+ struct i915_vm_pt_stash *stash,
+ struct i915_vma_resource *vma_res,
+ enum i915_cache_level cache_level,
+ u32 flags)
+{
+ u32 pte_flags;
+
+ if (vma_res->bound_flags)
+ return;
+
+ /* Applicable to VLV (gen8+ do not support RO in the GGTT) */
+ pte_flags = 0;
+ if (vm->has_read_only && vma_res->bi.readonly)
+ pte_flags |= PTE_READ_ONLY;
+ if (vma_res->bi.lmem)
+ pte_flags |= PTE_LM;
+
+ vm->insert_entries(vm, vma_res, cache_level, pte_flags);
+
+ vma_res->page_sizes_gtt = I915_GTT_PAGE_SIZE;
+
+ /*
+ * Without aliasing PPGTT there's no difference between
+ * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
+ * upgrade to both bound if we bind either to avoid double-binding.
+ */
+ vma_res->bound_flags = I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
+}
+
+static void dpt_unbind_vma(struct i915_address_space *vm,
+ struct i915_vma_resource *vma_res)
+{
+ vm->clear_range(vm, vma_res->start, vma_res->vma_size);
+}
+
+static void dpt_cleanup(struct i915_address_space *vm)
+{
+ struct i915_dpt *dpt = i915_vm_to_dpt(vm);
+
+ i915_gem_object_put(dpt->obj);
+}
+
+struct i915_vma *intel_dpt_pin(struct i915_address_space *vm)
+{
+ struct drm_i915_private *i915 = vm->i915;
+ struct i915_dpt *dpt = i915_vm_to_dpt(vm);
+ intel_wakeref_t wakeref;
+ struct i915_vma *vma;
+ void __iomem *iomem;
+ struct i915_gem_ww_ctx ww;
+ u64 pin_flags = 0;
+ int err;
+
+ if (i915_gem_object_is_stolen(dpt->obj))
+ pin_flags |= PIN_MAPPABLE;
+
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ atomic_inc(&i915->gpu_error.pending_fb_pin);
+
+ for_i915_gem_ww(&ww, err, true) {
+ err = i915_gem_object_lock(dpt->obj, &ww);
+ if (err)
+ continue;
+
+ vma = i915_gem_object_ggtt_pin_ww(dpt->obj, &ww, NULL, 0, 4096,
+ pin_flags);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ continue;
+ }
+
+ iomem = i915_vma_pin_iomap(vma);
+ i915_vma_unpin(vma);
+
+ if (IS_ERR(iomem)) {
+ err = PTR_ERR(iomem);
+ continue;
+ }
+
+ dpt->vma = vma;
+ dpt->iomem = iomem;
+
+ i915_vma_get(vma);
+ }
+
+ dpt->obj->mm.dirty = true;
+
+ atomic_dec(&i915->gpu_error.pending_fb_pin);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+
+ return err ? ERR_PTR(err) : vma;
+}
+
+void intel_dpt_unpin(struct i915_address_space *vm)
+{
+ struct i915_dpt *dpt = i915_vm_to_dpt(vm);
+
+ i915_vma_unpin_iomap(dpt->vma);
+ i915_vma_put(dpt->vma);
+}
+
+/**
+ * intel_dpt_resume - restore the memory mapping for all DPT FBs during system resume
+ * @i915: device instance
+ *
+ * Restore the memory mapping during system resume for all framebuffers which
+ * are mapped to HW via a GGTT->DPT page table. The contents of these page
+ * tables are not stored in the hibernation image during S4 and S3RST->S4
+ * transitions, so we reprogram the PTE entries in those tables here.
+ *
+ * This function must be called after the mappings in the GGTT have been
+ * restored by calling i915_ggtt_resume().
+ */
+void intel_dpt_resume(struct drm_i915_private *i915)
+{
+ struct drm_framebuffer *drm_fb;
+
+ if (!HAS_DISPLAY(i915))
+ return;
+
+ mutex_lock(&i915->drm.mode_config.fb_lock);
+ drm_for_each_fb(drm_fb, &i915->drm) {
+ struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
+
+ if (fb->dpt_vm)
+ i915_ggtt_resume_vm(fb->dpt_vm);
+ }
+ mutex_unlock(&i915->drm.mode_config.fb_lock);
+}
+
+/**
+ * intel_dpt_suspend - suspend the memory mapping for all DPT FBs during system suspend
+ * @i915: device instance
+ *
+ * Suspend the memory mapping during system suspend for all framebuffers which
+ * are mapped to HW via a GGTT->DPT page table.
+ *
+ * This function must be called before the mappings in the GGTT are
+ * suspended by calling i915_ggtt_suspend().
+ */
+void intel_dpt_suspend(struct drm_i915_private *i915)
+{
+ struct drm_framebuffer *drm_fb;
+
+ if (!HAS_DISPLAY(i915))
+ return;
+
+ mutex_lock(&i915->drm.mode_config.fb_lock);
+
+ drm_for_each_fb(drm_fb, &i915->drm) {
+ struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
+
+ if (fb->dpt_vm)
+ i915_ggtt_suspend_vm(fb->dpt_vm);
+ }
+
+ mutex_unlock(&i915->drm.mode_config.fb_lock);
+}
+
+struct i915_address_space *
+intel_dpt_create(struct intel_framebuffer *fb)
+{
+ struct drm_gem_object *obj = &intel_fb_obj(&fb->base)->base;
+ struct drm_i915_private *i915 = to_i915(obj->dev);
+ struct drm_i915_gem_object *dpt_obj;
+ struct i915_address_space *vm;
+ struct i915_dpt *dpt;
+ size_t size;
+ int ret;
+
+ if (intel_fb_needs_pot_stride_remap(fb))
+ size = intel_remapped_info_size(&fb->remapped_view.gtt.remapped);
+ else
+ size = DIV_ROUND_UP_ULL(obj->size, I915_GTT_PAGE_SIZE);
+
+ size = round_up(size * sizeof(gen8_pte_t), I915_GTT_PAGE_SIZE);
+
+ dpt_obj = i915_gem_object_create_lmem(i915, size, I915_BO_ALLOC_CONTIGUOUS);
+ if (IS_ERR(dpt_obj) && i915_ggtt_has_aperture(to_gt(i915)->ggtt))
+ dpt_obj = i915_gem_object_create_stolen(i915, size);
+ if (IS_ERR(dpt_obj) && !HAS_LMEM(i915)) {
+ drm_dbg_kms(&i915->drm, "Allocating dpt from smem\n");
+ dpt_obj = i915_gem_object_create_shmem(i915, size);
+ }
+ if (IS_ERR(dpt_obj))
+ return ERR_CAST(dpt_obj);
+
+ ret = i915_gem_object_lock_interruptible(dpt_obj, NULL);
+ if (!ret) {
+ ret = i915_gem_object_set_cache_level(dpt_obj, I915_CACHE_NONE);
+ i915_gem_object_unlock(dpt_obj);
+ }
+ if (ret) {
+ i915_gem_object_put(dpt_obj);
+ return ERR_PTR(ret);
+ }
+
+ dpt = kzalloc(sizeof(*dpt), GFP_KERNEL);
+ if (!dpt) {
+ i915_gem_object_put(dpt_obj);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ vm = &dpt->vm;
+
+ vm->gt = to_gt(i915);
+ vm->i915 = i915;
+ vm->dma = i915->drm.dev;
+ vm->total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
+ vm->is_dpt = true;
+
+ i915_address_space_init(vm, VM_CLASS_DPT);
+
+ vm->insert_page = dpt_insert_page;
+ vm->clear_range = dpt_clear_range;
+ vm->insert_entries = dpt_insert_entries;
+ vm->cleanup = dpt_cleanup;
+
+ vm->vma_ops.bind_vma = dpt_bind_vma;
+ vm->vma_ops.unbind_vma = dpt_unbind_vma;
+
+ vm->pte_encode = gen8_ggtt_pte_encode;
+
+ dpt->obj = dpt_obj;
+ dpt->obj->is_dpt = true;
+
+ return &dpt->vm;
+}
+
+void intel_dpt_destroy(struct i915_address_space *vm)
+{
+ struct i915_dpt *dpt = i915_vm_to_dpt(vm);
+
+ dpt->obj->is_dpt = false;
+ i915_vm_put(&dpt->vm);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_dpt.h b/drivers/gpu/drm/i915/display/intel_dpt.h
new file mode 100644
index 000000000..e18a9f767
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dpt.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef __INTEL_DPT_H__
+#define __INTEL_DPT_H__
+
+struct drm_i915_private;
+
+struct i915_address_space;
+struct i915_vma;
+struct intel_framebuffer;
+
+void intel_dpt_destroy(struct i915_address_space *vm);
+struct i915_vma *intel_dpt_pin(struct i915_address_space *vm);
+void intel_dpt_unpin(struct i915_address_space *vm);
+void intel_dpt_suspend(struct drm_i915_private *i915);
+void intel_dpt_resume(struct drm_i915_private *i915);
+struct i915_address_space *
+intel_dpt_create(struct intel_framebuffer *fb);
+
+#endif /* __INTEL_DPT_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_drrs.c b/drivers/gpu/drm/i915/display/intel_drrs.c
new file mode 100644
index 000000000..7da4a9cbe
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_drrs.c
@@ -0,0 +1,299 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "intel_atomic.h"
+#include "intel_de.h"
+#include "intel_display_types.h"
+#include "intel_drrs.h"
+#include "intel_panel.h"
+
+/**
+ * DOC: Display Refresh Rate Switching (DRRS)
+ *
+ * Display Refresh Rate Switching (DRRS) is a power conservation feature
+ * which enables switching between low and high refresh rates dynamically,
+ * based on the usage scenario. This feature is applicable to internal
+ * panels.
+ *
+ * Indication that the panel supports DRRS is given by the panel EDID, which
+ * would list multiple refresh rates for one resolution.
+ *
+ * DRRS is of 2 types - static and seamless.
+ * Static DRRS involves changing the refresh rate (RR) via a full modeset
+ * (which may appear as a blink on screen) and is used in dock-undock
+ * scenarios. Seamless DRRS involves changing the RR without any visual
+ * effect to the user and can be used during normal system usage. This is
+ * done by programming certain registers.
+ *
+ * Support for static/seamless DRRS may be indicated in the VBT based on
+ * inputs from the panel spec.
+ *
+ * DRRS saves power by switching to low RR based on usage scenarios.
+ *
+ * The implementation is based on frontbuffer tracking. When there is a
+ * disturbance on the screen triggered by user activity or periodic system
+ * activity, DRRS is disabled (the RR is changed to the high RR). When there
+ * is no movement on screen for a timeout of 1 second, a switch to the low
+ * RR is made.
+ *
+ * For integration with frontbuffer tracking code, intel_drrs_invalidate()
+ * and intel_drrs_flush() are called.
+ *
+ * DRRS can be further extended to support other internal panels and also
+ * the scenario of video playback wherein RR is set based on the rate
+ * requested by userspace.
+ */
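+
+/*
+ * Illustrative usage sketch only, not driver code: the frontbuffer
+ * tracking core is expected to drive DRRS roughly as follows:
+ *
+ *   // rendering is about to start on some planes: force the high RR
+ *   intel_drrs_invalidate(i915, frontbuffer_bits);
+ *
+ *   // rendering/flip has completed: upclock now, and let the delayed
+ *   // work downclock again after 1 second of idleness
+ *   intel_drrs_flush(i915, frontbuffer_bits);
+ */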
+
+const char *intel_drrs_type_str(enum drrs_type drrs_type)
+{
+ static const char * const str[] = {
+ [DRRS_TYPE_NONE] = "none",
+ [DRRS_TYPE_STATIC] = "static",
+ [DRRS_TYPE_SEAMLESS] = "seamless",
+ };
+
+ if (drrs_type >= ARRAY_SIZE(str))
+ return "<invalid>";
+
+ return str[drrs_type];
+}
+
+static void
+intel_drrs_set_refresh_rate_pipeconf(struct intel_crtc *crtc,
+ enum drrs_refresh_rate refresh_rate)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum transcoder cpu_transcoder = crtc->drrs.cpu_transcoder;
+ u32 val, bit;
+
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ bit = PIPECONF_REFRESH_RATE_ALT_VLV;
+ else
+ bit = PIPECONF_REFRESH_RATE_ALT_ILK;
+
+ val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));
+
+ if (refresh_rate == DRRS_REFRESH_RATE_LOW)
+ val |= bit;
+ else
+ val &= ~bit;
+
+ intel_de_write(dev_priv, PIPECONF(cpu_transcoder), val);
+}
+
+static void
+intel_drrs_set_refresh_rate_m_n(struct intel_crtc *crtc,
+ enum drrs_refresh_rate refresh_rate)
+{
+ intel_cpu_transcoder_set_m1_n1(crtc, crtc->drrs.cpu_transcoder,
+ refresh_rate == DRRS_REFRESH_RATE_LOW ?
+ &crtc->drrs.m2_n2 : &crtc->drrs.m_n);
+}
+
+bool intel_drrs_is_active(struct intel_crtc *crtc)
+{
+ return crtc->drrs.cpu_transcoder != INVALID_TRANSCODER;
+}
+
+static void intel_drrs_set_state(struct intel_crtc *crtc,
+ enum drrs_refresh_rate refresh_rate)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+ if (refresh_rate == crtc->drrs.refresh_rate)
+ return;
+
+ if (intel_cpu_transcoder_has_m2_n2(dev_priv, crtc->drrs.cpu_transcoder))
+ intel_drrs_set_refresh_rate_pipeconf(crtc, refresh_rate);
+ else
+ intel_drrs_set_refresh_rate_m_n(crtc, refresh_rate);
+
+ crtc->drrs.refresh_rate = refresh_rate;
+}
+
+static void intel_drrs_schedule_work(struct intel_crtc *crtc)
+{
+ mod_delayed_work(system_wq, &crtc->drrs.work, msecs_to_jiffies(1000));
+}
+
+static unsigned int intel_drrs_frontbuffer_bits(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ unsigned int frontbuffer_bits;
+
+ frontbuffer_bits = INTEL_FRONTBUFFER_ALL_MASK(crtc->pipe);
+
+ for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc,
+ crtc_state->bigjoiner_pipes)
+ frontbuffer_bits |= INTEL_FRONTBUFFER_ALL_MASK(crtc->pipe);
+
+ return frontbuffer_bits;
+}
+
+/**
+ * intel_drrs_activate - activate DRRS
+ * @crtc_state: the crtc state
+ *
+ * Activates DRRS on the crtc.
+ */
+void intel_drrs_activate(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ if (!crtc_state->has_drrs)
+ return;
+
+ if (!crtc_state->hw.active)
+ return;
+
+ if (intel_crtc_is_bigjoiner_slave(crtc_state))
+ return;
+
+ mutex_lock(&crtc->drrs.mutex);
+
+ crtc->drrs.cpu_transcoder = crtc_state->cpu_transcoder;
+ crtc->drrs.m_n = crtc_state->dp_m_n;
+ crtc->drrs.m2_n2 = crtc_state->dp_m2_n2;
+ crtc->drrs.frontbuffer_bits = intel_drrs_frontbuffer_bits(crtc_state);
+ crtc->drrs.busy_frontbuffer_bits = 0;
+
+ intel_drrs_schedule_work(crtc);
+
+ mutex_unlock(&crtc->drrs.mutex);
+}
+
+/**
+ * intel_drrs_deactivate - deactivate DRRS
+ * @old_crtc_state: the old crtc state
+ *
+ * Deactivates DRRS on the crtc.
+ */
+void intel_drrs_deactivate(const struct intel_crtc_state *old_crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
+
+ if (!old_crtc_state->has_drrs)
+ return;
+
+ if (!old_crtc_state->hw.active)
+ return;
+
+ if (intel_crtc_is_bigjoiner_slave(old_crtc_state))
+ return;
+
+ mutex_lock(&crtc->drrs.mutex);
+
+ if (intel_drrs_is_active(crtc))
+ intel_drrs_set_state(crtc, DRRS_REFRESH_RATE_HIGH);
+
+ crtc->drrs.cpu_transcoder = INVALID_TRANSCODER;
+ crtc->drrs.frontbuffer_bits = 0;
+ crtc->drrs.busy_frontbuffer_bits = 0;
+
+ mutex_unlock(&crtc->drrs.mutex);
+
+ cancel_delayed_work_sync(&crtc->drrs.work);
+}
+
+static void intel_drrs_downclock_work(struct work_struct *work)
+{
+ struct intel_crtc *crtc = container_of(work, typeof(*crtc), drrs.work.work);
+
+ mutex_lock(&crtc->drrs.mutex);
+
+ if (intel_drrs_is_active(crtc) && !crtc->drrs.busy_frontbuffer_bits)
+ intel_drrs_set_state(crtc, DRRS_REFRESH_RATE_LOW);
+
+ mutex_unlock(&crtc->drrs.mutex);
+}
+
+static void intel_drrs_frontbuffer_update(struct drm_i915_private *dev_priv,
+ unsigned int all_frontbuffer_bits,
+ bool invalidate)
+{
+ struct intel_crtc *crtc;
+
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ unsigned int frontbuffer_bits;
+
+ mutex_lock(&crtc->drrs.mutex);
+
+ frontbuffer_bits = all_frontbuffer_bits & crtc->drrs.frontbuffer_bits;
+ if (!frontbuffer_bits) {
+ mutex_unlock(&crtc->drrs.mutex);
+ continue;
+ }
+
+ if (invalidate)
+ crtc->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
+ else
+ crtc->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
+
+ /* flush/invalidate means busy screen hence upclock */
+ intel_drrs_set_state(crtc, DRRS_REFRESH_RATE_HIGH);
+
+ /*
+ * flush also means no more activity hence schedule downclock, if all
+ * other fbs are quiescent too
+ */
+ if (!crtc->drrs.busy_frontbuffer_bits)
+ intel_drrs_schedule_work(crtc);
+ else
+ cancel_delayed_work(&crtc->drrs.work);
+
+ mutex_unlock(&crtc->drrs.mutex);
+ }
+}
+
+/**
+ * intel_drrs_invalidate - Disable Idleness DRRS
+ * @dev_priv: i915 device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * This function gets called every time rendering on the given planes starts.
+ * Hence DRRS is upclocked, i.e. (LOW_RR -> HIGH_RR).
+ *
+ * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
+ */
+void intel_drrs_invalidate(struct drm_i915_private *dev_priv,
+ unsigned int frontbuffer_bits)
+{
+ intel_drrs_frontbuffer_update(dev_priv, frontbuffer_bits, true);
+}
+
+/**
+ * intel_drrs_flush - Restart Idleness DRRS
+ * @dev_priv: i915 device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * This function gets called every time rendering on the given planes has
+ * completed or a flip on a crtc has completed. DRRS is upclocked
+ * (LOW_RR -> HIGH_RR), and idleness detection is restarted if no other
+ * planes are dirty.
+ *
+ * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
+ */
+void intel_drrs_flush(struct drm_i915_private *dev_priv,
+ unsigned int frontbuffer_bits)
+{
+ intel_drrs_frontbuffer_update(dev_priv, frontbuffer_bits, false);
+}
+
+/**
+ * intel_crtc_drrs_init - Init DRRS for CRTC
+ * @crtc: crtc
+ *
+ * This function is called only once at driver load to initialize the
+ * basic DRRS state.
+ */
+void intel_crtc_drrs_init(struct intel_crtc *crtc)
+{
+ INIT_DELAYED_WORK(&crtc->drrs.work, intel_drrs_downclock_work);
+ mutex_init(&crtc->drrs.mutex);
+ crtc->drrs.cpu_transcoder = INVALID_TRANSCODER;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_drrs.h b/drivers/gpu/drm/i915/display/intel_drrs.h
new file mode 100644
index 000000000..3ad1be1ad
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_drrs.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef __INTEL_DRRS_H__
+#define __INTEL_DRRS_H__
+
+#include <linux/types.h>
+
+enum drrs_type;
+struct drm_i915_private;
+struct intel_atomic_state;
+struct intel_crtc;
+struct intel_crtc_state;
+struct intel_connector;
+
+const char *intel_drrs_type_str(enum drrs_type drrs_type);
+bool intel_drrs_is_active(struct intel_crtc *crtc);
+void intel_drrs_activate(const struct intel_crtc_state *crtc_state);
+void intel_drrs_deactivate(const struct intel_crtc_state *crtc_state);
+void intel_drrs_invalidate(struct drm_i915_private *dev_priv,
+ unsigned int frontbuffer_bits);
+void intel_drrs_flush(struct drm_i915_private *dev_priv,
+ unsigned int frontbuffer_bits);
+void intel_crtc_drrs_init(struct intel_crtc *crtc);
+
+#endif /* __INTEL_DRRS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c
new file mode 100644
index 000000000..fc9c3e41c
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dsb.c
@@ -0,0 +1,363 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ *
+ */
+
+#include "gem/i915_gem_internal.h"
+
+#include "i915_drv.h"
+#include "intel_de.h"
+#include "intel_display_types.h"
+#include "intel_dsb.h"
+
+struct i915_vma;
+
+enum dsb_id {
+ INVALID_DSB = -1,
+ DSB1,
+ DSB2,
+ DSB3,
+ MAX_DSB_PER_PIPE
+};
+
+struct intel_dsb {
+ enum dsb_id id;
+ u32 *cmd_buf;
+ struct i915_vma *vma;
+
+ /*
+	 * free_pos points to the first free entry position and helps in
+	 * calculating the tail of the command buffer.
+ */
+ int free_pos;
+
+ /*
+	 * ins_start_offset stores the start offset of the current dsb
+	 * instruction and helps in identifying a batch of writes to an
+	 * auto-increment register.
+ */
+ u32 ins_start_offset;
+};
+
+#define DSB_BUF_SIZE (2 * PAGE_SIZE)
+
+/**
+ * DOC: DSB
+ *
+ * A DSB (Display State Buffer) is a queue of MMIO instructions in memory
+ * which can be offloaded to the DSB HW in the display controller. The DSB
+ * HW is a DMA engine that can be programmed to fetch the DSB from memory.
+ * It allows the driver to batch-submit display HW programming, which helps
+ * to reduce loading time and CPU activity, thereby making context switches
+ * faster. DSB support is available from Gen12 Intel graphics platforms
+ * onwards.
+ *
+ * DSBs can access only the pipe, plane, and transcoder Data Island Packet
+ * registers.
+ *
+ * The DSB HW supports only register writes (both indexed and direct MMIO
+ * writes); no register reads are possible with the DSB HW engine.
+ */
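+
+/*
+ * Illustrative lifecycle sketch only, not driver code, using the API
+ * implemented below; "reg" stands in for any DSB-writable register:
+ *
+ *   intel_dsb_prepare(crtc_state);             // allocate, pin and map the buffer
+ *   intel_dsb_reg_write(crtc_state, reg, val); // queue a write (MMIO fallback if no DSB)
+ *   intel_dsb_commit(crtc_state);              // start the DSB engine and wait
+ *   intel_dsb_cleanup(crtc_state);             // unpin and release the buffer
+ */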
+
+/* DSB opcodes. */
+#define DSB_OPCODE_SHIFT 24
+#define DSB_OPCODE_MMIO_WRITE 0x1
+#define DSB_OPCODE_INDEXED_WRITE 0x9
+#define DSB_BYTE_EN 0xF
+#define DSB_BYTE_EN_SHIFT 20
+#define DSB_REG_VALUE_MASK 0xfffff
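+
+/*
+ * Buffer layout sketch, derived from intel_dsb_reg_write() below: a direct
+ * MMIO write occupies two dwords in the command buffer:
+ *
+ *   buf[n + 0] = value;
+ *   buf[n + 1] = (DSB_OPCODE_MMIO_WRITE << DSB_OPCODE_SHIFT) |
+ *                (DSB_BYTE_EN << DSB_BYTE_EN_SHIFT) |
+ *                i915_mmio_reg_offset(reg);
+ */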
+
+static bool is_dsb_busy(struct drm_i915_private *i915, enum pipe pipe,
+ enum dsb_id id)
+{
+ return DSB_STATUS & intel_de_read(i915, DSB_CTRL(pipe, id));
+}
+
+static bool intel_dsb_enable_engine(struct drm_i915_private *i915,
+ enum pipe pipe, enum dsb_id id)
+{
+ u32 dsb_ctrl;
+
+ dsb_ctrl = intel_de_read(i915, DSB_CTRL(pipe, id));
+ if (DSB_STATUS & dsb_ctrl) {
+ drm_dbg_kms(&i915->drm, "DSB engine is busy.\n");
+ return false;
+ }
+
+ dsb_ctrl |= DSB_ENABLE;
+ intel_de_write(i915, DSB_CTRL(pipe, id), dsb_ctrl);
+
+ intel_de_posting_read(i915, DSB_CTRL(pipe, id));
+ return true;
+}
+
+static bool intel_dsb_disable_engine(struct drm_i915_private *i915,
+ enum pipe pipe, enum dsb_id id)
+{
+ u32 dsb_ctrl;
+
+ dsb_ctrl = intel_de_read(i915, DSB_CTRL(pipe, id));
+ if (DSB_STATUS & dsb_ctrl) {
+ drm_dbg_kms(&i915->drm, "DSB engine is busy.\n");
+ return false;
+ }
+
+ dsb_ctrl &= ~DSB_ENABLE;
+ intel_de_write(i915, DSB_CTRL(pipe, id), dsb_ctrl);
+
+ intel_de_posting_read(i915, DSB_CTRL(pipe, id));
+ return true;
+}
+
+/**
+ * intel_dsb_indexed_reg_write() - Write to the DSB context for an
+ * auto-increment register.
+ * @crtc_state: intel_crtc_state structure
+ * @reg: register address.
+ * @val: value.
+ *
+ * This function is used for writing register-value pairs into the DSB
+ * command buffer for an auto-increment register. If no DSB is available,
+ * the register is programmed directly through an MMIO write; on command
+ * buffer overflow, a warning is emitted and the write is dropped.
+ */
+void intel_dsb_indexed_reg_write(const struct intel_crtc_state *crtc_state,
+ i915_reg_t reg, u32 val)
+{
+ struct intel_dsb *dsb = crtc_state->dsb;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 *buf;
+ u32 reg_val;
+
+ if (!dsb) {
+ intel_de_write_fw(dev_priv, reg, val);
+ return;
+ }
+ buf = dsb->cmd_buf;
+	/* free_pos counts dwords while DSB_BUF_SIZE is in bytes */
+	if (drm_WARN_ON(&dev_priv->drm, dsb->free_pos >= DSB_BUF_SIZE / 4)) {
+ drm_dbg_kms(&dev_priv->drm, "DSB buffer overflow\n");
+ return;
+ }
+
+ /*
+ * For example the buffer will look like below for 3 dwords for auto
+ * increment register:
+ * +--------------------------------------------------------+
+ * | size = 3 | offset &| value1 | value2 | value3 | zero |
+ * | | opcode | | | | |
+ * +--------------------------------------------------------+
+ * + + + + + + +
+ * 0 4 8 12 16 20 24
+ * Byte
+ *
+	 * As every instruction is 8 byte aligned, the index of a dsb
+	 * instruction is always even when dealing with the u32 array. If an
+	 * odd number of dwords is written, a zero is appended at the end for
+	 * padding.
+ */
+ reg_val = buf[dsb->ins_start_offset + 1] & DSB_REG_VALUE_MASK;
+ if (reg_val != i915_mmio_reg_offset(reg)) {
+ /* Every instruction should be 8 byte aligned. */
+ dsb->free_pos = ALIGN(dsb->free_pos, 2);
+
+ dsb->ins_start_offset = dsb->free_pos;
+
+ /* Update the size. */
+ buf[dsb->free_pos++] = 1;
+
+ /* Update the opcode and reg. */
+ buf[dsb->free_pos++] = (DSB_OPCODE_INDEXED_WRITE <<
+ DSB_OPCODE_SHIFT) |
+ i915_mmio_reg_offset(reg);
+
+ /* Update the value. */
+ buf[dsb->free_pos++] = val;
+ } else {
+ /* Update the new value. */
+ buf[dsb->free_pos++] = val;
+
+ /* Update the size. */
+ buf[dsb->ins_start_offset]++;
+ }
+
+	/* if the number of data words is odd, the last dword should be 0. */
+ if (dsb->free_pos & 0x1)
+ buf[dsb->free_pos] = 0;
+}
+
+/**
+ * intel_dsb_reg_write() - Write to the DSB context for a normal
+ * register.
+ * @crtc_state: intel_crtc_state structure
+ * @reg: register address.
+ * @val: value.
+ *
+ * This function is used for writing a register-value pair into the DSB
+ * command buffer. If no DSB is available, the register is programmed
+ * directly through an MMIO write; on command buffer overflow, a warning
+ * is emitted and the write is dropped.
+ */
+void intel_dsb_reg_write(const struct intel_crtc_state *crtc_state,
+ i915_reg_t reg, u32 val)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_dsb *dsb;
+ u32 *buf;
+
+ dsb = crtc_state->dsb;
+ if (!dsb) {
+ intel_de_write_fw(dev_priv, reg, val);
+ return;
+ }
+
+ buf = dsb->cmd_buf;
+	/* free_pos counts dwords while DSB_BUF_SIZE is in bytes */
+	if (drm_WARN_ON(&dev_priv->drm, dsb->free_pos >= DSB_BUF_SIZE / 4)) {
+ drm_dbg_kms(&dev_priv->drm, "DSB buffer overflow\n");
+ return;
+ }
+
+ dsb->ins_start_offset = dsb->free_pos;
+ buf[dsb->free_pos++] = val;
+ buf[dsb->free_pos++] = (DSB_OPCODE_MMIO_WRITE << DSB_OPCODE_SHIFT) |
+ (DSB_BYTE_EN << DSB_BYTE_EN_SHIFT) |
+ i915_mmio_reg_offset(reg);
+}
+
+/**
+ * intel_dsb_commit() - Trigger workload execution of DSB.
+ * @crtc_state: intel_crtc_state structure
+ *
+ * This function is used to do the actual hardware write using the DSB,
+ * and it also resets the DSB context afterwards. On errors the queued
+ * batch is abandoned.
+ */
+void intel_dsb_commit(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_dsb *dsb = crtc_state->dsb;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ enum pipe pipe = crtc->pipe;
+ u32 tail;
+
+ if (!(dsb && dsb->free_pos))
+ return;
+
+ if (!intel_dsb_enable_engine(dev_priv, pipe, dsb->id))
+ goto reset;
+
+ if (is_dsb_busy(dev_priv, pipe, dsb->id)) {
+ drm_err(&dev_priv->drm,
+ "HEAD_PTR write failed - dsb engine is busy.\n");
+ goto reset;
+ }
+ intel_de_write(dev_priv, DSB_HEAD(pipe, dsb->id),
+ i915_ggtt_offset(dsb->vma));
+
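+	/*
+	 * The tail pointer is cacheline aligned; zero out the slack between
+	 * the last queued dword and the aligned tail so that no stale data
+	 * is executed as instructions.
+	 */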
+ tail = ALIGN(dsb->free_pos * 4, CACHELINE_BYTES);
+ if (tail > dsb->free_pos * 4)
+ memset(&dsb->cmd_buf[dsb->free_pos], 0,
+ (tail - dsb->free_pos * 4));
+
+ if (is_dsb_busy(dev_priv, pipe, dsb->id)) {
+ drm_err(&dev_priv->drm,
+ "TAIL_PTR write failed - dsb engine is busy.\n");
+ goto reset;
+ }
+ drm_dbg_kms(&dev_priv->drm,
+ "DSB execution started - head 0x%x, tail 0x%x\n",
+ i915_ggtt_offset(dsb->vma), tail);
+ intel_de_write(dev_priv, DSB_TAIL(pipe, dsb->id),
+ i915_ggtt_offset(dsb->vma) + tail);
+ if (wait_for(!is_dsb_busy(dev_priv, pipe, dsb->id), 1)) {
+ drm_err(&dev_priv->drm,
+ "Timed out waiting for DSB workload completion.\n");
+ goto reset;
+ }
+
+reset:
+ dsb->free_pos = 0;
+ dsb->ins_start_offset = 0;
+ intel_dsb_disable_engine(dev_priv, pipe, dsb->id);
+}
+
+/**
+ * intel_dsb_prepare() - Allocate, pin and map the DSB command buffer.
+ * @crtc_state: intel_crtc_state structure to prepare associated dsb instance.
+ *
+ * This function prepares the command buffer which is used to store the dsb
+ * instructions and their data.
+ */
+void intel_dsb_prepare(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_dsb *dsb;
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ u32 *buf;
+ intel_wakeref_t wakeref;
+
+ if (!HAS_DSB(i915))
+ return;
+
+ dsb = kmalloc(sizeof(*dsb), GFP_KERNEL);
+ if (!dsb) {
+ drm_err(&i915->drm, "DSB object creation failed\n");
+ return;
+ }
+
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+
+ obj = i915_gem_object_create_internal(i915, DSB_BUF_SIZE);
+ if (IS_ERR(obj)) {
+ kfree(dsb);
+ goto out;
+ }
+
+ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
+ if (IS_ERR(vma)) {
+ i915_gem_object_put(obj);
+ kfree(dsb);
+ goto out;
+ }
+
+ buf = i915_gem_object_pin_map_unlocked(vma->obj, I915_MAP_WC);
+ if (IS_ERR(buf)) {
+ i915_vma_unpin_and_release(&vma, I915_VMA_RELEASE_MAP);
+ kfree(dsb);
+ goto out;
+ }
+
+ dsb->id = DSB1;
+ dsb->vma = vma;
+ dsb->cmd_buf = buf;
+ dsb->free_pos = 0;
+ dsb->ins_start_offset = 0;
+ crtc_state->dsb = dsb;
+out:
+ if (!crtc_state->dsb)
+ drm_info(&i915->drm,
+ "DSB queue setup failed, will fallback to MMIO for display HW programming\n");
+
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+}
+
+/**
+ * intel_dsb_cleanup() - To cleanup DSB context.
+ * @crtc_state: intel_crtc_state structure to cleanup associated dsb instance.
+ *
+ * This function cleans up the DSB context by unpinning and releasing
+ * the VMA object associated with it.
+ */
+void intel_dsb_cleanup(struct intel_crtc_state *crtc_state)
+{
+ if (!crtc_state->dsb)
+ return;
+
+ i915_vma_unpin_and_release(&crtc_state->dsb->vma, I915_VMA_RELEASE_MAP);
+ kfree(crtc_state->dsb);
+ crtc_state->dsb = NULL;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.h b/drivers/gpu/drm/i915/display/intel_dsb.h
new file mode 100644
index 000000000..74dd2b334
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dsb.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef _INTEL_DSB_H
+#define _INTEL_DSB_H
+
+#include <linux/types.h>
+
+#include "i915_reg_defs.h"
+
+struct intel_crtc_state;
+
+void intel_dsb_prepare(struct intel_crtc_state *crtc_state);
+void intel_dsb_cleanup(struct intel_crtc_state *crtc_state);
+void intel_dsb_reg_write(const struct intel_crtc_state *crtc_state,
+ i915_reg_t reg, u32 val);
+void intel_dsb_indexed_reg_write(const struct intel_crtc_state *crtc_state,
+ i915_reg_t reg, u32 val);
+void intel_dsb_commit(const struct intel_crtc_state *crtc_state);
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/intel_dsi.c b/drivers/gpu/drm/i915/display/intel_dsi.c
new file mode 100644
index 000000000..5efdd471a
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dsi.c
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include <drm/drm_mipi_dsi.h>
+
+#include "i915_drv.h"
+#include "intel_dsi.h"
+#include "intel_panel.h"
+
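+/*
+ * Returns the per-lane link bitrate in the same frequency unit as pclk.
+ * Worked example with illustrative numbers: a 148500 kHz pixel clock at
+ * RGB888 (24 bpp) over 4 lanes gives 148500 * 24 / 4 = 891000 kbps per
+ * lane.
+ */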
+int intel_dsi_bitrate(const struct intel_dsi *intel_dsi)
+{
+ int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
+
+ if (WARN_ON(bpp < 0))
+ bpp = 16;
+
+ return intel_dsi->pclk * bpp / intel_dsi->lane_count;
+}
+
+int intel_dsi_tlpx_ns(const struct intel_dsi *intel_dsi)
+{
+ switch (intel_dsi->escape_clk_div) {
+ default:
+ case 0:
+ return 50;
+ case 1:
+ return 100;
+ case 2:
+ return 200;
+ }
+}
+
+int intel_dsi_get_modes(struct drm_connector *connector)
+{
+ return intel_panel_get_modes(to_intel_connector(connector));
+}
+
+enum drm_mode_status intel_dsi_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ const struct drm_display_mode *fixed_mode =
+ intel_panel_fixed_mode(intel_connector, mode);
+ int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
+ enum drm_mode_status status;
+
+ drm_dbg_kms(&dev_priv->drm, "\n");
+
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
+ status = intel_panel_mode_valid(intel_connector, mode);
+ if (status != MODE_OK)
+ return status;
+
+ if (fixed_mode->clock > max_dotclk)
+ return MODE_CLOCK_HIGH;
+
+ return intel_mode_valid_max_plane_size(dev_priv, mode, false);
+}
+
+struct intel_dsi_host *intel_dsi_host_init(struct intel_dsi *intel_dsi,
+ const struct mipi_dsi_host_ops *funcs,
+ enum port port)
+{
+ struct intel_dsi_host *host;
+ struct mipi_dsi_device *device;
+
+ host = kzalloc(sizeof(*host), GFP_KERNEL);
+ if (!host)
+ return NULL;
+
+ host->base.ops = funcs;
+ host->intel_dsi = intel_dsi;
+ host->port = port;
+
+ /*
+ * We should call mipi_dsi_host_register(&host->base) here, but we don't
+ * have a host->dev, and we don't have OF stuff either. So just use the
+ * dsi framework as a library and hope for the best. Create the dsi
+ * devices by ourselves here too. Need to be careful though, because we
+ * don't initialize any of the driver model devices here.
+ */
+ device = kzalloc(sizeof(*device), GFP_KERNEL);
+ if (!device) {
+ kfree(host);
+ return NULL;
+ }
+
+ device->host = &host->base;
+ host->device = device;
+
+ return host;
+}
+
+enum drm_panel_orientation
+intel_dsi_get_panel_orientation(struct intel_connector *connector)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ enum drm_panel_orientation orientation;
+
+ orientation = connector->panel.vbt.dsi.orientation;
+ if (orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
+ return orientation;
+
+ orientation = dev_priv->display.vbt.orientation;
+ if (orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
+ return orientation;
+
+ return DRM_MODE_PANEL_ORIENTATION_NORMAL;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_dsi.h b/drivers/gpu/drm/i915/display/intel_dsi.h
new file mode 100644
index 000000000..ce80bd8be
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dsi.h
@@ -0,0 +1,177 @@
+/*
+ * Copyright © 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _INTEL_DSI_H
+#define _INTEL_DSI_H
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_mipi_dsi.h>
+
+#include "intel_display_types.h"
+
+#define INTEL_DSI_VIDEO_MODE 0
+#define INTEL_DSI_COMMAND_MODE 1
+
+/* Dual Link support */
+#define DSI_DUAL_LINK_NONE 0
+#define DSI_DUAL_LINK_FRONT_BACK 1
+#define DSI_DUAL_LINK_PIXEL_ALT 2
+
+struct intel_dsi_host;
+
+struct intel_dsi {
+ struct intel_encoder base;
+
+ struct intel_dsi_host *dsi_hosts[I915_MAX_PORTS];
+ intel_wakeref_t io_wakeref[I915_MAX_PORTS];
+
+ /* GPIO Desc for panel and backlight control */
+ struct gpio_desc *gpio_panel;
+ struct gpio_desc *gpio_backlight;
+
+ struct intel_connector *attached_connector;
+
+ /* bit mask of ports (vlv dsi) or phys (icl dsi) being driven */
+ union {
+ u16 ports; /* VLV DSI */
+ u16 phys; /* ICL DSI */
+ };
+
+ /* if true, use HS mode, otherwise LP */
+ bool hs;
+
+ /* virtual channel */
+ int channel;
+
+ /* Video mode or command mode */
+ u16 operation_mode;
+
+ /* number of DSI lanes */
+ unsigned int lane_count;
+
+ /* i2c bus associated with the slave device */
+ int i2c_bus_num;
+
+ /*
+ * video mode pixel format
+ *
+ * XXX: consolidate on .format in struct mipi_dsi_device.
+ */
+ enum mipi_dsi_pixel_format pixel_format;
+
+ /* NON_BURST_SYNC_PULSE, NON_BURST_SYNC_EVENTS, or BURST_MODE */
+ int video_mode;
+
+ /* eot for MIPI_EOT_DISABLE register */
+ u8 eotp_pkt;
+ u8 clock_stop;
+
+ u8 escape_clk_div;
+ u8 dual_link;
+
+ /* RGB or BGR */
+ bool bgr_enabled;
+
+ u8 pixel_overlap;
+ u32 port_bits;
+ u32 bw_timer;
+ u32 dphy_reg;
+
+ /* data lanes dphy timing */
+ u32 dphy_data_lane_reg;
+ u32 video_frmt_cfg_bits;
+ u16 lp_byte_clk;
+
+ /* timeouts in byte clocks */
+ u16 hs_tx_timeout;
+ u16 lp_rx_timeout;
+ u16 turn_arnd_val;
+ u16 rst_timer_val;
+ u16 hs_to_lp_count;
+ u16 clk_lp_to_hs_count;
+ u16 clk_hs_to_lp_count;
+
+ u16 init_count;
+ u32 pclk;
+ u16 burst_mode_ratio;
+
+ /* all delays in ms */
+ u16 backlight_off_delay;
+ u16 backlight_on_delay;
+ u16 panel_on_delay;
+ u16 panel_off_delay;
+ u16 panel_pwr_cycle_delay;
+ ktime_t panel_power_off_time;
+};
+
+struct intel_dsi_host {
+ struct mipi_dsi_host base;
+ struct intel_dsi *intel_dsi;
+ enum port port;
+
+ /* our little hack */
+ struct mipi_dsi_device *device;
+};
+
+static inline struct intel_dsi_host *to_intel_dsi_host(struct mipi_dsi_host *h)
+{
+ return container_of(h, struct intel_dsi_host, base);
+}
+
+#define for_each_dsi_port(__port, __ports_mask) \
+ for_each_port_masked(__port, __ports_mask)
+#define for_each_dsi_phy(__phy, __phys_mask) \
+ for_each_phy_masked(__phy, __phys_mask)
+
+static inline struct intel_dsi *enc_to_intel_dsi(struct intel_encoder *encoder)
+{
+ return container_of(&encoder->base, struct intel_dsi, base.base);
+}
+
+static inline bool is_vid_mode(struct intel_dsi *intel_dsi)
+{
+ return intel_dsi->operation_mode == INTEL_DSI_VIDEO_MODE;
+}
+
+static inline bool is_cmd_mode(struct intel_dsi *intel_dsi)
+{
+ return intel_dsi->operation_mode == INTEL_DSI_COMMAND_MODE;
+}
+
+static inline u16 intel_dsi_encoder_ports(struct intel_encoder *encoder)
+{
+ return enc_to_intel_dsi(encoder)->ports;
+}
+
+int intel_dsi_bitrate(const struct intel_dsi *intel_dsi);
+int intel_dsi_tlpx_ns(const struct intel_dsi *intel_dsi);
+enum drm_panel_orientation
+intel_dsi_get_panel_orientation(struct intel_connector *connector);
+int intel_dsi_get_modes(struct drm_connector *connector);
+enum drm_mode_status intel_dsi_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode);
+struct intel_dsi_host *intel_dsi_host_init(struct intel_dsi *intel_dsi,
+ const struct mipi_dsi_host_ops *funcs,
+ enum port port);
+
+#endif /* _INTEL_DSI_H */
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
new file mode 100644
index 000000000..20e466d84
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
@@ -0,0 +1,200 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Deepak M <m.deepak at intel.com>
+ */
+
+#include <drm/drm_mipi_dsi.h>
+#include <video/mipi_display.h>
+
+#include "i915_drv.h"
+#include "intel_display_types.h"
+#include "intel_dsi.h"
+#include "intel_dsi_dcs_backlight.h"
+
+#define CONTROL_DISPLAY_BCTRL (1 << 5)
+#define CONTROL_DISPLAY_DD (1 << 3)
+#define CONTROL_DISPLAY_BL (1 << 2)
+
+#define POWER_SAVE_OFF (0 << 0)
+#define POWER_SAVE_LOW (1 << 0)
+#define POWER_SAVE_MEDIUM (2 << 0)
+#define POWER_SAVE_HIGH (3 << 0)
+#define POWER_SAVE_OUTDOOR_MODE (4 << 0)
+
+#define PANEL_PWM_MAX_VALUE 0xFF
+
+static u32 dcs_get_backlight(struct intel_connector *connector, enum pipe unused)
+{
+ struct intel_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ struct intel_panel *panel = &connector->panel;
+ struct mipi_dsi_device *dsi_device;
+ u8 data[2] = {};
+ enum port port;
+ size_t len = panel->backlight.max > U8_MAX ? 2 : 1;
+
+ for_each_dsi_port(port, panel->vbt.dsi.bl_ports) {
+ dsi_device = intel_dsi->dsi_hosts[port]->device;
+ mipi_dsi_dcs_read(dsi_device, MIPI_DCS_GET_DISPLAY_BRIGHTNESS,
+ &data, len);
+ break;
+ }
+
+ return (data[1] << 8) | data[0];
+}
+
+static void dcs_set_backlight(const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(conn_state->best_encoder));
+ struct intel_panel *panel = &to_intel_connector(conn_state->connector)->panel;
+ struct mipi_dsi_device *dsi_device;
+ u8 data[2] = {};
+ enum port port;
+ size_t len = panel->backlight.max > U8_MAX ? 2 : 1;
+ unsigned long mode_flags;
+
+ if (len == 1) {
+ data[0] = level;
+ } else {
+ data[0] = level >> 8;
+ data[1] = level;
+ }
+
+ for_each_dsi_port(port, panel->vbt.dsi.bl_ports) {
+ dsi_device = intel_dsi->dsi_hosts[port]->device;
+ mode_flags = dsi_device->mode_flags;
+ dsi_device->mode_flags &= ~MIPI_DSI_MODE_LPM;
+ mipi_dsi_dcs_write(dsi_device, MIPI_DCS_SET_DISPLAY_BRIGHTNESS,
+ &data, len);
+ dsi_device->mode_flags = mode_flags;
+ }
+}
+
+static void dcs_disable_backlight(const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(conn_state->best_encoder));
+ struct intel_panel *panel = &to_intel_connector(conn_state->connector)->panel;
+ struct mipi_dsi_device *dsi_device;
+ enum port port;
+
+ dcs_set_backlight(conn_state, 0);
+
+ for_each_dsi_port(port, panel->vbt.dsi.cabc_ports) {
+ u8 cabc = POWER_SAVE_OFF;
+
+ dsi_device = intel_dsi->dsi_hosts[port]->device;
+ mipi_dsi_dcs_write(dsi_device, MIPI_DCS_WRITE_POWER_SAVE,
+ &cabc, sizeof(cabc));
+ }
+
+ for_each_dsi_port(port, panel->vbt.dsi.bl_ports) {
+ u8 ctrl = 0;
+
+ dsi_device = intel_dsi->dsi_hosts[port]->device;
+
+ mipi_dsi_dcs_read(dsi_device, MIPI_DCS_GET_CONTROL_DISPLAY,
+ &ctrl, sizeof(ctrl));
+
+ ctrl &= ~CONTROL_DISPLAY_BL;
+ ctrl &= ~CONTROL_DISPLAY_DD;
+ ctrl &= ~CONTROL_DISPLAY_BCTRL;
+
+ mipi_dsi_dcs_write(dsi_device, MIPI_DCS_WRITE_CONTROL_DISPLAY,
+ &ctrl, sizeof(ctrl));
+ }
+}
+
+static void dcs_enable_backlight(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(conn_state->best_encoder));
+ struct intel_panel *panel = &to_intel_connector(conn_state->connector)->panel;
+ struct mipi_dsi_device *dsi_device;
+ enum port port;
+
+ for_each_dsi_port(port, panel->vbt.dsi.bl_ports) {
+ u8 ctrl = 0;
+
+ dsi_device = intel_dsi->dsi_hosts[port]->device;
+
+ mipi_dsi_dcs_read(dsi_device, MIPI_DCS_GET_CONTROL_DISPLAY,
+ &ctrl, sizeof(ctrl));
+
+ ctrl |= CONTROL_DISPLAY_BL;
+ ctrl |= CONTROL_DISPLAY_DD;
+ ctrl |= CONTROL_DISPLAY_BCTRL;
+
+ mipi_dsi_dcs_write(dsi_device, MIPI_DCS_WRITE_CONTROL_DISPLAY,
+ &ctrl, sizeof(ctrl));
+ }
+
+ for_each_dsi_port(port, panel->vbt.dsi.cabc_ports) {
+ u8 cabc = POWER_SAVE_MEDIUM;
+
+ dsi_device = intel_dsi->dsi_hosts[port]->device;
+ mipi_dsi_dcs_write(dsi_device, MIPI_DCS_WRITE_POWER_SAVE,
+ &cabc, sizeof(cabc));
+ }
+
+ dcs_set_backlight(conn_state, level);
+}
+
+static int dcs_setup_backlight(struct intel_connector *connector,
+ enum pipe unused)
+{
+ struct intel_panel *panel = &connector->panel;
+
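+	/*
+	 * e.g. 10 VBT precision bits -> max = (1 << 10) - 1 = 1023;
+	 * otherwise fall back to the classic 8-bit PWM range (0xFF).
+	 */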
+ if (panel->vbt.backlight.brightness_precision_bits > 8)
+ panel->backlight.max = (1 << panel->vbt.backlight.brightness_precision_bits) - 1;
+ else
+ panel->backlight.max = PANEL_PWM_MAX_VALUE;
+
+ panel->backlight.level = panel->backlight.max;
+
+ return 0;
+}
+
+static const struct intel_panel_bl_funcs dcs_bl_funcs = {
+ .setup = dcs_setup_backlight,
+ .enable = dcs_enable_backlight,
+ .disable = dcs_disable_backlight,
+ .set = dcs_set_backlight,
+ .get = dcs_get_backlight,
+};
+
+int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector)
+{
+ struct drm_device *dev = intel_connector->base.dev;
+ struct intel_encoder *encoder = intel_attached_encoder(intel_connector);
+ struct intel_panel *panel = &intel_connector->panel;
+
+ if (panel->vbt.backlight.type != INTEL_BACKLIGHT_DSI_DCS)
+ return -ENODEV;
+
+ if (drm_WARN_ON(dev, encoder->type != INTEL_OUTPUT_DSI))
+ return -EINVAL;
+
+ panel->backlight.funcs = &dcs_bl_funcs;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.h b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.h
new file mode 100644
index 000000000..eb0194784
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_DSI_DCS_BACKLIGHT_H__
+#define __INTEL_DSI_DCS_BACKLIGHT_H__
+
+struct intel_connector;
+
+int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector);
+
+#endif /* __INTEL_DSI_DCS_BACKLIGHT_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
new file mode 100644
index 000000000..f102c13cb
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
@@ -0,0 +1,1037 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Shobhit Kumar <shobhit.kumar@intel.com>
+ *
+ */
+
+#include <linux/gpio/consumer.h>
+#include <linux/gpio/machine.h>
+#include <linux/mfd/intel_soc_pmic.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/pinctrl/machine.h>
+#include <linux/slab.h>
+#include <linux/string_helpers.h>
+
+#include <asm/unaligned.h>
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+
+#include <video/mipi_display.h>
+
+#include "i915_drv.h"
+#include "i915_reg.h"
+#include "intel_de.h"
+#include "intel_display_types.h"
+#include "intel_dsi.h"
+#include "intel_dsi_vbt.h"
+#include "intel_gmbus_regs.h"
+#include "vlv_dsi.h"
+#include "vlv_dsi_regs.h"
+#include "vlv_sideband.h"
+
+#define MIPI_TRANSFER_MODE_SHIFT 0
+#define MIPI_VIRTUAL_CHANNEL_SHIFT 1
+#define MIPI_PORT_SHIFT 3
+
+/* base offsets for gpio pads */
+#define VLV_GPIO_NC_0_HV_DDI0_HPD 0x4130
+#define VLV_GPIO_NC_1_HV_DDI0_DDC_SDA 0x4120
+#define VLV_GPIO_NC_2_HV_DDI0_DDC_SCL 0x4110
+#define VLV_GPIO_NC_3_PANEL0_VDDEN 0x4140
+#define VLV_GPIO_NC_4_PANEL0_BKLTEN 0x4150
+#define VLV_GPIO_NC_5_PANEL0_BKLTCTL 0x4160
+#define VLV_GPIO_NC_6_HV_DDI1_HPD 0x4180
+#define VLV_GPIO_NC_7_HV_DDI1_DDC_SDA 0x4190
+#define VLV_GPIO_NC_8_HV_DDI1_DDC_SCL 0x4170
+#define VLV_GPIO_NC_9_PANEL1_VDDEN 0x4100
+#define VLV_GPIO_NC_10_PANEL1_BKLTEN 0x40E0
+#define VLV_GPIO_NC_11_PANEL1_BKLTCTL 0x40F0
+
+#define VLV_GPIO_PCONF0(base_offset) (base_offset)
+#define VLV_GPIO_PAD_VAL(base_offset) ((base_offset) + 8)
+
+struct gpio_map {
+ u16 base_offset;
+ bool init;
+};
+
+static struct gpio_map vlv_gpio_table[] = {
+ { VLV_GPIO_NC_0_HV_DDI0_HPD },
+ { VLV_GPIO_NC_1_HV_DDI0_DDC_SDA },
+ { VLV_GPIO_NC_2_HV_DDI0_DDC_SCL },
+ { VLV_GPIO_NC_3_PANEL0_VDDEN },
+ { VLV_GPIO_NC_4_PANEL0_BKLTEN },
+ { VLV_GPIO_NC_5_PANEL0_BKLTCTL },
+ { VLV_GPIO_NC_6_HV_DDI1_HPD },
+ { VLV_GPIO_NC_7_HV_DDI1_DDC_SDA },
+ { VLV_GPIO_NC_8_HV_DDI1_DDC_SCL },
+ { VLV_GPIO_NC_9_PANEL1_VDDEN },
+ { VLV_GPIO_NC_10_PANEL1_BKLTEN },
+ { VLV_GPIO_NC_11_PANEL1_BKLTCTL },
+};
+
+struct i2c_adapter_lookup {
+ u16 slave_addr;
+ struct intel_dsi *intel_dsi;
+ acpi_handle dev_handle;
+};
+
+#define CHV_GPIO_IDX_START_N 0
+#define CHV_GPIO_IDX_START_E 73
+#define CHV_GPIO_IDX_START_SW 100
+#define CHV_GPIO_IDX_START_SE 198
+
+#define CHV_VBT_MAX_PINS_PER_FMLY 15
+
+#define CHV_GPIO_PAD_CFG0(f, i) (0x4400 + (f) * 0x400 + (i) * 8)
+#define CHV_GPIO_GPIOEN (1 << 15)
+#define CHV_GPIO_GPIOCFG_GPIO (0 << 8)
+#define CHV_GPIO_GPIOCFG_GPO (1 << 8)
+#define CHV_GPIO_GPIOCFG_GPI (2 << 8)
+#define CHV_GPIO_GPIOCFG_HIZ (3 << 8)
+#define CHV_GPIO_GPIOTXSTATE(state) ((!!(state)) << 1)
+
+#define CHV_GPIO_PAD_CFG1(f, i) (0x4400 + (f) * 0x400 + (i) * 8 + 4)
+#define CHV_GPIO_CFGLOCK (1 << 31)
+
+/* ICL DSI Display GPIO Pins */
+#define ICL_GPIO_DDSP_HPD_A 0
+#define ICL_GPIO_L_VDDEN_1 1
+#define ICL_GPIO_L_BKLTEN_1 2
+#define ICL_GPIO_DDPA_CTRLCLK_1 3
+#define ICL_GPIO_DDPA_CTRLDATA_1 4
+#define ICL_GPIO_DDSP_HPD_B 5
+#define ICL_GPIO_L_VDDEN_2 6
+#define ICL_GPIO_L_BKLTEN_2 7
+#define ICL_GPIO_DDPA_CTRLCLK_2 8
+#define ICL_GPIO_DDPA_CTRLDATA_2 9
+
+static enum port intel_dsi_seq_port_to_port(struct intel_dsi *intel_dsi,
+ u8 seq_port)
+{
+ /*
+ * If single link DSI is being used on any port, the VBT sequence block
+ * send packet apparently always has 0 for the port. Just use the port
+ * we have configured, and ignore the sequence block port.
+ */
+ if (hweight8(intel_dsi->ports) == 1)
+ return ffs(intel_dsi->ports) - 1;
+
+ if (seq_port) {
+ if (intel_dsi->ports & BIT(PORT_B))
+ return PORT_B;
+ else if (intel_dsi->ports & BIT(PORT_C))
+ return PORT_C;
+ }
+
+ return PORT_A;
+}
+
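+/*
+ * For reference, the send-packet element payload parsed below is laid
+ * out as (derived from the parsing code, not from the VBT spec text):
+ *
+ *   byte 0:    flags (transfer mode, virtual channel, port)
+ *   byte 1:    packet type (MIPI_DSI_* from <video/mipi_display.h>)
+ *   bytes 2-3: payload length (u16)
+ *   byte 4...: payload data
+ */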
+static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
+ const u8 *data)
+{
+ struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
+ struct mipi_dsi_device *dsi_device;
+ u8 type, flags, seq_port;
+ u16 len;
+ enum port port;
+
+ drm_dbg_kms(&dev_priv->drm, "\n");
+
+ flags = *data++;
+ type = *data++;
+
+ len = *((u16 *) data);
+ data += 2;
+
+ seq_port = (flags >> MIPI_PORT_SHIFT) & 3;
+
+ port = intel_dsi_seq_port_to_port(intel_dsi, seq_port);
+
+ if (drm_WARN_ON(&dev_priv->drm, !intel_dsi->dsi_hosts[port]))
+ goto out;
+
+ dsi_device = intel_dsi->dsi_hosts[port]->device;
+ if (!dsi_device) {
+ drm_dbg_kms(&dev_priv->drm, "no dsi device for port %c\n",
+ port_name(port));
+ goto out;
+ }
+
+ if ((flags >> MIPI_TRANSFER_MODE_SHIFT) & 1)
+ dsi_device->mode_flags &= ~MIPI_DSI_MODE_LPM;
+ else
+ dsi_device->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ dsi_device->channel = (flags >> MIPI_VIRTUAL_CHANNEL_SHIFT) & 3;
+
+ switch (type) {
+ case MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM:
+ mipi_dsi_generic_write(dsi_device, NULL, 0);
+ break;
+ case MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM:
+ mipi_dsi_generic_write(dsi_device, data, 1);
+ break;
+ case MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM:
+ mipi_dsi_generic_write(dsi_device, data, 2);
+ break;
+ case MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM:
+ case MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM:
+ case MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM:
+ drm_dbg(&dev_priv->drm,
+ "Generic Read not yet implemented or used\n");
+ break;
+ case MIPI_DSI_GENERIC_LONG_WRITE:
+ mipi_dsi_generic_write(dsi_device, data, len);
+ break;
+ case MIPI_DSI_DCS_SHORT_WRITE:
+ mipi_dsi_dcs_write_buffer(dsi_device, data, 1);
+ break;
+ case MIPI_DSI_DCS_SHORT_WRITE_PARAM:
+ mipi_dsi_dcs_write_buffer(dsi_device, data, 2);
+ break;
+ case MIPI_DSI_DCS_READ:
+ drm_dbg(&dev_priv->drm,
+ "DCS Read not yet implemented or used\n");
+ break;
+ case MIPI_DSI_DCS_LONG_WRITE:
+ mipi_dsi_dcs_write_buffer(dsi_device, data, len);
+ break;
+ }
+
+ if (DISPLAY_VER(dev_priv) < 11)
+ vlv_dsi_wait_for_fifo_empty(intel_dsi, port);
+
+out:
+ data += len;
+
+ return data;
+}
+
+static const u8 *mipi_exec_delay(struct intel_dsi *intel_dsi, const u8 *data)
+{
+ struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev);
+ u32 delay = *((const u32 *) data);
+
+ drm_dbg_kms(&i915->drm, "\n");
+
+ usleep_range(delay, delay + 10);
+ data += 4;
+
+ return data;
+}
+
+static void vlv_exec_gpio(struct intel_connector *connector,
+ u8 gpio_source, u8 gpio_index, bool value)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct gpio_map *map;
+ u16 pconf0, padval;
+ u32 tmp;
+ u8 port;
+
+ if (gpio_index >= ARRAY_SIZE(vlv_gpio_table)) {
+ drm_dbg_kms(&dev_priv->drm, "unknown gpio index %u\n",
+ gpio_index);
+ return;
+ }
+
+ map = &vlv_gpio_table[gpio_index];
+
+ if (connector->panel.vbt.dsi.seq_version >= 3) {
+ /* XXX: this assumes vlv_gpio_table only has NC GPIOs. */
+ port = IOSF_PORT_GPIO_NC;
+ } else {
+ if (gpio_source == 0) {
+ port = IOSF_PORT_GPIO_NC;
+ } else if (gpio_source == 1) {
+ drm_dbg_kms(&dev_priv->drm, "SC gpio not supported\n");
+ return;
+ } else {
+ drm_dbg_kms(&dev_priv->drm,
+ "unknown gpio source %u\n", gpio_source);
+ return;
+ }
+ }
+
+ pconf0 = VLV_GPIO_PCONF0(map->base_offset);
+ padval = VLV_GPIO_PAD_VAL(map->base_offset);
+
+ vlv_iosf_sb_get(dev_priv, BIT(VLV_IOSF_SB_GPIO));
+ if (!map->init) {
+ /* FIXME: remove constant below */
+ vlv_iosf_sb_write(dev_priv, port, pconf0, 0x2000CC00);
+ map->init = true;
+ }
+
+ tmp = 0x4 | value;
+ vlv_iosf_sb_write(dev_priv, port, padval, tmp);
+ vlv_iosf_sb_put(dev_priv, BIT(VLV_IOSF_SB_GPIO));
+}
+
+static void chv_exec_gpio(struct intel_connector *connector,
+ u8 gpio_source, u8 gpio_index, bool value)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ u16 cfg0, cfg1;
+ u16 family_num;
+ u8 port;
+
+ if (connector->panel.vbt.dsi.seq_version >= 3) {
+ if (gpio_index >= CHV_GPIO_IDX_START_SE) {
+ /* XXX: it's unclear whether 255->57 is part of SE. */
+ gpio_index -= CHV_GPIO_IDX_START_SE;
+ port = CHV_IOSF_PORT_GPIO_SE;
+ } else if (gpio_index >= CHV_GPIO_IDX_START_SW) {
+ gpio_index -= CHV_GPIO_IDX_START_SW;
+ port = CHV_IOSF_PORT_GPIO_SW;
+ } else if (gpio_index >= CHV_GPIO_IDX_START_E) {
+ gpio_index -= CHV_GPIO_IDX_START_E;
+ port = CHV_IOSF_PORT_GPIO_E;
+ } else {
+ port = CHV_IOSF_PORT_GPIO_N;
+ }
+ } else {
+ /* XXX: The spec is unclear about CHV GPIO on seq v2 */
+ if (gpio_source != 0) {
+ drm_dbg_kms(&dev_priv->drm,
+ "unknown gpio source %u\n", gpio_source);
+ return;
+ }
+
+ if (gpio_index >= CHV_GPIO_IDX_START_E) {
+ drm_dbg_kms(&dev_priv->drm,
+ "invalid gpio index %u for GPIO N\n",
+ gpio_index);
+ return;
+ }
+
+ port = CHV_IOSF_PORT_GPIO_N;
+ }
+
+ family_num = gpio_index / CHV_VBT_MAX_PINS_PER_FMLY;
+ gpio_index = gpio_index % CHV_VBT_MAX_PINS_PER_FMLY;
+
+ cfg0 = CHV_GPIO_PAD_CFG0(family_num, gpio_index);
+ cfg1 = CHV_GPIO_PAD_CFG1(family_num, gpio_index);
+
+ vlv_iosf_sb_get(dev_priv, BIT(VLV_IOSF_SB_GPIO));
+ vlv_iosf_sb_write(dev_priv, port, cfg1, 0);
+ vlv_iosf_sb_write(dev_priv, port, cfg0,
+ CHV_GPIO_GPIOEN | CHV_GPIO_GPIOCFG_GPO |
+ CHV_GPIO_GPIOTXSTATE(value));
+ vlv_iosf_sb_put(dev_priv, BIT(VLV_IOSF_SB_GPIO));
+}
+
+static void bxt_exec_gpio(struct intel_connector *connector,
+ u8 gpio_source, u8 gpio_index, bool value)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ /* XXX: this table is a quick ugly hack. */
+ static struct gpio_desc *bxt_gpio_table[U8_MAX + 1];
+ struct gpio_desc *gpio_desc = bxt_gpio_table[gpio_index];
+
+ if (!gpio_desc) {
+ gpio_desc = devm_gpiod_get_index(dev_priv->drm.dev,
+ NULL, gpio_index,
+ value ? GPIOD_OUT_LOW :
+ GPIOD_OUT_HIGH);
+
+ if (IS_ERR_OR_NULL(gpio_desc)) {
+ drm_err(&dev_priv->drm,
+ "GPIO index %u request failed (%ld)\n",
+ gpio_index, PTR_ERR(gpio_desc));
+ return;
+ }
+
+ bxt_gpio_table[gpio_index] = gpio_desc;
+ }
+
+ gpiod_set_value(gpio_desc, value);
+}
+
+static void icl_exec_gpio(struct intel_connector *connector,
+ u8 gpio_source, u8 gpio_index, bool value)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+
+ drm_dbg_kms(&dev_priv->drm, "Skipping ICL GPIO element execution\n");
+}
+
+enum {
+ MIPI_RESET_1 = 0,
+ MIPI_AVDD_EN_1,
+ MIPI_BKLT_EN_1,
+ MIPI_AVEE_EN_1,
+ MIPI_VIO_EN_1,
+ MIPI_RESET_2,
+ MIPI_AVDD_EN_2,
+ MIPI_BKLT_EN_2,
+ MIPI_AVEE_EN_2,
+ MIPI_VIO_EN_2,
+};
+
+static void icl_native_gpio_set_value(struct drm_i915_private *dev_priv,
+ int gpio, bool value)
+{
+ int index;
+
+ if (drm_WARN_ON(&dev_priv->drm, DISPLAY_VER(dev_priv) == 11 && gpio >= MIPI_RESET_2))
+ return;
+
+ switch (gpio) {
+ case MIPI_RESET_1:
+ case MIPI_RESET_2:
+ index = gpio == MIPI_RESET_1 ? HPD_PORT_A : HPD_PORT_B;
+
+ /*
+ * Disable HPD to set the pin to output, and set output
+ * value. The HPD pin should not be enabled for DSI anyway,
+ * assuming the board design and VBT are sane, and the pin isn't
+ * used by a non-DSI encoder.
+ *
+ * The locking protects against concurrent SHOTPLUG_CTL_DDI
+ * modifications in irq setup and handling.
+ */
+ spin_lock_irq(&dev_priv->irq_lock);
+ intel_de_rmw(dev_priv, SHOTPLUG_CTL_DDI,
+ SHOTPLUG_CTL_DDI_HPD_ENABLE(index) |
+ SHOTPLUG_CTL_DDI_HPD_OUTPUT_DATA(index),
+ value ? SHOTPLUG_CTL_DDI_HPD_OUTPUT_DATA(index) : 0);
+ spin_unlock_irq(&dev_priv->irq_lock);
+ break;
+ case MIPI_AVDD_EN_1:
+ case MIPI_AVDD_EN_2:
+ index = gpio == MIPI_AVDD_EN_1 ? 0 : 1;
+
+ intel_de_rmw(dev_priv, PP_CONTROL(index), PANEL_POWER_ON,
+ value ? PANEL_POWER_ON : 0);
+ break;
+ case MIPI_BKLT_EN_1:
+ case MIPI_BKLT_EN_2:
+ index = gpio == MIPI_BKLT_EN_1 ? 0 : 1;
+
+ intel_de_rmw(dev_priv, PP_CONTROL(index), EDP_BLC_ENABLE,
+ value ? EDP_BLC_ENABLE : 0);
+ break;
+ case MIPI_AVEE_EN_1:
+ case MIPI_AVEE_EN_2:
+ index = gpio == MIPI_AVEE_EN_1 ? 1 : 2;
+
+ intel_de_rmw(dev_priv, GPIO(dev_priv, index),
+ GPIO_CLOCK_VAL_OUT,
+ GPIO_CLOCK_DIR_MASK | GPIO_CLOCK_DIR_OUT |
+ GPIO_CLOCK_VAL_MASK | (value ? GPIO_CLOCK_VAL_OUT : 0));
+ break;
+ case MIPI_VIO_EN_1:
+ case MIPI_VIO_EN_2:
+ index = gpio == MIPI_VIO_EN_1 ? 1 : 2;
+
+ intel_de_rmw(dev_priv, GPIO(dev_priv, index),
+ GPIO_DATA_VAL_OUT,
+ GPIO_DATA_DIR_MASK | GPIO_DATA_DIR_OUT |
+ GPIO_DATA_VAL_MASK | (value ? GPIO_DATA_VAL_OUT : 0));
+ break;
+ default:
+ MISSING_CASE(gpio);
+ }
+}
+
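+/*
+ * For reference, the GPIO element payload decoded below is two bytes on
+ * sequence versions 1-2 (number, flags) and three bytes on version 3+
+ * (index, number, flags). Flags bit 0 is the value to drive, on version
+ * 2 bits 1-2 select the GPIO source, and on version 4+ a set bit 1
+ * requests non-native GPIO handling.
+ */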
+static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
+{
+ struct drm_device *dev = intel_dsi->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_connector *connector = intel_dsi->attached_connector;
+ u8 gpio_source, gpio_index = 0, gpio_number;
+ bool value;
+ bool native = DISPLAY_VER(dev_priv) >= 11;
+
+ if (connector->panel.vbt.dsi.seq_version >= 3)
+ gpio_index = *data++;
+
+ gpio_number = *data++;
+
+ /* gpio source in sequence v2 only */
+ if (connector->panel.vbt.dsi.seq_version == 2)
+ gpio_source = (*data >> 1) & 3;
+ else
+ gpio_source = 0;
+
+ if (connector->panel.vbt.dsi.seq_version >= 4 && *data & BIT(1))
+ native = false;
+
+ /* pull up/down */
+ value = *data++ & 1;
+
+ drm_dbg_kms(&dev_priv->drm, "GPIO index %u, number %u, source %u, native %s, set to %s\n",
+ gpio_index, gpio_number, gpio_source, str_yes_no(native), str_on_off(value));
+
+ if (native)
+ icl_native_gpio_set_value(dev_priv, gpio_number, value);
+ else if (DISPLAY_VER(dev_priv) >= 11)
+ icl_exec_gpio(connector, gpio_source, gpio_index, value);
+ else if (IS_VALLEYVIEW(dev_priv))
+ vlv_exec_gpio(connector, gpio_source, gpio_number, value);
+ else if (IS_CHERRYVIEW(dev_priv))
+ chv_exec_gpio(connector, gpio_source, gpio_number, value);
+ else
+ bxt_exec_gpio(connector, gpio_source, gpio_index, value);
+
+ return data;
+}
+
+#ifdef CONFIG_ACPI
+static int i2c_adapter_lookup(struct acpi_resource *ares, void *data)
+{
+ struct i2c_adapter_lookup *lookup = data;
+ struct intel_dsi *intel_dsi = lookup->intel_dsi;
+ struct acpi_resource_i2c_serialbus *sb;
+ struct i2c_adapter *adapter;
+ acpi_handle adapter_handle;
+ acpi_status status;
+
+ if (!i2c_acpi_get_i2c_resource(ares, &sb))
+ return 1;
+
+ if (lookup->slave_addr != sb->slave_address)
+ return 1;
+
+ status = acpi_get_handle(lookup->dev_handle,
+ sb->resource_source.string_ptr,
+ &adapter_handle);
+ if (ACPI_FAILURE(status))
+ return 1;
+
+ adapter = i2c_acpi_find_adapter_by_handle(adapter_handle);
+ if (adapter)
+ intel_dsi->i2c_bus_num = adapter->nr;
+
+ return 1;
+}
+
+static void i2c_acpi_find_adapter(struct intel_dsi *intel_dsi,
+ const u16 slave_addr)
+{
+ struct drm_device *drm_dev = intel_dsi->base.base.dev;
+ struct acpi_device *adev = ACPI_COMPANION(drm_dev->dev);
+ struct i2c_adapter_lookup lookup = {
+ .slave_addr = slave_addr,
+ .intel_dsi = intel_dsi,
+ .dev_handle = acpi_device_handle(adev),
+ };
+ LIST_HEAD(resource_list);
+
+ acpi_dev_get_resources(adev, &resource_list, i2c_adapter_lookup, &lookup);
+ acpi_dev_free_resource_list(&resource_list);
+}
+#else
+static inline void i2c_acpi_find_adapter(struct intel_dsi *intel_dsi,
+ const u16 slave_addr)
+{
+}
+#endif
+
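+/*
+ * For reference, the i2c element payload parsed below is laid out as
+ * (derived from the offsets used in the code):
+ *
+ *   bytes 0-1: not used by this implementation
+ *   byte 2:    VBT i2c bus number
+ *   bytes 3-4: slave address (u16)
+ *   byte 5:    register offset
+ *   byte 6:    payload size
+ *   byte 7...: payload data
+ */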
+static const u8 *mipi_exec_i2c(struct intel_dsi *intel_dsi, const u8 *data)
+{
+ struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev);
+ struct i2c_adapter *adapter;
+ struct i2c_msg msg;
+ int ret;
+ u8 vbt_i2c_bus_num = *(data + 2);
+ u16 slave_addr = *(u16 *)(data + 3);
+ u8 reg_offset = *(data + 5);
+ u8 payload_size = *(data + 6);
+ u8 *payload_data;
+
+ if (intel_dsi->i2c_bus_num < 0) {
+ intel_dsi->i2c_bus_num = vbt_i2c_bus_num;
+ i2c_acpi_find_adapter(intel_dsi, slave_addr);
+ }
+
+ adapter = i2c_get_adapter(intel_dsi->i2c_bus_num);
+ if (!adapter) {
+ drm_err(&i915->drm, "Cannot find a valid i2c bus for xfer\n");
+ goto err_bus;
+ }
+
+ payload_data = kzalloc(payload_size + 1, GFP_KERNEL);
+ if (!payload_data)
+ goto err_alloc;
+
+ payload_data[0] = reg_offset;
+ memcpy(&payload_data[1], (data + 7), payload_size);
+
+ msg.addr = slave_addr;
+ msg.flags = 0;
+ msg.len = payload_size + 1;
+ msg.buf = payload_data;
+
+ ret = i2c_transfer(adapter, &msg, 1);
+ if (ret < 0)
+ drm_err(&i915->drm,
+ "Failed to xfer payload of size (%u) to reg (%u)\n",
+ payload_size, reg_offset);
+
+ kfree(payload_data);
+err_alloc:
+ i2c_put_adapter(adapter);
+err_bus:
+ return data + payload_size + 7;
+}
+
+static const u8 *mipi_exec_spi(struct intel_dsi *intel_dsi, const u8 *data)
+{
+ struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev);
+
+ drm_dbg_kms(&i915->drm, "Skipping SPI element execution\n");
+
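+ /* Byte 5 of the element holds its payload size; skip header + payload. */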
+ return data + *(data + 5) + 6;
+}
+
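+/*
+ * For reference, the 15-byte PMIC element payload consumed below:
+ *
+ *   byte 0:      PMIC flag (reserved)
+ *   bytes 1-2:   i2c address (le16)
+ *   bytes 3-6:   register address (le32)
+ *   bytes 7-10:  value (le32)
+ *   bytes 11-14: mask (le32)
+ */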
+static const u8 *mipi_exec_pmic(struct intel_dsi *intel_dsi, const u8 *data)
+{
+ struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev);
+#ifdef CONFIG_PMIC_OPREGION
+ u32 value, mask, reg_address;
+ u16 i2c_address;
+ int ret;
+
+ /* byte 0 aka PMIC Flag is reserved */
+ i2c_address = get_unaligned_le16(data + 1);
+ reg_address = get_unaligned_le32(data + 3);
+ value = get_unaligned_le32(data + 7);
+ mask = get_unaligned_le32(data + 11);
+
+ ret = intel_soc_pmic_exec_mipi_pmic_seq_element(i2c_address,
+ reg_address,
+ value, mask);
+ if (ret)
+ drm_err(&i915->drm, "%s failed, error: %d\n", __func__, ret);
+#else
+ drm_err(&i915->drm,
+ "Your hardware requires CONFIG_PMIC_OPREGION and it is not set\n");
+#endif
+
+ return data + 15;
+}
+
+typedef const u8 * (*fn_mipi_elem_exec)(struct intel_dsi *intel_dsi,
+ const u8 *data);
+static const fn_mipi_elem_exec exec_elem[] = {
+ [MIPI_SEQ_ELEM_SEND_PKT] = mipi_exec_send_packet,
+ [MIPI_SEQ_ELEM_DELAY] = mipi_exec_delay,
+ [MIPI_SEQ_ELEM_GPIO] = mipi_exec_gpio,
+ [MIPI_SEQ_ELEM_I2C] = mipi_exec_i2c,
+ [MIPI_SEQ_ELEM_SPI] = mipi_exec_spi,
+ [MIPI_SEQ_ELEM_PMIC] = mipi_exec_pmic,
+};
+
+/*
+ * MIPI sequence from VBT block #53 parsing logic.
+ * Each sequence has already been separated out during BIOS parsing;
+ * the following is a generic execution function for any sequence.
+ */
+
+static const char * const seq_name[] = {
+ [MIPI_SEQ_DEASSERT_RESET] = "MIPI_SEQ_DEASSERT_RESET",
+ [MIPI_SEQ_INIT_OTP] = "MIPI_SEQ_INIT_OTP",
+ [MIPI_SEQ_DISPLAY_ON] = "MIPI_SEQ_DISPLAY_ON",
+ [MIPI_SEQ_DISPLAY_OFF] = "MIPI_SEQ_DISPLAY_OFF",
+ [MIPI_SEQ_ASSERT_RESET] = "MIPI_SEQ_ASSERT_RESET",
+ [MIPI_SEQ_BACKLIGHT_ON] = "MIPI_SEQ_BACKLIGHT_ON",
+ [MIPI_SEQ_BACKLIGHT_OFF] = "MIPI_SEQ_BACKLIGHT_OFF",
+ [MIPI_SEQ_TEAR_ON] = "MIPI_SEQ_TEAR_ON",
+ [MIPI_SEQ_TEAR_OFF] = "MIPI_SEQ_TEAR_OFF",
+ [MIPI_SEQ_POWER_ON] = "MIPI_SEQ_POWER_ON",
+ [MIPI_SEQ_POWER_OFF] = "MIPI_SEQ_POWER_OFF",
+};
+
+static const char *sequence_name(enum mipi_seq seq_id)
+{
+ if (seq_id < ARRAY_SIZE(seq_name) && seq_name[seq_id])
+ return seq_name[seq_id];
+ else
+ return "(unknown)";
+}
+
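+/*
+ * For reference, the sequence data walked below is laid out as:
+ *
+ *   byte 0: sequence id
+ *   bytes 1-4 (seq_version >= 3): total size of the sequence
+ *   then, repeated until MIPI_SEQ_ELEM_END:
+ *     one operation byte (MIPI_SEQ_ELEM_*),
+ *     one payload size byte (seq_version >= 3),
+ *     and the payload, consumed by the matching exec_elem[] handler.
+ */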
+static void intel_dsi_vbt_exec(struct intel_dsi *intel_dsi,
+ enum mipi_seq seq_id)
+{
+ struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
+ struct intel_connector *connector = intel_dsi->attached_connector;
+ const u8 *data;
+ fn_mipi_elem_exec mipi_elem_exec;
+
+ if (drm_WARN_ON(&dev_priv->drm,
+ seq_id >= ARRAY_SIZE(connector->panel.vbt.dsi.sequence)))
+ return;
+
+ data = connector->panel.vbt.dsi.sequence[seq_id];
+ if (!data)
+ return;
+
+ drm_WARN_ON(&dev_priv->drm, *data != seq_id);
+
+ drm_dbg_kms(&dev_priv->drm, "Starting MIPI sequence %d - %s\n",
+ seq_id, sequence_name(seq_id));
+
+ /* Skip Sequence Byte. */
+ data++;
+
+ /* Skip Size of Sequence. */
+ if (connector->panel.vbt.dsi.seq_version >= 3)
+ data += 4;
+
+ while (1) {
+ u8 operation_byte = *data++;
+ u8 operation_size = 0;
+
+ if (operation_byte == MIPI_SEQ_ELEM_END)
+ break;
+
+ if (operation_byte < ARRAY_SIZE(exec_elem))
+ mipi_elem_exec = exec_elem[operation_byte];
+ else
+ mipi_elem_exec = NULL;
+
+ /* Size of Operation. */
+ if (connector->panel.vbt.dsi.seq_version >= 3)
+ operation_size = *data++;
+
+ if (mipi_elem_exec) {
+ const u8 *next = data + operation_size;
+
+ data = mipi_elem_exec(intel_dsi, data);
+
+ /* Consistency check if we have size. */
+ if (operation_size && data != next) {
+ drm_err(&dev_priv->drm,
+ "Inconsistent operation size\n");
+ return;
+ }
+ } else if (operation_size) {
+ /* We have size, skip. */
+ drm_dbg_kms(&dev_priv->drm,
+ "Unsupported MIPI operation byte %u\n",
+ operation_byte);
+ data += operation_size;
+ } else {
+ /* No size, can't skip without parsing. */
+ drm_err(&dev_priv->drm,
+ "Unsupported MIPI operation byte %u\n",
+ operation_byte);
+ return;
+ }
+ }
+}
+
+void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
+ enum mipi_seq seq_id)
+{
+ if (seq_id == MIPI_SEQ_POWER_ON && intel_dsi->gpio_panel)
+ gpiod_set_value_cansleep(intel_dsi->gpio_panel, 1);
+ if (seq_id == MIPI_SEQ_BACKLIGHT_ON && intel_dsi->gpio_backlight)
+ gpiod_set_value_cansleep(intel_dsi->gpio_backlight, 1);
+
+ intel_dsi_vbt_exec(intel_dsi, seq_id);
+
+ if (seq_id == MIPI_SEQ_POWER_OFF && intel_dsi->gpio_panel)
+ gpiod_set_value_cansleep(intel_dsi->gpio_panel, 0);
+ if (seq_id == MIPI_SEQ_BACKLIGHT_OFF && intel_dsi->gpio_backlight)
+ gpiod_set_value_cansleep(intel_dsi->gpio_backlight, 0);
+}
+
+void intel_dsi_log_params(struct intel_dsi *intel_dsi)
+{
+ struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev);
+
+ drm_dbg_kms(&i915->drm, "Pclk %d\n", intel_dsi->pclk);
+ drm_dbg_kms(&i915->drm, "Pixel overlap %d\n",
+ intel_dsi->pixel_overlap);
+ drm_dbg_kms(&i915->drm, "Lane count %d\n", intel_dsi->lane_count);
+ drm_dbg_kms(&i915->drm, "DPHY param reg 0x%x\n", intel_dsi->dphy_reg);
+ drm_dbg_kms(&i915->drm, "Video mode format %s\n",
+ intel_dsi->video_mode == NON_BURST_SYNC_PULSE ?
+ "non-burst with sync pulse" :
+ intel_dsi->video_mode == NON_BURST_SYNC_EVENTS ?
+ "non-burst with sync events" :
+ intel_dsi->video_mode == BURST_MODE ?
+ "burst" : "<unknown>");
+ drm_dbg_kms(&i915->drm, "Burst mode ratio %d\n",
+ intel_dsi->burst_mode_ratio);
+ drm_dbg_kms(&i915->drm, "Reset timer %d\n", intel_dsi->rst_timer_val);
+ drm_dbg_kms(&i915->drm, "Eot %s\n",
+ str_enabled_disabled(intel_dsi->eotp_pkt));
+ drm_dbg_kms(&i915->drm, "Clockstop %s\n",
+ str_enabled_disabled(!intel_dsi->clock_stop));
+ drm_dbg_kms(&i915->drm, "Mode %s\n",
+ intel_dsi->operation_mode ? "command" : "video");
+ if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK)
+ drm_dbg_kms(&i915->drm,
+ "Dual link: DSI_DUAL_LINK_FRONT_BACK\n");
+ else if (intel_dsi->dual_link == DSI_DUAL_LINK_PIXEL_ALT)
+ drm_dbg_kms(&i915->drm,
+ "Dual link: DSI_DUAL_LINK_PIXEL_ALT\n");
+ else
+ drm_dbg_kms(&i915->drm, "Dual link: NONE\n");
+ drm_dbg_kms(&i915->drm, "Pixel Format %d\n", intel_dsi->pixel_format);
+ drm_dbg_kms(&i915->drm, "TLPX %d\n", intel_dsi->escape_clk_div);
+ drm_dbg_kms(&i915->drm, "LP RX Timeout 0x%x\n",
+ intel_dsi->lp_rx_timeout);
+ drm_dbg_kms(&i915->drm, "Turnaround Timeout 0x%x\n",
+ intel_dsi->turn_arnd_val);
+ drm_dbg_kms(&i915->drm, "Init Count 0x%x\n", intel_dsi->init_count);
+ drm_dbg_kms(&i915->drm, "HS to LP Count 0x%x\n",
+ intel_dsi->hs_to_lp_count);
+ drm_dbg_kms(&i915->drm, "LP Byte Clock %d\n", intel_dsi->lp_byte_clk);
+ drm_dbg_kms(&i915->drm, "DBI BW Timer 0x%x\n", intel_dsi->bw_timer);
+ drm_dbg_kms(&i915->drm, "LP to HS Clock Count 0x%x\n",
+ intel_dsi->clk_lp_to_hs_count);
+ drm_dbg_kms(&i915->drm, "HS to LP Clock Count 0x%x\n",
+ intel_dsi->clk_hs_to_lp_count);
+ drm_dbg_kms(&i915->drm, "BTA %s\n",
+ str_enabled_disabled(!(intel_dsi->video_frmt_cfg_bits & DISABLE_VIDEO_BTA)));
+}
+
+bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
+{
+ struct drm_device *dev = intel_dsi->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_connector *connector = intel_dsi->attached_connector;
+ struct mipi_config *mipi_config = connector->panel.vbt.dsi.config;
+ struct mipi_pps_data *pps = connector->panel.vbt.dsi.pps;
+ struct drm_display_mode *mode = connector->panel.vbt.lfp_lvds_vbt_mode;
+ u16 burst_mode_ratio;
+ enum port port;
+
+ drm_dbg_kms(&dev_priv->drm, "\n");
+
+ intel_dsi->eotp_pkt = mipi_config->eot_pkt_disabled ? 0 : 1;
+ intel_dsi->clock_stop = mipi_config->enable_clk_stop ? 1 : 0;
+ intel_dsi->lane_count = mipi_config->lane_cnt + 1;
+ intel_dsi->pixel_format =
+ pixel_format_from_register_bits(
+ mipi_config->videomode_color_format << 7);
+
+ intel_dsi->dual_link = mipi_config->dual_link;
+ intel_dsi->pixel_overlap = mipi_config->pixel_overlap;
+ intel_dsi->operation_mode = mipi_config->is_cmd_mode;
+ intel_dsi->video_mode = mipi_config->video_transfer_mode;
+ intel_dsi->escape_clk_div = mipi_config->byte_clk_sel;
+ intel_dsi->lp_rx_timeout = mipi_config->lp_rx_timeout;
+ intel_dsi->hs_tx_timeout = mipi_config->hs_tx_timeout;
+ intel_dsi->turn_arnd_val = mipi_config->turn_around_timeout;
+ intel_dsi->rst_timer_val = mipi_config->device_reset_timer;
+ intel_dsi->init_count = mipi_config->master_init_timer;
+ intel_dsi->bw_timer = mipi_config->dbi_bw_timer;
+ intel_dsi->video_frmt_cfg_bits =
+ mipi_config->bta_enabled ? DISABLE_VIDEO_BTA : 0;
+ intel_dsi->bgr_enabled = mipi_config->rgb_flip;
+
+ /* Starting point, adjusted depending on dual link and burst mode */
+ intel_dsi->pclk = mode->clock;
+
+ /* In dual link mode each port needs half of pixel clock */
+ if (intel_dsi->dual_link) {
+ intel_dsi->pclk /= 2;
+
+ /*
+ * We can enable pixel_overlap if needed by the panel. In this
+ * case we need to increase the pixel clock for the extra pixels.
+ */
+ if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) {
+ intel_dsi->pclk += DIV_ROUND_UP(mode->vtotal * intel_dsi->pixel_overlap * 60, 1000);
+ }
+ }
+
+ /*
+ * Burst mode ratio: target DDR frequency from the VBT divided by the
+ * non-burst DDR frequency, multiplied by 100 to preserve the remainder.
+ */
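+ /*
+ * E.g. (hypothetical numbers): a VBT target frequency of 550000 against
+ * a computed bitrate of 500000 gives burst_mode_ratio = 110, and pclk
+ * below is then scaled up by a factor of 1.10.
+ */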
+ if (intel_dsi->video_mode == BURST_MODE) {
+ if (mipi_config->target_burst_mode_freq) {
+ u32 bitrate = intel_dsi_bitrate(intel_dsi);
+
+ /*
+ * Sometimes the VBT contains a clock slightly lower than
+ * the bitrate we have calculated; in this case just
+ * replace it with the calculated bitrate.
+ */
+ if (mipi_config->target_burst_mode_freq < bitrate &&
+ intel_fuzzy_clock_check(
+ mipi_config->target_burst_mode_freq,
+ bitrate))
+ mipi_config->target_burst_mode_freq = bitrate;
+
+ if (mipi_config->target_burst_mode_freq < bitrate) {
+ drm_err(&dev_priv->drm,
+ "Burst mode freq is less than computed\n");
+ return false;
+ }
+
+ burst_mode_ratio = DIV_ROUND_UP(
+ mipi_config->target_burst_mode_freq * 100,
+ bitrate);
+
+ intel_dsi->pclk = DIV_ROUND_UP(intel_dsi->pclk * burst_mode_ratio, 100);
+ } else {
+ drm_err(&dev_priv->drm,
+ "Burst mode target is not set\n");
+ return false;
+ }
+ } else {
+ burst_mode_ratio = 100;
+ }
+
+ intel_dsi->burst_mode_ratio = burst_mode_ratio;
+
+ /*
+ * Delays in the VBT are in units of 100us, so convert them to ms here:
+ * delay (100us) * 100 / 1000 = delay / 10 (ms).
+ */
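+ /* E.g. a VBT delay value of 200 (i.e. 20 ms) is stored as 20. */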
+ intel_dsi->backlight_off_delay = pps->bl_disable_delay / 10;
+ intel_dsi->backlight_on_delay = pps->bl_enable_delay / 10;
+ intel_dsi->panel_on_delay = pps->panel_on_delay / 10;
+ intel_dsi->panel_off_delay = pps->panel_off_delay / 10;
+ intel_dsi->panel_pwr_cycle_delay = pps->panel_power_cycle_delay / 10;
+
+ intel_dsi->i2c_bus_num = -1;
+
+ /* a regular driver would get the device in probe */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ mipi_dsi_attach(intel_dsi->dsi_hosts[port]->device);
+ }
+
+ return true;
+}
+
+/*
+ * On some BYT/CHT devices some sequences are incomplete and we need to
+ * manually control some GPIOs. We need to add a GPIO lookup table before
+ * we can get these GPIOs. If the GOP did not initialize the panel (HDMI
+ * inserted) we may also need to change the pinmux for the SoC's PWM0 pin
+ * from GPIO to PWM.
+ */
+static struct gpiod_lookup_table pmic_panel_gpio_table = {
+ /* Intel GFX is consumer */
+ .dev_id = "0000:00:02.0",
+ .table = {
+ /* Panel EN/DISABLE */
+ GPIO_LOOKUP("gpio_crystalcove", 94, "panel", GPIO_ACTIVE_HIGH),
+ { }
+ },
+};
+
+static struct gpiod_lookup_table soc_panel_gpio_table = {
+ .dev_id = "0000:00:02.0",
+ .table = {
+ GPIO_LOOKUP("INT33FC:01", 10, "backlight", GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("INT33FC:01", 11, "panel", GPIO_ACTIVE_HIGH),
+ { }
+ },
+};
+
+static const struct pinctrl_map soc_pwm_pinctrl_map[] = {
+ PIN_MAP_MUX_GROUP("0000:00:02.0", "soc_pwm0", "INT33FC:00",
+ "pwm0_grp", "pwm"),
+};
+
+void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on)
+{
+ struct drm_device *dev = intel_dsi->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_connector *connector = intel_dsi->attached_connector;
+ struct mipi_config *mipi_config = connector->panel.vbt.dsi.config;
+ enum gpiod_flags flags = panel_is_on ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW;
+ bool want_backlight_gpio = false;
+ bool want_panel_gpio = false;
+ struct pinctrl *pinctrl;
+ int ret;
+
+ if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ mipi_config->pwm_blc == PPS_BLC_PMIC) {
+ gpiod_add_lookup_table(&pmic_panel_gpio_table);
+ want_panel_gpio = true;
+ }
+
+ if (IS_VALLEYVIEW(dev_priv) && mipi_config->pwm_blc == PPS_BLC_SOC) {
+ gpiod_add_lookup_table(&soc_panel_gpio_table);
+ want_panel_gpio = true;
+ want_backlight_gpio = true;
+
+ /* Ensure PWM0 pin is muxed as PWM instead of GPIO */
+ ret = pinctrl_register_mappings(soc_pwm_pinctrl_map,
+ ARRAY_SIZE(soc_pwm_pinctrl_map));
+ if (ret)
+ drm_err(&dev_priv->drm,
+ "Failed to register pwm0 pinmux mapping\n");
+
+ pinctrl = devm_pinctrl_get_select(dev->dev, "soc_pwm0");
+ if (IS_ERR(pinctrl))
+ drm_err(&dev_priv->drm,
+ "Failed to set pinmux to PWM\n");
+ }
+
+ if (want_panel_gpio) {
+ intel_dsi->gpio_panel = gpiod_get(dev->dev, "panel", flags);
+ if (IS_ERR(intel_dsi->gpio_panel)) {
+ drm_err(&dev_priv->drm,
+ "Failed to own gpio for panel control\n");
+ intel_dsi->gpio_panel = NULL;
+ }
+ }
+
+ if (want_backlight_gpio) {
+ intel_dsi->gpio_backlight =
+ gpiod_get(dev->dev, "backlight", flags);
+ if (IS_ERR(intel_dsi->gpio_backlight)) {
+ drm_err(&dev_priv->drm,
+ "Failed to own gpio for backlight control\n");
+ intel_dsi->gpio_backlight = NULL;
+ }
+ }
+}
+
+void intel_dsi_vbt_gpio_cleanup(struct intel_dsi *intel_dsi)
+{
+ struct drm_device *dev = intel_dsi->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_connector *connector = intel_dsi->attached_connector;
+ struct mipi_config *mipi_config = connector->panel.vbt.dsi.config;
+
+ if (intel_dsi->gpio_panel) {
+ gpiod_put(intel_dsi->gpio_panel);
+ intel_dsi->gpio_panel = NULL;
+ }
+
+ if (intel_dsi->gpio_backlight) {
+ gpiod_put(intel_dsi->gpio_backlight);
+ intel_dsi->gpio_backlight = NULL;
+ }
+
+ if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ mipi_config->pwm_blc == PPS_BLC_PMIC)
+ gpiod_remove_lookup_table(&pmic_panel_gpio_table);
+
+ if (IS_VALLEYVIEW(dev_priv) && mipi_config->pwm_blc == PPS_BLC_SOC) {
+ pinctrl_unregister_mappings(soc_pwm_pinctrl_map);
+ gpiod_remove_lookup_table(&soc_panel_gpio_table);
+ }
+}
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.h b/drivers/gpu/drm/i915/display/intel_dsi_vbt.h
new file mode 100644
index 000000000..468d873fa
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef __INTEL_DSI_VBT_H__
+#define __INTEL_DSI_VBT_H__
+
+#include <linux/types.h>
+
+enum mipi_seq;
+struct intel_dsi;
+
+bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id);
+void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on);
+void intel_dsi_vbt_gpio_cleanup(struct intel_dsi *intel_dsi);
+void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
+ enum mipi_seq seq_id);
+void intel_dsi_log_params(struct intel_dsi *intel_dsi);
+
+#endif /* __INTEL_DSI_VBT_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c
new file mode 100644
index 000000000..511c58907
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dvo.c
@@ -0,0 +1,550 @@
+/*
+ * Copyright 2006 Dave Airlie <airlied@linux.ie>
+ * Copyright © 2006-2007 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ */
+
+#include <linux/i2c.h>
+#include <linux/slab.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+
+#include "i915_drv.h"
+#include "intel_connector.h"
+#include "intel_de.h"
+#include "intel_display_types.h"
+#include "intel_dvo.h"
+#include "intel_dvo_dev.h"
+#include "intel_gmbus.h"
+#include "intel_panel.h"
+
+#define INTEL_DVO_CHIP_NONE 0
+#define INTEL_DVO_CHIP_LVDS 1
+#define INTEL_DVO_CHIP_TMDS 2
+#define INTEL_DVO_CHIP_TVOUT 4
+#define INTEL_DVO_CHIP_LVDS_NO_FIXED 5
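+/* E.g. family 1, pad 3: CFG0 at 0x4400 + 0x400 + 3 * 8 = 0x4818. */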
+
+#define SIL164_ADDR 0x38
+#define CH7xxx_ADDR 0x76
+#define TFP410_ADDR 0x38
+#define NS2501_ADDR 0x38
+
+static const struct intel_dvo_device intel_dvo_devices[] = {
+ {
+ .type = INTEL_DVO_CHIP_TMDS,
+ .name = "sil164",
+ .dvo_reg = DVOC,
+ .dvo_srcdim_reg = DVOC_SRCDIM,
+ .slave_addr = SIL164_ADDR,
+ .dev_ops = &sil164_ops,
+ },
+ {
+ .type = INTEL_DVO_CHIP_TMDS,
+ .name = "ch7xxx",
+ .dvo_reg = DVOC,
+ .dvo_srcdim_reg = DVOC_SRCDIM,
+ .slave_addr = CH7xxx_ADDR,
+ .dev_ops = &ch7xxx_ops,
+ },
+ {
+ .type = INTEL_DVO_CHIP_TMDS,
+ .name = "ch7xxx",
+ .dvo_reg = DVOC,
+ .dvo_srcdim_reg = DVOC_SRCDIM,
+ .slave_addr = 0x75, /* For some ch7010 */
+ .dev_ops = &ch7xxx_ops,
+ },
+ {
+ .type = INTEL_DVO_CHIP_LVDS,
+ .name = "ivch",
+ .dvo_reg = DVOA,
+ .dvo_srcdim_reg = DVOA_SRCDIM,
+ .slave_addr = 0x02, /* Might also be 0x44, 0x84, 0xc4 */
+ .dev_ops = &ivch_ops,
+ },
+ {
+ .type = INTEL_DVO_CHIP_TMDS,
+ .name = "tfp410",
+ .dvo_reg = DVOC,
+ .dvo_srcdim_reg = DVOC_SRCDIM,
+ .slave_addr = TFP410_ADDR,
+ .dev_ops = &tfp410_ops,
+ },
+ {
+ .type = INTEL_DVO_CHIP_LVDS,
+ .name = "ch7017",
+ .dvo_reg = DVOC,
+ .dvo_srcdim_reg = DVOC_SRCDIM,
+ .slave_addr = 0x75,
+ .gpio = GMBUS_PIN_DPB,
+ .dev_ops = &ch7017_ops,
+ },
+ {
+ .type = INTEL_DVO_CHIP_LVDS_NO_FIXED,
+ .name = "ns2501",
+ .dvo_reg = DVOB,
+ .dvo_srcdim_reg = DVOB_SRCDIM,
+ .slave_addr = NS2501_ADDR,
+ .dev_ops = &ns2501_ops,
+ },
+};
+
+struct intel_dvo {
+ struct intel_encoder base;
+
+ struct intel_dvo_device dev;
+
+ struct intel_connector *attached_connector;
+
+ bool panel_wants_dither;
+};
+
+static struct intel_dvo *enc_to_dvo(struct intel_encoder *encoder)
+{
+ return container_of(encoder, struct intel_dvo, base);
+}
+
+static struct intel_dvo *intel_attached_dvo(struct intel_connector *connector)
+{
+ return enc_to_dvo(intel_attached_encoder(connector));
+}
+
+static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
+ u32 tmp;
+
+ tmp = intel_de_read(dev_priv, intel_dvo->dev.dvo_reg);
+
+ if (!(tmp & DVO_ENABLE))
+ return false;
+
+ return intel_dvo->dev.dev_ops->get_hw_state(&intel_dvo->dev);
+}
+
+static bool intel_dvo_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
+ u32 tmp;
+
+ tmp = intel_de_read(dev_priv, intel_dvo->dev.dvo_reg);
+
+ *pipe = (tmp & DVO_PIPE_SEL_MASK) >> DVO_PIPE_SEL_SHIFT;
+
+ return tmp & DVO_ENABLE;
+}
+
+static void intel_dvo_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
+ u32 tmp, flags = 0;
+
+ pipe_config->output_types |= BIT(INTEL_OUTPUT_DVO);
+
+ tmp = intel_de_read(dev_priv, intel_dvo->dev.dvo_reg);
+ if (tmp & DVO_HSYNC_ACTIVE_HIGH)
+ flags |= DRM_MODE_FLAG_PHSYNC;
+ else
+ flags |= DRM_MODE_FLAG_NHSYNC;
+ if (tmp & DVO_VSYNC_ACTIVE_HIGH)
+ flags |= DRM_MODE_FLAG_PVSYNC;
+ else
+ flags |= DRM_MODE_FLAG_NVSYNC;
+
+ pipe_config->hw.adjusted_mode.flags |= flags;
+
+ pipe_config->hw.adjusted_mode.crtc_clock = pipe_config->port_clock;
+}
+
+static void intel_disable_dvo(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
+ i915_reg_t dvo_reg = intel_dvo->dev.dvo_reg;
+ u32 temp = intel_de_read(dev_priv, dvo_reg);
+
+ intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false);
+ intel_de_write(dev_priv, dvo_reg, temp & ~DVO_ENABLE);
+ intel_de_read(dev_priv, dvo_reg);
+}
+
+static void intel_enable_dvo(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
+ i915_reg_t dvo_reg = intel_dvo->dev.dvo_reg;
+ u32 temp = intel_de_read(dev_priv, dvo_reg);
+
+ intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev,
+ &pipe_config->hw.mode,
+ &pipe_config->hw.adjusted_mode);
+
+ intel_de_write(dev_priv, dvo_reg, temp | DVO_ENABLE);
+ intel_de_read(dev_priv, dvo_reg);
+
+ intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
+}
+
+static enum drm_mode_status
+intel_dvo_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_dvo *intel_dvo = intel_attached_dvo(intel_connector);
+ struct drm_i915_private *i915 = to_i915(intel_connector->base.dev);
+ const struct drm_display_mode *fixed_mode =
+ intel_panel_fixed_mode(intel_connector, mode);
+ int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
+ int target_clock = mode->clock;
+ enum drm_mode_status status;
+
+ status = intel_cpu_transcoder_mode_valid(i915, mode);
+ if (status != MODE_OK)
+ return status;
+
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
+ /* XXX: Validate clock range */
+
+ if (fixed_mode) {
+ status = intel_panel_mode_valid(intel_connector, mode);
+ if (status != MODE_OK)
+ return status;
+
+ target_clock = fixed_mode->clock;
+ }
+
+ if (target_clock > max_dotclk)
+ return MODE_CLOCK_HIGH;
+
+ return intel_dvo->dev.dev_ops->mode_valid(&intel_dvo->dev, mode);
+}
+
+static int intel_dvo_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
+{
+ struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
+ const struct drm_display_mode *fixed_mode =
+ intel_panel_fixed_mode(intel_dvo->attached_connector, adjusted_mode);
+
+ /*
+ * If we have timings from the BIOS for the panel, put them in
+ * to the adjusted mode. The CRTC will be set up for this mode,
+ * with the panel scaling set up to source from the H/VDisplay
+ * of the original mode.
+ */
+ if (fixed_mode) {
+ int ret;
+
+ ret = intel_panel_compute_config(connector, adjusted_mode);
+ if (ret)
+ return ret;
+ }
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return -EINVAL;
+
+ pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
+
+ return 0;
+}
+
+static void intel_dvo_pre_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+ const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
+ struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
+ enum pipe pipe = crtc->pipe;
+ u32 dvo_val;
+ i915_reg_t dvo_reg = intel_dvo->dev.dvo_reg;
+ i915_reg_t dvo_srcdim_reg = intel_dvo->dev.dvo_srcdim_reg;
+
+ /* Save the data order, since I don't know what it should be set to. */
+ dvo_val = intel_de_read(dev_priv, dvo_reg) &
+ (DVO_PRESERVE_MASK | DVO_DATA_ORDER_GBRG);
+ dvo_val |= DVO_DATA_ORDER_FP | DVO_BORDER_ENABLE |
+ DVO_BLANK_ACTIVE_HIGH;
+
+ dvo_val |= DVO_PIPE_SEL(pipe);
+ dvo_val |= DVO_PIPE_STALL;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ dvo_val |= DVO_HSYNC_ACTIVE_HIGH;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ dvo_val |= DVO_VSYNC_ACTIVE_HIGH;
+
+ intel_de_write(dev_priv, dvo_srcdim_reg,
+ (adjusted_mode->crtc_hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) | (adjusted_mode->crtc_vdisplay << DVO_SRCDIM_VERTICAL_SHIFT));
+ intel_de_write(dev_priv, dvo_reg, dvo_val);
+}
+
+static enum drm_connector_status
+intel_dvo_detect(struct drm_connector *connector, bool force)
+{
+ struct drm_i915_private *i915 = to_i915(connector->dev);
+ struct intel_dvo *intel_dvo = intel_attached_dvo(to_intel_connector(connector));
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+ connector->base.id, connector->name);
+
+ if (!INTEL_DISPLAY_ENABLED(i915))
+ return connector_status_disconnected;
+
+ return intel_dvo->dev.dev_ops->detect(&intel_dvo->dev);
+}
+
+static int intel_dvo_get_modes(struct drm_connector *connector)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
+ int num_modes;
+
+ /*
+ * We should probably have an i2c driver get_modes function for those
+ * devices which will have a fixed set of modes determined by the chip
+ * (TV-out, for example), but for now with just TMDS and LVDS,
+ * that's not the case.
+ */
+ num_modes = intel_ddc_get_modes(connector,
+ intel_gmbus_get_adapter(dev_priv, GMBUS_PIN_DPC));
+ if (num_modes)
+ return num_modes;
+
+ return intel_panel_get_modes(to_intel_connector(connector));
+}
+
+static const struct drm_connector_funcs intel_dvo_connector_funcs = {
+ .detect = intel_dvo_detect,
+ .late_register = intel_connector_register,
+ .early_unregister = intel_connector_unregister,
+ .destroy = intel_connector_destroy,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+};
+
+static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs = {
+ .mode_valid = intel_dvo_mode_valid,
+ .get_modes = intel_dvo_get_modes,
+};
+
+static void intel_dvo_enc_destroy(struct drm_encoder *encoder)
+{
+ struct intel_dvo *intel_dvo = enc_to_dvo(to_intel_encoder(encoder));
+
+ if (intel_dvo->dev.dev_ops->destroy)
+ intel_dvo->dev.dev_ops->destroy(&intel_dvo->dev);
+
+ intel_encoder_destroy(encoder);
+}
+
+static const struct drm_encoder_funcs intel_dvo_enc_funcs = {
+ .destroy = intel_dvo_enc_destroy,
+};
+
+static enum port intel_dvo_port(i915_reg_t dvo_reg)
+{
+ if (i915_mmio_reg_equal(dvo_reg, DVOA))
+ return PORT_A;
+ else if (i915_mmio_reg_equal(dvo_reg, DVOB))
+ return PORT_B;
+ else
+ return PORT_C;
+}
+
+void intel_dvo_init(struct drm_i915_private *dev_priv)
+{
+ struct intel_encoder *intel_encoder;
+ struct intel_dvo *intel_dvo;
+ struct intel_connector *intel_connector;
+ int i;
+ int encoder_type = DRM_MODE_ENCODER_NONE;
+
+ intel_dvo = kzalloc(sizeof(*intel_dvo), GFP_KERNEL);
+ if (!intel_dvo)
+ return;
+
+ intel_connector = intel_connector_alloc();
+ if (!intel_connector) {
+ kfree(intel_dvo);
+ return;
+ }
+
+ intel_dvo->attached_connector = intel_connector;
+
+ intel_encoder = &intel_dvo->base;
+
+ intel_encoder->disable = intel_disable_dvo;
+ intel_encoder->enable = intel_enable_dvo;
+ intel_encoder->get_hw_state = intel_dvo_get_hw_state;
+ intel_encoder->get_config = intel_dvo_get_config;
+ intel_encoder->compute_config = intel_dvo_compute_config;
+ intel_encoder->pre_enable = intel_dvo_pre_enable;
+ intel_connector->get_hw_state = intel_dvo_connector_get_hw_state;
+
+ /* Now, try to find a controller */
+ for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) {
+ struct drm_connector *connector = &intel_connector->base;
+ const struct intel_dvo_device *dvo = &intel_dvo_devices[i];
+ struct i2c_adapter *i2c;
+ int gpio;
+ bool dvoinit;
+ enum pipe pipe;
+ u32 dpll[I915_MAX_PIPES];
+ enum port port;
+
+ /*
+ * Allow the I2C driver info to specify the GPIO to be used in
+ * special cases, but otherwise default to what's defined
+ * in the spec.
+ */
+ if (intel_gmbus_is_valid_pin(dev_priv, dvo->gpio))
+ gpio = dvo->gpio;
+ else if (dvo->type == INTEL_DVO_CHIP_LVDS)
+ gpio = GMBUS_PIN_SSC;
+ else
+ gpio = GMBUS_PIN_DPB;
+
+ /*
+ * Set up the I2C bus necessary for the chip we're probing.
+ * It appears that everything is on GPIOE except for panels
+ * on i830 laptops, which are on GPIOB (DVOA).
+ */
+ i2c = intel_gmbus_get_adapter(dev_priv, gpio);
+
+ intel_dvo->dev = *dvo;
+
+ /*
+ * GMBUS NAK handling seems to be unstable, hence let the
+ * transmitter detection run in bit banging mode for now.
+ */
+ intel_gmbus_force_bit(i2c, true);
+
+ /*
+ * ns2501 requires the DVO 2x clock before it will
+ * respond to i2c accesses, so make sure we have the
+ * clock enabled before we attempt to initialize the
+ * device.
+ */
+ for_each_pipe(dev_priv, pipe) {
+ dpll[pipe] = intel_de_read(dev_priv, DPLL(pipe));
+ intel_de_write(dev_priv, DPLL(pipe),
+ dpll[pipe] | DPLL_DVO_2X_MODE);
+ }
+
+ dvoinit = dvo->dev_ops->init(&intel_dvo->dev, i2c);
+
+ /* restore the DVO 2x clock state to original */
+ for_each_pipe(dev_priv, pipe) {
+ intel_de_write(dev_priv, DPLL(pipe), dpll[pipe]);
+ }
+
+ intel_gmbus_force_bit(i2c, false);
+
+ if (!dvoinit)
+ continue;
+
+ port = intel_dvo_port(dvo->dvo_reg);
+ drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
+ &intel_dvo_enc_funcs, encoder_type,
+ "DVO %c", port_name(port));
+
+ intel_encoder->type = INTEL_OUTPUT_DVO;
+ intel_encoder->power_domain = POWER_DOMAIN_PORT_OTHER;
+ intel_encoder->port = port;
+ intel_encoder->pipe_mask = ~0;
+
+ if (dvo->type != INTEL_DVO_CHIP_LVDS)
+ intel_encoder->cloneable = (1 << INTEL_OUTPUT_ANALOG) |
+ (1 << INTEL_OUTPUT_DVO);
+
+ switch (dvo->type) {
+ case INTEL_DVO_CHIP_TMDS:
+ intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT;
+ drm_connector_init(&dev_priv->drm, connector,
+ &intel_dvo_connector_funcs,
+ DRM_MODE_CONNECTOR_DVII);
+ encoder_type = DRM_MODE_ENCODER_TMDS;
+ break;
+ case INTEL_DVO_CHIP_LVDS_NO_FIXED:
+ case INTEL_DVO_CHIP_LVDS:
+ drm_connector_init(&dev_priv->drm, connector,
+ &intel_dvo_connector_funcs,
+ DRM_MODE_CONNECTOR_LVDS);
+ encoder_type = DRM_MODE_ENCODER_LVDS;
+ break;
+ }
+
+ drm_connector_helper_add(connector,
+ &intel_dvo_connector_helper_funcs);
+ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
+
+ intel_connector_attach_encoder(intel_connector, intel_encoder);
+ if (dvo->type == INTEL_DVO_CHIP_LVDS) {
+ /*
+ * For our LVDS chipsets, we should hopefully be able
+ * to dig the fixed panel mode out of the BIOS data.
+ * However, it's in a different format from the BIOS
+ * data on chipsets with integrated LVDS (stored in AIM
+ * headers, likely), so for now, just get the current
+ * mode being output through DVO.
+ */
+ intel_panel_add_encoder_fixed_mode(intel_connector,
+ intel_encoder);
+
+ intel_panel_init(intel_connector);
+
+ intel_dvo->panel_wants_dither = true;
+ }
+
+ return;
+ }
+
+ kfree(intel_dvo);
+ kfree(intel_connector);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_dvo.h b/drivers/gpu/drm/i915/display/intel_dvo.h
new file mode 100644
index 000000000..3ed0fdf8e
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dvo.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_DVO_H__
+#define __INTEL_DVO_H__
+
+struct drm_i915_private;
+
+void intel_dvo_init(struct drm_i915_private *dev_priv);
+
+#endif /* __INTEL_DVO_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dvo_dev.h b/drivers/gpu/drm/i915/display/intel_dvo_dev.h
new file mode 100644
index 000000000..50205f064
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dvo_dev.h
@@ -0,0 +1,140 @@
+/*
+ * Copyright © 2006 Eric Anholt
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#ifndef __INTEL_DVO_DEV_H__
+#define __INTEL_DVO_DEV_H__
+
+#include <linux/i2c.h>
+
+#include <drm/drm_crtc.h>
+
+#include "i915_reg_defs.h"
+
+struct intel_dvo_device {
+ const char *name;
+ int type;
+ /* DVOA/B/C output register */
+ i915_reg_t dvo_reg;
+ i915_reg_t dvo_srcdim_reg;
+ /* GPIO register used for i2c bus to control this device */
+ u32 gpio;
+ int slave_addr;
+
+ const struct intel_dvo_dev_ops *dev_ops;
+ void *dev_priv;
+ struct i2c_adapter *i2c_bus;
+};
+
+struct intel_dvo_dev_ops {
+ /*
+ * Initialize the device at startup time.
+ * Returns false if the device does not exist.
+ */
+ bool (*init)(struct intel_dvo_device *dvo,
+ struct i2c_adapter *i2cbus);
+
+ /*
+ * Called to allow the output a chance to create properties after the
+ * RandR objects have been created.
+ */
+ void (*create_resources)(struct intel_dvo_device *dvo);
+
+ /*
+ * Turn on/off output.
+ *
+ * Because none of our DVO drivers support intermediate power levels,
+ * we don't expose this in the interface.
+ */
+ void (*dpms)(struct intel_dvo_device *dvo, bool enable);
+
+ /*
+ * Callback for testing a video mode for a given output.
+ *
+ * This function should only check for cases where a mode can't
+ * be supported on the output specifically, and not represent
+ * generic CRTC limitations.
+ *
+ * \return MODE_OK if the mode is valid, or another MODE_* otherwise.
+ */
+ enum drm_mode_status (*mode_valid)(struct intel_dvo_device *dvo,
+ struct drm_display_mode *mode);
+
+ /*
+ * Callback for preparing mode changes on an output
+ */
+ void (*prepare)(struct intel_dvo_device *dvo);
+
+ /*
+ * Callback for committing mode changes on an output
+ */
+ void (*commit)(struct intel_dvo_device *dvo);
+
+ /*
+ * Callback for setting up a video mode after fixups have been made.
+ *
+ * This is only called while the output is disabled. The dpms callback
+ * must be all that's necessary to turn the output on
+ * after this function is called.
+ */
+ void (*mode_set)(struct intel_dvo_device *dvo,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode);
+
+ /*
+ * Probe for a connected output, and return detect_status.
+ */
+ enum drm_connector_status (*detect)(struct intel_dvo_device *dvo);
+
+ /*
+ * Probe the current hw status, returning true if the connected output
+ * is active.
+ */
+ bool (*get_hw_state)(struct intel_dvo_device *dev);
+
+ /**
+ * Query the device for the modes it provides.
+ *
+ * This function may also update MonInfo, mm_width, and mm_height.
+ *
+ * \return singly-linked list of modes or NULL if no modes found.
+ */
+ struct drm_display_mode *(*get_modes)(struct intel_dvo_device *dvo);
+
+ /**
+ * Clean up driver-specific bits of the output
+ */
+ void (*destroy) (struct intel_dvo_device *dvo);
+
+ /**
+ * Debugging hook to dump device registers to log file
+ */
+ void (*dump_regs)(struct intel_dvo_device *dvo);
+};
+
+extern const struct intel_dvo_dev_ops sil164_ops;
+extern const struct intel_dvo_dev_ops ch7xxx_ops;
+extern const struct intel_dvo_dev_ops ivch_ops;
+extern const struct intel_dvo_dev_ops tfp410_ops;
+extern const struct intel_dvo_dev_ops ch7017_ops;
+extern const struct intel_dvo_dev_ops ns2501_ops;
+
+#endif /* __INTEL_DVO_DEV_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c
new file mode 100644
index 000000000..c69a63879
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_fb.c
@@ -0,0 +1,2081 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include <drm/drm_blend.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_modeset_helper.h>
+
+#include "i915_drv.h"
+#include "intel_display.h"
+#include "intel_display_types.h"
+#include "intel_dpt.h"
+#include "intel_fb.h"
+
+#define check_array_bounds(i915, a, i) drm_WARN_ON(&(i915)->drm, (i) >= ARRAY_SIZE(a))
+
+/*
+ * From the Sky Lake PRM:
+ * "The Color Control Surface (CCS) contains the compression status of
+ * the cache-line pairs. The compression state of the cache-line pair
+ * is specified by 2 bits in the CCS. Each CCS cache-line represents
+ * an area on the main surface of 16 x16 sets of 128 byte Y-tiled
+ * cache-line-pairs. CCS is always Y tiled."
+ *
+ * Since cache line pairs refer to horizontally adjacent cache lines,
+ * each cache line in the CCS corresponds to an area of 32x16 cache
+ * lines on the main surface. Since each pixel is 4 bytes, this gives
+ * us a ratio of one byte in the CCS for each 8x16 pixels in the
+ * main surface.
+ */
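+/*
+ * That 8x16 pixel ratio is what the .hsub = 8 and .vsub = 16 fields of
+ * the CCS entries below encode.
+ */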
+static const struct drm_format_info skl_ccs_formats[] = {
+ { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
+ .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
+ { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
+ .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
+ { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
+ .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
+ { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
+ .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
+};
+
+/*
+ * Gen-12 compression uses 4 bits of CCS data for each cache line pair in the
+ * main surface, and each 64B CCS cache line represents an area of 4x1 Y-tiles
+ * in the main surface. With 4 byte pixels and each Y-tile having dimensions of
+ * 32x32 pixels, the ratio turns out to be 1B in the CCS for every 2x32 pixels in
+ * the main surface.
+ */
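+/*
+ * In the entries below this shows up as one CCS byte (.char_per_block)
+ * per 2-pixel-wide block (.block_w) of the main surface, with the
+ * 32-row tile height implicit in the CCS layout.
+ */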
+static const struct drm_format_info gen12_ccs_formats[] = {
+ { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
+ .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
+ .hsub = 1, .vsub = 1, },
+ { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
+ .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
+ .hsub = 1, .vsub = 1, },
+ { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
+ .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
+ .hsub = 1, .vsub = 1, .has_alpha = true },
+ { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
+ .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
+ .hsub = 1, .vsub = 1, .has_alpha = true },
+ { .format = DRM_FORMAT_YUYV, .num_planes = 2,
+ .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
+ .hsub = 2, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_YVYU, .num_planes = 2,
+ .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
+ .hsub = 2, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_UYVY, .num_planes = 2,
+ .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
+ .hsub = 2, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_VYUY, .num_planes = 2,
+ .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
+ .hsub = 2, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_XYUV8888, .num_planes = 2,
+ .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
+ .hsub = 1, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_NV12, .num_planes = 4,
+ .char_per_block = { 1, 2, 1, 1 }, .block_w = { 1, 1, 4, 4 }, .block_h = { 1, 1, 1, 1 },
+ .hsub = 2, .vsub = 2, .is_yuv = true },
+ { .format = DRM_FORMAT_P010, .num_planes = 4,
+ .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
+ .hsub = 2, .vsub = 2, .is_yuv = true },
+ { .format = DRM_FORMAT_P012, .num_planes = 4,
+ .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
+ .hsub = 2, .vsub = 2, .is_yuv = true },
+ { .format = DRM_FORMAT_P016, .num_planes = 4,
+ .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
+ .hsub = 2, .vsub = 2, .is_yuv = true },
+};
+
+/*
+ * Same as gen12_ccs_formats[] above, but with an additional surface used
+ * to pass the Clear Color information, as 64 bits of data in plane 2.
+ */
+static const struct drm_format_info gen12_ccs_cc_formats[] = {
+ { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 3,
+ .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
+ .hsub = 1, .vsub = 1, },
+ { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 3,
+ .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
+ .hsub = 1, .vsub = 1, },
+ { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 3,
+ .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
+ .hsub = 1, .vsub = 1, .has_alpha = true },
+ { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 3,
+ .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
+ .hsub = 1, .vsub = 1, .has_alpha = true },
+};
+
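+/*
+ * Same as gen12_ccs_cc_formats[] above, but without the CCS AUX plane:
+ * only the main surface and the 64 bit Clear Color value (passed in
+ * plane 1) remain.
+ */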
+static const struct drm_format_info gen12_flat_ccs_cc_formats[] = {
+ { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
+ .char_per_block = { 4, 0 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
+ .hsub = 1, .vsub = 1, },
+ { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
+ .char_per_block = { 4, 0 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
+ .hsub = 1, .vsub = 1, },
+ { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
+ .char_per_block = { 4, 0 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
+ .hsub = 1, .vsub = 1, .has_alpha = true },
+ { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
+ .char_per_block = { 4, 0 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
+ .hsub = 1, .vsub = 1, .has_alpha = true },
+};
+
+struct intel_modifier_desc {
+ u64 modifier;
+ struct {
+ u8 from;
+ u8 until;
+ } display_ver;
+#define DISPLAY_VER_ALL { 0, -1 }
+
+ const struct drm_format_info *formats;
+ int format_count;
+#define FORMAT_OVERRIDE(format_list) \
+ .formats = format_list, \
+ .format_count = ARRAY_SIZE(format_list)
+
+ u8 plane_caps;
+
+ struct {
+ u8 cc_planes:3;
+ u8 packed_aux_planes:4;
+ u8 planar_aux_planes:4;
+ } ccs;
+};
+
+#define INTEL_PLANE_CAP_CCS_MASK (INTEL_PLANE_CAP_CCS_RC | \
+ INTEL_PLANE_CAP_CCS_RC_CC | \
+ INTEL_PLANE_CAP_CCS_MC)
+#define INTEL_PLANE_CAP_TILING_MASK (INTEL_PLANE_CAP_TILING_X | \
+ INTEL_PLANE_CAP_TILING_Y | \
+ INTEL_PLANE_CAP_TILING_Yf | \
+ INTEL_PLANE_CAP_TILING_4)
+#define INTEL_PLANE_CAP_TILING_NONE 0
+
+static const struct intel_modifier_desc intel_modifiers[] = {
+ {
+ .modifier = I915_FORMAT_MOD_4_TILED_DG2_MC_CCS,
+ .display_ver = { 13, 13 },
+ .plane_caps = INTEL_PLANE_CAP_TILING_4 | INTEL_PLANE_CAP_CCS_MC,
+ }, {
+ .modifier = I915_FORMAT_MOD_4_TILED_DG2_RC_CCS_CC,
+ .display_ver = { 13, 13 },
+ .plane_caps = INTEL_PLANE_CAP_TILING_4 | INTEL_PLANE_CAP_CCS_RC_CC,
+
+ .ccs.cc_planes = BIT(1),
+
+ FORMAT_OVERRIDE(gen12_flat_ccs_cc_formats),
+ }, {
+ .modifier = I915_FORMAT_MOD_4_TILED_DG2_RC_CCS,
+ .display_ver = { 13, 13 },
+ .plane_caps = INTEL_PLANE_CAP_TILING_4 | INTEL_PLANE_CAP_CCS_RC,
+ }, {
+ .modifier = I915_FORMAT_MOD_4_TILED,
+ .display_ver = { 13, 13 },
+ .plane_caps = INTEL_PLANE_CAP_TILING_4,
+ }, {
+ .modifier = I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS,
+ .display_ver = { 12, 13 },
+ .plane_caps = INTEL_PLANE_CAP_TILING_Y | INTEL_PLANE_CAP_CCS_MC,
+
+ .ccs.packed_aux_planes = BIT(1),
+ .ccs.planar_aux_planes = BIT(2) | BIT(3),
+
+ FORMAT_OVERRIDE(gen12_ccs_formats),
+ }, {
+ .modifier = I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS,
+ .display_ver = { 12, 13 },
+ .plane_caps = INTEL_PLANE_CAP_TILING_Y | INTEL_PLANE_CAP_CCS_RC,
+
+ .ccs.packed_aux_planes = BIT(1),
+
+ FORMAT_OVERRIDE(gen12_ccs_formats),
+ }, {
+ .modifier = I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC,
+ .display_ver = { 12, 13 },
+ .plane_caps = INTEL_PLANE_CAP_TILING_Y | INTEL_PLANE_CAP_CCS_RC_CC,
+
+ .ccs.cc_planes = BIT(2),
+ .ccs.packed_aux_planes = BIT(1),
+
+ FORMAT_OVERRIDE(gen12_ccs_cc_formats),
+ }, {
+ .modifier = I915_FORMAT_MOD_Yf_TILED_CCS,
+ .display_ver = { 9, 11 },
+ .plane_caps = INTEL_PLANE_CAP_TILING_Yf | INTEL_PLANE_CAP_CCS_RC,
+
+ .ccs.packed_aux_planes = BIT(1),
+
+ FORMAT_OVERRIDE(skl_ccs_formats),
+ }, {
+ .modifier = I915_FORMAT_MOD_Y_TILED_CCS,
+ .display_ver = { 9, 11 },
+ .plane_caps = INTEL_PLANE_CAP_TILING_Y | INTEL_PLANE_CAP_CCS_RC,
+
+ .ccs.packed_aux_planes = BIT(1),
+
+ FORMAT_OVERRIDE(skl_ccs_formats),
+ }, {
+ .modifier = I915_FORMAT_MOD_Yf_TILED,
+ .display_ver = { 9, 11 },
+ .plane_caps = INTEL_PLANE_CAP_TILING_Yf,
+ }, {
+ .modifier = I915_FORMAT_MOD_Y_TILED,
+ .display_ver = { 9, 13 },
+ .plane_caps = INTEL_PLANE_CAP_TILING_Y,
+ }, {
+ .modifier = I915_FORMAT_MOD_X_TILED,
+ .display_ver = DISPLAY_VER_ALL,
+ .plane_caps = INTEL_PLANE_CAP_TILING_X,
+ }, {
+ .modifier = DRM_FORMAT_MOD_LINEAR,
+ .display_ver = DISPLAY_VER_ALL,
+ },
+};
+
+static const struct intel_modifier_desc *lookup_modifier_or_null(u64 modifier)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(intel_modifiers); i++)
+ if (intel_modifiers[i].modifier == modifier)
+ return &intel_modifiers[i];
+
+ return NULL;
+}
+
+static const struct intel_modifier_desc *lookup_modifier(u64 modifier)
+{
+ const struct intel_modifier_desc *md = lookup_modifier_or_null(modifier);
+
+ if (WARN_ON(!md))
+ return &intel_modifiers[0];
+
+ return md;
+}
+
+static const struct drm_format_info *
+lookup_format_info(const struct drm_format_info formats[],
+ int num_formats, u32 format)
+{
+ int i;
+
+ for (i = 0; i < num_formats; i++) {
+ if (formats[i].format == format)
+ return &formats[i];
+ }
+
+ return NULL;
+}
+
+/**
+ * intel_fb_get_format_info: Get modifier-specific format information
+ * @cmd: FB add command structure
+ *
+ * Returns:
+ * Returns the format information for @cmd->pixel_format specific to @cmd->modifier[0],
+ * or %NULL if the modifier doesn't override the format.
+ */
+const struct drm_format_info *
+intel_fb_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
+{
+ const struct intel_modifier_desc *md = lookup_modifier_or_null(cmd->modifier[0]);
+
+ if (!md || !md->formats)
+ return NULL;
+
+ return lookup_format_info(md->formats, md->format_count, cmd->pixel_format);
+}
+
+static bool plane_caps_contain_any(u8 caps, u8 mask)
+{
+ return caps & mask;
+}
+
+static bool plane_caps_contain_all(u8 caps, u8 mask)
+{
+ return (caps & mask) == mask;
+}
+
+/**
+ * intel_fb_is_ccs_modifier: Check if a modifier is a CCS modifier type
+ * @modifier: Modifier to check
+ *
+ * Returns:
+ * Returns %true if @modifier is a render, render with color clear or
+ * media compression modifier.
+ */
+bool intel_fb_is_ccs_modifier(u64 modifier)
+{
+ return plane_caps_contain_any(lookup_modifier(modifier)->plane_caps,
+ INTEL_PLANE_CAP_CCS_MASK);
+}
+
+/**
+ * intel_fb_is_rc_ccs_cc_modifier: Check if a modifier is an RC CCS CC modifier type
+ * @modifier: Modifier to check
+ *
+ * Returns:
+ * Returns %true if @modifier is a render with color clear modifier.
+ */
+bool intel_fb_is_rc_ccs_cc_modifier(u64 modifier)
+{
+ return plane_caps_contain_any(lookup_modifier(modifier)->plane_caps,
+ INTEL_PLANE_CAP_CCS_RC_CC);
+}
+
+/**
+ * intel_fb_is_mc_ccs_modifier: Check if a modifier is an MC CCS modifier type
+ * @modifier: Modifier to check
+ *
+ * Returns:
+ * Returns %true if @modifier is a media compression modifier.
+ */
+bool intel_fb_is_mc_ccs_modifier(u64 modifier)
+{
+ return plane_caps_contain_any(lookup_modifier(modifier)->plane_caps,
+ INTEL_PLANE_CAP_CCS_MC);
+}
+
+static bool check_modifier_display_ver_range(const struct intel_modifier_desc *md,
+ u8 display_ver_from, u8 display_ver_until)
+{
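+	/*
+	 * Two inclusive [from, until] ranges intersect iff each one
+	 * starts at or before the other one ends.
+	 */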
+ return md->display_ver.from <= display_ver_until &&
+ display_ver_from <= md->display_ver.until;
+}
+
+static bool plane_has_modifier(struct drm_i915_private *i915,
+ u8 plane_caps,
+ const struct intel_modifier_desc *md)
+{
+ if (!IS_DISPLAY_VER(i915, md->display_ver.from, md->display_ver.until))
+ return false;
+
+ if (!plane_caps_contain_all(plane_caps, md->plane_caps))
+ return false;
+
+ return true;
+}
+
+/**
+ * intel_fb_plane_get_modifiers: Get the modifiers for the given platform and plane capabilities
+ * @i915: i915 device instance
+ * @plane_caps: capabilities for the plane the modifiers are queried for
+ *
+ * Returns:
+ * Returns the list of modifiers allowed by the @i915 platform and @plane_caps.
+ * The caller must free the returned buffer.
+ */
+u64 *intel_fb_plane_get_modifiers(struct drm_i915_private *i915,
+ u8 plane_caps)
+{
+ u64 *list, *p;
+ int count = 1; /* +1 for invalid modifier terminator */
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(intel_modifiers); i++) {
+ if (plane_has_modifier(i915, plane_caps, &intel_modifiers[i]))
+ count++;
+ }
+
+ list = kmalloc_array(count, sizeof(*list), GFP_KERNEL);
+ if (drm_WARN_ON(&i915->drm, !list))
+ return NULL;
+
+ p = list;
+ for (i = 0; i < ARRAY_SIZE(intel_modifiers); i++) {
+ if (plane_has_modifier(i915, plane_caps, &intel_modifiers[i]))
+ *p++ = intel_modifiers[i].modifier;
+ }
+ *p++ = DRM_FORMAT_MOD_INVALID;
+
+ return list;
+}
+
+/**
+ * intel_fb_plane_supports_modifier: Determine if a modifier is supported by the given plane
+ * @plane: Plane to check the modifier support for
+ * @modifier: The modifier to check the support for
+ *
+ * Returns:
+ * %true if the @modifier is supported on @plane.
+ */
+bool intel_fb_plane_supports_modifier(struct intel_plane *plane, u64 modifier)
+{
+ int i;
+
+ for (i = 0; i < plane->base.modifier_count; i++)
+ if (plane->base.modifiers[i] == modifier)
+ return true;
+
+ return false;
+}
+
+static bool format_is_yuv_semiplanar(const struct intel_modifier_desc *md,
+ const struct drm_format_info *info)
+{
+ if (!info->is_yuv)
+ return false;
+
+ if (hweight8(md->ccs.planar_aux_planes) == 2)
+ return info->num_planes == 4;
+ else
+ return info->num_planes == 2;
+}
+
+/**
+ * intel_format_info_is_yuv_semiplanar: Check if the given format is YUV semiplanar
+ * @info: format to check
+ * @modifier: modifier used with the format
+ *
+ * Returns:
+ * %true if @info / @modifier is YUV semiplanar.
+ */
+bool intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
+ u64 modifier)
+{
+ return format_is_yuv_semiplanar(lookup_modifier(modifier), info);
+}
+
+static u8 ccs_aux_plane_mask(const struct intel_modifier_desc *md,
+ const struct drm_format_info *format)
+{
+ if (format_is_yuv_semiplanar(md, format))
+ return md->ccs.planar_aux_planes;
+ else
+ return md->ccs.packed_aux_planes;
+}
+
+/**
+ * intel_fb_is_ccs_aux_plane: Check if a framebuffer color plane is a CCS AUX plane
+ * @fb: Framebuffer
+ * @color_plane: color plane index to check
+ *
+ * Returns:
+ * Returns %true if @fb's color plane at index @color_plane is a CCS AUX plane.
+ */
+bool intel_fb_is_ccs_aux_plane(const struct drm_framebuffer *fb, int color_plane)
+{
+ const struct intel_modifier_desc *md = lookup_modifier(fb->modifier);
+
+ return ccs_aux_plane_mask(md, fb->format) & BIT(color_plane);
+}
+
+/**
+ * intel_fb_is_gen12_ccs_aux_plane: Check if a framebuffer color plane is a GEN12 CCS AUX plane
+ * @fb: Framebuffer
+ * @color_plane: color plane index to check
+ *
+ * Returns:
+ * Returns %true if @fb's color plane at index @color_plane is a GEN12 CCS AUX plane.
+ */
+static bool intel_fb_is_gen12_ccs_aux_plane(const struct drm_framebuffer *fb, int color_plane)
+{
+ const struct intel_modifier_desc *md = lookup_modifier(fb->modifier);
+
+ return check_modifier_display_ver_range(md, 12, 13) &&
+ ccs_aux_plane_mask(md, fb->format) & BIT(color_plane);
+}
+
+/**
+ * intel_fb_rc_ccs_cc_plane: Get the CCS CC color plane index for a framebuffer
+ * @fb: Framebuffer
+ *
+ * Returns:
+ * Returns the index of the color clear plane for @fb, or -1 if @fb is not a
+ * framebuffer using a render compression/color clear modifier.
+ */
+int intel_fb_rc_ccs_cc_plane(const struct drm_framebuffer *fb)
+{
+ const struct intel_modifier_desc *md = lookup_modifier(fb->modifier);
+
+ if (!md->ccs.cc_planes)
+ return -1;
+
+ drm_WARN_ON_ONCE(fb->dev, hweight8(md->ccs.cc_planes) > 1);
+
+ return ilog2((int)md->ccs.cc_planes);
+}
+
+static bool is_gen12_ccs_cc_plane(const struct drm_framebuffer *fb, int color_plane)
+{
+ return intel_fb_rc_ccs_cc_plane(fb) == color_plane;
+}
+
+static bool is_semiplanar_uv_plane(const struct drm_framebuffer *fb, int color_plane)
+{
+ return intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) &&
+ color_plane == 1;
+}
+
+bool is_surface_linear(const struct drm_framebuffer *fb, int color_plane)
+{
+ return fb->modifier == DRM_FORMAT_MOD_LINEAR ||
+ intel_fb_is_gen12_ccs_aux_plane(fb, color_plane) ||
+ is_gen12_ccs_cc_plane(fb, color_plane);
+}
+
+int main_to_ccs_plane(const struct drm_framebuffer *fb, int main_plane)
+{
+ drm_WARN_ON(fb->dev, !intel_fb_is_ccs_modifier(fb->modifier) ||
+ (main_plane && main_plane >= fb->format->num_planes / 2));
+
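+	/* The CCS AUX planes occupy the second half of the format's plane list */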
+ return fb->format->num_planes / 2 + main_plane;
+}
+
+int skl_ccs_to_main_plane(const struct drm_framebuffer *fb, int ccs_plane)
+{
+ drm_WARN_ON(fb->dev, !intel_fb_is_ccs_modifier(fb->modifier) ||
+ ccs_plane < fb->format->num_planes / 2);
+
+ if (is_gen12_ccs_cc_plane(fb, ccs_plane))
+ return 0;
+
+ return ccs_plane - fb->format->num_planes / 2;
+}
+
+static unsigned int gen12_ccs_aux_stride(struct intel_framebuffer *fb, int ccs_plane)
+{
+ int main_plane = skl_ccs_to_main_plane(&fb->base, ccs_plane);
+ unsigned int main_stride = fb->base.pitches[main_plane];
+ unsigned int main_tile_width = intel_tile_width_bytes(&fb->base, main_plane);
+
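+	/*
+	 * One 64B CCS cache line maps a 4x1 tile area of the main surface,
+	 * so the AUX stride is 64B for every 4 tiles of main surface stride.
+	 */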
+ return DIV_ROUND_UP(main_stride, 4 * main_tile_width) * 64;
+}
+
+int skl_main_to_aux_plane(const struct drm_framebuffer *fb, int main_plane)
+{
+ const struct intel_modifier_desc *md = lookup_modifier(fb->modifier);
+ struct drm_i915_private *i915 = to_i915(fb->dev);
+
+ if (md->ccs.packed_aux_planes | md->ccs.planar_aux_planes)
+ return main_to_ccs_plane(fb, main_plane);
+ else if (DISPLAY_VER(i915) < 11 &&
+ format_is_yuv_semiplanar(md, fb->format))
+ return 1;
+ else
+ return 0;
+}
+
+unsigned int intel_tile_size(const struct drm_i915_private *i915)
+{
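+	/* gen2 uses 2 KiB GTT pages/tiles, everything newer uses 4 KiB */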
+ return DISPLAY_VER(i915) == 2 ? 2048 : 4096;
+}
+
+unsigned int
+intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
+{
+ struct drm_i915_private *dev_priv = to_i915(fb->dev);
+ unsigned int cpp = fb->format->cpp[color_plane];
+
+ switch (fb->modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ return intel_tile_size(dev_priv);
+ case I915_FORMAT_MOD_X_TILED:
+ if (DISPLAY_VER(dev_priv) == 2)
+ return 128;
+ else
+ return 512;
+ case I915_FORMAT_MOD_4_TILED_DG2_RC_CCS:
+ case I915_FORMAT_MOD_4_TILED_DG2_RC_CCS_CC:
+ case I915_FORMAT_MOD_4_TILED_DG2_MC_CCS:
+ case I915_FORMAT_MOD_4_TILED:
+ /*
+		 * Each 4K tile consists of 64B (8*8) subtiles, with the
+		 * same shape as a Y tile (i.e. 4 * 16B OWords).
+ */
+ return 128;
+ case I915_FORMAT_MOD_Y_TILED_CCS:
+ if (intel_fb_is_ccs_aux_plane(fb, color_plane))
+ return 128;
+ fallthrough;
+ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
+ if (intel_fb_is_ccs_aux_plane(fb, color_plane) ||
+ is_gen12_ccs_cc_plane(fb, color_plane))
+ return 64;
+ fallthrough;
+ case I915_FORMAT_MOD_Y_TILED:
+ if (DISPLAY_VER(dev_priv) == 2 || HAS_128_BYTE_Y_TILING(dev_priv))
+ return 128;
+ else
+ return 512;
+ case I915_FORMAT_MOD_Yf_TILED_CCS:
+ if (intel_fb_is_ccs_aux_plane(fb, color_plane))
+ return 128;
+ fallthrough;
+ case I915_FORMAT_MOD_Yf_TILED:
+ switch (cpp) {
+ case 1:
+ return 64;
+ case 2:
+ case 4:
+ return 128;
+ case 8:
+ case 16:
+ return 256;
+ default:
+ MISSING_CASE(cpp);
+ return cpp;
+ }
+ break;
+ default:
+ MISSING_CASE(fb->modifier);
+ return cpp;
+ }
+}
+
+unsigned int intel_tile_height(const struct drm_framebuffer *fb, int color_plane)
+{
+ return intel_tile_size(to_i915(fb->dev)) /
+ intel_tile_width_bytes(fb, color_plane);
+}
+
+/*
+ * Return the tile dimensions in pixel units, based on the (2 or 4 kbyte) GTT
+ * page tile size.
+ */
+static void intel_tile_dims(const struct drm_framebuffer *fb, int color_plane,
+ unsigned int *tile_width,
+ unsigned int *tile_height)
+{
+ unsigned int tile_width_bytes = intel_tile_width_bytes(fb, color_plane);
+ unsigned int cpp = fb->format->cpp[color_plane];
+
+ *tile_width = tile_width_bytes / cpp;
+ *tile_height = intel_tile_height(fb, color_plane);
+}
+
+/*
+ * Return the tile dimensions in pixel units, based on the tile block size.
+ * The block covers the full GTT page sized tile on all tiled surfaces and
+ * it's a 64 byte portion of the tile on TGL+ CCS surfaces.
+ */
+static void intel_tile_block_dims(const struct drm_framebuffer *fb, int color_plane,
+ unsigned int *tile_width,
+ unsigned int *tile_height)
+{
+ intel_tile_dims(fb, color_plane, tile_width, tile_height);
+
+ if (intel_fb_is_gen12_ccs_aux_plane(fb, color_plane))
+ *tile_height = 1;
+}
+
+unsigned int intel_tile_row_size(const struct drm_framebuffer *fb, int color_plane)
+{
+ unsigned int tile_width, tile_height;
+
+ intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
+
+ return fb->pitches[color_plane] * tile_height;
+}
+
+unsigned int
+intel_fb_align_height(const struct drm_framebuffer *fb,
+ int color_plane, unsigned int height)
+{
+ unsigned int tile_height = intel_tile_height(fb, color_plane);
+
+ return ALIGN(height, tile_height);
+}
+
+static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
+{
+ u8 tiling_caps = lookup_modifier(fb_modifier)->plane_caps &
+ INTEL_PLANE_CAP_TILING_MASK;
+
+ switch (tiling_caps) {
+ case INTEL_PLANE_CAP_TILING_Y:
+ return I915_TILING_Y;
+ case INTEL_PLANE_CAP_TILING_X:
+ return I915_TILING_X;
+ case INTEL_PLANE_CAP_TILING_4:
+ case INTEL_PLANE_CAP_TILING_Yf:
+ case INTEL_PLANE_CAP_TILING_NONE:
+ return I915_TILING_NONE;
+ default:
+ MISSING_CASE(tiling_caps);
+ return I915_TILING_NONE;
+ }
+}
+
+static bool intel_modifier_uses_dpt(struct drm_i915_private *i915, u64 modifier)
+{
+ return DISPLAY_VER(i915) >= 13 && modifier != DRM_FORMAT_MOD_LINEAR;
+}
+
+bool intel_fb_uses_dpt(const struct drm_framebuffer *fb)
+{
+ return fb && intel_modifier_uses_dpt(to_i915(fb->dev), fb->modifier);
+}
+
+unsigned int intel_cursor_alignment(const struct drm_i915_private *i915)
+{
+ if (IS_I830(i915))
+ return 16 * 1024;
+ else if (IS_I85X(i915))
+ return 256;
+ else if (IS_I845G(i915) || IS_I865G(i915))
+ return 32;
+ else
+ return 4 * 1024;
+}
+
+static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
+{
+ if (DISPLAY_VER(dev_priv) >= 9)
+ return 256 * 1024;
+ else if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
+ IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ return 128 * 1024;
+ else if (DISPLAY_VER(dev_priv) >= 4)
+ return 4 * 1024;
+ else
+ return 0;
+}
+
+unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
+ int color_plane)
+{
+ struct drm_i915_private *dev_priv = to_i915(fb->dev);
+
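+	/* Framebuffers mapped through the DPT get 512 * 4K page = 2 MiB alignment */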
+ if (intel_fb_uses_dpt(fb))
+ return 512 * 4096;
+
+ /* AUX_DIST needs only 4K alignment */
+ if (intel_fb_is_ccs_aux_plane(fb, color_plane))
+ return 4096;
+
+ if (is_semiplanar_uv_plane(fb, color_plane)) {
+ /*
+ * TODO: cross-check wrt. the bspec stride in bytes * 64 bytes
+ * alignment for linear UV planes on all platforms.
+ */
+ if (DISPLAY_VER(dev_priv) >= 12) {
+ if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
+ return intel_linear_alignment(dev_priv);
+
+ return intel_tile_row_size(fb, color_plane);
+ }
+
+ return 4096;
+ }
+
+ drm_WARN_ON(&dev_priv->drm, color_plane != 0);
+
+ switch (fb->modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ return intel_linear_alignment(dev_priv);
+ case I915_FORMAT_MOD_X_TILED:
+ if (HAS_ASYNC_FLIPS(dev_priv))
+ return 256 * 1024;
+ return 0;
+ case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
+ return 16 * 1024;
+ case I915_FORMAT_MOD_Y_TILED_CCS:
+ case I915_FORMAT_MOD_Yf_TILED_CCS:
+ case I915_FORMAT_MOD_Y_TILED:
+ case I915_FORMAT_MOD_4_TILED:
+ case I915_FORMAT_MOD_Yf_TILED:
+ return 1 * 1024 * 1024;
+ case I915_FORMAT_MOD_4_TILED_DG2_RC_CCS:
+ case I915_FORMAT_MOD_4_TILED_DG2_RC_CCS_CC:
+ case I915_FORMAT_MOD_4_TILED_DG2_MC_CCS:
+ return 16 * 1024;
+ default:
+ MISSING_CASE(fb->modifier);
+ return 0;
+ }
+}
+
+void intel_fb_plane_get_subsampling(int *hsub, int *vsub,
+ const struct drm_framebuffer *fb,
+ int color_plane)
+{
+ int main_plane;
+
+ if (color_plane == 0) {
+ *hsub = 1;
+ *vsub = 1;
+
+ return;
+ }
+
+ /*
+	 * TODO: Deduce the subsampling from the char block for all CCS
+ * formats and planes.
+ */
+ if (!intel_fb_is_gen12_ccs_aux_plane(fb, color_plane)) {
+ *hsub = fb->format->hsub;
+ *vsub = fb->format->vsub;
+
+ return;
+ }
+
+ main_plane = skl_ccs_to_main_plane(fb, color_plane);
+ *hsub = drm_format_info_block_width(fb->format, color_plane) /
+ drm_format_info_block_width(fb->format, main_plane);
+
+ /*
+ * The min stride check in the core framebuffer_check() function
+ * assumes that format->hsub applies to every plane except for the
+ * first plane. That's incorrect for the CCS AUX plane of the first
+ * plane, but for the above check to pass we must define the block
+ * width with that subsampling applied to it. Adjust the width here
+ * accordingly, so we can calculate the actual subsampling factor.
+ */
+ if (main_plane == 0)
+ *hsub *= fb->format->hsub;
+
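+	/*
+	 * A gen12 CCS AUX cache line covers a 4x1 Y-tile (i.e. 32 line
+	 * high) area of the main surface, hence the fixed subsampling.
+	 */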
+ *vsub = 32;
+}
+
+static void intel_fb_plane_dims(const struct intel_framebuffer *fb, int color_plane, int *w, int *h)
+{
+ int main_plane = intel_fb_is_ccs_aux_plane(&fb->base, color_plane) ?
+ skl_ccs_to_main_plane(&fb->base, color_plane) : 0;
+ unsigned int main_width = fb->base.width;
+ unsigned int main_height = fb->base.height;
+ int main_hsub, main_vsub;
+ int hsub, vsub;
+
+ intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, &fb->base, main_plane);
+ intel_fb_plane_get_subsampling(&hsub, &vsub, &fb->base, color_plane);
+
+ *w = DIV_ROUND_UP(main_width, main_hsub * hsub);
+ *h = DIV_ROUND_UP(main_height, main_vsub * vsub);
+}
+
+static u32 intel_adjust_tile_offset(int *x, int *y,
+ unsigned int tile_width,
+ unsigned int tile_height,
+ unsigned int tile_size,
+ unsigned int pitch_tiles,
+ u32 old_offset,
+ u32 new_offset)
+{
+ unsigned int pitch_pixels = pitch_tiles * tile_width;
+ unsigned int tiles;
+
+ WARN_ON(old_offset & (tile_size - 1));
+ WARN_ON(new_offset & (tile_size - 1));
+ WARN_ON(new_offset > old_offset);
+
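+	/*
+	 * Convert the byte distance back into whole tiles, e.g. a 64 KiB
+	 * difference with 4 KiB tiles is 16 tiles: full rows of tiles go
+	 * into y, the remainder into x.
+	 */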
+ tiles = (old_offset - new_offset) / tile_size;
+
+ *y += tiles / pitch_tiles * tile_height;
+ *x += tiles % pitch_tiles * tile_width;
+
+ /* minimize x in case it got needlessly big */
+ *y += *x / pitch_pixels * tile_height;
+ *x %= pitch_pixels;
+
+ return new_offset;
+}
+
+static u32 intel_adjust_linear_offset(int *x, int *y,
+ unsigned int cpp,
+ unsigned int pitch,
+ u32 old_offset,
+ u32 new_offset)
+{
+ old_offset += *y * pitch + *x * cpp;
+
+ *y = (old_offset - new_offset) / pitch;
+ *x = ((old_offset - new_offset) - *y * pitch) / cpp;
+
+ return new_offset;
+}
+
+static u32 intel_adjust_aligned_offset(int *x, int *y,
+ const struct drm_framebuffer *fb,
+ int color_plane,
+ unsigned int rotation,
+ unsigned int pitch,
+ u32 old_offset, u32 new_offset)
+{
+ struct drm_i915_private *i915 = to_i915(fb->dev);
+ unsigned int cpp = fb->format->cpp[color_plane];
+
+ drm_WARN_ON(&i915->drm, new_offset > old_offset);
+
+ if (!is_surface_linear(fb, color_plane)) {
+ unsigned int tile_size, tile_width, tile_height;
+ unsigned int pitch_tiles;
+
+ tile_size = intel_tile_size(i915);
+ intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
+
+ if (drm_rotation_90_or_270(rotation)) {
+ pitch_tiles = pitch / tile_height;
+ swap(tile_width, tile_height);
+ } else {
+ pitch_tiles = pitch / (tile_width * cpp);
+ }
+
+ intel_adjust_tile_offset(x, y, tile_width, tile_height,
+ tile_size, pitch_tiles,
+ old_offset, new_offset);
+ } else {
+ intel_adjust_linear_offset(x, y, cpp, pitch,
+ old_offset, new_offset);
+ }
+
+ return new_offset;
+}
+
+/*
+ * Adjust the tile offset by moving the difference into
+ * the x/y offsets.
+ */
+u32 intel_plane_adjust_aligned_offset(int *x, int *y,
+ const struct intel_plane_state *state,
+ int color_plane,
+ u32 old_offset, u32 new_offset)
+{
+ return intel_adjust_aligned_offset(x, y, state->hw.fb, color_plane,
+ state->hw.rotation,
+ state->view.color_plane[color_plane].mapping_stride,
+ old_offset, new_offset);
+}
+
+/*
+ * Computes the aligned offset to the base tile and adjusts
+ * x, y. bytes per pixel is assumed to be a power-of-two.
+ *
+ * In the 90/270 rotated case, x and y are assumed
+ * to be already rotated to match the rotated GTT view, and
+ * pitch is the tile_height aligned framebuffer height.
+ *
+ * This function is used when computing the derived information
+ * under intel_framebuffer, so using any of that information
+ * here is not allowed. Anything under drm_framebuffer can be
+ * used. This is why the user has to pass in the pitch since it
+ * is specified in the rotated orientation.
+ */
+static u32 intel_compute_aligned_offset(struct drm_i915_private *i915,
+ int *x, int *y,
+ const struct drm_framebuffer *fb,
+ int color_plane,
+ unsigned int pitch,
+ unsigned int rotation,
+ u32 alignment)
+{
+ unsigned int cpp = fb->format->cpp[color_plane];
+ u32 offset, offset_aligned;
+
+ if (!is_surface_linear(fb, color_plane)) {
+ unsigned int tile_size, tile_width, tile_height;
+ unsigned int tile_rows, tiles, pitch_tiles;
+
+ tile_size = intel_tile_size(i915);
+ intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
+
+ if (drm_rotation_90_or_270(rotation)) {
+ pitch_tiles = pitch / tile_height;
+ swap(tile_width, tile_height);
+ } else {
+ pitch_tiles = pitch / (tile_width * cpp);
+ }
+
+ tile_rows = *y / tile_height;
+ *y %= tile_height;
+
+ tiles = *x / tile_width;
+ *x %= tile_width;
+
+ offset = (tile_rows * pitch_tiles + tiles) * tile_size;
+
+ offset_aligned = offset;
+ if (alignment)
+ offset_aligned = rounddown(offset_aligned, alignment);
+
+ intel_adjust_tile_offset(x, y, tile_width, tile_height,
+ tile_size, pitch_tiles,
+ offset, offset_aligned);
+ } else {
+ offset = *y * pitch + *x * cpp;
+ offset_aligned = offset;
+ if (alignment) {
+ offset_aligned = rounddown(offset_aligned, alignment);
+ *y = (offset % alignment) / pitch;
+ *x = ((offset % alignment) - *y * pitch) / cpp;
+ } else {
+ *y = *x = 0;
+ }
+ }
+
+ return offset_aligned;
+}
+
+u32 intel_plane_compute_aligned_offset(int *x, int *y,
+ const struct intel_plane_state *state,
+ int color_plane)
+{
+ struct intel_plane *intel_plane = to_intel_plane(state->uapi.plane);
+ struct drm_i915_private *i915 = to_i915(intel_plane->base.dev);
+ const struct drm_framebuffer *fb = state->hw.fb;
+ unsigned int rotation = state->hw.rotation;
+ int pitch = state->view.color_plane[color_plane].mapping_stride;
+ u32 alignment;
+
+ if (intel_plane->id == PLANE_CURSOR)
+ alignment = intel_cursor_alignment(i915);
+ else
+ alignment = intel_surf_alignment(fb, color_plane);
+
+ return intel_compute_aligned_offset(i915, x, y, fb, color_plane,
+ pitch, rotation, alignment);
+}
+
+/* Convert the fb->offset[] into x/y offsets */
+static int intel_fb_offset_to_xy(int *x, int *y,
+ const struct drm_framebuffer *fb,
+ int color_plane)
+{
+ struct drm_i915_private *i915 = to_i915(fb->dev);
+ unsigned int height;
+ u32 alignment;
+
+ if (DISPLAY_VER(i915) >= 12 &&
+ !intel_fb_needs_pot_stride_remap(to_intel_framebuffer(fb)) &&
+ is_semiplanar_uv_plane(fb, color_plane))
+ alignment = intel_tile_row_size(fb, color_plane);
+ else if (fb->modifier != DRM_FORMAT_MOD_LINEAR)
+ alignment = intel_tile_size(i915);
+ else
+ alignment = 0;
+
+ if (alignment != 0 && fb->offsets[color_plane] % alignment) {
+ drm_dbg_kms(&i915->drm,
+ "Misaligned offset 0x%08x for color plane %d\n",
+ fb->offsets[color_plane], color_plane);
+ return -EINVAL;
+ }
+
+ height = drm_framebuffer_plane_height(fb->height, fb, color_plane);
+ height = ALIGN(height, intel_tile_height(fb, color_plane));
+
+ /* Catch potential overflows early */
+ if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]),
+ fb->offsets[color_plane])) {
+ drm_dbg_kms(&i915->drm,
+ "Bad offset 0x%08x or pitch %d for color plane %d\n",
+ fb->offsets[color_plane], fb->pitches[color_plane],
+ color_plane);
+ return -ERANGE;
+ }
+
+ *x = 0;
+ *y = 0;
+
+ intel_adjust_aligned_offset(x, y,
+ fb, color_plane, DRM_MODE_ROTATE_0,
+ fb->pitches[color_plane],
+ fb->offsets[color_plane], 0);
+
+ return 0;
+}
+
+static int intel_fb_check_ccs_xy(const struct drm_framebuffer *fb, int ccs_plane, int x, int y)
+{
+ struct drm_i915_private *i915 = to_i915(fb->dev);
+ const struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+ int main_plane;
+ int hsub, vsub;
+ int tile_width, tile_height;
+ int ccs_x, ccs_y;
+ int main_x, main_y;
+
+ if (!intel_fb_is_ccs_aux_plane(fb, ccs_plane))
+ return 0;
+
+ /*
+ * While all the tile dimensions are based on a 2k or 4k GTT page size
+ * here the main and CCS coordinates must match only within a (64 byte
+ * on TGL+) block inside the tile.
+ */
+ intel_tile_block_dims(fb, ccs_plane, &tile_width, &tile_height);
+ intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane);
+
+ tile_width *= hsub;
+ tile_height *= vsub;
+
+ ccs_x = (x * hsub) % tile_width;
+ ccs_y = (y * vsub) % tile_height;
+
+ main_plane = skl_ccs_to_main_plane(fb, ccs_plane);
+ main_x = intel_fb->normal_view.color_plane[main_plane].x % tile_width;
+ main_y = intel_fb->normal_view.color_plane[main_plane].y % tile_height;
+
+ /*
+ * CCS doesn't have its own x/y offset register, so the intra CCS tile
+ * x/y offsets must match between CCS and the main surface.
+ */
+ if (main_x != ccs_x || main_y != ccs_y) {
+ drm_dbg_kms(&i915->drm,
+ "Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
+ main_x, main_y,
+ ccs_x, ccs_y,
+ intel_fb->normal_view.color_plane[main_plane].x,
+ intel_fb->normal_view.color_plane[main_plane].y,
+ x, y);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static bool intel_plane_can_remap(const struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ int i;
+
+ /* We don't want to deal with remapping with cursors */
+ if (plane->id == PLANE_CURSOR)
+ return false;
+
+ /*
+ * The display engine limits already match/exceed the
+	 * render engine limits, so there's not much point in remapping.
+	 * We would also need to deal with the fence POT alignment
+ * and gen2 2KiB GTT tile size.
+ */
+ if (DISPLAY_VER(i915) < 4)
+ return false;
+
+ /*
+ * The new CCS hash mode isn't compatible with remapping as
+ * the virtual address of the pages affects the compressed data.
+ */
+ if (intel_fb_is_ccs_modifier(fb->modifier))
+ return false;
+
+ /* Linear needs a page aligned stride for remapping */
+ if (fb->modifier == DRM_FORMAT_MOD_LINEAR) {
+ unsigned int alignment = intel_tile_size(i915) - 1;
+
+ for (i = 0; i < fb->format->num_planes; i++) {
+ if (fb->pitches[i] & alignment)
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool intel_fb_needs_pot_stride_remap(const struct intel_framebuffer *fb)
+{
+ struct drm_i915_private *i915 = to_i915(fb->base.dev);
+
+ return (IS_ALDERLAKE_P(i915) || DISPLAY_VER(i915) >= 14) &&
+ intel_fb_uses_dpt(&fb->base);
+}
+
+static int intel_fb_pitch(const struct intel_framebuffer *fb, int color_plane, unsigned int rotation)
+{
+ if (drm_rotation_90_or_270(rotation))
+ return fb->rotated_view.color_plane[color_plane].mapping_stride;
+ else if (intel_fb_needs_pot_stride_remap(fb))
+ return fb->remapped_view.color_plane[color_plane].mapping_stride;
+ else
+ return fb->normal_view.color_plane[color_plane].mapping_stride;
+}
+
+static bool intel_plane_needs_remap(const struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ const struct intel_framebuffer *fb = to_intel_framebuffer(plane_state->hw.fb);
+ unsigned int rotation = plane_state->hw.rotation;
+ u32 stride, max_stride;
+
+ /*
+ * No remapping for invisible planes since we don't have
+ * an actual source viewport to remap.
+ */
+ if (!plane_state->uapi.visible)
+ return false;
+
+ if (!intel_plane_can_remap(plane_state))
+ return false;
+
+ /*
+ * FIXME: aux plane limits on gen9+ are
+ * unclear in Bspec, for now no checking.
+ */
+ stride = intel_fb_pitch(fb, 0, rotation);
+ max_stride = plane->max_stride(plane, fb->base.format->format,
+ fb->base.modifier, rotation);
+
+ return stride > max_stride;
+}
+
+static int convert_plane_offset_to_xy(const struct intel_framebuffer *fb, int color_plane,
+ int plane_width, int *x, int *y)
+{
+ struct drm_i915_gem_object *obj = intel_fb_obj(&fb->base);
+ int ret;
+
+ ret = intel_fb_offset_to_xy(x, y, &fb->base, color_plane);
+ if (ret) {
+ drm_dbg_kms(fb->base.dev,
+ "bad fb plane %d offset: 0x%x\n",
+ color_plane, fb->base.offsets[color_plane]);
+ return ret;
+ }
+
+ ret = intel_fb_check_ccs_xy(&fb->base, color_plane, *x, *y);
+ if (ret)
+ return ret;
+
+ /*
+ * The fence (if used) is aligned to the start of the object
+ * so having the framebuffer wrap around across the edge of the
+ * fenced region doesn't really work. We have no API to configure
+ * the fence start offset within the object (nor could we probably
+	 * on gen2/3). So it's easier if we just require that the
+ * fb layout agrees with the fence layout. We already check that the
+ * fb stride matches the fence stride elsewhere.
+ */
+ if (color_plane == 0 && i915_gem_object_is_tiled(obj) &&
+ (*x + plane_width) * fb->base.format->cpp[color_plane] > fb->base.pitches[color_plane]) {
+ drm_dbg_kms(fb->base.dev,
+ "bad fb plane %d offset: 0x%x\n",
+ color_plane, fb->base.offsets[color_plane]);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static u32 calc_plane_aligned_offset(const struct intel_framebuffer *fb, int color_plane, int *x, int *y)
+{
+ struct drm_i915_private *i915 = to_i915(fb->base.dev);
+ unsigned int tile_size = intel_tile_size(i915);
+ u32 offset;
+
+ offset = intel_compute_aligned_offset(i915, x, y, &fb->base, color_plane,
+ fb->base.pitches[color_plane],
+ DRM_MODE_ROTATE_0,
+ tile_size);
+
+ return offset / tile_size;
+}
+
+struct fb_plane_view_dims {
+ unsigned int width, height;
+ unsigned int tile_width, tile_height;
+};
+
+static void init_plane_view_dims(const struct intel_framebuffer *fb, int color_plane,
+ unsigned int width, unsigned int height,
+ struct fb_plane_view_dims *dims)
+{
+ dims->width = width;
+ dims->height = height;
+
+ intel_tile_dims(&fb->base, color_plane, &dims->tile_width, &dims->tile_height);
+}
+
+static unsigned int
+plane_view_src_stride_tiles(const struct intel_framebuffer *fb, int color_plane,
+ const struct fb_plane_view_dims *dims)
+{
+ return DIV_ROUND_UP(fb->base.pitches[color_plane],
+ dims->tile_width * fb->base.format->cpp[color_plane]);
+}
+
+static unsigned int
+plane_view_dst_stride_tiles(const struct intel_framebuffer *fb, int color_plane,
+ unsigned int pitch_tiles)
+{
+ if (intel_fb_needs_pot_stride_remap(fb)) {
+ /*
+		 * ADL_P, the only platform needing a POT stride, has a minimum
+ * of 8 main surface tiles.
+ */
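+		/* e.g. 10 main surface tiles remap to a 16 tile POT stride */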
+ return roundup_pow_of_two(max(pitch_tiles, 8u));
+ } else {
+ return pitch_tiles;
+ }
+}
+
+static unsigned int
+plane_view_scanout_stride(const struct intel_framebuffer *fb, int color_plane,
+ unsigned int tile_width,
+ unsigned int src_stride_tiles, unsigned int dst_stride_tiles)
+{
+ struct drm_i915_private *i915 = to_i915(fb->base.dev);
+ unsigned int stride_tiles;
+
+ if ((IS_ALDERLAKE_P(i915) || DISPLAY_VER(i915) >= 14) &&
+ src_stride_tiles < dst_stride_tiles)
+ stride_tiles = src_stride_tiles;
+ else
+ stride_tiles = dst_stride_tiles;
+
+ return stride_tiles * tile_width * fb->base.format->cpp[color_plane];
+}
+
+static unsigned int
+plane_view_width_tiles(const struct intel_framebuffer *fb, int color_plane,
+ const struct fb_plane_view_dims *dims,
+ int x)
+{
+ return DIV_ROUND_UP(x + dims->width, dims->tile_width);
+}
+
+static unsigned int
+plane_view_height_tiles(const struct intel_framebuffer *fb, int color_plane,
+ const struct fb_plane_view_dims *dims,
+ int y)
+{
+ return DIV_ROUND_UP(y + dims->height, dims->tile_height);
+}
+
+static unsigned int
+plane_view_linear_tiles(const struct intel_framebuffer *fb, int color_plane,
+ const struct fb_plane_view_dims *dims,
+ int x, int y)
+{
+ struct drm_i915_private *i915 = to_i915(fb->base.dev);
+ unsigned int size;
+
+ size = (y + dims->height) * fb->base.pitches[color_plane] +
+ x * fb->base.format->cpp[color_plane];
+
+ return DIV_ROUND_UP(size, intel_tile_size(i915));
+}
+
+#define assign_chk_ovf(i915, var, val) ({ \
+ drm_WARN_ON(&(i915)->drm, overflows_type(val, var)); \
+ (var) = (val); \
+})
+
+#define assign_bfld_chk_ovf(i915, var, val) ({ \
+ (var) = (val); \
+ drm_WARN_ON(&(i915)->drm, (var) != (val)); \
+ (var); \
+})
+
+static u32 calc_plane_remap_info(const struct intel_framebuffer *fb, int color_plane,
+ const struct fb_plane_view_dims *dims,
+ u32 obj_offset, u32 gtt_offset, int x, int y,
+ struct intel_fb_view *view)
+{
+ struct drm_i915_private *i915 = to_i915(fb->base.dev);
+ struct intel_remapped_plane_info *remap_info = &view->gtt.remapped.plane[color_plane];
+ struct i915_color_plane_view *color_plane_info = &view->color_plane[color_plane];
+ unsigned int tile_width = dims->tile_width;
+ unsigned int tile_height = dims->tile_height;
+ unsigned int tile_size = intel_tile_size(i915);
+ struct drm_rect r;
+ u32 size = 0;
+
+ assign_bfld_chk_ovf(i915, remap_info->offset, obj_offset);
+
+ if (intel_fb_is_gen12_ccs_aux_plane(&fb->base, color_plane)) {
+ remap_info->linear = 1;
+
+ assign_chk_ovf(i915, remap_info->size,
+ plane_view_linear_tiles(fb, color_plane, dims, x, y));
+ } else {
+ remap_info->linear = 0;
+
+ assign_chk_ovf(i915, remap_info->src_stride,
+ plane_view_src_stride_tiles(fb, color_plane, dims));
+ assign_chk_ovf(i915, remap_info->width,
+ plane_view_width_tiles(fb, color_plane, dims, x));
+ assign_chk_ovf(i915, remap_info->height,
+ plane_view_height_tiles(fb, color_plane, dims, y));
+ }
+
+ if (view->gtt.type == I915_GTT_VIEW_ROTATED) {
+ drm_WARN_ON(&i915->drm, remap_info->linear);
+ check_array_bounds(i915, view->gtt.rotated.plane, color_plane);
+
+ assign_chk_ovf(i915, remap_info->dst_stride,
+ plane_view_dst_stride_tiles(fb, color_plane, remap_info->height));
+
+ /* rotate the x/y offsets to match the GTT view */
+ drm_rect_init(&r, x, y, dims->width, dims->height);
+ drm_rect_rotate(&r,
+ remap_info->width * tile_width,
+ remap_info->height * tile_height,
+ DRM_MODE_ROTATE_270);
+
+ color_plane_info->x = r.x1;
+ color_plane_info->y = r.y1;
+
+ color_plane_info->mapping_stride = remap_info->dst_stride * tile_height;
+ color_plane_info->scanout_stride = color_plane_info->mapping_stride;
+
+ size += remap_info->dst_stride * remap_info->width;
+
+ /* rotate the tile dimensions to match the GTT view */
+ swap(tile_width, tile_height);
+ } else {
+ drm_WARN_ON(&i915->drm, view->gtt.type != I915_GTT_VIEW_REMAPPED);
+
+ check_array_bounds(i915, view->gtt.remapped.plane, color_plane);
+
+ if (view->gtt.remapped.plane_alignment) {
+ unsigned int aligned_offset = ALIGN(gtt_offset,
+ view->gtt.remapped.plane_alignment);
+
+ size += aligned_offset - gtt_offset;
+ gtt_offset = aligned_offset;
+ }
+
+ color_plane_info->x = x;
+ color_plane_info->y = y;
+
+ if (remap_info->linear) {
+ color_plane_info->mapping_stride = fb->base.pitches[color_plane];
+ color_plane_info->scanout_stride = color_plane_info->mapping_stride;
+
+ size += remap_info->size;
+ } else {
+ unsigned int dst_stride;
+
+ /*
+ * The hardware automagically calculates the CCS AUX surface
+			 * stride from the main surface stride, so we can't really remap a
+ * smaller subset (unless we'd remap in whole AUX page units).
+ */
+ if (intel_fb_needs_pot_stride_remap(fb) &&
+ intel_fb_is_ccs_modifier(fb->base.modifier))
+ dst_stride = remap_info->src_stride;
+ else
+ dst_stride = remap_info->width;
+
+ dst_stride = plane_view_dst_stride_tiles(fb, color_plane, dst_stride);
+
+ assign_chk_ovf(i915, remap_info->dst_stride, dst_stride);
+ color_plane_info->mapping_stride = dst_stride *
+ tile_width *
+ fb->base.format->cpp[color_plane];
+ color_plane_info->scanout_stride =
+ plane_view_scanout_stride(fb, color_plane, tile_width,
+ remap_info->src_stride,
+ dst_stride);
+
+ size += dst_stride * remap_info->height;
+ }
+ }
+
+ /*
+ * We only keep the x/y offsets, so push all of the gtt offset into
+ * the x/y offsets. x,y will hold the first pixel of the framebuffer
+ * plane from the start of the remapped/rotated gtt mapping.
+ */
+ if (remap_info->linear)
+ intel_adjust_linear_offset(&color_plane_info->x, &color_plane_info->y,
+ fb->base.format->cpp[color_plane],
+ color_plane_info->mapping_stride,
+ gtt_offset * tile_size, 0);
+ else
+ intel_adjust_tile_offset(&color_plane_info->x, &color_plane_info->y,
+ tile_width, tile_height,
+ tile_size, remap_info->dst_stride,
+ gtt_offset * tile_size, 0);
+
+ return size;
+}
+
+#undef assign_chk_ovf
+
+/* Return number of tiles @color_plane needs. */
+static unsigned int
+calc_plane_normal_size(const struct intel_framebuffer *fb, int color_plane,
+ const struct fb_plane_view_dims *dims,
+ int x, int y)
+{
+ unsigned int tiles;
+
+ if (is_surface_linear(&fb->base, color_plane)) {
+ tiles = plane_view_linear_tiles(fb, color_plane, dims, x, y);
+ } else {
+ tiles = plane_view_src_stride_tiles(fb, color_plane, dims) *
+ plane_view_height_tiles(fb, color_plane, dims, y);
+ /*
+ * If the plane isn't horizontally tile aligned,
+ * we need one more tile.
+ */
+ if (x != 0)
+ tiles++;
+ }
+
+ return tiles;
+}
+
+static void intel_fb_view_init(struct drm_i915_private *i915, struct intel_fb_view *view,
+ enum i915_gtt_view_type view_type)
+{
+ memset(view, 0, sizeof(*view));
+ view->gtt.type = view_type;
+
+ if (view_type == I915_GTT_VIEW_REMAPPED &&
+ (IS_ALDERLAKE_P(i915) || DISPLAY_VER(i915) >= 14))
+ view->gtt.remapped.plane_alignment = SZ_2M / PAGE_SIZE;
+}
+
+bool intel_fb_supports_90_270_rotation(const struct intel_framebuffer *fb)
+{
+ if (DISPLAY_VER(to_i915(fb->base.dev)) >= 13)
+ return false;
+
+ return fb->base.modifier == I915_FORMAT_MOD_Y_TILED ||
+ fb->base.modifier == I915_FORMAT_MOD_Yf_TILED;
+}
+
+int intel_fill_fb_info(struct drm_i915_private *i915, struct intel_framebuffer *fb)
+{
+ struct drm_i915_gem_object *obj = intel_fb_obj(&fb->base);
+ u32 gtt_offset_rotated = 0;
+ u32 gtt_offset_remapped = 0;
+ unsigned int max_size = 0;
+ int i, num_planes = fb->base.format->num_planes;
+ unsigned int tile_size = intel_tile_size(i915);
+
+ intel_fb_view_init(i915, &fb->normal_view, I915_GTT_VIEW_NORMAL);
+
+ drm_WARN_ON(&i915->drm,
+ intel_fb_supports_90_270_rotation(fb) &&
+ intel_fb_needs_pot_stride_remap(fb));
+
+ if (intel_fb_supports_90_270_rotation(fb))
+ intel_fb_view_init(i915, &fb->rotated_view, I915_GTT_VIEW_ROTATED);
+ if (intel_fb_needs_pot_stride_remap(fb))
+ intel_fb_view_init(i915, &fb->remapped_view, I915_GTT_VIEW_REMAPPED);
+
+ for (i = 0; i < num_planes; i++) {
+ struct fb_plane_view_dims view_dims;
+ unsigned int width, height;
+ unsigned int cpp, size;
+ u32 offset;
+ int x, y;
+ int ret;
+
+ /*
+		 * Plane 2 of the Render Compression with Clear Color fb modifier
+ * is consumed by the driver and not passed to DE. Skip the
+ * arithmetic related to alignment and offset calculation.
+ */
+ if (is_gen12_ccs_cc_plane(&fb->base, i)) {
+ if (IS_ALIGNED(fb->base.offsets[i], PAGE_SIZE))
+ continue;
+ else
+ return -EINVAL;
+ }
+
+ cpp = fb->base.format->cpp[i];
+ intel_fb_plane_dims(fb, i, &width, &height);
+
+ ret = convert_plane_offset_to_xy(fb, i, width, &x, &y);
+ if (ret)
+ return ret;
+
+ init_plane_view_dims(fb, i, width, height, &view_dims);
+
+ /*
+ * First pixel of the framebuffer from
+ * the start of the normal gtt mapping.
+ */
+ fb->normal_view.color_plane[i].x = x;
+ fb->normal_view.color_plane[i].y = y;
+ fb->normal_view.color_plane[i].mapping_stride = fb->base.pitches[i];
+ fb->normal_view.color_plane[i].scanout_stride =
+ fb->normal_view.color_plane[i].mapping_stride;
+
+ offset = calc_plane_aligned_offset(fb, i, &x, &y);
+
+ if (intel_fb_supports_90_270_rotation(fb))
+ gtt_offset_rotated += calc_plane_remap_info(fb, i, &view_dims,
+ offset, gtt_offset_rotated, x, y,
+ &fb->rotated_view);
+
+ if (intel_fb_needs_pot_stride_remap(fb))
+ gtt_offset_remapped += calc_plane_remap_info(fb, i, &view_dims,
+ offset, gtt_offset_remapped, x, y,
+ &fb->remapped_view);
+
+ size = calc_plane_normal_size(fb, i, &view_dims, x, y);
+		/* how many tiles in total are needed in the bo */
+ max_size = max(max_size, offset + size);
+ }
+
+ if (mul_u32_u32(max_size, tile_size) > obj->base.size) {
+ drm_dbg_kms(&i915->drm,
+ "fb too big for bo (need %llu bytes, have %zu bytes)\n",
+ mul_u32_u32(max_size, tile_size), obj->base.size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void intel_plane_remap_gtt(struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *i915 =
+ to_i915(plane_state->uapi.plane->dev);
+ struct drm_framebuffer *fb = plane_state->hw.fb;
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+ unsigned int rotation = plane_state->hw.rotation;
+ int i, num_planes = fb->format->num_planes;
+ unsigned int src_x, src_y;
+ unsigned int src_w, src_h;
+ u32 gtt_offset = 0;
+
+ intel_fb_view_init(i915, &plane_state->view,
+ drm_rotation_90_or_270(rotation) ? I915_GTT_VIEW_ROTATED :
+ I915_GTT_VIEW_REMAPPED);
+
+ src_x = plane_state->uapi.src.x1 >> 16;
+ src_y = plane_state->uapi.src.y1 >> 16;
+ src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+ src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
+
+ drm_WARN_ON(&i915->drm, intel_fb_is_ccs_modifier(fb->modifier));
+
+ /* Make src coordinates relative to the viewport */
+ drm_rect_translate(&plane_state->uapi.src,
+ -(src_x << 16), -(src_y << 16));
+
+ /* Rotate src coordinates to match rotated GTT view */
+ if (drm_rotation_90_or_270(rotation))
+ drm_rect_rotate(&plane_state->uapi.src,
+ src_w << 16, src_h << 16,
+ DRM_MODE_ROTATE_270);
+
+ for (i = 0; i < num_planes; i++) {
+ unsigned int hsub = i ? fb->format->hsub : 1;
+ unsigned int vsub = i ? fb->format->vsub : 1;
+ struct fb_plane_view_dims view_dims;
+ unsigned int width, height;
+ unsigned int x, y;
+ u32 offset;
+
+ x = src_x / hsub;
+ y = src_y / vsub;
+ width = src_w / hsub;
+ height = src_h / vsub;
+
+ init_plane_view_dims(intel_fb, i, width, height, &view_dims);
+
+ /*
+ * First pixel of the src viewport from the
+ * start of the normal gtt mapping.
+ */
+ x += intel_fb->normal_view.color_plane[i].x;
+ y += intel_fb->normal_view.color_plane[i].y;
+
+ offset = calc_plane_aligned_offset(intel_fb, i, &x, &y);
+
+ gtt_offset += calc_plane_remap_info(intel_fb, i, &view_dims,
+ offset, gtt_offset, x, y,
+ &plane_state->view);
+ }
+}
+
+void intel_fb_fill_view(const struct intel_framebuffer *fb, unsigned int rotation,
+ struct intel_fb_view *view)
+{
+ if (drm_rotation_90_or_270(rotation))
+ *view = fb->rotated_view;
+ else if (intel_fb_needs_pot_stride_remap(fb))
+ *view = fb->remapped_view;
+ else
+ *view = fb->normal_view;
+}
+
+static
+u32 intel_fb_max_stride(struct drm_i915_private *dev_priv,
+ u32 pixel_format, u64 modifier)
+{
+ /*
+ * Arbitrary limit for gen4+ chosen to match the
+ * render engine max stride.
+ *
+	 * The new CCS hash mode makes remapping impossible.
+ */
+ if (DISPLAY_VER(dev_priv) < 4 || intel_fb_is_ccs_modifier(modifier) ||
+ intel_modifier_uses_dpt(dev_priv, modifier))
+ return intel_plane_fb_max_stride(dev_priv, pixel_format, modifier);
+ else if (DISPLAY_VER(dev_priv) >= 7)
+ return 256 * 1024;
+ else
+ return 128 * 1024;
+}
+
+static u32
+intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
+{
+ struct drm_i915_private *dev_priv = to_i915(fb->dev);
+ u32 tile_width;
+
+ if (is_surface_linear(fb, color_plane)) {
+ u32 max_stride = intel_plane_fb_max_stride(dev_priv,
+ fb->format->format,
+ fb->modifier);
+
+ /*
+ * To make remapping with linear generally feasible
+ * we need the stride to be page aligned.
+ */
+ if (fb->pitches[color_plane] > max_stride &&
+ !intel_fb_is_ccs_modifier(fb->modifier))
+ return intel_tile_size(dev_priv);
+ else
+ return 64;
+ }
+
+ tile_width = intel_tile_width_bytes(fb, color_plane);
+ if (intel_fb_is_ccs_modifier(fb->modifier)) {
+ /*
+ * On TGL the surface stride must be 4 tile aligned, mapped by
+ * one 64 byte cacheline on the CCS AUX surface.
+ */
+ if (DISPLAY_VER(dev_priv) >= 12)
+ tile_width *= 4;
+ /*
+ * Display WA #0531: skl,bxt,kbl,glk
+ *
+ * Render decompression and plane width > 3840
+ * combined with horizontal panning requires the
+ * plane stride to be a multiple of 4. We'll just
+ * require the entire fb to accommodate that to avoid
+ * potential runtime errors at plane configuration time.
+ */
+ else if ((DISPLAY_VER(dev_priv) == 9 || IS_GEMINILAKE(dev_priv)) &&
+ color_plane == 0 && fb->width > 3840)
+ tile_width *= 4;
+ }
+ return tile_width;
+}
+
+static int intel_plane_check_stride(const struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int rotation = plane_state->hw.rotation;
+ u32 stride, max_stride;
+
+ /*
+ * We ignore stride for all invisible planes that
+ * can be remapped. Otherwise we could end up
+ * with a false positive when the remapping didn't
+ * kick in due the plane being invisible.
+ */
+ if (intel_plane_can_remap(plane_state) &&
+ !plane_state->uapi.visible)
+ return 0;
+
+ /* FIXME other color planes? */
+ stride = plane_state->view.color_plane[0].mapping_stride;
+ max_stride = plane->max_stride(plane, fb->format->format,
+ fb->modifier, rotation);
+
+ if (stride > max_stride) {
+ DRM_DEBUG_KMS("[FB:%d] stride (%d) exceeds [PLANE:%d:%s] max stride (%d)\n",
+ fb->base.id, stride,
+ plane->base.base.id, plane->base.name, max_stride);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int intel_plane_compute_gtt(struct intel_plane_state *plane_state)
+{
+ const struct intel_framebuffer *fb =
+ to_intel_framebuffer(plane_state->hw.fb);
+ unsigned int rotation = plane_state->hw.rotation;
+
+ if (!fb)
+ return 0;
+
+ if (intel_plane_needs_remap(plane_state)) {
+ intel_plane_remap_gtt(plane_state);
+
+ /*
+ * Sometimes even remapping can't overcome
+ * the stride limitations :( Can happen with
+ * big plane sizes and suitably misaligned
+ * offsets.
+ */
+ return intel_plane_check_stride(plane_state);
+ }
+
+ intel_fb_fill_view(fb, rotation, &plane_state->view);
+
+ /* Rotate src coordinates to match rotated GTT view */
+ if (drm_rotation_90_or_270(rotation))
+ drm_rect_rotate(&plane_state->uapi.src,
+ fb->base.width << 16, fb->base.height << 16,
+ DRM_MODE_ROTATE_270);
+
+ return intel_plane_check_stride(plane_state);
+}
+
+static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+
+ drm_framebuffer_cleanup(fb);
+
+ if (intel_fb_uses_dpt(fb))
+ intel_dpt_destroy(intel_fb->dpt_vm);
+
+ intel_frontbuffer_put(intel_fb->frontbuffer);
+
+ kfree(intel_fb);
+}
+
+static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
+ struct drm_file *file,
+ unsigned int *handle)
+{
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+
+ if (i915_gem_object_is_userptr(obj)) {
+ drm_dbg(&i915->drm,
+ "attempting to use a userptr for a framebuffer, denied\n");
+ return -EINVAL;
+ }
+
+ return drm_gem_handle_create(file, &obj->base, handle);
+}
+
+static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
+ struct drm_file *file,
+ unsigned int flags, unsigned int color,
+ struct drm_clip_rect *clips,
+ unsigned int num_clips)
+{
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+
+ i915_gem_object_flush_if_display(obj);
+ intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);
+
+ return 0;
+}
+
+static const struct drm_framebuffer_funcs intel_fb_funcs = {
+ .destroy = intel_user_framebuffer_destroy,
+ .create_handle = intel_user_framebuffer_create_handle,
+ .dirty = intel_user_framebuffer_dirty,
+};
+
+int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
+ struct drm_i915_gem_object *obj,
+ struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+ struct drm_framebuffer *fb = &intel_fb->base;
+ u32 max_stride;
+ unsigned int tiling, stride;
+ int ret = -EINVAL;
+ int i;
+
+ intel_fb->frontbuffer = intel_frontbuffer_get(obj);
+ if (!intel_fb->frontbuffer)
+ return -ENOMEM;
+
+ i915_gem_object_lock(obj, NULL);
+ tiling = i915_gem_object_get_tiling(obj);
+ stride = i915_gem_object_get_stride(obj);
+ i915_gem_object_unlock(obj);
+
+ if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
+ /*
+ * If there's a fence, enforce that
+ * the fb modifier and tiling mode match.
+ */
+ if (tiling != I915_TILING_NONE &&
+ tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
+ drm_dbg_kms(&dev_priv->drm,
+ "tiling_mode doesn't match fb modifier\n");
+ goto err;
+ }
+ } else {
+ if (tiling == I915_TILING_X) {
+ mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
+ } else if (tiling == I915_TILING_Y) {
+ drm_dbg_kms(&dev_priv->drm,
+ "No Y tiling for legacy addfb\n");
+ goto err;
+ }
+ }
+
+ if (!drm_any_plane_has_format(&dev_priv->drm,
+ mode_cmd->pixel_format,
+ mode_cmd->modifier[0])) {
+ drm_dbg_kms(&dev_priv->drm,
+ "unsupported pixel format %p4cc / modifier 0x%llx\n",
+ &mode_cmd->pixel_format, mode_cmd->modifier[0]);
+ goto err;
+ }
+
+ /*
+ * gen2/3 display engine uses the fence if present,
+ * so the tiling mode must match the fb modifier exactly.
+ */
+ if (DISPLAY_VER(dev_priv) < 4 &&
+ tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
+ drm_dbg_kms(&dev_priv->drm,
+ "tiling_mode must match fb modifier exactly on gen2/3\n");
+ goto err;
+ }
+
+ max_stride = intel_fb_max_stride(dev_priv, mode_cmd->pixel_format,
+ mode_cmd->modifier[0]);
+ if (mode_cmd->pitches[0] > max_stride) {
+ drm_dbg_kms(&dev_priv->drm,
+ "%s pitch (%u) must be at most %d\n",
+ mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
+ "tiled" : "linear",
+ mode_cmd->pitches[0], max_stride);
+ goto err;
+ }
+
+ /*
+ * If there's a fence, enforce that
+ * the fb pitch and fence stride match.
+ */
+ if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
+ drm_dbg_kms(&dev_priv->drm,
+ "pitch (%d) must match tiling stride (%d)\n",
+ mode_cmd->pitches[0], stride);
+ goto err;
+ }
+
+ /* FIXME need to adjust LINOFF/TILEOFF accordingly. */
+ if (mode_cmd->offsets[0] != 0) {
+ drm_dbg_kms(&dev_priv->drm,
+ "plane 0 offset (0x%08x) must be 0\n",
+ mode_cmd->offsets[0]);
+ goto err;
+ }
+
+ drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);
+
+ for (i = 0; i < fb->format->num_planes; i++) {
+ u32 stride_alignment;
+
+ if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
+ drm_dbg_kms(&dev_priv->drm, "bad plane %d handle\n",
+ i);
+ goto err;
+ }
+
+ stride_alignment = intel_fb_stride_alignment(fb, i);
+ if (fb->pitches[i] & (stride_alignment - 1)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "plane %d pitch (%d) must be at least %u byte aligned\n",
+ i, fb->pitches[i], stride_alignment);
+ goto err;
+ }
+
+ if (intel_fb_is_gen12_ccs_aux_plane(fb, i)) {
+ int ccs_aux_stride = gen12_ccs_aux_stride(intel_fb, i);
+
+ if (fb->pitches[i] != ccs_aux_stride) {
+ drm_dbg_kms(&dev_priv->drm,
+ "ccs aux plane %d pitch (%d) must be %d\n",
+ i,
+ fb->pitches[i], ccs_aux_stride);
+ goto err;
+ }
+ }
+
+ fb->obj[i] = &obj->base;
+ }
+
+ ret = intel_fill_fb_info(dev_priv, intel_fb);
+ if (ret)
+ goto err;
+
+ if (intel_fb_uses_dpt(fb)) {
+ struct i915_address_space *vm;
+
+ vm = intel_dpt_create(intel_fb);
+ if (IS_ERR(vm)) {
+ ret = PTR_ERR(vm);
+ goto err;
+ }
+
+ intel_fb->dpt_vm = vm;
+ }
+
+ ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
+ if (ret) {
+ drm_err(&dev_priv->drm, "framebuffer init failed %d\n", ret);
+ goto err;
+ }
+
+ return 0;
+
+err:
+ intel_frontbuffer_put(intel_fb->frontbuffer);
+ return ret;
+}
+
+struct drm_framebuffer *
+intel_user_framebuffer_create(struct drm_device *dev,
+ struct drm_file *filp,
+ const struct drm_mode_fb_cmd2 *user_mode_cmd)
+{
+ struct drm_framebuffer *fb;
+ struct drm_i915_gem_object *obj;
+ struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
+ struct drm_i915_private *i915;
+
+ obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
+ if (!obj)
+ return ERR_PTR(-ENOENT);
+
+	/* on discrete, the framebuffer must be placeable in local memory (LMEM) */
+ i915 = to_i915(obj->base.dev);
+ if (HAS_LMEM(i915) && !i915_gem_object_can_migrate(obj, INTEL_REGION_LMEM_0)) {
+ /* object is "remote", not in local memory */
+ i915_gem_object_put(obj);
+ return ERR_PTR(-EREMOTE);
+ }
+
+ fb = intel_framebuffer_create(obj, &mode_cmd);
+ i915_gem_object_put(obj);
+
+ return fb;
+}
+
+struct drm_framebuffer *
+intel_framebuffer_create(struct drm_i915_gem_object *obj,
+ struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ struct intel_framebuffer *intel_fb;
+ int ret;
+
+ intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
+ if (!intel_fb)
+ return ERR_PTR(-ENOMEM);
+
+ ret = intel_framebuffer_init(intel_fb, obj, mode_cmd);
+ if (ret)
+ goto err;
+
+ return &intel_fb->base;
+
+err:
+ kfree(intel_fb);
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_fb.h b/drivers/gpu/drm/i915/display/intel_fb.h
new file mode 100644
index 000000000..12386f13a
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_fb.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020-2021 Intel Corporation
+ */
+
+#ifndef __INTEL_FB_H__
+#define __INTEL_FB_H__
+
+#include <linux/bits.h>
+#include <linux/types.h>
+
+struct drm_device;
+struct drm_file;
+struct drm_framebuffer;
+struct drm_i915_gem_object;
+struct drm_i915_private;
+struct drm_mode_fb_cmd2;
+struct intel_fb_view;
+struct intel_framebuffer;
+struct intel_plane;
+struct intel_plane_state;
+
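+/* plane capability flags, used to build the list of supported modifiers */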
+#define INTEL_PLANE_CAP_NONE 0
+#define INTEL_PLANE_CAP_CCS_RC BIT(0)
+#define INTEL_PLANE_CAP_CCS_RC_CC BIT(1)
+#define INTEL_PLANE_CAP_CCS_MC BIT(2)
+#define INTEL_PLANE_CAP_TILING_X BIT(3)
+#define INTEL_PLANE_CAP_TILING_Y BIT(4)
+#define INTEL_PLANE_CAP_TILING_Yf BIT(5)
+#define INTEL_PLANE_CAP_TILING_4 BIT(6)
+
+bool intel_fb_is_ccs_modifier(u64 modifier);
+bool intel_fb_is_rc_ccs_cc_modifier(u64 modifier);
+bool intel_fb_is_mc_ccs_modifier(u64 modifier);
+
+bool intel_fb_is_ccs_aux_plane(const struct drm_framebuffer *fb, int color_plane);
+int intel_fb_rc_ccs_cc_plane(const struct drm_framebuffer *fb);
+
+u64 *intel_fb_plane_get_modifiers(struct drm_i915_private *i915,
+ u8 plane_caps);
+bool intel_fb_plane_supports_modifier(struct intel_plane *plane, u64 modifier);
+
+const struct drm_format_info *
+intel_fb_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
+
+bool
+intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
+ u64 modifier);
+
+bool is_surface_linear(const struct drm_framebuffer *fb, int color_plane);
+
+int main_to_ccs_plane(const struct drm_framebuffer *fb, int main_plane);
+int skl_ccs_to_main_plane(const struct drm_framebuffer *fb, int ccs_plane);
+int skl_main_to_aux_plane(const struct drm_framebuffer *fb, int main_plane);
+
+unsigned int intel_tile_size(const struct drm_i915_private *i915);
+unsigned int intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane);
+unsigned int intel_tile_height(const struct drm_framebuffer *fb, int color_plane);
+unsigned int intel_tile_row_size(const struct drm_framebuffer *fb, int color_plane);
+unsigned int intel_fb_align_height(const struct drm_framebuffer *fb,
+ int color_plane, unsigned int height);
+unsigned int intel_cursor_alignment(const struct drm_i915_private *i915);
+unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
+ int color_plane);
+
+void intel_fb_plane_get_subsampling(int *hsub, int *vsub,
+ const struct drm_framebuffer *fb,
+ int color_plane);
+
+u32 intel_plane_adjust_aligned_offset(int *x, int *y,
+ const struct intel_plane_state *state,
+ int color_plane,
+ u32 old_offset, u32 new_offset);
+u32 intel_plane_compute_aligned_offset(int *x, int *y,
+ const struct intel_plane_state *state,
+ int color_plane);
+
+bool intel_fb_needs_pot_stride_remap(const struct intel_framebuffer *fb);
+bool intel_fb_supports_90_270_rotation(const struct intel_framebuffer *fb);
+
+int intel_fill_fb_info(struct drm_i915_private *i915, struct intel_framebuffer *fb);
+void intel_fb_fill_view(const struct intel_framebuffer *fb, unsigned int rotation,
+ struct intel_fb_view *view);
+int intel_plane_compute_gtt(struct intel_plane_state *plane_state);
+
+int intel_framebuffer_init(struct intel_framebuffer *ifb,
+ struct drm_i915_gem_object *obj,
+ struct drm_mode_fb_cmd2 *mode_cmd);
+struct drm_framebuffer *
+intel_user_framebuffer_create(struct drm_device *dev,
+ struct drm_file *filp,
+ const struct drm_mode_fb_cmd2 *user_mode_cmd);
+
+bool intel_fb_uses_dpt(const struct drm_framebuffer *fb);
+
+#endif /* __INTEL_FB_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_fb_pin.c b/drivers/gpu/drm/i915/display/intel_fb_pin.c
new file mode 100644
index 000000000..1dddd6abd
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_fb_pin.c
@@ -0,0 +1,303 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+/**
+ * DOC: display pinning helpers
+ */
+
+#include "gem/i915_gem_domain.h"
+#include "gem/i915_gem_object.h"
+
+#include "i915_drv.h"
+#include "intel_display_types.h"
+#include "intel_dpt.h"
+#include "intel_fb.h"
+#include "intel_fb_pin.h"
+
+static struct i915_vma *
+intel_pin_fb_obj_dpt(struct drm_framebuffer *fb,
+ const struct i915_gtt_view *view,
+ bool uses_fence,
+ unsigned long *out_flags,
+ struct i915_address_space *vm)
+{
+ struct drm_device *dev = fb->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ struct i915_gem_ww_ctx ww;
+ struct i915_vma *vma;
+ u32 alignment;
+ int ret;
+
+ /*
+ * We are not syncing against the binding (and potential migrations)
+ * below, so this vm must never be async.
+ */
+ GEM_WARN_ON(vm->bind_async_flags);
+
+ if (WARN_ON(!i915_gem_object_is_framebuffer(obj)))
+ return ERR_PTR(-EINVAL);
+
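+	/* 512 * 4 KiB GTT pages, i.e. 2 MiB alignment */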
+ alignment = 4096 * 512;
+
+ atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
+
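+	/*
+	 * for_i915_gem_ww() re-runs the loop body after a ww backoff while
+	 * ret is -EDEADLK; any other value of ret terminates the loop.
+	 */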
+ for_i915_gem_ww(&ww, ret, true) {
+ ret = i915_gem_object_lock(obj, &ww);
+ if (ret)
+ continue;
+
+ if (HAS_LMEM(dev_priv)) {
+ unsigned int flags = obj->flags;
+
+			/*
+			 * For this type of buffer the CPU needs to be able to read the
+			 * clear color value found in the buffer, hence we need to
+			 * ensure it is always in the mappable part of lmem, if this is
+			 * a small-bar device.
+			 */
+ if (intel_fb_rc_ccs_cc_plane(fb) >= 0)
+ flags &= ~I915_BO_ALLOC_GPU_ONLY;
+ ret = __i915_gem_object_migrate(obj, &ww, INTEL_REGION_LMEM_0,
+ flags);
+ if (ret)
+ continue;
+ }
+
+ ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
+ if (ret)
+ continue;
+
+ vma = i915_vma_instance(obj, vm, view);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ continue;
+ }
+
+ if (i915_vma_misplaced(vma, 0, alignment, 0)) {
+ ret = i915_vma_unbind(vma);
+ if (ret)
+ continue;
+ }
+
+ ret = i915_vma_pin_ww(vma, &ww, 0, alignment, PIN_GLOBAL);
+ if (ret)
+ continue;
+ }
+ if (ret) {
+ vma = ERR_PTR(ret);
+ goto err;
+ }
+
+ vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
+
+ i915_gem_object_flush_if_display(obj);
+
+ i915_vma_get(vma);
+err:
+ atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
+
+ return vma;
+}
+
+struct i915_vma *
+intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
+ bool phys_cursor,
+ const struct i915_gtt_view *view,
+ bool uses_fence,
+ unsigned long *out_flags)
+{
+ struct drm_device *dev = fb->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ intel_wakeref_t wakeref;
+ struct i915_gem_ww_ctx ww;
+ struct i915_vma *vma;
+ unsigned int pinctl;
+ u32 alignment;
+ int ret;
+
+ if (drm_WARN_ON(dev, !i915_gem_object_is_framebuffer(obj)))
+ return ERR_PTR(-EINVAL);
+
+ if (phys_cursor)
+ alignment = intel_cursor_alignment(dev_priv);
+ else
+ alignment = intel_surf_alignment(fb, 0);
+ if (drm_WARN_ON(dev, alignment && !is_power_of_2(alignment)))
+ return ERR_PTR(-EINVAL);
+
+	/*
+	 * Note that the w/a also requires 64 PTE of padding following the
+	 * bo. We currently fill all unused PTE with the shadow page and so
+	 * we should always have valid PTE following the scanout preventing
+	 * the VT-d warning.
+	 */
+ if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
+ alignment = 256 * 1024;
+
+	/*
+	 * Global gtt pte registers are special registers which actually forward
+	 * writes to a chunk of system memory, which means that there is no risk
+	 * that the register values disappear as soon as we call
+	 * intel_runtime_pm_put(), so it is correct to wrap only the
+	 * pin/unpin/fence and not more.
+	 */
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+
+ atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
+
+	/*
+	 * Valleyview is definitely limited to scanning out the first
+	 * 512MiB. Let's presume this behaviour was inherited from the
+	 * g4x display engine and that all earlier gen are similarly
+	 * limited. Testing suggests that it is a little more
+	 * complicated than this. For example, Cherryview appears quite
+	 * happy to scan out from anywhere within its global aperture.
+	 */
+ pinctl = 0;
+ if (HAS_GMCH(dev_priv))
+ pinctl |= PIN_MAPPABLE;
+
+ i915_gem_ww_ctx_init(&ww, true);
+retry:
+ ret = i915_gem_object_lock(obj, &ww);
+ if (!ret && phys_cursor)
+ ret = i915_gem_object_attach_phys(obj, alignment);
+ else if (!ret && HAS_LMEM(dev_priv))
+ ret = i915_gem_object_migrate(obj, &ww, INTEL_REGION_LMEM_0);
+ /* TODO: Do we need to sync when migration becomes async? */
+ if (!ret)
+ ret = i915_gem_object_pin_pages(obj);
+ if (ret)
+ goto err;
+
+ vma = i915_gem_object_pin_to_display_plane(obj, &ww, alignment,
+ view, pinctl);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto err_unpin;
+ }
+
+ if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
+ /*
+ * Install a fence for tiled scan-out. Pre-i965 always needs a
+ * fence, whereas 965+ only requires a fence if using
+ * framebuffer compression. For simplicity, we always, when
+ * possible, install a fence as the cost is not that onerous.
+ *
+ * If we fail to fence the tiled scanout, then either the
+ * modeset will reject the change (which is highly unlikely as
+ * the affected systems, all but one, do not have unmappable
+ * space) or we will not be able to enable full powersaving
+ * techniques (also likely not to apply due to various limits
+ * FBC and the like impose on the size of the buffer, which
+ * presumably we violated anyway with this unmappable buffer).
+ * Anyway, it is presumably better to stumble onwards with
+ * something and try to run the system in a "less than optimal"
+ * mode that matches the user configuration.
+ */
+ ret = i915_vma_pin_fence(vma);
+ if (ret != 0 && DISPLAY_VER(dev_priv) < 4) {
+ i915_vma_unpin(vma);
+ goto err_unpin;
+ }
+ ret = 0;
+
+ if (vma->fence)
+ *out_flags |= PLANE_HAS_FENCE;
+ }
+
+ i915_vma_get(vma);
+
+err_unpin:
+ i915_gem_object_unpin_pages(obj);
+err:
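+	/*
+	 * -EDEADLK means the ww locking code detected a potential deadlock:
+	 * back off (dropping the locks) and retry the whole sequence.
+	 */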
+ if (ret == -EDEADLK) {
+ ret = i915_gem_ww_ctx_backoff(&ww);
+ if (!ret)
+ goto retry;
+ }
+ i915_gem_ww_ctx_fini(&ww);
+ if (ret)
+ vma = ERR_PTR(ret);
+
+ atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+ return vma;
+}
+
+void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
+{
+ if (flags & PLANE_HAS_FENCE)
+ i915_vma_unpin_fence(vma);
+ i915_vma_unpin(vma);
+ i915_vma_put(vma);
+}
+
+int intel_plane_pin_fb(struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct drm_framebuffer *fb = plane_state->hw.fb;
+ struct i915_vma *vma;
+ bool phys_cursor =
+ plane->id == PLANE_CURSOR &&
+ INTEL_INFO(dev_priv)->display.cursor_needs_physical;
+
+ if (!intel_fb_uses_dpt(fb)) {
+ vma = intel_pin_and_fence_fb_obj(fb, phys_cursor,
+ &plane_state->view.gtt,
+ intel_plane_uses_fence(plane_state),
+ &plane_state->flags);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ plane_state->ggtt_vma = vma;
+ } else {
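+		/*
+		 * With DPT the page table itself is pinned into the GGTT
+		 * (ggtt_vma) while the fb is bound into the DPT address
+		 * space (dpt_vma).
+		 */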
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+
+ vma = intel_dpt_pin(intel_fb->dpt_vm);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ plane_state->ggtt_vma = vma;
+
+ vma = intel_pin_fb_obj_dpt(fb, &plane_state->view.gtt, false,
+ &plane_state->flags, intel_fb->dpt_vm);
+ if (IS_ERR(vma)) {
+ intel_dpt_unpin(intel_fb->dpt_vm);
+ plane_state->ggtt_vma = NULL;
+ return PTR_ERR(vma);
+ }
+
+ plane_state->dpt_vma = vma;
+
+ WARN_ON(plane_state->ggtt_vma == plane_state->dpt_vma);
+ }
+
+ return 0;
+}
+
+void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
+{
+ struct drm_framebuffer *fb = old_plane_state->hw.fb;
+ struct i915_vma *vma;
+
+ if (!intel_fb_uses_dpt(fb)) {
+ vma = fetch_and_zero(&old_plane_state->ggtt_vma);
+ if (vma)
+ intel_unpin_fb_vma(vma, old_plane_state->flags);
+ } else {
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+
+ vma = fetch_and_zero(&old_plane_state->dpt_vma);
+ if (vma)
+ intel_unpin_fb_vma(vma, old_plane_state->flags);
+
+ vma = fetch_and_zero(&old_plane_state->ggtt_vma);
+ if (vma)
+ intel_dpt_unpin(intel_fb->dpt_vm);
+ }
+}
diff --git a/drivers/gpu/drm/i915/display/intel_fb_pin.h b/drivers/gpu/drm/i915/display/intel_fb_pin.h
new file mode 100644
index 000000000..de0efaa25
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_fb_pin.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef __INTEL_FB_PIN_H__
+#define __INTEL_FB_PIN_H__
+
+#include <linux/types.h>
+
+struct drm_framebuffer;
+struct i915_vma;
+struct intel_plane_state;
+struct i915_gtt_view;
+
+struct i915_vma *
+intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
+ bool phys_cursor,
+ const struct i915_gtt_view *view,
+ bool uses_fence,
+ unsigned long *out_flags);
+
+void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags);
+
+int intel_plane_pin_fb(struct intel_plane_state *plane_state);
+void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state);
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
new file mode 100644
index 000000000..f38175304
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -0,0 +1,1846 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * DOC: Frame Buffer Compression (FBC)
+ *
+ * FBC tries to save memory bandwidth (and so power consumption) by
+ * compressing the amount of memory used by the display. It is totally
+ * transparent to user space and completely handled in the kernel.
+ *
+ * The benefits of FBC are mostly visible with solid backgrounds and
+ * variation-less patterns. They come from keeping the memory footprint small
+ * and having fewer memory pages opened and accessed for refreshing the display.
+ *
+ * i915 is responsible for reserving stolen memory for FBC and configuring its
+ * offset in the proper registers. The hardware takes care of all the
+ * compression/decompression. However there are many known cases where we have
+ * to forcibly disable it to allow proper screen updates.
+ */
+
+#include <linux/string_helpers.h>
+
+#include <drm/drm_blend.h>
+#include <drm/drm_fourcc.h>
+
+#include "i915_drv.h"
+#include "i915_utils.h"
+#include "i915_vgpu.h"
+#include "intel_cdclk.h"
+#include "intel_de.h"
+#include "intel_display_trace.h"
+#include "intel_display_types.h"
+#include "intel_fbc.h"
+#include "intel_frontbuffer.h"
+
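+/* iterate over the FBC ids/instances advertised in the platform's fbc_mask */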
+#define for_each_fbc_id(__dev_priv, __fbc_id) \
+ for ((__fbc_id) = INTEL_FBC_A; (__fbc_id) < I915_MAX_FBCS; (__fbc_id)++) \
+ for_each_if(RUNTIME_INFO(__dev_priv)->fbc_mask & BIT(__fbc_id))
+
+#define for_each_intel_fbc(__dev_priv, __fbc, __fbc_id) \
+ for_each_fbc_id((__dev_priv), (__fbc_id)) \
+ for_each_if((__fbc) = (__dev_priv)->display.fbc[(__fbc_id)])
+
+struct intel_fbc_funcs {
+ void (*activate)(struct intel_fbc *fbc);
+ void (*deactivate)(struct intel_fbc *fbc);
+ bool (*is_active)(struct intel_fbc *fbc);
+ bool (*is_compressing)(struct intel_fbc *fbc);
+ void (*nuke)(struct intel_fbc *fbc);
+ void (*program_cfb)(struct intel_fbc *fbc);
+ void (*set_false_color)(struct intel_fbc *fbc, bool enable);
+};
+
+struct intel_fbc_state {
+ struct intel_plane *plane;
+ unsigned int cfb_stride;
+ unsigned int cfb_size;
+ unsigned int fence_y_offset;
+ u16 override_cfb_stride;
+ u16 interval;
+ s8 fence_id;
+};
+
+struct intel_fbc {
+ struct drm_i915_private *i915;
+ const struct intel_fbc_funcs *funcs;
+
+ /*
+ * This is always the inner lock when overlapping with
+ * struct_mutex and it's the outer lock when overlapping
+ * with stolen_lock.
+ */
+ struct mutex lock;
+ unsigned int busy_bits;
+
+ struct drm_mm_node compressed_fb;
+ struct drm_mm_node compressed_llb;
+
+ enum intel_fbc_id id;
+
+ u8 limit;
+
+ bool false_color;
+
+ bool active;
+ bool activated;
+ bool flip_pending;
+
+ bool underrun_detected;
+ struct work_struct underrun_work;
+
+ /*
+ * This structure contains everything that's relevant to program the
+ * hardware registers. When we want to figure out if we need to disable
+ * and re-enable FBC for a new configuration we just check if there's
+ * something different in the struct. The genx_fbc_activate functions
+ * are supposed to read from it in order to program the registers.
+ */
+ struct intel_fbc_state state;
+ const char *no_fbc_reason;
+};
+
+/* plane stride in pixels */
+static unsigned int intel_fbc_plane_stride(const struct intel_plane_state *plane_state)
+{
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int stride;
+
+ stride = plane_state->view.color_plane[0].mapping_stride;
+ if (!drm_rotation_90_or_270(plane_state->hw.rotation))
+ stride /= fb->format->cpp[0];
+
+ return stride;
+}
+
+/* plane stride based cfb stride in bytes, assuming 1:1 compression limit */
+static unsigned int _intel_fbc_cfb_stride(const struct intel_plane_state *plane_state)
+{
+ unsigned int cpp = 4; /* FBC always 4 bytes per pixel */
+
+ return intel_fbc_plane_stride(plane_state) * cpp;
+}
+
+/* minimum acceptable cfb stride in bytes, assuming 1:1 compression limit */
+static unsigned int skl_fbc_min_cfb_stride(const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
+ unsigned int limit = 4; /* 1:4 compression limit is the worst case */
+ unsigned int cpp = 4; /* FBC always 4 bytes per pixel */
+ unsigned int width = drm_rect_width(&plane_state->uapi.src) >> 16;
+ unsigned int height = 4; /* FBC segment is 4 lines */
+ unsigned int stride;
+
+ /* minimum segment stride we can use */
+ stride = width * cpp * height / limit;
+
+ /*
+ * Wa_16011863758: icl+
+ * Avoid some hardware segment address miscalculation.
+ */
+ if (DISPLAY_VER(i915) >= 11)
+ stride += 64;
+
+ /*
+ * At least some of the platforms require each 4 line segment to
+ * be 512 byte aligned. Just do it always for simplicity.
+ */
+ stride = ALIGN(stride, 512);
+
+ /* convert back to single line equivalent with 1:1 compression limit */
+ return stride * limit / height;
+}
+
+/* properly aligned cfb stride in bytes, assuming 1:1 compression limit */
+static unsigned int intel_fbc_cfb_stride(const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
+ unsigned int stride = _intel_fbc_cfb_stride(plane_state);
+
+ /*
+ * At least some of the platforms require each 4 line segment to
+ * be 512 byte aligned. Aligning each line to 512 bytes guarantees
+ * that regardless of the compression limit we choose later.
+ */
+ if (DISPLAY_VER(i915) >= 9)
+ return max(ALIGN(stride, 512), skl_fbc_min_cfb_stride(plane_state));
+ else
+ return stride;
+}
+
+static unsigned int intel_fbc_cfb_size(const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
+ int lines = drm_rect_height(&plane_state->uapi.src) >> 16;
+
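+	/* the hw can only track a limited number of lines */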
+ if (DISPLAY_VER(i915) == 7)
+ lines = min(lines, 2048);
+ else if (DISPLAY_VER(i915) >= 8)
+ lines = min(lines, 2560);
+
+ return lines * intel_fbc_cfb_stride(plane_state);
+}
+
+static u16 intel_fbc_override_cfb_stride(const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
+ unsigned int stride_aligned = intel_fbc_cfb_stride(plane_state);
+ unsigned int stride = _intel_fbc_cfb_stride(plane_state);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+
+ /*
+ * Override stride in 64 byte units per 4 line segment.
+ *
+ * Gen9 hw miscalculates cfb stride for linear as
+ * PLANE_STRIDE*512 instead of PLANE_STRIDE*64, so
+ * we always need to use the override there.
+ */
+ if (stride != stride_aligned ||
+ (DISPLAY_VER(i915) == 9 && fb->modifier == DRM_FORMAT_MOD_LINEAR))
+ return stride_aligned * 4 / 64;
+
+ return 0;
+}
+
+static u32 i8xx_fbc_ctl(struct intel_fbc *fbc)
+{
+ const struct intel_fbc_state *fbc_state = &fbc->state;
+ struct drm_i915_private *i915 = fbc->i915;
+ unsigned int cfb_stride;
+ u32 fbc_ctl;
+
+ cfb_stride = fbc_state->cfb_stride / fbc->limit;
+
+ /* FBC_CTL wants 32B or 64B units */
+ if (DISPLAY_VER(i915) == 2)
+ cfb_stride = (cfb_stride / 32) - 1;
+ else
+ cfb_stride = (cfb_stride / 64) - 1;
+
+ fbc_ctl = FBC_CTL_PERIODIC |
+ FBC_CTL_INTERVAL(fbc_state->interval) |
+ FBC_CTL_STRIDE(cfb_stride);
+
+ if (IS_I945GM(i915))
+ fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
+
+ if (fbc_state->fence_id >= 0)
+ fbc_ctl |= FBC_CTL_FENCENO(fbc_state->fence_id);
+
+ return fbc_ctl;
+}
+
+static u32 i965_fbc_ctl2(struct intel_fbc *fbc)
+{
+ const struct intel_fbc_state *fbc_state = &fbc->state;
+ u32 fbc_ctl2;
+
+ fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM |
+ FBC_CTL_PLANE(fbc_state->plane->i9xx_plane);
+
+ if (fbc_state->fence_id >= 0)
+ fbc_ctl2 |= FBC_CTL_CPU_FENCE_EN;
+
+ return fbc_ctl2;
+}
+
+static void i8xx_fbc_deactivate(struct intel_fbc *fbc)
+{
+ struct drm_i915_private *i915 = fbc->i915;
+ u32 fbc_ctl;
+
+ /* Disable compression */
+ fbc_ctl = intel_de_read(i915, FBC_CONTROL);
+ if ((fbc_ctl & FBC_CTL_EN) == 0)
+ return;
+
+ fbc_ctl &= ~FBC_CTL_EN;
+ intel_de_write(i915, FBC_CONTROL, fbc_ctl);
+
+ /* Wait for compressing bit to clear */
+ if (intel_de_wait_for_clear(i915, FBC_STATUS,
+ FBC_STAT_COMPRESSING, 10)) {
+ drm_dbg_kms(&i915->drm, "FBC idle timed out\n");
+ return;
+ }
+}
+
+static void i8xx_fbc_activate(struct intel_fbc *fbc)
+{
+ const struct intel_fbc_state *fbc_state = &fbc->state;
+ struct drm_i915_private *i915 = fbc->i915;
+ int i;
+
+ /* Clear old tags */
+ for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
+ intel_de_write(i915, FBC_TAG(i), 0);
+
+ if (DISPLAY_VER(i915) == 4) {
+ intel_de_write(i915, FBC_CONTROL2,
+ i965_fbc_ctl2(fbc));
+ intel_de_write(i915, FBC_FENCE_OFF,
+ fbc_state->fence_y_offset);
+ }
+
+ intel_de_write(i915, FBC_CONTROL,
+ FBC_CTL_EN | i8xx_fbc_ctl(fbc));
+}
+
+static bool i8xx_fbc_is_active(struct intel_fbc *fbc)
+{
+ return intel_de_read(fbc->i915, FBC_CONTROL) & FBC_CTL_EN;
+}
+
+static bool i8xx_fbc_is_compressing(struct intel_fbc *fbc)
+{
+ return intel_de_read(fbc->i915, FBC_STATUS) &
+ (FBC_STAT_COMPRESSING | FBC_STAT_COMPRESSED);
+}
+
+static void i8xx_fbc_nuke(struct intel_fbc *fbc)
+{
+ struct intel_fbc_state *fbc_state = &fbc->state;
+ enum i9xx_plane_id i9xx_plane = fbc_state->plane->i9xx_plane;
+ struct drm_i915_private *dev_priv = fbc->i915;
+
+ spin_lock_irq(&dev_priv->uncore.lock);
+ intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane),
+ intel_de_read_fw(dev_priv, DSPADDR(i9xx_plane)));
+ spin_unlock_irq(&dev_priv->uncore.lock);
+}
+
+static void i8xx_fbc_program_cfb(struct intel_fbc *fbc)
+{
+ struct drm_i915_private *i915 = fbc->i915;
+
+ GEM_BUG_ON(range_overflows_end_t(u64, i915->dsm.start,
+ fbc->compressed_fb.start, U32_MAX));
+ GEM_BUG_ON(range_overflows_end_t(u64, i915->dsm.start,
+ fbc->compressed_llb.start, U32_MAX));
+
+ intel_de_write(i915, FBC_CFB_BASE,
+ i915->dsm.start + fbc->compressed_fb.start);
+ intel_de_write(i915, FBC_LL_BASE,
+ i915->dsm.start + fbc->compressed_llb.start);
+}
+
+static const struct intel_fbc_funcs i8xx_fbc_funcs = {
+ .activate = i8xx_fbc_activate,
+ .deactivate = i8xx_fbc_deactivate,
+ .is_active = i8xx_fbc_is_active,
+ .is_compressing = i8xx_fbc_is_compressing,
+ .nuke = i8xx_fbc_nuke,
+ .program_cfb = i8xx_fbc_program_cfb,
+};
+
+static void i965_fbc_nuke(struct intel_fbc *fbc)
+{
+ struct intel_fbc_state *fbc_state = &fbc->state;
+ enum i9xx_plane_id i9xx_plane = fbc_state->plane->i9xx_plane;
+ struct drm_i915_private *dev_priv = fbc->i915;
+
+ spin_lock_irq(&dev_priv->uncore.lock);
+ intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane),
+ intel_de_read_fw(dev_priv, DSPSURF(i9xx_plane)));
+ spin_unlock_irq(&dev_priv->uncore.lock);
+}
+
+static const struct intel_fbc_funcs i965_fbc_funcs = {
+ .activate = i8xx_fbc_activate,
+ .deactivate = i8xx_fbc_deactivate,
+ .is_active = i8xx_fbc_is_active,
+ .is_compressing = i8xx_fbc_is_compressing,
+ .nuke = i965_fbc_nuke,
+ .program_cfb = i8xx_fbc_program_cfb,
+};
+
+static u32 g4x_dpfc_ctl_limit(struct intel_fbc *fbc)
+{
+ switch (fbc->limit) {
+ default:
+ MISSING_CASE(fbc->limit);
+ fallthrough;
+ case 1:
+ return DPFC_CTL_LIMIT_1X;
+ case 2:
+ return DPFC_CTL_LIMIT_2X;
+ case 4:
+ return DPFC_CTL_LIMIT_4X;
+ }
+}
+
+static u32 g4x_dpfc_ctl(struct intel_fbc *fbc)
+{
+ const struct intel_fbc_state *fbc_state = &fbc->state;
+ struct drm_i915_private *i915 = fbc->i915;
+ u32 dpfc_ctl;
+
+ dpfc_ctl = g4x_dpfc_ctl_limit(fbc) |
+ DPFC_CTL_PLANE_G4X(fbc_state->plane->i9xx_plane);
+
+ if (IS_G4X(i915))
+ dpfc_ctl |= DPFC_CTL_SR_EN;
+
+ if (fbc_state->fence_id >= 0) {
+ dpfc_ctl |= DPFC_CTL_FENCE_EN_G4X;
+
+ if (DISPLAY_VER(i915) < 6)
+ dpfc_ctl |= DPFC_CTL_FENCENO(fbc_state->fence_id);
+ }
+
+ return dpfc_ctl;
+}
+
+static void g4x_fbc_activate(struct intel_fbc *fbc)
+{
+ const struct intel_fbc_state *fbc_state = &fbc->state;
+ struct drm_i915_private *i915 = fbc->i915;
+
+ intel_de_write(i915, DPFC_FENCE_YOFF,
+ fbc_state->fence_y_offset);
+
+ intel_de_write(i915, DPFC_CONTROL,
+ DPFC_CTL_EN | g4x_dpfc_ctl(fbc));
+}
+
+static void g4x_fbc_deactivate(struct intel_fbc *fbc)
+{
+ struct drm_i915_private *i915 = fbc->i915;
+ u32 dpfc_ctl;
+
+ /* Disable compression */
+ dpfc_ctl = intel_de_read(i915, DPFC_CONTROL);
+ if (dpfc_ctl & DPFC_CTL_EN) {
+ dpfc_ctl &= ~DPFC_CTL_EN;
+ intel_de_write(i915, DPFC_CONTROL, dpfc_ctl);
+ }
+}
+
+static bool g4x_fbc_is_active(struct intel_fbc *fbc)
+{
+ return intel_de_read(fbc->i915, DPFC_CONTROL) & DPFC_CTL_EN;
+}
+
+static bool g4x_fbc_is_compressing(struct intel_fbc *fbc)
+{
+ return intel_de_read(fbc->i915, DPFC_STATUS) & DPFC_COMP_SEG_MASK;
+}
+
+static void g4x_fbc_program_cfb(struct intel_fbc *fbc)
+{
+ struct drm_i915_private *i915 = fbc->i915;
+
+ intel_de_write(i915, DPFC_CB_BASE, fbc->compressed_fb.start);
+}
+
+static const struct intel_fbc_funcs g4x_fbc_funcs = {
+ .activate = g4x_fbc_activate,
+ .deactivate = g4x_fbc_deactivate,
+ .is_active = g4x_fbc_is_active,
+ .is_compressing = g4x_fbc_is_compressing,
+ .nuke = i965_fbc_nuke,
+ .program_cfb = g4x_fbc_program_cfb,
+};
+
+static void ilk_fbc_activate(struct intel_fbc *fbc)
+{
+ struct intel_fbc_state *fbc_state = &fbc->state;
+ struct drm_i915_private *i915 = fbc->i915;
+
+ intel_de_write(i915, ILK_DPFC_FENCE_YOFF(fbc->id),
+ fbc_state->fence_y_offset);
+
+ intel_de_write(i915, ILK_DPFC_CONTROL(fbc->id),
+ DPFC_CTL_EN | g4x_dpfc_ctl(fbc));
+}
+
+static void ilk_fbc_deactivate(struct intel_fbc *fbc)
+{
+ struct drm_i915_private *i915 = fbc->i915;
+ u32 dpfc_ctl;
+
+ /* Disable compression */
+ dpfc_ctl = intel_de_read(i915, ILK_DPFC_CONTROL(fbc->id));
+ if (dpfc_ctl & DPFC_CTL_EN) {
+ dpfc_ctl &= ~DPFC_CTL_EN;
+ intel_de_write(i915, ILK_DPFC_CONTROL(fbc->id), dpfc_ctl);
+ }
+}
+
+static bool ilk_fbc_is_active(struct intel_fbc *fbc)
+{
+ return intel_de_read(fbc->i915, ILK_DPFC_CONTROL(fbc->id)) & DPFC_CTL_EN;
+}
+
+static bool ilk_fbc_is_compressing(struct intel_fbc *fbc)
+{
+ return intel_de_read(fbc->i915, ILK_DPFC_STATUS(fbc->id)) & DPFC_COMP_SEG_MASK;
+}
+
+static void ilk_fbc_program_cfb(struct intel_fbc *fbc)
+{
+ struct drm_i915_private *i915 = fbc->i915;
+
+ intel_de_write(i915, ILK_DPFC_CB_BASE(fbc->id), fbc->compressed_fb.start);
+}
+
+static const struct intel_fbc_funcs ilk_fbc_funcs = {
+ .activate = ilk_fbc_activate,
+ .deactivate = ilk_fbc_deactivate,
+ .is_active = ilk_fbc_is_active,
+ .is_compressing = ilk_fbc_is_compressing,
+ .nuke = i965_fbc_nuke,
+ .program_cfb = ilk_fbc_program_cfb,
+};
+
+static void snb_fbc_program_fence(struct intel_fbc *fbc)
+{
+ const struct intel_fbc_state *fbc_state = &fbc->state;
+ struct drm_i915_private *i915 = fbc->i915;
+ u32 ctl = 0;
+
+ if (fbc_state->fence_id >= 0)
+ ctl = SNB_DPFC_FENCE_EN | SNB_DPFC_FENCENO(fbc_state->fence_id);
+
+ intel_de_write(i915, SNB_DPFC_CTL_SA, ctl);
+ intel_de_write(i915, SNB_DPFC_CPU_FENCE_OFFSET, fbc_state->fence_y_offset);
+}
+
+static void snb_fbc_activate(struct intel_fbc *fbc)
+{
+ snb_fbc_program_fence(fbc);
+
+ ilk_fbc_activate(fbc);
+}
+
+static void snb_fbc_nuke(struct intel_fbc *fbc)
+{
+ struct drm_i915_private *i915 = fbc->i915;
+
+ intel_de_write(i915, MSG_FBC_REND_STATE(fbc->id), FBC_REND_NUKE);
+ intel_de_posting_read(i915, MSG_FBC_REND_STATE(fbc->id));
+}
+
+static const struct intel_fbc_funcs snb_fbc_funcs = {
+ .activate = snb_fbc_activate,
+ .deactivate = ilk_fbc_deactivate,
+ .is_active = ilk_fbc_is_active,
+ .is_compressing = ilk_fbc_is_compressing,
+ .nuke = snb_fbc_nuke,
+ .program_cfb = ilk_fbc_program_cfb,
+};
+
+static void glk_fbc_program_cfb_stride(struct intel_fbc *fbc)
+{
+ const struct intel_fbc_state *fbc_state = &fbc->state;
+ struct drm_i915_private *i915 = fbc->i915;
+ u32 val = 0;
+
+ if (fbc_state->override_cfb_stride)
+ val |= FBC_STRIDE_OVERRIDE |
+ FBC_STRIDE(fbc_state->override_cfb_stride / fbc->limit);
+
+ intel_de_write(i915, GLK_FBC_STRIDE(fbc->id), val);
+}
+
+static void skl_fbc_program_cfb_stride(struct intel_fbc *fbc)
+{
+ const struct intel_fbc_state *fbc_state = &fbc->state;
+ struct drm_i915_private *i915 = fbc->i915;
+ u32 val = 0;
+
+ /* Display WA #0529: skl, kbl, bxt. */
+ if (fbc_state->override_cfb_stride)
+ val |= CHICKEN_FBC_STRIDE_OVERRIDE |
+ CHICKEN_FBC_STRIDE(fbc_state->override_cfb_stride / fbc->limit);
+
+ intel_de_rmw(i915, CHICKEN_MISC_4,
+ CHICKEN_FBC_STRIDE_OVERRIDE |
+ CHICKEN_FBC_STRIDE_MASK, val);
+}
+
+static u32 ivb_dpfc_ctl(struct intel_fbc *fbc)
+{
+ const struct intel_fbc_state *fbc_state = &fbc->state;
+ struct drm_i915_private *i915 = fbc->i915;
+ u32 dpfc_ctl;
+
+ dpfc_ctl = g4x_dpfc_ctl_limit(fbc);
+
+ if (IS_IVYBRIDGE(i915))
+ dpfc_ctl |= DPFC_CTL_PLANE_IVB(fbc_state->plane->i9xx_plane);
+
+ if (fbc_state->fence_id >= 0)
+ dpfc_ctl |= DPFC_CTL_FENCE_EN_IVB;
+
+ if (fbc->false_color)
+ dpfc_ctl |= DPFC_CTL_FALSE_COLOR;
+
+ return dpfc_ctl;
+}
+
+static void ivb_fbc_activate(struct intel_fbc *fbc)
+{
+ struct drm_i915_private *i915 = fbc->i915;
+
+ if (DISPLAY_VER(i915) >= 10)
+ glk_fbc_program_cfb_stride(fbc);
+ else if (DISPLAY_VER(i915) == 9)
+ skl_fbc_program_cfb_stride(fbc);
+
+ if (to_gt(i915)->ggtt->num_fences)
+ snb_fbc_program_fence(fbc);
+
+ intel_de_write(i915, ILK_DPFC_CONTROL(fbc->id),
+ DPFC_CTL_EN | ivb_dpfc_ctl(fbc));
+}
+
+static bool ivb_fbc_is_compressing(struct intel_fbc *fbc)
+{
+ return intel_de_read(fbc->i915, ILK_DPFC_STATUS2(fbc->id)) & DPFC_COMP_SEG_MASK_IVB;
+}
+
+static void ivb_fbc_set_false_color(struct intel_fbc *fbc,
+ bool enable)
+{
+ intel_de_rmw(fbc->i915, ILK_DPFC_CONTROL(fbc->id),
+ DPFC_CTL_FALSE_COLOR, enable ? DPFC_CTL_FALSE_COLOR : 0);
+}
+
+static const struct intel_fbc_funcs ivb_fbc_funcs = {
+ .activate = ivb_fbc_activate,
+ .deactivate = ilk_fbc_deactivate,
+ .is_active = ilk_fbc_is_active,
+ .is_compressing = ivb_fbc_is_compressing,
+ .nuke = snb_fbc_nuke,
+ .program_cfb = ilk_fbc_program_cfb,
+ .set_false_color = ivb_fbc_set_false_color,
+};
+
+static bool intel_fbc_hw_is_active(struct intel_fbc *fbc)
+{
+ return fbc->funcs->is_active(fbc);
+}
+
+static void intel_fbc_hw_activate(struct intel_fbc *fbc)
+{
+ trace_intel_fbc_activate(fbc->state.plane);
+
+ fbc->active = true;
+ fbc->activated = true;
+
+ fbc->funcs->activate(fbc);
+}
+
+static void intel_fbc_hw_deactivate(struct intel_fbc *fbc)
+{
+ trace_intel_fbc_deactivate(fbc->state.plane);
+
+ fbc->active = false;
+
+ fbc->funcs->deactivate(fbc);
+}
+
+static bool intel_fbc_is_compressing(struct intel_fbc *fbc)
+{
+ return fbc->funcs->is_compressing(fbc);
+}
+
+static void intel_fbc_nuke(struct intel_fbc *fbc)
+{
+ struct drm_i915_private *i915 = fbc->i915;
+
+ drm_WARN_ON(&i915->drm, fbc->flip_pending);
+
+ trace_intel_fbc_nuke(fbc->state.plane);
+
+ fbc->funcs->nuke(fbc);
+}
+
+static void intel_fbc_activate(struct intel_fbc *fbc)
+{
+ intel_fbc_hw_activate(fbc);
+ intel_fbc_nuke(fbc);
+
+ fbc->no_fbc_reason = NULL;
+}
+
+static void intel_fbc_deactivate(struct intel_fbc *fbc, const char *reason)
+{
+ struct drm_i915_private *i915 = fbc->i915;
+
+ drm_WARN_ON(&i915->drm, !mutex_is_locked(&fbc->lock));
+
+ if (fbc->active)
+ intel_fbc_hw_deactivate(fbc);
+
+ fbc->no_fbc_reason = reason;
+}
+
+static u64 intel_fbc_cfb_base_max(struct drm_i915_private *i915)
+{
+ if (DISPLAY_VER(i915) >= 5 || IS_G4X(i915))
+ return BIT_ULL(28);
+ else
+ return BIT_ULL(32);
+}
+
+static u64 intel_fbc_stolen_end(struct drm_i915_private *i915)
+{
+ u64 end;
+
+	/*
+	 * The FBC hardware for BDW/SKL doesn't have access to the stolen
+	 * reserved range size, so it always assumes the maximum (8 MiB) is
+	 * used. If we enable FBC using a CFB on that memory range we'll get
+	 * FIFO underruns, even if that range is not reserved by the BIOS.
+	 */
+ if (IS_BROADWELL(i915) ||
+ (DISPLAY_VER(i915) == 9 && !IS_BROXTON(i915)))
+ end = resource_size(&i915->dsm) - 8 * 1024 * 1024;
+ else
+ end = U64_MAX;
+
+ return min(end, intel_fbc_cfb_base_max(i915));
+}
+
+static int intel_fbc_min_limit(const struct intel_plane_state *plane_state)
+{
+ return plane_state->hw.fb->format->cpp[0] == 2 ? 2 : 1;
+}
+
+static int intel_fbc_max_limit(struct drm_i915_private *i915)
+{
+ /* WaFbcOnly1to1Ratio:ctg */
+ if (IS_G4X(i915))
+ return 1;
+
+ /*
+ * FBC2 can only do 1:1, 1:2, 1:4, we limit
+ * FBC1 to the same out of convenience.
+ */
+ return 4;
+}
+
+static int find_compression_limit(struct intel_fbc *fbc,
+ unsigned int size, int min_limit)
+{
+ struct drm_i915_private *i915 = fbc->i915;
+ u64 end = intel_fbc_stolen_end(i915);
+ int ret, limit = min_limit;
+
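+	/* at a 1:limit ratio the cfb only needs size / limit bytes of stolen */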
+ size /= limit;
+
+ /* Try to over-allocate to reduce reallocations and fragmentation. */
+ ret = i915_gem_stolen_insert_node_in_range(i915, &fbc->compressed_fb,
+ size <<= 1, 4096, 0, end);
+ if (ret == 0)
+ return limit;
+
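+	/*
+	 * That failed: retry at the original size, then keep halving the
+	 * size while doubling the compression limit until something fits.
+	 */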
+ for (; limit <= intel_fbc_max_limit(i915); limit <<= 1) {
+ ret = i915_gem_stolen_insert_node_in_range(i915, &fbc->compressed_fb,
+ size >>= 1, 4096, 0, end);
+ if (ret == 0)
+ return limit;
+ }
+
+ return 0;
+}
+
+static int intel_fbc_alloc_cfb(struct intel_fbc *fbc,
+ unsigned int size, int min_limit)
+{
+ struct drm_i915_private *i915 = fbc->i915;
+ int ret;
+
+ drm_WARN_ON(&i915->drm,
+ drm_mm_node_allocated(&fbc->compressed_fb));
+ drm_WARN_ON(&i915->drm,
+ drm_mm_node_allocated(&fbc->compressed_llb));
+
+ if (DISPLAY_VER(i915) < 5 && !IS_G4X(i915)) {
+ ret = i915_gem_stolen_insert_node(i915, &fbc->compressed_llb,
+ 4096, 4096);
+ if (ret)
+ goto err;
+ }
+
+ ret = find_compression_limit(fbc, size, min_limit);
+ if (!ret)
+ goto err_llb;
+ else if (ret > min_limit)
+ drm_info_once(&i915->drm,
+			      "Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced size. Try to increase the stolen memory size if available in the BIOS.\n");
+
+ fbc->limit = ret;
+
+ drm_dbg_kms(&i915->drm,
+ "reserved %llu bytes of contiguous stolen space for FBC, limit: %d\n",
+ fbc->compressed_fb.size, fbc->limit);
+
+ return 0;
+
+err_llb:
+ if (drm_mm_node_allocated(&fbc->compressed_llb))
+ i915_gem_stolen_remove_node(i915, &fbc->compressed_llb);
+err:
+ if (drm_mm_initialized(&i915->mm.stolen))
+ drm_info_once(&i915->drm, "not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
+ return -ENOSPC;
+}
+
+static void intel_fbc_program_cfb(struct intel_fbc *fbc)
+{
+ fbc->funcs->program_cfb(fbc);
+}
+
+static void intel_fbc_program_workarounds(struct intel_fbc *fbc)
+{
+ /* Wa_22014263786:icl,jsl,tgl,dg1,rkl,adls,adlp */
+ if (DISPLAY_VER(fbc->i915) >= 11 && !IS_DG2(fbc->i915))
+ intel_de_rmw(fbc->i915, ILK_DPFC_CHICKEN(fbc->id), 0,
+ DPFC_CHICKEN_FORCE_SLB_INVALIDATION);
+}
+
+static void __intel_fbc_cleanup_cfb(struct intel_fbc *fbc)
+{
+ struct drm_i915_private *i915 = fbc->i915;
+
+ if (WARN_ON(intel_fbc_hw_is_active(fbc)))
+ return;
+
+ if (drm_mm_node_allocated(&fbc->compressed_llb))
+ i915_gem_stolen_remove_node(i915, &fbc->compressed_llb);
+ if (drm_mm_node_allocated(&fbc->compressed_fb))
+ i915_gem_stolen_remove_node(i915, &fbc->compressed_fb);
+}
+
+void intel_fbc_cleanup(struct drm_i915_private *i915)
+{
+ struct intel_fbc *fbc;
+ enum intel_fbc_id fbc_id;
+
+ for_each_intel_fbc(i915, fbc, fbc_id) {
+ mutex_lock(&fbc->lock);
+ __intel_fbc_cleanup_cfb(fbc);
+ mutex_unlock(&fbc->lock);
+
+ kfree(fbc);
+ }
+}
+
+static bool stride_is_valid(const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int stride = intel_fbc_plane_stride(plane_state) *
+ fb->format->cpp[0];
+
+ /* This should have been caught earlier. */
+ if (drm_WARN_ON_ONCE(&i915->drm, (stride & (64 - 1)) != 0))
+ return false;
+
+ /* Below are the additional FBC restrictions. */
+ if (stride < 512)
+ return false;
+
+ if (DISPLAY_VER(i915) == 2 || DISPLAY_VER(i915) == 3)
+ return stride == 4096 || stride == 8192;
+
+ if (DISPLAY_VER(i915) == 4 && !IS_G4X(i915) && stride < 2048)
+ return false;
+
+ /* Display WA #1105: skl,bxt,kbl,cfl,glk */
+ if ((DISPLAY_VER(i915) == 9 || IS_GEMINILAKE(i915)) &&
+ fb->modifier == DRM_FORMAT_MOD_LINEAR && stride & 511)
+ return false;
+
+ if (stride > 16384)
+ return false;
+
+ return true;
+}
+
+static bool pixel_format_is_valid(const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+
+ switch (fb->format->format) {
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_XBGR8888:
+ return true;
+ case DRM_FORMAT_XRGB1555:
+ case DRM_FORMAT_RGB565:
+ /* 16bpp not supported on gen2 */
+ if (DISPLAY_VER(i915) == 2)
+ return false;
+ /* WaFbcOnly1to1Ratio:ctg */
+ if (IS_G4X(i915))
+ return false;
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool rotation_is_valid(const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int rotation = plane_state->hw.rotation;
+
+ if (DISPLAY_VER(i915) >= 9 && fb->format->format == DRM_FORMAT_RGB565 &&
+ drm_rotation_90_or_270(rotation))
+ return false;
+ else if (DISPLAY_VER(i915) <= 4 && !IS_G4X(i915) &&
+ rotation != DRM_MODE_ROTATE_0)
+ return false;
+
+ return true;
+}
+
+/*
+ * For some reason, the hardware tracking starts looking at whatever we
+ * programmed as the display plane base address register. It does not look at
+ * the X and Y offset registers. That's why we include the src x/y offsets
+ * instead of just looking at the plane size.
+ */
+static bool intel_fbc_hw_tracking_covers_screen(const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
+ unsigned int effective_w, effective_h, max_w, max_h;
+
+ if (DISPLAY_VER(i915) >= 10) {
+ max_w = 5120;
+ max_h = 4096;
+ } else if (DISPLAY_VER(i915) >= 8 || IS_HASWELL(i915)) {
+ max_w = 4096;
+ max_h = 4096;
+ } else if (IS_G4X(i915) || DISPLAY_VER(i915) >= 5) {
+ max_w = 4096;
+ max_h = 2048;
+ } else {
+ max_w = 2048;
+ max_h = 1536;
+ }
+
+ effective_w = plane_state->view.color_plane[0].x +
+ (drm_rect_width(&plane_state->uapi.src) >> 16);
+ effective_h = plane_state->view.color_plane[0].y +
+ (drm_rect_height(&plane_state->uapi.src) >> 16);
+
+ return effective_w <= max_w && effective_h <= max_h;
+}
+
+static bool tiling_is_valid(const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+
+ switch (fb->modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ case I915_FORMAT_MOD_Y_TILED:
+ case I915_FORMAT_MOD_Yf_TILED:
+ return DISPLAY_VER(i915) >= 9;
+ case I915_FORMAT_MOD_4_TILED:
+ case I915_FORMAT_MOD_X_TILED:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static void intel_fbc_update_state(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_plane *plane)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct intel_plane_state *plane_state =
+ intel_atomic_get_new_plane_state(state, plane);
+ struct intel_fbc *fbc = plane->fbc;
+ struct intel_fbc_state *fbc_state = &fbc->state;
+
+ WARN_ON(plane_state->no_fbc_reason);
+ WARN_ON(fbc_state->plane && fbc_state->plane != plane);
+
+ fbc_state->plane = plane;
+
+ /* FBC1 compression interval: arbitrary choice of 1 second */
+ fbc_state->interval = drm_mode_vrefresh(&crtc_state->hw.adjusted_mode);
+
+ fbc_state->fence_y_offset = intel_plane_fence_y_offset(plane_state);
+
+ drm_WARN_ON(&i915->drm, plane_state->flags & PLANE_HAS_FENCE &&
+ !plane_state->ggtt_vma->fence);
+
+ if (plane_state->flags & PLANE_HAS_FENCE &&
+ plane_state->ggtt_vma->fence)
+ fbc_state->fence_id = plane_state->ggtt_vma->fence->id;
+ else
+ fbc_state->fence_id = -1;
+
+ fbc_state->cfb_stride = intel_fbc_cfb_stride(plane_state);
+ fbc_state->cfb_size = intel_fbc_cfb_size(plane_state);
+ fbc_state->override_cfb_stride = intel_fbc_override_cfb_stride(plane_state);
+}
+
+static bool intel_fbc_is_fence_ok(const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
+
+	/*
+	 * The use of a CPU fence is one of two ways to detect writes by the
+	 * CPU to the scanout and trigger updates to the FBC.
+	 *
+	 * The other method is by software tracking (see
+	 * intel_fbc_invalidate/flush()), which manually notifies FBC to nuke
+	 * the current compressed buffer and recompress it.
+	 *
+	 * Note that it is possible for a tiled surface to be unmappable (and
+	 * so have no fence associated with it) due to aperture constraints
+	 * at the time of pinning.
+	 *
+	 * FIXME with 90/270 degree rotation we should use the fence on
+	 * the normal GTT view (the rotated view doesn't even have a
+	 * fence). Would need changes to the FBC fence Y offset as well.
+	 * For now this will effectively disable FBC with 90/270 degree
+	 * rotation.
+	 */
+ return DISPLAY_VER(i915) >= 9 ||
+ (plane_state->flags & PLANE_HAS_FENCE &&
+ plane_state->ggtt_vma->fence);
+}
+
+static bool intel_fbc_is_cfb_ok(const struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct intel_fbc *fbc = plane->fbc;
+
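+	/*
+	 * The stolen node holds cfb_size / limit bytes, so the uncompressed
+	 * size it can cover is compressed_fb.size * limit.
+	 */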
+ return intel_fbc_min_limit(plane_state) <= fbc->limit &&
+ intel_fbc_cfb_size(plane_state) <= fbc->compressed_fb.size * fbc->limit;
+}
+
+static bool intel_fbc_is_ok(const struct intel_plane_state *plane_state)
+{
+ return !plane_state->no_fbc_reason &&
+ intel_fbc_is_fence_ok(plane_state) &&
+ intel_fbc_is_cfb_ok(plane_state);
+}
+
+static int intel_fbc_check_plane(struct intel_atomic_state *state,
+ struct intel_plane *plane)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_plane_state *plane_state =
+ intel_atomic_get_new_plane_state(state, plane);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ struct intel_crtc *crtc = to_intel_crtc(plane_state->hw.crtc);
+ const struct intel_crtc_state *crtc_state;
+ struct intel_fbc *fbc = plane->fbc;
+
+ if (!fbc)
+ return 0;
+
+ if (intel_vgpu_active(i915)) {
+ plane_state->no_fbc_reason = "VGPU active";
+ return 0;
+ }
+
+ if (!i915->params.enable_fbc) {
+ plane_state->no_fbc_reason = "disabled per module param or by default";
+ return 0;
+ }
+
+ if (!plane_state->uapi.visible) {
+ plane_state->no_fbc_reason = "plane not visible";
+ return 0;
+ }
+
+ crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
+
+ if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
+ plane_state->no_fbc_reason = "interlaced mode not supported";
+ return 0;
+ }
+
+ if (crtc_state->double_wide) {
+ plane_state->no_fbc_reason = "double wide pipe not supported";
+ return 0;
+ }
+
+	/*
+	 * Display 12+ does not support FBC together with PSR2.
+	 * The recommendation is to keep this combination disabled.
+	 * Bspec: 50422 HSD: 14010260002
+	 */
+ if (DISPLAY_VER(i915) >= 12 && crtc_state->has_psr2) {
+ plane_state->no_fbc_reason = "PSR2 enabled";
+ return 0;
+ }
+
+ /* Wa_14016291713 */
+ if (IS_DISPLAY_VER(i915, 12, 13) && crtc_state->has_psr) {
+ plane_state->no_fbc_reason = "PSR1 enabled (Wa_14016291713)";
+ return 0;
+ }
+
+ if (!pixel_format_is_valid(plane_state)) {
+ plane_state->no_fbc_reason = "pixel format not supported";
+ return 0;
+ }
+
+ if (!tiling_is_valid(plane_state)) {
+ plane_state->no_fbc_reason = "tiling not supported";
+ return 0;
+ }
+
+ if (!rotation_is_valid(plane_state)) {
+ plane_state->no_fbc_reason = "rotation not supported";
+ return 0;
+ }
+
+ if (!stride_is_valid(plane_state)) {
+ plane_state->no_fbc_reason = "stride not supported";
+ return 0;
+ }
+
+ if (plane_state->hw.pixel_blend_mode != DRM_MODE_BLEND_PIXEL_NONE &&
+ fb->format->has_alpha) {
+ plane_state->no_fbc_reason = "per-pixel alpha not supported";
+ return 0;
+ }
+
+ if (!intel_fbc_hw_tracking_covers_screen(plane_state)) {
+ plane_state->no_fbc_reason = "plane size too big";
+ return 0;
+ }
+
+ /*
+ * Work around a problem on GEN9+ HW, where enabling FBC on a plane
+ * having a Y offset that isn't divisible by 4 causes FIFO underrun
+ * and screen flicker.
+ */
+ if (DISPLAY_VER(i915) >= 9 &&
+ plane_state->view.color_plane[0].y & 3) {
+ plane_state->no_fbc_reason = "plane start Y offset misaligned";
+ return 0;
+ }
+
+ /* Wa_22010751166: icl, ehl, tgl, dg1, rkl */
+ if (DISPLAY_VER(i915) >= 11 &&
+ (plane_state->view.color_plane[0].y +
+ (drm_rect_height(&plane_state->uapi.src) >> 16)) & 3) {
+ plane_state->no_fbc_reason = "plane end Y offset misaligned";
+ return 0;
+ }
+
+ /* WaFbcExceedCdClockThreshold:hsw,bdw */
+ if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
+ const struct intel_cdclk_state *cdclk_state;
+
+ cdclk_state = intel_atomic_get_cdclk_state(state);
+ if (IS_ERR(cdclk_state))
+ return PTR_ERR(cdclk_state);
+
+ if (crtc_state->pixel_rate >= cdclk_state->logical.cdclk * 95 / 100) {
+ plane_state->no_fbc_reason = "pixel rate too high";
+ return 0;
+ }
+ }
+
+ plane_state->no_fbc_reason = NULL;
+
+ return 0;
+}
+
+static bool intel_fbc_can_flip_nuke(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_plane *plane)
+{
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct intel_plane_state *old_plane_state =
+ intel_atomic_get_old_plane_state(state, plane);
+ const struct intel_plane_state *new_plane_state =
+ intel_atomic_get_new_plane_state(state, plane);
+ const struct drm_framebuffer *old_fb = old_plane_state->hw.fb;
+ const struct drm_framebuffer *new_fb = new_plane_state->hw.fb;
+
+ if (drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi))
+ return false;
+
+ if (!intel_fbc_is_ok(old_plane_state) ||
+ !intel_fbc_is_ok(new_plane_state))
+ return false;
+
+ if (old_fb->format->format != new_fb->format->format)
+ return false;
+
+ if (old_fb->modifier != new_fb->modifier)
+ return false;
+
+ if (intel_fbc_plane_stride(old_plane_state) !=
+ intel_fbc_plane_stride(new_plane_state))
+ return false;
+
+ if (intel_fbc_cfb_stride(old_plane_state) !=
+ intel_fbc_cfb_stride(new_plane_state))
+ return false;
+
+ if (intel_fbc_cfb_size(old_plane_state) !=
+ intel_fbc_cfb_size(new_plane_state))
+ return false;
+
+ if (intel_fbc_override_cfb_stride(old_plane_state) !=
+ intel_fbc_override_cfb_stride(new_plane_state))
+ return false;
+
+ return true;
+}
+
+static bool __intel_fbc_pre_update(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_plane *plane)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_fbc *fbc = plane->fbc;
+ bool need_vblank_wait = false;
+
+ fbc->flip_pending = true;
+
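+	/*
+	 * If the new plane state is compatible with the current cfb
+	 * parameters we can keep FBC enabled across the flip and just
+	 * nuke (recompress) afterwards.
+	 */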
+ if (intel_fbc_can_flip_nuke(state, crtc, plane))
+ return need_vblank_wait;
+
+ intel_fbc_deactivate(fbc, "update pending");
+
+ /*
+ * Display WA #1198: glk+
+ * Need an extra vblank wait between FBC disable and most plane
+ * updates. Bspec says this is only needed for plane disable, but
+ * that is not true. Touching most plane registers will cause the
+ * corruption to appear. Also SKL/derivatives do not seem to be
+ * affected.
+ *
+ * TODO: could optimize this a bit by sampling the frame
+ * counter when we disable FBC (if it was already done earlier)
+ * and skipping the extra vblank wait before the plane update
+ * if at least one frame has already passed.
+ */
+ if (fbc->activated && DISPLAY_VER(i915) >= 10)
+ need_vblank_wait = true;
+ fbc->activated = false;
+
+ return need_vblank_wait;
+}
+
+bool intel_fbc_pre_update(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ const struct intel_plane_state *plane_state;
+ bool need_vblank_wait = false;
+ struct intel_plane *plane;
+ int i;
+
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+ struct intel_fbc *fbc = plane->fbc;
+
+ if (!fbc || plane->pipe != crtc->pipe)
+ continue;
+
+ mutex_lock(&fbc->lock);
+
+ if (fbc->state.plane == plane)
+ need_vblank_wait |= __intel_fbc_pre_update(state, crtc, plane);
+
+ mutex_unlock(&fbc->lock);
+ }
+
+ return need_vblank_wait;
+}
+
+static void __intel_fbc_disable(struct intel_fbc *fbc)
+{
+ struct drm_i915_private *i915 = fbc->i915;
+ struct intel_plane *plane = fbc->state.plane;
+
+ drm_WARN_ON(&i915->drm, !mutex_is_locked(&fbc->lock));
+ drm_WARN_ON(&i915->drm, fbc->active);
+
+ drm_dbg_kms(&i915->drm, "Disabling FBC on [PLANE:%d:%s]\n",
+ plane->base.base.id, plane->base.name);
+
+ __intel_fbc_cleanup_cfb(fbc);
+
+ fbc->state.plane = NULL;
+ fbc->flip_pending = false;
+ fbc->busy_bits = 0;
+}
+
+static void __intel_fbc_post_update(struct intel_fbc *fbc)
+{
+ struct drm_i915_private *i915 = fbc->i915;
+
+ drm_WARN_ON(&i915->drm, !mutex_is_locked(&fbc->lock));
+
+ if (!fbc->busy_bits)
+ intel_fbc_activate(fbc);
+ else
+ intel_fbc_deactivate(fbc, "frontbuffer write");
+}
+
+void intel_fbc_post_update(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ const struct intel_plane_state *plane_state;
+ struct intel_plane *plane;
+ int i;
+
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+ struct intel_fbc *fbc = plane->fbc;
+
+ if (!fbc || plane->pipe != crtc->pipe)
+ continue;
+
+ mutex_lock(&fbc->lock);
+
+ if (fbc->state.plane == plane) {
+ fbc->flip_pending = false;
+ __intel_fbc_post_update(fbc);
+ }
+
+ mutex_unlock(&fbc->lock);
+ }
+}
+
+static unsigned int intel_fbc_get_frontbuffer_bit(struct intel_fbc *fbc)
+{
+ if (fbc->state.plane)
+ return fbc->state.plane->frontbuffer_bit;
+ else
+ return 0;
+}
+
+static void __intel_fbc_invalidate(struct intel_fbc *fbc,
+ unsigned int frontbuffer_bits,
+ enum fb_op_origin origin)
+{
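+	/* flips and cursor updates are instead handled via the plane update hooks */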
+ if (origin == ORIGIN_FLIP || origin == ORIGIN_CURSOR_UPDATE)
+ return;
+
+ mutex_lock(&fbc->lock);
+
+ frontbuffer_bits &= intel_fbc_get_frontbuffer_bit(fbc);
+ if (!frontbuffer_bits)
+ goto out;
+
+ fbc->busy_bits |= frontbuffer_bits;
+ intel_fbc_deactivate(fbc, "frontbuffer write");
+
+out:
+ mutex_unlock(&fbc->lock);
+}
+
+void intel_fbc_invalidate(struct drm_i915_private *i915,
+ unsigned int frontbuffer_bits,
+ enum fb_op_origin origin)
+{
+ struct intel_fbc *fbc;
+ enum intel_fbc_id fbc_id;
+
+ for_each_intel_fbc(i915, fbc, fbc_id)
+ __intel_fbc_invalidate(fbc, frontbuffer_bits, origin);
+}
+
+static void __intel_fbc_flush(struct intel_fbc *fbc,
+ unsigned int frontbuffer_bits,
+ enum fb_op_origin origin)
+{
+ mutex_lock(&fbc->lock);
+
+ frontbuffer_bits &= intel_fbc_get_frontbuffer_bit(fbc);
+ if (!frontbuffer_bits)
+ goto out;
+
+ fbc->busy_bits &= ~frontbuffer_bits;
+
+ if (origin == ORIGIN_FLIP || origin == ORIGIN_CURSOR_UPDATE)
+ goto out;
+
+ if (fbc->busy_bits || fbc->flip_pending)
+ goto out;
+
+ if (fbc->active)
+ intel_fbc_nuke(fbc);
+ else
+ intel_fbc_activate(fbc);
+
+out:
+ mutex_unlock(&fbc->lock);
+}
+
+void intel_fbc_flush(struct drm_i915_private *i915,
+ unsigned int frontbuffer_bits,
+ enum fb_op_origin origin)
+{
+ struct intel_fbc *fbc;
+ enum intel_fbc_id fbc_id;
+
+ for_each_intel_fbc(i915, fbc, fbc_id)
+ __intel_fbc_flush(fbc, frontbuffer_bits, origin);
+}
+
+int intel_fbc_atomic_check(struct intel_atomic_state *state)
+{
+ struct intel_plane_state *plane_state;
+ struct intel_plane *plane;
+ int i;
+
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+ int ret;
+
+ ret = intel_fbc_check_plane(state, plane);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void __intel_fbc_enable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_plane *plane)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_plane_state *plane_state =
+ intel_atomic_get_new_plane_state(state, plane);
+ struct intel_fbc *fbc = plane->fbc;
+
+ if (fbc->state.plane) {
+ if (fbc->state.plane != plane)
+ return;
+
+ if (intel_fbc_is_ok(plane_state)) {
+ intel_fbc_update_state(state, crtc, plane);
+ return;
+ }
+
+ __intel_fbc_disable(fbc);
+ }
+
+ drm_WARN_ON(&i915->drm, fbc->active);
+
+ fbc->no_fbc_reason = plane_state->no_fbc_reason;
+ if (fbc->no_fbc_reason)
+ return;
+
+ if (!intel_fbc_is_fence_ok(plane_state)) {
+ fbc->no_fbc_reason = "framebuffer not fenced";
+ return;
+ }
+
+ if (fbc->underrun_detected) {
+ fbc->no_fbc_reason = "FIFO underrun";
+ return;
+ }
+
+ if (intel_fbc_alloc_cfb(fbc, intel_fbc_cfb_size(plane_state),
+ intel_fbc_min_limit(plane_state))) {
+ fbc->no_fbc_reason = "not enough stolen memory";
+ return;
+ }
+
+ drm_dbg_kms(&i915->drm, "Enabling FBC on [PLANE:%d:%s]\n",
+ plane->base.base.id, plane->base.name);
+	fbc->no_fbc_reason = "FBC enabled but not active yet";
+
+ intel_fbc_update_state(state, crtc, plane);
+
+ intel_fbc_program_workarounds(fbc);
+ intel_fbc_program_cfb(fbc);
+}
+
+/**
+ * intel_fbc_disable - disable FBC if it's associated with crtc
+ * @crtc: the CRTC
+ *
+ * This function disables FBC if it's associated with the provided CRTC.
+ */
+void intel_fbc_disable(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_plane *plane;
+
+ for_each_intel_plane(&i915->drm, plane) {
+ struct intel_fbc *fbc = plane->fbc;
+
+ if (!fbc || plane->pipe != crtc->pipe)
+ continue;
+
+ mutex_lock(&fbc->lock);
+ if (fbc->state.plane == plane)
+ __intel_fbc_disable(fbc);
+ mutex_unlock(&fbc->lock);
+ }
+}
+
+void intel_fbc_update(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct intel_plane_state *plane_state;
+ struct intel_plane *plane;
+ int i;
+
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+ struct intel_fbc *fbc = plane->fbc;
+
+ if (!fbc || plane->pipe != crtc->pipe)
+ continue;
+
+ mutex_lock(&fbc->lock);
+
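+		/*
+		 * On a fastset (update_pipe) where FBC is no longer possible
+		 * on this plane, disable it; otherwise try to (re)enable FBC
+		 * with the new state.
+		 */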
+ if (crtc_state->update_pipe && plane_state->no_fbc_reason) {
+ if (fbc->state.plane == plane)
+ __intel_fbc_disable(fbc);
+ } else {
+ __intel_fbc_enable(state, crtc, plane);
+ }
+
+ mutex_unlock(&fbc->lock);
+ }
+}
+
+static void intel_fbc_underrun_work_fn(struct work_struct *work)
+{
+ struct intel_fbc *fbc = container_of(work, typeof(*fbc), underrun_work);
+ struct drm_i915_private *i915 = fbc->i915;
+
+ mutex_lock(&fbc->lock);
+
+ /* Maybe we were scheduled twice. */
+ if (fbc->underrun_detected || !fbc->state.plane)
+ goto out;
+
+ drm_dbg_kms(&i915->drm, "Disabling FBC due to FIFO underrun.\n");
+ fbc->underrun_detected = true;
+
+ intel_fbc_deactivate(fbc, "FIFO underrun");
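+	/* let the deactivation take effect before tearing FBC down */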
+ if (!fbc->flip_pending)
+ intel_crtc_wait_for_next_vblank(intel_crtc_for_pipe(i915, fbc->state.plane->pipe));
+ __intel_fbc_disable(fbc);
+out:
+ mutex_unlock(&fbc->lock);
+}
+
+static void __intel_fbc_reset_underrun(struct intel_fbc *fbc)
+{
+ struct drm_i915_private *i915 = fbc->i915;
+
+ cancel_work_sync(&fbc->underrun_work);
+
+ mutex_lock(&fbc->lock);
+
+ if (fbc->underrun_detected) {
+ drm_dbg_kms(&i915->drm,
+ "Re-allowing FBC after fifo underrun\n");
+ fbc->no_fbc_reason = "FIFO underrun cleared";
+ }
+
+ fbc->underrun_detected = false;
+ mutex_unlock(&fbc->lock);
+}
+
+/**
+ * intel_fbc_reset_underrun - reset FBC FIFO underrun status.
+ * @i915: the i915 device
+ *
+ * See intel_fbc_handle_fifo_underrun_irq(). For automated testing we
+ * want to re-enable FBC after an underrun to increase test coverage.
+ */
+void intel_fbc_reset_underrun(struct drm_i915_private *i915)
+{
+ struct intel_fbc *fbc;
+ enum intel_fbc_id fbc_id;
+
+ for_each_intel_fbc(i915, fbc, fbc_id)
+ __intel_fbc_reset_underrun(fbc);
+}
+
+static void __intel_fbc_handle_fifo_underrun_irq(struct intel_fbc *fbc)
+{
+ /*
+ * There's no guarantee that underrun_detected won't be set to true
+ * right after this check and before the work is scheduled, but that's
+ * not a problem since we'll check it again under the work function
+ * while FBC is locked. This check here is just to prevent us from
+ * unnecessarily scheduling the work, and it relies on the fact that we
+ * never switch underrun_detected back to false after it's true.
+ */
+ if (READ_ONCE(fbc->underrun_detected))
+ return;
+
+ schedule_work(&fbc->underrun_work);
+}
+
+/**
+ * intel_fbc_handle_fifo_underrun_irq - disable FBC when we get a FIFO underrun
+ * @i915: i915 device
+ *
+ * Without FBC, most underruns are harmless and don't really cause too many
+ * problems, except for an annoying message on dmesg. With FBC, underruns can
+ * become black screens or even worse, especially when paired with bad
+ * watermarks. So in order for us to be on the safe side, completely disable FBC
+ * in case we ever detect a FIFO underrun on any pipe. An underrun on any pipe
+ * already suggests that watermarks may be bad, so try to be as safe as
+ * possible.
+ *
+ * This function is called from the IRQ handler.
+ */
+void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *i915)
+{
+ struct intel_fbc *fbc;
+ enum intel_fbc_id fbc_id;
+
+ for_each_intel_fbc(i915, fbc, fbc_id)
+ __intel_fbc_handle_fifo_underrun_irq(fbc);
+}
+
+/*
+ * The DDX driver changes its behavior depending on the value it reads from
+ * i915.enable_fbc, so sanitize it by translating the default value into either
+ * 0 or 1 in order to allow it to know what's going on.
+ *
+ * Notice that this is done at driver initialization and we still allow user
+ * space to change the value during runtime without sanitizing it again. IGT
+ * relies on being able to change i915.enable_fbc at runtime.
+ */
+static int intel_sanitize_fbc_option(struct drm_i915_private *i915)
+{
+ if (i915->params.enable_fbc >= 0)
+ return !!i915->params.enable_fbc;
+
+ if (!HAS_FBC(i915))
+ return 0;
+
+ if (IS_BROADWELL(i915) || DISPLAY_VER(i915) >= 9)
+ return 1;
+
+ return 0;
+}
+
+static bool need_fbc_vtd_wa(struct drm_i915_private *i915)
+{
+ /* WaFbcTurnOffFbcWhenHyperVisorIsUsed:skl,bxt */
+ if (i915_vtd_active(i915) &&
+ (IS_SKYLAKE(i915) || IS_BROXTON(i915))) {
+ drm_info(&i915->drm,
+ "Disabling framebuffer compression (FBC) to prevent screen flicker with VT-d enabled\n");
+ return true;
+ }
+
+ return false;
+}
+
+void intel_fbc_add_plane(struct intel_fbc *fbc, struct intel_plane *plane)
+{
+ plane->fbc = fbc;
+}
+
+static struct intel_fbc *intel_fbc_create(struct drm_i915_private *i915,
+ enum intel_fbc_id fbc_id)
+{
+ struct intel_fbc *fbc;
+
+ fbc = kzalloc(sizeof(*fbc), GFP_KERNEL);
+ if (!fbc)
+ return NULL;
+
+ fbc->id = fbc_id;
+ fbc->i915 = i915;
+ INIT_WORK(&fbc->underrun_work, intel_fbc_underrun_work_fn);
+ mutex_init(&fbc->lock);
+
+ if (DISPLAY_VER(i915) >= 7)
+ fbc->funcs = &ivb_fbc_funcs;
+ else if (DISPLAY_VER(i915) == 6)
+ fbc->funcs = &snb_fbc_funcs;
+ else if (DISPLAY_VER(i915) == 5)
+ fbc->funcs = &ilk_fbc_funcs;
+ else if (IS_G4X(i915))
+ fbc->funcs = &g4x_fbc_funcs;
+ else if (DISPLAY_VER(i915) == 4)
+ fbc->funcs = &i965_fbc_funcs;
+ else
+ fbc->funcs = &i8xx_fbc_funcs;
+
+ return fbc;
+}
+
+/**
+ * intel_fbc_init - Initialize FBC
+ * @i915: the i915 device
+ *
+ * This function might be called during PM init process.
+ */
+void intel_fbc_init(struct drm_i915_private *i915)
+{
+ enum intel_fbc_id fbc_id;
+
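+	/* The compressed framebuffer lives in stolen memory, so no stolen means no FBC. */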
+ if (!drm_mm_initialized(&i915->mm.stolen))
+ RUNTIME_INFO(i915)->fbc_mask = 0;
+
+ if (need_fbc_vtd_wa(i915))
+ RUNTIME_INFO(i915)->fbc_mask = 0;
+
+ i915->params.enable_fbc = intel_sanitize_fbc_option(i915);
+ drm_dbg_kms(&i915->drm, "Sanitized enable_fbc value: %d\n",
+ i915->params.enable_fbc);
+
+ for_each_fbc_id(i915, fbc_id)
+ i915->display.fbc[fbc_id] = intel_fbc_create(i915, fbc_id);
+}
+
+/**
+ * intel_fbc_sanitize - Sanitize FBC
+ * @i915: the i915 device
+ *
+ * Make sure FBC is initially disabled since we have no
+ * idea e.g. into which parts of stolen memory it might be
+ * scribbling.
+ */
+void intel_fbc_sanitize(struct drm_i915_private *i915)
+{
+ struct intel_fbc *fbc;
+ enum intel_fbc_id fbc_id;
+
+ for_each_intel_fbc(i915, fbc, fbc_id) {
+ if (intel_fbc_hw_is_active(fbc))
+ intel_fbc_hw_deactivate(fbc);
+ }
+}
+
+static int intel_fbc_debugfs_status_show(struct seq_file *m, void *unused)
+{
+ struct intel_fbc *fbc = m->private;
+ struct drm_i915_private *i915 = fbc->i915;
+ struct intel_plane *plane;
+ intel_wakeref_t wakeref;
+
+ drm_modeset_lock_all(&i915->drm);
+
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ mutex_lock(&fbc->lock);
+
+ if (fbc->active) {
+ seq_puts(m, "FBC enabled\n");
+ seq_printf(m, "Compressing: %s\n",
+ str_yes_no(intel_fbc_is_compressing(fbc)));
+ } else {
+ seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
+ }
+
+ for_each_intel_plane(&i915->drm, plane) {
+ const struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
+
+ if (plane->fbc != fbc)
+ continue;
+
+ seq_printf(m, "%c [PLANE:%d:%s]: %s\n",
+ fbc->state.plane == plane ? '*' : ' ',
+ plane->base.base.id, plane->base.name,
+ plane_state->no_fbc_reason ?: "FBC possible");
+ }
+
+ mutex_unlock(&fbc->lock);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+
+ drm_modeset_unlock_all(&i915->drm);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(intel_fbc_debugfs_status);
+
+static int intel_fbc_debugfs_false_color_get(void *data, u64 *val)
+{
+ struct intel_fbc *fbc = data;
+
+ *val = fbc->false_color;
+
+ return 0;
+}
+
+static int intel_fbc_debugfs_false_color_set(void *data, u64 val)
+{
+ struct intel_fbc *fbc = data;
+
+ mutex_lock(&fbc->lock);
+
+ fbc->false_color = val;
+
+ if (fbc->active)
+ fbc->funcs->set_false_color(fbc, fbc->false_color);
+
+ mutex_unlock(&fbc->lock);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(intel_fbc_debugfs_false_color_fops,
+ intel_fbc_debugfs_false_color_get,
+ intel_fbc_debugfs_false_color_set,
+ "%llu\n");
+
+static void intel_fbc_debugfs_add(struct intel_fbc *fbc,
+ struct dentry *parent)
+{
+ debugfs_create_file("i915_fbc_status", 0444, parent,
+ fbc, &intel_fbc_debugfs_status_fops);
+
+ if (fbc->funcs->set_false_color)
+ debugfs_create_file("i915_fbc_false_color", 0644, parent,
+ fbc, &intel_fbc_debugfs_false_color_fops);
+}
+
+void intel_fbc_crtc_debugfs_add(struct intel_crtc *crtc)
+{
+ struct intel_plane *plane = to_intel_plane(crtc->base.primary);
+
+ if (plane->fbc)
+ intel_fbc_debugfs_add(plane->fbc, crtc->base.debugfs_entry);
+}
+
+/* FIXME: remove this once igt is on board with per-crtc stuff */
+void intel_fbc_debugfs_register(struct drm_i915_private *i915)
+{
+ struct drm_minor *minor = i915->drm.primary;
+ struct intel_fbc *fbc;
+
+ fbc = i915->display.fbc[INTEL_FBC_A];
+ if (fbc)
+ intel_fbc_debugfs_add(fbc, minor->debugfs_root);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.h b/drivers/gpu/drm/i915/display/intel_fbc.h
new file mode 100644
index 000000000..4adb98afe
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_fbc.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_FBC_H__
+#define __INTEL_FBC_H__
+
+#include <linux/types.h>
+
+enum fb_op_origin;
+struct drm_i915_private;
+struct intel_atomic_state;
+struct intel_crtc;
+struct intel_crtc_state;
+struct intel_fbc;
+struct intel_plane;
+struct intel_plane_state;
+
+enum intel_fbc_id {
+ INTEL_FBC_A,
+ INTEL_FBC_B,
+
+ I915_MAX_FBCS,
+};
+
+int intel_fbc_atomic_check(struct intel_atomic_state *state);
+bool intel_fbc_pre_update(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+void intel_fbc_post_update(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+void intel_fbc_init(struct drm_i915_private *dev_priv);
+void intel_fbc_cleanup(struct drm_i915_private *dev_priv);
+void intel_fbc_sanitize(struct drm_i915_private *dev_priv);
+void intel_fbc_update(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+void intel_fbc_disable(struct intel_crtc *crtc);
+void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
+ unsigned int frontbuffer_bits,
+ enum fb_op_origin origin);
+void intel_fbc_flush(struct drm_i915_private *dev_priv,
+ unsigned int frontbuffer_bits, enum fb_op_origin origin);
+void intel_fbc_add_plane(struct intel_fbc *fbc, struct intel_plane *plane);
+void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *i915);
+void intel_fbc_reset_underrun(struct drm_i915_private *i915);
+void intel_fbc_crtc_debugfs_add(struct intel_crtc *crtc);
+void intel_fbc_debugfs_register(struct drm_i915_private *i915);
+
+#endif /* __INTEL_FBC_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
new file mode 100644
index 000000000..968915000
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -0,0 +1,733 @@
+/*
+ * Copyright © 2007 David Airlie
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * David Airlie
+ */
+
+#include <linux/async.h>
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/sysrq.h>
+#include <linux/tty.h>
+#include <linux/vga_switcheroo.h>
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_fourcc.h>
+
+#include "gem/i915_gem_lmem.h"
+
+#include "i915_drv.h"
+#include "intel_display_types.h"
+#include "intel_fb.h"
+#include "intel_fb_pin.h"
+#include "intel_fbdev.h"
+#include "intel_frontbuffer.h"
+
+struct intel_fbdev {
+ struct drm_fb_helper helper;
+ struct intel_framebuffer *fb;
+ struct i915_vma *vma;
+ unsigned long vma_flags;
+ async_cookie_t cookie;
+ int preferred_bpp;
+
+ /* Whether or not fbdev hpd processing is temporarily suspended */
+ bool hpd_suspended: 1;
+ /* Set when a hotplug was received while HPD processing was suspended */
+ bool hpd_waiting: 1;
+
+ /* Protects hpd_suspended */
+ struct mutex hpd_lock;
+};
+
+static struct intel_frontbuffer *to_frontbuffer(struct intel_fbdev *ifbdev)
+{
+ return ifbdev->fb->frontbuffer;
+}
+
+static void intel_fbdev_invalidate(struct intel_fbdev *ifbdev)
+{
+ intel_frontbuffer_invalidate(to_frontbuffer(ifbdev), ORIGIN_CPU);
+}
+
+static int intel_fbdev_set_par(struct fb_info *info)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+ struct intel_fbdev *ifbdev =
+ container_of(fb_helper, struct intel_fbdev, helper);
+ int ret;
+
+ ret = drm_fb_helper_set_par(info);
+ if (ret == 0)
+ intel_fbdev_invalidate(ifbdev);
+
+ return ret;
+}
+
+static int intel_fbdev_blank(int blank, struct fb_info *info)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+ struct intel_fbdev *ifbdev =
+ container_of(fb_helper, struct intel_fbdev, helper);
+ int ret;
+
+ ret = drm_fb_helper_blank(blank, info);
+ if (ret == 0)
+ intel_fbdev_invalidate(ifbdev);
+
+ return ret;
+}
+
+static int intel_fbdev_pan_display(struct fb_var_screeninfo *var,
+ struct fb_info *info)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+ struct intel_fbdev *ifbdev =
+ container_of(fb_helper, struct intel_fbdev, helper);
+ int ret;
+
+ ret = drm_fb_helper_pan_display(var, info);
+ if (ret == 0)
+ intel_fbdev_invalidate(ifbdev);
+
+ return ret;
+}
+
+static const struct fb_ops intelfb_ops = {
+ .owner = THIS_MODULE,
+ DRM_FB_HELPER_DEFAULT_OPS,
+ .fb_set_par = intel_fbdev_set_par,
+ .fb_fillrect = drm_fb_helper_cfb_fillrect,
+ .fb_copyarea = drm_fb_helper_cfb_copyarea,
+ .fb_imageblit = drm_fb_helper_cfb_imageblit,
+ .fb_pan_display = intel_fbdev_pan_display,
+ .fb_blank = intel_fbdev_blank,
+};
+
+static int intelfb_alloc(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct intel_fbdev *ifbdev =
+ container_of(helper, struct intel_fbdev, helper);
+ struct drm_framebuffer *fb;
+ struct drm_device *dev = helper->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_mode_fb_cmd2 mode_cmd = {};
+ struct drm_i915_gem_object *obj;
+ int size;
+
+ /* we don't do packed 24bpp */
+ if (sizes->surface_bpp == 24)
+ sizes->surface_bpp = 32;
+
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
+
+ mode_cmd.pitches[0] = ALIGN(mode_cmd.width *
+ DIV_ROUND_UP(sizes->surface_bpp, 8), 64);
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+ sizes->surface_depth);
+
+ size = mode_cmd.pitches[0] * mode_cmd.height;
+ size = PAGE_ALIGN(size);
+
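+	/*
+	 * Allocation preference: device local memory on discrete parts,
+	 * otherwise stolen memory (if the fb leaves enough room for FBC
+	 * and friends), falling back to regular shmem.
+	 */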
+ obj = ERR_PTR(-ENODEV);
+ if (HAS_LMEM(dev_priv)) {
+ obj = i915_gem_object_create_lmem(dev_priv, size,
+ I915_BO_ALLOC_CONTIGUOUS);
+ } else {
+ /*
+ * If the FB is too big, just don't use it since fbdev is not very
+ * important and we should probably use that space with FBC or other
+ * features.
+ */
+ if (size * 2 < dev_priv->stolen_usable_size)
+ obj = i915_gem_object_create_stolen(dev_priv, size);
+ if (IS_ERR(obj))
+ obj = i915_gem_object_create_shmem(dev_priv, size);
+ }
+
+ if (IS_ERR(obj)) {
+ drm_err(&dev_priv->drm, "failed to allocate framebuffer (%pe)\n", obj);
+ return PTR_ERR(obj);
+ }
+
+ fb = intel_framebuffer_create(obj, &mode_cmd);
+ i915_gem_object_put(obj);
+ if (IS_ERR(fb))
+ return PTR_ERR(fb);
+
+ ifbdev->fb = to_intel_framebuffer(fb);
+ return 0;
+}
+
+static int intelfb_create(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct intel_fbdev *ifbdev =
+ container_of(helper, struct intel_fbdev, helper);
+ struct intel_framebuffer *intel_fb = ifbdev->fb;
+ struct drm_device *dev = helper->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
+ struct i915_ggtt *ggtt = to_gt(dev_priv)->ggtt;
+ const struct i915_gtt_view view = {
+ .type = I915_GTT_VIEW_NORMAL,
+ };
+ intel_wakeref_t wakeref;
+ struct fb_info *info;
+ struct i915_vma *vma;
+ unsigned long flags = 0;
+ bool prealloc = false;
+ void __iomem *vaddr;
+ struct drm_i915_gem_object *obj;
+ struct i915_gem_ww_ctx ww;
+ int ret;
+
+ mutex_lock(&ifbdev->hpd_lock);
+ ret = ifbdev->hpd_suspended ? -EAGAIN : 0;
+ mutex_unlock(&ifbdev->hpd_lock);
+ if (ret)
+ return ret;
+
+ if (intel_fb &&
+ (sizes->fb_width > intel_fb->base.width ||
+ sizes->fb_height > intel_fb->base.height)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "BIOS fb too small (%dx%d), we require (%dx%d),"
+ " releasing it\n",
+ intel_fb->base.width, intel_fb->base.height,
+ sizes->fb_width, sizes->fb_height);
+ drm_framebuffer_put(&intel_fb->base);
+ intel_fb = ifbdev->fb = NULL;
+ }
+ if (!intel_fb || drm_WARN_ON(dev, !intel_fb_obj(&intel_fb->base))) {
+ drm_dbg_kms(&dev_priv->drm,
+ "no BIOS fb, allocating a new one\n");
+ ret = intelfb_alloc(helper, sizes);
+ if (ret)
+ return ret;
+ intel_fb = ifbdev->fb;
+ } else {
+ drm_dbg_kms(&dev_priv->drm, "re-using BIOS fb\n");
+ prealloc = true;
+ sizes->fb_width = intel_fb->base.width;
+ sizes->fb_height = intel_fb->base.height;
+ }
+
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+
+ /* Pin the GGTT vma for our access via info->screen_base.
+ * This also validates that any existing fb inherited from the
+	 * BIOS is suitable for our own access.
+ */
+ vma = intel_pin_and_fence_fb_obj(&ifbdev->fb->base, false,
+ &view, false, &flags);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto out_unlock;
+ }
+
+ info = drm_fb_helper_alloc_fbi(helper);
+ if (IS_ERR(info)) {
+ drm_err(&dev_priv->drm, "Failed to allocate fb_info (%pe)\n", info);
+ ret = PTR_ERR(info);
+ goto out_unpin;
+ }
+
+ ifbdev->helper.fb = &ifbdev->fb->base;
+
+ info->fbops = &intelfb_ops;
+
+ /* setup aperture base/size for vesafb takeover */
+ obj = intel_fb_obj(&intel_fb->base);
+ if (i915_gem_object_is_lmem(obj)) {
+ struct intel_memory_region *mem = obj->mm.region;
+
+ info->apertures->ranges[0].base = mem->io_start;
+ info->apertures->ranges[0].size = mem->io_size;
+
+ /* Use fbdev's framebuffer from lmem for discrete */
+ info->fix.smem_start =
+ (unsigned long)(mem->io_start +
+ i915_gem_object_get_dma_address(obj, 0));
+ info->fix.smem_len = obj->base.size;
+ } else {
+ info->apertures->ranges[0].base = ggtt->gmadr.start;
+ info->apertures->ranges[0].size = ggtt->mappable_end;
+
+ /* Our framebuffer is the entirety of fbdev's system memory */
+ info->fix.smem_start =
+ (unsigned long)(ggtt->gmadr.start + vma->node.start);
+ info->fix.smem_len = vma->size;
+ }
+
+ for_i915_gem_ww(&ww, ret, false) {
+ ret = i915_gem_object_lock(vma->obj, &ww);
+
+ if (ret)
+ continue;
+
+ vaddr = i915_vma_pin_iomap(vma);
+ if (IS_ERR(vaddr)) {
+ drm_err(&dev_priv->drm,
+ "Failed to remap framebuffer into virtual memory (%pe)\n", vaddr);
+ ret = PTR_ERR(vaddr);
+ continue;
+ }
+ }
+
+ if (ret)
+ goto out_unpin;
+
+ info->screen_base = vaddr;
+ info->screen_size = vma->size;
+
+ drm_fb_helper_fill_info(info, &ifbdev->helper, sizes);
+
+ /* If the object is shmemfs backed, it will have given us zeroed pages.
+ * If the object is stolen however, it will be full of whatever
+ * garbage was left in there.
+ */
+ if (!i915_gem_object_is_shmem(vma->obj) && !prealloc)
+ memset_io(info->screen_base, 0, info->screen_size);
+
+ /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
+
+ drm_dbg_kms(&dev_priv->drm, "allocated %dx%d fb: 0x%08x\n",
+ ifbdev->fb->base.width, ifbdev->fb->base.height,
+ i915_ggtt_offset(vma));
+ ifbdev->vma = vma;
+ ifbdev->vma_flags = flags;
+
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+ vga_switcheroo_client_fb_set(pdev, info);
+ return 0;
+
+out_unpin:
+ intel_unpin_fb_vma(vma, flags);
+out_unlock:
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+ return ret;
+}
+
+static const struct drm_fb_helper_funcs intel_fb_helper_funcs = {
+ .fb_probe = intelfb_create,
+};
+
+static void intel_fbdev_destroy(struct intel_fbdev *ifbdev)
+{
+ /* We rely on the object-free to release the VMA pinning for
+	 * the info->screen_base mapping. Leaking the VMA is simpler than
+ * trying to rectify all the possible error paths leading here.
+ */
+
+ drm_fb_helper_fini(&ifbdev->helper);
+
+ if (ifbdev->vma)
+ intel_unpin_fb_vma(ifbdev->vma, ifbdev->vma_flags);
+
+ if (ifbdev->fb)
+ drm_framebuffer_remove(&ifbdev->fb->base);
+
+ kfree(ifbdev);
+}
+
+/*
+ * Build an intel_fbdev struct using a BIOS allocated framebuffer, if possible.
+ * The core display code will have read out the current plane configuration,
+ * so we use that to figure out if there's an object for us to use as the
+ * fb, and if so, we re-use it for the fbdev configuration.
+ *
+ * Note we only support a single fb shared across pipes for boot (mostly for
+ * fbcon), so we just find the biggest and use that.
+ */
+static bool intel_fbdev_init_bios(struct drm_device *dev,
+ struct intel_fbdev *ifbdev)
+{
+ struct drm_i915_private *i915 = to_i915(dev);
+ struct intel_framebuffer *fb = NULL;
+ struct intel_crtc *crtc;
+ unsigned int max_size = 0;
+
+ /* Find the largest fb */
+ for_each_intel_crtc(dev, crtc) {
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ struct intel_plane *plane =
+ to_intel_plane(crtc->base.primary);
+ struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
+ struct drm_i915_gem_object *obj =
+ intel_fb_obj(plane_state->uapi.fb);
+
+ if (!crtc_state->uapi.active) {
+ drm_dbg_kms(&i915->drm,
+ "[CRTC:%d:%s] not active, skipping\n",
+ crtc->base.base.id, crtc->base.name);
+ continue;
+ }
+
+ if (!obj) {
+ drm_dbg_kms(&i915->drm,
+ "[PLANE:%d:%s] no fb, skipping\n",
+ plane->base.base.id, plane->base.name);
+ continue;
+ }
+
+ if (obj->base.size > max_size) {
+ drm_dbg_kms(&i915->drm,
+ "found possible fb from [PLANE:%d:%s]\n",
+ plane->base.base.id, plane->base.name);
+ fb = to_intel_framebuffer(plane_state->uapi.fb);
+ max_size = obj->base.size;
+ }
+ }
+
+ if (!fb) {
+ drm_dbg_kms(&i915->drm,
+ "no active fbs found, not using BIOS config\n");
+ goto out;
+ }
+
+ /* Now make sure all the pipes will fit into it */
+ for_each_intel_crtc(dev, crtc) {
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ struct intel_plane *plane =
+ to_intel_plane(crtc->base.primary);
+ unsigned int cur_size;
+
+ if (!crtc_state->uapi.active) {
+ drm_dbg_kms(&i915->drm,
+ "[CRTC:%d:%s] not active, skipping\n",
+ crtc->base.base.id, crtc->base.name);
+ continue;
+ }
+
+ drm_dbg_kms(&i915->drm, "checking [PLANE:%d:%s] for BIOS fb\n",
+ plane->base.base.id, plane->base.name);
+
+ /*
+ * See if the plane fb we found above will fit on this
+ * pipe. Note we need to use the selected fb's pitch and bpp
+		 * rather than the current pipe's, since they may differ.
+ */
+ cur_size = crtc_state->uapi.adjusted_mode.crtc_hdisplay;
+ cur_size = cur_size * fb->base.format->cpp[0];
+ if (fb->base.pitches[0] < cur_size) {
+ drm_dbg_kms(&i915->drm,
+ "fb not wide enough for [PLANE:%d:%s] (%d vs %d)\n",
+ plane->base.base.id, plane->base.name,
+ cur_size, fb->base.pitches[0]);
+ fb = NULL;
+ break;
+ }
+
+ cur_size = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
+ cur_size = intel_fb_align_height(&fb->base, 0, cur_size);
+ cur_size *= fb->base.pitches[0];
+ drm_dbg_kms(&i915->drm,
+ "[CRTC:%d:%s] area: %dx%d, bpp: %d, size: %d\n",
+ crtc->base.base.id, crtc->base.name,
+ crtc_state->uapi.adjusted_mode.crtc_hdisplay,
+ crtc_state->uapi.adjusted_mode.crtc_vdisplay,
+ fb->base.format->cpp[0] * 8,
+ cur_size);
+
+ if (cur_size > max_size) {
+ drm_dbg_kms(&i915->drm,
+ "fb not big enough for [PLANE:%d:%s] (%d vs %d)\n",
+ plane->base.base.id, plane->base.name,
+ cur_size, max_size);
+ fb = NULL;
+ break;
+ }
+
+ drm_dbg_kms(&i915->drm,
+ "fb big enough [PLANE:%d:%s] (%d >= %d)\n",
+ plane->base.base.id, plane->base.name,
+ max_size, cur_size);
+ }
+
+ if (!fb) {
+ drm_dbg_kms(&i915->drm,
+ "BIOS fb not suitable for all pipes, not using\n");
+ goto out;
+ }
+
+ ifbdev->preferred_bpp = fb->base.format->cpp[0] * 8;
+ ifbdev->fb = fb;
+
+ drm_framebuffer_get(&ifbdev->fb->base);
+
+ /* Final pass to check if any active pipes don't have fbs */
+ for_each_intel_crtc(dev, crtc) {
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ struct intel_plane *plane =
+ to_intel_plane(crtc->base.primary);
+ struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
+
+ if (!crtc_state->uapi.active)
+ continue;
+
+ drm_WARN(dev, !plane_state->uapi.fb,
+ "re-used BIOS config but lost an fb on [PLANE:%d:%s]\n",
+ plane->base.base.id, plane->base.name);
+ }
+
+ drm_dbg_kms(&i915->drm, "using BIOS fb for initial console\n");
+ return true;
+
+out:
+ return false;
+}
+
+static void intel_fbdev_suspend_worker(struct work_struct *work)
+{
+ intel_fbdev_set_suspend(&container_of(work,
+ struct drm_i915_private,
+ display.fbdev.suspend_work)->drm,
+ FBINFO_STATE_RUNNING,
+ true);
+}
+
+int intel_fbdev_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_fbdev *ifbdev;
+ int ret;
+
+ if (drm_WARN_ON(dev, !HAS_DISPLAY(dev_priv)))
+ return -ENODEV;
+
+ ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL);
+ if (ifbdev == NULL)
+ return -ENOMEM;
+
+ mutex_init(&ifbdev->hpd_lock);
+ drm_fb_helper_prepare(dev, &ifbdev->helper, &intel_fb_helper_funcs);
+
+ if (!intel_fbdev_init_bios(dev, ifbdev))
+ ifbdev->preferred_bpp = 32;
+
+ ret = drm_fb_helper_init(dev, &ifbdev->helper);
+ if (ret) {
+ kfree(ifbdev);
+ return ret;
+ }
+
+ dev_priv->display.fbdev.fbdev = ifbdev;
+ INIT_WORK(&dev_priv->display.fbdev.suspend_work, intel_fbdev_suspend_worker);
+
+ return 0;
+}
+
+static void intel_fbdev_initial_config(void *data, async_cookie_t cookie)
+{
+ struct intel_fbdev *ifbdev = data;
+
+	/* Due to peculiar init order wrt hpd handling this is separate. */
+ if (drm_fb_helper_initial_config(&ifbdev->helper,
+ ifbdev->preferred_bpp))
+ intel_fbdev_unregister(to_i915(ifbdev->helper.dev));
+}
+
+void intel_fbdev_initial_config_async(struct drm_device *dev)
+{
+ struct intel_fbdev *ifbdev = to_i915(dev)->display.fbdev.fbdev;
+
+ if (!ifbdev)
+ return;
+
+ ifbdev->cookie = async_schedule(intel_fbdev_initial_config, ifbdev);
+}
+
+static void intel_fbdev_sync(struct intel_fbdev *ifbdev)
+{
+ if (!ifbdev->cookie)
+ return;
+
+ /* Only serialises with all preceding async calls, hence +1 */
+ async_synchronize_cookie(ifbdev->cookie + 1);
+ ifbdev->cookie = 0;
+}
+
+void intel_fbdev_unregister(struct drm_i915_private *dev_priv)
+{
+ struct intel_fbdev *ifbdev = dev_priv->display.fbdev.fbdev;
+
+ if (!ifbdev)
+ return;
+
+ intel_fbdev_set_suspend(&dev_priv->drm, FBINFO_STATE_SUSPENDED, true);
+
+ if (!current_is_async())
+ intel_fbdev_sync(ifbdev);
+
+ drm_fb_helper_unregister_fbi(&ifbdev->helper);
+}
+
+void intel_fbdev_fini(struct drm_i915_private *dev_priv)
+{
+ struct intel_fbdev *ifbdev = fetch_and_zero(&dev_priv->display.fbdev.fbdev);
+
+ if (!ifbdev)
+ return;
+
+ intel_fbdev_destroy(ifbdev);
+}
+
+/* Suspends/resumes fbdev processing of incoming HPD events. When resuming HPD
+ * processing, fbdev will perform a full connector reprobe if a hotplug event
+ * was received while HPD was suspended.
+ */
+static void intel_fbdev_hpd_set_suspend(struct drm_i915_private *i915, int state)
+{
+ struct intel_fbdev *ifbdev = i915->display.fbdev.fbdev;
+ bool send_hpd = false;
+
+ mutex_lock(&ifbdev->hpd_lock);
+ ifbdev->hpd_suspended = state == FBINFO_STATE_SUSPENDED;
+ send_hpd = !ifbdev->hpd_suspended && ifbdev->hpd_waiting;
+ ifbdev->hpd_waiting = false;
+ mutex_unlock(&ifbdev->hpd_lock);
+
+ if (send_hpd) {
+ drm_dbg_kms(&i915->drm, "Handling delayed fbcon HPD event\n");
+ drm_fb_helper_hotplug_event(&ifbdev->helper);
+ }
+}
+
+void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_fbdev *ifbdev = dev_priv->display.fbdev.fbdev;
+ struct fb_info *info;
+
+ if (!ifbdev)
+ return;
+
+ if (drm_WARN_ON(&dev_priv->drm, !HAS_DISPLAY(dev_priv)))
+ return;
+
+ if (!ifbdev->vma)
+ goto set_suspend;
+
+ info = ifbdev->helper.fbdev;
+
+ if (synchronous) {
+ /* Flush any pending work to turn the console on, and then
+ * wait to turn it off. It must be synchronous as we are
+ * about to suspend or unload the driver.
+ *
+ * Note that from within the work-handler, we cannot flush
+ * ourselves, so only flush outstanding work upon suspend!
+ */
+ if (state != FBINFO_STATE_RUNNING)
+ flush_work(&dev_priv->display.fbdev.suspend_work);
+
+ console_lock();
+ } else {
+ /*
+		 * The console lock can be pretty contended on resume due
+ * to all the printk activity. Try to keep it out of the hot
+ * path of resume if possible.
+ */
+ drm_WARN_ON(dev, state != FBINFO_STATE_RUNNING);
+ if (!console_trylock()) {
+ /* Don't block our own workqueue as this can
+ * be run in parallel with other i915.ko tasks.
+ */
+ schedule_work(&dev_priv->display.fbdev.suspend_work);
+ return;
+ }
+ }
+
+ /* On resume from hibernation: If the object is shmemfs backed, it has
+ * been restored from swap. If the object is stolen however, it will be
+ * full of whatever garbage was left in there.
+ */
+ if (state == FBINFO_STATE_RUNNING &&
+ !i915_gem_object_is_shmem(intel_fb_obj(&ifbdev->fb->base)))
+ memset_io(info->screen_base, 0, info->screen_size);
+
+ drm_fb_helper_set_suspend(&ifbdev->helper, state);
+ console_unlock();
+
+set_suspend:
+ intel_fbdev_hpd_set_suspend(dev_priv, state);
+}
+
+void intel_fbdev_output_poll_changed(struct drm_device *dev)
+{
+ struct intel_fbdev *ifbdev = to_i915(dev)->display.fbdev.fbdev;
+ bool send_hpd;
+
+ if (!ifbdev)
+ return;
+
+ intel_fbdev_sync(ifbdev);
+
+ mutex_lock(&ifbdev->hpd_lock);
+ send_hpd = !ifbdev->hpd_suspended;
+ ifbdev->hpd_waiting = true;
+ mutex_unlock(&ifbdev->hpd_lock);
+
+ if (send_hpd && (ifbdev->vma || ifbdev->helper.deferred_setup))
+ drm_fb_helper_hotplug_event(&ifbdev->helper);
+}
+
+void intel_fbdev_restore_mode(struct drm_device *dev)
+{
+ struct intel_fbdev *ifbdev = to_i915(dev)->display.fbdev.fbdev;
+
+ if (!ifbdev)
+ return;
+
+ intel_fbdev_sync(ifbdev);
+ if (!ifbdev->vma)
+ return;
+
+ if (drm_fb_helper_restore_fbdev_mode_unlocked(&ifbdev->helper) == 0)
+ intel_fbdev_invalidate(ifbdev);
+}
+
+struct intel_framebuffer *intel_fbdev_framebuffer(struct intel_fbdev *fbdev)
+{
+ if (!fbdev || !fbdev->helper.fb)
+ return NULL;
+
+ return to_intel_framebuffer(fbdev->helper.fb);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.h b/drivers/gpu/drm/i915/display/intel_fbdev.h
new file mode 100644
index 000000000..0e95e9472
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_FBDEV_H__
+#define __INTEL_FBDEV_H__
+
+#include <linux/types.h>
+
+struct drm_device;
+struct drm_i915_private;
+struct intel_fbdev;
+struct intel_framebuffer;
+
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+int intel_fbdev_init(struct drm_device *dev);
+void intel_fbdev_initial_config_async(struct drm_device *dev);
+void intel_fbdev_unregister(struct drm_i915_private *dev_priv);
+void intel_fbdev_fini(struct drm_i915_private *dev_priv);
+void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous);
+void intel_fbdev_output_poll_changed(struct drm_device *dev);
+void intel_fbdev_restore_mode(struct drm_device *dev);
+struct intel_framebuffer *intel_fbdev_framebuffer(struct intel_fbdev *fbdev);
+#else
+static inline int intel_fbdev_init(struct drm_device *dev)
+{
+ return 0;
+}
+
+static inline void intel_fbdev_initial_config_async(struct drm_device *dev)
+{
+}
+
+static inline void intel_fbdev_unregister(struct drm_i915_private *dev_priv)
+{
+}
+
+static inline void intel_fbdev_fini(struct drm_i915_private *dev_priv)
+{
+}
+
+static inline void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous)
+{
+}
+
+static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
+{
+}
+
+static inline void intel_fbdev_restore_mode(struct drm_device *dev)
+{
+}
+
+static inline struct intel_framebuffer *intel_fbdev_framebuffer(struct intel_fbdev *fbdev)
+{
+ return NULL;
+}
+#endif
+
+#endif /* __INTEL_FBDEV_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_fdi.c b/drivers/gpu/drm/i915/display/intel_fdi.c
new file mode 100644
index 000000000..7f47e5c85
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_fdi.c
@@ -0,0 +1,1076 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <linux/string_helpers.h>
+
+#include "intel_atomic.h"
+#include "intel_crtc.h"
+#include "intel_ddi.h"
+#include "intel_de.h"
+#include "intel_display_types.h"
+#include "intel_fdi.h"
+
+struct intel_fdi_funcs {
+ void (*fdi_link_train)(struct intel_crtc *crtc,
+ const struct intel_crtc_state *crtc_state);
+};
+
+static void assert_fdi_tx(struct drm_i915_private *dev_priv,
+ enum pipe pipe, bool state)
+{
+ bool cur_state;
+
+ if (HAS_DDI(dev_priv)) {
+ /*
+ * DDI does not have a specific FDI_TX register.
+ *
+ * FDI is never fed from EDP transcoder
+ * so pipe->transcoder cast is fine here.
+ */
+		enum transcoder cpu_transcoder = (enum transcoder)pipe;
+
+		cur_state = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder)) & TRANS_DDI_FUNC_ENABLE;
+ } else {
+ cur_state = intel_de_read(dev_priv, FDI_TX_CTL(pipe)) & FDI_TX_ENABLE;
+ }
+ I915_STATE_WARN(cur_state != state,
+ "FDI TX state assertion failure (expected %s, current %s)\n",
+ str_on_off(state), str_on_off(cur_state));
+}
+
+void assert_fdi_tx_enabled(struct drm_i915_private *i915, enum pipe pipe)
+{
+ assert_fdi_tx(i915, pipe, true);
+}
+
+void assert_fdi_tx_disabled(struct drm_i915_private *i915, enum pipe pipe)
+{
+ assert_fdi_tx(i915, pipe, false);
+}
+
+static void assert_fdi_rx(struct drm_i915_private *dev_priv,
+ enum pipe pipe, bool state)
+{
+ bool cur_state;
+
+ cur_state = intel_de_read(dev_priv, FDI_RX_CTL(pipe)) & FDI_RX_ENABLE;
+ I915_STATE_WARN(cur_state != state,
+ "FDI RX state assertion failure (expected %s, current %s)\n",
+ str_on_off(state), str_on_off(cur_state));
+}
+
+void assert_fdi_rx_enabled(struct drm_i915_private *i915, enum pipe pipe)
+{
+ assert_fdi_rx(i915, pipe, true);
+}
+
+void assert_fdi_rx_disabled(struct drm_i915_private *i915, enum pipe pipe)
+{
+ assert_fdi_rx(i915, pipe, false);
+}
+
+void assert_fdi_tx_pll_enabled(struct drm_i915_private *i915,
+ enum pipe pipe)
+{
+ bool cur_state;
+
+ /* ILK FDI PLL is always enabled */
+ if (IS_IRONLAKE(i915))
+ return;
+
+ /* On Haswell, DDI ports are responsible for the FDI PLL setup */
+ if (HAS_DDI(i915))
+ return;
+
+ cur_state = intel_de_read(i915, FDI_TX_CTL(pipe)) & FDI_TX_PLL_ENABLE;
+ I915_STATE_WARN(!cur_state, "FDI TX PLL assertion failure, should be active but is disabled\n");
+}
+
+static void assert_fdi_rx_pll(struct drm_i915_private *i915,
+ enum pipe pipe, bool state)
+{
+ bool cur_state;
+
+ cur_state = intel_de_read(i915, FDI_RX_CTL(pipe)) & FDI_RX_PLL_ENABLE;
+ I915_STATE_WARN(cur_state != state,
+ "FDI RX PLL assertion failure (expected %s, current %s)\n",
+ str_on_off(state), str_on_off(cur_state));
+}
+
+void assert_fdi_rx_pll_enabled(struct drm_i915_private *i915, enum pipe pipe)
+{
+ assert_fdi_rx_pll(i915, pipe, true);
+}
+
+void assert_fdi_rx_pll_disabled(struct drm_i915_private *i915, enum pipe pipe)
+{
+ assert_fdi_rx_pll(i915, pipe, false);
+}
+
+void intel_fdi_link_train(struct intel_crtc *crtc,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+ dev_priv->display.funcs.fdi->fdi_link_train(crtc, crtc_state);
+}
+
+/* units of 100MHz */
+static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
+{
+ if (crtc_state->hw.enable && crtc_state->has_pch_encoder)
+ return crtc_state->fdi_lanes;
+
+ return 0;
+}
+
+static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
+ struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_atomic_state *state = pipe_config->uapi.state;
+ struct intel_crtc *other_crtc;
+ struct intel_crtc_state *other_crtc_state;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "checking fdi config on pipe %c, lanes %i\n",
+ pipe_name(pipe), pipe_config->fdi_lanes);
+ if (pipe_config->fdi_lanes > 4) {
+ drm_dbg_kms(&dev_priv->drm,
+ "invalid fdi lane config on pipe %c: %i lanes\n",
+ pipe_name(pipe), pipe_config->fdi_lanes);
+ return -EINVAL;
+ }
+
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
+ if (pipe_config->fdi_lanes > 2) {
+ drm_dbg_kms(&dev_priv->drm,
+ "only 2 lanes on haswell, required: %i lanes\n",
+ pipe_config->fdi_lanes);
+ return -EINVAL;
+ } else {
+ return 0;
+ }
+ }
+
+ if (INTEL_NUM_PIPES(dev_priv) == 2)
+ return 0;
+
+ /* Ivybridge 3 pipe is really complicated */
+ switch (pipe) {
+ case PIPE_A:
+ return 0;
+ case PIPE_B:
+ if (pipe_config->fdi_lanes <= 2)
+ return 0;
+
+ other_crtc = intel_crtc_for_pipe(dev_priv, PIPE_C);
+ other_crtc_state =
+ intel_atomic_get_crtc_state(state, other_crtc);
+ if (IS_ERR(other_crtc_state))
+ return PTR_ERR(other_crtc_state);
+
+ if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
+ drm_dbg_kms(&dev_priv->drm,
+ "invalid shared fdi lane config on pipe %c: %i lanes\n",
+ pipe_name(pipe), pipe_config->fdi_lanes);
+ return -EINVAL;
+ }
+ return 0;
+ case PIPE_C:
+ if (pipe_config->fdi_lanes > 2) {
+ drm_dbg_kms(&dev_priv->drm,
+ "only 2 lanes on pipe %c: required %i lanes\n",
+ pipe_name(pipe), pipe_config->fdi_lanes);
+ return -EINVAL;
+ }
+
+ other_crtc = intel_crtc_for_pipe(dev_priv, PIPE_B);
+ other_crtc_state =
+ intel_atomic_get_crtc_state(state, other_crtc);
+ if (IS_ERR(other_crtc_state))
+ return PTR_ERR(other_crtc_state);
+
+ if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
+ drm_dbg_kms(&dev_priv->drm,
+ "fdi link B uses too many lanes to enable link C\n");
+ return -EINVAL;
+ }
+ return 0;
+ default:
+ MISSING_CASE(pipe);
+ return 0;
+ }
+}
+
+void intel_fdi_pll_freq_update(struct drm_i915_private *i915)
+{
+ if (IS_IRONLAKE(i915)) {
+ u32 fdi_pll_clk =
+ intel_de_read(i915, FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
+
+ i915->display.fdi.pll_freq = (fdi_pll_clk + 2) * 10000;
+ } else if (IS_SANDYBRIDGE(i915) || IS_IVYBRIDGE(i915)) {
+ i915->display.fdi.pll_freq = 270000;
+ } else {
+ return;
+ }
+
+ drm_dbg(&i915->drm, "FDI PLL freq=%d\n", i915->display.fdi.pll_freq);
+}
+
+int intel_fdi_link_freq(struct drm_i915_private *i915,
+ const struct intel_crtc_state *pipe_config)
+{
+ if (HAS_DDI(i915))
+ return pipe_config->port_clock; /* SPLL */
+ else
+ return i915->display.fdi.pll_freq;
+}
+
+int ilk_fdi_compute_config(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *i915 = to_i915(dev);
+ const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
+ int lane, link_bw, fdi_dotclock, ret;
+ bool needs_recompute = false;
+
+retry:
+ /* FDI is a binary signal running at ~2.7GHz, encoding
+ * each output octet as 10 bits. The actual frequency
+ * is stored as a divider into a 100MHz clock, and the
+ * mode pixel clock is stored in units of 1KHz.
+ * Hence the bw of each lane in terms of the mode signal
+ * is:
+ */
+ link_bw = intel_fdi_link_freq(i915, pipe_config);
+
+ fdi_dotclock = adjusted_mode->crtc_clock;
+
+ lane = ilk_get_lanes_required(fdi_dotclock, link_bw,
+ pipe_config->pipe_bpp);
+
+ pipe_config->fdi_lanes = lane;
+
+ intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
+ link_bw, &pipe_config->fdi_m_n, false);
+
+ ret = ilk_check_fdi_lanes(dev, crtc->pipe, pipe_config);
+ if (ret == -EDEADLK)
+ return ret;
+
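+	/* out of FDI bandwidth: retry with reduced pipe bpp, down to 18 bpp */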
+ if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
+ pipe_config->pipe_bpp -= 2*3;
+ drm_dbg_kms(&i915->drm,
+ "fdi link bw constraint, reducing pipe bpp to %i\n",
+ pipe_config->pipe_bpp);
+ needs_recompute = true;
+ pipe_config->bw_constrained = true;
+
+ goto retry;
+ }
+
+ if (needs_recompute)
+ return -EAGAIN;
+
+ return ret;
+}
+
+static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
+{
+ u32 temp;
+
+ temp = intel_de_read(dev_priv, SOUTH_CHICKEN1);
+ if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
+ return;
+
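+	/* bifurcation may only be changed while both FDI B and C receivers are disabled */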
+ drm_WARN_ON(&dev_priv->drm,
+ intel_de_read(dev_priv, FDI_RX_CTL(PIPE_B)) &
+ FDI_RX_ENABLE);
+ drm_WARN_ON(&dev_priv->drm,
+ intel_de_read(dev_priv, FDI_RX_CTL(PIPE_C)) &
+ FDI_RX_ENABLE);
+
+ temp &= ~FDI_BC_BIFURCATION_SELECT;
+ if (enable)
+ temp |= FDI_BC_BIFURCATION_SELECT;
+
+ drm_dbg_kms(&dev_priv->drm, "%sabling fdi C rx\n",
+ enable ? "en" : "dis");
+ intel_de_write(dev_priv, SOUTH_CHICKEN1, temp);
+ intel_de_posting_read(dev_priv, SOUTH_CHICKEN1);
+}
+
+static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
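+	/*
+	 * FDI B and C share lanes on IVB: pipe B may use all four lanes
+	 * only while the B/C bifurcation is disabled, and pipe C always
+	 * needs bifurcation to get its two lanes.
+	 */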
+ switch (crtc->pipe) {
+ case PIPE_A:
+ break;
+ case PIPE_B:
+ if (crtc_state->fdi_lanes > 2)
+ cpt_set_fdi_bc_bifurcation(dev_priv, false);
+ else
+ cpt_set_fdi_bc_bifurcation(dev_priv, true);
+
+ break;
+ case PIPE_C:
+ cpt_set_fdi_bc_bifurcation(dev_priv, true);
+
+ break;
+ default:
+ MISSING_CASE(crtc->pipe);
+ }
+}
+
+void intel_fdi_normal_train(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ enum pipe pipe = crtc->pipe;
+ i915_reg_t reg;
+ u32 temp;
+
+ /* enable normal train */
+ reg = FDI_TX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ if (IS_IVYBRIDGE(dev_priv)) {
+ temp &= ~FDI_LINK_TRAIN_NONE_IVB;
+ temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
+ } else {
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
+ }
+ intel_de_write(dev_priv, reg, temp);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ if (HAS_PCH_CPT(dev_priv)) {
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_NORMAL_CPT;
+ } else {
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_NONE;
+ }
+ intel_de_write(dev_priv, reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
+
+ /* wait one idle pattern time */
+ intel_de_posting_read(dev_priv, reg);
+ udelay(1000);
+
+ /* IVB wants error correction enabled */
+ if (IS_IVYBRIDGE(dev_priv))
+ intel_de_write(dev_priv, reg,
+ intel_de_read(dev_priv, reg) | FDI_FS_ERRC_ENABLE | FDI_FE_ERRC_ENABLE);
+}
+
+/* The FDI link training functions for ILK/Ibexpeak. */
+static void ilk_fdi_link_train(struct intel_crtc *crtc,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ enum pipe pipe = crtc->pipe;
+ i915_reg_t reg;
+ u32 temp, tries;
+
+ /*
+ * Write the TU size bits before fdi link training, so that error
+ * detection works.
+ */
+ intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
+ intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
+
+ /* FDI needs bits from pipe first */
+ assert_transcoder_enabled(dev_priv, crtc_state->cpu_transcoder);
+
+	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
+	   for the train result */
+ reg = FDI_RX_IMR(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~FDI_RX_SYMBOL_LOCK;
+ temp &= ~FDI_RX_BIT_LOCK;
+ intel_de_write(dev_priv, reg, temp);
+ intel_de_read(dev_priv, reg);
+ udelay(150);
+
+ /* enable CPU FDI TX and PCH FDI RX */
+ reg = FDI_TX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~FDI_DP_PORT_WIDTH_MASK;
+ temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);
+
+ intel_de_posting_read(dev_priv, reg);
+ udelay(150);
+
+	/* Ironlake workaround, enable clock pointer after FDI enable */
+ intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
+ FDI_RX_PHASE_SYNC_POINTER_OVR);
+ intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
+ FDI_RX_PHASE_SYNC_POINTER_OVR | FDI_RX_PHASE_SYNC_POINTER_EN);
+
+ reg = FDI_RX_IIR(pipe);
+ for (tries = 0; tries < 5; tries++) {
+ temp = intel_de_read(dev_priv, reg);
+ drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
+
+ if ((temp & FDI_RX_BIT_LOCK)) {
+ drm_dbg_kms(&dev_priv->drm, "FDI train 1 done.\n");
+ intel_de_write(dev_priv, reg, temp | FDI_RX_BIT_LOCK);
+ break;
+ }
+ }
+ if (tries == 5)
+ drm_err(&dev_priv->drm, "FDI train 1 fail!\n");
+
+ /* Train 2 */
+ reg = FDI_TX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_2;
+ intel_de_write(dev_priv, reg, temp);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_2;
+ intel_de_write(dev_priv, reg, temp);
+
+ intel_de_posting_read(dev_priv, reg);
+ udelay(150);
+
+ reg = FDI_RX_IIR(pipe);
+ for (tries = 0; tries < 5; tries++) {
+ temp = intel_de_read(dev_priv, reg);
+ drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
+
+ if (temp & FDI_RX_SYMBOL_LOCK) {
+ intel_de_write(dev_priv, reg,
+ temp | FDI_RX_SYMBOL_LOCK);
+ drm_dbg_kms(&dev_priv->drm, "FDI train 2 done.\n");
+ break;
+ }
+ }
+ if (tries == 5)
+ drm_err(&dev_priv->drm, "FDI train 2 fail!\n");
+
+ drm_dbg_kms(&dev_priv->drm, "FDI train done\n");
+
+}
+
+static const int snb_b_fdi_train_param[] = {
+ FDI_LINK_TRAIN_400MV_0DB_SNB_B,
+ FDI_LINK_TRAIN_400MV_6DB_SNB_B,
+ FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
+ FDI_LINK_TRAIN_800MV_0DB_SNB_B,
+};
+
+/* The FDI link training functions for SNB/Cougarpoint. */
+static void gen6_fdi_link_train(struct intel_crtc *crtc,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ enum pipe pipe = crtc->pipe;
+ i915_reg_t reg;
+ u32 temp, i, retry;
+
+ /*
+ * Write the TU size bits before fdi link training, so that error
+ * detection works.
+ */
+ intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
+ intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
+
+	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
+	   for the train result */
+ reg = FDI_RX_IMR(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~FDI_RX_SYMBOL_LOCK;
+ temp &= ~FDI_RX_BIT_LOCK;
+ intel_de_write(dev_priv, reg, temp);
+
+ intel_de_posting_read(dev_priv, reg);
+ udelay(150);
+
+ /* enable CPU FDI TX and PCH FDI RX */
+ reg = FDI_TX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~FDI_DP_PORT_WIDTH_MASK;
+ temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+ /* SNB-B */
+ temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
+ intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);
+
+ intel_de_write(dev_priv, FDI_RX_MISC(pipe),
+ FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ if (HAS_PCH_CPT(dev_priv)) {
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+ } else {
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ }
+ intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);
+
+ intel_de_posting_read(dev_priv, reg);
+ udelay(150);
+
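+	/* step through the four vswing/emphasis levels until bit lock is reported */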
+ for (i = 0; i < 4; i++) {
+ reg = FDI_TX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+ temp |= snb_b_fdi_train_param[i];
+ intel_de_write(dev_priv, reg, temp);
+
+ intel_de_posting_read(dev_priv, reg);
+ udelay(500);
+
+ for (retry = 0; retry < 5; retry++) {
+ reg = FDI_RX_IIR(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
+ if (temp & FDI_RX_BIT_LOCK) {
+ intel_de_write(dev_priv, reg,
+ temp | FDI_RX_BIT_LOCK);
+ drm_dbg_kms(&dev_priv->drm,
+ "FDI train 1 done.\n");
+ break;
+ }
+ udelay(50);
+ }
+ if (retry < 5)
+ break;
+ }
+ if (i == 4)
+ drm_err(&dev_priv->drm, "FDI train 1 fail!\n");
+
+ /* Train 2 */
+ reg = FDI_TX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_2;
+ if (IS_SANDYBRIDGE(dev_priv)) {
+ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+ /* SNB-B */
+ temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
+ }
+ intel_de_write(dev_priv, reg, temp);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ if (HAS_PCH_CPT(dev_priv)) {
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
+ } else {
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_2;
+ }
+ intel_de_write(dev_priv, reg, temp);
+
+ intel_de_posting_read(dev_priv, reg);
+ udelay(150);
+
+ for (i = 0; i < 4; i++) {
+ reg = FDI_TX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+ temp |= snb_b_fdi_train_param[i];
+ intel_de_write(dev_priv, reg, temp);
+
+ intel_de_posting_read(dev_priv, reg);
+ udelay(500);
+
+ for (retry = 0; retry < 5; retry++) {
+ reg = FDI_RX_IIR(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
+ if (temp & FDI_RX_SYMBOL_LOCK) {
+ intel_de_write(dev_priv, reg,
+ temp | FDI_RX_SYMBOL_LOCK);
+ drm_dbg_kms(&dev_priv->drm,
+ "FDI train 2 done.\n");
+ break;
+ }
+ udelay(50);
+ }
+ if (retry < 5)
+ break;
+ }
+ if (i == 4)
+ drm_err(&dev_priv->drm, "FDI train 2 fail!\n");
+
+ drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
+}
+
+/* Manual link training for Ivy Bridge A0 parts */
+static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ enum pipe pipe = crtc->pipe;
+ i915_reg_t reg;
+ u32 temp, i, j;
+
+ ivb_update_fdi_bc_bifurcation(crtc_state);
+
+ /*
+ * Write the TU size bits before fdi link training, so that error
+ * detection works.
+ */
+ intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
+ intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
+
+	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
+	   for the train result */
+ reg = FDI_RX_IMR(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~FDI_RX_SYMBOL_LOCK;
+ temp &= ~FDI_RX_BIT_LOCK;
+ intel_de_write(dev_priv, reg, temp);
+
+ intel_de_posting_read(dev_priv, reg);
+ udelay(150);
+
+ drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR before link train 0x%x\n",
+ intel_de_read(dev_priv, FDI_RX_IIR(pipe)));
+
+ /* Try each vswing and preemphasis setting twice before moving on */
+ for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
+ /* disable first in case we need to retry */
+ reg = FDI_TX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
+ temp &= ~FDI_TX_ENABLE;
+ intel_de_write(dev_priv, reg, temp);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~FDI_LINK_TRAIN_AUTO;
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp &= ~FDI_RX_ENABLE;
+ intel_de_write(dev_priv, reg, temp);
+
+ /* enable CPU FDI TX and PCH FDI RX */
+ reg = FDI_TX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~FDI_DP_PORT_WIDTH_MASK;
+ temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
+ temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
+ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+ temp |= snb_b_fdi_train_param[j/2];
+ temp |= FDI_COMPOSITE_SYNC;
+ intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);
+
+ intel_de_write(dev_priv, FDI_RX_MISC(pipe),
+ FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+ temp |= FDI_COMPOSITE_SYNC;
+ intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);
+
+ intel_de_posting_read(dev_priv, reg);
+ udelay(1); /* should be 0.5us */
+
+ for (i = 0; i < 4; i++) {
+ reg = FDI_RX_IIR(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
+
+ if (temp & FDI_RX_BIT_LOCK ||
+ (intel_de_read(dev_priv, reg) & FDI_RX_BIT_LOCK)) {
+ intel_de_write(dev_priv, reg,
+ temp | FDI_RX_BIT_LOCK);
+ drm_dbg_kms(&dev_priv->drm,
+ "FDI train 1 done, level %i.\n",
+ i);
+ break;
+ }
+ udelay(1); /* should be 0.5us */
+ }
+ if (i == 4) {
+ drm_dbg_kms(&dev_priv->drm,
+ "FDI train 1 fail on vswing %d\n", j / 2);
+ continue;
+ }
+
+ /* Train 2 */
+ reg = FDI_TX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~FDI_LINK_TRAIN_NONE_IVB;
+ temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
+ intel_de_write(dev_priv, reg, temp);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
+ intel_de_write(dev_priv, reg, temp);
+
+ intel_de_posting_read(dev_priv, reg);
+ udelay(2); /* should be 1.5us */
+
+ for (i = 0; i < 4; i++) {
+ reg = FDI_RX_IIR(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
+
+ if (temp & FDI_RX_SYMBOL_LOCK ||
+ (intel_de_read(dev_priv, reg) & FDI_RX_SYMBOL_LOCK)) {
+ intel_de_write(dev_priv, reg,
+ temp | FDI_RX_SYMBOL_LOCK);
+ drm_dbg_kms(&dev_priv->drm,
+ "FDI train 2 done, level %i.\n",
+ i);
+ goto train_done;
+ }
+ udelay(2); /* should be 1.5us */
+ }
+ if (i == 4)
+ drm_dbg_kms(&dev_priv->drm,
+ "FDI train 2 fail on vswing %d\n", j / 2);
+ }
+
+train_done:
+ drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
+}
+
+/* Starting with Haswell, different DDI ports can work in FDI mode for
+ * connection to the PCH-located connectors. For this, it is necessary to train
+ * both the DDI port and PCH receiver for the desired DDI buffer settings.
+ *
+ * The recommended port to work in FDI mode is DDI E, which we use here. Also,
+ * please note that when FDI mode is active on DDI E, it shares 2 lines with
+ * DDI A (which is used for eDP)
+ */
+void hsw_fdi_link_train(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 temp, i, rx_ctl_val;
+ int n_entries;
+
+ encoder->get_buf_trans(encoder, crtc_state, &n_entries);
+
+ hsw_prepare_dp_ddi_buffers(encoder, crtc_state);
+
+	/* Set the FDI_RX_MISC pwrdn lanes and the 2 workarounds listed in the
+ * mode set "sequence for CRT port" document:
+ * - TP1 to TP2 time with the default value
+ * - FDI delay to 90h
+ *
+ * WaFDIAutoLinkSetTimingOverrride:hsw
+ */
+ intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A),
+ FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2) | FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
+
+ /* Enable the PCH Receiver FDI PLL */
+ rx_ctl_val = dev_priv->display.fdi.rx_config | FDI_RX_ENHANCE_FRAME_ENABLE |
+ FDI_RX_PLL_ENABLE |
+ FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
+ intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
+ intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));
+ udelay(220);
+
+ /* Switch from Rawclk to PCDclk */
+ rx_ctl_val |= FDI_PCDCLK;
+ intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
+
+ /* Configure Port Clock Select */
+ drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll->info->id != DPLL_ID_SPLL);
+ intel_ddi_enable_clock(encoder, crtc_state);
+
+ /* Start the training iterating through available voltages and emphasis,
+ * testing each value twice. */
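+ /* Even/odd iteration pairs share one buffer translation entry:
+ * i = 0,1 -> entry 0, i = 2,3 -> entry 1, etc., hence the i / 2
+ * passed to DDI_BUF_TRANS_SELECT() below. */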
+ for (i = 0; i < n_entries * 2; i++) {
+ /* Configure DP_TP_CTL with auto-training */
+ intel_de_write(dev_priv, DP_TP_CTL(PORT_E),
+ DP_TP_CTL_FDI_AUTOTRAIN |
+ DP_TP_CTL_ENHANCED_FRAME_ENABLE |
+ DP_TP_CTL_LINK_TRAIN_PAT1 |
+ DP_TP_CTL_ENABLE);
+
+ /* Configure and enable DDI_BUF_CTL for DDI E with next voltage.
+ * DDI E does not support port reversal, the functionality is
+ * achieved on the PCH side in FDI_RX_CTL, so no need to set the
+ * port reversal bit */
+ intel_de_write(dev_priv, DDI_BUF_CTL(PORT_E),
+ DDI_BUF_CTL_ENABLE | ((crtc_state->fdi_lanes - 1) << 1) | DDI_BUF_TRANS_SELECT(i / 2));
+ intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E));
+
+ udelay(600);
+
+ /* Program PCH FDI Receiver TU */
+ intel_de_write(dev_priv, FDI_RX_TUSIZE1(PIPE_A), TU_SIZE(64));
+
+ /* Enable PCH FDI Receiver with auto-training */
+ rx_ctl_val |= FDI_RX_ENABLE | FDI_LINK_TRAIN_AUTO;
+ intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
+ intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));
+
+ /* Wait for FDI receiver lane calibration */
+ udelay(30);
+
+ /* Unset FDI_RX_MISC pwrdn lanes */
+ temp = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A));
+ temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
+ intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), temp);
+ intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A));
+
+ /* Wait for FDI auto training time */
+ udelay(5);
+
+ temp = intel_de_read(dev_priv, DP_TP_STATUS(PORT_E));
+ if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) {
+ drm_dbg_kms(&dev_priv->drm,
+ "FDI link training done on step %d\n", i);
+ break;
+ }
+
+ /*
+ * Leave things enabled even if we failed to train FDI.
+ * Results in fewer fireworks from the state checker.
+ */
+ if (i == n_entries * 2 - 1) {
+ drm_err(&dev_priv->drm, "FDI link training failed!\n");
+ break;
+ }
+
+ rx_ctl_val &= ~FDI_RX_ENABLE;
+ intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
+ intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));
+
+ temp = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_E));
+ temp &= ~DDI_BUF_CTL_ENABLE;
+ intel_de_write(dev_priv, DDI_BUF_CTL(PORT_E), temp);
+ intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E));
+
+ /* Disable DP_TP_CTL and FDI_RX_CTL and retry */
+ temp = intel_de_read(dev_priv, DP_TP_CTL(PORT_E));
+ temp &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
+ temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
+ intel_de_write(dev_priv, DP_TP_CTL(PORT_E), temp);
+ intel_de_posting_read(dev_priv, DP_TP_CTL(PORT_E));
+
+ intel_wait_ddi_buf_idle(dev_priv, PORT_E);
+
+ /* Reset FDI_RX_MISC pwrdn lanes */
+ temp = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A));
+ temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
+ temp |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
+ intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), temp);
+ intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A));
+ }
+
+ /* Enable normal pixel sending for FDI */
+ intel_de_write(dev_priv, DP_TP_CTL(PORT_E),
+ DP_TP_CTL_FDI_AUTOTRAIN |
+ DP_TP_CTL_LINK_TRAIN_NORMAL |
+ DP_TP_CTL_ENHANCED_FRAME_ENABLE |
+ DP_TP_CTL_ENABLE);
+}
+
+void hsw_fdi_disable(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ u32 val;
+
+ /*
+ * Bspec lists this as both step 13 (before DDI_BUF_CTL disable)
+ * and step 18 (after clearing PORT_CLK_SEL). Based on a BUN,
+ * step 13 is the correct place for it. Step 18 is where it was
+ * originally before the BUN.
+ */
+ val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
+ val &= ~FDI_RX_ENABLE;
+ intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val);
+
+ val = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_E));
+ val &= ~DDI_BUF_CTL_ENABLE;
+ intel_de_write(dev_priv, DDI_BUF_CTL(PORT_E), val);
+
+ intel_wait_ddi_buf_idle(dev_priv, PORT_E);
+
+ intel_ddi_disable_clock(encoder);
+
+ val = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A));
+ val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
+ val |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
+ intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), val);
+
+ val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
+ val &= ~FDI_PCDCLK;
+ intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val);
+
+ val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
+ val &= ~FDI_RX_PLL_ENABLE;
+ intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val);
+}
+
+void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ i915_reg_t reg;
+ u32 temp;
+
+ /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
+ reg = FDI_RX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
+ temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
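+ /* BPC in FDI rx is consistent with that in PIPECONF */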
+ temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
+ intel_de_write(dev_priv, reg, temp | FDI_RX_PLL_ENABLE);
+
+ intel_de_posting_read(dev_priv, reg);
+ udelay(200);
+
+ /* Switch from Rawclk to PCDclk */
+ temp = intel_de_read(dev_priv, reg);
+ intel_de_write(dev_priv, reg, temp | FDI_PCDCLK);
+
+ intel_de_posting_read(dev_priv, reg);
+ udelay(200);
+
+ /* Enable CPU FDI TX PLL, always on for Ironlake */
+ reg = FDI_TX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ if ((temp & FDI_TX_PLL_ENABLE) == 0) {
+ intel_de_write(dev_priv, reg, temp | FDI_TX_PLL_ENABLE);
+
+ intel_de_posting_read(dev_priv, reg);
+ udelay(100);
+ }
+}
+
+void ilk_fdi_pll_disable(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ enum pipe pipe = crtc->pipe;
+ i915_reg_t reg;
+ u32 temp;
+
+ /* Switch from PCDclk to Rawclk */
+ reg = FDI_RX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ intel_de_write(dev_priv, reg, temp & ~FDI_PCDCLK);
+
+ /* Disable CPU FDI TX PLL */
+ reg = FDI_TX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ intel_de_write(dev_priv, reg, temp & ~FDI_TX_PLL_ENABLE);
+
+ intel_de_posting_read(dev_priv, reg);
+ udelay(100);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ intel_de_write(dev_priv, reg, temp & ~FDI_RX_PLL_ENABLE);
+
+ /* Wait for the clocks to turn off. */
+ intel_de_posting_read(dev_priv, reg);
+ udelay(100);
+}
+
+void ilk_fdi_disable(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ i915_reg_t reg;
+ u32 temp;
+
+ /* disable CPU FDI tx and PCH FDI rx */
+ reg = FDI_TX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ intel_de_write(dev_priv, reg, temp & ~FDI_TX_ENABLE);
+ intel_de_posting_read(dev_priv, reg);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~(0x7 << 16);
+ temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
+ intel_de_write(dev_priv, reg, temp & ~FDI_RX_ENABLE);
+
+ intel_de_posting_read(dev_priv, reg);
+ udelay(100);
+
+ /* Ironlake workaround, disable clock pointer after downing FDI */
+ if (HAS_PCH_IBX(dev_priv))
+ intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
+ FDI_RX_PHASE_SYNC_POINTER_OVR);
+
+ /* still set train pattern 1 */
+ reg = FDI_TX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ intel_de_write(dev_priv, reg, temp);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ if (HAS_PCH_CPT(dev_priv)) {
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+ } else {
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ }
+ /* BPC in FDI rx is consistent with that in PIPECONF */
+ temp &= ~(0x07 << 16);
+ temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
+ intel_de_write(dev_priv, reg, temp);
+
+ intel_de_posting_read(dev_priv, reg);
+ udelay(100);
+}
+
+static const struct intel_fdi_funcs ilk_funcs = {
+ .fdi_link_train = ilk_fdi_link_train,
+};
+
+static const struct intel_fdi_funcs gen6_funcs = {
+ .fdi_link_train = gen6_fdi_link_train,
+};
+
+static const struct intel_fdi_funcs ivb_funcs = {
+ .fdi_link_train = ivb_manual_fdi_link_train,
+};
+
+void
+intel_fdi_init_hook(struct drm_i915_private *dev_priv)
+{
+ if (IS_IRONLAKE(dev_priv)) {
+ dev_priv->display.funcs.fdi = &ilk_funcs;
+ } else if (IS_SANDYBRIDGE(dev_priv)) {
+ dev_priv->display.funcs.fdi = &gen6_funcs;
+ } else if (IS_IVYBRIDGE(dev_priv)) {
+ /* FIXME: detect B0+ stepping and use auto training */
+ dev_priv->display.funcs.fdi = &ivb_funcs;
+ }
+}
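+
+/*
+ * Illustrative note (not new driver code): callers reach the trainer through
+ * the vtable installed above, via a wrapper along the lines of the
+ * intel_fdi_link_train() declared in intel_fdi.h:
+ *
+ *	i915->display.funcs.fdi->fdi_link_train(crtc, crtc_state);
+ */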
diff --git a/drivers/gpu/drm/i915/display/intel_fdi.h b/drivers/gpu/drm/i915/display/intel_fdi.h
new file mode 100644
index 000000000..1cdb86172
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_fdi.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef _INTEL_FDI_H_
+#define _INTEL_FDI_H_
+
+enum pipe;
+struct drm_i915_private;
+struct intel_crtc;
+struct intel_crtc_state;
+struct intel_encoder;
+
+int intel_fdi_link_freq(struct drm_i915_private *i915,
+ const struct intel_crtc_state *pipe_config);
+int ilk_fdi_compute_config(struct intel_crtc *intel_crtc,
+ struct intel_crtc_state *pipe_config);
+void intel_fdi_normal_train(struct intel_crtc *crtc);
+void ilk_fdi_disable(struct intel_crtc *crtc);
+void ilk_fdi_pll_disable(struct intel_crtc *intel_crtc);
+void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state);
+void intel_fdi_init_hook(struct drm_i915_private *dev_priv);
+void hsw_fdi_link_train(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+void hsw_fdi_disable(struct intel_encoder *encoder);
+void intel_fdi_pll_freq_update(struct drm_i915_private *i915);
+
+void intel_fdi_link_train(struct intel_crtc *crtc,
+ const struct intel_crtc_state *crtc_state);
+
+void assert_fdi_tx_enabled(struct drm_i915_private *i915, enum pipe pipe);
+void assert_fdi_tx_disabled(struct drm_i915_private *i915, enum pipe pipe);
+void assert_fdi_rx_enabled(struct drm_i915_private *i915, enum pipe pipe);
+void assert_fdi_rx_disabled(struct drm_i915_private *i915, enum pipe pipe);
+void assert_fdi_tx_pll_enabled(struct drm_i915_private *i915, enum pipe pipe);
+void assert_fdi_rx_pll_enabled(struct drm_i915_private *i915, enum pipe pipe);
+void assert_fdi_rx_pll_disabled(struct drm_i915_private *i915, enum pipe pipe);
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
new file mode 100644
index 000000000..d636d21fa
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
@@ -0,0 +1,511 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Daniel Vetter <daniel.vetter@ffwll.ch>
+ *
+ */
+
+#include "i915_drv.h"
+#include "intel_de.h"
+#include "intel_display_trace.h"
+#include "intel_display_types.h"
+#include "intel_fbc.h"
+#include "intel_fifo_underrun.h"
+
+/**
+ * DOC: fifo underrun handling
+ *
+ * The i915 driver checks for display fifo underruns using the interrupt signals
+ * provided by the hardware. This is enabled by default and fairly useful to
+ * debug display issues, especially watermark settings.
+ *
+ * If an underrun is detected, it is logged into dmesg. To avoid flooding the
+ * logs and occupying the cpu, underrun interrupts are disabled after the
+ * first occurrence until the next modeset on a given pipe.
+ *
+ * Note that underrun detection on gmch platforms is a bit uglier since there
+ * is no dedicated interrupt (even though the signalling bit is in the PIPESTAT
+ * pipe interrupt register). Also, on some platforms the underrun interrupts
+ * are shared, which means that if we detect an underrun we need to disable
+ * underrun reporting on all pipes.
+ *
+ * The code also supports underrun detection on the PCH transcoder.
+ */
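+
+/*
+ * Typical usage from the modeset code (an illustrative sketch, not a verbatim
+ * caller): reporting is turned off across an operation that is expected to
+ * underrun, then checked and re-enabled afterwards:
+ *
+ *	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
+ *	... enable or disable the pipe ...
+ *	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
+ *	intel_check_cpu_fifo_underruns(dev_priv);
+ */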
+
+static bool ivb_can_enable_err_int(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crtc *crtc;
+ enum pipe pipe;
+
+ lockdep_assert_held(&dev_priv->irq_lock);
+
+ for_each_pipe(dev_priv, pipe) {
+ crtc = intel_crtc_for_pipe(dev_priv, pipe);
+
+ if (crtc->cpu_fifo_underrun_disabled)
+ return false;
+ }
+
+ return true;
+}
+
+static bool cpt_can_enable_serr_int(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ enum pipe pipe;
+ struct intel_crtc *crtc;
+
+ lockdep_assert_held(&dev_priv->irq_lock);
+
+ for_each_pipe(dev_priv, pipe) {
+ crtc = intel_crtc_for_pipe(dev_priv, pipe);
+
+ if (crtc->pch_fifo_underrun_disabled)
+ return false;
+ }
+
+ return true;
+}
+
+static void i9xx_check_fifo_underruns(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ i915_reg_t reg = PIPESTAT(crtc->pipe);
+ u32 enable_mask;
+
+ lockdep_assert_held(&dev_priv->irq_lock);
+
+ if ((intel_de_read(dev_priv, reg) & PIPE_FIFO_UNDERRUN_STATUS) == 0)
+ return;
+
+ enable_mask = i915_pipestat_enable_mask(dev_priv, crtc->pipe);
+ intel_de_write(dev_priv, reg, enable_mask | PIPE_FIFO_UNDERRUN_STATUS);
+ intel_de_posting_read(dev_priv, reg);
+
+ trace_intel_cpu_fifo_underrun(dev_priv, crtc->pipe);
+ drm_err(&dev_priv->drm, "pipe %c underrun\n", pipe_name(crtc->pipe));
+}
+
+static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
+ enum pipe pipe,
+ bool enable, bool old)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ i915_reg_t reg = PIPESTAT(pipe);
+
+ lockdep_assert_held(&dev_priv->irq_lock);
+
+ if (enable) {
+ u32 enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
+
+ intel_de_write(dev_priv, reg,
+ enable_mask | PIPE_FIFO_UNDERRUN_STATUS);
+ intel_de_posting_read(dev_priv, reg);
+ } else {
+ if (old && intel_de_read(dev_priv, reg) & PIPE_FIFO_UNDERRUN_STATUS)
+ drm_err(&dev_priv->drm, "pipe %c underrun\n",
+ pipe_name(pipe));
+ }
+}
+
+static void ilk_set_fifo_underrun_reporting(struct drm_device *dev,
+ enum pipe pipe, bool enable)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ u32 bit = (pipe == PIPE_A) ?
+ DE_PIPEA_FIFO_UNDERRUN : DE_PIPEB_FIFO_UNDERRUN;
+
+ if (enable)
+ ilk_enable_display_irq(dev_priv, bit);
+ else
+ ilk_disable_display_irq(dev_priv, bit);
+}
+
+static void ivb_check_fifo_underruns(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ u32 err_int = intel_de_read(dev_priv, GEN7_ERR_INT);
+
+ lockdep_assert_held(&dev_priv->irq_lock);
+
+ if ((err_int & ERR_INT_FIFO_UNDERRUN(pipe)) == 0)
+ return;
+
+ intel_de_write(dev_priv, GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));
+ intel_de_posting_read(dev_priv, GEN7_ERR_INT);
+
+ trace_intel_cpu_fifo_underrun(dev_priv, pipe);
+ drm_err(&dev_priv->drm, "fifo underrun on pipe %c\n", pipe_name(pipe));
+}
+
+static void ivb_set_fifo_underrun_reporting(struct drm_device *dev,
+ enum pipe pipe, bool enable,
+ bool old)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+
+ if (enable) {
+ intel_de_write(dev_priv, GEN7_ERR_INT,
+ ERR_INT_FIFO_UNDERRUN(pipe));
+
+ if (!ivb_can_enable_err_int(dev))
+ return;
+
+ ilk_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
+ } else {
+ ilk_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
+
+ if (old &&
+ intel_de_read(dev_priv, GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) {
+ drm_err(&dev_priv->drm,
+ "uncleared fifo underrun on pipe %c\n",
+ pipe_name(pipe));
+ }
+ }
+}
+
+static u32
+icl_pipe_status_underrun_mask(struct drm_i915_private *dev_priv)
+{
+ u32 mask = PIPE_STATUS_UNDERRUN;
+
+ if (DISPLAY_VER(dev_priv) >= 13)
+ mask |= PIPE_STATUS_SOFT_UNDERRUN_XELPD |
+ PIPE_STATUS_HARD_UNDERRUN_XELPD |
+ PIPE_STATUS_PORT_UNDERRUN_XELPD;
+
+ return mask;
+}
+
+static void bdw_set_fifo_underrun_reporting(struct drm_device *dev,
+ enum pipe pipe, bool enable)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ u32 mask = gen8_de_pipe_underrun_mask(dev_priv);
+
+ if (enable) {
+ if (DISPLAY_VER(dev_priv) >= 11)
+ intel_de_write(dev_priv, ICL_PIPESTATUS(pipe),
+ icl_pipe_status_underrun_mask(dev_priv));
+
+ bdw_enable_pipe_irq(dev_priv, pipe, mask);
+ } else {
+ bdw_disable_pipe_irq(dev_priv, pipe, mask);
+ }
+}
+
+static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
+ enum pipe pch_transcoder,
+ bool enable)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ u32 bit = (pch_transcoder == PIPE_A) ?
+ SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;
+
+ if (enable)
+ ibx_enable_display_interrupt(dev_priv, bit);
+ else
+ ibx_disable_display_interrupt(dev_priv, bit);
+}
+
+static void cpt_check_pch_fifo_underruns(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pch_transcoder = crtc->pipe;
+ u32 serr_int = intel_de_read(dev_priv, SERR_INT);
+
+ lockdep_assert_held(&dev_priv->irq_lock);
+
+ if ((serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) == 0)
+ return;
+
+ intel_de_write(dev_priv, SERR_INT,
+ SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));
+ intel_de_posting_read(dev_priv, SERR_INT);
+
+ trace_intel_pch_fifo_underrun(dev_priv, pch_transcoder);
+ drm_err(&dev_priv->drm, "pch fifo underrun on pch transcoder %c\n",
+ pipe_name(pch_transcoder));
+}
+
+static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
+ enum pipe pch_transcoder,
+ bool enable, bool old)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+
+ if (enable) {
+ intel_de_write(dev_priv, SERR_INT,
+ SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));
+
+ if (!cpt_can_enable_serr_int(dev))
+ return;
+
+ ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
+ } else {
+ ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);
+
+ if (old && intel_de_read(dev_priv, SERR_INT) &
+ SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) {
+ drm_err(&dev_priv->drm,
+ "uncleared pch fifo underrun on pch transcoder %c\n",
+ pipe_name(pch_transcoder));
+ }
+ }
+}
+
+static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
+ enum pipe pipe, bool enable)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
+ bool old;
+
+ lockdep_assert_held(&dev_priv->irq_lock);
+
+ old = !crtc->cpu_fifo_underrun_disabled;
+ crtc->cpu_fifo_underrun_disabled = !enable;
+
+ if (HAS_GMCH(dev_priv))
+ i9xx_set_fifo_underrun_reporting(dev, pipe, enable, old);
+ else if (IS_IRONLAKE(dev_priv) || IS_SANDYBRIDGE(dev_priv))
+ ilk_set_fifo_underrun_reporting(dev, pipe, enable);
+ else if (DISPLAY_VER(dev_priv) == 7)
+ ivb_set_fifo_underrun_reporting(dev, pipe, enable, old);
+ else if (DISPLAY_VER(dev_priv) >= 8)
+ bdw_set_fifo_underrun_reporting(dev, pipe, enable);
+
+ return old;
+}
+
+/**
+ * intel_set_cpu_fifo_underrun_reporting - set cpu fifo underrun reporting state
+ * @dev_priv: i915 device instance
+ * @pipe: (CPU) pipe to set state for
+ * @enable: whether underruns should be reported or not
+ *
+ * This function sets the fifo underrun reporting state for @pipe. It is used in the
+ * modeset code to avoid false positives since on many platforms underruns are
+ * expected when disabling or enabling the pipe.
+ *
+ * Notice that on some platforms disabling underrun reports for one pipe
+ * disables them for all pipes due to shared interrupts. Actual reporting is
+ * still per-pipe though.
+ *
+ * Returns the previous state of underrun reporting.
+ */
+bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
+ enum pipe pipe, bool enable)
+{
+ unsigned long flags;
+ bool ret;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
+ ret = __intel_set_cpu_fifo_underrun_reporting(&dev_priv->drm, pipe,
+ enable);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+
+ return ret;
+}
+
+/**
+ * intel_set_pch_fifo_underrun_reporting - set PCH fifo underrun reporting state
+ * @dev_priv: i915 device instance
+ * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
+ * @enable: whether underruns should be reported or not
+ *
+ * This function disables or enables PCH fifo underruns for a specific
+ * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
+ * underrun reporting for one transcoder may also disable all the other PCH
+ * error interrupts for the other transcoders, since there's just one
+ * interrupt mask/enable bit for all the transcoders.
+ *
+ * Returns the previous state of underrun reporting.
+ */
+bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
+ enum pipe pch_transcoder,
+ bool enable)
+{
+ struct intel_crtc *crtc =
+ intel_crtc_for_pipe(dev_priv, pch_transcoder);
+ unsigned long flags;
+ bool old;
+
+ /*
+ * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
+ * has only one pch transcoder A that all pipes can use. To avoid racy
+ * pch transcoder -> pipe lookups from interrupt code simply store the
+ * underrun statistics in crtc A. Since we never expose this anywhere
+ * nor use it outside of the fifo underrun code here, using the "wrong"
+ * crtc on LPT won't cause issues.
+ */
+
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
+
+ old = !crtc->pch_fifo_underrun_disabled;
+ crtc->pch_fifo_underrun_disabled = !enable;
+
+ if (HAS_PCH_IBX(dev_priv))
+ ibx_set_fifo_underrun_reporting(&dev_priv->drm,
+ pch_transcoder,
+ enable);
+ else
+ cpt_set_fifo_underrun_reporting(&dev_priv->drm,
+ pch_transcoder,
+ enable, old);
+
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+ return old;
+}
+
+/**
+ * intel_cpu_fifo_underrun_irq_handler - handle CPU fifo underrun interrupt
+ * @dev_priv: i915 device instance
+ * @pipe: (CPU) pipe to set state for
+ *
+ * This handles a CPU fifo underrun interrupt, generating an underrun warning
+ * into dmesg if underrun reporting is enabled and then disables the underrun
+ * interrupt to avoid an irq storm.
+ */
+void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
+ u32 underruns = 0;
+
+ /* We may be called too early in init, thanks BIOS! */
+ if (crtc == NULL)
+ return;
+
+ /* GMCH can't disable fifo underruns, filter them. */
+ if (HAS_GMCH(dev_priv) &&
+ crtc->cpu_fifo_underrun_disabled)
+ return;
+
+ /*
+ * Starting with display version 11, the PIPE_STAT register records
+ * whether an underrun has happened, and on XELPD+, it will also record
+ * whether the underrun was soft/hard and whether it was triggered by
+ * the downstream port logic. We should clear these bits (which use
+ * write-1-to-clear logic) too.
+ *
+ * Note that although the IIR gives us the same underrun and soft/hard
+ * information, PIPE_STAT is the only place we can find out whether
+ * the underrun was caused by the downstream port.
+ */
+ if (DISPLAY_VER(dev_priv) >= 11) {
+ underruns = intel_de_read(dev_priv, ICL_PIPESTATUS(pipe)) &
+ icl_pipe_status_underrun_mask(dev_priv);
+ intel_de_write(dev_priv, ICL_PIPESTATUS(pipe), underruns);
+ }
+
+ if (intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false)) {
+ trace_intel_cpu_fifo_underrun(dev_priv, pipe);
+
+ if (DISPLAY_VER(dev_priv) >= 11)
+ drm_err(&dev_priv->drm, "CPU pipe %c FIFO underrun: %s%s%s%s\n",
+ pipe_name(pipe),
+ underruns & PIPE_STATUS_SOFT_UNDERRUN_XELPD ? "soft," : "",
+ underruns & PIPE_STATUS_HARD_UNDERRUN_XELPD ? "hard," : "",
+ underruns & PIPE_STATUS_PORT_UNDERRUN_XELPD ? "port," : "",
+ underruns & PIPE_STATUS_UNDERRUN ? "transcoder," : "");
+ else
+ drm_err(&dev_priv->drm, "CPU pipe %c FIFO underrun\n", pipe_name(pipe));
+ }
+
+ intel_fbc_handle_fifo_underrun_irq(dev_priv);
+}
+
+/**
+ * intel_pch_fifo_underrun_irq_handler - handle PCH fifo underrun interrupt
+ * @dev_priv: i915 device instance
+ * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
+ *
+ * This handles a PCH fifo underrun interrupt, generating an underrun warning
+ * into dmesg if underrun reporting is enabled and then disables the underrun
+ * interrupt to avoid an irq storm.
+ */
+void intel_pch_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
+ enum pipe pch_transcoder)
+{
+ if (intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder,
+ false)) {
+ trace_intel_pch_fifo_underrun(dev_priv, pch_transcoder);
+ drm_err(&dev_priv->drm, "PCH transcoder %c FIFO underrun\n",
+ pipe_name(pch_transcoder));
+ }
+}
+
+/**
+ * intel_check_cpu_fifo_underruns - check for CPU fifo underruns immediately
+ * @dev_priv: i915 device instance
+ *
+ * Check for CPU fifo underruns immediately. Useful on IVB/HSW where the shared
+ * error interrupt may have been disabled, and so CPU fifo underruns won't
+ * necessarily raise an interrupt, and on GMCH platforms where underruns never
+ * raise an interrupt.
+ */
+void intel_check_cpu_fifo_underruns(struct drm_i915_private *dev_priv)
+{
+ struct intel_crtc *crtc;
+
+ spin_lock_irq(&dev_priv->irq_lock);
+
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ if (crtc->cpu_fifo_underrun_disabled)
+ continue;
+
+ if (HAS_GMCH(dev_priv))
+ i9xx_check_fifo_underruns(crtc);
+ else if (DISPLAY_VER(dev_priv) == 7)
+ ivb_check_fifo_underruns(crtc);
+ }
+
+ spin_unlock_irq(&dev_priv->irq_lock);
+}
+
+/**
+ * intel_check_pch_fifo_underruns - check for PCH fifo underruns immediately
+ * @dev_priv: i915 device instance
+ *
+ * Check for PCH fifo underruns immediately. Useful on CPT/PPT where the shared
+ * error interrupt may have been disabled, and so PCH fifo underruns won't
+ * necessarily raise an interrupt.
+ */
+void intel_check_pch_fifo_underruns(struct drm_i915_private *dev_priv)
+{
+ struct intel_crtc *crtc;
+
+ spin_lock_irq(&dev_priv->irq_lock);
+
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ if (crtc->pch_fifo_underrun_disabled)
+ continue;
+
+ if (HAS_PCH_CPT(dev_priv))
+ cpt_check_pch_fifo_underruns(crtc);
+ }
+
+ spin_unlock_irq(&dev_priv->irq_lock);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_fifo_underrun.h b/drivers/gpu/drm/i915/display/intel_fifo_underrun.h
new file mode 100644
index 000000000..e04f22ac1
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_fifo_underrun.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_FIFO_UNDERRUN_H__
+#define __INTEL_FIFO_UNDERRUN_H__
+
+#include <linux/types.h>
+
+#include "intel_display.h"
+
+struct drm_i915_private;
+
+bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
+ enum pipe pipe, bool enable);
+bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
+ enum pipe pch_transcoder,
+ bool enable);
+void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
+ enum pipe pipe);
+void intel_pch_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
+ enum pipe pch_transcoder);
+void intel_check_cpu_fifo_underruns(struct drm_i915_private *dev_priv);
+void intel_check_pch_fifo_underruns(struct drm_i915_private *dev_priv);
+
+#endif /* __INTEL_FIFO_UNDERRUN_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.c b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
new file mode 100644
index 000000000..d80e3e8a9
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
@@ -0,0 +1,328 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Daniel Vetter <daniel.vetter@ffwll.ch>
+ */
+
+/**
+ * DOC: frontbuffer tracking
+ *
+ * Many features require us to track changes to the currently active
+ * frontbuffer, especially rendering targeted at the frontbuffer.
+ *
+ * To be able to do so we track frontbuffers using a bitmask for all possible
+ * frontbuffer slots through intel_frontbuffer_track(). The functions in this
+ * file are then called when the contents of the frontbuffer are invalidated,
+ * when frontbuffer rendering has stopped again to flush out all the changes
+ * and when the frontbuffer is exchanged with a flip. Subsystems interested in
+ * frontbuffer changes (e.g. PSR, FBC, DRRS) should directly put their callbacks
+ * into the relevant places and filter for the frontbuffer slots that they are
+ * interested in.
+ *
+ * On a high level there are two types of powersaving features. The first type
+ * works like a special cache (FBC and PSR) and is interested in when it should
+ * stop caching and when to restart caching. This is done by placing callbacks
+ * into the invalidate and flush functions: at invalidate time the caching must
+ * be stopped and at flush time it can be restarted. Such features may also need
+ * to know when the frontbuffer changes (e.g. when the hw doesn't initiate an
+ * invalidate and flush on its own), which can be achieved by placing callbacks
+ * into the flip functions.
+ *
+ * The other type of display power saving feature only cares about busyness
+ * (e.g. DRRS). In that case all three (invalidate, flush and flip) indicate
+ * busyness. There is no direct way to detect idleness. Instead an idle timer
+ * (a delayed work item) should be started from the flush and flip functions
+ * and cancelled as soon as busyness is detected.
+ */
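+
+/*
+ * Illustrative sketch of the tracking flow (an assumed caller, not verbatim
+ * driver code): a plane update moves the tracking bits from the old buffer
+ * to the new one, and the flip functions report the progress of the flip:
+ *
+ *	intel_frontbuffer_track(old->frontbuffer, new->frontbuffer,
+ *				INTEL_FRONTBUFFER(pipe, plane_id));
+ *	intel_frontbuffer_flip_prepare(i915, frontbuffer_bits);
+ *	... the flip gets latched by the hardware ...
+ *	intel_frontbuffer_flip_complete(i915, frontbuffer_bits);
+ */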
+
+#include "i915_drv.h"
+#include "intel_display_trace.h"
+#include "intel_display_types.h"
+#include "intel_dp.h"
+#include "intel_drrs.h"
+#include "intel_fbc.h"
+#include "intel_frontbuffer.h"
+#include "intel_psr.h"
+
+/**
+ * frontbuffer_flush - flush frontbuffer
+ * @i915: i915 device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ * @origin: which operation caused the flush
+ *
+ * This function gets called every time rendering on the given planes has
+ * completed and frontbuffer caching can be started again. Flushes will get
+ * delayed if they're blocked by some outstanding asynchronous rendering.
+ *
+ * Can be called without any locks held.
+ */
+static void frontbuffer_flush(struct drm_i915_private *i915,
+ unsigned int frontbuffer_bits,
+ enum fb_op_origin origin)
+{
+ /* Delay flushing when rings are still busy.*/
+ spin_lock(&i915->display.fb_tracking.lock);
+ frontbuffer_bits &= ~i915->display.fb_tracking.busy_bits;
+ spin_unlock(&i915->display.fb_tracking.lock);
+
+ if (!frontbuffer_bits)
+ return;
+
+ trace_intel_frontbuffer_flush(frontbuffer_bits, origin);
+
+ might_sleep();
+ intel_drrs_flush(i915, frontbuffer_bits);
+ intel_psr_flush(i915, frontbuffer_bits, origin);
+ intel_fbc_flush(i915, frontbuffer_bits, origin);
+}
+
+/**
+ * intel_frontbuffer_flip_prepare - prepare asynchronous frontbuffer flip
+ * @i915: i915 device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * This function gets called after scheduling a flip on the planes given by
+ * @frontbuffer_bits. The actual frontbuffer flushing will be delayed until
+ * completion is signalled with intel_frontbuffer_flip_complete(). If an
+ * invalidate happens in between, this flush will be cancelled.
+ *
+ * Can be called without any locks held.
+ */
+void intel_frontbuffer_flip_prepare(struct drm_i915_private *i915,
+ unsigned frontbuffer_bits)
+{
+ spin_lock(&i915->display.fb_tracking.lock);
+ i915->display.fb_tracking.flip_bits |= frontbuffer_bits;
+ /* Remove stale busy bits due to the old buffer. */
+ i915->display.fb_tracking.busy_bits &= ~frontbuffer_bits;
+ spin_unlock(&i915->display.fb_tracking.lock);
+}
+
+/**
+ * intel_frontbuffer_flip_complete - complete asynchronous frontbuffer flip
+ * @i915: i915 device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * This function gets called after the flip has been latched and will complete
+ * on the next vblank. It will execute the flush if it hasn't been cancelled yet.
+ *
+ * Can be called without any locks held.
+ */
+void intel_frontbuffer_flip_complete(struct drm_i915_private *i915,
+ unsigned frontbuffer_bits)
+{
+ spin_lock(&i915->display.fb_tracking.lock);
+ /* Mask any cancelled flips. */
+ frontbuffer_bits &= i915->display.fb_tracking.flip_bits;
+ i915->display.fb_tracking.flip_bits &= ~frontbuffer_bits;
+ spin_unlock(&i915->display.fb_tracking.lock);
+
+ if (frontbuffer_bits)
+ frontbuffer_flush(i915, frontbuffer_bits, ORIGIN_FLIP);
+}
+
+/**
+ * intel_frontbuffer_flip - synchronous frontbuffer flip
+ * @i915: i915 device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * This function gets called after scheduling a flip on the planes given by
+ * @frontbuffer_bits. This is for synchronous plane updates which will happen
+ * on the next vblank and which will not get delayed by pending gpu rendering.
+ *
+ * Can be called without any locks held.
+ */
+void intel_frontbuffer_flip(struct drm_i915_private *i915,
+ unsigned frontbuffer_bits)
+{
+ spin_lock(&i915->display.fb_tracking.lock);
+ /* Remove stale busy bits due to the old buffer. */
+ i915->display.fb_tracking.busy_bits &= ~frontbuffer_bits;
+ spin_unlock(&i915->display.fb_tracking.lock);
+
+ frontbuffer_flush(i915, frontbuffer_bits, ORIGIN_FLIP);
+}
+
+void __intel_fb_invalidate(struct intel_frontbuffer *front,
+ enum fb_op_origin origin,
+ unsigned int frontbuffer_bits)
+{
+ struct drm_i915_private *i915 = to_i915(front->obj->base.dev);
+
+ if (origin == ORIGIN_CS) {
+ spin_lock(&i915->display.fb_tracking.lock);
+ i915->display.fb_tracking.busy_bits |= frontbuffer_bits;
+ i915->display.fb_tracking.flip_bits &= ~frontbuffer_bits;
+ spin_unlock(&i915->display.fb_tracking.lock);
+ }
+
+ trace_intel_frontbuffer_invalidate(frontbuffer_bits, origin);
+
+ might_sleep();
+ intel_psr_invalidate(i915, frontbuffer_bits, origin);
+ intel_drrs_invalidate(i915, frontbuffer_bits);
+ intel_fbc_invalidate(i915, frontbuffer_bits, origin);
+}
+
+void __intel_fb_flush(struct intel_frontbuffer *front,
+ enum fb_op_origin origin,
+ unsigned int frontbuffer_bits)
+{
+ struct drm_i915_private *i915 = to_i915(front->obj->base.dev);
+
+ if (origin == ORIGIN_CS) {
+ spin_lock(&i915->display.fb_tracking.lock);
+ /* Filter out new bits since rendering started. */
+ frontbuffer_bits &= i915->display.fb_tracking.busy_bits;
+ i915->display.fb_tracking.busy_bits &= ~frontbuffer_bits;
+ spin_unlock(&i915->display.fb_tracking.lock);
+ }
+
+ if (frontbuffer_bits)
+ frontbuffer_flush(i915, frontbuffer_bits, origin);
+}
+
+static int frontbuffer_active(struct i915_active *ref)
+{
+ struct intel_frontbuffer *front =
+ container_of(ref, typeof(*front), write);
+
+ kref_get(&front->ref);
+ return 0;
+}
+
+static void frontbuffer_retire(struct i915_active *ref)
+{
+ struct intel_frontbuffer *front =
+ container_of(ref, typeof(*front), write);
+
+ intel_frontbuffer_flush(front, ORIGIN_CS);
+ intel_frontbuffer_put(front);
+}
+
+static void frontbuffer_release(struct kref *ref)
+ __releases(&to_i915(front->obj->base.dev)->display.fb_tracking.lock)
+{
+ struct intel_frontbuffer *front =
+ container_of(ref, typeof(*front), ref);
+ struct drm_i915_gem_object *obj = front->obj;
+ struct i915_vma *vma;
+
+ drm_WARN_ON(obj->base.dev, atomic_read(&front->bits));
+
+ spin_lock(&obj->vma.lock);
+ for_each_ggtt_vma(vma, obj) {
+ i915_vma_clear_scanout(vma);
+ vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
+ }
+ spin_unlock(&obj->vma.lock);
+
+ RCU_INIT_POINTER(obj->frontbuffer, NULL);
+ spin_unlock(&to_i915(obj->base.dev)->display.fb_tracking.lock);
+
+ i915_active_fini(&front->write);
+
+ i915_gem_object_put(obj);
+ kfree_rcu(front, rcu);
+}
+
+struct intel_frontbuffer *
+intel_frontbuffer_get(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct intel_frontbuffer *front;
+
+ front = __intel_frontbuffer_get(obj);
+ if (front)
+ return front;
+
+ front = kmalloc(sizeof(*front), GFP_KERNEL);
+ if (!front)
+ return NULL;
+
+ front->obj = obj;
+ kref_init(&front->ref);
+ atomic_set(&front->bits, 0);
+ i915_active_init(&front->write,
+ frontbuffer_active,
+ frontbuffer_retire,
+ I915_ACTIVE_RETIRE_SLEEPS);
+
+ spin_lock(&i915->display.fb_tracking.lock);
+ if (rcu_access_pointer(obj->frontbuffer)) {
+ kfree(front);
+ front = rcu_dereference_protected(obj->frontbuffer, true);
+ kref_get(&front->ref);
+ } else {
+ i915_gem_object_get(obj);
+ rcu_assign_pointer(obj->frontbuffer, front);
+ }
+ spin_unlock(&i915->display.fb_tracking.lock);
+
+ return front;
+}
+
+void intel_frontbuffer_put(struct intel_frontbuffer *front)
+{
+ kref_put_lock(&front->ref,
+ frontbuffer_release,
+ &to_i915(front->obj->base.dev)->display.fb_tracking.lock);
+}
+
+/**
+ * intel_frontbuffer_track - update frontbuffer tracking
+ * @old: current buffer for the frontbuffer slots
+ * @new: new buffer for the frontbuffer slots
+ * @frontbuffer_bits: bitmask of frontbuffer slots
+ *
+ * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
+ * from @old and setting them in @new. Both @old and @new can be NULL.
+ */
+void intel_frontbuffer_track(struct intel_frontbuffer *old,
+ struct intel_frontbuffer *new,
+ unsigned int frontbuffer_bits)
+{
+ /*
+ * Control of individual bits within the mask is guarded by
+ * the owning plane->mutex, i.e. we can never see concurrent
+ * manipulation of individual bits. But since the bitfield as a whole
+ * is updated using RMW, we need to use atomics in order to update
+ * the bits.
+ */
+ BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES >
+ BITS_PER_TYPE(atomic_t));
+ BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES > 32);
+ BUILD_BUG_ON(I915_MAX_PLANES > INTEL_FRONTBUFFER_BITS_PER_PIPE);
+
+ if (old) {
+ drm_WARN_ON(old->obj->base.dev,
+ !(atomic_read(&old->bits) & frontbuffer_bits));
+ atomic_andnot(frontbuffer_bits, &old->bits);
+ }
+
+ if (new) {
+ drm_WARN_ON(new->obj->base.dev,
+ atomic_read(&new->bits) & frontbuffer_bits);
+ atomic_or(frontbuffer_bits, &new->bits);
+ }
+}
diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.h b/drivers/gpu/drm/i915/display/intel_frontbuffer.h
new file mode 100644
index 000000000..3c474ed93
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.h
@@ -0,0 +1,170 @@
+/*
+ * Copyright (c) 2014-2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef __INTEL_FRONTBUFFER_H__
+#define __INTEL_FRONTBUFFER_H__
+
+#include <linux/atomic.h>
+#include <linux/bits.h>
+#include <linux/kref.h>
+
+#include "gem/i915_gem_object_types.h"
+#include "i915_active_types.h"
+
+struct drm_i915_private;
+
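+/*
+ * Which operation caused a frontbuffer invalidate or flush: a write through
+ * a CPU mapping, rendering via the command streamer, a page flip, the
+ * DIRTYFB ioctl, or a cursor update.
+ */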
+enum fb_op_origin {
+ ORIGIN_CPU = 0,
+ ORIGIN_CS,
+ ORIGIN_FLIP,
+ ORIGIN_DIRTYFB,
+ ORIGIN_CURSOR_UPDATE,
+};
+
+struct intel_frontbuffer {
+ struct kref ref;
+ atomic_t bits;
+ struct i915_active write;
+ struct drm_i915_gem_object *obj;
+ struct rcu_head rcu;
+};
+
+/*
+ * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is
+ * considered to be the frontbuffer for the given plane interface-wise. This
+ * doesn't mean that the hw necessarily already scans it out, but that any
+ * rendering (by the cpu or gpu) will land in the frontbuffer eventually.
+ *
+ * We have one bit per pipe and per scanout plane type.
+ */
+#define INTEL_FRONTBUFFER_BITS_PER_PIPE 8
+#define INTEL_FRONTBUFFER(pipe, plane_id) \
+ BIT((plane_id) + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
+#define INTEL_FRONTBUFFER_OVERLAY(pipe) \
+ BIT(INTEL_FRONTBUFFER_BITS_PER_PIPE - 1 + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
+#define INTEL_FRONTBUFFER_ALL_MASK(pipe) \
+ GENMASK(INTEL_FRONTBUFFER_BITS_PER_PIPE * ((pipe) + 1) - 1, \
+ INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
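+
+/*
+ * Worked example of the bit layout: with 8 bits per pipe, plane 2 on pipe B
+ * is bit 8 + 2 = 10, INTEL_FRONTBUFFER_OVERLAY(PIPE_A) is bit 7, and
+ * INTEL_FRONTBUFFER_ALL_MASK(PIPE_A) is 0xff.
+ */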
+
+void intel_frontbuffer_flip_prepare(struct drm_i915_private *i915,
+ unsigned frontbuffer_bits);
+void intel_frontbuffer_flip_complete(struct drm_i915_private *i915,
+ unsigned frontbuffer_bits);
+void intel_frontbuffer_flip(struct drm_i915_private *i915,
+ unsigned frontbuffer_bits);
+
+void intel_frontbuffer_put(struct intel_frontbuffer *front);
+
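+/*
+ * Lockless lookup of the frontbuffer associated with @obj: the pointer is
+ * re-checked after taking a reference, since the frontbuffer may be freed
+ * and the pointer reassigned concurrently; kref_get_unless_zero() failing
+ * means we raced with the final put and must retry.
+ */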
+static inline struct intel_frontbuffer *
+__intel_frontbuffer_get(const struct drm_i915_gem_object *obj)
+{
+ struct intel_frontbuffer *front;
+
+ if (likely(!rcu_access_pointer(obj->frontbuffer)))
+ return NULL;
+
+ rcu_read_lock();
+ do {
+ front = rcu_dereference(obj->frontbuffer);
+ if (!front)
+ break;
+
+ if (unlikely(!kref_get_unless_zero(&front->ref)))
+ continue;
+
+ if (likely(front == rcu_access_pointer(obj->frontbuffer)))
+ break;
+
+ intel_frontbuffer_put(front);
+ } while (1);
+ rcu_read_unlock();
+
+ return front;
+}
+
+struct intel_frontbuffer *
+intel_frontbuffer_get(struct drm_i915_gem_object *obj);
+
+void __intel_fb_invalidate(struct intel_frontbuffer *front,
+ enum fb_op_origin origin,
+ unsigned int frontbuffer_bits);
+
+/**
+ * intel_frontbuffer_invalidate - invalidate frontbuffer object
+ * @front: frontbuffer to invalidate
+ * @origin: which operation caused the invalidation
+ *
+ * This function gets called every time rendering on the given object starts and
+ * frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must
+ * be invalidated. For ORIGIN_CS any subsequent invalidation will be delayed
+ * until the rendering completes or a flip on this frontbuffer plane is
+ * scheduled.
+ */
+static inline bool intel_frontbuffer_invalidate(struct intel_frontbuffer *front,
+ enum fb_op_origin origin)
+{
+ unsigned int frontbuffer_bits;
+
+ if (!front)
+ return false;
+
+ frontbuffer_bits = atomic_read(&front->bits);
+ if (!frontbuffer_bits)
+ return false;
+
+ __intel_fb_invalidate(front, origin, frontbuffer_bits);
+ return true;
+}
+
+void __intel_fb_flush(struct intel_frontbuffer *front,
+ enum fb_op_origin origin,
+ unsigned int frontbuffer_bits);
+
+/**
+ * intel_frontbuffer_flush - flush frontbuffer object
+ * @front: frontbuffer to flush
+ * @origin: which operation caused the flush
+ *
+ * This function gets called every time rendering on the given object has
+ * completed and frontbuffer caching can be started again.
+ */
+static inline void intel_frontbuffer_flush(struct intel_frontbuffer *front,
+ enum fb_op_origin origin)
+{
+ unsigned int frontbuffer_bits;
+
+ if (!front)
+ return;
+
+ frontbuffer_bits = atomic_read(&front->bits);
+ if (!frontbuffer_bits)
+ return;
+
+ __intel_fb_flush(front, origin, frontbuffer_bits);
+}
+
+void intel_frontbuffer_track(struct intel_frontbuffer *old,
+ struct intel_frontbuffer *new,
+ unsigned int frontbuffer_bits);
+
+#endif /* __INTEL_FRONTBUFFER_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_global_state.c b/drivers/gpu/drm/i915/display/intel_global_state.c
new file mode 100644
index 000000000..7a19215ad
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_global_state.c
@@ -0,0 +1,257 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <linux/string.h>
+
+#include "i915_drv.h"
+#include "intel_atomic.h"
+#include "intel_display_types.h"
+#include "intel_global_state.h"
+
+static void __intel_atomic_global_state_free(struct kref *kref)
+{
+ struct intel_global_state *obj_state =
+ container_of(kref, struct intel_global_state, ref);
+ struct intel_global_obj *obj = obj_state->obj;
+
+ obj->funcs->atomic_destroy_state(obj, obj_state);
+}
+
+static void intel_atomic_global_state_put(struct intel_global_state *obj_state)
+{
+ kref_put(&obj_state->ref, __intel_atomic_global_state_free);
+}
+
+static struct intel_global_state *
+intel_atomic_global_state_get(struct intel_global_state *obj_state)
+{
+ kref_get(&obj_state->ref);
+
+ return obj_state;
+}
+
+void intel_atomic_global_obj_init(struct drm_i915_private *dev_priv,
+ struct intel_global_obj *obj,
+ struct intel_global_state *state,
+ const struct intel_global_state_funcs *funcs)
+{
+ memset(obj, 0, sizeof(*obj));
+
+ state->obj = obj;
+
+ kref_init(&state->ref);
+
+ obj->state = state;
+ obj->funcs = funcs;
+ list_add_tail(&obj->head, &dev_priv->global_obj_list);
+}
+
+void intel_atomic_global_obj_cleanup(struct drm_i915_private *dev_priv)
+{
+ struct intel_global_obj *obj, *next;
+
+ list_for_each_entry_safe(obj, next, &dev_priv->global_obj_list, head) {
+ list_del(&obj->head);
+
+ drm_WARN_ON(&dev_priv->drm, kref_read(&obj->state->ref) != 1);
+ intel_atomic_global_state_put(obj->state);
+ }
+}
+
+static void assert_global_state_write_locked(struct drm_i915_private *dev_priv)
+{
+ struct intel_crtc *crtc;
+
+ for_each_intel_crtc(&dev_priv->drm, crtc)
+ drm_modeset_lock_assert_held(&crtc->base.mutex);
+}
+
+static bool modeset_lock_is_held(struct drm_modeset_acquire_ctx *ctx,
+ struct drm_modeset_lock *lock)
+{
+ struct drm_modeset_lock *l;
+
+ list_for_each_entry(l, &ctx->locked, head) {
+ if (lock == l)
+ return true;
+ }
+
+ return false;
+}
+
+static void assert_global_state_read_locked(struct intel_atomic_state *state)
+{
+ struct drm_modeset_acquire_ctx *ctx = state->base.acquire_ctx;
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc *crtc;
+
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ if (modeset_lock_is_held(ctx, &crtc->base.mutex))
+ return;
+ }
+
+ drm_WARN(&dev_priv->drm, 1, "Global state not read locked\n");
+}
+
+struct intel_global_state *
+intel_atomic_get_global_obj_state(struct intel_atomic_state *state,
+ struct intel_global_obj *obj)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ int index, num_objs, i;
+ size_t size;
+ struct __intel_global_objs_state *arr;
+ struct intel_global_state *obj_state;
+
+ for (i = 0; i < state->num_global_objs; i++)
+ if (obj == state->global_objs[i].ptr)
+ return state->global_objs[i].state;
+
+ assert_global_state_read_locked(state);
+
+ num_objs = state->num_global_objs + 1;
+ size = sizeof(*state->global_objs) * num_objs;
+ arr = krealloc(state->global_objs, size, GFP_KERNEL);
+ if (!arr)
+ return ERR_PTR(-ENOMEM);
+
+ state->global_objs = arr;
+ index = state->num_global_objs;
+ memset(&state->global_objs[index], 0, sizeof(*state->global_objs));
+
+ obj_state = obj->funcs->atomic_duplicate_state(obj);
+ if (!obj_state)
+ return ERR_PTR(-ENOMEM);
+
+ obj_state->obj = obj;
+ obj_state->changed = false;
+
+ kref_init(&obj_state->ref);
+
+ state->global_objs[index].state = obj_state;
+ state->global_objs[index].old_state =
+ intel_atomic_global_state_get(obj->state);
+ state->global_objs[index].new_state = obj_state;
+ state->global_objs[index].ptr = obj;
+ obj_state->state = state;
+
+ state->num_global_objs = num_objs;
+
+ drm_dbg_atomic(&i915->drm, "Added new global object %p state %p to %p\n",
+ obj, obj_state, state);
+
+ return obj_state;
+}
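+
+/*
+ * Illustrative usage sketch (the global object name is assumed, not from this
+ * file): a consumer fetches its state slot and then takes whatever locks it
+ * needs before modifying it:
+ *
+ *	obj_state = intel_atomic_get_global_obj_state(state, &i915->foo_obj);
+ *	if (IS_ERR(obj_state))
+ *		return PTR_ERR(obj_state);
+ *	ret = intel_atomic_lock_global_state(obj_state);
+ */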
+
+struct intel_global_state *
+intel_atomic_get_old_global_obj_state(struct intel_atomic_state *state,
+ struct intel_global_obj *obj)
+{
+ int i;
+
+ for (i = 0; i < state->num_global_objs; i++)
+ if (obj == state->global_objs[i].ptr)
+ return state->global_objs[i].old_state;
+
+ return NULL;
+}
+
+struct intel_global_state *
+intel_atomic_get_new_global_obj_state(struct intel_atomic_state *state,
+ struct intel_global_obj *obj)
+{
+ int i;
+
+ for (i = 0; i < state->num_global_objs; i++)
+ if (obj == state->global_objs[i].ptr)
+ return state->global_objs[i].new_state;
+
+ return NULL;
+}
+
+void intel_atomic_swap_global_state(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_global_state *old_obj_state, *new_obj_state;
+ struct intel_global_obj *obj;
+ int i;
+
+ for_each_oldnew_global_obj_in_state(state, obj, old_obj_state,
+ new_obj_state, i) {
+ drm_WARN_ON(&dev_priv->drm, obj->state != old_obj_state);
+
+ /*
+ * If the new state wasn't modified (and properly
+ * locked for write access) we throw it away.
+ */
+ if (!new_obj_state->changed)
+ continue;
+
+ assert_global_state_write_locked(dev_priv);
+
+ old_obj_state->state = state;
+ new_obj_state->state = NULL;
+
+ state->global_objs[i].state = old_obj_state;
+
+ intel_atomic_global_state_put(obj->state);
+ obj->state = intel_atomic_global_state_get(new_obj_state);
+ }
+}
+
+void intel_atomic_clear_global_state(struct intel_atomic_state *state)
+{
+ int i;
+
+ for (i = 0; i < state->num_global_objs; i++) {
+ intel_atomic_global_state_put(state->global_objs[i].old_state);
+ intel_atomic_global_state_put(state->global_objs[i].new_state);
+
+ state->global_objs[i].ptr = NULL;
+ state->global_objs[i].state = NULL;
+ state->global_objs[i].old_state = NULL;
+ state->global_objs[i].new_state = NULL;
+ }
+ state->num_global_objs = 0;
+}
+
+int intel_atomic_lock_global_state(struct intel_global_state *obj_state)
+{
+ struct intel_atomic_state *state = obj_state->state;
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc *crtc;
+
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ int ret;
+
+ ret = drm_modeset_lock(&crtc->base.mutex,
+ state->base.acquire_ctx);
+ if (ret)
+ return ret;
+ }
+
+ obj_state->changed = true;
+
+ return 0;
+}
+
+int intel_atomic_serialize_global_state(struct intel_global_state *obj_state)
+{
+ struct intel_atomic_state *state = obj_state->state;
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc *crtc;
+
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ struct intel_crtc_state *crtc_state;
+
+ crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+ }
+
+ obj_state->changed = true;
+
+ return 0;
+}
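+
+/*
+ * Note on the two helpers above: intel_atomic_lock_global_state() grabs every
+ * crtc modeset lock for write access, while
+ * intel_atomic_serialize_global_state() additionally adds every crtc state to
+ * the commit, serializing it against all other commits.
+ */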
diff --git a/drivers/gpu/drm/i915/display/intel_global_state.h b/drivers/gpu/drm/i915/display/intel_global_state.h
new file mode 100644
index 000000000..1f16fa307
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_global_state.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef __INTEL_GLOBAL_STATE_H__
+#define __INTEL_GLOBAL_STATE_H__
+
+#include <linux/kref.h>
+#include <linux/list.h>
+
+struct drm_i915_private;
+struct intel_atomic_state;
+struct intel_global_obj;
+struct intel_global_state;
+
+struct intel_global_state_funcs {
+ struct intel_global_state *(*atomic_duplicate_state)(struct intel_global_obj *obj);
+ void (*atomic_destroy_state)(struct intel_global_obj *obj,
+ struct intel_global_state *state);
+};
+
+struct intel_global_obj {
+ struct list_head head;
+ struct intel_global_state *state;
+ const struct intel_global_state_funcs *funcs;
+};
+
+#define intel_for_each_global_obj(obj, dev_priv) \
+ list_for_each_entry(obj, &(dev_priv)->global_obj_list, head)
+
+#define for_each_new_global_obj_in_state(__state, obj, new_obj_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->num_global_objs && \
+ ((obj) = (__state)->global_objs[__i].ptr, \
+ (new_obj_state) = (__state)->global_objs[__i].new_state, 1); \
+ (__i)++) \
+ for_each_if(obj)
+
+#define for_each_old_global_obj_in_state(__state, obj, new_obj_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->num_global_objs && \
+ ((obj) = (__state)->global_objs[__i].ptr, \
+ (new_obj_state) = (__state)->global_objs[__i].old_state, 1); \
+ (__i)++) \
+ for_each_if(obj)
+
+#define for_each_oldnew_global_obj_in_state(__state, obj, old_obj_state, new_obj_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->num_global_objs && \
+ ((obj) = (__state)->global_objs[__i].ptr, \
+ (old_obj_state) = (__state)->global_objs[__i].old_state, \
+ (new_obj_state) = (__state)->global_objs[__i].new_state, 1); \
+ (__i)++) \
+ for_each_if(obj)
+
+struct intel_global_state {
+ struct intel_global_obj *obj;
+ struct intel_atomic_state *state;
+ struct kref ref;
+ bool changed;
+};
+
+struct __intel_global_objs_state {
+ struct intel_global_obj *ptr;
+ struct intel_global_state *state, *old_state, *new_state;
+};
+
+void intel_atomic_global_obj_init(struct drm_i915_private *dev_priv,
+ struct intel_global_obj *obj,
+ struct intel_global_state *state,
+ const struct intel_global_state_funcs *funcs);
+void intel_atomic_global_obj_cleanup(struct drm_i915_private *dev_priv);
+
+struct intel_global_state *
+intel_atomic_get_global_obj_state(struct intel_atomic_state *state,
+ struct intel_global_obj *obj);
+struct intel_global_state *
+intel_atomic_get_old_global_obj_state(struct intel_atomic_state *state,
+ struct intel_global_obj *obj);
+struct intel_global_state *
+intel_atomic_get_new_global_obj_state(struct intel_atomic_state *state,
+ struct intel_global_obj *obj);
+
+void intel_atomic_swap_global_state(struct intel_atomic_state *state);
+void intel_atomic_clear_global_state(struct intel_atomic_state *state);
+int intel_atomic_lock_global_state(struct intel_global_state *obj_state);
+int intel_atomic_serialize_global_state(struct intel_global_state *obj_state);
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c
new file mode 100644
index 000000000..74443f57f
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_gmbus.c
@@ -0,0 +1,1003 @@
+/*
+ * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
+ * Copyright © 2006-2008,2010 Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Chris Wilson <chris@chris-wilson.co.uk>
+ */
+
+#include <linux/export.h>
+#include <linux/i2c-algo-bit.h>
+#include <linux/i2c.h>
+
+#include <drm/display/drm_hdcp_helper.h>
+
+#include "i915_drv.h"
+#include "intel_de.h"
+#include "intel_display_types.h"
+#include "intel_gmbus.h"
+#include "intel_gmbus_regs.h"
+
+struct intel_gmbus {
+ struct i2c_adapter adapter;
+#define GMBUS_FORCE_BIT_RETRY (1U << 31)
+ u32 force_bit;
+ u32 reg0;
+ i915_reg_t gpio_reg;
+ struct i2c_algo_bit_data bit_algo;
+ struct drm_i915_private *i915;
+};
+
+struct gmbus_pin {
+ const char *name;
+ enum i915_gpio gpio;
+};
+
+/* Map gmbus pin pairs to names and registers. */
+static const struct gmbus_pin gmbus_pins[] = {
+ [GMBUS_PIN_SSC] = { "ssc", GPIOB },
+ [GMBUS_PIN_VGADDC] = { "vga", GPIOA },
+ [GMBUS_PIN_PANEL] = { "panel", GPIOC },
+ [GMBUS_PIN_DPC] = { "dpc", GPIOD },
+ [GMBUS_PIN_DPB] = { "dpb", GPIOE },
+ [GMBUS_PIN_DPD] = { "dpd", GPIOF },
+};
+
+static const struct gmbus_pin gmbus_pins_bdw[] = {
+ [GMBUS_PIN_VGADDC] = { "vga", GPIOA },
+ [GMBUS_PIN_DPC] = { "dpc", GPIOD },
+ [GMBUS_PIN_DPB] = { "dpb", GPIOE },
+ [GMBUS_PIN_DPD] = { "dpd", GPIOF },
+};
+
+static const struct gmbus_pin gmbus_pins_skl[] = {
+ [GMBUS_PIN_DPC] = { "dpc", GPIOD },
+ [GMBUS_PIN_DPB] = { "dpb", GPIOE },
+ [GMBUS_PIN_DPD] = { "dpd", GPIOF },
+};
+
+static const struct gmbus_pin gmbus_pins_bxt[] = {
+ [GMBUS_PIN_1_BXT] = { "dpb", GPIOB },
+ [GMBUS_PIN_2_BXT] = { "dpc", GPIOC },
+ [GMBUS_PIN_3_BXT] = { "misc", GPIOD },
+};
+
+static const struct gmbus_pin gmbus_pins_cnp[] = {
+ [GMBUS_PIN_1_BXT] = { "dpb", GPIOB },
+ [GMBUS_PIN_2_BXT] = { "dpc", GPIOC },
+ [GMBUS_PIN_3_BXT] = { "misc", GPIOD },
+ [GMBUS_PIN_4_CNP] = { "dpd", GPIOE },
+};
+
+static const struct gmbus_pin gmbus_pins_icp[] = {
+ [GMBUS_PIN_1_BXT] = { "dpa", GPIOB },
+ [GMBUS_PIN_2_BXT] = { "dpb", GPIOC },
+ [GMBUS_PIN_3_BXT] = { "dpc", GPIOD },
+ [GMBUS_PIN_9_TC1_ICP] = { "tc1", GPIOJ },
+ [GMBUS_PIN_10_TC2_ICP] = { "tc2", GPIOK },
+ [GMBUS_PIN_11_TC3_ICP] = { "tc3", GPIOL },
+ [GMBUS_PIN_12_TC4_ICP] = { "tc4", GPIOM },
+ [GMBUS_PIN_13_TC5_TGP] = { "tc5", GPION },
+ [GMBUS_PIN_14_TC6_TGP] = { "tc6", GPIOO },
+};
+
+static const struct gmbus_pin gmbus_pins_dg1[] = {
+ [GMBUS_PIN_1_BXT] = { "dpa", GPIOB },
+ [GMBUS_PIN_2_BXT] = { "dpb", GPIOC },
+ [GMBUS_PIN_3_BXT] = { "dpc", GPIOD },
+ [GMBUS_PIN_4_CNP] = { "dpd", GPIOE },
+};
+
+static const struct gmbus_pin gmbus_pins_dg2[] = {
+ [GMBUS_PIN_1_BXT] = { "dpa", GPIOB },
+ [GMBUS_PIN_2_BXT] = { "dpb", GPIOC },
+ [GMBUS_PIN_3_BXT] = { "dpc", GPIOD },
+ [GMBUS_PIN_4_CNP] = { "dpd", GPIOE },
+ [GMBUS_PIN_9_TC1_ICP] = { "tc1", GPIOJ },
+};
+
+static const struct gmbus_pin gmbus_pins_mtp[] = {
+ [GMBUS_PIN_1_BXT] = { "dpa", GPIOB },
+ [GMBUS_PIN_2_BXT] = { "dpb", GPIOC },
+ [GMBUS_PIN_3_BXT] = { "dpc", GPIOD },
+ [GMBUS_PIN_4_CNP] = { "dpd", GPIOE },
+ [GMBUS_PIN_5_MTP] = { "dpe", GPIOF },
+ [GMBUS_PIN_9_TC1_ICP] = { "tc1", GPIOJ },
+ [GMBUS_PIN_10_TC2_ICP] = { "tc2", GPIOK },
+ [GMBUS_PIN_11_TC3_ICP] = { "tc3", GPIOL },
+ [GMBUS_PIN_12_TC4_ICP] = { "tc4", GPIOM },
+};
+
+static const struct gmbus_pin *get_gmbus_pin(struct drm_i915_private *i915,
+ unsigned int pin)
+{
+ const struct gmbus_pin *pins;
+ size_t size;
+
+ if (INTEL_PCH_TYPE(i915) >= PCH_DG2) {
+ pins = gmbus_pins_dg2;
+ size = ARRAY_SIZE(gmbus_pins_dg2);
+ } else if (INTEL_PCH_TYPE(i915) >= PCH_DG1) {
+ pins = gmbus_pins_dg1;
+ size = ARRAY_SIZE(gmbus_pins_dg1);
+ } else if (INTEL_PCH_TYPE(i915) >= PCH_MTP) {
+ pins = gmbus_pins_mtp;
+ size = ARRAY_SIZE(gmbus_pins_mtp);
+ } else if (INTEL_PCH_TYPE(i915) >= PCH_ICP) {
+ pins = gmbus_pins_icp;
+ size = ARRAY_SIZE(gmbus_pins_icp);
+ } else if (HAS_PCH_CNP(i915)) {
+ pins = gmbus_pins_cnp;
+ size = ARRAY_SIZE(gmbus_pins_cnp);
+ } else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
+ pins = gmbus_pins_bxt;
+ size = ARRAY_SIZE(gmbus_pins_bxt);
+ } else if (DISPLAY_VER(i915) == 9) {
+ pins = gmbus_pins_skl;
+ size = ARRAY_SIZE(gmbus_pins_skl);
+ } else if (IS_BROADWELL(i915)) {
+ pins = gmbus_pins_bdw;
+ size = ARRAY_SIZE(gmbus_pins_bdw);
+ } else {
+ pins = gmbus_pins;
+ size = ARRAY_SIZE(gmbus_pins);
+ }
+
+ if (pin >= size || !pins[pin].name)
+ return NULL;
+
+ return &pins[pin];
+}
+
+bool intel_gmbus_is_valid_pin(struct drm_i915_private *i915, unsigned int pin)
+{
+ return get_gmbus_pin(i915, pin);
+}
+
+/* Intel GPIO access functions */
+
+#define I2C_RISEFALL_TIME 10
+
+static inline struct intel_gmbus *
+to_intel_gmbus(struct i2c_adapter *i2c)
+{
+ return container_of(i2c, struct intel_gmbus, adapter);
+}
+
+void
+intel_gmbus_reset(struct drm_i915_private *i915)
+{
+ intel_de_write(i915, GMBUS0(i915), 0);
+ intel_de_write(i915, GMBUS4(i915), 0);
+}
+
+static void pnv_gmbus_clock_gating(struct drm_i915_private *i915,
+ bool enable)
+{
+ u32 val;
+
+ /* When using bit banging for I2C, this bit needs to be set to 1 */
+ val = intel_de_read(i915, DSPCLK_GATE_D(i915));
+ if (!enable)
+ val |= PNV_GMBUSUNIT_CLOCK_GATE_DISABLE;
+ else
+ val &= ~PNV_GMBUSUNIT_CLOCK_GATE_DISABLE;
+ intel_de_write(i915, DSPCLK_GATE_D(i915), val);
+}
+
+static void pch_gmbus_clock_gating(struct drm_i915_private *i915,
+ bool enable)
+{
+ u32 val;
+
+ val = intel_de_read(i915, SOUTH_DSPCLK_GATE_D);
+ if (!enable)
+ val |= PCH_GMBUSUNIT_CLOCK_GATE_DISABLE;
+ else
+ val &= ~PCH_GMBUSUNIT_CLOCK_GATE_DISABLE;
+ intel_de_write(i915, SOUTH_DSPCLK_GATE_D, val);
+}
+
+static void bxt_gmbus_clock_gating(struct drm_i915_private *i915,
+ bool enable)
+{
+ u32 val;
+
+ val = intel_de_read(i915, GEN9_CLKGATE_DIS_4);
+ if (!enable)
+ val |= BXT_GMBUS_GATING_DIS;
+ else
+ val &= ~BXT_GMBUS_GATING_DIS;
+ intel_de_write(i915, GEN9_CLKGATE_DIS_4, val);
+}
+
+static u32 get_reserved(struct intel_gmbus *bus)
+{
+ struct drm_i915_private *i915 = bus->i915;
+ struct intel_uncore *uncore = &i915->uncore;
+ u32 reserved = 0;
+
+ /* On most chips, these bits must be preserved in software. */
+ if (!IS_I830(i915) && !IS_I845G(i915))
+ reserved = intel_uncore_read_notrace(uncore, bus->gpio_reg) &
+ (GPIO_DATA_PULLUP_DISABLE |
+ GPIO_CLOCK_PULLUP_DISABLE);
+
+ return reserved;
+}
+
+static int get_clock(void *data)
+{
+ struct intel_gmbus *bus = data;
+ struct intel_uncore *uncore = &bus->i915->uncore;
+ u32 reserved = get_reserved(bus);
+
+ intel_uncore_write_notrace(uncore,
+ bus->gpio_reg,
+ reserved | GPIO_CLOCK_DIR_MASK);
+ intel_uncore_write_notrace(uncore, bus->gpio_reg, reserved);
+
+ return (intel_uncore_read_notrace(uncore, bus->gpio_reg) &
+ GPIO_CLOCK_VAL_IN) != 0;
+}
+
+static int get_data(void *data)
+{
+ struct intel_gmbus *bus = data;
+ struct intel_uncore *uncore = &bus->i915->uncore;
+ u32 reserved = get_reserved(bus);
+
+ intel_uncore_write_notrace(uncore,
+ bus->gpio_reg,
+ reserved | GPIO_DATA_DIR_MASK);
+ intel_uncore_write_notrace(uncore, bus->gpio_reg, reserved);
+
+ return (intel_uncore_read_notrace(uncore, bus->gpio_reg) &
+ GPIO_DATA_VAL_IN) != 0;
+}
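+
+/*
+ * In the two getters above, writing the DIR_MASK (write-enable) bit with
+ * the direction value left at 0 switches the pin to input, so the line
+ * floats and the external pull-up determines the sampled level, which is
+ * the usual open-drain I2C idiom.
+ */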
+
+static void set_clock(void *data, int state_high)
+{
+ struct intel_gmbus *bus = data;
+ struct intel_uncore *uncore = &bus->i915->uncore;
+ u32 reserved = get_reserved(bus);
+ u32 clock_bits;
+
+ if (state_high)
+ clock_bits = GPIO_CLOCK_DIR_IN | GPIO_CLOCK_DIR_MASK;
+ else
+ clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
+ GPIO_CLOCK_VAL_MASK;
+
+ intel_uncore_write_notrace(uncore,
+ bus->gpio_reg,
+ reserved | clock_bits);
+ intel_uncore_posting_read(uncore, bus->gpio_reg);
+}
+
+static void set_data(void *data, int state_high)
+{
+ struct intel_gmbus *bus = data;
+ struct intel_uncore *uncore = &bus->i915->uncore;
+ u32 reserved = get_reserved(bus);
+ u32 data_bits;
+
+ if (state_high)
+ data_bits = GPIO_DATA_DIR_IN | GPIO_DATA_DIR_MASK;
+ else
+ data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
+ GPIO_DATA_VAL_MASK;
+
+ intel_uncore_write_notrace(uncore, bus->gpio_reg, reserved | data_bits);
+ intel_uncore_posting_read(uncore, bus->gpio_reg);
+}
+
+static int
+intel_gpio_pre_xfer(struct i2c_adapter *adapter)
+{
+ struct intel_gmbus *bus = to_intel_gmbus(adapter);
+ struct drm_i915_private *i915 = bus->i915;
+
+ intel_gmbus_reset(i915);
+
+ if (IS_PINEVIEW(i915))
+ pnv_gmbus_clock_gating(i915, false);
+
+ set_data(bus, 1);
+ set_clock(bus, 1);
+ udelay(I2C_RISEFALL_TIME);
+ return 0;
+}
+
+static void
+intel_gpio_post_xfer(struct i2c_adapter *adapter)
+{
+ struct intel_gmbus *bus = to_intel_gmbus(adapter);
+ struct drm_i915_private *i915 = bus->i915;
+
+ set_data(bus, 1);
+ set_clock(bus, 1);
+
+ if (IS_PINEVIEW(i915))
+ pnv_gmbus_clock_gating(i915, true);
+}
+
+static void
+intel_gpio_setup(struct intel_gmbus *bus, i915_reg_t gpio_reg)
+{
+ struct i2c_algo_bit_data *algo;
+
+ algo = &bus->bit_algo;
+
+ bus->gpio_reg = gpio_reg;
+ bus->adapter.algo_data = algo;
+ algo->setsda = set_data;
+ algo->setscl = set_clock;
+ algo->getsda = get_data;
+ algo->getscl = get_clock;
+ algo->pre_xfer = intel_gpio_pre_xfer;
+ algo->post_xfer = intel_gpio_post_xfer;
+ algo->udelay = I2C_RISEFALL_TIME;
+ algo->timeout = usecs_to_jiffies(2200);
+ algo->data = bus;
+}
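+
+/*
+ * With the callbacks wired up above, bit-banged transfers run through the
+ * generic i2c-algo-bit state machine (i2c_bit_algo.master_xfer), which
+ * toggles SCL/SDA via set/get_clock and set/get_data and brackets each
+ * transfer with intel_gpio_pre_xfer()/intel_gpio_post_xfer().
+ */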
+
+static bool has_gmbus_irq(struct drm_i915_private *i915)
+{
+ /*
+ * encoder->shutdown() may want to use GMBUS
+ * after irqs have already been disabled.
+ */
+ return HAS_GMBUS_IRQ(i915) && intel_irqs_enabled(i915);
+}
+
+static int gmbus_wait(struct drm_i915_private *i915, u32 status, u32 irq_en)
+{
+ DEFINE_WAIT(wait);
+ u32 gmbus2;
+ int ret;
+
+ /* Important: The hw handles only the first bit, so set only one! Since
+ * we also need to check for NAKs besides the hw ready/idle signal, we
+ * need to wake up periodically and check that ourselves.
+ */
+ if (!has_gmbus_irq(i915))
+ irq_en = 0;
+
+ add_wait_queue(&i915->display.gmbus.wait_queue, &wait);
+ intel_de_write_fw(i915, GMBUS4(i915), irq_en);
+
+ status |= GMBUS_SATOER;
+ ret = wait_for_us((gmbus2 = intel_de_read_fw(i915, GMBUS2(i915))) & status,
+ 2);
+ if (ret)
+ ret = wait_for((gmbus2 = intel_de_read_fw(i915, GMBUS2(i915))) & status,
+ 50);
+
+ intel_de_write_fw(i915, GMBUS4(i915), 0);
+ remove_wait_queue(&i915->display.gmbus.wait_queue, &wait);
+
+ if (gmbus2 & GMBUS_SATOER)
+ return -ENXIO;
+
+ return ret;
+}
+
+static int
+gmbus_wait_idle(struct drm_i915_private *i915)
+{
+ DEFINE_WAIT(wait);
+ u32 irq_enable;
+ int ret;
+
+ /* Important: The hw handles only the first bit, so set only one! */
+ irq_enable = 0;
+ if (has_gmbus_irq(i915))
+ irq_enable = GMBUS_IDLE_EN;
+
+ add_wait_queue(&i915->display.gmbus.wait_queue, &wait);
+ intel_de_write_fw(i915, GMBUS4(i915), irq_enable);
+
+ ret = intel_wait_for_register_fw(&i915->uncore,
+ GMBUS2(i915), GMBUS_ACTIVE, 0,
+ 10);
+
+ intel_de_write_fw(i915, GMBUS4(i915), 0);
+ remove_wait_queue(&i915->display.gmbus.wait_queue, &wait);
+
+ return ret;
+}
+
+static unsigned int gmbus_max_xfer_size(struct drm_i915_private *i915)
+{
+ return DISPLAY_VER(i915) >= 9 ? GEN9_GMBUS_BYTE_COUNT_MAX :
+ GMBUS_BYTE_COUNT_MAX;
+}
+
+static int
+gmbus_xfer_read_chunk(struct drm_i915_private *i915,
+ unsigned short addr, u8 *buf, unsigned int len,
+ u32 gmbus0_reg, u32 gmbus1_index)
+{
+ unsigned int size = len;
+ bool burst_read = len > gmbus_max_xfer_size(i915);
+ bool extra_byte_added = false;
+
+ if (burst_read) {
+ /*
+ * As per the HW spec, a 512 byte burst read requires reading one
+ * extra byte and ignoring it.
+ */
+ if (len == 512) {
+ extra_byte_added = true;
+ len++;
+ }
+ size = len % 256 + 256;
+ intel_de_write_fw(i915, GMBUS0(i915),
+ gmbus0_reg | GMBUS_BYTE_CNT_OVERRIDE);
+ }
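+ /*
+ * Worked example: a 767 byte burst read (the
+ * INTEL_GMBUS_BURST_READ_MAX_LEN cap below) programs
+ * size = 767 % 256 + 256 = 511, while the copy loop
+ * below still drains all 767 bytes.
+ */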
+
+ intel_de_write_fw(i915, GMBUS1(i915),
+ gmbus1_index | GMBUS_CYCLE_WAIT | (size << GMBUS_BYTE_COUNT_SHIFT) | (addr << GMBUS_SLAVE_ADDR_SHIFT) | GMBUS_SLAVE_READ | GMBUS_SW_RDY);
+ while (len) {
+ int ret;
+ u32 val, loop = 0;
+
+ ret = gmbus_wait(i915, GMBUS_HW_RDY, GMBUS_HW_RDY_EN);
+ if (ret)
+ return ret;
+
+ val = intel_de_read_fw(i915, GMBUS3(i915));
+ do {
+ if (extra_byte_added && len == 1)
+ break;
+
+ *buf++ = val & 0xff;
+ val >>= 8;
+ } while (--len && ++loop < 4);
+
+ if (burst_read && len == size - 4)
+ /* Reset the override bit */
+ intel_de_write_fw(i915, GMBUS0(i915), gmbus0_reg);
+ }
+
+ return 0;
+}
+
+/*
+ * The HW spec says that 512 byte burst reads need special treatment, but is
+ * silent about other multiples of 256 bytes. We also couldn't locate an I2C
+ * slave that supports such lengthy burst reads to experiment with.
+ *
+ * So until HW support is clarified, avoid burst read lengths that are a
+ * multiple of 256 bytes (other than 512) by capping the length at 767 bytes.
+ */
+#define INTEL_GMBUS_BURST_READ_MAX_LEN 767U
+
+static int
+gmbus_xfer_read(struct drm_i915_private *i915, struct i2c_msg *msg,
+ u32 gmbus0_reg, u32 gmbus1_index)
+{
+ u8 *buf = msg->buf;
+ unsigned int rx_size = msg->len;
+ unsigned int len;
+ int ret;
+
+ do {
+ if (HAS_GMBUS_BURST_READ(i915))
+ len = min(rx_size, INTEL_GMBUS_BURST_READ_MAX_LEN);
+ else
+ len = min(rx_size, gmbus_max_xfer_size(i915));
+
+ ret = gmbus_xfer_read_chunk(i915, msg->addr, buf, len,
+ gmbus0_reg, gmbus1_index);
+ if (ret)
+ return ret;
+
+ rx_size -= len;
+ buf += len;
+ } while (rx_size != 0);
+
+ return 0;
+}
+
+static int
+gmbus_xfer_write_chunk(struct drm_i915_private *i915,
+ unsigned short addr, u8 *buf, unsigned int len,
+ u32 gmbus1_index)
+{
+ unsigned int chunk_size = len;
+ u32 val, loop;
+
+ val = loop = 0;
+ while (len && loop < 4) {
+ val |= *buf++ << (8 * loop++);
+ len -= 1;
+ }
+
+ intel_de_write_fw(i915, GMBUS3(i915), val);
+ intel_de_write_fw(i915, GMBUS1(i915),
+ gmbus1_index | GMBUS_CYCLE_WAIT | (chunk_size << GMBUS_BYTE_COUNT_SHIFT) | (addr << GMBUS_SLAVE_ADDR_SHIFT) | GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
+ while (len) {
+ int ret;
+
+ val = loop = 0;
+ do {
+ val |= *buf++ << (8 * loop);
+ } while (--len && ++loop < 4);
+
+ intel_de_write_fw(i915, GMBUS3(i915), val);
+
+ ret = gmbus_wait(i915, GMBUS_HW_RDY, GMBUS_HW_RDY_EN);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+gmbus_xfer_write(struct drm_i915_private *i915, struct i2c_msg *msg,
+ u32 gmbus1_index)
+{
+ u8 *buf = msg->buf;
+ unsigned int tx_size = msg->len;
+ unsigned int len;
+ int ret;
+
+ do {
+ len = min(tx_size, gmbus_max_xfer_size(i915));
+
+ ret = gmbus_xfer_write_chunk(i915, msg->addr, buf, len,
+ gmbus1_index);
+ if (ret)
+ return ret;
+
+ buf += len;
+ tx_size -= len;
+ } while (tx_size != 0);
+
+ return 0;
+}
+
+/*
+ * The gmbus controller can combine a 1 or 2 byte write with another read/write
+ * that immediately follows it by using an "INDEX" cycle.
+ */
+static bool
+gmbus_is_index_xfer(struct i2c_msg *msgs, int i, int num)
+{
+ return (i + 1 < num &&
+ msgs[i].addr == msgs[i + 1].addr &&
+ !(msgs[i].flags & I2C_M_RD) &&
+ (msgs[i].len == 1 || msgs[i].len == 2) &&
+ msgs[i + 1].len > 0);
+}
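+
+/*
+ * The classic EDID read issued by the drm core matches this pattern: a one
+ * byte offset write to DDC address 0x50 immediately followed by a 128 byte
+ * read from the same address, folded into a single INDEX cycle here.
+ */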
+
+static int
+gmbus_index_xfer(struct drm_i915_private *i915, struct i2c_msg *msgs,
+ u32 gmbus0_reg)
+{
+ u32 gmbus1_index = 0;
+ u32 gmbus5 = 0;
+ int ret;
+
+ if (msgs[0].len == 2)
+ gmbus5 = GMBUS_2BYTE_INDEX_EN |
+ msgs[0].buf[1] | (msgs[0].buf[0] << 8);
+ if (msgs[0].len == 1)
+ gmbus1_index = GMBUS_CYCLE_INDEX |
+ (msgs[0].buf[0] << GMBUS_SLAVE_INDEX_SHIFT);
+
+ /* GMBUS5 holds 16-bit index */
+ if (gmbus5)
+ intel_de_write_fw(i915, GMBUS5(i915), gmbus5);
+
+ if (msgs[1].flags & I2C_M_RD)
+ ret = gmbus_xfer_read(i915, &msgs[1], gmbus0_reg,
+ gmbus1_index);
+ else
+ ret = gmbus_xfer_write(i915, &msgs[1], gmbus1_index);
+
+ /* Clear GMBUS5 after each index transfer */
+ if (gmbus5)
+ intel_de_write_fw(i915, GMBUS5(i915), 0);
+
+ return ret;
+}
+
+static int
+do_gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num,
+ u32 gmbus0_source)
+{
+ struct intel_gmbus *bus = to_intel_gmbus(adapter);
+ struct drm_i915_private *i915 = bus->i915;
+ int i = 0, inc, try = 0;
+ int ret = 0;
+
+ /* Display WA #0868: skl,bxt,kbl,cfl,glk */
+ if (IS_GEMINILAKE(i915) || IS_BROXTON(i915))
+ bxt_gmbus_clock_gating(i915, false);
+ else if (HAS_PCH_SPT(i915) || HAS_PCH_CNP(i915))
+ pch_gmbus_clock_gating(i915, false);
+
+retry:
+ intel_de_write_fw(i915, GMBUS0(i915), gmbus0_source | bus->reg0);
+
+ for (; i < num; i += inc) {
+ inc = 1;
+ if (gmbus_is_index_xfer(msgs, i, num)) {
+ ret = gmbus_index_xfer(i915, &msgs[i],
+ gmbus0_source | bus->reg0);
+ inc = 2; /* an index transmission is two msgs */
+ } else if (msgs[i].flags & I2C_M_RD) {
+ ret = gmbus_xfer_read(i915, &msgs[i],
+ gmbus0_source | bus->reg0, 0);
+ } else {
+ ret = gmbus_xfer_write(i915, &msgs[i], 0);
+ }
+
+ if (!ret)
+ ret = gmbus_wait(i915,
+ GMBUS_HW_WAIT_PHASE, GMBUS_HW_WAIT_EN);
+ if (ret == -ETIMEDOUT)
+ goto timeout;
+ else if (ret)
+ goto clear_err;
+ }
+
+ /* Generate a STOP condition on the bus. Note that gmbus can't generate
+ * a STOP on the very first cycle. To simplify the code we
+ * unconditionally generate the STOP condition with an additional gmbus
+ * cycle. */
+ intel_de_write_fw(i915, GMBUS1(i915), GMBUS_CYCLE_STOP | GMBUS_SW_RDY);
+
+ /* Mark the GMBUS interface as disabled after waiting for idle.
+ * We will re-enable it at the start of the next xfer,
+ * till then let it sleep.
+ */
+ if (gmbus_wait_idle(i915)) {
+ drm_dbg_kms(&i915->drm,
+ "GMBUS [%s] timed out waiting for idle\n",
+ adapter->name);
+ ret = -ETIMEDOUT;
+ }
+ intel_de_write_fw(i915, GMBUS0(i915), 0);
+ ret = ret ?: i;
+ goto out;
+
+clear_err:
+ /*
+ * Wait for bus to IDLE before clearing NAK.
+ * If we clear the NAK while bus is still active, then it will stay
+ * active and the next transaction may fail.
+ *
+ * If no ACK is received during the address phase of a transaction, the
+ * adapter must report -ENXIO. It is not clear what to return if no ACK
+ * is received at other times. But we have to be careful to not return
+ * spurious -ENXIO because that will prevent i2c and drm edid functions
+ * from retrying. So return -ENXIO only when gmbus properly quiesces -
+ * timing out seems to happen when there _is_ a ddc chip present, but
+ * it's slow responding and only answers on the 2nd retry.
+ */
+ ret = -ENXIO;
+ if (gmbus_wait_idle(i915)) {
+ drm_dbg_kms(&i915->drm,
+ "GMBUS [%s] timed out after NAK\n",
+ adapter->name);
+ ret = -ETIMEDOUT;
+ }
+
+ /* Toggle the Software Clear Interrupt bit. This has the effect
+ * of resetting the GMBUS controller and so clearing the
+ * BUS_ERROR raised by the slave's NAK.
+ */
+ intel_de_write_fw(i915, GMBUS1(i915), GMBUS_SW_CLR_INT);
+ intel_de_write_fw(i915, GMBUS1(i915), 0);
+ intel_de_write_fw(i915, GMBUS0(i915), 0);
+
+ drm_dbg_kms(&i915->drm, "GMBUS [%s] NAK for addr: %04x %c(%d)\n",
+ adapter->name, msgs[i].addr,
+ (msgs[i].flags & I2C_M_RD) ? 'r' : 'w', msgs[i].len);
+
+ /*
+ * Passive adapters sometimes NAK the first probe. Retry the first
+ * message once on -ENXIO for GMBUS transfers; the bit banging algorithm
+ * has retries internally. See also the retry loop in
+ * drm_do_probe_ddc_edid, which bails out on the first -ENXIO.
+ */
+ if (ret == -ENXIO && i == 0 && try++ == 0) {
+ drm_dbg_kms(&i915->drm,
+ "GMBUS [%s] NAK on first message, retry\n",
+ adapter->name);
+ goto retry;
+ }
+
+ goto out;
+
+timeout:
+ drm_dbg_kms(&i915->drm,
+ "GMBUS [%s] timed out, falling back to bit banging on pin %d\n",
+ bus->adapter.name, bus->reg0 & 0xff);
+ intel_de_write_fw(i915, GMBUS0(i915), 0);
+
+ /*
+ * Hardware may not support GMBUS over these pins; try GPIO bit-banging
+ * instead. Return -EAGAIN to have the i2c core retry.
+ */
+ ret = -EAGAIN;
+
+out:
+ /* Display WA #0868: skl,bxt,kbl,cfl,glk */
+ if (IS_GEMINILAKE(i915) || IS_BROXTON(i915))
+ bxt_gmbus_clock_gating(i915, true);
+ else if (HAS_PCH_SPT(i915) || HAS_PCH_CNP(i915))
+ pch_gmbus_clock_gating(i915, true);
+
+ return ret;
+}
+
+static int
+gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
+{
+ struct intel_gmbus *bus = to_intel_gmbus(adapter);
+ struct drm_i915_private *i915 = bus->i915;
+ intel_wakeref_t wakeref;
+ int ret;
+
+ wakeref = intel_display_power_get(i915, POWER_DOMAIN_GMBUS);
+
+ if (bus->force_bit) {
+ ret = i2c_bit_algo.master_xfer(adapter, msgs, num);
+ if (ret < 0)
+ bus->force_bit &= ~GMBUS_FORCE_BIT_RETRY;
+ } else {
+ ret = do_gmbus_xfer(adapter, msgs, num, 0);
+ if (ret == -EAGAIN)
+ bus->force_bit |= GMBUS_FORCE_BIT_RETRY;
+ }
+
+ intel_display_power_put(i915, POWER_DOMAIN_GMBUS, wakeref);
+
+ return ret;
+}
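+
+/*
+ * Note that force_bit combines a reference count with the
+ * GMBUS_FORCE_BIT_RETRY flag: a GMBUS timeout sets the flag so following
+ * transfers use bit-banging, and a subsequent bit-banging failure clears
+ * it again, giving GMBUS another chance.
+ */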
+
+int intel_gmbus_output_aksv(struct i2c_adapter *adapter)
+{
+ struct intel_gmbus *bus = to_intel_gmbus(adapter);
+ struct drm_i915_private *i915 = bus->i915;
+ u8 cmd = DRM_HDCP_DDC_AKSV;
+ u8 buf[DRM_HDCP_KSV_LEN] = { 0 };
+ struct i2c_msg msgs[] = {
+ {
+ .addr = DRM_HDCP_DDC_ADDR,
+ .flags = 0,
+ .len = sizeof(cmd),
+ .buf = &cmd,
+ },
+ {
+ .addr = DRM_HDCP_DDC_ADDR,
+ .flags = 0,
+ .len = sizeof(buf),
+ .buf = buf,
+ }
+ };
+ intel_wakeref_t wakeref;
+ int ret;
+
+ wakeref = intel_display_power_get(i915, POWER_DOMAIN_GMBUS);
+ mutex_lock(&i915->display.gmbus.mutex);
+
+ /*
+ * In order to output Aksv to the receiver, use an indexed write to
+ * pass the i2c command, and tell GMBUS to use the HW-provided value
+ * instead of sourcing GMBUS3 for the data.
+ */
+ ret = do_gmbus_xfer(adapter, msgs, ARRAY_SIZE(msgs), GMBUS_AKSV_SELECT);
+
+ mutex_unlock(&i915->display.gmbus.mutex);
+ intel_display_power_put(i915, POWER_DOMAIN_GMBUS, wakeref);
+
+ return ret;
+}
+
+static u32 gmbus_func(struct i2c_adapter *adapter)
+{
+ return i2c_bit_algo.functionality(adapter) &
+ (I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
+ /* I2C_FUNC_10BIT_ADDR | */
+ I2C_FUNC_SMBUS_READ_BLOCK_DATA |
+ I2C_FUNC_SMBUS_BLOCK_PROC_CALL);
+}
+
+static const struct i2c_algorithm gmbus_algorithm = {
+ .master_xfer = gmbus_xfer,
+ .functionality = gmbus_func
+};
+
+static void gmbus_lock_bus(struct i2c_adapter *adapter,
+ unsigned int flags)
+{
+ struct intel_gmbus *bus = to_intel_gmbus(adapter);
+ struct drm_i915_private *i915 = bus->i915;
+
+ mutex_lock(&i915->display.gmbus.mutex);
+}
+
+static int gmbus_trylock_bus(struct i2c_adapter *adapter,
+ unsigned int flags)
+{
+ struct intel_gmbus *bus = to_intel_gmbus(adapter);
+ struct drm_i915_private *i915 = bus->i915;
+
+ return mutex_trylock(&i915->display.gmbus.mutex);
+}
+
+static void gmbus_unlock_bus(struct i2c_adapter *adapter,
+ unsigned int flags)
+{
+ struct intel_gmbus *bus = to_intel_gmbus(adapter);
+ struct drm_i915_private *i915 = bus->i915;
+
+ mutex_unlock(&i915->display.gmbus.mutex);
+}
+
+static const struct i2c_lock_operations gmbus_lock_ops = {
+ .lock_bus = gmbus_lock_bus,
+ .trylock_bus = gmbus_trylock_bus,
+ .unlock_bus = gmbus_unlock_bus,
+};
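+
+/*
+ * All adapters share the single display.gmbus.mutex, so transfers on
+ * different pins serialize against each other; the hardware has only one
+ * GMBUS engine multiplexed across the pin pairs.
+ */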
+
+/**
+ * intel_gmbus_setup - instantiate all Intel i2c GMBuses
+ * @i915: i915 device private
+ */
+int intel_gmbus_setup(struct drm_i915_private *i915)
+{
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
+ unsigned int pin;
+ int ret;
+
+ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+ i915->display.gmbus.mmio_base = VLV_DISPLAY_BASE;
+ else if (!HAS_GMCH(i915))
+ /*
+ * Broxton uses the same PCH offsets for South Display Engine,
+ * even though it doesn't have a PCH.
+ */
+ i915->display.gmbus.mmio_base = PCH_DISPLAY_BASE;
+
+ mutex_init(&i915->display.gmbus.mutex);
+ init_waitqueue_head(&i915->display.gmbus.wait_queue);
+
+ for (pin = 0; pin < ARRAY_SIZE(i915->display.gmbus.bus); pin++) {
+ const struct gmbus_pin *gmbus_pin;
+ struct intel_gmbus *bus;
+
+ gmbus_pin = get_gmbus_pin(i915, pin);
+ if (!gmbus_pin)
+ continue;
+
+ bus = kzalloc(sizeof(*bus), GFP_KERNEL);
+ if (!bus) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ bus->adapter.owner = THIS_MODULE;
+ bus->adapter.class = I2C_CLASS_DDC;
+ snprintf(bus->adapter.name,
+ sizeof(bus->adapter.name),
+ "i915 gmbus %s", gmbus_pin->name);
+
+ bus->adapter.dev.parent = &pdev->dev;
+ bus->i915 = i915;
+
+ bus->adapter.algo = &gmbus_algorithm;
+ bus->adapter.lock_ops = &gmbus_lock_ops;
+
+ /*
+ * We wish to retry with bit banging
+ * after a timed out GMBUS attempt.
+ */
+ bus->adapter.retries = 1;
+
+ /* By default use a conservative clock rate */
+ bus->reg0 = pin | GMBUS_RATE_100KHZ;
+
+ /* gmbus seems to be broken on i830 */
+ if (IS_I830(i915))
+ bus->force_bit = 1;
+
+ intel_gpio_setup(bus, GPIO(i915, gmbus_pin->gpio));
+
+ ret = i2c_add_adapter(&bus->adapter);
+ if (ret) {
+ kfree(bus);
+ goto err;
+ }
+
+ i915->display.gmbus.bus[pin] = bus;
+ }
+
+ intel_gmbus_reset(i915);
+
+ return 0;
+
+err:
+ intel_gmbus_teardown(i915);
+
+ return ret;
+}
+
+struct i2c_adapter *intel_gmbus_get_adapter(struct drm_i915_private *i915,
+ unsigned int pin)
+{
+ if (drm_WARN_ON(&i915->drm, pin >= ARRAY_SIZE(i915->display.gmbus.bus) ||
+ !i915->display.gmbus.bus[pin]))
+ return NULL;
+
+ return &i915->display.gmbus.bus[pin]->adapter;
+}
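+
+/*
+ * Typical usage (sketch): look up the adapter for the VBT-provided pin and
+ * use the regular i2c/drm helpers, e.g.
+ *
+ *	struct i2c_adapter *ddc = intel_gmbus_get_adapter(i915, pin);
+ *	struct edid *edid = ddc ? drm_get_edid(&connector->base, ddc) : NULL;
+ */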
+
+void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
+{
+ struct intel_gmbus *bus = to_intel_gmbus(adapter);
+ struct drm_i915_private *i915 = bus->i915;
+
+ mutex_lock(&i915->display.gmbus.mutex);
+
+ bus->force_bit += force_bit ? 1 : -1;
+ drm_dbg_kms(&i915->drm,
+ "%sabling bit-banging on %s. force bit now %d\n",
+ force_bit ? "en" : "dis", adapter->name,
+ bus->force_bit);
+
+ mutex_unlock(&i915->display.gmbus.mutex);
+}
+
+bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
+{
+ struct intel_gmbus *bus = to_intel_gmbus(adapter);
+
+ return bus->force_bit;
+}
+
+void intel_gmbus_teardown(struct drm_i915_private *i915)
+{
+ unsigned int pin;
+
+ for (pin = 0; pin < ARRAY_SIZE(i915->display.gmbus.bus); pin++) {
+ struct intel_gmbus *bus;
+
+ bus = i915->display.gmbus.bus[pin];
+ if (!bus)
+ continue;
+
+ i2c_del_adapter(&bus->adapter);
+
+ kfree(bus);
+ i915->display.gmbus.bus[pin] = NULL;
+ }
+}
diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.h b/drivers/gpu/drm/i915/display/intel_gmbus.h
new file mode 100644
index 000000000..20f704bd4
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_gmbus.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_GMBUS_H__
+#define __INTEL_GMBUS_H__
+
+#include <linux/types.h>
+
+struct drm_i915_private;
+struct i2c_adapter;
+
+#define GMBUS_PIN_DISABLED 0
+#define GMBUS_PIN_SSC 1
+#define GMBUS_PIN_VGADDC 2
+#define GMBUS_PIN_PANEL 3
+#define GMBUS_PIN_DPD_CHV 3 /* HDMID_CHV */
+#define GMBUS_PIN_DPC 4 /* HDMIC */
+#define GMBUS_PIN_DPB 5 /* SDVO, HDMIB */
+#define GMBUS_PIN_DPD 6 /* HDMID */
+#define GMBUS_PIN_RESERVED 7 /* 7 reserved */
+#define GMBUS_PIN_1_BXT 1 /* BXT+ (atom) and CNP+ (big core) */
+#define GMBUS_PIN_2_BXT 2
+#define GMBUS_PIN_3_BXT 3
+#define GMBUS_PIN_4_CNP 4
+#define GMBUS_PIN_5_MTP 5
+#define GMBUS_PIN_9_TC1_ICP 9
+#define GMBUS_PIN_10_TC2_ICP 10
+#define GMBUS_PIN_11_TC3_ICP 11
+#define GMBUS_PIN_12_TC4_ICP 12
+#define GMBUS_PIN_13_TC5_TGP 13
+#define GMBUS_PIN_14_TC6_TGP 14
+
+#define GMBUS_NUM_PINS 15 /* including 0 */
+
+int intel_gmbus_setup(struct drm_i915_private *dev_priv);
+void intel_gmbus_teardown(struct drm_i915_private *dev_priv);
+bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv,
+ unsigned int pin);
+int intel_gmbus_output_aksv(struct i2c_adapter *adapter);
+
+struct i2c_adapter *
+intel_gmbus_get_adapter(struct drm_i915_private *dev_priv, unsigned int pin);
+void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
+bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter);
+void intel_gmbus_reset(struct drm_i915_private *dev_priv);
+
+#endif /* __INTEL_GMBUS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_gmbus_regs.h b/drivers/gpu/drm/i915/display/intel_gmbus_regs.h
new file mode 100644
index 000000000..53aacbda9
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_gmbus_regs.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __INTEL_GMBUS_REGS_H__
+#define __INTEL_GMBUS_REGS_H__
+
+#include "i915_reg_defs.h"
+
+#define GMBUS_MMIO_BASE(__i915) ((__i915)->display.gmbus.mmio_base)
+
+#define GPIO(__i915, gpio) _MMIO(GMBUS_MMIO_BASE(__i915) + 0x5010 + 4 * (gpio))
+#define GPIO_CLOCK_DIR_MASK (1 << 0)
+#define GPIO_CLOCK_DIR_IN (0 << 1)
+#define GPIO_CLOCK_DIR_OUT (1 << 1)
+#define GPIO_CLOCK_VAL_MASK (1 << 2)
+#define GPIO_CLOCK_VAL_OUT (1 << 3)
+#define GPIO_CLOCK_VAL_IN (1 << 4)
+#define GPIO_CLOCK_PULLUP_DISABLE (1 << 5)
+#define GPIO_DATA_DIR_MASK (1 << 8)
+#define GPIO_DATA_DIR_IN (0 << 9)
+#define GPIO_DATA_DIR_OUT (1 << 9)
+#define GPIO_DATA_VAL_MASK (1 << 10)
+#define GPIO_DATA_VAL_OUT (1 << 11)
+#define GPIO_DATA_VAL_IN (1 << 12)
+#define GPIO_DATA_PULLUP_DISABLE (1 << 13)
+
+/* clock/port select */
+#define GMBUS0(__i915) _MMIO(GMBUS_MMIO_BASE(__i915) + 0x5100)
+#define GMBUS_AKSV_SELECT (1 << 11)
+#define GMBUS_RATE_100KHZ (0 << 8)
+#define GMBUS_RATE_50KHZ (1 << 8)
+#define GMBUS_RATE_400KHZ (2 << 8) /* reserved on Pineview */
+#define GMBUS_RATE_1MHZ (3 << 8) /* reserved on Pineview */
+#define GMBUS_HOLD_EXT (1 << 7) /* 300ns hold time, rsvd on Pineview */
+#define GMBUS_BYTE_CNT_OVERRIDE (1 << 6)
+
+/* command/status */
+#define GMBUS1(__i915) _MMIO(GMBUS_MMIO_BASE(__i915) + 0x5104)
+#define GMBUS_SW_CLR_INT (1 << 31)
+#define GMBUS_SW_RDY (1 << 30)
+#define GMBUS_ENT (1 << 29) /* enable timeout */
+#define GMBUS_CYCLE_NONE (0 << 25)
+#define GMBUS_CYCLE_WAIT (1 << 25)
+#define GMBUS_CYCLE_INDEX (2 << 25)
+#define GMBUS_CYCLE_STOP (4 << 25)
+#define GMBUS_BYTE_COUNT_SHIFT 16
+#define GMBUS_BYTE_COUNT_MAX 256U
+#define GEN9_GMBUS_BYTE_COUNT_MAX 511U
+#define GMBUS_SLAVE_INDEX_SHIFT 8
+#define GMBUS_SLAVE_ADDR_SHIFT 1
+#define GMBUS_SLAVE_READ (1 << 0)
+#define GMBUS_SLAVE_WRITE (0 << 0)
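+/*
+ * For example, gmbus_xfer_read_chunk() starts a 128 byte read from
+ * address 0x50 by writing
+ * GMBUS_CYCLE_WAIT | (128 << GMBUS_BYTE_COUNT_SHIFT) |
+ * (0x50 << GMBUS_SLAVE_ADDR_SHIFT) | GMBUS_SLAVE_READ | GMBUS_SW_RDY
+ * to GMBUS1.
+ */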
+
+/* status */
+#define GMBUS2(__i915) _MMIO(GMBUS_MMIO_BASE(__i915) + 0x5108)
+#define GMBUS_INUSE (1 << 15)
+#define GMBUS_HW_WAIT_PHASE (1 << 14)
+#define GMBUS_STALL_TIMEOUT (1 << 13)
+#define GMBUS_INT (1 << 12)
+#define GMBUS_HW_RDY (1 << 11)
+#define GMBUS_SATOER (1 << 10)
+#define GMBUS_ACTIVE (1 << 9)
+
+/* data buffer bytes 3-0 */
+#define GMBUS3(__i915) _MMIO(GMBUS_MMIO_BASE(__i915) + 0x510c)
+
+/* interrupt mask (Pineview+) */
+#define GMBUS4(__i915) _MMIO(GMBUS_MMIO_BASE(__i915) + 0x5110)
+#define GMBUS_SLAVE_TIMEOUT_EN (1 << 4)
+#define GMBUS_NAK_EN (1 << 3)
+#define GMBUS_IDLE_EN (1 << 2)
+#define GMBUS_HW_WAIT_EN (1 << 1)
+#define GMBUS_HW_RDY_EN (1 << 0)
+
+/* byte index */
+#define GMBUS5(__i915) _MMIO(GMBUS_MMIO_BASE(__i915) + 0x5120)
+#define GMBUS_2BYTE_INDEX_EN (1 << 31)
+
+#endif /* __INTEL_GMBUS_REGS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
new file mode 100644
index 000000000..6406fd487
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -0,0 +1,2586 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (C) 2017 Google, Inc.
+ * Copyright © 2017-2019, Intel Corporation.
+ *
+ * Authors:
+ * Sean Paul <seanpaul@chromium.org>
+ * Ramalingam C <ramalingam.c@intel.com>
+ */
+
+#include <linux/component.h>
+#include <linux/i2c.h>
+#include <linux/random.h>
+
+#include <drm/display/drm_hdcp_helper.h>
+#include <drm/i915_component.h>
+
+#include "i915_drv.h"
+#include "i915_reg.h"
+#include "intel_connector.h"
+#include "intel_de.h"
+#include "intel_display_power.h"
+#include "intel_display_power_well.h"
+#include "intel_display_types.h"
+#include "intel_hdcp.h"
+#include "intel_hdcp_regs.h"
+#include "intel_pcode.h"
+
+#define KEY_LOAD_TRIES 5
+#define HDCP2_LC_RETRY_CNT 3
+
+static int intel_conn_to_vcpi(struct intel_connector *connector)
+{
+ struct drm_dp_mst_topology_mgr *mgr;
+ struct drm_dp_mst_atomic_payload *payload;
+ struct drm_dp_mst_topology_state *mst_state;
+ int vcpi = 0;
+
+ /* For HDMI and DP SST this is forced to be 0x0. */
+ if (!connector->port)
+ return 0;
+ mgr = connector->port->mgr;
+
+ drm_modeset_lock(&mgr->base.lock, NULL);
+ mst_state = to_drm_dp_mst_topology_state(mgr->base.state);
+ payload = drm_atomic_get_mst_payload_state(mst_state, connector->port);
+ if (drm_WARN_ON(mgr->dev, !payload))
+ goto out;
+
+ vcpi = payload->vcpi;
+ if (drm_WARN_ON(mgr->dev, vcpi < 0)) {
+ vcpi = 0;
+ goto out;
+ }
+out:
+ drm_modeset_unlock(&mgr->base.lock);
+ return vcpi;
+}
+
+/*
+ * intel_hdcp_required_content_stream selects the highest common HDCP
+ * content_type possible for all streams in a DP MST topology, because the
+ * security f/w has no provision to mark the content_type of each stream
+ * separately: it marks all available streams with the content_type provided
+ * at the time of port authentication. This may prevent userspace from using
+ * type1 content on an HDCP 2.2 capable sink when other sinks in the DP MST
+ * topology are not HDCP 2.2 capable. Though not compulsory, the security fw
+ * should change its policy to allow different content_types per stream.
+ */
+static int
+intel_hdcp_required_content_stream(struct intel_digital_port *dig_port)
+{
+ struct drm_connector_list_iter conn_iter;
+ struct intel_digital_port *conn_dig_port;
+ struct intel_connector *connector;
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
+ bool enforce_type0 = false;
+ int k;
+
+ data->k = 0;
+
+ if (dig_port->hdcp_auth_status)
+ return 0;
+
+ drm_connector_list_iter_begin(&i915->drm, &conn_iter);
+ for_each_intel_connector_iter(connector, &conn_iter) {
+ if (connector->base.status == connector_status_disconnected)
+ continue;
+
+ if (!intel_encoder_is_mst(intel_attached_encoder(connector)))
+ continue;
+
+ conn_dig_port = intel_attached_dig_port(connector);
+ if (conn_dig_port != dig_port)
+ continue;
+
+ if (!enforce_type0 && !dig_port->hdcp_mst_type1_capable)
+ enforce_type0 = true;
+
+ data->streams[data->k].stream_id = intel_conn_to_vcpi(connector);
+ data->k++;
+
+ /* if there is only one active stream */
+ if (dig_port->dp.active_mst_links <= 1)
+ break;
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ if (drm_WARN_ON(&i915->drm, data->k > INTEL_NUM_PIPES(i915) || data->k == 0))
+ return -EINVAL;
+
+ /*
+ * Apply common protection level across all streams in DP MST Topology.
+ * Use highest supported content type for all streams in DP MST Topology.
+ */
+ for (k = 0; k < data->k; k++)
+ data->streams[k].stream_type =
+ enforce_type0 ? DRM_MODE_HDCP_CONTENT_TYPE0 : DRM_MODE_HDCP_CONTENT_TYPE1;
+
+ return 0;
+}
+
+static int intel_hdcp_prepare_streams(struct intel_connector *connector)
+{
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ int ret;
+
+ if (!intel_encoder_is_mst(intel_attached_encoder(connector))) {
+ data->k = 1;
+ data->streams[0].stream_type = hdcp->content_type;
+ } else {
+ ret = intel_hdcp_required_content_stream(dig_port);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static
+bool intel_hdcp_is_ksv_valid(u8 *ksv)
+{
+ int i, ones = 0;
+ /* KSV has 20 1's and 20 0's */
+ for (i = 0; i < DRM_HDCP_KSV_LEN; i++)
+ ones += hweight8(ksv[i]);
+ if (ones != 20)
+ return false;
+
+ return true;
+}
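+
+/*
+ * For example, a KSV of five 0x55 bytes has hweight8(0x55) * 5 = 4 * 5 = 20
+ * bits set and would pass this check; all-zero or all-ones KSVs are rejected.
+ */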
+
+static
+int intel_hdcp_read_valid_bksv(struct intel_digital_port *dig_port,
+ const struct intel_hdcp_shim *shim, u8 *bksv)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ int ret, i, tries = 2;
+
+ /* HDCP spec states that we must retry the bksv if it is invalid */
+ for (i = 0; i < tries; i++) {
+ ret = shim->read_bksv(dig_port, bksv);
+ if (ret)
+ return ret;
+ if (intel_hdcp_is_ksv_valid(bksv))
+ break;
+ }
+ if (i == tries) {
+ drm_dbg_kms(&i915->drm, "Bksv is invalid\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+/* Is HDCP1.4 supported by both the platform and the sink? */
+bool intel_hdcp_capable(struct intel_connector *connector)
+{
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ const struct intel_hdcp_shim *shim = connector->hdcp.shim;
+ bool capable = false;
+ u8 bksv[5];
+
+ if (!shim)
+ return capable;
+
+ if (shim->hdcp_capable) {
+ shim->hdcp_capable(dig_port, &capable);
+ } else {
+ if (!intel_hdcp_read_valid_bksv(dig_port, shim, bksv))
+ capable = true;
+ }
+
+ return capable;
+}
+
+/* Is HDCP2.2 supported by both the platform and the sink? */
+bool intel_hdcp2_capable(struct intel_connector *connector)
+{
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ bool capable = false;
+
+ /* I915 support for HDCP2.2 */
+ if (!hdcp->hdcp2_supported)
+ return false;
+
+ /* The MEI component interface must be bound and ready */
+ mutex_lock(&dev_priv->display.hdcp.comp_mutex);
+ if (!dev_priv->display.hdcp.comp_added || !dev_priv->display.hdcp.master) {
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
+ return false;
+ }
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
+
+ /* Sink's capability for HDCP2.2 */
+ hdcp->shim->hdcp_2_2_capable(dig_port, &capable);
+
+ return capable;
+}
+
+static bool intel_hdcp_in_use(struct drm_i915_private *dev_priv,
+ enum transcoder cpu_transcoder, enum port port)
+{
+ return intel_de_read(dev_priv,
+ HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
+ HDCP_STATUS_ENC;
+}
+
+static bool intel_hdcp2_in_use(struct drm_i915_private *dev_priv,
+ enum transcoder cpu_transcoder, enum port port)
+{
+ return intel_de_read(dev_priv,
+ HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
+ LINK_ENCRYPTION_STATUS;
+}
+
+static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *dig_port,
+ const struct intel_hdcp_shim *shim)
+{
+ int ret, read_ret;
+ bool ksv_ready;
+
+ /* Poll for ksv list ready (spec says max time allowed is 5s) */
+ ret = __wait_for(read_ret = shim->read_ksv_ready(dig_port,
+ &ksv_ready),
+ read_ret || ksv_ready, 5 * 1000 * 1000, 1000,
+ 100 * 1000);
+ if (ret)
+ return ret;
+ if (read_ret)
+ return read_ret;
+ if (!ksv_ready)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static bool hdcp_key_loadable(struct drm_i915_private *dev_priv)
+{
+ enum i915_power_well_id id;
+ intel_wakeref_t wakeref;
+ bool enabled = false;
+
+ /*
+ * On HSW and BDW, Display HW loads the Key as soon as Display resumes.
+ * On all BXT+, SW can load the keys only when the PW#1 is turned on.
+ */
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+ id = HSW_DISP_PW_GLOBAL;
+ else
+ id = SKL_DISP_PW_1;
+
+ /* PG1 (power well #1) needs to be enabled */
+ with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
+ enabled = intel_display_power_well_is_enabled(dev_priv, id);
+
+ /*
+ * Another requirement for HDCP key loadability is that the cdclk PLL
+ * is enabled. We won't land here without an active crtc, so assume
+ * cdclk is already on.
+ */
+
+ return enabled;
+}
+
+static void intel_hdcp_clear_keys(struct drm_i915_private *dev_priv)
+{
+ intel_de_write(dev_priv, HDCP_KEY_CONF, HDCP_CLEAR_KEYS_TRIGGER);
+ intel_de_write(dev_priv, HDCP_KEY_STATUS,
+ HDCP_KEY_LOAD_DONE | HDCP_KEY_LOAD_STATUS | HDCP_FUSE_IN_PROGRESS | HDCP_FUSE_ERROR | HDCP_FUSE_DONE);
+}
+
+static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
+{
+ int ret;
+ u32 val;
+
+ val = intel_de_read(dev_priv, HDCP_KEY_STATUS);
+ if ((val & HDCP_KEY_LOAD_DONE) && (val & HDCP_KEY_LOAD_STATUS))
+ return 0;
+
+ /*
+ * On HSW and BDW the HW loads the HDCP1.4 key when the display comes
+ * out of reset. So if the key is not already loaded, it's an error.
+ */
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+ if (!(intel_de_read(dev_priv, HDCP_KEY_STATUS) & HDCP_KEY_LOAD_DONE))
+ return -ENXIO;
+
+ /*
+ * Initiate loading the HDCP key from fuses.
+ *
+ * On BXT+ platforms the HDCP key needs to be loaded by SW. Only
+ * display version 9 platforms (minus BXT) differ in the key load
+ * trigger process from other platforms: they use the GT Driver
+ * Mailbox interface.
+ */
+ if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv)) {
+ ret = snb_pcode_write(&dev_priv->uncore, SKL_PCODE_LOAD_HDCP_KEYS, 1);
+ if (ret) {
+ drm_err(&dev_priv->drm,
+ "Failed to initiate HDCP key load (%d)\n",
+ ret);
+ return ret;
+ }
+ } else {
+ intel_de_write(dev_priv, HDCP_KEY_CONF, HDCP_KEY_LOAD_TRIGGER);
+ }
+
+ /* Wait for the keys to load (500us) */
+ ret = __intel_wait_for_register(&dev_priv->uncore, HDCP_KEY_STATUS,
+ HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE,
+ 10, 1, &val);
+ if (ret)
+ return ret;
+ else if (!(val & HDCP_KEY_LOAD_STATUS))
+ return -ENXIO;
+
+ /* Send Aksv over to PCH display for use in authentication */
+ intel_de_write(dev_priv, HDCP_KEY_CONF, HDCP_AKSV_SEND_TRIGGER);
+
+ return 0;
+}
+
+/* Returns updated SHA-1 index */
+static int intel_write_sha_text(struct drm_i915_private *dev_priv, u32 sha_text)
+{
+ intel_de_write(dev_priv, HDCP_SHA_TEXT, sha_text);
+ if (intel_de_wait_for_set(dev_priv, HDCP_REP_CTL, HDCP_SHA1_READY, 1)) {
+ drm_err(&dev_priv->drm, "Timed out waiting for SHA1 ready\n");
+ return -ETIMEDOUT;
+ }
+ return 0;
+}
+
+static
+u32 intel_hdcp_get_repeater_ctl(struct drm_i915_private *dev_priv,
+ enum transcoder cpu_transcoder, enum port port)
+{
+ if (DISPLAY_VER(dev_priv) >= 12) {
+ switch (cpu_transcoder) {
+ case TRANSCODER_A:
+ return HDCP_TRANSA_REP_PRESENT |
+ HDCP_TRANSA_SHA1_M0;
+ case TRANSCODER_B:
+ return HDCP_TRANSB_REP_PRESENT |
+ HDCP_TRANSB_SHA1_M0;
+ case TRANSCODER_C:
+ return HDCP_TRANSC_REP_PRESENT |
+ HDCP_TRANSC_SHA1_M0;
+ case TRANSCODER_D:
+ return HDCP_TRANSD_REP_PRESENT |
+ HDCP_TRANSD_SHA1_M0;
+ default:
+ drm_err(&dev_priv->drm, "Unknown transcoder %d\n",
+ cpu_transcoder);
+ return -EINVAL;
+ }
+ }
+
+ switch (port) {
+ case PORT_A:
+ return HDCP_DDIA_REP_PRESENT | HDCP_DDIA_SHA1_M0;
+ case PORT_B:
+ return HDCP_DDIB_REP_PRESENT | HDCP_DDIB_SHA1_M0;
+ case PORT_C:
+ return HDCP_DDIC_REP_PRESENT | HDCP_DDIC_SHA1_M0;
+ case PORT_D:
+ return HDCP_DDID_REP_PRESENT | HDCP_DDID_SHA1_M0;
+ case PORT_E:
+ return HDCP_DDIE_REP_PRESENT | HDCP_DDIE_SHA1_M0;
+ default:
+ drm_err(&dev_priv->drm, "Unknown port %d\n", port);
+ return -EINVAL;
+ }
+}
+
+static
+int intel_hdcp_validate_v_prime(struct intel_connector *connector,
+ const struct intel_hdcp_shim *shim,
+ u8 *ksv_fifo, u8 num_downstream, u8 *bstatus)
+{
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
+ enum port port = dig_port->base.port;
+ u32 vprime, sha_text, sha_leftovers, rep_ctl;
+ int ret, i, j, sha_idx;
+
+ /* Process V' values from the receiver */
+ for (i = 0; i < DRM_HDCP_V_PRIME_NUM_PARTS; i++) {
+ ret = shim->read_v_prime_part(dig_port, i, &vprime);
+ if (ret)
+ return ret;
+ intel_de_write(dev_priv, HDCP_SHA_V_PRIME(i), vprime);
+ }
+
+ /*
+ * We need to write the concatenation of all device KSVs, BINFO (DP) ||
+ * BSTATUS (HDMI), and M0 (which is added via HDCP_REP_CTL). This byte
+ * stream is written via the HDCP_SHA_TEXT register in 32-bit
+ * increments. Every 64 bytes, we need to write HDCP_REP_CTL again. This
+ * index will keep track of our progress through the 64 bytes as well as
+ * helping us work the 40-bit KSVs through our 32-bit register.
+ *
+ * NOTE: data passed via HDCP_SHA_TEXT should be big-endian
+ */
+ sha_idx = 0;
+ sha_text = 0;
+ sha_leftovers = 0;
+ rep_ctl = intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder, port);
+ intel_de_write(dev_priv, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
+ for (i = 0; i < num_downstream; i++) {
+ unsigned int sha_empty;
+ u8 *ksv = &ksv_fifo[i * DRM_HDCP_KSV_LEN];
+
+ /* Fill up the empty slots in sha_text and write it out */
+ sha_empty = sizeof(sha_text) - sha_leftovers;
+ for (j = 0; j < sha_empty; j++) {
+ u8 off = ((sizeof(sha_text) - j - 1 - sha_leftovers) * 8);
+ sha_text |= ksv[j] << off;
+ }
+
+ ret = intel_write_sha_text(dev_priv, sha_text);
+ if (ret < 0)
+ return ret;
+
+ /* Programming guide writes this every 64 bytes */
+ sha_idx += sizeof(sha_text);
+ if (!(sha_idx % 64))
+ intel_de_write(dev_priv, HDCP_REP_CTL,
+ rep_ctl | HDCP_SHA1_TEXT_32);
+
+ /* Store the leftover bytes from the ksv in sha_text */
+ sha_leftovers = DRM_HDCP_KSV_LEN - sha_empty;
+ sha_text = 0;
+ for (j = 0; j < sha_leftovers; j++)
+ sha_text |= ksv[sha_empty + j] <<
+ ((sizeof(sha_text) - j - 1) * 8);
+
+ /*
+ * If we still have room in sha_text for more data, continue.
+ * Otherwise, write it out immediately.
+ */
+ if (sizeof(sha_text) > sha_leftovers)
+ continue;
+
+ ret = intel_write_sha_text(dev_priv, sha_text);
+ if (ret < 0)
+ return ret;
+ sha_leftovers = 0;
+ sha_text = 0;
+ sha_idx += sizeof(sha_text);
+ }
+
+ /*
+ * We need to write BINFO/BSTATUS, and M0 now. Depending on how many
+ * bytes are leftover from the last ksv, we might be able to fit them
+ * all in sha_text (first 2 cases), or we might need to split them up
+ * into 2 writes (last 2 cases).
+ */
+ if (sha_leftovers == 0) {
+ /* Write 16 bits of text, 16 bits of M0 */
+ intel_de_write(dev_priv, HDCP_REP_CTL,
+ rep_ctl | HDCP_SHA1_TEXT_16);
+ ret = intel_write_sha_text(dev_priv,
+ bstatus[0] << 8 | bstatus[1]);
+ if (ret < 0)
+ return ret;
+ sha_idx += sizeof(sha_text);
+
+ /* Write 32 bits of M0 */
+ intel_de_write(dev_priv, HDCP_REP_CTL,
+ rep_ctl | HDCP_SHA1_TEXT_0);
+ ret = intel_write_sha_text(dev_priv, 0);
+ if (ret < 0)
+ return ret;
+ sha_idx += sizeof(sha_text);
+
+ /* Write 16 bits of M0 */
+ intel_de_write(dev_priv, HDCP_REP_CTL,
+ rep_ctl | HDCP_SHA1_TEXT_16);
+ ret = intel_write_sha_text(dev_priv, 0);
+ if (ret < 0)
+ return ret;
+ sha_idx += sizeof(sha_text);
+
+ } else if (sha_leftovers == 1) {
+ /* Write 24 bits of text, 8 bits of M0 */
+ intel_de_write(dev_priv, HDCP_REP_CTL,
+ rep_ctl | HDCP_SHA1_TEXT_24);
+ sha_text |= bstatus[0] << 16 | bstatus[1] << 8;
+ /* Only 24-bits of data, must be in the LSB */
+ sha_text = (sha_text & 0xffffff00) >> 8;
+ ret = intel_write_sha_text(dev_priv, sha_text);
+ if (ret < 0)
+ return ret;
+ sha_idx += sizeof(sha_text);
+
+ /* Write 32 bits of M0 */
+ intel_de_write(dev_priv, HDCP_REP_CTL,
+ rep_ctl | HDCP_SHA1_TEXT_0);
+ ret = intel_write_sha_text(dev_priv, 0);
+ if (ret < 0)
+ return ret;
+ sha_idx += sizeof(sha_text);
+
+ /* Write 24 bits of M0 */
+ intel_de_write(dev_priv, HDCP_REP_CTL,
+ rep_ctl | HDCP_SHA1_TEXT_8);
+ ret = intel_write_sha_text(dev_priv, 0);
+ if (ret < 0)
+ return ret;
+ sha_idx += sizeof(sha_text);
+
+ } else if (sha_leftovers == 2) {
+ /* Write 32 bits of text */
+ intel_de_write(dev_priv, HDCP_REP_CTL,
+ rep_ctl | HDCP_SHA1_TEXT_32);
+ sha_text |= bstatus[0] << 8 | bstatus[1];
+ ret = intel_write_sha_text(dev_priv, sha_text);
+ if (ret < 0)
+ return ret;
+ sha_idx += sizeof(sha_text);
+
+ /* Write 64 bits of M0 */
+ intel_de_write(dev_priv, HDCP_REP_CTL,
+ rep_ctl | HDCP_SHA1_TEXT_0);
+ for (i = 0; i < 2; i++) {
+ ret = intel_write_sha_text(dev_priv, 0);
+ if (ret < 0)
+ return ret;
+ sha_idx += sizeof(sha_text);
+ }
+
+ /*
+ * Terminate the SHA-1 stream by hand. For the other leftover
+ * cases this is appended by the hardware.
+ */
+ intel_de_write(dev_priv, HDCP_REP_CTL,
+ rep_ctl | HDCP_SHA1_TEXT_32);
+ sha_text = DRM_HDCP_SHA1_TERMINATOR << 24;
+ ret = intel_write_sha_text(dev_priv, sha_text);
+ if (ret < 0)
+ return ret;
+ sha_idx += sizeof(sha_text);
+ } else if (sha_leftovers == 3) {
+ /* Write 32 bits of text (filled from LSB) */
+ intel_de_write(dev_priv, HDCP_REP_CTL,
+ rep_ctl | HDCP_SHA1_TEXT_32);
+ sha_text |= bstatus[0];
+ ret = intel_write_sha_text(dev_priv, sha_text);
+ if (ret < 0)
+ return ret;
+ sha_idx += sizeof(sha_text);
+
+ /* Write 8 bits of text (filled from LSB), 24 bits of M0 */
+ intel_de_write(dev_priv, HDCP_REP_CTL,
+ rep_ctl | HDCP_SHA1_TEXT_8);
+ ret = intel_write_sha_text(dev_priv, bstatus[1]);
+ if (ret < 0)
+ return ret;
+ sha_idx += sizeof(sha_text);
+
+ /* Write 32 bits of M0 */
+ intel_de_write(dev_priv, HDCP_REP_CTL,
+ rep_ctl | HDCP_SHA1_TEXT_0);
+ ret = intel_write_sha_text(dev_priv, 0);
+ if (ret < 0)
+ return ret;
+ sha_idx += sizeof(sha_text);
+
+ /* Write 8 bits of M0 */
+ intel_de_write(dev_priv, HDCP_REP_CTL,
+ rep_ctl | HDCP_SHA1_TEXT_24);
+ ret = intel_write_sha_text(dev_priv, 0);
+ if (ret < 0)
+ return ret;
+ sha_idx += sizeof(sha_text);
+ } else {
+ drm_dbg_kms(&dev_priv->drm, "Invalid number of leftovers %d\n",
+ sha_leftovers);
+ return -EINVAL;
+ }
+
+ intel_de_write(dev_priv, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
+ /* Fill up to 64-4 bytes with zeros (leave the last write for length) */
+ while ((sha_idx % 64) < (64 - sizeof(sha_text))) {
+ ret = intel_write_sha_text(dev_priv, 0);
+ if (ret < 0)
+ return ret;
+ sha_idx += sizeof(sha_text);
+ }
+
+ /*
+ * Last write gets the length of the concatenation in bits. That is:
+ * - 5 bytes per device
+ * - 10 bytes for BINFO/BSTATUS(2), M0(8)
+ */
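+	/* e.g. one downstream device: (1 * 5 + 10) * 8 = 120 bits */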
+ sha_text = (num_downstream * 5 + 10) * 8;
+ ret = intel_write_sha_text(dev_priv, sha_text);
+ if (ret < 0)
+ return ret;
+
+ /* Tell the HW we're done with the hash and wait for it to ACK */
+ intel_de_write(dev_priv, HDCP_REP_CTL,
+ rep_ctl | HDCP_SHA1_COMPLETE_HASH);
+ if (intel_de_wait_for_set(dev_priv, HDCP_REP_CTL,
+ HDCP_SHA1_COMPLETE, 1)) {
+ drm_err(&dev_priv->drm, "Timed out waiting for SHA1 complete\n");
+ return -ETIMEDOUT;
+ }
+ if (!(intel_de_read(dev_priv, HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) {
+ drm_dbg_kms(&dev_priv->drm, "SHA-1 mismatch, HDCP failed\n");
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+/* Implements Part 2 of the HDCP authorization procedure */
+static
+int intel_hdcp_auth_downstream(struct intel_connector *connector)
+{
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ const struct intel_hdcp_shim *shim = connector->hdcp.shim;
+ u8 bstatus[2], num_downstream, *ksv_fifo;
+ int ret, i, tries = 3;
+
+ ret = intel_hdcp_poll_ksv_fifo(dig_port, shim);
+ if (ret) {
+ drm_dbg_kms(&dev_priv->drm,
+ "KSV list failed to become ready (%d)\n", ret);
+ return ret;
+ }
+
+ ret = shim->read_bstatus(dig_port, bstatus);
+ if (ret)
+ return ret;
+
+ if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) ||
+ DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) {
+ drm_dbg_kms(&dev_priv->drm, "Max Topology Limit Exceeded\n");
+ return -EPERM;
+ }
+
+ /*
+ * When a repeater reports a device count of 0, the HDCP1.4 spec allows
+ * disabling HDCP encryption, which implies the repeater can't have a
+ * display of its own. As no encrypted content can be consumed by a
+ * repeater with 0 downstream devices, fail the authentication.
+ */
+ num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]);
+ if (num_downstream == 0) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Repeater with zero downstream devices\n");
+ return -EINVAL;
+ }
+
+ ksv_fifo = kcalloc(DRM_HDCP_KSV_LEN, num_downstream, GFP_KERNEL);
+ if (!ksv_fifo) {
+ drm_dbg_kms(&dev_priv->drm, "Out of mem: ksv_fifo\n");
+ return -ENOMEM;
+ }
+
+ ret = shim->read_ksv_fifo(dig_port, num_downstream, ksv_fifo);
+ if (ret)
+ goto err;
+
+ if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm, ksv_fifo,
+ num_downstream) > 0) {
+ drm_err(&dev_priv->drm, "Revoked Ksv(s) in ksv_fifo\n");
+ ret = -EPERM;
+ goto err;
+ }
+
+ /*
+ * When V' mismatches, the DP spec mandates re-reading
+ * V' at least twice.
+ */
+ for (i = 0; i < tries; i++) {
+ ret = intel_hdcp_validate_v_prime(connector, shim,
+ ksv_fifo, num_downstream,
+ bstatus);
+ if (!ret)
+ break;
+ }
+
+ if (i == tries) {
+ drm_dbg_kms(&dev_priv->drm,
+ "V Prime validation failed.(%d)\n", ret);
+ goto err;
+ }
+
+ drm_dbg_kms(&dev_priv->drm, "HDCP is enabled (%d downstream devices)\n",
+ num_downstream);
+ ret = 0;
+err:
+ kfree(ksv_fifo);
+ return ret;
+}
+
+/* Implements Part 1 of the HDCP authorization procedure */
+static int intel_hdcp_auth(struct intel_connector *connector)
+{
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ const struct intel_hdcp_shim *shim = hdcp->shim;
+ enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
+ enum port port = dig_port->base.port;
+ unsigned long r0_prime_gen_start;
+ int ret, i, tries = 2;
+ union {
+ u32 reg[2];
+ u8 shim[DRM_HDCP_AN_LEN];
+ } an;
+ union {
+ u32 reg[2];
+ u8 shim[DRM_HDCP_KSV_LEN];
+ } bksv;
+ union {
+ u32 reg;
+ u8 shim[DRM_HDCP_RI_LEN];
+ } ri;
+ bool repeater_present, hdcp_capable;
+
+ /*
+ * Detects whether the display is HDCP capable. Although we check for
+ * valid Bksv below, the HDCP over DP spec requires that we check
+ * whether the display supports HDCP before we write An. For HDMI
+ * displays, this is not necessary.
+ */
+ if (shim->hdcp_capable) {
+ ret = shim->hdcp_capable(dig_port, &hdcp_capable);
+ if (ret)
+ return ret;
+ if (!hdcp_capable) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Panel is not HDCP capable\n");
+ return -EINVAL;
+ }
+ }
+
+ /* Initialize An with 2 random values and acquire it */
+ for (i = 0; i < 2; i++)
+ intel_de_write(dev_priv,
+ HDCP_ANINIT(dev_priv, cpu_transcoder, port),
+ get_random_u32());
+ intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port),
+ HDCP_CONF_CAPTURE_AN);
+
+ /* Wait for An to be acquired */
+ if (intel_de_wait_for_set(dev_priv,
+ HDCP_STATUS(dev_priv, cpu_transcoder, port),
+ HDCP_STATUS_AN_READY, 1)) {
+ drm_err(&dev_priv->drm, "Timed out waiting for An\n");
+ return -ETIMEDOUT;
+ }
+
+ an.reg[0] = intel_de_read(dev_priv,
+ HDCP_ANLO(dev_priv, cpu_transcoder, port));
+ an.reg[1] = intel_de_read(dev_priv,
+ HDCP_ANHI(dev_priv, cpu_transcoder, port));
+ ret = shim->write_an_aksv(dig_port, an.shim);
+ if (ret)
+ return ret;
+
+ r0_prime_gen_start = jiffies;
+
+ memset(&bksv, 0, sizeof(bksv));
+
+ ret = intel_hdcp_read_valid_bksv(dig_port, shim, bksv.shim);
+ if (ret < 0)
+ return ret;
+
+ if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm, bksv.shim, 1) > 0) {
+ drm_err(&dev_priv->drm, "BKSV is revoked\n");
+ return -EPERM;
+ }
+
+ intel_de_write(dev_priv, HDCP_BKSVLO(dev_priv, cpu_transcoder, port),
+ bksv.reg[0]);
+ intel_de_write(dev_priv, HDCP_BKSVHI(dev_priv, cpu_transcoder, port),
+ bksv.reg[1]);
+
+ ret = shim->repeater_present(dig_port, &repeater_present);
+ if (ret)
+ return ret;
+ if (repeater_present)
+ intel_de_write(dev_priv, HDCP_REP_CTL,
+ intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder, port));
+
+ ret = shim->toggle_signalling(dig_port, cpu_transcoder, true);
+ if (ret)
+ return ret;
+
+ intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port),
+ HDCP_CONF_AUTH_AND_ENC);
+
+ /* Wait for R0 ready */
+ if (wait_for(intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
+ (HDCP_STATUS_R0_READY | HDCP_STATUS_ENC), 1)) {
+ drm_err(&dev_priv->drm, "Timed out waiting for R0 ready\n");
+ return -ETIMEDOUT;
+ }
+
+ /*
+ * Wait for R0' to become available. The spec says 100ms from Aksv, but
+ * some monitors can take longer than this. We'll set the timeout at
+ * 300ms just to be sure.
+ *
+ * On DP, there's an R0_READY bit available but no such bit
+ * exists on HDMI. Since the upper-bound is the same, we'll just do
+ * the stupid thing instead of polling on one and not the other.
+ */
+ wait_remaining_ms_from_jiffies(r0_prime_gen_start, 300);
+
+ tries = 3;
+
+ /*
+ * The DP HDCP spec mandates two more attempts to re-read R0' in
+ * case of an R0 mismatch.
+ */
+ for (i = 0; i < tries; i++) {
+ ri.reg = 0;
+ ret = shim->read_ri_prime(dig_port, ri.shim);
+ if (ret)
+ return ret;
+ intel_de_write(dev_priv,
+ HDCP_RPRIME(dev_priv, cpu_transcoder, port),
+ ri.reg);
+
+ /* Wait for Ri prime match */
+ if (!wait_for(intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
+ (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1))
+ break;
+ }
+
+ if (i == tries) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Timed out waiting for Ri prime match (%x)\n",
+ intel_de_read(dev_priv, HDCP_STATUS(dev_priv,
+ cpu_transcoder, port)));
+ return -ETIMEDOUT;
+ }
+
+ /* Wait for encryption confirmation */
+ if (intel_de_wait_for_set(dev_priv,
+ HDCP_STATUS(dev_priv, cpu_transcoder, port),
+ HDCP_STATUS_ENC,
+ HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
+ drm_err(&dev_priv->drm, "Timed out waiting for encryption\n");
+ return -ETIMEDOUT;
+ }
+
+ /* DP MST Auth Part 1 Step 2.a and Step 2.b */
+ if (shim->stream_encryption) {
+ ret = shim->stream_encryption(connector, true);
+ if (ret) {
+ drm_err(&dev_priv->drm, "[%s:%d] Failed to enable HDCP 1.4 stream enc\n",
+ connector->base.name, connector->base.base.id);
+ return ret;
+ }
+ drm_dbg_kms(&dev_priv->drm, "HDCP 1.4 transcoder: %s stream encrypted\n",
+ transcoder_name(hdcp->stream_transcoder));
+ }
+
+ if (repeater_present)
+ return intel_hdcp_auth_downstream(connector);
+
+ drm_dbg_kms(&dev_priv->drm, "HDCP is enabled (no repeater present)\n");
+ return 0;
+}
+
+static int _intel_hdcp_disable(struct intel_connector *connector)
+{
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ enum port port = dig_port->base.port;
+ enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
+ u32 repeater_ctl;
+ int ret;
+
+ drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP is being disabled...\n",
+ connector->base.name, connector->base.base.id);
+
+ if (hdcp->shim->stream_encryption) {
+ ret = hdcp->shim->stream_encryption(connector, false);
+ if (ret) {
+ drm_err(&dev_priv->drm, "[%s:%d] Failed to disable HDCP 1.4 stream enc\n",
+ connector->base.name, connector->base.base.id);
+ return ret;
+ }
+ drm_dbg_kms(&dev_priv->drm, "HDCP 1.4 transcoder: %s stream encryption disabled\n",
+ transcoder_name(hdcp->stream_transcoder));
+ /*
+ * If there are other connectors on this port using HDCP,
+		 * don't disable it until HDCP encryption has been disabled
+		 * for all connectors in the MST topology.
+ */
+ if (dig_port->num_hdcp_streams > 0)
+ return 0;
+ }
+
+ hdcp->hdcp_encrypted = false;
+ intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port), 0);
+ if (intel_de_wait_for_clear(dev_priv,
+ HDCP_STATUS(dev_priv, cpu_transcoder, port),
+ ~0, HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
+ drm_err(&dev_priv->drm,
+ "Failed to disable HDCP, timeout clearing status\n");
+ return -ETIMEDOUT;
+ }
+
+ repeater_ctl = intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder,
+ port);
+ intel_de_write(dev_priv, HDCP_REP_CTL,
+ intel_de_read(dev_priv, HDCP_REP_CTL) & ~repeater_ctl);
+
+ ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder, false);
+ if (ret) {
+ drm_err(&dev_priv->drm, "Failed to disable HDCP signalling\n");
+ return ret;
+ }
+
+ drm_dbg_kms(&dev_priv->drm, "HDCP is disabled\n");
+ return 0;
+}
+
+static int _intel_hdcp_enable(struct intel_connector *connector)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ int i, ret, tries = 3;
+
+ drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP is being enabled...\n",
+ connector->base.name, connector->base.base.id);
+
+ if (!hdcp_key_loadable(dev_priv)) {
+ drm_err(&dev_priv->drm, "HDCP key Load is not possible\n");
+ return -ENXIO;
+ }
+
+ for (i = 0; i < KEY_LOAD_TRIES; i++) {
+ ret = intel_hdcp_load_keys(dev_priv);
+ if (!ret)
+ break;
+ intel_hdcp_clear_keys(dev_priv);
+ }
+ if (ret) {
+ drm_err(&dev_priv->drm, "Could not load HDCP keys, (%d)\n",
+ ret);
+ return ret;
+ }
+
+	/* In case of authentication failures, the HDCP spec expects reauth. */
+ for (i = 0; i < tries; i++) {
+ ret = intel_hdcp_auth(connector);
+ if (!ret) {
+ hdcp->hdcp_encrypted = true;
+ return 0;
+ }
+
+ drm_dbg_kms(&dev_priv->drm, "HDCP Auth failure (%d)\n", ret);
+
+ /* Ensuring HDCP encryption and signalling are stopped. */
+ _intel_hdcp_disable(connector);
+ }
+
+ drm_dbg_kms(&dev_priv->drm,
+ "HDCP authentication failed (%d tries/%d)\n", tries, ret);
+ return ret;
+}
+
+static struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp)
+{
+ return container_of(hdcp, struct intel_connector, hdcp);
+}
+
+static void intel_hdcp_update_value(struct intel_connector *connector,
+ u64 value, bool update_property)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct intel_hdcp *hdcp = &connector->hdcp;
+
+ drm_WARN_ON(connector->base.dev, !mutex_is_locked(&hdcp->mutex));
+
+ if (hdcp->value == value)
+ return;
+
+ drm_WARN_ON(dev, !mutex_is_locked(&dig_port->hdcp_mutex));
+
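+	/*
+	 * num_hdcp_streams counts the connectors on this digital port that
+	 * currently have HDCP ENABLED; the MST disable path uses it to keep
+	 * port-level HDCP alive until the last stream is disabled.
+	 */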
+ if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
+ if (!drm_WARN_ON(dev, dig_port->num_hdcp_streams == 0))
+ dig_port->num_hdcp_streams--;
+ } else if (value == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
+ dig_port->num_hdcp_streams++;
+ }
+
+ hdcp->value = value;
+ if (update_property) {
+ drm_connector_get(&connector->base);
+ schedule_work(&hdcp->prop_work);
+ }
+}
+
+/* Implements Part 3 of the HDCP authorization procedure */
+static int intel_hdcp_check_link(struct intel_connector *connector)
+{
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ enum port port = dig_port->base.port;
+ enum transcoder cpu_transcoder;
+ int ret = 0;
+
+ mutex_lock(&hdcp->mutex);
+ mutex_lock(&dig_port->hdcp_mutex);
+
+ cpu_transcoder = hdcp->cpu_transcoder;
+
+	/* check_link is valid only when HDCP1.4 is enabled */
+ if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
+ !hdcp->hdcp_encrypted) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (drm_WARN_ON(&dev_priv->drm,
+ !intel_hdcp_in_use(dev_priv, cpu_transcoder, port))) {
+ drm_err(&dev_priv->drm,
+ "%s:%d HDCP link stopped encryption,%x\n",
+ connector->base.name, connector->base.base.id,
+ intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)));
+ ret = -ENXIO;
+ intel_hdcp_update_value(connector,
+ DRM_MODE_CONTENT_PROTECTION_DESIRED,
+ true);
+ goto out;
+ }
+
+ if (hdcp->shim->check_link(dig_port, connector)) {
+ if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
+ intel_hdcp_update_value(connector,
+ DRM_MODE_CONTENT_PROTECTION_ENABLED, true);
+ }
+ goto out;
+ }
+
+ drm_dbg_kms(&dev_priv->drm,
+ "[%s:%d] HDCP link failed, retrying authentication\n",
+ connector->base.name, connector->base.base.id);
+
+ ret = _intel_hdcp_disable(connector);
+ if (ret) {
+ drm_err(&dev_priv->drm, "Failed to disable hdcp (%d)\n", ret);
+ intel_hdcp_update_value(connector,
+ DRM_MODE_CONTENT_PROTECTION_DESIRED,
+ true);
+ goto out;
+ }
+
+ ret = _intel_hdcp_enable(connector);
+ if (ret) {
+ drm_err(&dev_priv->drm, "Failed to enable hdcp (%d)\n", ret);
+ intel_hdcp_update_value(connector,
+ DRM_MODE_CONTENT_PROTECTION_DESIRED,
+ true);
+ goto out;
+ }
+
+out:
+ mutex_unlock(&dig_port->hdcp_mutex);
+ mutex_unlock(&hdcp->mutex);
+ return ret;
+}
+
+static void intel_hdcp_prop_work(struct work_struct *work)
+{
+ struct intel_hdcp *hdcp = container_of(work, struct intel_hdcp,
+ prop_work);
+ struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+
+ drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex, NULL);
+ mutex_lock(&hdcp->mutex);
+
+ /*
+ * This worker is only used to flip between ENABLED/DESIRED. Either of
+ * those to UNDESIRED is handled by core. If value == UNDESIRED,
+ * we're running just after hdcp has been disabled, so just exit
+ */
+ if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
+ drm_hdcp_update_content_protection(&connector->base,
+ hdcp->value);
+
+ mutex_unlock(&hdcp->mutex);
+ drm_modeset_unlock(&dev_priv->drm.mode_config.connection_mutex);
+
+ drm_connector_put(&connector->base);
+}
+
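+/*
+ * Prior to display ver 12 only ports A through D have HDCP hardware;
+ * from ver 12 onwards every port on an HDCP-capable platform supports it.
+ */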
+bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port)
+{
+ return RUNTIME_INFO(dev_priv)->has_hdcp &&
+ (DISPLAY_VER(dev_priv) >= 12 || port < PORT_E);
+}
+
+static int
+hdcp2_prepare_ake_init(struct intel_connector *connector,
+ struct hdcp2_ake_init *ake_data)
+{
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct i915_hdcp_comp_master *comp;
+ int ret;
+
+ mutex_lock(&dev_priv->display.hdcp.comp_mutex);
+ comp = dev_priv->display.hdcp.master;
+
+ if (!comp || !comp->ops) {
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
+ return -EINVAL;
+ }
+
+ ret = comp->ops->initiate_hdcp2_session(comp->mei_dev, data, ake_data);
+ if (ret)
+ drm_dbg_kms(&dev_priv->drm, "Prepare_ake_init failed. %d\n",
+ ret);
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
+
+ return ret;
+}
+
+static int
+hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector,
+ struct hdcp2_ake_send_cert *rx_cert,
+ bool *paired,
+ struct hdcp2_ake_no_stored_km *ek_pub_km,
+ size_t *msg_sz)
+{
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct i915_hdcp_comp_master *comp;
+ int ret;
+
+ mutex_lock(&dev_priv->display.hdcp.comp_mutex);
+ comp = dev_priv->display.hdcp.master;
+
+ if (!comp || !comp->ops) {
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
+ return -EINVAL;
+ }
+
+ ret = comp->ops->verify_receiver_cert_prepare_km(comp->mei_dev, data,
+ rx_cert, paired,
+ ek_pub_km, msg_sz);
+ if (ret < 0)
+ drm_dbg_kms(&dev_priv->drm, "Verify rx_cert failed. %d\n",
+ ret);
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
+
+ return ret;
+}
+
+static int hdcp2_verify_hprime(struct intel_connector *connector,
+ struct hdcp2_ake_send_hprime *rx_hprime)
+{
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct i915_hdcp_comp_master *comp;
+ int ret;
+
+ mutex_lock(&dev_priv->display.hdcp.comp_mutex);
+ comp = dev_priv->display.hdcp.master;
+
+ if (!comp || !comp->ops) {
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
+ return -EINVAL;
+ }
+
+ ret = comp->ops->verify_hprime(comp->mei_dev, data, rx_hprime);
+ if (ret < 0)
+ drm_dbg_kms(&dev_priv->drm, "Verify hprime failed. %d\n", ret);
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
+
+ return ret;
+}
+
+static int
+hdcp2_store_pairing_info(struct intel_connector *connector,
+ struct hdcp2_ake_send_pairing_info *pairing_info)
+{
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct i915_hdcp_comp_master *comp;
+ int ret;
+
+ mutex_lock(&dev_priv->display.hdcp.comp_mutex);
+ comp = dev_priv->display.hdcp.master;
+
+ if (!comp || !comp->ops) {
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
+ return -EINVAL;
+ }
+
+ ret = comp->ops->store_pairing_info(comp->mei_dev, data, pairing_info);
+ if (ret < 0)
+ drm_dbg_kms(&dev_priv->drm, "Store pairing info failed. %d\n",
+ ret);
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
+
+ return ret;
+}
+
+static int
+hdcp2_prepare_lc_init(struct intel_connector *connector,
+ struct hdcp2_lc_init *lc_init)
+{
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct i915_hdcp_comp_master *comp;
+ int ret;
+
+ mutex_lock(&dev_priv->display.hdcp.comp_mutex);
+ comp = dev_priv->display.hdcp.master;
+
+ if (!comp || !comp->ops) {
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
+ return -EINVAL;
+ }
+
+ ret = comp->ops->initiate_locality_check(comp->mei_dev, data, lc_init);
+ if (ret < 0)
+ drm_dbg_kms(&dev_priv->drm, "Prepare lc_init failed. %d\n",
+ ret);
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
+
+ return ret;
+}
+
+static int
+hdcp2_verify_lprime(struct intel_connector *connector,
+ struct hdcp2_lc_send_lprime *rx_lprime)
+{
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct i915_hdcp_comp_master *comp;
+ int ret;
+
+ mutex_lock(&dev_priv->display.hdcp.comp_mutex);
+ comp = dev_priv->display.hdcp.master;
+
+ if (!comp || !comp->ops) {
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
+ return -EINVAL;
+ }
+
+ ret = comp->ops->verify_lprime(comp->mei_dev, data, rx_lprime);
+ if (ret < 0)
+ drm_dbg_kms(&dev_priv->drm, "Verify L_Prime failed. %d\n",
+ ret);
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
+
+ return ret;
+}
+
+static int hdcp2_prepare_skey(struct intel_connector *connector,
+ struct hdcp2_ske_send_eks *ske_data)
+{
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct i915_hdcp_comp_master *comp;
+ int ret;
+
+ mutex_lock(&dev_priv->display.hdcp.comp_mutex);
+ comp = dev_priv->display.hdcp.master;
+
+ if (!comp || !comp->ops) {
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
+ return -EINVAL;
+ }
+
+ ret = comp->ops->get_session_key(comp->mei_dev, data, ske_data);
+ if (ret < 0)
+ drm_dbg_kms(&dev_priv->drm, "Get session key failed. %d\n",
+ ret);
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
+
+ return ret;
+}
+
+static int
+hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector,
+ struct hdcp2_rep_send_receiverid_list
+ *rep_topology,
+ struct hdcp2_rep_send_ack *rep_send_ack)
+{
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct i915_hdcp_comp_master *comp;
+ int ret;
+
+ mutex_lock(&dev_priv->display.hdcp.comp_mutex);
+ comp = dev_priv->display.hdcp.master;
+
+ if (!comp || !comp->ops) {
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
+ return -EINVAL;
+ }
+
+ ret = comp->ops->repeater_check_flow_prepare_ack(comp->mei_dev, data,
+ rep_topology,
+ rep_send_ack);
+ if (ret < 0)
+ drm_dbg_kms(&dev_priv->drm,
+ "Verify rep topology failed. %d\n", ret);
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
+
+ return ret;
+}
+
+static int
+hdcp2_verify_mprime(struct intel_connector *connector,
+ struct hdcp2_rep_stream_ready *stream_ready)
+{
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct i915_hdcp_comp_master *comp;
+ int ret;
+
+ mutex_lock(&dev_priv->display.hdcp.comp_mutex);
+ comp = dev_priv->display.hdcp.master;
+
+ if (!comp || !comp->ops) {
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
+ return -EINVAL;
+ }
+
+ ret = comp->ops->verify_mprime(comp->mei_dev, data, stream_ready);
+ if (ret < 0)
+ drm_dbg_kms(&dev_priv->drm, "Verify mprime failed. %d\n", ret);
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
+
+ return ret;
+}
+
+static int hdcp2_authenticate_port(struct intel_connector *connector)
+{
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct i915_hdcp_comp_master *comp;
+ int ret;
+
+ mutex_lock(&dev_priv->display.hdcp.comp_mutex);
+ comp = dev_priv->display.hdcp.master;
+
+ if (!comp || !comp->ops) {
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
+ return -EINVAL;
+ }
+
+ ret = comp->ops->enable_hdcp_authentication(comp->mei_dev, data);
+ if (ret < 0)
+ drm_dbg_kms(&dev_priv->drm, "Enable hdcp auth failed. %d\n",
+ ret);
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
+
+ return ret;
+}
+
+static int hdcp2_close_mei_session(struct intel_connector *connector)
+{
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct i915_hdcp_comp_master *comp;
+ int ret;
+
+ mutex_lock(&dev_priv->display.hdcp.comp_mutex);
+ comp = dev_priv->display.hdcp.master;
+
+ if (!comp || !comp->ops) {
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
+ return -EINVAL;
+ }
+
+ ret = comp->ops->close_hdcp_session(comp->mei_dev,
+ &dig_port->hdcp_port_data);
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
+
+ return ret;
+}
+
+static int hdcp2_deauthenticate_port(struct intel_connector *connector)
+{
+ return hdcp2_close_mei_session(connector);
+}
+
+/* Authentication flow starts from here */
+static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
+{
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_hdcp *hdcp = &connector->hdcp;
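+	/*
+	 * The AKE messages are exchanged strictly one after another, so a
+	 * union is sufficient to hold whichever message is currently in
+	 * flight.
+	 */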
+ union {
+ struct hdcp2_ake_init ake_init;
+ struct hdcp2_ake_send_cert send_cert;
+ struct hdcp2_ake_no_stored_km no_stored_km;
+ struct hdcp2_ake_send_hprime send_hprime;
+ struct hdcp2_ake_send_pairing_info pairing_info;
+ } msgs;
+ const struct intel_hdcp_shim *shim = hdcp->shim;
+ size_t size;
+ int ret;
+
+ /* Init for seq_num */
+ hdcp->seq_num_v = 0;
+ hdcp->seq_num_m = 0;
+
+ ret = hdcp2_prepare_ake_init(connector, &msgs.ake_init);
+ if (ret < 0)
+ return ret;
+
+ ret = shim->write_2_2_msg(dig_port, &msgs.ake_init,
+ sizeof(msgs.ake_init));
+ if (ret < 0)
+ return ret;
+
+ ret = shim->read_2_2_msg(dig_port, HDCP_2_2_AKE_SEND_CERT,
+ &msgs.send_cert, sizeof(msgs.send_cert));
+ if (ret < 0)
+ return ret;
+
+ if (msgs.send_cert.rx_caps[0] != HDCP_2_2_RX_CAPS_VERSION_VAL) {
+ drm_dbg_kms(&dev_priv->drm, "cert.rx_caps dont claim HDCP2.2\n");
+ return -EINVAL;
+ }
+
+ hdcp->is_repeater = HDCP_2_2_RX_REPEATER(msgs.send_cert.rx_caps[2]);
+
+ if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm,
+ msgs.send_cert.cert_rx.receiver_id,
+ 1) > 0) {
+ drm_err(&dev_priv->drm, "Receiver ID is revoked\n");
+ return -EPERM;
+ }
+
+ /*
+	 * msgs.no_stored_km will also hold the message corresponding to a
+	 * stored km; the actual size to send back is returned in 'size'.
+ */
+ ret = hdcp2_verify_rx_cert_prepare_km(connector, &msgs.send_cert,
+ &hdcp->is_paired,
+ &msgs.no_stored_km, &size);
+ if (ret < 0)
+ return ret;
+
+ ret = shim->write_2_2_msg(dig_port, &msgs.no_stored_km, size);
+ if (ret < 0)
+ return ret;
+
+ ret = shim->read_2_2_msg(dig_port, HDCP_2_2_AKE_SEND_HPRIME,
+ &msgs.send_hprime, sizeof(msgs.send_hprime));
+ if (ret < 0)
+ return ret;
+
+ ret = hdcp2_verify_hprime(connector, &msgs.send_hprime);
+ if (ret < 0)
+ return ret;
+
+ if (!hdcp->is_paired) {
+ /* Pairing is required */
+ ret = shim->read_2_2_msg(dig_port,
+ HDCP_2_2_AKE_SEND_PAIRING_INFO,
+ &msgs.pairing_info,
+ sizeof(msgs.pairing_info));
+ if (ret < 0)
+ return ret;
+
+ ret = hdcp2_store_pairing_info(connector, &msgs.pairing_info);
+ if (ret < 0)
+ return ret;
+ hdcp->is_paired = true;
+ }
+
+ return 0;
+}
+
+static int hdcp2_locality_check(struct intel_connector *connector)
+{
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ union {
+ struct hdcp2_lc_init lc_init;
+ struct hdcp2_lc_send_lprime send_lprime;
+ } msgs;
+ const struct intel_hdcp_shim *shim = hdcp->shim;
+ int tries = HDCP2_LC_RETRY_CNT, ret, i;
+
+ for (i = 0; i < tries; i++) {
+ ret = hdcp2_prepare_lc_init(connector, &msgs.lc_init);
+ if (ret < 0)
+ continue;
+
+ ret = shim->write_2_2_msg(dig_port, &msgs.lc_init,
+ sizeof(msgs.lc_init));
+ if (ret < 0)
+ continue;
+
+ ret = shim->read_2_2_msg(dig_port,
+ HDCP_2_2_LC_SEND_LPRIME,
+ &msgs.send_lprime,
+ sizeof(msgs.send_lprime));
+ if (ret < 0)
+ continue;
+
+ ret = hdcp2_verify_lprime(connector, &msgs.send_lprime);
+ if (!ret)
+ break;
+ }
+
+ return ret;
+}
+
+static int hdcp2_session_key_exchange(struct intel_connector *connector)
+{
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ struct hdcp2_ske_send_eks send_eks;
+ int ret;
+
+ ret = hdcp2_prepare_skey(connector, &send_eks);
+ if (ret < 0)
+ return ret;
+
+ ret = hdcp->shim->write_2_2_msg(dig_port, &send_eks,
+ sizeof(send_eks));
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static
+int _hdcp2_propagate_stream_management_info(struct intel_connector *connector)
+{
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ union {
+ struct hdcp2_rep_stream_manage stream_manage;
+ struct hdcp2_rep_stream_ready stream_ready;
+ } msgs;
+ const struct intel_hdcp_shim *shim = hdcp->shim;
+ int ret, streams_size_delta, i;
+
+ if (connector->hdcp.seq_num_m > HDCP_2_2_SEQ_NUM_MAX)
+ return -ERANGE;
+
+ /* Prepare RepeaterAuth_Stream_Manage msg */
+ msgs.stream_manage.msg_id = HDCP_2_2_REP_STREAM_MANAGE;
+ drm_hdcp_cpu_to_be24(msgs.stream_manage.seq_num_m, hdcp->seq_num_m);
+
+ msgs.stream_manage.k = cpu_to_be16(data->k);
+
+ for (i = 0; i < data->k; i++) {
+ msgs.stream_manage.streams[i].stream_id = data->streams[i].stream_id;
+ msgs.stream_manage.streams[i].stream_type = data->streams[i].stream_type;
+ }
+
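+	/*
+	 * Only data->k of the fixed-size stream array is meaningful; trim
+	 * the unused tail off the wire message before sending.
+	 */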
+ streams_size_delta = (HDCP_2_2_MAX_CONTENT_STREAMS_CNT - data->k) *
+ sizeof(struct hdcp2_streamid_type);
+ /* Send it to Repeater */
+ ret = shim->write_2_2_msg(dig_port, &msgs.stream_manage,
+ sizeof(msgs.stream_manage) - streams_size_delta);
+ if (ret < 0)
+ goto out;
+
+ ret = shim->read_2_2_msg(dig_port, HDCP_2_2_REP_STREAM_READY,
+ &msgs.stream_ready, sizeof(msgs.stream_ready));
+ if (ret < 0)
+ goto out;
+
+ data->seq_num_m = hdcp->seq_num_m;
+
+ ret = hdcp2_verify_mprime(connector, &msgs.stream_ready);
+
+out:
+ hdcp->seq_num_m++;
+
+ return ret;
+}
+
+static
+int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
+{
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ union {
+ struct hdcp2_rep_send_receiverid_list recvid_list;
+ struct hdcp2_rep_send_ack rep_ack;
+ } msgs;
+ const struct intel_hdcp_shim *shim = hdcp->shim;
+ u32 seq_num_v, device_cnt;
+ u8 *rx_info;
+ int ret;
+
+ ret = shim->read_2_2_msg(dig_port, HDCP_2_2_REP_SEND_RECVID_LIST,
+ &msgs.recvid_list, sizeof(msgs.recvid_list));
+ if (ret < 0)
+ return ret;
+
+ rx_info = msgs.recvid_list.rx_info;
+
+ if (HDCP_2_2_MAX_CASCADE_EXCEEDED(rx_info[1]) ||
+ HDCP_2_2_MAX_DEVS_EXCEEDED(rx_info[1])) {
+ drm_dbg_kms(&dev_priv->drm, "Topology Max Size Exceeded\n");
+ return -EINVAL;
+ }
+
+ /*
+ * MST topology is not Type 1 capable if it contains a downstream
+ * device that is only HDCP 1.x or Legacy HDCP 2.0/2.1 compliant.
+ */
+ dig_port->hdcp_mst_type1_capable =
+ !HDCP_2_2_HDCP1_DEVICE_CONNECTED(rx_info[1]) &&
+ !HDCP_2_2_HDCP_2_0_REP_CONNECTED(rx_info[1]);
+
+	/* Convert seq_num_v to a DWORD and store it in a local variable */
+ seq_num_v =
+ drm_hdcp_be24_to_cpu((const u8 *)msgs.recvid_list.seq_num_v);
+
+ if (!hdcp->hdcp2_encrypted && seq_num_v) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Non zero Seq_num_v at first RecvId_List msg\n");
+ return -EINVAL;
+ }
+
+ if (seq_num_v < hdcp->seq_num_v) {
+ /* Roll over of the seq_num_v from repeater. Reauthenticate. */
+ drm_dbg_kms(&dev_priv->drm, "Seq_num_v roll over.\n");
+ return -EINVAL;
+ }
+
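+	/*
+	 * The device count field is split across the rx_info bytes: combine
+	 * the high bits from byte 0 with the low bits from byte 1.
+	 */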
+ device_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
+ HDCP_2_2_DEV_COUNT_LO(rx_info[1]));
+ if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm,
+ msgs.recvid_list.receiver_ids,
+ device_cnt) > 0) {
+ drm_err(&dev_priv->drm, "Revoked receiver ID(s) is in list\n");
+ return -EPERM;
+ }
+
+ ret = hdcp2_verify_rep_topology_prepare_ack(connector,
+ &msgs.recvid_list,
+ &msgs.rep_ack);
+ if (ret < 0)
+ return ret;
+
+ hdcp->seq_num_v = seq_num_v;
+ ret = shim->write_2_2_msg(dig_port, &msgs.rep_ack,
+ sizeof(msgs.rep_ack));
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int hdcp2_authenticate_sink(struct intel_connector *connector)
+{
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ const struct intel_hdcp_shim *shim = hdcp->shim;
+ int ret;
+
+ ret = hdcp2_authentication_key_exchange(connector);
+ if (ret < 0) {
+ drm_dbg_kms(&i915->drm, "AKE Failed. Err : %d\n", ret);
+ return ret;
+ }
+
+ ret = hdcp2_locality_check(connector);
+ if (ret < 0) {
+ drm_dbg_kms(&i915->drm,
+ "Locality Check failed. Err : %d\n", ret);
+ return ret;
+ }
+
+ ret = hdcp2_session_key_exchange(connector);
+ if (ret < 0) {
+ drm_dbg_kms(&i915->drm, "SKE Failed. Err : %d\n", ret);
+ return ret;
+ }
+
+ if (shim->config_stream_type) {
+ ret = shim->config_stream_type(dig_port,
+ hdcp->is_repeater,
+ hdcp->content_type);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (hdcp->is_repeater) {
+ ret = hdcp2_authenticate_repeater_topology(connector);
+ if (ret < 0) {
+ drm_dbg_kms(&i915->drm,
+ "Repeater Auth Failed. Err: %d\n", ret);
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static int hdcp2_enable_stream_encryption(struct intel_connector *connector)
+{
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
+ enum port port = dig_port->base.port;
+ int ret = 0;
+
+ if (!(intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
+ LINK_ENCRYPTION_STATUS)) {
+ drm_err(&dev_priv->drm, "[%s:%d] HDCP 2.2 Link is not encrypted\n",
+ connector->base.name, connector->base.base.id);
+ ret = -EPERM;
+ goto link_recover;
+ }
+
+ if (hdcp->shim->stream_2_2_encryption) {
+ ret = hdcp->shim->stream_2_2_encryption(connector, true);
+ if (ret) {
+ drm_err(&dev_priv->drm, "[%s:%d] Failed to enable HDCP 2.2 stream enc\n",
+ connector->base.name, connector->base.base.id);
+ return ret;
+ }
+ drm_dbg_kms(&dev_priv->drm, "HDCP 2.2 transcoder: %s stream encrypted\n",
+ transcoder_name(hdcp->stream_transcoder));
+ }
+
+ return 0;
+
+link_recover:
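+	/* Tear down the ME FW session so that the next attempt reauthenticates. */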
+ if (hdcp2_deauthenticate_port(connector) < 0)
+ drm_dbg_kms(&dev_priv->drm, "Port deauth failed.\n");
+
+ dig_port->hdcp_auth_status = false;
+ data->k = 0;
+
+ return ret;
+}
+
+static int hdcp2_enable_encryption(struct intel_connector *connector)
+{
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ enum port port = dig_port->base.port;
+ enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
+ int ret;
+
+ drm_WARN_ON(&dev_priv->drm,
+ intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
+ LINK_ENCRYPTION_STATUS);
+ if (hdcp->shim->toggle_signalling) {
+ ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder,
+ true);
+ if (ret) {
+ drm_err(&dev_priv->drm,
+ "Failed to enable HDCP signalling. %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ if (intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
+ LINK_AUTH_STATUS) {
+ /* Link is Authenticated. Now set for Encryption */
+ intel_de_write(dev_priv,
+ HDCP2_CTL(dev_priv, cpu_transcoder, port),
+ intel_de_read(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port)) | CTL_LINK_ENCRYPTION_REQ);
+ }
+
+ ret = intel_de_wait_for_set(dev_priv,
+ HDCP2_STATUS(dev_priv, cpu_transcoder,
+ port),
+ LINK_ENCRYPTION_STATUS,
+ HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
+ dig_port->hdcp_auth_status = true;
+
+ return ret;
+}
+
+static int hdcp2_disable_encryption(struct intel_connector *connector)
+{
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ enum port port = dig_port->base.port;
+ enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
+ int ret;
+
+ drm_WARN_ON(&dev_priv->drm, !(intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
+ LINK_ENCRYPTION_STATUS));
+
+ intel_de_write(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port),
+ intel_de_read(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port)) & ~CTL_LINK_ENCRYPTION_REQ);
+
+ ret = intel_de_wait_for_clear(dev_priv,
+ HDCP2_STATUS(dev_priv, cpu_transcoder,
+ port),
+ LINK_ENCRYPTION_STATUS,
+ HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
+ if (ret == -ETIMEDOUT)
+ drm_dbg_kms(&dev_priv->drm, "Disable Encryption Timedout");
+
+ if (hdcp->shim->toggle_signalling) {
+ ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder,
+ false);
+ if (ret) {
+ drm_err(&dev_priv->drm,
+ "Failed to disable HDCP signalling. %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static int
+hdcp2_propagate_stream_management_info(struct intel_connector *connector)
+{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ int i, tries = 3, ret;
+
+ if (!connector->hdcp.is_repeater)
+ return 0;
+
+ for (i = 0; i < tries; i++) {
+ ret = _hdcp2_propagate_stream_management_info(connector);
+ if (!ret)
+ break;
+
+		/* Restart the auth in case of a seq_num_m rollover */
+ if (connector->hdcp.seq_num_m > HDCP_2_2_SEQ_NUM_MAX) {
+ drm_dbg_kms(&i915->drm,
+ "seq_num_m roll over.(%d)\n", ret);
+ break;
+ }
+
+ drm_dbg_kms(&i915->drm,
+ "HDCP2 stream management %d of %d Failed.(%d)\n",
+ i + 1, tries, ret);
+ }
+
+ return ret;
+}
+
+static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
+{
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ int ret = 0, i, tries = 3;
+
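+	/*
+	 * Skip sink authentication if the port was already authenticated,
+	 * e.g. by another stream on the same MST digital port.
+	 */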
+ for (i = 0; i < tries && !dig_port->hdcp_auth_status; i++) {
+ ret = hdcp2_authenticate_sink(connector);
+ if (!ret) {
+ ret = intel_hdcp_prepare_streams(connector);
+ if (ret) {
+ drm_dbg_kms(&i915->drm,
+ "Prepare streams failed.(%d)\n",
+ ret);
+ break;
+ }
+
+ ret = hdcp2_propagate_stream_management_info(connector);
+ if (ret) {
+ drm_dbg_kms(&i915->drm,
+ "Stream management failed.(%d)\n",
+ ret);
+ break;
+ }
+
+ ret = hdcp2_authenticate_port(connector);
+ if (!ret)
+ break;
+ drm_dbg_kms(&i915->drm, "HDCP2 port auth failed.(%d)\n",
+ ret);
+ }
+
+ /* Clearing the mei hdcp session */
+ drm_dbg_kms(&i915->drm, "HDCP2.2 Auth %d of %d Failed.(%d)\n",
+ i + 1, tries, ret);
+ if (hdcp2_deauthenticate_port(connector) < 0)
+ drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
+ }
+
+ if (!ret && !dig_port->hdcp_auth_status) {
+ /*
+		 * Ensure the required minimum 200ms interval between
+		 * Session Key Exchange and enabling encryption.
+ */
+ msleep(HDCP_2_2_DELAY_BEFORE_ENCRYPTION_EN);
+ ret = hdcp2_enable_encryption(connector);
+ if (ret < 0) {
+ drm_dbg_kms(&i915->drm,
+ "Encryption Enable Failed.(%d)\n", ret);
+ if (hdcp2_deauthenticate_port(connector) < 0)
+ drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
+ }
+ }
+
+ if (!ret)
+ ret = hdcp2_enable_stream_encryption(connector);
+
+ return ret;
+}
+
+static int _intel_hdcp2_enable(struct intel_connector *connector)
+{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ int ret;
+
+ drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is being enabled. Type: %d\n",
+ connector->base.name, connector->base.base.id,
+ hdcp->content_type);
+
+ ret = hdcp2_authenticate_and_encrypt(connector);
+ if (ret) {
+ drm_dbg_kms(&i915->drm, "HDCP2 Type%d Enabling Failed. (%d)\n",
+ hdcp->content_type, ret);
+ return ret;
+ }
+
+ drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is enabled. Type %d\n",
+ connector->base.name, connector->base.base.id,
+ hdcp->content_type);
+
+ hdcp->hdcp2_encrypted = true;
+ return 0;
+}
+
+static int
+_intel_hdcp2_disable(struct intel_connector *connector, bool hdcp2_link_recovery)
+{
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ int ret;
+
+ drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is being Disabled\n",
+ connector->base.name, connector->base.base.id);
+
+ if (hdcp->shim->stream_2_2_encryption) {
+ ret = hdcp->shim->stream_2_2_encryption(connector, false);
+ if (ret) {
+ drm_err(&i915->drm, "[%s:%d] Failed to disable HDCP 2.2 stream enc\n",
+ connector->base.name, connector->base.base.id);
+ return ret;
+ }
+ drm_dbg_kms(&i915->drm, "HDCP 2.2 transcoder: %s stream encryption disabled\n",
+ transcoder_name(hdcp->stream_transcoder));
+
+ if (dig_port->num_hdcp_streams > 0 && !hdcp2_link_recovery)
+ return 0;
+ }
+
+ ret = hdcp2_disable_encryption(connector);
+
+ if (hdcp2_deauthenticate_port(connector) < 0)
+ drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
+
+ connector->hdcp.hdcp2_encrypted = false;
+ dig_port->hdcp_auth_status = false;
+ data->k = 0;
+
+ return ret;
+}
+
+/* Implements the Link Integrity Check for HDCP2.2 */
+static int intel_hdcp2_check_link(struct intel_connector *connector)
+{
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ enum port port = dig_port->base.port;
+ enum transcoder cpu_transcoder;
+ int ret = 0;
+
+ mutex_lock(&hdcp->mutex);
+ mutex_lock(&dig_port->hdcp_mutex);
+ cpu_transcoder = hdcp->cpu_transcoder;
+
+ /* hdcp2_check_link is expected only when HDCP2.2 is Enabled */
+ if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
+ !hdcp->hdcp2_encrypted) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (drm_WARN_ON(&dev_priv->drm,
+ !intel_hdcp2_in_use(dev_priv, cpu_transcoder, port))) {
+ drm_err(&dev_priv->drm,
+ "HDCP2.2 link stopped the encryption, %x\n",
+ intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)));
+ ret = -ENXIO;
+ _intel_hdcp2_disable(connector, true);
+ intel_hdcp_update_value(connector,
+ DRM_MODE_CONTENT_PROTECTION_DESIRED,
+ true);
+ goto out;
+ }
+
+ ret = hdcp->shim->check_2_2_link(dig_port, connector);
+ if (ret == HDCP_LINK_PROTECTED) {
+ if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
+ intel_hdcp_update_value(connector,
+ DRM_MODE_CONTENT_PROTECTION_ENABLED,
+ true);
+ }
+ goto out;
+ }
+
+ if (ret == HDCP_TOPOLOGY_CHANGE) {
+ if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
+ goto out;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "HDCP2.2 Downstream topology change\n");
+ ret = hdcp2_authenticate_repeater_topology(connector);
+ if (!ret) {
+ intel_hdcp_update_value(connector,
+ DRM_MODE_CONTENT_PROTECTION_ENABLED,
+ true);
+ goto out;
+ }
+ drm_dbg_kms(&dev_priv->drm,
+ "[%s:%d] Repeater topology auth failed.(%d)\n",
+ connector->base.name, connector->base.base.id,
+ ret);
+ } else {
+ drm_dbg_kms(&dev_priv->drm,
+ "[%s:%d] HDCP2.2 link failed, retrying auth\n",
+ connector->base.name, connector->base.base.id);
+ }
+
+ ret = _intel_hdcp2_disable(connector, true);
+ if (ret) {
+ drm_err(&dev_priv->drm,
+ "[%s:%d] Failed to disable hdcp2.2 (%d)\n",
+ connector->base.name, connector->base.base.id, ret);
+ intel_hdcp_update_value(connector,
+ DRM_MODE_CONTENT_PROTECTION_DESIRED, true);
+ goto out;
+ }
+
+ ret = _intel_hdcp2_enable(connector);
+ if (ret) {
+ drm_dbg_kms(&dev_priv->drm,
+ "[%s:%d] Failed to enable hdcp2.2 (%d)\n",
+ connector->base.name, connector->base.base.id,
+ ret);
+ intel_hdcp_update_value(connector,
+ DRM_MODE_CONTENT_PROTECTION_DESIRED,
+ true);
+ goto out;
+ }
+
+out:
+ mutex_unlock(&dig_port->hdcp_mutex);
+ mutex_unlock(&hdcp->mutex);
+ return ret;
+}
+
+static void intel_hdcp_check_work(struct work_struct *work)
+{
+ struct intel_hdcp *hdcp = container_of(to_delayed_work(work),
+ struct intel_hdcp,
+ check_work);
+ struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
+
+ if (drm_connector_is_unregistered(&connector->base))
+ return;
+
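+	/*
+	 * intel_hdcp2_check_link() bails out with an error unless HDCP2.2 is
+	 * actually enabled, in which case the HDCP1.4 check is tried instead.
+	 */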
+ if (!intel_hdcp2_check_link(connector))
+ schedule_delayed_work(&hdcp->check_work,
+ DRM_HDCP2_CHECK_PERIOD_MS);
+ else if (!intel_hdcp_check_link(connector))
+ schedule_delayed_work(&hdcp->check_work,
+ DRM_HDCP_CHECK_PERIOD_MS);
+}
+
+static int i915_hdcp_component_bind(struct device *i915_kdev,
+ struct device *mei_kdev, void *data)
+{
+ struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
+
+ drm_dbg(&dev_priv->drm, "I915 HDCP comp bind\n");
+ mutex_lock(&dev_priv->display.hdcp.comp_mutex);
+ dev_priv->display.hdcp.master = (struct i915_hdcp_comp_master *)data;
+ dev_priv->display.hdcp.master->mei_dev = mei_kdev;
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
+
+ return 0;
+}
+
+static void i915_hdcp_component_unbind(struct device *i915_kdev,
+ struct device *mei_kdev, void *data)
+{
+ struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
+
+ drm_dbg(&dev_priv->drm, "I915 HDCP comp unbind\n");
+ mutex_lock(&dev_priv->display.hdcp.comp_mutex);
+ dev_priv->display.hdcp.master = NULL;
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
+}
+
+static const struct component_ops i915_hdcp_component_ops = {
+ .bind = i915_hdcp_component_bind,
+ .unbind = i915_hdcp_component_unbind,
+};
+
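+/*
+ * The ME FW DDI enum matches the i915 port enum for ports B through F;
+ * only port A needs an explicit translation.
+ */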
+static enum mei_fw_ddi intel_get_mei_fw_ddi_index(enum port port)
+{
+ switch (port) {
+ case PORT_A:
+ return MEI_DDI_A;
+ case PORT_B ... PORT_F:
+ return (enum mei_fw_ddi)port;
+ default:
+ return MEI_DDI_INVALID_PORT;
+ }
+}
+
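+/*
+ * For transcoders A through D the ME FW transcoder value is the i915
+ * transcoder index with bit 4 set.
+ */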
+static enum mei_fw_tc intel_get_mei_fw_tc(enum transcoder cpu_transcoder)
+{
+ switch (cpu_transcoder) {
+ case TRANSCODER_A ... TRANSCODER_D:
+ return (enum mei_fw_tc)(cpu_transcoder | 0x10);
+ default: /* eDP, DSI TRANSCODERS are non HDCP capable */
+ return MEI_INVALID_TRANSCODER;
+ }
+}
+
+static int initialize_hdcp_port_data(struct intel_connector *connector,
+ struct intel_digital_port *dig_port,
+ const struct intel_hdcp_shim *shim)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ enum port port = dig_port->base.port;
+
+ if (DISPLAY_VER(dev_priv) < 12)
+ data->fw_ddi = intel_get_mei_fw_ddi_index(port);
+ else
+ /*
+		 * As per the ME FW API expectation, for GEN 12+, fw_ddi is
+		 * filled with zero (invalid port index).
+ */
+ data->fw_ddi = MEI_DDI_INVALID_PORT;
+
+ /*
+	 * The associated transcoder is set and modified at modeset time, so
+	 * fw_tc is initialized here to zero (invalid transcoder index). On
+	 * platforms older than Gen12 it keeps this value forever.
+ */
+ data->fw_tc = MEI_INVALID_TRANSCODER;
+
+ data->port_type = (u8)HDCP_PORT_TYPE_INTEGRATED;
+ data->protocol = (u8)shim->protocol;
+
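+	/*
+	 * Allocate one stream slot per pipe so that the array can describe
+	 * every possible MST stream; SST only ever uses slot 0.
+	 */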
+ if (!data->streams)
+ data->streams = kcalloc(INTEL_NUM_PIPES(dev_priv),
+ sizeof(struct hdcp2_streamid_type),
+ GFP_KERNEL);
+ if (!data->streams) {
+ drm_err(&dev_priv->drm, "Out of Memory\n");
+ return -ENOMEM;
+ }
+ /* For SST */
+ data->streams[0].stream_id = 0;
+ data->streams[0].stream_type = hdcp->content_type;
+
+ return 0;
+}
+
+static bool is_hdcp2_supported(struct drm_i915_private *dev_priv)
+{
+ if (!IS_ENABLED(CONFIG_INTEL_MEI_HDCP))
+ return false;
+
+ return (DISPLAY_VER(dev_priv) >= 10 ||
+ IS_KABYLAKE(dev_priv) ||
+ IS_COFFEELAKE(dev_priv) ||
+ IS_COMETLAKE(dev_priv));
+}
+
+void intel_hdcp_component_init(struct drm_i915_private *dev_priv)
+{
+ int ret;
+
+ if (!is_hdcp2_supported(dev_priv))
+ return;
+
+ mutex_lock(&dev_priv->display.hdcp.comp_mutex);
+ drm_WARN_ON(&dev_priv->drm, dev_priv->display.hdcp.comp_added);
+
+ dev_priv->display.hdcp.comp_added = true;
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
+ ret = component_add_typed(dev_priv->drm.dev, &i915_hdcp_component_ops,
+ I915_COMPONENT_HDCP);
+ if (ret < 0) {
+ drm_dbg_kms(&dev_priv->drm, "Failed at component add(%d)\n",
+ ret);
+ mutex_lock(&dev_priv->display.hdcp.comp_mutex);
+ dev_priv->display.hdcp.comp_added = false;
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
+ return;
+ }
+}
+
+static void intel_hdcp2_init(struct intel_connector *connector,
+ struct intel_digital_port *dig_port,
+ const struct intel_hdcp_shim *shim)
+{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ int ret;
+
+ ret = initialize_hdcp_port_data(connector, dig_port, shim);
+ if (ret) {
+ drm_dbg_kms(&i915->drm, "Mei hdcp data init failed\n");
+ return;
+ }
+
+ hdcp->hdcp2_supported = true;
+}
+
+int intel_hdcp_init(struct intel_connector *connector,
+ struct intel_digital_port *dig_port,
+ const struct intel_hdcp_shim *shim)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ int ret;
+
+ if (!shim)
+ return -EINVAL;
+
+ if (is_hdcp2_supported(dev_priv))
+ intel_hdcp2_init(connector, dig_port, shim);
+
+ ret =
+ drm_connector_attach_content_protection_property(&connector->base,
+ hdcp->hdcp2_supported);
+ if (ret) {
+ hdcp->hdcp2_supported = false;
+ kfree(dig_port->hdcp_port_data.streams);
+ return ret;
+ }
+
+ hdcp->shim = shim;
+ mutex_init(&hdcp->mutex);
+ INIT_DELAYED_WORK(&hdcp->check_work, intel_hdcp_check_work);
+ INIT_WORK(&hdcp->prop_work, intel_hdcp_prop_work);
+ init_waitqueue_head(&hdcp->cp_irq_queue);
+
+ return 0;
+}
+
+int intel_hdcp_enable(struct intel_connector *connector,
+ const struct intel_crtc_state *pipe_config, u8 content_type)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ unsigned long check_link_interval = DRM_HDCP_CHECK_PERIOD_MS;
+ int ret = -EINVAL;
+
+ if (!hdcp->shim)
+ return -ENOENT;
+
+ if (!connector->encoder) {
+ drm_err(&dev_priv->drm, "[%s:%d] encoder is not initialized\n",
+ connector->base.name, connector->base.base.id);
+ return -ENODEV;
+ }
+
+ mutex_lock(&hdcp->mutex);
+ mutex_lock(&dig_port->hdcp_mutex);
+ drm_WARN_ON(&dev_priv->drm,
+ hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED);
+ hdcp->content_type = content_type;
+
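+	/*
+	 * For MST the link-level HDCP state lives on the MST master
+	 * transcoder, while the per-stream transcoder is tracked separately
+	 * for stream encryption.
+	 */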
+ if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST)) {
+ hdcp->cpu_transcoder = pipe_config->mst_master_transcoder;
+ hdcp->stream_transcoder = pipe_config->cpu_transcoder;
+ } else {
+ hdcp->cpu_transcoder = pipe_config->cpu_transcoder;
+ hdcp->stream_transcoder = INVALID_TRANSCODER;
+ }
+
+ if (DISPLAY_VER(dev_priv) >= 12)
+ dig_port->hdcp_port_data.fw_tc = intel_get_mei_fw_tc(hdcp->cpu_transcoder);
+
+ /*
+	 * Since HDCP2.2 is more secure than HDCP1.4, prefer HDCP2.2 whenever
+	 * the setup is capable of it.
+ */
+ if (intel_hdcp2_capable(connector)) {
+ ret = _intel_hdcp2_enable(connector);
+ if (!ret)
+ check_link_interval = DRM_HDCP2_CHECK_PERIOD_MS;
+ }
+
+ /*
+ * When HDCP2.2 fails and Content Type is not Type1, HDCP1.4 will
+ * be attempted.
+ */
+ if (ret && intel_hdcp_capable(connector) &&
+ hdcp->content_type != DRM_MODE_HDCP_CONTENT_TYPE1) {
+ ret = _intel_hdcp_enable(connector);
+ }
+
+ if (!ret) {
+ schedule_delayed_work(&hdcp->check_work, check_link_interval);
+ intel_hdcp_update_value(connector,
+ DRM_MODE_CONTENT_PROTECTION_ENABLED,
+ true);
+ }
+
+ mutex_unlock(&dig_port->hdcp_mutex);
+ mutex_unlock(&hdcp->mutex);
+ return ret;
+}
+
+int intel_hdcp_disable(struct intel_connector *connector)
+{
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ int ret = 0;
+
+ if (!hdcp->shim)
+ return -ENOENT;
+
+ mutex_lock(&hdcp->mutex);
+ mutex_lock(&dig_port->hdcp_mutex);
+
+ if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
+ goto out;
+
+ intel_hdcp_update_value(connector,
+ DRM_MODE_CONTENT_PROTECTION_UNDESIRED, false);
+ if (hdcp->hdcp2_encrypted)
+ ret = _intel_hdcp2_disable(connector, false);
+ else if (hdcp->hdcp_encrypted)
+ ret = _intel_hdcp_disable(connector);
+
+out:
+ mutex_unlock(&dig_port->hdcp_mutex);
+ mutex_unlock(&hdcp->mutex);
+ cancel_delayed_work_sync(&hdcp->check_work);
+ return ret;
+}
+
+void intel_hdcp_update_pipe(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_connector *connector =
+ to_intel_connector(conn_state->connector);
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ bool content_protection_type_changed, desired_and_not_enabled = false;
+
+ if (!connector->hdcp.shim)
+ return;
+
+ content_protection_type_changed =
+ (conn_state->hdcp_content_type != hdcp->content_type &&
+ conn_state->content_protection !=
+ DRM_MODE_CONTENT_PROTECTION_UNDESIRED);
+
+ /*
+	 * If a content type change is requested during an HDCP encryption
+	 * session, disable HDCP and re-enable it with the new type value.
+ */
+ if (conn_state->content_protection ==
+ DRM_MODE_CONTENT_PROTECTION_UNDESIRED ||
+ content_protection_type_changed)
+ intel_hdcp_disable(connector);
+
+ /*
+	 * Mark the hdcp state as DESIRED after the hdcp disable done as
+	 * part of the type change procedure.
+ */
+ if (content_protection_type_changed) {
+ mutex_lock(&hdcp->mutex);
+ hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ drm_connector_get(&connector->base);
+ schedule_work(&hdcp->prop_work);
+ mutex_unlock(&hdcp->mutex);
+ }
+
+ if (conn_state->content_protection ==
+ DRM_MODE_CONTENT_PROTECTION_DESIRED) {
+ mutex_lock(&hdcp->mutex);
+		/* Avoid enabling hdcp if it is already ENABLED */
+ desired_and_not_enabled =
+ hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED;
+ mutex_unlock(&hdcp->mutex);
+ /*
+		 * If HDCP is already ENABLED and the CP property is DESIRED,
+		 * schedule prop_work to report the correct CP property to
+		 * user space.
+ */
+ if (!desired_and_not_enabled && !content_protection_type_changed) {
+ drm_connector_get(&connector->base);
+ schedule_work(&hdcp->prop_work);
+ }
+ }
+
+ if (desired_and_not_enabled || content_protection_type_changed)
+ intel_hdcp_enable(connector,
+ crtc_state,
+ (u8)conn_state->hdcp_content_type);
+}
+
+void intel_hdcp_component_fini(struct drm_i915_private *dev_priv)
+{
+ mutex_lock(&dev_priv->display.hdcp.comp_mutex);
+ if (!dev_priv->display.hdcp.comp_added) {
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
+ return;
+ }
+
+ dev_priv->display.hdcp.comp_added = false;
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
+
+ component_del(dev_priv->drm.dev, &i915_hdcp_component_ops);
+}
+
+void intel_hdcp_cleanup(struct intel_connector *connector)
+{
+ struct intel_hdcp *hdcp = &connector->hdcp;
+
+ if (!hdcp->shim)
+ return;
+
+ /*
+ * If the connector is registered, it's possible userspace could kick
+ * off another HDCP enable, which would re-spawn the workers.
+ */
+ drm_WARN_ON(connector->base.dev,
+ connector->base.registration_state == DRM_CONNECTOR_REGISTERED);
+
+ /*
+ * Now that the connector is not registered, check_work won't be run,
+ * but cancel any outstanding instances of it
+ */
+ cancel_delayed_work_sync(&hdcp->check_work);
+
+ /*
+ * We don't cancel prop_work in the same way as check_work since it
+ * requires connection_mutex which could be held while calling this
+ * function. Instead, we rely on the connector references grabbed before
+ * scheduling prop_work to ensure the connector is alive when prop_work
+ * is run. So if we're in the destroy path (which is where this
+ * function should be called), we're "guaranteed" that prop_work is not
+ * active (tl;dr This Should Never Happen).
+ */
+ drm_WARN_ON(connector->base.dev, work_pending(&hdcp->prop_work));
+
+ mutex_lock(&hdcp->mutex);
+ hdcp->shim = NULL;
+ mutex_unlock(&hdcp->mutex);
+}
+
+void intel_hdcp_atomic_check(struct drm_connector *connector,
+ struct drm_connector_state *old_state,
+ struct drm_connector_state *new_state)
+{
+ u64 old_cp = old_state->content_protection;
+ u64 new_cp = new_state->content_protection;
+ struct drm_crtc_state *crtc_state;
+
+ if (!new_state->crtc) {
+ /*
+ * If the connector is being disabled with CP enabled, mark it
+ * desired so it's re-enabled when the connector is brought back
+ */
+ if (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)
+ new_state->content_protection =
+ DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ return;
+ }
+
+ crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
+ new_state->crtc);
+ /*
+ * Fix the HDCP uapi content protection state in case of modeset.
+	 * FIXME: As per the HDCP content protection property uapi doc, a
+	 * uevent() needs to be sent on a transition from ENABLED->DESIRED.
+ */
+ if (drm_atomic_crtc_needs_modeset(crtc_state) &&
+ (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
+ new_cp != DRM_MODE_CONTENT_PROTECTION_UNDESIRED))
+ new_state->content_protection =
+ DRM_MODE_CONTENT_PROTECTION_DESIRED;
+
+ /*
+	 * Nothing to do if the state didn't change, or if HDCP was activated
+	 * since the last commit, provided the hdcp content type also didn't
+	 * change.
+ */
+ if (old_cp == new_cp ||
+ (old_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
+ new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)) {
+ if (old_state->hdcp_content_type ==
+ new_state->hdcp_content_type)
+ return;
+ }
+
+ crtc_state->mode_changed = true;
+}
+
+/* Handles the CP_IRQ raised from the DP HDCP sink */
+void intel_hdcp_handle_cp_irq(struct intel_connector *connector)
+{
+ struct intel_hdcp *hdcp = &connector->hdcp;
+
+ if (!hdcp->shim)
+ return;
+
+ atomic_inc(&connector->hdcp.cp_irq_count);
+ wake_up_all(&connector->hdcp.cp_irq_queue);
+
+ schedule_delayed_work(&hdcp->check_work, 0);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.h b/drivers/gpu/drm/i915/display/intel_hdcp.h
new file mode 100644
index 000000000..8f53b0c7f
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_HDCP_H__
+#define __INTEL_HDCP_H__
+
+#include <linux/types.h>
+
+#define HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS 50
+
+struct drm_connector;
+struct drm_connector_state;
+struct drm_i915_private;
+struct intel_atomic_state;
+struct intel_connector;
+struct intel_crtc_state;
+struct intel_encoder;
+struct intel_hdcp_shim;
+struct intel_digital_port;
+enum port;
+enum transcoder;
+
+void intel_hdcp_atomic_check(struct drm_connector *connector,
+ struct drm_connector_state *old_state,
+ struct drm_connector_state *new_state);
+int intel_hdcp_init(struct intel_connector *connector,
+ struct intel_digital_port *dig_port,
+ const struct intel_hdcp_shim *hdcp_shim);
+int intel_hdcp_enable(struct intel_connector *connector,
+ const struct intel_crtc_state *pipe_config, u8 content_type);
+int intel_hdcp_disable(struct intel_connector *connector);
+void intel_hdcp_update_pipe(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
+bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port);
+bool intel_hdcp_capable(struct intel_connector *connector);
+bool intel_hdcp2_capable(struct intel_connector *connector);
+void intel_hdcp_component_init(struct drm_i915_private *dev_priv);
+void intel_hdcp_component_fini(struct drm_i915_private *dev_priv);
+void intel_hdcp_cleanup(struct intel_connector *connector);
+void intel_hdcp_handle_cp_irq(struct intel_connector *connector);
+
+#endif /* __INTEL_HDCP_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_regs.h b/drivers/gpu/drm/i915/display/intel_hdcp_regs.h
new file mode 100644
index 000000000..2a3733e89
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_hdcp_regs.h
@@ -0,0 +1,270 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __INTEL_HDCP_REGS_H__
+#define __INTEL_HDCP_REGS_H__
+
+#include "i915_reg_defs.h"
+
+/* HDCP Key Registers */
+#define HDCP_KEY_CONF _MMIO(0x66c00)
+#define HDCP_AKSV_SEND_TRIGGER REG_BIT(31)
+#define HDCP_CLEAR_KEYS_TRIGGER REG_BIT(30)
+#define HDCP_KEY_LOAD_TRIGGER REG_BIT(8)
+#define HDCP_KEY_STATUS _MMIO(0x66c04)
+#define HDCP_FUSE_IN_PROGRESS REG_BIT(7)
+#define HDCP_FUSE_ERROR REG_BIT(6)
+#define HDCP_FUSE_DONE REG_BIT(5)
+#define HDCP_KEY_LOAD_STATUS REG_BIT(1)
+#define HDCP_KEY_LOAD_DONE REG_BIT(0)
+#define HDCP_AKSV_LO _MMIO(0x66c10)
+#define HDCP_AKSV_HI _MMIO(0x66c14)
+
+/* HDCP Repeater Registers */
+#define HDCP_REP_CTL _MMIO(0x66d00)
+#define HDCP_TRANSA_REP_PRESENT REG_BIT(31)
+#define HDCP_TRANSB_REP_PRESENT REG_BIT(30)
+#define HDCP_TRANSC_REP_PRESENT REG_BIT(29)
+#define HDCP_TRANSD_REP_PRESENT REG_BIT(28)
+#define HDCP_DDIB_REP_PRESENT REG_BIT(30)
+#define HDCP_DDIA_REP_PRESENT REG_BIT(29)
+#define HDCP_DDIC_REP_PRESENT REG_BIT(28)
+#define HDCP_DDID_REP_PRESENT REG_BIT(27)
+#define HDCP_DDIF_REP_PRESENT REG_BIT(26)
+#define HDCP_DDIE_REP_PRESENT REG_BIT(25)
+#define HDCP_TRANSA_SHA1_M0 (1 << 20)
+#define HDCP_TRANSB_SHA1_M0 (2 << 20)
+#define HDCP_TRANSC_SHA1_M0 (3 << 20)
+#define HDCP_TRANSD_SHA1_M0 (4 << 20)
+#define HDCP_DDIB_SHA1_M0 (1 << 20)
+#define HDCP_DDIA_SHA1_M0 (2 << 20)
+#define HDCP_DDIC_SHA1_M0 (3 << 20)
+#define HDCP_DDID_SHA1_M0 (4 << 20)
+#define HDCP_DDIF_SHA1_M0 (5 << 20)
+#define HDCP_DDIE_SHA1_M0 (6 << 20) /* Bspec says 5? */
+#define HDCP_SHA1_BUSY REG_BIT(16)
+#define HDCP_SHA1_READY REG_BIT(17)
+#define HDCP_SHA1_COMPLETE REG_BIT(18)
+#define HDCP_SHA1_V_MATCH REG_BIT(19)
+#define HDCP_SHA1_TEXT_32 (1 << 1)
+#define HDCP_SHA1_COMPLETE_HASH (2 << 1)
+#define HDCP_SHA1_TEXT_24 (4 << 1)
+#define HDCP_SHA1_TEXT_16 (5 << 1)
+#define HDCP_SHA1_TEXT_8 (6 << 1)
+#define HDCP_SHA1_TEXT_0 (7 << 1)
+#define HDCP_SHA_V_PRIME_H0 _MMIO(0x66d04)
+#define HDCP_SHA_V_PRIME_H1 _MMIO(0x66d08)
+#define HDCP_SHA_V_PRIME_H2 _MMIO(0x66d0C)
+#define HDCP_SHA_V_PRIME_H3 _MMIO(0x66d10)
+#define HDCP_SHA_V_PRIME_H4 _MMIO(0x66d14)
+#define HDCP_SHA_V_PRIME(h) _MMIO((0x66d04 + (h) * 4))
+#define HDCP_SHA_TEXT _MMIO(0x66d18)
+
+/* HDCP Auth Registers */
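+/*
+ * Before display ver 12 the HDCP registers are per-DDI (port); from ver 12
+ * onwards they are per-transcoder, hence the paired PORT_ and TRANS_
+ * definitions and the HDCP_xxx(dev_priv, trans, port) selector macros below.
+ */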
+#define _PORTA_HDCP_AUTHENC 0x66800
+#define _PORTB_HDCP_AUTHENC 0x66500
+#define _PORTC_HDCP_AUTHENC 0x66600
+#define _PORTD_HDCP_AUTHENC 0x66700
+#define _PORTE_HDCP_AUTHENC 0x66A00
+#define _PORTF_HDCP_AUTHENC 0x66900
+#define _PORT_HDCP_AUTHENC(port, x) _MMIO(_PICK(port, \
+ _PORTA_HDCP_AUTHENC, \
+ _PORTB_HDCP_AUTHENC, \
+ _PORTC_HDCP_AUTHENC, \
+ _PORTD_HDCP_AUTHENC, \
+ _PORTE_HDCP_AUTHENC, \
+ _PORTF_HDCP_AUTHENC) + (x))
+#define PORT_HDCP_CONF(port) _PORT_HDCP_AUTHENC(port, 0x0)
+#define _TRANSA_HDCP_CONF 0x66400
+#define _TRANSB_HDCP_CONF 0x66500
+#define TRANS_HDCP_CONF(trans) _MMIO_TRANS(trans, _TRANSA_HDCP_CONF, \
+ _TRANSB_HDCP_CONF)
+#define HDCP_CONF(dev_priv, trans, port) \
+ (GRAPHICS_VER(dev_priv) >= 12 ? \
+ TRANS_HDCP_CONF(trans) : \
+ PORT_HDCP_CONF(port))
+
+#define HDCP_CONF_CAPTURE_AN REG_BIT(0)
+#define HDCP_CONF_AUTH_AND_ENC (REG_BIT(1) | REG_BIT(0))
+#define PORT_HDCP_ANINIT(port) _PORT_HDCP_AUTHENC(port, 0x4)
+#define _TRANSA_HDCP_ANINIT 0x66404
+#define _TRANSB_HDCP_ANINIT 0x66504
+#define TRANS_HDCP_ANINIT(trans) _MMIO_TRANS(trans, \
+ _TRANSA_HDCP_ANINIT, \
+ _TRANSB_HDCP_ANINIT)
+#define HDCP_ANINIT(dev_priv, trans, port) \
+ (GRAPHICS_VER(dev_priv) >= 12 ? \
+ TRANS_HDCP_ANINIT(trans) : \
+ PORT_HDCP_ANINIT(port))
+
+#define PORT_HDCP_ANLO(port) _PORT_HDCP_AUTHENC(port, 0x8)
+#define _TRANSA_HDCP_ANLO 0x66408
+#define _TRANSB_HDCP_ANLO 0x66508
+#define TRANS_HDCP_ANLO(trans) _MMIO_TRANS(trans, _TRANSA_HDCP_ANLO, \
+ _TRANSB_HDCP_ANLO)
+#define HDCP_ANLO(dev_priv, trans, port) \
+ (GRAPHICS_VER(dev_priv) >= 12 ? \
+ TRANS_HDCP_ANLO(trans) : \
+ PORT_HDCP_ANLO(port))
+
+#define PORT_HDCP_ANHI(port) _PORT_HDCP_AUTHENC(port, 0xC)
+#define _TRANSA_HDCP_ANHI 0x6640C
+#define _TRANSB_HDCP_ANHI 0x6650C
+#define TRANS_HDCP_ANHI(trans) _MMIO_TRANS(trans, _TRANSA_HDCP_ANHI, \
+ _TRANSB_HDCP_ANHI)
+#define HDCP_ANHI(dev_priv, trans, port) \
+ (GRAPHICS_VER(dev_priv) >= 12 ? \
+ TRANS_HDCP_ANHI(trans) : \
+ PORT_HDCP_ANHI(port))
+
+#define PORT_HDCP_BKSVLO(port) _PORT_HDCP_AUTHENC(port, 0x10)
+#define _TRANSA_HDCP_BKSVLO 0x66410
+#define _TRANSB_HDCP_BKSVLO 0x66510
+#define TRANS_HDCP_BKSVLO(trans) _MMIO_TRANS(trans, \
+ _TRANSA_HDCP_BKSVLO, \
+ _TRANSB_HDCP_BKSVLO)
+#define HDCP_BKSVLO(dev_priv, trans, port) \
+ (GRAPHICS_VER(dev_priv) >= 12 ? \
+ TRANS_HDCP_BKSVLO(trans) : \
+ PORT_HDCP_BKSVLO(port))
+
+#define PORT_HDCP_BKSVHI(port) _PORT_HDCP_AUTHENC(port, 0x14)
+#define _TRANSA_HDCP_BKSVHI 0x66414
+#define _TRANSB_HDCP_BKSVHI 0x66514
+#define TRANS_HDCP_BKSVHI(trans) _MMIO_TRANS(trans, \
+ _TRANSA_HDCP_BKSVHI, \
+ _TRANSB_HDCP_BKSVHI)
+#define HDCP_BKSVHI(dev_priv, trans, port) \
+ (GRAPHICS_VER(dev_priv) >= 12 ? \
+ TRANS_HDCP_BKSVHI(trans) : \
+ PORT_HDCP_BKSVHI(port))
+
+#define PORT_HDCP_RPRIME(port) _PORT_HDCP_AUTHENC(port, 0x18)
+#define _TRANSA_HDCP_RPRIME 0x66418
+#define _TRANSB_HDCP_RPRIME 0x66518
+#define TRANS_HDCP_RPRIME(trans) _MMIO_TRANS(trans, \
+ _TRANSA_HDCP_RPRIME, \
+ _TRANSB_HDCP_RPRIME)
+#define HDCP_RPRIME(dev_priv, trans, port) \
+ (GRAPHICS_VER(dev_priv) >= 12 ? \
+ TRANS_HDCP_RPRIME(trans) : \
+ PORT_HDCP_RPRIME(port))
+
+#define PORT_HDCP_STATUS(port) _PORT_HDCP_AUTHENC(port, 0x1C)
+#define _TRANSA_HDCP_STATUS 0x6641C
+#define _TRANSB_HDCP_STATUS 0x6651C
+#define TRANS_HDCP_STATUS(trans) _MMIO_TRANS(trans, \
+ _TRANSA_HDCP_STATUS, \
+ _TRANSB_HDCP_STATUS)
+#define HDCP_STATUS(dev_priv, trans, port) \
+ (GRAPHICS_VER(dev_priv) >= 12 ? \
+ TRANS_HDCP_STATUS(trans) : \
+ PORT_HDCP_STATUS(port))
+
+#define HDCP_STATUS_STREAM_A_ENC REG_BIT(31)
+#define HDCP_STATUS_STREAM_B_ENC REG_BIT(30)
+#define HDCP_STATUS_STREAM_C_ENC REG_BIT(29)
+#define HDCP_STATUS_STREAM_D_ENC REG_BIT(28)
+#define HDCP_STATUS_AUTH REG_BIT(21)
+#define HDCP_STATUS_ENC REG_BIT(20)
+#define HDCP_STATUS_RI_MATCH REG_BIT(19)
+#define HDCP_STATUS_R0_READY REG_BIT(18)
+#define HDCP_STATUS_AN_READY REG_BIT(17)
+#define HDCP_STATUS_CIPHER REG_BIT(16)
+#define HDCP_STATUS_FRAME_CNT(x) (((x) >> 8) & 0xff)
+
+/* HDCP2.2 Registers */
+#define _PORTA_HDCP2_BASE 0x66800
+#define _PORTB_HDCP2_BASE 0x66500
+#define _PORTC_HDCP2_BASE 0x66600
+#define _PORTD_HDCP2_BASE 0x66700
+#define _PORTE_HDCP2_BASE 0x66A00
+#define _PORTF_HDCP2_BASE 0x66900
+#define _PORT_HDCP2_BASE(port, x) _MMIO(_PICK((port), \
+ _PORTA_HDCP2_BASE, \
+ _PORTB_HDCP2_BASE, \
+ _PORTC_HDCP2_BASE, \
+ _PORTD_HDCP2_BASE, \
+ _PORTE_HDCP2_BASE, \
+ _PORTF_HDCP2_BASE) + (x))
+
+#define PORT_HDCP2_AUTH(port) _PORT_HDCP2_BASE(port, 0x98)
+#define _TRANSA_HDCP2_AUTH 0x66498
+#define _TRANSB_HDCP2_AUTH 0x66598
+#define TRANS_HDCP2_AUTH(trans) _MMIO_TRANS(trans, _TRANSA_HDCP2_AUTH, \
+ _TRANSB_HDCP2_AUTH)
+#define AUTH_LINK_AUTHENTICATED REG_BIT(31)
+#define AUTH_LINK_TYPE REG_BIT(30)
+#define AUTH_FORCE_CLR_INPUTCTR REG_BIT(19)
+#define AUTH_CLR_KEYS REG_BIT(18)
+#define HDCP2_AUTH(dev_priv, trans, port) \
+ (GRAPHICS_VER(dev_priv) >= 12 ? \
+ TRANS_HDCP2_AUTH(trans) : \
+ PORT_HDCP2_AUTH(port))
+
+#define PORT_HDCP2_CTL(port) _PORT_HDCP2_BASE(port, 0xB0)
+#define _TRANSA_HDCP2_CTL 0x664B0
+#define _TRANSB_HDCP2_CTL 0x665B0
+#define TRANS_HDCP2_CTL(trans) _MMIO_TRANS(trans, _TRANSA_HDCP2_CTL, \
+ _TRANSB_HDCP2_CTL)
+#define CTL_LINK_ENCRYPTION_REQ REG_BIT(31)
+#define HDCP2_CTL(dev_priv, trans, port) \
+ (GRAPHICS_VER(dev_priv) >= 12 ? \
+ TRANS_HDCP2_CTL(trans) : \
+ PORT_HDCP2_CTL(port))
+
+#define PORT_HDCP2_STATUS(port) _PORT_HDCP2_BASE(port, 0xB4)
+#define _TRANSA_HDCP2_STATUS 0x664B4
+#define _TRANSB_HDCP2_STATUS 0x665B4
+#define TRANS_HDCP2_STATUS(trans) _MMIO_TRANS(trans, \
+ _TRANSA_HDCP2_STATUS, \
+ _TRANSB_HDCP2_STATUS)
+#define LINK_TYPE_STATUS REG_BIT(22)
+#define LINK_AUTH_STATUS REG_BIT(21)
+#define LINK_ENCRYPTION_STATUS REG_BIT(20)
+#define HDCP2_STATUS(dev_priv, trans, port) \
+ (GRAPHICS_VER(dev_priv) >= 12 ? \
+ TRANS_HDCP2_STATUS(trans) : \
+ PORT_HDCP2_STATUS(port))
+
+#define _PIPEA_HDCP2_STREAM_STATUS 0x668C0
+#define _PIPEB_HDCP2_STREAM_STATUS 0x665C0
+#define _PIPEC_HDCP2_STREAM_STATUS 0x666C0
+#define _PIPED_HDCP2_STREAM_STATUS 0x667C0
+#define PIPE_HDCP2_STREAM_STATUS(pipe) _MMIO(_PICK((pipe), \
+ _PIPEA_HDCP2_STREAM_STATUS, \
+ _PIPEB_HDCP2_STREAM_STATUS, \
+ _PIPEC_HDCP2_STREAM_STATUS, \
+ _PIPED_HDCP2_STREAM_STATUS))
+
+#define _TRANSA_HDCP2_STREAM_STATUS 0x664C0
+#define _TRANSB_HDCP2_STREAM_STATUS 0x665C0
+#define TRANS_HDCP2_STREAM_STATUS(trans) _MMIO_TRANS(trans, \
+ _TRANSA_HDCP2_STREAM_STATUS, \
+ _TRANSB_HDCP2_STREAM_STATUS)
+#define STREAM_ENCRYPTION_STATUS REG_BIT(31)
+#define STREAM_TYPE_STATUS REG_BIT(30)
+#define HDCP2_STREAM_STATUS(dev_priv, trans, pipe) \
+ (GRAPHICS_VER(dev_priv) >= 12 ? \
+ TRANS_HDCP2_STREAM_STATUS(trans) : \
+ PIPE_HDCP2_STREAM_STATUS(pipe))
+
+#define _PORTA_HDCP2_AUTH_STREAM 0x66F00
+#define _PORTB_HDCP2_AUTH_STREAM 0x66F04
+#define PORT_HDCP2_AUTH_STREAM(port) _MMIO_PORT(port, \
+ _PORTA_HDCP2_AUTH_STREAM, \
+ _PORTB_HDCP2_AUTH_STREAM)
+#define _TRANSA_HDCP2_AUTH_STREAM 0x66F00
+#define _TRANSB_HDCP2_AUTH_STREAM 0x66F04
+#define TRANS_HDCP2_AUTH_STREAM(trans) _MMIO_TRANS(trans, \
+ _TRANSA_HDCP2_AUTH_STREAM, \
+ _TRANSB_HDCP2_AUTH_STREAM)
+#define AUTH_STREAM_TYPE REG_BIT(31)
+#define HDCP2_AUTH_STREAM(dev_priv, trans, port) \
+ (GRAPHICS_VER(dev_priv) >= 12 ? \
+ TRANS_HDCP2_AUTH_STREAM(trans) : \
+ PORT_HDCP2_AUTH_STREAM(port))
+
+#endif /* __INTEL_HDCP_REGS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
new file mode 100644
index 000000000..2600019fc
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -0,0 +1,3237 @@
+/*
+ * Copyright 2006 Dave Airlie <airlied@linux.ie>
+ * Copyright © 2006-2009 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Jesse Barnes <jesse.barnes@intel.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/hdmi.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/string_helpers.h>
+
+#include <drm/display/drm_hdcp_helper.h>
+#include <drm/display/drm_hdmi_helper.h>
+#include <drm/display/drm_scdc_helper.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+#include <drm/intel_lpe_audio.h>
+
+#include "i915_debugfs.h"
+#include "i915_drv.h"
+#include "intel_atomic.h"
+#include "intel_connector.h"
+#include "intel_ddi.h"
+#include "intel_de.h"
+#include "intel_display_types.h"
+#include "intel_dp.h"
+#include "intel_gmbus.h"
+#include "intel_hdcp.h"
+#include "intel_hdcp_regs.h"
+#include "intel_hdmi.h"
+#include "intel_lspcon.h"
+#include "intel_panel.h"
+#include "intel_snps_phy.h"
+
+static struct drm_i915_private *intel_hdmi_to_i915(struct intel_hdmi *intel_hdmi)
+{
+ return to_i915(hdmi_to_dig_port(intel_hdmi)->base.base.dev);
+}
+
+static void
+assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
+{
+ struct drm_i915_private *dev_priv = intel_hdmi_to_i915(intel_hdmi);
+ u32 enabled_bits;
+
+ enabled_bits = HAS_DDI(dev_priv) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE;
+
+ drm_WARN(&dev_priv->drm,
+ intel_de_read(dev_priv, intel_hdmi->hdmi_reg) & enabled_bits,
+ "HDMI port enabled, expecting disabled\n");
+}
+
+static void
+assert_hdmi_transcoder_func_disabled(struct drm_i915_private *dev_priv,
+ enum transcoder cpu_transcoder)
+{
+ drm_WARN(&dev_priv->drm,
+ intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder)) &
+ TRANS_DDI_FUNC_ENABLE,
+ "HDMI transcoder function enabled, expecting disabled\n");
+}
+
+static u32 g4x_infoframe_index(unsigned int type)
+{
+ switch (type) {
+ case HDMI_PACKET_TYPE_GAMUT_METADATA:
+ return VIDEO_DIP_SELECT_GAMUT;
+ case HDMI_INFOFRAME_TYPE_AVI:
+ return VIDEO_DIP_SELECT_AVI;
+ case HDMI_INFOFRAME_TYPE_SPD:
+ return VIDEO_DIP_SELECT_SPD;
+ case HDMI_INFOFRAME_TYPE_VENDOR:
+ return VIDEO_DIP_SELECT_VENDOR;
+ default:
+ MISSING_CASE(type);
+ return 0;
+ }
+}
+
+static u32 g4x_infoframe_enable(unsigned int type)
+{
+ switch (type) {
+ case HDMI_PACKET_TYPE_GENERAL_CONTROL:
+ return VIDEO_DIP_ENABLE_GCP;
+ case HDMI_PACKET_TYPE_GAMUT_METADATA:
+ return VIDEO_DIP_ENABLE_GAMUT;
+ case DP_SDP_VSC:
+ return 0;
+ case HDMI_INFOFRAME_TYPE_AVI:
+ return VIDEO_DIP_ENABLE_AVI;
+ case HDMI_INFOFRAME_TYPE_SPD:
+ return VIDEO_DIP_ENABLE_SPD;
+ case HDMI_INFOFRAME_TYPE_VENDOR:
+ return VIDEO_DIP_ENABLE_VENDOR;
+ case HDMI_INFOFRAME_TYPE_DRM:
+ return 0;
+ default:
+ MISSING_CASE(type);
+ return 0;
+ }
+}
+
+static u32 hsw_infoframe_enable(unsigned int type)
+{
+ switch (type) {
+ case HDMI_PACKET_TYPE_GENERAL_CONTROL:
+ return VIDEO_DIP_ENABLE_GCP_HSW;
+ case HDMI_PACKET_TYPE_GAMUT_METADATA:
+ return VIDEO_DIP_ENABLE_GMP_HSW;
+ case DP_SDP_VSC:
+ return VIDEO_DIP_ENABLE_VSC_HSW;
+ case DP_SDP_PPS:
+ return VDIP_ENABLE_PPS;
+ case HDMI_INFOFRAME_TYPE_AVI:
+ return VIDEO_DIP_ENABLE_AVI_HSW;
+ case HDMI_INFOFRAME_TYPE_SPD:
+ return VIDEO_DIP_ENABLE_SPD_HSW;
+ case HDMI_INFOFRAME_TYPE_VENDOR:
+ return VIDEO_DIP_ENABLE_VS_HSW;
+ case HDMI_INFOFRAME_TYPE_DRM:
+ return VIDEO_DIP_ENABLE_DRM_GLK;
+ default:
+ MISSING_CASE(type);
+ return 0;
+ }
+}
+
+static i915_reg_t
+hsw_dip_data_reg(struct drm_i915_private *dev_priv,
+ enum transcoder cpu_transcoder,
+ unsigned int type,
+ int i)
+{
+ switch (type) {
+ case HDMI_PACKET_TYPE_GAMUT_METADATA:
+ return HSW_TVIDEO_DIP_GMP_DATA(cpu_transcoder, i);
+ case DP_SDP_VSC:
+ return HSW_TVIDEO_DIP_VSC_DATA(cpu_transcoder, i);
+ case DP_SDP_PPS:
+ return ICL_VIDEO_DIP_PPS_DATA(cpu_transcoder, i);
+ case HDMI_INFOFRAME_TYPE_AVI:
+ return HSW_TVIDEO_DIP_AVI_DATA(cpu_transcoder, i);
+ case HDMI_INFOFRAME_TYPE_SPD:
+ return HSW_TVIDEO_DIP_SPD_DATA(cpu_transcoder, i);
+ case HDMI_INFOFRAME_TYPE_VENDOR:
+ return HSW_TVIDEO_DIP_VS_DATA(cpu_transcoder, i);
+ case HDMI_INFOFRAME_TYPE_DRM:
+ return GLK_TVIDEO_DIP_DRM_DATA(cpu_transcoder, i);
+ default:
+ MISSING_CASE(type);
+ return INVALID_MMIO_REG;
+ }
+}
+
+static int hsw_dip_data_size(struct drm_i915_private *dev_priv,
+ unsigned int type)
+{
+ switch (type) {
+ case DP_SDP_VSC:
+ return VIDEO_DIP_VSC_DATA_SIZE;
+ case DP_SDP_PPS:
+ return VIDEO_DIP_PPS_DATA_SIZE;
+ case HDMI_PACKET_TYPE_GAMUT_METADATA:
+ if (DISPLAY_VER(dev_priv) >= 11)
+ return VIDEO_DIP_GMP_DATA_SIZE;
+ else
+ return VIDEO_DIP_DATA_SIZE;
+ default:
+ return VIDEO_DIP_DATA_SIZE;
+ }
+}
+
+static void g4x_write_infoframe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int type,
+ const void *frame, ssize_t len)
+{
+ const u32 *data = frame;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ u32 val = intel_de_read(dev_priv, VIDEO_DIP_CTL);
+ int i;
+
+ drm_WARN(&dev_priv->drm, !(val & VIDEO_DIP_ENABLE),
+ "Writing DIP with CTL reg disabled\n");
+
+ val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
+ val |= g4x_infoframe_index(type);
+
+ val &= ~g4x_infoframe_enable(type);
+
+ intel_de_write(dev_priv, VIDEO_DIP_CTL, val);
+
+ for (i = 0; i < len; i += 4) {
+ intel_de_write(dev_priv, VIDEO_DIP_DATA, *data);
+ data++;
+ }
+ /* Write every possible data byte to force correct ECC calculation. */
+ for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
+ intel_de_write(dev_priv, VIDEO_DIP_DATA, 0);
+
+ val |= g4x_infoframe_enable(type);
+ val &= ~VIDEO_DIP_FREQ_MASK;
+ val |= VIDEO_DIP_FREQ_VSYNC;
+
+ intel_de_write(dev_priv, VIDEO_DIP_CTL, val);
+ intel_de_posting_read(dev_priv, VIDEO_DIP_CTL);
+}
+
+static void g4x_read_infoframe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int type,
+ void *frame, ssize_t len)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ u32 val, *data = frame;
+ int i;
+
+ val = intel_de_read(dev_priv, VIDEO_DIP_CTL);
+
+ val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
+ val |= g4x_infoframe_index(type);
+
+ intel_de_write(dev_priv, VIDEO_DIP_CTL, val);
+
+ for (i = 0; i < len; i += 4)
+ *data++ = intel_de_read(dev_priv, VIDEO_DIP_DATA);
+}
+
+static u32 g4x_infoframes_enabled(struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ u32 val = intel_de_read(dev_priv, VIDEO_DIP_CTL);
+
+ if ((val & VIDEO_DIP_ENABLE) == 0)
+ return 0;
+
+ if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(encoder->port))
+ return 0;
+
+ return val & (VIDEO_DIP_ENABLE_AVI |
+ VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_SPD);
+}
+
+static void ibx_write_infoframe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int type,
+ const void *frame, ssize_t len)
+{
+ const u32 *data = frame;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ i915_reg_t reg = TVIDEO_DIP_CTL(crtc->pipe);
+ u32 val = intel_de_read(dev_priv, reg);
+ int i;
+
+ drm_WARN(&dev_priv->drm, !(val & VIDEO_DIP_ENABLE),
+ "Writing DIP with CTL reg disabled\n");
+
+ val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
+ val |= g4x_infoframe_index(type);
+
+ val &= ~g4x_infoframe_enable(type);
+
+ intel_de_write(dev_priv, reg, val);
+
+ for (i = 0; i < len; i += 4) {
+ intel_de_write(dev_priv, TVIDEO_DIP_DATA(crtc->pipe),
+ *data);
+ data++;
+ }
+ /* Write every possible data byte to force correct ECC calculation. */
+ for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
+ intel_de_write(dev_priv, TVIDEO_DIP_DATA(crtc->pipe), 0);
+
+ val |= g4x_infoframe_enable(type);
+ val &= ~VIDEO_DIP_FREQ_MASK;
+ val |= VIDEO_DIP_FREQ_VSYNC;
+
+ intel_de_write(dev_priv, reg, val);
+ intel_de_posting_read(dev_priv, reg);
+}
+
+static void ibx_read_infoframe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int type,
+ void *frame, ssize_t len)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ u32 val, *data = frame;
+ int i;
+
+ val = intel_de_read(dev_priv, TVIDEO_DIP_CTL(crtc->pipe));
+
+ val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
+ val |= g4x_infoframe_index(type);
+
+ intel_de_write(dev_priv, TVIDEO_DIP_CTL(crtc->pipe), val);
+
+ for (i = 0; i < len; i += 4)
+ *data++ = intel_de_read(dev_priv, TVIDEO_DIP_DATA(crtc->pipe));
+}
+
+static u32 ibx_infoframes_enabled(struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum pipe pipe = to_intel_crtc(pipe_config->uapi.crtc)->pipe;
+ i915_reg_t reg = TVIDEO_DIP_CTL(pipe);
+ u32 val = intel_de_read(dev_priv, reg);
+
+ if ((val & VIDEO_DIP_ENABLE) == 0)
+ return 0;
+
+ if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(encoder->port))
+ return 0;
+
+ return val & (VIDEO_DIP_ENABLE_AVI |
+ VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+ VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
+}
+
+static void cpt_write_infoframe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int type,
+ const void *frame, ssize_t len)
+{
+ const u32 *data = frame;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ i915_reg_t reg = TVIDEO_DIP_CTL(crtc->pipe);
+ u32 val = intel_de_read(dev_priv, reg);
+ int i;
+
+ drm_WARN(&dev_priv->drm, !(val & VIDEO_DIP_ENABLE),
+ "Writing DIP with CTL reg disabled\n");
+
+ val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
+ val |= g4x_infoframe_index(type);
+
+ /*
+ * The DIP control register spec says that we need to update the AVI
+ * infoframe without clearing its enable bit.
+ */
+ if (type != HDMI_INFOFRAME_TYPE_AVI)
+ val &= ~g4x_infoframe_enable(type);
+
+ intel_de_write(dev_priv, reg, val);
+
+ for (i = 0; i < len; i += 4) {
+ intel_de_write(dev_priv, TVIDEO_DIP_DATA(crtc->pipe),
+ *data);
+ data++;
+ }
+ /* Write every possible data byte to force correct ECC calculation. */
+ for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
+ intel_de_write(dev_priv, TVIDEO_DIP_DATA(crtc->pipe), 0);
+
+ val |= g4x_infoframe_enable(type);
+ val &= ~VIDEO_DIP_FREQ_MASK;
+ val |= VIDEO_DIP_FREQ_VSYNC;
+
+ intel_de_write(dev_priv, reg, val);
+ intel_de_posting_read(dev_priv, reg);
+}
+
+static void cpt_read_infoframe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int type,
+ void *frame, ssize_t len)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ u32 val, *data = frame;
+ int i;
+
+ val = intel_de_read(dev_priv, TVIDEO_DIP_CTL(crtc->pipe));
+
+ val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
+ val |= g4x_infoframe_index(type);
+
+ intel_de_write(dev_priv, TVIDEO_DIP_CTL(crtc->pipe), val);
+
+ for (i = 0; i < len; i += 4)
+ *data++ = intel_de_read(dev_priv, TVIDEO_DIP_DATA(crtc->pipe));
+}
+
+static u32 cpt_infoframes_enabled(struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum pipe pipe = to_intel_crtc(pipe_config->uapi.crtc)->pipe;
+ u32 val = intel_de_read(dev_priv, TVIDEO_DIP_CTL(pipe));
+
+ if ((val & VIDEO_DIP_ENABLE) == 0)
+ return 0;
+
+ return val & (VIDEO_DIP_ENABLE_AVI |
+ VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+ VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
+}
+
+static void vlv_write_infoframe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int type,
+ const void *frame, ssize_t len)
+{
+ const u32 *data = frame;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ i915_reg_t reg = VLV_TVIDEO_DIP_CTL(crtc->pipe);
+ u32 val = intel_de_read(dev_priv, reg);
+ int i;
+
+ drm_WARN(&dev_priv->drm, !(val & VIDEO_DIP_ENABLE),
+ "Writing DIP with CTL reg disabled\n");
+
+ val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
+ val |= g4x_infoframe_index(type);
+
+ val &= ~g4x_infoframe_enable(type);
+
+ intel_de_write(dev_priv, reg, val);
+
+ for (i = 0; i < len; i += 4) {
+ intel_de_write(dev_priv,
+ VLV_TVIDEO_DIP_DATA(crtc->pipe), *data);
+ data++;
+ }
+ /* Write every possible data byte to force correct ECC calculation. */
+ for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
+ intel_de_write(dev_priv,
+ VLV_TVIDEO_DIP_DATA(crtc->pipe), 0);
+
+ val |= g4x_infoframe_enable(type);
+ val &= ~VIDEO_DIP_FREQ_MASK;
+ val |= VIDEO_DIP_FREQ_VSYNC;
+
+ intel_de_write(dev_priv, reg, val);
+ intel_de_posting_read(dev_priv, reg);
+}
+
+static void vlv_read_infoframe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int type,
+ void *frame, ssize_t len)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ u32 val, *data = frame;
+ int i;
+
+ val = intel_de_read(dev_priv, VLV_TVIDEO_DIP_CTL(crtc->pipe));
+
+ val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
+ val |= g4x_infoframe_index(type);
+
+ intel_de_write(dev_priv, VLV_TVIDEO_DIP_CTL(crtc->pipe), val);
+
+ for (i = 0; i < len; i += 4)
+ *data++ = intel_de_read(dev_priv,
+ VLV_TVIDEO_DIP_DATA(crtc->pipe));
+}
+
+static u32 vlv_infoframes_enabled(struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum pipe pipe = to_intel_crtc(pipe_config->uapi.crtc)->pipe;
+ u32 val = intel_de_read(dev_priv, VLV_TVIDEO_DIP_CTL(pipe));
+
+ if ((val & VIDEO_DIP_ENABLE) == 0)
+ return 0;
+
+ if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(encoder->port))
+ return 0;
+
+ return val & (VIDEO_DIP_ENABLE_AVI |
+ VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+ VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
+}
+
+void hsw_write_infoframe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int type,
+ const void *frame, ssize_t len)
+{
+ const u32 *data = frame;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
+ int data_size;
+ int i;
+ u32 val = intel_de_read(dev_priv, ctl_reg);
+
+ data_size = hsw_dip_data_size(dev_priv, type);
+
+ drm_WARN_ON(&dev_priv->drm, len > data_size);
+
+ val &= ~hsw_infoframe_enable(type);
+ intel_de_write(dev_priv, ctl_reg, val);
+
+ for (i = 0; i < len; i += 4) {
+ intel_de_write(dev_priv,
+ hsw_dip_data_reg(dev_priv, cpu_transcoder, type, i >> 2),
+ *data);
+ data++;
+ }
+ /* Write every possible data byte to force correct ECC calculation. */
+ for (; i < data_size; i += 4)
+ intel_de_write(dev_priv,
+ hsw_dip_data_reg(dev_priv, cpu_transcoder, type, i >> 2),
+ 0);
+
+ /* Wa_14013475917 */
+ if (DISPLAY_VER(dev_priv) == 13 && crtc_state->has_psr &&
+ type == DP_SDP_VSC)
+ return;
+
+ val |= hsw_infoframe_enable(type);
+ intel_de_write(dev_priv, ctl_reg, val);
+ intel_de_posting_read(dev_priv, ctl_reg);
+}
+
+void hsw_read_infoframe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int type, void *frame, ssize_t len)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ u32 *data = frame;
+ int i;
+
+ for (i = 0; i < len; i += 4)
+ *data++ = intel_de_read(dev_priv,
+ hsw_dip_data_reg(dev_priv, cpu_transcoder, type, i >> 2));
+}
+
+static u32 hsw_infoframes_enabled(struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ u32 val = intel_de_read(dev_priv,
+ HSW_TVIDEO_DIP_CTL(pipe_config->cpu_transcoder));
+ u32 mask;
+
+ mask = (VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW |
+ VIDEO_DIP_ENABLE_GCP_HSW | VIDEO_DIP_ENABLE_VS_HSW |
+ VIDEO_DIP_ENABLE_GMP_HSW | VIDEO_DIP_ENABLE_SPD_HSW);
+
+ if (DISPLAY_VER(dev_priv) >= 10)
+ mask |= VIDEO_DIP_ENABLE_DRM_GLK;
+
+ return val & mask;
+}
+
+static const u8 infoframe_type_to_idx[] = {
+ HDMI_PACKET_TYPE_GENERAL_CONTROL,
+ HDMI_PACKET_TYPE_GAMUT_METADATA,
+ DP_SDP_VSC,
+ HDMI_INFOFRAME_TYPE_AVI,
+ HDMI_INFOFRAME_TYPE_SPD,
+ HDMI_INFOFRAME_TYPE_VENDOR,
+ HDMI_INFOFRAME_TYPE_DRM,
+};
+
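+/*
+ * intel_hdmi_infoframe_enable() maps an infoframe/SDP type to a BIT(index)
+ * in the table above; crtc_state->infoframes.enable is a bitmask in that
+ * index space, independent of the per-platform hardware enable bits.
+ */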
+u32 intel_hdmi_infoframe_enable(unsigned int type)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(infoframe_type_to_idx); i++) {
+ if (infoframe_type_to_idx[i] == type)
+ return BIT(i);
+ }
+
+ return 0;
+}
+
+u32 intel_hdmi_infoframes_enabled(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ u32 val, ret = 0;
+ int i;
+
+ val = dig_port->infoframes_enabled(encoder, crtc_state);
+
+ /* map from hardware bits to dip idx */
+ for (i = 0; i < ARRAY_SIZE(infoframe_type_to_idx); i++) {
+ unsigned int type = infoframe_type_to_idx[i];
+
+ if (HAS_DDI(dev_priv)) {
+ if (val & hsw_infoframe_enable(type))
+ ret |= BIT(i);
+ } else {
+ if (val & g4x_infoframe_enable(type))
+ ret |= BIT(i);
+ }
+ }
+
+ return ret;
+}
+
+/*
+ * The data we write to the DIP data buffer registers is 1 byte bigger than the
+ * HDMI infoframe size because of an ECC/reserved byte at position 3 (starting
+ * at 0). It's also a byte used by DisplayPort so the same DIP registers can be
+ * used for both technologies.
+ *
+ * DW0: Reserved/ECC/DP | HB2 | HB1 | HB0
+ * DW1: DB3 | DB2 | DB1 | DB0
+ * DW2: DB7 | DB6 | DB5 | DB4
+ * DW3: ...
+ *
+ * (HB is Header Byte, DB is Data Byte)
+ *
+ * The hdmi pack() functions don't know about that hardware specific hole so we
+ * trick them by giving an offset into the buffer and moving back the header
+ * bytes by one.
+ */
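+/*
+ * Worked example for a frame with header bytes HB0..HB2: pack() fills
+ * buffer[1..len], the memmove below then slides HB0..HB2 down to
+ * buffer[0..2] and buffer[3] is zeroed as the ECC/reserved slot, so the
+ * payload starts at buffer[4], matching the DW layout above.
+ */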
+static void intel_write_infoframe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ enum hdmi_infoframe_type type,
+ const union hdmi_infoframe *frame)
+{
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ u8 buffer[VIDEO_DIP_DATA_SIZE];
+ ssize_t len;
+
+ if ((crtc_state->infoframes.enable &
+ intel_hdmi_infoframe_enable(type)) == 0)
+ return;
+
+ if (drm_WARN_ON(encoder->base.dev, frame->any.type != type))
+ return;
+
+ /* see comment above for the reason for this offset */
+ len = hdmi_infoframe_pack_only(frame, buffer + 1, sizeof(buffer) - 1);
+ if (drm_WARN_ON(encoder->base.dev, len < 0))
+ return;
+
+ /* Insert the 'hole' (see big comment above) at position 3 */
+ memmove(&buffer[0], &buffer[1], 3);
+ buffer[3] = 0;
+ len++;
+
+ dig_port->write_infoframe(encoder, crtc_state, type, buffer, len);
+}
+
+void intel_read_infoframe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ enum hdmi_infoframe_type type,
+ union hdmi_infoframe *frame)
+{
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ u8 buffer[VIDEO_DIP_DATA_SIZE];
+ int ret;
+
+ if ((crtc_state->infoframes.enable &
+ intel_hdmi_infoframe_enable(type)) == 0)
+ return;
+
+ dig_port->read_infoframe(encoder, crtc_state,
+ type, buffer, sizeof(buffer));
+
+ /* Fill the 'hole' (see big comment above) at position 3 */
+ memmove(&buffer[1], &buffer[0], 3);
+
+ /* see comment above for the reason for this offset */
+ ret = hdmi_infoframe_unpack(frame, buffer + 1, sizeof(buffer) - 1);
+ if (ret) {
+ drm_dbg_kms(encoder->base.dev,
+ "Failed to unpack infoframe type 0x%02x\n", type);
+ return;
+ }
+
+ if (frame->any.type != type)
+ drm_dbg_kms(encoder->base.dev,
+ "Found the wrong infoframe type 0x%x (expected 0x%02x)\n",
+ frame->any.type, type);
+}
+
+static bool
+intel_hdmi_compute_avi_infoframe(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct hdmi_avi_infoframe *frame = &crtc_state->infoframes.avi.avi;
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+ struct drm_connector *connector = conn_state->connector;
+ int ret;
+
+ if (!crtc_state->has_infoframe)
+ return true;
+
+ crtc_state->infoframes.enable |=
+ intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI);
+
+ ret = drm_hdmi_avi_infoframe_from_display_mode(frame, connector,
+ adjusted_mode);
+ if (ret)
+ return false;
+
+ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
+ frame->colorspace = HDMI_COLORSPACE_YUV420;
+ else if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
+ frame->colorspace = HDMI_COLORSPACE_YUV444;
+ else
+ frame->colorspace = HDMI_COLORSPACE_RGB;
+
+ drm_hdmi_avi_infoframe_colorimetry(frame, conn_state);
+
+ /* nonsense combination */
+ drm_WARN_ON(encoder->base.dev, crtc_state->limited_color_range &&
+ crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
+
+ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_RGB) {
+ drm_hdmi_avi_infoframe_quant_range(frame, connector,
+ adjusted_mode,
+ crtc_state->limited_color_range ?
+ HDMI_QUANTIZATION_RANGE_LIMITED :
+ HDMI_QUANTIZATION_RANGE_FULL);
+ } else {
+ frame->quantization_range = HDMI_QUANTIZATION_RANGE_DEFAULT;
+ frame->ycc_quantization_range = HDMI_YCC_QUANTIZATION_RANGE_LIMITED;
+ }
+
+ drm_hdmi_avi_infoframe_content_type(frame, conn_state);
+
+ /* TODO: handle pixel repetition for YCBCR420 outputs */
+
+ ret = hdmi_avi_infoframe_check(frame);
+ if (drm_WARN_ON(encoder->base.dev, ret))
+ return false;
+
+ return true;
+}
+
+static bool
+intel_hdmi_compute_spd_infoframe(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct hdmi_spd_infoframe *frame = &crtc_state->infoframes.spd.spd;
+ int ret;
+
+ if (!crtc_state->has_infoframe)
+ return true;
+
+ crtc_state->infoframes.enable |=
+ intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD);
+
+ ret = hdmi_spd_infoframe_init(frame, "Intel", "Integrated gfx");
+ if (drm_WARN_ON(encoder->base.dev, ret))
+ return false;
+
+ frame->sdi = HDMI_SPD_SDI_PC;
+
+ ret = hdmi_spd_infoframe_check(frame);
+ if (drm_WARN_ON(encoder->base.dev, ret))
+ return false;
+
+ return true;
+}
+
+static bool
+intel_hdmi_compute_hdmi_infoframe(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct hdmi_vendor_infoframe *frame =
+ &crtc_state->infoframes.hdmi.vendor.hdmi;
+ const struct drm_display_info *info =
+ &conn_state->connector->display_info;
+ int ret;
+
+ if (!crtc_state->has_infoframe || !info->has_hdmi_infoframe)
+ return true;
+
+ crtc_state->infoframes.enable |=
+ intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR);
+
+ ret = drm_hdmi_vendor_infoframe_from_display_mode(frame,
+ conn_state->connector,
+ &crtc_state->hw.adjusted_mode);
+ if (drm_WARN_ON(encoder->base.dev, ret))
+ return false;
+
+ ret = hdmi_vendor_infoframe_check(frame);
+ if (drm_WARN_ON(encoder->base.dev, ret))
+ return false;
+
+ return true;
+}
+
+static bool
+intel_hdmi_compute_drm_infoframe(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct hdmi_drm_infoframe *frame = &crtc_state->infoframes.drm.drm;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ int ret;
+
+ if (DISPLAY_VER(dev_priv) < 10)
+ return true;
+
+ if (!crtc_state->has_infoframe)
+ return true;
+
+ if (!conn_state->hdr_output_metadata)
+ return true;
+
+ crtc_state->infoframes.enable |=
+ intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_DRM);
+
+ ret = drm_hdmi_infoframe_set_hdr_metadata(frame, conn_state);
+ if (ret < 0) {
+ drm_dbg_kms(&dev_priv->drm,
+ "couldn't set HDR metadata in infoframe\n");
+ return false;
+ }
+
+ ret = hdmi_drm_infoframe_check(frame);
+ if (drm_WARN_ON(&dev_priv->drm, ret))
+ return false;
+
+ return true;
+}
+
+static void g4x_set_infoframes(struct intel_encoder *encoder,
+ bool enable,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ struct intel_hdmi *intel_hdmi = &dig_port->hdmi;
+ i915_reg_t reg = VIDEO_DIP_CTL;
+ u32 val = intel_de_read(dev_priv, reg);
+ u32 port = VIDEO_DIP_PORT(encoder->port);
+
+ assert_hdmi_port_disabled(intel_hdmi);
+
+ /*
+ * If the registers have not been initialized yet, they might still be
+ * zero, which selects the AVI DIP with a "send once" frequency. This
+ * seems to really confuse the hardware and make things stop working
+ * (the register spec says the AVI DIP always needs to be sent every
+ * VSync). So here we avoid writing the register more often than we
+ * need to, and explicitly select the AVI DIP with an every-VSync
+ * frequency. Avoiding the duplicate write seems to be enough to solve
+ * the problem, but being defensive shouldn't hurt either.
+ */
+ val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC;
+
+ if (!enable) {
+ if (!(val & VIDEO_DIP_ENABLE))
+ return;
+ if (port != (val & VIDEO_DIP_PORT_MASK)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "video DIP still enabled on port %c\n",
+ (val & VIDEO_DIP_PORT_MASK) >> 29);
+ return;
+ }
+ val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI |
+ VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_SPD);
+ intel_de_write(dev_priv, reg, val);
+ intel_de_posting_read(dev_priv, reg);
+ return;
+ }
+
+ if (port != (val & VIDEO_DIP_PORT_MASK)) {
+ if (val & VIDEO_DIP_ENABLE) {
+ drm_dbg_kms(&dev_priv->drm,
+ "video DIP already enabled on port %c\n",
+ (val & VIDEO_DIP_PORT_MASK) >> 29);
+ return;
+ }
+ val &= ~VIDEO_DIP_PORT_MASK;
+ val |= port;
+ }
+
+ val |= VIDEO_DIP_ENABLE;
+ val &= ~(VIDEO_DIP_ENABLE_AVI |
+ VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_SPD);
+
+ intel_de_write(dev_priv, reg, val);
+ intel_de_posting_read(dev_priv, reg);
+
+ intel_write_infoframe(encoder, crtc_state,
+ HDMI_INFOFRAME_TYPE_AVI,
+ &crtc_state->infoframes.avi);
+ intel_write_infoframe(encoder, crtc_state,
+ HDMI_INFOFRAME_TYPE_SPD,
+ &crtc_state->infoframes.spd);
+ intel_write_infoframe(encoder, crtc_state,
+ HDMI_INFOFRAME_TYPE_VENDOR,
+ &crtc_state->infoframes.hdmi);
+}
+
+/*
+ * Determine if default_phase=1 can be indicated in the GCP infoframe.
+ *
+ * From HDMI specification 1.4a:
+ * - The first pixel of each Video Data Period shall always have a pixel packing phase of 0
+ * - The first pixel following each Video Data Period shall have a pixel packing phase of 0
+ * - The PP bits shall be constant for all GCPs and will be equal to the last packing phase
+ * - The first pixel following every transition of HSYNC or VSYNC shall have a pixel packing
+ * phase of 0
+ */
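+/*
+ * Example: at 30 bpp the hardware packs 4 pixels in 5 clocks, so the CEA
+ * 1920x1080 mode (htotal 2200, hsync 2008-2052) qualifies because every
+ * horizontal timing value is a multiple of 4.
+ */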
+static bool gcp_default_phase_possible(int pipe_bpp,
+ const struct drm_display_mode *mode)
+{
+ unsigned int pixels_per_group;
+
+ switch (pipe_bpp) {
+ case 30:
+ /* 4 pixels in 5 clocks */
+ pixels_per_group = 4;
+ break;
+ case 36:
+ /* 2 pixels in 3 clocks */
+ pixels_per_group = 2;
+ break;
+ case 48:
+ /* 1 pixel in 2 clocks */
+ pixels_per_group = 1;
+ break;
+ default:
+ /* phase information not relevant for 8bpc */
+ return false;
+ }
+
+ return mode->crtc_hdisplay % pixels_per_group == 0 &&
+ mode->crtc_htotal % pixels_per_group == 0 &&
+ mode->crtc_hblank_start % pixels_per_group == 0 &&
+ mode->crtc_hblank_end % pixels_per_group == 0 &&
+ mode->crtc_hsync_start % pixels_per_group == 0 &&
+ mode->crtc_hsync_end % pixels_per_group == 0 &&
+ ((mode->flags & DRM_MODE_FLAG_INTERLACE) == 0 ||
+ mode->crtc_htotal/2 % pixels_per_group == 0);
+}
+
+static bool intel_hdmi_set_gcp_infoframe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ i915_reg_t reg;
+
+ if ((crtc_state->infoframes.enable &
+ intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL)) == 0)
+ return false;
+
+ if (HAS_DDI(dev_priv))
+ reg = HSW_TVIDEO_DIP_GCP(crtc_state->cpu_transcoder);
+ else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ reg = VLV_TVIDEO_DIP_GCP(crtc->pipe);
+ else if (HAS_PCH_SPLIT(dev_priv))
+ reg = TVIDEO_DIP_GCP(crtc->pipe);
+ else
+ return false;
+
+ intel_de_write(dev_priv, reg, crtc_state->infoframes.gcp);
+
+ return true;
+}
+
+void intel_hdmi_read_gcp_infoframe(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ i915_reg_t reg;
+
+ if ((crtc_state->infoframes.enable &
+ intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL)) == 0)
+ return;
+
+ if (HAS_DDI(dev_priv))
+ reg = HSW_TVIDEO_DIP_GCP(crtc_state->cpu_transcoder);
+ else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ reg = VLV_TVIDEO_DIP_GCP(crtc->pipe);
+ else if (HAS_PCH_SPLIT(dev_priv))
+ reg = TVIDEO_DIP_GCP(crtc->pipe);
+ else
+ return;
+
+ crtc_state->infoframes.gcp = intel_de_read(dev_priv, reg);
+}
+
+static void intel_hdmi_compute_gcp_infoframe(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
+ if (IS_G4X(dev_priv) || !crtc_state->has_infoframe)
+ return;
+
+ crtc_state->infoframes.enable |=
+ intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL);
+
+ /* Indicate color indication for deep color mode */
+ if (crtc_state->pipe_bpp > 24)
+ crtc_state->infoframes.gcp |= GCP_COLOR_INDICATION;
+
+ /* Enable default_phase whenever the display mode is suitably aligned */
+ if (gcp_default_phase_possible(crtc_state->pipe_bpp,
+ &crtc_state->hw.adjusted_mode))
+ crtc_state->infoframes.gcp |= GCP_DEFAULT_PHASE_ENABLE;
+}
+
+static void ibx_set_infoframes(struct intel_encoder *encoder,
+ bool enable,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ struct intel_hdmi *intel_hdmi = &dig_port->hdmi;
+ i915_reg_t reg = TVIDEO_DIP_CTL(crtc->pipe);
+ u32 val = intel_de_read(dev_priv, reg);
+ u32 port = VIDEO_DIP_PORT(encoder->port);
+
+ assert_hdmi_port_disabled(intel_hdmi);
+
+ /* See the big comment in g4x_set_infoframes() */
+ val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC;
+
+ if (!enable) {
+ if (!(val & VIDEO_DIP_ENABLE))
+ return;
+ val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI |
+ VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+ VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
+ intel_de_write(dev_priv, reg, val);
+ intel_de_posting_read(dev_priv, reg);
+ return;
+ }
+
+ if (port != (val & VIDEO_DIP_PORT_MASK)) {
+ drm_WARN(&dev_priv->drm, val & VIDEO_DIP_ENABLE,
+ "DIP already enabled on port %c\n",
+ (val & VIDEO_DIP_PORT_MASK) >> 29);
+ val &= ~VIDEO_DIP_PORT_MASK;
+ val |= port;
+ }
+
+ val |= VIDEO_DIP_ENABLE;
+ val &= ~(VIDEO_DIP_ENABLE_AVI |
+ VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+ VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
+
+ if (intel_hdmi_set_gcp_infoframe(encoder, crtc_state, conn_state))
+ val |= VIDEO_DIP_ENABLE_GCP;
+
+ intel_de_write(dev_priv, reg, val);
+ intel_de_posting_read(dev_priv, reg);
+
+ intel_write_infoframe(encoder, crtc_state,
+ HDMI_INFOFRAME_TYPE_AVI,
+ &crtc_state->infoframes.avi);
+ intel_write_infoframe(encoder, crtc_state,
+ HDMI_INFOFRAME_TYPE_SPD,
+ &crtc_state->infoframes.spd);
+ intel_write_infoframe(encoder, crtc_state,
+ HDMI_INFOFRAME_TYPE_VENDOR,
+ &crtc_state->infoframes.hdmi);
+}
+
+static void cpt_set_infoframes(struct intel_encoder *encoder,
+ bool enable,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ i915_reg_t reg = TVIDEO_DIP_CTL(crtc->pipe);
+ u32 val = intel_de_read(dev_priv, reg);
+
+ assert_hdmi_port_disabled(intel_hdmi);
+
+ /* See the big comment in g4x_set_infoframes() */
+ val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC;
+
+ if (!enable) {
+ if (!(val & VIDEO_DIP_ENABLE))
+ return;
+ val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI |
+ VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+ VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
+ intel_de_write(dev_priv, reg, val);
+ intel_de_posting_read(dev_priv, reg);
+ return;
+ }
+
+ /* Set both together, unset both together: see the spec. */
+ val |= VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI;
+ val &= ~(VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+ VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
+
+ if (intel_hdmi_set_gcp_infoframe(encoder, crtc_state, conn_state))
+ val |= VIDEO_DIP_ENABLE_GCP;
+
+ intel_de_write(dev_priv, reg, val);
+ intel_de_posting_read(dev_priv, reg);
+
+ intel_write_infoframe(encoder, crtc_state,
+ HDMI_INFOFRAME_TYPE_AVI,
+ &crtc_state->infoframes.avi);
+ intel_write_infoframe(encoder, crtc_state,
+ HDMI_INFOFRAME_TYPE_SPD,
+ &crtc_state->infoframes.spd);
+ intel_write_infoframe(encoder, crtc_state,
+ HDMI_INFOFRAME_TYPE_VENDOR,
+ &crtc_state->infoframes.hdmi);
+}
+
+static void vlv_set_infoframes(struct intel_encoder *encoder,
+ bool enable,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ i915_reg_t reg = VLV_TVIDEO_DIP_CTL(crtc->pipe);
+ u32 val = intel_de_read(dev_priv, reg);
+ u32 port = VIDEO_DIP_PORT(encoder->port);
+
+ assert_hdmi_port_disabled(intel_hdmi);
+
+ /* See the big comment in g4x_set_infoframes() */
+ val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC;
+
+ if (!enable) {
+ if (!(val & VIDEO_DIP_ENABLE))
+ return;
+ val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI |
+ VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+ VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
+ intel_de_write(dev_priv, reg, val);
+ intel_de_posting_read(dev_priv, reg);
+ return;
+ }
+
+ if (port != (val & VIDEO_DIP_PORT_MASK)) {
+ drm_WARN(&dev_priv->drm, val & VIDEO_DIP_ENABLE,
+ "DIP already enabled on port %c\n",
+ (val & VIDEO_DIP_PORT_MASK) >> 29);
+ val &= ~VIDEO_DIP_PORT_MASK;
+ val |= port;
+ }
+
+ val |= VIDEO_DIP_ENABLE;
+ val &= ~(VIDEO_DIP_ENABLE_AVI |
+ VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+ VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
+
+ if (intel_hdmi_set_gcp_infoframe(encoder, crtc_state, conn_state))
+ val |= VIDEO_DIP_ENABLE_GCP;
+
+ intel_de_write(dev_priv, reg, val);
+ intel_de_posting_read(dev_priv, reg);
+
+ intel_write_infoframe(encoder, crtc_state,
+ HDMI_INFOFRAME_TYPE_AVI,
+ &crtc_state->infoframes.avi);
+ intel_write_infoframe(encoder, crtc_state,
+ HDMI_INFOFRAME_TYPE_SPD,
+ &crtc_state->infoframes.spd);
+ intel_write_infoframe(encoder, crtc_state,
+ HDMI_INFOFRAME_TYPE_VENDOR,
+ &crtc_state->infoframes.hdmi);
+}
+
+static void hsw_set_infoframes(struct intel_encoder *encoder,
+ bool enable,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ i915_reg_t reg = HSW_TVIDEO_DIP_CTL(crtc_state->cpu_transcoder);
+ u32 val = intel_de_read(dev_priv, reg);
+
+ assert_hdmi_transcoder_func_disabled(dev_priv,
+ crtc_state->cpu_transcoder);
+
+ val &= ~(VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW |
+ VIDEO_DIP_ENABLE_GCP_HSW | VIDEO_DIP_ENABLE_VS_HSW |
+ VIDEO_DIP_ENABLE_GMP_HSW | VIDEO_DIP_ENABLE_SPD_HSW |
+ VIDEO_DIP_ENABLE_DRM_GLK);
+
+ if (!enable) {
+ intel_de_write(dev_priv, reg, val);
+ intel_de_posting_read(dev_priv, reg);
+ return;
+ }
+
+ if (intel_hdmi_set_gcp_infoframe(encoder, crtc_state, conn_state))
+ val |= VIDEO_DIP_ENABLE_GCP_HSW;
+
+ intel_de_write(dev_priv, reg, val);
+ intel_de_posting_read(dev_priv, reg);
+
+ intel_write_infoframe(encoder, crtc_state,
+ HDMI_INFOFRAME_TYPE_AVI,
+ &crtc_state->infoframes.avi);
+ intel_write_infoframe(encoder, crtc_state,
+ HDMI_INFOFRAME_TYPE_SPD,
+ &crtc_state->infoframes.spd);
+ intel_write_infoframe(encoder, crtc_state,
+ HDMI_INFOFRAME_TYPE_VENDOR,
+ &crtc_state->infoframes.hdmi);
+ intel_write_infoframe(encoder, crtc_state,
+ HDMI_INFOFRAME_TYPE_DRM,
+ &crtc_state->infoframes.drm);
+}
+
+void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable)
+{
+ struct drm_i915_private *dev_priv = intel_hdmi_to_i915(hdmi);
+ struct i2c_adapter *adapter;
+
+ if (hdmi->dp_dual_mode.type < DRM_DP_DUAL_MODE_TYPE2_DVI)
+ return;
+
+ adapter = intel_gmbus_get_adapter(dev_priv, hdmi->ddc_bus);
+
+ drm_dbg_kms(&dev_priv->drm, "%s DP dual mode adaptor TMDS output\n",
+ enable ? "Enabling" : "Disabling");
+
+ drm_dp_dual_mode_set_tmds_output(&dev_priv->drm, hdmi->dp_dual_mode.type, adapter, enable);
+}
+
+static int intel_hdmi_hdcp_read(struct intel_digital_port *dig_port,
+ unsigned int offset, void *buffer, size_t size)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_hdmi *hdmi = &dig_port->hdmi;
+ struct i2c_adapter *adapter = intel_gmbus_get_adapter(i915,
+ hdmi->ddc_bus);
+ int ret;
+ u8 start = offset & 0xff;
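+ /*
+ * Standard HDCP-over-DDC read: the first message writes the one-byte
+ * register offset, the second reads the payload back from the same
+ * slave address (DRM_HDCP_DDC_ADDR).
+ */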
+ struct i2c_msg msgs[] = {
+ {
+ .addr = DRM_HDCP_DDC_ADDR,
+ .flags = 0,
+ .len = 1,
+ .buf = &start,
+ },
+ {
+ .addr = DRM_HDCP_DDC_ADDR,
+ .flags = I2C_M_RD,
+ .len = size,
+ .buf = buffer
+ }
+ };
+ ret = i2c_transfer(adapter, msgs, ARRAY_SIZE(msgs));
+ if (ret == ARRAY_SIZE(msgs))
+ return 0;
+ return ret >= 0 ? -EIO : ret;
+}
+
+static int intel_hdmi_hdcp_write(struct intel_digital_port *dig_port,
+ unsigned int offset, void *buffer, size_t size)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_hdmi *hdmi = &dig_port->hdmi;
+ struct i2c_adapter *adapter = intel_gmbus_get_adapter(i915,
+ hdmi->ddc_bus);
+ int ret;
+ u8 *write_buf;
+ struct i2c_msg msg;
+
+ write_buf = kzalloc(size + 1, GFP_KERNEL);
+ if (!write_buf)
+ return -ENOMEM;
+
+ write_buf[0] = offset & 0xff;
+ memcpy(&write_buf[1], buffer, size);
+
+ msg.addr = DRM_HDCP_DDC_ADDR;
+ msg.flags = 0;
+ msg.len = size + 1;
+ msg.buf = write_buf;
+
+ ret = i2c_transfer(adapter, &msg, 1);
+ if (ret == 1)
+ ret = 0;
+ else if (ret >= 0)
+ ret = -EIO;
+
+ kfree(write_buf);
+ return ret;
+}
+
+static
+int intel_hdmi_hdcp_write_an_aksv(struct intel_digital_port *dig_port,
+ u8 *an)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_hdmi *hdmi = &dig_port->hdmi;
+ struct i2c_adapter *adapter = intel_gmbus_get_adapter(i915,
+ hdmi->ddc_bus);
+ int ret;
+
+ ret = intel_hdmi_hdcp_write(dig_port, DRM_HDCP_DDC_AN, an,
+ DRM_HDCP_AN_LEN);
+ if (ret) {
+ drm_dbg_kms(&i915->drm, "Write An over DDC failed (%d)\n",
+ ret);
+ return ret;
+ }
+
+ ret = intel_gmbus_output_aksv(adapter);
+ if (ret < 0) {
+ drm_dbg_kms(&i915->drm, "Failed to output aksv (%d)\n", ret);
+ return ret;
+ }
+ return 0;
+}
+
+static int intel_hdmi_hdcp_read_bksv(struct intel_digital_port *dig_port,
+ u8 *bksv)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+
+ int ret;
+ ret = intel_hdmi_hdcp_read(dig_port, DRM_HDCP_DDC_BKSV, bksv,
+ DRM_HDCP_KSV_LEN);
+ if (ret)
+ drm_dbg_kms(&i915->drm, "Read Bksv over DDC failed (%d)\n",
+ ret);
+ return ret;
+}
+
+static
+int intel_hdmi_hdcp_read_bstatus(struct intel_digital_port *dig_port,
+ u8 *bstatus)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+
+ int ret;
+ ret = intel_hdmi_hdcp_read(dig_port, DRM_HDCP_DDC_BSTATUS,
+ bstatus, DRM_HDCP_BSTATUS_LEN);
+ if (ret)
+ drm_dbg_kms(&i915->drm, "Read bstatus over DDC failed (%d)\n",
+ ret);
+ return ret;
+}
+
+static
+int intel_hdmi_hdcp_repeater_present(struct intel_digital_port *dig_port,
+ bool *repeater_present)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ int ret;
+ u8 val;
+
+ ret = intel_hdmi_hdcp_read(dig_port, DRM_HDCP_DDC_BCAPS, &val, 1);
+ if (ret) {
+ drm_dbg_kms(&i915->drm, "Read bcaps over DDC failed (%d)\n",
+ ret);
+ return ret;
+ }
+ *repeater_present = val & DRM_HDCP_DDC_BCAPS_REPEATER_PRESENT;
+ return 0;
+}
+
+static
+int intel_hdmi_hdcp_read_ri_prime(struct intel_digital_port *dig_port,
+ u8 *ri_prime)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+
+ int ret;
+ ret = intel_hdmi_hdcp_read(dig_port, DRM_HDCP_DDC_RI_PRIME,
+ ri_prime, DRM_HDCP_RI_LEN);
+ if (ret)
+ drm_dbg_kms(&i915->drm, "Read Ri' over DDC failed (%d)\n",
+ ret);
+ return ret;
+}
+
+static
+int intel_hdmi_hdcp_read_ksv_ready(struct intel_digital_port *dig_port,
+ bool *ksv_ready)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ int ret;
+ u8 val;
+
+ ret = intel_hdmi_hdcp_read(dig_port, DRM_HDCP_DDC_BCAPS, &val, 1);
+ if (ret) {
+ drm_dbg_kms(&i915->drm, "Read bcaps over DDC failed (%d)\n",
+ ret);
+ return ret;
+ }
+ *ksv_ready = val & DRM_HDCP_DDC_BCAPS_KSV_FIFO_READY;
+ return 0;
+}
+
+static
+int intel_hdmi_hdcp_read_ksv_fifo(struct intel_digital_port *dig_port,
+ int num_downstream, u8 *ksv_fifo)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ int ret;
+ ret = intel_hdmi_hdcp_read(dig_port, DRM_HDCP_DDC_KSV_FIFO,
+ ksv_fifo, num_downstream * DRM_HDCP_KSV_LEN);
+ if (ret) {
+ drm_dbg_kms(&i915->drm,
+ "Read ksv fifo over DDC failed (%d)\n", ret);
+ return ret;
+ }
+ return 0;
+}
+
+static
+int intel_hdmi_hdcp_read_v_prime_part(struct intel_digital_port *dig_port,
+ int i, u32 *part)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ int ret;
+
+ if (i >= DRM_HDCP_V_PRIME_NUM_PARTS)
+ return -EINVAL;
+
+ ret = intel_hdmi_hdcp_read(dig_port, DRM_HDCP_DDC_V_PRIME(i),
+ part, DRM_HDCP_V_PRIME_PART_LEN);
+ if (ret)
+ drm_dbg_kms(&i915->drm, "Read V'[%d] over DDC failed (%d)\n",
+ i, ret);
+ return ret;
+}
+
+static int kbl_repositioning_enc_en_signal(struct intel_connector *connector,
+ enum transcoder cpu_transcoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct intel_crtc *crtc = to_intel_crtc(connector->base.state->crtc);
+ u32 scanline;
+ int ret;
+
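+ /*
+ * Busy-wait until the pipe's scanline is inside the 100-200 window
+ * before re-toggling HDCP signalling; this repositions the "window
+ * of opportunity" for the enc_en signal on KBL (see the W/A note in
+ * the caller).
+ */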
+ for (;;) {
+ scanline = intel_de_read(dev_priv, PIPEDSL(crtc->pipe));
+ if (scanline > 100 && scanline < 200)
+ break;
+ usleep_range(25, 50);
+ }
+
+ ret = intel_ddi_toggle_hdcp_bits(&dig_port->base, cpu_transcoder,
+ false, TRANS_DDI_HDCP_SIGNALLING);
+ if (ret) {
+ drm_err(&dev_priv->drm,
+ "Disable HDCP signalling failed (%d)\n", ret);
+ return ret;
+ }
+
+ ret = intel_ddi_toggle_hdcp_bits(&dig_port->base, cpu_transcoder,
+ true, TRANS_DDI_HDCP_SIGNALLING);
+ if (ret) {
+ drm_err(&dev_priv->drm,
+ "Enable HDCP signalling failed (%d)\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static
+int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *dig_port,
+ enum transcoder cpu_transcoder,
+ bool enable)
+{
+ struct intel_hdmi *hdmi = &dig_port->hdmi;
+ struct intel_connector *connector = hdmi->attached_connector;
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ int ret;
+
+ if (!enable)
+ usleep_range(6, 60); /* Bspec says >= 6us */
+
+ ret = intel_ddi_toggle_hdcp_bits(&dig_port->base,
+ cpu_transcoder, enable,
+ TRANS_DDI_HDCP_SIGNALLING);
+ if (ret) {
+ drm_err(&dev_priv->drm, "%s HDCP signalling failed (%d)\n",
+ enable ? "Enable" : "Disable", ret);
+ return ret;
+ }
+
+ /*
+ * WA: To fix incorrect positioning of the window of
+ * opportunity and enc_en signalling in KABYLAKE.
+ */
+ if (IS_KABYLAKE(dev_priv) && enable)
+ return kbl_repositioning_enc_en_signal(connector,
+ cpu_transcoder);
+
+ return 0;
+}
+
+static
+bool intel_hdmi_hdcp_check_link_once(struct intel_digital_port *dig_port,
+ struct intel_connector *connector)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ enum port port = dig_port->base.port;
+ enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
+ int ret;
+ union {
+ u32 reg;
+ u8 shim[DRM_HDCP_RI_LEN];
+ } ri;
+
+ ret = intel_hdmi_hdcp_read_ri_prime(dig_port, ri.shim);
+ if (ret)
+ return false;
+
+ intel_de_write(i915, HDCP_RPRIME(i915, cpu_transcoder, port), ri.reg);
+
+ /* Wait for Ri prime match */
+ if (wait_for((intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port)) &
+ (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC)) ==
+ (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1)) {
+ drm_dbg_kms(&i915->drm, "Ri' mismatch detected (%x)\n",
+ intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder,
+ port)));
+ return false;
+ }
+ return true;
+}
+
+static
+bool intel_hdmi_hdcp_check_link(struct intel_digital_port *dig_port,
+ struct intel_connector *connector)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ int retry;
+
+ for (retry = 0; retry < 3; retry++)
+ if (intel_hdmi_hdcp_check_link_once(dig_port, connector))
+ return true;
+
+ drm_err(&i915->drm, "Link check failed\n");
+ return false;
+}
+
+struct hdcp2_hdmi_msg_timeout {
+ u8 msg_id;
+ u16 timeout;
+};
+
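+/*
+ * Per-message read timeouts (in ms) from the HDCP 2.2 spec.
+ * AKE_SEND_HPRIME is special-cased in get_hdcp2_msg_timeout() because its
+ * timeout depends on whether the receiver is already paired.
+ */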
+static const struct hdcp2_hdmi_msg_timeout hdcp2_msg_timeout[] = {
+ { HDCP_2_2_AKE_SEND_CERT, HDCP_2_2_CERT_TIMEOUT_MS, },
+ { HDCP_2_2_AKE_SEND_PAIRING_INFO, HDCP_2_2_PAIRING_TIMEOUT_MS, },
+ { HDCP_2_2_LC_SEND_LPRIME, HDCP_2_2_HDMI_LPRIME_TIMEOUT_MS, },
+ { HDCP_2_2_REP_SEND_RECVID_LIST, HDCP_2_2_RECVID_LIST_TIMEOUT_MS, },
+ { HDCP_2_2_REP_STREAM_READY, HDCP_2_2_STREAM_READY_TIMEOUT_MS, },
+};
+
+static
+int intel_hdmi_hdcp2_read_rx_status(struct intel_digital_port *dig_port,
+ u8 *rx_status)
+{
+ return intel_hdmi_hdcp_read(dig_port,
+ HDCP_2_2_HDMI_REG_RXSTATUS_OFFSET,
+ rx_status,
+ HDCP_2_2_HDMI_RXSTATUS_LEN);
+}
+
+static int get_hdcp2_msg_timeout(u8 msg_id, bool is_paired)
+{
+ int i;
+
+ if (msg_id == HDCP_2_2_AKE_SEND_HPRIME) {
+ if (is_paired)
+ return HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS;
+ else
+ return HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(hdcp2_msg_timeout); i++) {
+ if (hdcp2_msg_timeout[i].msg_id == msg_id)
+ return hdcp2_msg_timeout[i].timeout;
+ }
+
+ return -EINVAL;
+}
+
+static int
+hdcp2_detect_msg_availability(struct intel_digital_port *dig_port,
+ u8 msg_id, bool *msg_ready,
+ ssize_t *msg_sz)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ u8 rx_status[HDCP_2_2_HDMI_RXSTATUS_LEN];
+ int ret;
+
+ ret = intel_hdmi_hdcp2_read_rx_status(dig_port, rx_status);
+ if (ret < 0) {
+ drm_dbg_kms(&i915->drm, "rx_status read failed. Err %d\n",
+ ret);
+ return ret;
+ }
+
+ *msg_sz = ((HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(rx_status[1]) << 8) |
+ rx_status[0]);
+
+ if (msg_id == HDCP_2_2_REP_SEND_RECVID_LIST)
+ *msg_ready = (HDCP_2_2_HDMI_RXSTATUS_READY(rx_status[1]) &&
+ *msg_sz);
+ else
+ *msg_ready = *msg_sz;
+
+ return 0;
+}
+
+static ssize_t
+intel_hdmi_hdcp2_wait_for_msg(struct intel_digital_port *dig_port,
+ u8 msg_id, bool paired)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ bool msg_ready = false;
+ int timeout, ret;
+ ssize_t msg_sz = 0;
+
+ timeout = get_hdcp2_msg_timeout(msg_id, paired);
+ if (timeout < 0)
+ return timeout;
+
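+ /*
+ * Poll rx_status until the message shows up or the spec timeout
+ * (converted from ms to us here) expires, backing off between 1 ms
+ * and 5 ms per poll.
+ */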
+ ret = __wait_for(ret = hdcp2_detect_msg_availability(dig_port,
+ msg_id, &msg_ready,
+ &msg_sz),
+ !ret && msg_ready && msg_sz, timeout * 1000,
+ 1000, 5 * 1000);
+ if (ret)
+ drm_dbg_kms(&i915->drm, "msg_id: %d, ret: %d, timeout: %d\n",
+ msg_id, ret, timeout);
+
+ return ret ? ret : msg_sz;
+}
+
+static
+int intel_hdmi_hdcp2_write_msg(struct intel_digital_port *dig_port,
+ void *buf, size_t size)
+{
+ unsigned int offset;
+
+ offset = HDCP_2_2_HDMI_REG_WR_MSG_OFFSET;
+ return intel_hdmi_hdcp_write(dig_port, offset, buf, size);
+}
+
+static
+int intel_hdmi_hdcp2_read_msg(struct intel_digital_port *dig_port,
+ u8 msg_id, void *buf, size_t size)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_hdmi *hdmi = &dig_port->hdmi;
+ struct intel_hdcp *hdcp = &hdmi->attached_connector->hdcp;
+ unsigned int offset;
+ ssize_t ret;
+
+ ret = intel_hdmi_hdcp2_wait_for_msg(dig_port, msg_id,
+ hdcp->is_paired);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * The available message size must be less than or equal to the size
+ * of the caller's buffer.
+ */
+ if (ret > size) {
+ drm_dbg_kms(&i915->drm,
+ "msg_sz(%zd) is more than exp size(%zu)\n",
+ ret, size);
+ return -EINVAL;
+ }
+
+ offset = HDCP_2_2_HDMI_REG_RD_MSG_OFFSET;
+ ret = intel_hdmi_hdcp_read(dig_port, offset, buf, ret);
+ if (ret)
+ drm_dbg_kms(&i915->drm, "Failed to read msg_id: %d(%zd)\n",
+ msg_id, ret);
+
+ return ret;
+}
+
+static
+int intel_hdmi_hdcp2_check_link(struct intel_digital_port *dig_port,
+ struct intel_connector *connector)
+{
+ u8 rx_status[HDCP_2_2_HDMI_RXSTATUS_LEN];
+ int ret;
+
+ ret = intel_hdmi_hdcp2_read_rx_status(dig_port, rx_status);
+ if (ret)
+ return ret;
+
+ /*
+ * A re-auth request and a link integrity failure are signalled by
+ * the same bit, reauth_req.
+ */
+ if (HDCP_2_2_HDMI_RXSTATUS_REAUTH_REQ(rx_status[1]))
+ ret = HDCP_REAUTH_REQUEST;
+ else if (HDCP_2_2_HDMI_RXSTATUS_READY(rx_status[1]))
+ ret = HDCP_TOPOLOGY_CHANGE;
+
+ return ret;
+}
+
+static
+int intel_hdmi_hdcp2_capable(struct intel_digital_port *dig_port,
+ bool *capable)
+{
+ u8 hdcp2_version;
+ int ret;
+
+ *capable = false;
+ ret = intel_hdmi_hdcp_read(dig_port, HDCP_2_2_HDMI_REG_VER_OFFSET,
+ &hdcp2_version, sizeof(hdcp2_version));
+ if (!ret && hdcp2_version & HDCP_2_2_HDMI_SUPPORT_MASK)
+ *capable = true;
+
+ return ret;
+}
+
+static const struct intel_hdcp_shim intel_hdmi_hdcp_shim = {
+ .write_an_aksv = intel_hdmi_hdcp_write_an_aksv,
+ .read_bksv = intel_hdmi_hdcp_read_bksv,
+ .read_bstatus = intel_hdmi_hdcp_read_bstatus,
+ .repeater_present = intel_hdmi_hdcp_repeater_present,
+ .read_ri_prime = intel_hdmi_hdcp_read_ri_prime,
+ .read_ksv_ready = intel_hdmi_hdcp_read_ksv_ready,
+ .read_ksv_fifo = intel_hdmi_hdcp_read_ksv_fifo,
+ .read_v_prime_part = intel_hdmi_hdcp_read_v_prime_part,
+ .toggle_signalling = intel_hdmi_hdcp_toggle_signalling,
+ .check_link = intel_hdmi_hdcp_check_link,
+ .write_2_2_msg = intel_hdmi_hdcp2_write_msg,
+ .read_2_2_msg = intel_hdmi_hdcp2_read_msg,
+ .check_2_2_link = intel_hdmi_hdcp2_check_link,
+ .hdcp_2_2_capable = intel_hdmi_hdcp2_capable,
+ .protocol = HDCP_PROTOCOL_HDMI,
+};
+
+static int intel_hdmi_source_max_tmds_clock(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ int max_tmds_clock, vbt_max_tmds_clock;
+
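+ /*
+ * Source-side TMDS limits: 594 MHz covers 4k60 RGB 8bpc (HDMI 2.0),
+ * 165 MHz is the classic single-link DVI ceiling, and the values in
+ * between reflect per-platform PHY capabilities.
+ */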
+ if (DISPLAY_VER(dev_priv) >= 10)
+ max_tmds_clock = 594000;
+ else if (DISPLAY_VER(dev_priv) >= 8 || IS_HASWELL(dev_priv))
+ max_tmds_clock = 300000;
+ else if (DISPLAY_VER(dev_priv) >= 5)
+ max_tmds_clock = 225000;
+ else
+ max_tmds_clock = 165000;
+
+ vbt_max_tmds_clock = intel_bios_max_tmds_clock(encoder);
+ if (vbt_max_tmds_clock)
+ max_tmds_clock = min(max_tmds_clock, vbt_max_tmds_clock);
+
+ return max_tmds_clock;
+}
+
+static bool intel_has_hdmi_sink(struct intel_hdmi *hdmi,
+ const struct drm_connector_state *conn_state)
+{
+ return hdmi->has_hdmi_sink &&
+ READ_ONCE(to_intel_digital_connector_state(conn_state)->force_audio) != HDMI_AUDIO_OFF_DVI;
+}
+
+static bool intel_hdmi_is_ycbcr420(const struct intel_crtc_state *crtc_state)
+{
+ return crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420;
+}
+
+static int hdmi_port_clock_limit(struct intel_hdmi *hdmi,
+ bool respect_downstream_limits,
+ bool has_hdmi_sink)
+{
+ struct intel_encoder *encoder = &hdmi_to_dig_port(hdmi)->base;
+ int max_tmds_clock = intel_hdmi_source_max_tmds_clock(encoder);
+
+ if (respect_downstream_limits) {
+ struct intel_connector *connector = hdmi->attached_connector;
+ const struct drm_display_info *info = &connector->base.display_info;
+
+ if (hdmi->dp_dual_mode.max_tmds_clock)
+ max_tmds_clock = min(max_tmds_clock,
+ hdmi->dp_dual_mode.max_tmds_clock);
+
+ if (info->max_tmds_clock)
+ max_tmds_clock = min(max_tmds_clock,
+ info->max_tmds_clock);
+ else if (!has_hdmi_sink)
+ max_tmds_clock = min(max_tmds_clock, 165000);
+ }
+
+ return max_tmds_clock;
+}
+
+static enum drm_mode_status
+hdmi_port_clock_valid(struct intel_hdmi *hdmi,
+ int clock, bool respect_downstream_limits,
+ bool has_hdmi_sink)
+{
+ struct drm_i915_private *dev_priv = intel_hdmi_to_i915(hdmi);
+ enum phy phy = intel_port_to_phy(dev_priv, hdmi_to_dig_port(hdmi)->base.port);
+
+ if (clock < 25000)
+ return MODE_CLOCK_LOW;
+ if (clock > hdmi_port_clock_limit(hdmi, respect_downstream_limits,
+ has_hdmi_sink))
+ return MODE_CLOCK_HIGH;
+
+ /* GLK DPLL can't generate 446-480 MHz */
+ if (IS_GEMINILAKE(dev_priv) && clock > 446666 && clock < 480000)
+ return MODE_CLOCK_RANGE;
+
+ /* BXT/GLK DPLL can't generate 223-240 MHz */
+ if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
+ clock > 223333 && clock < 240000)
+ return MODE_CLOCK_RANGE;
+
+ /* CHV DPLL can't generate 216-240 MHz */
+ if (IS_CHERRYVIEW(dev_priv) && clock > 216000 && clock < 240000)
+ return MODE_CLOCK_RANGE;
+
+ /* ICL+ combo PHY PLL can't generate 500-533.2 MHz */
+ if (intel_phy_is_combo(dev_priv, phy) && clock > 500000 && clock < 533200)
+ return MODE_CLOCK_RANGE;
+
+ /* ICL+ TC PHY PLL can't generate 500-532.8 MHz */
+ if (intel_phy_is_tc(dev_priv, phy) && clock > 500000 && clock < 532800)
+ return MODE_CLOCK_RANGE;
+
+ /*
+ * SNPS PHYs' MPLLB table-based programming can only handle a fixed
+ * set of link rates.
+ *
+ * FIXME: We will hopefully get an algorithmic way of programming
+ * the MPLLB for HDMI in the future.
+ */
+ if (IS_DG2(dev_priv))
+ return intel_snps_phy_check_hdmi_link_rate(clock);
+
+ return MODE_OK;
+}
+
+int intel_hdmi_tmds_clock(int clock, int bpc, bool ycbcr420_output)
+{
+ /* YCBCR420 TMDS rate requirement is half the pixel clock */
+ if (ycbcr420_output)
+ clock /= 2;
+
+ /*
+ * Need to adjust the port link by:
+ * 1.5x for 12bpc
+ * 1.25x for 10bpc
+ */
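+	/*
+	 * Worked example: a 594000 kHz mode at 12 bpc RGB needs
+	 * 594000 * 12 / 8 = 891000 kHz on the port, while the same mode
+	 * as YCbCr 4:2:0 first halves to 297000 kHz and then needs
+	 * 297000 * 12 / 8 = 445500 kHz.
+	 */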
+ return DIV_ROUND_CLOSEST(clock * bpc, 8);
+}
+
+static bool intel_hdmi_source_bpc_possible(struct drm_i915_private *i915, int bpc)
+{
+ switch (bpc) {
+ case 12:
+ return !HAS_GMCH(i915);
+ case 10:
+ return DISPLAY_VER(i915) >= 11;
+ case 8:
+ return true;
+ default:
+ MISSING_CASE(bpc);
+ return false;
+ }
+}
+
+static bool intel_hdmi_sink_bpc_possible(struct drm_connector *connector,
+ int bpc, bool has_hdmi_sink, bool ycbcr420_output)
+{
+ const struct drm_display_info *info = &connector->display_info;
+ const struct drm_hdmi_info *hdmi = &info->hdmi;
+
+ switch (bpc) {
+ case 12:
+ if (!has_hdmi_sink)
+ return false;
+
+ if (ycbcr420_output)
+ return hdmi->y420_dc_modes & DRM_EDID_YCBCR420_DC_36;
+ else
+ return info->edid_hdmi_rgb444_dc_modes & DRM_EDID_HDMI_DC_36;
+ case 10:
+ if (!has_hdmi_sink)
+ return false;
+
+ if (ycbcr420_output)
+ return hdmi->y420_dc_modes & DRM_EDID_YCBCR420_DC_30;
+ else
+ return info->edid_hdmi_rgb444_dc_modes & DRM_EDID_HDMI_DC_30;
+ case 8:
+ return true;
+ default:
+ MISSING_CASE(bpc);
+ return false;
+ }
+}
+
+static enum drm_mode_status
+intel_hdmi_mode_clock_valid(struct drm_connector *connector, int clock,
+ bool has_hdmi_sink, bool ycbcr420_output)
+{
+ struct drm_i915_private *i915 = to_i915(connector->dev);
+ struct intel_hdmi *hdmi = intel_attached_hdmi(to_intel_connector(connector));
+ enum drm_mode_status status = MODE_OK;
+ int bpc;
+
+ /*
+ * Try all color depths since valid port clock range
+ * can have holes. Any mode that can be used with at
+ * least one color depth is accepted.
+ */
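+	/*
+	 * E.g. a mode whose 12 bpc TMDS clock would land in one of the
+	 * DPLL holes checked by hdmi_port_clock_valid() may still be
+	 * accepted at 10 or 8 bpc.
+	 */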
+ for (bpc = 12; bpc >= 8; bpc -= 2) {
+ int tmds_clock = intel_hdmi_tmds_clock(clock, bpc, ycbcr420_output);
+
+ if (!intel_hdmi_source_bpc_possible(i915, bpc))
+ continue;
+
+ if (!intel_hdmi_sink_bpc_possible(connector, bpc, has_hdmi_sink, ycbcr420_output))
+ continue;
+
+ status = hdmi_port_clock_valid(hdmi, tmds_clock, true, has_hdmi_sink);
+ if (status == MODE_OK)
+ return MODE_OK;
+ }
+
+ /* can never happen */
+ drm_WARN_ON(&i915->drm, status == MODE_OK);
+
+ return status;
+}
+
+static enum drm_mode_status
+intel_hdmi_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct intel_hdmi *hdmi = intel_attached_hdmi(to_intel_connector(connector));
+ struct drm_i915_private *dev_priv = intel_hdmi_to_i915(hdmi);
+ enum drm_mode_status status;
+ int clock = mode->clock;
+ int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
+ bool has_hdmi_sink = intel_has_hdmi_sink(hdmi, connector->state);
+ bool ycbcr_420_only;
+
+ status = intel_cpu_transcoder_mode_valid(dev_priv, mode);
+ if (status != MODE_OK)
+ return status;
+
+ if ((mode->flags & DRM_MODE_FLAG_3D_MASK) == DRM_MODE_FLAG_3D_FRAME_PACKING)
+ clock *= 2;
+
+ if (clock > max_dotclk)
+ return MODE_CLOCK_HIGH;
+
+ if (mode->flags & DRM_MODE_FLAG_DBLCLK) {
+ if (!has_hdmi_sink)
+ return MODE_CLOCK_LOW;
+ clock *= 2;
+ }
+
+	/*
+	 * HDMI 2.1 requires higher resolution modes like 8k60, 4k120 to be
+	 * enumerated only if FRL is supported. Current platforms do not
+	 * support FRL, so prune the higher resolution modes that require a
+	 * dotclock above 600 MHz.
+	 */
+ if (clock > 600000)
+ return MODE_CLOCK_HIGH;
+
+ ycbcr_420_only = drm_mode_is_420_only(&connector->display_info, mode);
+
+ status = intel_hdmi_mode_clock_valid(connector, clock, has_hdmi_sink, ycbcr_420_only);
+ if (status != MODE_OK) {
+ if (ycbcr_420_only ||
+ !connector->ycbcr_420_allowed ||
+ !drm_mode_is_420_also(&connector->display_info, mode))
+ return status;
+
+ status = intel_hdmi_mode_clock_valid(connector, clock, has_hdmi_sink, true);
+ if (status != MODE_OK)
+ return status;
+ }
+
+ return intel_mode_valid_max_plane_size(dev_priv, mode, false);
+}
+
+bool intel_hdmi_bpc_possible(const struct intel_crtc_state *crtc_state,
+ int bpc, bool has_hdmi_sink, bool ycbcr420_output)
+{
+ struct drm_atomic_state *state = crtc_state->uapi.state;
+ struct drm_connector_state *connector_state;
+ struct drm_connector *connector;
+ int i;
+
+ for_each_new_connector_in_state(state, connector, connector_state, i) {
+ if (connector_state->crtc != crtc_state->uapi.crtc)
+ continue;
+
+ if (!intel_hdmi_sink_bpc_possible(connector, bpc, has_hdmi_sink, ycbcr420_output))
+ return false;
+ }
+
+ return true;
+}
+
+static bool hdmi_bpc_possible(const struct intel_crtc_state *crtc_state, int bpc)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(crtc_state->uapi.crtc->dev);
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+
+ if (!intel_hdmi_source_bpc_possible(dev_priv, bpc))
+ return false;
+
+ /*
+ * HDMI deep color affects the clocks, so it's only possible
+ * when not cloning with other encoder types.
+ */
+ if (bpc > 8 && crtc_state->output_types != BIT(INTEL_OUTPUT_HDMI))
+ return false;
+
+ /* Display Wa_1405510057:icl,ehl */
+ if (intel_hdmi_is_ycbcr420(crtc_state) &&
+ bpc == 10 && DISPLAY_VER(dev_priv) == 11 &&
+ (adjusted_mode->crtc_hblank_end -
+ adjusted_mode->crtc_hblank_start) % 8 == 2)
+ return false;
+
+ return intel_hdmi_bpc_possible(crtc_state, bpc, crtc_state->has_hdmi_sink,
+ intel_hdmi_is_ycbcr420(crtc_state));
+}
+
+static int intel_hdmi_compute_bpc(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ int clock, bool respect_downstream_limits)
+{
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ bool ycbcr420_output = intel_hdmi_is_ycbcr420(crtc_state);
+ int bpc;
+
+ /*
+ * pipe_bpp could already be below 8bpc due to FDI
+ * bandwidth constraints. HDMI minimum is 8bpc however.
+ */
+ bpc = max(crtc_state->pipe_bpp / 3, 8);
+
+ /*
+ * We will never exceed downstream TMDS clock limits while
+ * attempting deep color. If the user insists on forcing an
+ * out of spec mode they will have to be satisfied with 8bpc.
+ */
+ if (!respect_downstream_limits)
+ bpc = 8;
+
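+	/*
+	 * E.g. starting from pipe_bpp = 36 this tries 12, 10 and 8 bpc in
+	 * turn and returns the first depth that both the sink and the
+	 * port clock can handle.
+	 */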
+ for (; bpc >= 8; bpc -= 2) {
+ int tmds_clock = intel_hdmi_tmds_clock(clock, bpc, ycbcr420_output);
+
+ if (hdmi_bpc_possible(crtc_state, bpc) &&
+ hdmi_port_clock_valid(intel_hdmi, tmds_clock,
+ respect_downstream_limits,
+ crtc_state->has_hdmi_sink) == MODE_OK)
+ return bpc;
+ }
+
+ return -EINVAL;
+}
+
+static int intel_hdmi_compute_clock(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ bool respect_downstream_limits)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+ int bpc, clock = adjusted_mode->crtc_clock;
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
+ clock *= 2;
+
+ bpc = intel_hdmi_compute_bpc(encoder, crtc_state, clock,
+ respect_downstream_limits);
+ if (bpc < 0)
+ return bpc;
+
+ crtc_state->port_clock =
+ intel_hdmi_tmds_clock(clock, bpc, intel_hdmi_is_ycbcr420(crtc_state));
+
+ /*
+ * pipe_bpp could already be below 8bpc due to
+ * FDI bandwidth constraints. We shouldn't bump it
+ * back up to the HDMI minimum 8bpc in that case.
+ */
+ crtc_state->pipe_bpp = min(crtc_state->pipe_bpp, bpc * 3);
+
+ drm_dbg_kms(&i915->drm,
+ "picking %d bpc for HDMI output (pipe bpp: %d)\n",
+ bpc, crtc_state->pipe_bpp);
+
+ return 0;
+}
+
+bool intel_hdmi_limited_color_range(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ const struct intel_digital_connector_state *intel_conn_state =
+ to_intel_digital_connector_state(conn_state);
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+
+ /*
+ * Our YCbCr output is always limited range.
+ * crtc_state->limited_color_range only applies to RGB,
+ * and it must never be set for YCbCr or we risk setting
+ * some conflicting bits in PIPECONF which will mess up
+ * the colors on the monitor.
+ */
+ if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
+ return false;
+
+ if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
+ /* See CEA-861-E - 5.1 Default Encoding Parameters */
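+		/*
+		 * E.g. CEA modes such as 1920x1080@60 default to limited
+		 * range, while non-CEA modes such as 1920x1200 default to
+		 * full range (640x480 being the usual exception among CEA
+		 * modes).
+		 */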
+ return crtc_state->has_hdmi_sink &&
+ drm_default_rgb_quant_range(adjusted_mode) ==
+ HDMI_QUANTIZATION_RANGE_LIMITED;
+ } else {
+ return intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_LIMITED;
+ }
+}
+
+static bool intel_hdmi_has_audio(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ const struct intel_digital_connector_state *intel_conn_state =
+ to_intel_digital_connector_state(conn_state);
+
+ if (!crtc_state->has_hdmi_sink)
+ return false;
+
+ if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
+ return intel_hdmi->has_audio;
+ else
+ return intel_conn_state->force_audio == HDMI_AUDIO_ON;
+}
+
+static enum intel_output_format
+intel_hdmi_output_format(struct intel_connector *connector,
+ bool ycbcr_420_output)
+{
+ if (connector->base.ycbcr_420_allowed && ycbcr_420_output)
+ return INTEL_OUTPUT_FORMAT_YCBCR420;
+ else
+ return INTEL_OUTPUT_FORMAT_RGB;
+}
+
+static int intel_hdmi_compute_output_format(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state,
+ bool respect_downstream_limits)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
+ const struct drm_display_info *info = &connector->base.display_info;
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ bool ycbcr_420_only = drm_mode_is_420_only(info, adjusted_mode);
+ int ret;
+
+ crtc_state->output_format = intel_hdmi_output_format(connector, ycbcr_420_only);
+
+ if (ycbcr_420_only && !intel_hdmi_is_ycbcr420(crtc_state)) {
+ drm_dbg_kms(&i915->drm,
+ "YCbCr 4:2:0 mode but YCbCr 4:2:0 output not possible. Falling back to RGB.\n");
+ crtc_state->output_format = INTEL_OUTPUT_FORMAT_RGB;
+ }
+
+ ret = intel_hdmi_compute_clock(encoder, crtc_state, respect_downstream_limits);
+ if (ret) {
+ if (intel_hdmi_is_ycbcr420(crtc_state) ||
+ !connector->base.ycbcr_420_allowed ||
+ !drm_mode_is_420_also(info, adjusted_mode))
+ return ret;
+
+ crtc_state->output_format = intel_hdmi_output_format(connector, true);
+ ret = intel_hdmi_compute_clock(encoder, crtc_state, respect_downstream_limits);
+ }
+
+ return ret;
+}
+
+int intel_hdmi_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
+{
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
+ struct drm_connector *connector = conn_state->connector;
+ struct drm_scdc *scdc = &connector->display_info.hdmi.scdc;
+ int ret;
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return -EINVAL;
+
+ pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
+ pipe_config->has_hdmi_sink = intel_has_hdmi_sink(intel_hdmi,
+ conn_state);
+
+ if (pipe_config->has_hdmi_sink)
+ pipe_config->has_infoframe = true;
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
+ pipe_config->pixel_multiplier = 2;
+
+ if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv))
+ pipe_config->has_pch_encoder = true;
+
+ pipe_config->has_audio =
+ intel_hdmi_has_audio(encoder, pipe_config, conn_state);
+
+ /*
+	 * Try to respect downstream TMDS clock limits first; if that
+	 * fails, assume the user might know something we don't.
+ */
+ ret = intel_hdmi_compute_output_format(encoder, pipe_config, conn_state, true);
+ if (ret)
+ ret = intel_hdmi_compute_output_format(encoder, pipe_config, conn_state, false);
+ if (ret) {
+ drm_dbg_kms(&dev_priv->drm,
+ "unsupported HDMI clock (%d kHz), rejecting mode\n",
+ pipe_config->hw.adjusted_mode.crtc_clock);
+ return ret;
+ }
+
+ if (intel_hdmi_is_ycbcr420(pipe_config)) {
+ ret = intel_panel_fitting(pipe_config, conn_state);
+ if (ret)
+ return ret;
+ }
+
+ pipe_config->limited_color_range =
+ intel_hdmi_limited_color_range(pipe_config, conn_state);
+
+ if (conn_state->picture_aspect_ratio)
+ adjusted_mode->picture_aspect_ratio =
+ conn_state->picture_aspect_ratio;
+
+ pipe_config->lane_count = 4;
+
+ if (scdc->scrambling.supported && DISPLAY_VER(dev_priv) >= 10) {
+ if (scdc->scrambling.low_rates)
+ pipe_config->hdmi_scrambling = true;
+
+ if (pipe_config->port_clock > 340000) {
+ pipe_config->hdmi_scrambling = true;
+ pipe_config->hdmi_high_tmds_clock_ratio = true;
+ }
+ }
+
+ intel_hdmi_compute_gcp_infoframe(encoder, pipe_config,
+ conn_state);
+
+ if (!intel_hdmi_compute_avi_infoframe(encoder, pipe_config, conn_state)) {
+ drm_dbg_kms(&dev_priv->drm, "bad AVI infoframe\n");
+ return -EINVAL;
+ }
+
+ if (!intel_hdmi_compute_spd_infoframe(encoder, pipe_config, conn_state)) {
+ drm_dbg_kms(&dev_priv->drm, "bad SPD infoframe\n");
+ return -EINVAL;
+ }
+
+ if (!intel_hdmi_compute_hdmi_infoframe(encoder, pipe_config, conn_state)) {
+ drm_dbg_kms(&dev_priv->drm, "bad HDMI infoframe\n");
+ return -EINVAL;
+ }
+
+ if (!intel_hdmi_compute_drm_infoframe(encoder, pipe_config, conn_state)) {
+ drm_dbg_kms(&dev_priv->drm, "bad DRM infoframe\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void intel_hdmi_encoder_shutdown(struct intel_encoder *encoder)
+{
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+
+ /*
+ * Give a hand to buggy BIOSen which forget to turn
+ * the TMDS output buffers back on after a reboot.
+ */
+ intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
+}
+
+static void
+intel_hdmi_unset_edid(struct drm_connector *connector)
+{
+ struct intel_hdmi *intel_hdmi = intel_attached_hdmi(to_intel_connector(connector));
+
+ intel_hdmi->has_hdmi_sink = false;
+ intel_hdmi->has_audio = false;
+
+ intel_hdmi->dp_dual_mode.type = DRM_DP_DUAL_MODE_NONE;
+ intel_hdmi->dp_dual_mode.max_tmds_clock = 0;
+
+ kfree(to_intel_connector(connector)->detect_edid);
+ to_intel_connector(connector)->detect_edid = NULL;
+}
+
+static void
+intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector, bool has_edid)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
+ struct intel_hdmi *hdmi = intel_attached_hdmi(to_intel_connector(connector));
+ enum port port = hdmi_to_dig_port(hdmi)->base.port;
+ struct i2c_adapter *adapter =
+ intel_gmbus_get_adapter(dev_priv, hdmi->ddc_bus);
+ enum drm_dp_dual_mode_type type = drm_dp_dual_mode_detect(&dev_priv->drm, adapter);
+
+ /*
+ * Type 1 DVI adaptors are not required to implement any
+ * registers, so we can't always detect their presence.
+ * Ideally we should be able to check the state of the
+ * CONFIG1 pin, but no such luck on our hardware.
+ *
+ * The only method left to us is to check the VBT to see
+ * if the port is a dual mode capable DP port. But let's
+	 * only do that when we successfully read the EDID, to avoid
+ * confusing log messages about DP dual mode adaptors when
+ * there's nothing connected to the port.
+ */
+ if (type == DRM_DP_DUAL_MODE_UNKNOWN) {
+		/*
+		 * An overridden EDID implies that we want this port for
+		 * testing. Make sure not to set limits for that port.
+		 */
+ if (has_edid && !connector->override_edid &&
+ intel_bios_is_port_dp_dual_mode(dev_priv, port)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Assuming DP dual mode adaptor presence based on VBT\n");
+ type = DRM_DP_DUAL_MODE_TYPE1_DVI;
+ } else {
+ type = DRM_DP_DUAL_MODE_NONE;
+ }
+ }
+
+ if (type == DRM_DP_DUAL_MODE_NONE)
+ return;
+
+ hdmi->dp_dual_mode.type = type;
+ hdmi->dp_dual_mode.max_tmds_clock =
+ drm_dp_dual_mode_max_tmds_clock(&dev_priv->drm, type, adapter);
+
+ drm_dbg_kms(&dev_priv->drm,
+ "DP dual mode adaptor (%s) detected (max TMDS clock: %d kHz)\n",
+ drm_dp_get_dual_mode_type_name(type),
+ hdmi->dp_dual_mode.max_tmds_clock);
+
+ /* Older VBTs are often buggy and can't be trusted :( Play it safe. */
+ if ((DISPLAY_VER(dev_priv) >= 8 || IS_HASWELL(dev_priv)) &&
+ !intel_bios_is_port_dp_dual_mode(dev_priv, port)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Ignoring DP dual mode adaptor max TMDS clock for native HDMI port\n");
+ hdmi->dp_dual_mode.max_tmds_clock = 0;
+ }
+}
+
+static bool
+intel_hdmi_set_edid(struct drm_connector *connector)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
+ struct intel_hdmi *intel_hdmi = intel_attached_hdmi(to_intel_connector(connector));
+ intel_wakeref_t wakeref;
+ struct edid *edid;
+ bool connected = false;
+ struct i2c_adapter *i2c;
+
+ wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
+
+ i2c = intel_gmbus_get_adapter(dev_priv, intel_hdmi->ddc_bus);
+
+ edid = drm_get_edid(connector, i2c);
+
+ if (!edid && !intel_gmbus_is_forced_bit(i2c)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "HDMI GMBUS EDID read failed, retry using GPIO bit-banging\n");
+ intel_gmbus_force_bit(i2c, true);
+ edid = drm_get_edid(connector, i2c);
+ intel_gmbus_force_bit(i2c, false);
+ }
+
+ intel_hdmi_dp_dual_mode_detect(connector, edid != NULL);
+
+ intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS, wakeref);
+
+ to_intel_connector(connector)->detect_edid = edid;
+ if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) {
+ intel_hdmi->has_audio = drm_detect_monitor_audio(edid);
+ intel_hdmi->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
+
+ connected = true;
+ }
+
+ cec_notifier_set_phys_addr_from_edid(intel_hdmi->cec_notifier, edid);
+
+ return connected;
+}
+
+static enum drm_connector_status
+intel_hdmi_detect(struct drm_connector *connector, bool force)
+{
+ enum drm_connector_status status = connector_status_disconnected;
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
+ struct intel_hdmi *intel_hdmi = intel_attached_hdmi(to_intel_connector(connector));
+ struct intel_encoder *encoder = &hdmi_to_dig_port(intel_hdmi)->base;
+ intel_wakeref_t wakeref;
+
+ drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
+ connector->base.id, connector->name);
+
+ if (!INTEL_DISPLAY_ENABLED(dev_priv))
+ return connector_status_disconnected;
+
+ wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
+
+ if (DISPLAY_VER(dev_priv) >= 11 &&
+ !intel_digital_port_connected(encoder))
+ goto out;
+
+ intel_hdmi_unset_edid(connector);
+
+ if (intel_hdmi_set_edid(connector))
+ status = connector_status_connected;
+
+out:
+ intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS, wakeref);
+
+ if (status != connector_status_connected)
+ cec_notifier_phys_addr_invalidate(intel_hdmi->cec_notifier);
+
+ /*
+ * Make sure the refs for power wells enabled during detect are
+ * dropped to avoid a new detect cycle triggered by HPD polling.
+ */
+ intel_display_power_flush_work(dev_priv);
+
+ return status;
+}
+
+static void
+intel_hdmi_force(struct drm_connector *connector)
+{
+ struct drm_i915_private *i915 = to_i915(connector->dev);
+
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
+ connector->base.id, connector->name);
+
+ intel_hdmi_unset_edid(connector);
+
+ if (connector->status != connector_status_connected)
+ return;
+
+ intel_hdmi_set_edid(connector);
+}
+
+static int intel_hdmi_get_modes(struct drm_connector *connector)
+{
+ struct edid *edid;
+
+ edid = to_intel_connector(connector)->detect_edid;
+ if (edid == NULL)
+ return 0;
+
+ return intel_connector_update_modes(connector, edid);
+}
+
+static struct i2c_adapter *
+intel_hdmi_get_i2c_adapter(struct drm_connector *connector)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
+ struct intel_hdmi *intel_hdmi = intel_attached_hdmi(to_intel_connector(connector));
+
+ return intel_gmbus_get_adapter(dev_priv, intel_hdmi->ddc_bus);
+}
+
+static void intel_hdmi_create_i2c_symlink(struct drm_connector *connector)
+{
+ struct drm_i915_private *i915 = to_i915(connector->dev);
+ struct i2c_adapter *adapter = intel_hdmi_get_i2c_adapter(connector);
+ struct kobject *i2c_kobj = &adapter->dev.kobj;
+ struct kobject *connector_kobj = &connector->kdev->kobj;
+ int ret;
+
+ ret = sysfs_create_link(connector_kobj, i2c_kobj, i2c_kobj->name);
+ if (ret)
+ drm_err(&i915->drm, "Failed to create i2c symlink (%d)\n", ret);
+}
+
+static void intel_hdmi_remove_i2c_symlink(struct drm_connector *connector)
+{
+ struct i2c_adapter *adapter = intel_hdmi_get_i2c_adapter(connector);
+ struct kobject *i2c_kobj = &adapter->dev.kobj;
+ struct kobject *connector_kobj = &connector->kdev->kobj;
+
+ sysfs_remove_link(connector_kobj, i2c_kobj->name);
+}
+
+static int
+intel_hdmi_connector_register(struct drm_connector *connector)
+{
+ int ret;
+
+ ret = intel_connector_register(connector);
+ if (ret)
+ return ret;
+
+ intel_hdmi_create_i2c_symlink(connector);
+
+ return ret;
+}
+
+static void intel_hdmi_connector_unregister(struct drm_connector *connector)
+{
+ struct cec_notifier *n = intel_attached_hdmi(to_intel_connector(connector))->cec_notifier;
+
+ cec_notifier_conn_unregister(n);
+
+ intel_hdmi_remove_i2c_symlink(connector);
+ intel_connector_unregister(connector);
+}
+
+static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
+ .detect = intel_hdmi_detect,
+ .force = intel_hdmi_force,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .atomic_get_property = intel_digital_connector_atomic_get_property,
+ .atomic_set_property = intel_digital_connector_atomic_set_property,
+ .late_register = intel_hdmi_connector_register,
+ .early_unregister = intel_hdmi_connector_unregister,
+ .destroy = intel_connector_destroy,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_duplicate_state = intel_digital_connector_duplicate_state,
+};
+
+static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs = {
+ .get_modes = intel_hdmi_get_modes,
+ .mode_valid = intel_hdmi_mode_valid,
+ .atomic_check = intel_digital_connector_atomic_check,
+};
+
+static void
+intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
+
+ intel_attach_force_audio_property(connector);
+ intel_attach_broadcast_rgb_property(connector);
+ intel_attach_aspect_ratio_property(connector);
+
+ intel_attach_hdmi_colorspace_property(connector);
+ drm_connector_attach_content_type_property(connector);
+
+ if (DISPLAY_VER(dev_priv) >= 10)
+ drm_connector_attach_hdr_output_metadata_property(connector);
+
+ if (!HAS_GMCH(dev_priv))
+ drm_connector_attach_max_bpc_property(connector, 8, 12);
+}
+
+/*
+ * intel_hdmi_handle_sink_scrambling - handle sink scrambling/clock ratio setup
+ * @encoder: intel_encoder
+ * @connector: drm_connector
+ * @high_tmds_clock_ratio: bool to indicate if the function needs to set
+ * or reset the high tmds clock ratio for scrambling
+ * @scrambling: bool to indicate if the function needs to set or reset
+ * sink scrambling
+ *
+ * This function handles scrambling on HDMI 2.0 capable sinks.
+ * If the required clock rate is > 340 MHz and scrambling is supported by
+ * the sink, it enables scrambling. This should be called before enabling
+ * the HDMI 2.0 port, as the sink can choose to disable scrambling if it
+ * doesn't detect a scrambled clock within 100 ms.
+ *
+ * Returns:
+ * True on success, false on failure.
+ */
+bool intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder,
+ struct drm_connector *connector,
+ bool high_tmds_clock_ratio,
+ bool scrambling)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ struct drm_scrambling *sink_scrambling =
+ &connector->display_info.hdmi.scdc.scrambling;
+ struct i2c_adapter *adapter =
+ intel_gmbus_get_adapter(dev_priv, intel_hdmi->ddc_bus);
+
+ if (!sink_scrambling->supported)
+ return true;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "[CONNECTOR:%d:%s] scrambling=%s, TMDS bit clock ratio=1/%d\n",
+ connector->base.id, connector->name,
+ str_yes_no(scrambling), high_tmds_clock_ratio ? 40 : 10);
+
+ /* Set TMDS bit clock ratio to 1/40 or 1/10, and enable/disable scrambling */
+ return drm_scdc_set_high_tmds_clock_ratio(adapter,
+ high_tmds_clock_ratio) &&
+ drm_scdc_set_scrambling(adapter, scrambling);
+}
+
+static u8 chv_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
+{
+ u8 ddc_pin;
+
+ switch (port) {
+ case PORT_B:
+ ddc_pin = GMBUS_PIN_DPB;
+ break;
+ case PORT_C:
+ ddc_pin = GMBUS_PIN_DPC;
+ break;
+ case PORT_D:
+ ddc_pin = GMBUS_PIN_DPD_CHV;
+ break;
+ default:
+ MISSING_CASE(port);
+ ddc_pin = GMBUS_PIN_DPB;
+ break;
+ }
+ return ddc_pin;
+}
+
+static u8 bxt_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
+{
+ u8 ddc_pin;
+
+ switch (port) {
+ case PORT_B:
+ ddc_pin = GMBUS_PIN_1_BXT;
+ break;
+ case PORT_C:
+ ddc_pin = GMBUS_PIN_2_BXT;
+ break;
+ default:
+ MISSING_CASE(port);
+ ddc_pin = GMBUS_PIN_1_BXT;
+ break;
+ }
+ return ddc_pin;
+}
+
+static u8 cnp_port_to_ddc_pin(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ u8 ddc_pin;
+
+ switch (port) {
+ case PORT_B:
+ ddc_pin = GMBUS_PIN_1_BXT;
+ break;
+ case PORT_C:
+ ddc_pin = GMBUS_PIN_2_BXT;
+ break;
+ case PORT_D:
+ ddc_pin = GMBUS_PIN_4_CNP;
+ break;
+ case PORT_F:
+ ddc_pin = GMBUS_PIN_3_BXT;
+ break;
+ default:
+ MISSING_CASE(port);
+ ddc_pin = GMBUS_PIN_1_BXT;
+ break;
+ }
+ return ddc_pin;
+}
+
+static u8 icl_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
+{
+ enum phy phy = intel_port_to_phy(dev_priv, port);
+
+ if (intel_phy_is_combo(dev_priv, phy))
+ return GMBUS_PIN_1_BXT + port;
+ else if (intel_phy_is_tc(dev_priv, phy))
+ return GMBUS_PIN_9_TC1_ICP + intel_port_to_tc(dev_priv, port);
+
+ drm_WARN(&dev_priv->drm, 1, "Unknown port:%c\n", port_name(port));
+ return GMBUS_PIN_2_BXT;
+}
+
+static u8 mcc_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
+{
+ enum phy phy = intel_port_to_phy(dev_priv, port);
+ u8 ddc_pin;
+
+ switch (phy) {
+ case PHY_A:
+ ddc_pin = GMBUS_PIN_1_BXT;
+ break;
+ case PHY_B:
+ ddc_pin = GMBUS_PIN_2_BXT;
+ break;
+ case PHY_C:
+ ddc_pin = GMBUS_PIN_9_TC1_ICP;
+ break;
+ default:
+ MISSING_CASE(phy);
+ ddc_pin = GMBUS_PIN_1_BXT;
+ break;
+ }
+ return ddc_pin;
+}
+
+static u8 rkl_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
+{
+ enum phy phy = intel_port_to_phy(dev_priv, port);
+
+ WARN_ON(port == PORT_C);
+
+ /*
+ * Pin mapping for RKL depends on which PCH is present. With TGP, the
+ * final two outputs use type-c pins, even though they're actually
+ * combo outputs. With CMP, the traditional DDI A-D pins are used for
+ * all outputs.
+ */
+ if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP && phy >= PHY_C)
+ return GMBUS_PIN_9_TC1_ICP + phy - PHY_C;
+
+ return GMBUS_PIN_1_BXT + phy;
+}
+
+static u8 gen9bc_tgp_port_to_ddc_pin(struct drm_i915_private *i915, enum port port)
+{
+ enum phy phy = intel_port_to_phy(i915, port);
+
+ drm_WARN_ON(&i915->drm, port == PORT_A);
+
+ /*
+ * Pin mapping for GEN9 BC depends on which PCH is present. With TGP,
+	 * the final two outputs use type-c pins, even though they're actually
+ * combo outputs. With CMP, the traditional DDI A-D pins are used for
+ * all outputs.
+ */
+ if (INTEL_PCH_TYPE(i915) >= PCH_TGP && phy >= PHY_C)
+ return GMBUS_PIN_9_TC1_ICP + phy - PHY_C;
+
+ return GMBUS_PIN_1_BXT + phy;
+}
+
+static u8 dg1_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
+{
+ return intel_port_to_phy(dev_priv, port) + 1;
+}
+
+static u8 adls_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
+{
+ enum phy phy = intel_port_to_phy(dev_priv, port);
+
+ WARN_ON(port == PORT_B || port == PORT_C);
+
+ /*
+ * Pin mapping for ADL-S requires TC pins for all combo phy outputs
+	 * except the first combo output.
+ */
+ if (phy == PHY_A)
+ return GMBUS_PIN_1_BXT;
+
+ return GMBUS_PIN_9_TC1_ICP + phy - PHY_B;
+}
+
+static u8 g4x_port_to_ddc_pin(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ u8 ddc_pin;
+
+ switch (port) {
+ case PORT_B:
+ ddc_pin = GMBUS_PIN_DPB;
+ break;
+ case PORT_C:
+ ddc_pin = GMBUS_PIN_DPC;
+ break;
+ case PORT_D:
+ ddc_pin = GMBUS_PIN_DPD;
+ break;
+ default:
+ MISSING_CASE(port);
+ ddc_pin = GMBUS_PIN_DPB;
+ break;
+ }
+ return ddc_pin;
+}
+
+static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+ u8 ddc_pin;
+
+ ddc_pin = intel_bios_alternate_ddc_pin(encoder);
+ if (ddc_pin) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Using DDC pin 0x%x for port %c (VBT)\n",
+ ddc_pin, port_name(port));
+ return ddc_pin;
+ }
+
+ if (IS_ALDERLAKE_S(dev_priv))
+ ddc_pin = adls_port_to_ddc_pin(dev_priv, port);
+ else if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1)
+ ddc_pin = dg1_port_to_ddc_pin(dev_priv, port);
+ else if (IS_ROCKETLAKE(dev_priv))
+ ddc_pin = rkl_port_to_ddc_pin(dev_priv, port);
+ else if (DISPLAY_VER(dev_priv) == 9 && HAS_PCH_TGP(dev_priv))
+ ddc_pin = gen9bc_tgp_port_to_ddc_pin(dev_priv, port);
+ else if (IS_JSL_EHL(dev_priv) && HAS_PCH_TGP(dev_priv))
+ ddc_pin = mcc_port_to_ddc_pin(dev_priv, port);
+ else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
+ ddc_pin = icl_port_to_ddc_pin(dev_priv, port);
+ else if (HAS_PCH_CNP(dev_priv))
+ ddc_pin = cnp_port_to_ddc_pin(dev_priv, port);
+ else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ ddc_pin = bxt_port_to_ddc_pin(dev_priv, port);
+ else if (IS_CHERRYVIEW(dev_priv))
+ ddc_pin = chv_port_to_ddc_pin(dev_priv, port);
+ else
+ ddc_pin = g4x_port_to_ddc_pin(dev_priv, port);
+
+ drm_dbg_kms(&dev_priv->drm,
+ "Using DDC pin 0x%x for port %c (platform default)\n",
+ ddc_pin, port_name(port));
+
+ return ddc_pin;
+}
+
+void intel_infoframe_init(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(dig_port->base.base.dev);
+
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ dig_port->write_infoframe = vlv_write_infoframe;
+ dig_port->read_infoframe = vlv_read_infoframe;
+ dig_port->set_infoframes = vlv_set_infoframes;
+ dig_port->infoframes_enabled = vlv_infoframes_enabled;
+ } else if (IS_G4X(dev_priv)) {
+ dig_port->write_infoframe = g4x_write_infoframe;
+ dig_port->read_infoframe = g4x_read_infoframe;
+ dig_port->set_infoframes = g4x_set_infoframes;
+ dig_port->infoframes_enabled = g4x_infoframes_enabled;
+ } else if (HAS_DDI(dev_priv)) {
+ if (intel_bios_is_lspcon_present(dev_priv, dig_port->base.port)) {
+ dig_port->write_infoframe = lspcon_write_infoframe;
+ dig_port->read_infoframe = lspcon_read_infoframe;
+ dig_port->set_infoframes = lspcon_set_infoframes;
+ dig_port->infoframes_enabled = lspcon_infoframes_enabled;
+ } else {
+ dig_port->write_infoframe = hsw_write_infoframe;
+ dig_port->read_infoframe = hsw_read_infoframe;
+ dig_port->set_infoframes = hsw_set_infoframes;
+ dig_port->infoframes_enabled = hsw_infoframes_enabled;
+ }
+ } else if (HAS_PCH_IBX(dev_priv)) {
+ dig_port->write_infoframe = ibx_write_infoframe;
+ dig_port->read_infoframe = ibx_read_infoframe;
+ dig_port->set_infoframes = ibx_set_infoframes;
+ dig_port->infoframes_enabled = ibx_infoframes_enabled;
+ } else {
+ dig_port->write_infoframe = cpt_write_infoframe;
+ dig_port->read_infoframe = cpt_read_infoframe;
+ dig_port->set_infoframes = cpt_set_infoframes;
+ dig_port->infoframes_enabled = cpt_infoframes_enabled;
+ }
+}
+
+void intel_hdmi_init_connector(struct intel_digital_port *dig_port,
+ struct intel_connector *intel_connector)
+{
+ struct drm_connector *connector = &intel_connector->base;
+ struct intel_hdmi *intel_hdmi = &dig_port->hdmi;
+ struct intel_encoder *intel_encoder = &dig_port->base;
+ struct drm_device *dev = intel_encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i2c_adapter *ddc;
+ enum port port = intel_encoder->port;
+ struct cec_connector_info conn_info;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "Adding HDMI connector on [ENCODER:%d:%s]\n",
+ intel_encoder->base.base.id, intel_encoder->base.name);
+
+ if (DISPLAY_VER(dev_priv) < 12 && drm_WARN_ON(dev, port == PORT_A))
+ return;
+
+ if (drm_WARN(dev, dig_port->max_lanes < 4,
+ "Not enough lanes (%d) for HDMI on [ENCODER:%d:%s]\n",
+ dig_port->max_lanes, intel_encoder->base.base.id,
+ intel_encoder->base.name))
+ return;
+
+ intel_hdmi->ddc_bus = intel_hdmi_ddc_pin(intel_encoder);
+ ddc = intel_gmbus_get_adapter(dev_priv, intel_hdmi->ddc_bus);
+
+ drm_connector_init_with_ddc(dev, connector,
+ &intel_hdmi_connector_funcs,
+ DRM_MODE_CONNECTOR_HDMIA,
+ ddc);
+ drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs);
+
+ connector->interlace_allowed = 1;
+ connector->doublescan_allowed = 0;
+ connector->stereo_allowed = 1;
+
+ if (DISPLAY_VER(dev_priv) >= 10)
+ connector->ycbcr_420_allowed = true;
+
+ intel_connector->polled = DRM_CONNECTOR_POLL_HPD;
+
+ if (HAS_DDI(dev_priv))
+ intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
+ else
+ intel_connector->get_hw_state = intel_connector_get_hw_state;
+
+ intel_hdmi_add_properties(intel_hdmi, connector);
+
+ intel_connector_attach_encoder(intel_connector, intel_encoder);
+ intel_hdmi->attached_connector = intel_connector;
+
+ if (is_hdcp_supported(dev_priv, port)) {
+ int ret = intel_hdcp_init(intel_connector, dig_port,
+ &intel_hdmi_hdcp_shim);
+ if (ret)
+ drm_dbg_kms(&dev_priv->drm,
+ "HDCP init failed, skipping.\n");
+ }
+
+	/*
+	 * For the G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be
+	 * written 0xd. Failure to do so will result in spurious interrupts
+	 * being generated on the port when a cable is not attached.
+	 */
+ if (IS_G45(dev_priv)) {
+ u32 temp = intel_de_read(dev_priv, PEG_BAND_GAP_DATA);
+ intel_de_write(dev_priv, PEG_BAND_GAP_DATA,
+ (temp & ~0xf) | 0xd);
+ }
+
+ cec_fill_conn_info_from_drm(&conn_info, connector);
+
+ intel_hdmi->cec_notifier =
+ cec_notifier_conn_register(dev->dev, port_identifier(port),
+ &conn_info);
+ if (!intel_hdmi->cec_notifier)
+ drm_dbg_kms(&dev_priv->drm, "CEC notifier get failed\n");
+}
+
+/*
+ * intel_hdmi_dsc_get_slice_height - get the dsc slice_height
+ * @vactive: Vactive of a display mode
+ *
+ * @return: appropriate dsc slice height for a given mode.
+ */
+int intel_hdmi_dsc_get_slice_height(int vactive)
+{
+ int slice_height;
+
+	/*
+	 * Slice height determination: HDMI 2.1 Section 7.7.5.2
+	 * Select the smallest slice height >= 96 that results in a valid
+	 * PPS and requires the minimum number of padding lines for the
+	 * final slice.
+	 *
+	 * Assumption: Vactive is even.
+	 */
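+	/*
+	 * E.g. vactive = 2160: 96..106 are rejected (none divide 2160
+	 * evenly) and slice_height = 108 is returned (2160 / 108 = 20
+	 * slices).
+	 */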
+ for (slice_height = 96; slice_height <= vactive; slice_height += 2)
+ if (vactive % slice_height == 0)
+ return slice_height;
+
+ return 0;
+}
+
+/*
+ * intel_hdmi_dsc_get_num_slices - get the number of dsc slices based on dsc
+ * encoder and dsc decoder capabilities
+ *
+ * @crtc_state: intel crtc_state
+ * @src_max_slices: maximum slices supported by the DSC encoder
+ * @src_max_slice_width: maximum slice width supported by DSC encoder
+ * @hdmi_max_slices: maximum slices supported by sink DSC decoder
+ * @hdmi_throughput: maximum clock per slice (MHz) supported by HDMI sink
+ *
+ * @return: number of dsc slices that can be supported by the dsc encoder
+ * and decoder.
+ */
+int
+intel_hdmi_dsc_get_num_slices(const struct intel_crtc_state *crtc_state,
+ int src_max_slices, int src_max_slice_width,
+ int hdmi_max_slices, int hdmi_throughput)
+{
+/* Pixel rates in KPixels/sec */
+#define HDMI_DSC_PEAK_PIXEL_RATE 2720000
+/*
+ * Rates at which the source and sink are required to process pixels in each
+ * slice: at least 340000 kHz or at least 400000 kHz.
+ */
+#define HDMI_DSC_MAX_ENC_THROUGHPUT_0 340000
+#define HDMI_DSC_MAX_ENC_THROUGHPUT_1 400000
+
+/* Spec limits the slice width to 2720 pixels */
+#define MAX_HDMI_SLICE_WIDTH 2720
+ int kslice_adjust;
+ int adjusted_clk_khz;
+ int min_slices;
+ int target_slices;
+ int max_throughput; /* max clock freq. in khz per slice */
+ int max_slice_width;
+ int slice_width;
+ int pixel_clock = crtc_state->hw.adjusted_mode.crtc_clock;
+
+ if (!hdmi_throughput)
+ return 0;
+
+ /*
+	 * Slice width determination: HDMI 2.1 Section 7.7.5.1
+	 * The kslice_adjust factor for the 4:2:0 and 4:2:2 formats is 0.5,
+	 * whereas for 4:4:4 it is 1.0. These factors are multiplied by 10
+	 * here, and the adjusted clock value is divided by 10 later.
+ */
+ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444 ||
+ crtc_state->output_format == INTEL_OUTPUT_FORMAT_RGB)
+ kslice_adjust = 10;
+ else
+ kslice_adjust = 5;
+
+ /*
+	 * As per spec, the rate at which the source and the sink process
+	 * the pixels per slice is at one of two levels: at least 340 MHz
+	 * or 400 MHz, depending on the pixel clock rate and the output
+	 * format (kslice adjust).
+	 * If pixel clock * kslice adjust <= 2720 MHz, slices can be
+	 * processed at max 340 MHz, otherwise at max 400 MHz.
+ */
+
+ adjusted_clk_khz = DIV_ROUND_UP(kslice_adjust * pixel_clock, 10);
+
+ if (adjusted_clk_khz <= HDMI_DSC_PEAK_PIXEL_RATE)
+ max_throughput = HDMI_DSC_MAX_ENC_THROUGHPUT_0;
+ else
+ max_throughput = HDMI_DSC_MAX_ENC_THROUGHPUT_1;
+
+ /*
+ * Taking into account the sink's capability for maximum
+ * clock per slice (in MHz) as read from HF-VSDB.
+ */
+ max_throughput = min(max_throughput, hdmi_throughput * 1000);
+
+ min_slices = DIV_ROUND_UP(adjusted_clk_khz, max_throughput);
+ max_slice_width = min(MAX_HDMI_SLICE_WIDTH, src_max_slice_width);
+
+ /*
+	 * Keep increasing the number of slices per line, starting from
+	 * min_slices, until the resulting slice_width drops below
+	 * max_slice_width. The slices/line selected must be less than or
+	 * equal to the maximum horizontal slice count that the combination
+	 * of the PCON encoder and the HDMI decoder can support.
+ */
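+	/*
+	 * Illustrative numbers, assuming the encoder and decoder both
+	 * support at least 4 slices and >= 340 MHz per slice: a 1188000
+	 * kHz 4k120 RGB mode has min_slices =
+	 * DIV_ROUND_UP(1188000, 340000) = 4, and with hdisplay = 3840 the
+	 * resulting slice_width of 960 is below max_slice_width, so 4
+	 * slices/line are selected.
+	 */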
+ slice_width = max_slice_width;
+
+ do {
+ if (min_slices <= 1 && src_max_slices >= 1 && hdmi_max_slices >= 1)
+ target_slices = 1;
+ else if (min_slices <= 2 && src_max_slices >= 2 && hdmi_max_slices >= 2)
+ target_slices = 2;
+ else if (min_slices <= 4 && src_max_slices >= 4 && hdmi_max_slices >= 4)
+ target_slices = 4;
+ else if (min_slices <= 8 && src_max_slices >= 8 && hdmi_max_slices >= 8)
+ target_slices = 8;
+ else if (min_slices <= 12 && src_max_slices >= 12 && hdmi_max_slices >= 12)
+ target_slices = 12;
+ else if (min_slices <= 16 && src_max_slices >= 16 && hdmi_max_slices >= 16)
+ target_slices = 16;
+ else
+ return 0;
+
+ slice_width = DIV_ROUND_UP(crtc_state->hw.adjusted_mode.hdisplay, target_slices);
+ if (slice_width >= max_slice_width)
+ min_slices = target_slices + 1;
+ } while (slice_width >= max_slice_width);
+
+ return target_slices;
+}
+
+/*
+ * intel_hdmi_dsc_get_bpp - get the appropriate compressed bits_per_pixel based on
+ * source and sink capabilities.
+ *
+ * @src_fractional_bpp: fractional bpp supported by the source
+ * @slice_width: dsc slice width supported by the source and sink
+ * @num_slices: num of slices supported by the source and sink
+ * @output_format: video output format
+ * @hdmi_all_bpp: sink supports decoding of 1/16th bpp setting
+ * @hdmi_max_chunk_bytes: max bytes in a line of chunks supported by sink
+ *
+ * @return: compressed bits_per_pixel in step of 1/16 of bits_per_pixel
+ */
+int
+intel_hdmi_dsc_get_bpp(int src_fractional_bpp, int slice_width, int num_slices,
+ int output_format, bool hdmi_all_bpp,
+ int hdmi_max_chunk_bytes)
+{
+ int max_dsc_bpp, min_dsc_bpp;
+ int target_bytes;
+ bool bpp_found = false;
+ int bpp_decrement_x16;
+ int bpp_target;
+ int bpp_target_x16;
+
+ /*
+	 * Get the min and max bpp as per Table 7.23 in the HDMI 2.1 spec.
+	 * Start with the max bpp and keep decrementing by the fractional
+	 * bpp step, if supported by the PCON DSC encoder.
+	 *
+	 * For each bpp we check whether the resulting number of chunk
+	 * bytes per line can be supported by the HDMI sink.
+ */
+
+	/* Assuming a bpc of 8 */
+ if (output_format == INTEL_OUTPUT_FORMAT_YCBCR420) {
+ min_dsc_bpp = 6;
+ max_dsc_bpp = 3 * 4; /* 3*bpc/2 */
+ } else if (output_format == INTEL_OUTPUT_FORMAT_YCBCR444 ||
+ output_format == INTEL_OUTPUT_FORMAT_RGB) {
+ min_dsc_bpp = 8;
+ max_dsc_bpp = 3 * 8; /* 3*bpc */
+ } else {
+ /* Assuming 4:2:2 encoding */
+ min_dsc_bpp = 7;
+ max_dsc_bpp = 2 * 8; /* 2*bpc */
+ }
+
+ /*
+	 * Take into account whether DSC_all_bpp is supported by the
+	 * HDMI 2.1 sink. Section 7.7.34: the source shall not enable
+	 * compressed video transport with bpp_target settings above 12 bpp
+	 * unless DSC_all_bpp is set to 1.
+ */
+ if (!hdmi_all_bpp)
+ max_dsc_bpp = min(max_dsc_bpp, 12);
+
+ /*
+	 * The sink has a limit on the compressed data in bytes for a
+	 * scanline, described by the max_chunk_bytes field in the HF-VSDB
+	 * block of the EDID. The number of bytes depends on the target
+	 * bits per pixel that the source configures. So we start with the
+	 * max bpp and calculate the target_chunk_bytes. We keep
+	 * decrementing the target bpp until the target_chunk_bytes drops
+	 * just below the sink's max_chunk_bytes, or until we reach min_dsc_bpp.
+ *
+ * The decrement is according to the fractional support from PCON DSC
+ * encoder. For fractional BPP we use bpp_target as a multiple of 16.
+ *
+ * bpp_target_x16 = bpp_target * 16
+ * So we need to decrement by {1, 2, 4, 8, 16} for fractional bpps
+ * {1/16, 1/8, 1/4, 1/2, 1} respectively.
+ */
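+	/*
+	 * Illustrative numbers: with RGB output, hdmi_all_bpp == false and
+	 * no fractional bpp support, the search starts below the 12 bpp
+	 * cap at bpp_target_x16 = 176 (11 bpp); for 4 slices of width 960
+	 * that first candidate needs DIV_ROUND_UP(4 * 960 * 11, 8) = 5280
+	 * chunk bytes per line. A return value of 176 thus means 11 bpp.
+	 */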
+
+ bpp_target = max_dsc_bpp;
+
+	/*
+	 * If the src does not support fractional bpp, decrement
+	 * bpp_target_x16 in whole-bpp steps of 16.
+	 */
+ if (!src_fractional_bpp)
+ src_fractional_bpp = 1;
+ bpp_decrement_x16 = DIV_ROUND_UP(16, src_fractional_bpp);
+ bpp_target_x16 = (bpp_target * 16) - bpp_decrement_x16;
+
+ while (bpp_target_x16 > (min_dsc_bpp * 16)) {
+ int bpp;
+
+ bpp = DIV_ROUND_UP(bpp_target_x16, 16);
+ target_bytes = DIV_ROUND_UP((num_slices * slice_width * bpp), 8);
+ if (target_bytes <= hdmi_max_chunk_bytes) {
+ bpp_found = true;
+ break;
+ }
+ bpp_target_x16 -= bpp_decrement_x16;
+ }
+ if (bpp_found)
+ return bpp_target_x16;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.h b/drivers/gpu/drm/i915/display/intel_hdmi.h
new file mode 100644
index 000000000..93f65a917
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_HDMI_H__
+#define __INTEL_HDMI_H__
+
+#include <linux/hdmi.h>
+#include <linux/types.h>
+
+struct drm_connector;
+struct drm_encoder;
+struct drm_i915_private;
+struct intel_connector;
+struct intel_digital_port;
+struct intel_encoder;
+struct intel_crtc_state;
+struct intel_hdmi;
+struct drm_connector_state;
+union hdmi_infoframe;
+enum port;
+
+void intel_hdmi_init_connector(struct intel_digital_port *dig_port,
+ struct intel_connector *intel_connector);
+int intel_hdmi_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state);
+void intel_hdmi_encoder_shutdown(struct intel_encoder *encoder);
+bool intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder,
+ struct drm_connector *connector,
+ bool high_tmds_clock_ratio,
+ bool scrambling);
+void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable);
+void intel_infoframe_init(struct intel_digital_port *dig_port);
+u32 intel_hdmi_infoframes_enabled(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+u32 intel_hdmi_infoframe_enable(unsigned int type);
+void intel_hdmi_read_gcp_infoframe(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state);
+void intel_read_infoframe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ enum hdmi_infoframe_type type,
+ union hdmi_infoframe *frame);
+bool intel_hdmi_limited_color_range(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
+bool intel_hdmi_bpc_possible(const struct intel_crtc_state *crtc_state,
+ int bpc, bool has_hdmi_sink, bool ycbcr420_output);
+int intel_hdmi_tmds_clock(int clock, int bpc, bool ycbcr420_output);
+int intel_hdmi_dsc_get_bpp(int src_fractional_bpp, int slice_width,
+ int num_slices, int output_format, bool hdmi_all_bpp,
+ int hdmi_max_chunk_bytes);
+int intel_hdmi_dsc_get_num_slices(const struct intel_crtc_state *crtc_state,
+ int src_max_slices, int src_max_slice_width,
+ int hdmi_max_slices, int hdmi_throughput);
+int intel_hdmi_dsc_get_slice_height(int vactive);
+
+#endif /* __INTEL_HDMI_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c
new file mode 100644
index 000000000..f7a2f485b
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.c
@@ -0,0 +1,769 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+
+#include "i915_drv.h"
+#include "i915_irq.h"
+#include "intel_display_types.h"
+#include "intel_hotplug.h"
+
+/**
+ * DOC: Hotplug
+ *
+ * Simply put, hotplug occurs when a display is connected to or disconnected
+ * from the system. However, there may be adapters and docking stations and
+ * Display Port short pulses and MST devices involved, complicating matters.
+ *
+ * Hotplug in i915 is handled in many different levels of abstraction.
+ *
+ * The platform dependent interrupt handling code in i915_irq.c enables,
+ * disables, and does preliminary handling of the interrupts. The interrupt
+ * handlers gather the hotplug detect (HPD) information from relevant registers
+ * into a platform independent mask of hotplug pins that have fired.
+ *
+ * The platform independent interrupt handler intel_hpd_irq_handler() in
+ * intel_hotplug.c does hotplug irq storm detection and mitigation, and passes
+ * further processing to appropriate bottom halves (Display Port specific and
+ * regular hotplug).
+ *
+ * The Display Port work function i915_digport_work_func() calls into
+ * intel_dp_hpd_pulse() via hooks, which handles DP short pulses and DP MST long
+ * pulses, with failures and non-MST long pulses triggering regular hotplug
+ * processing on the connector.
+ *
+ * The regular hotplug work function i915_hotplug_work_func() calls connector
+ * detect hooks, and, if connector status changes, triggers sending of hotplug
+ * uevent to userspace via drm_kms_helper_hotplug_event().
+ *
+ * Finally, the userspace is responsible for triggering a modeset upon receiving
+ * the hotplug uevent, disabling or enabling the crtc as needed.
+ *
+ * The hotplug interrupt storm detection and mitigation code keeps track of the
+ * number of interrupts per hotplug pin over a period of time, and if the
+ * number of interrupts exceeds a certain threshold, the interrupt is disabled
+ * for a while before being re-enabled. The intention is to mitigate issues
+ * arising from broken hardware triggering massive amounts of interrupts and
+ * grinding the system to a halt.
+ *
+ * The current implementation expects that a hotplug interrupt storm will not
+ * be seen when a DisplayPort sink is connected, hence on platforms whose DP
+ * callback is handled by i915_digport_work_func, re-enabling of hpd is not
+ * performed (it was never expected to be disabled in the first place ;) ).
+ * This is specific to DP sinks handled by this routine; any other display,
+ * such as HDMI or DVI, enabled on the same port will have proper logic since
+ * it will use i915_hotplug_work_func where this logic is handled.
+ */
+
+/**
+ * intel_hpd_pin_default - return the default pin associated with a certain port.
+ * @dev_priv: private driver data pointer
+ * @port: the hpd port to get the associated pin for
+ *
+ * It is only valid for and used by digital port encoders.
+ *
+ * Return the pin that is associated with @port.
+ */
+enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ return HPD_PORT_A + port - PORT_A;
+}
+
+#define HPD_STORM_DETECT_PERIOD 1000
+#define HPD_STORM_REENABLE_DELAY (2 * 60 * 1000)
+#define HPD_RETRY_DELAY 1000
+
+static enum hpd_pin
+intel_connector_hpd_pin(struct intel_connector *connector)
+{
+ struct intel_encoder *encoder = intel_attached_encoder(connector);
+
+ /*
+ * MST connectors get their encoder attached dynamically
+	 * so we need to make sure we have an encoder here. But since
+ * MST encoders have their hpd_pin set to HPD_NONE we don't
+ * have to special case them beyond that.
+ */
+ return encoder ? encoder->hpd_pin : HPD_NONE;
+}
+
+/**
+ * intel_hpd_irq_storm_detect - gather stats and detect HPD IRQ storm on a pin
+ * @dev_priv: private driver data pointer
+ * @pin: the pin to gather stats on
+ * @long_hpd: whether the HPD IRQ was long or short
+ *
+ * Gather stats about HPD IRQs from the specified @pin, and detect IRQ
+ * storms. Only the pin specific stats and state are changed, the caller is
+ * responsible for further action.
+ *
+ * The number of IRQs that are allowed within @HPD_STORM_DETECT_PERIOD is
+ * stored in @dev_priv->display.hotplug.hpd_storm_threshold which defaults to
+ * @HPD_STORM_DEFAULT_THRESHOLD. Long IRQs count as +10 to this threshold, and
+ * short IRQs count as +1. If this threshold is exceeded, it's considered an
+ * IRQ storm and the IRQ state is set to @HPD_MARK_DISABLED.
+ *
+ * By default, most systems will only count long IRQs towards
+ * &dev_priv->display.hotplug.hpd_storm_threshold. However, some older systems also
+ * suffer from short IRQ storms and must also track these. Because short IRQ
+ * storms are naturally caused by sideband interactions with DP MST devices,
+ * short IRQ detection is only enabled for systems without DP MST support.
+ * Systems which are new enough to support DP MST are far less likely to
+ * suffer from IRQ storms at all, so this is fine.
+ *
+ * The HPD threshold can be controlled through i915_hpd_storm_ctl in debugfs,
+ * and should only be adjusted for automated hotplug testing.
+ *
+ * Return true if an IRQ storm was detected on @pin.
+ */
+static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
+ enum hpd_pin pin, bool long_hpd)
+{
+ struct intel_hotplug *hpd = &dev_priv->display.hotplug;
+ unsigned long start = hpd->stats[pin].last_jiffies;
+ unsigned long end = start + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD);
+ const int increment = long_hpd ? 10 : 1;
+ const int threshold = hpd->hpd_storm_threshold;
+ bool storm = false;
+
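+	/*
+	 * E.g. with a threshold of 50, more than five long IRQs (10 each)
+	 * within HPD_STORM_DETECT_PERIOD mark the pin as storming.
+	 */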
+ if (!threshold ||
+ (!long_hpd && !dev_priv->display.hotplug.hpd_short_storm_enabled))
+ return false;
+
+ if (!time_in_range(jiffies, start, end)) {
+ hpd->stats[pin].last_jiffies = jiffies;
+ hpd->stats[pin].count = 0;
+ }
+
+ hpd->stats[pin].count += increment;
+ if (hpd->stats[pin].count > threshold) {
+ hpd->stats[pin].state = HPD_MARK_DISABLED;
+ drm_dbg_kms(&dev_priv->drm,
+ "HPD interrupt storm detected on PIN %d\n", pin);
+ storm = true;
+ } else {
+ drm_dbg_kms(&dev_priv->drm,
+ "Received HPD interrupt on PIN %d - cnt: %d\n",
+ pin,
+ hpd->stats[pin].count);
+ }
+
+ return storm;
+}
+
+static void
+intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = &dev_priv->drm;
+ struct drm_connector_list_iter conn_iter;
+ struct intel_connector *connector;
+ bool hpd_disabled = false;
+
+ lockdep_assert_held(&dev_priv->irq_lock);
+
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ for_each_intel_connector_iter(connector, &conn_iter) {
+ enum hpd_pin pin;
+
+ if (connector->base.polled != DRM_CONNECTOR_POLL_HPD)
+ continue;
+
+ pin = intel_connector_hpd_pin(connector);
+ if (pin == HPD_NONE ||
+ dev_priv->display.hotplug.stats[pin].state != HPD_MARK_DISABLED)
+ continue;
+
+ drm_info(&dev_priv->drm,
+ "HPD interrupt storm detected on connector %s: "
+ "switching from hotplug detection to polling\n",
+ connector->base.name);
+
+ dev_priv->display.hotplug.stats[pin].state = HPD_DISABLED;
+ connector->base.polled = DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT;
+ hpd_disabled = true;
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ /* Enable polling and queue hotplug re-enabling. */
+ if (hpd_disabled) {
+ drm_kms_helper_poll_enable(dev);
+ mod_delayed_work(system_wq, &dev_priv->display.hotplug.reenable_work,
+ msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
+ }
+}
+
+static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(work, typeof(*dev_priv),
+ display.hotplug.reenable_work.work);
+ struct drm_device *dev = &dev_priv->drm;
+ struct drm_connector_list_iter conn_iter;
+ struct intel_connector *connector;
+ intel_wakeref_t wakeref;
+ enum hpd_pin pin;
+
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+
+ spin_lock_irq(&dev_priv->irq_lock);
+
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ for_each_intel_connector_iter(connector, &conn_iter) {
+ pin = intel_connector_hpd_pin(connector);
+ if (pin == HPD_NONE ||
+ dev_priv->display.hotplug.stats[pin].state != HPD_DISABLED)
+ continue;
+
+ if (connector->base.polled != connector->polled)
+ drm_dbg(&dev_priv->drm,
+ "Reenabling HPD on connector %s\n",
+ connector->base.name);
+ connector->base.polled = connector->polled;
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ for_each_hpd_pin(pin) {
+ if (dev_priv->display.hotplug.stats[pin].state == HPD_DISABLED)
+ dev_priv->display.hotplug.stats[pin].state = HPD_ENABLED;
+ }
+
+ intel_hpd_irq_setup(dev_priv);
+
+ spin_unlock_irq(&dev_priv->irq_lock);
+
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+}
+
+enum intel_hotplug_state
+intel_encoder_hotplug(struct intel_encoder *encoder,
+ struct intel_connector *connector)
+{
+ struct drm_device *dev = connector->base.dev;
+ enum drm_connector_status old_status;
+ u64 old_epoch_counter;
+ bool ret = false;
+
+ drm_WARN_ON(dev, !mutex_is_locked(&dev->mode_config.mutex));
+ old_status = connector->base.status;
+ old_epoch_counter = connector->base.epoch_counter;
+
+ connector->base.status =
+ drm_helper_probe_detect(&connector->base, NULL, false);
+
+ if (old_epoch_counter != connector->base.epoch_counter)
+ ret = true;
+
+ if (ret) {
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] status updated from %s to %s (epoch counter %llu->%llu)\n",
+ connector->base.base.id,
+ connector->base.name,
+ drm_get_connector_status_name(old_status),
+ drm_get_connector_status_name(connector->base.status),
+ old_epoch_counter,
+ connector->base.epoch_counter);
+ return INTEL_HOTPLUG_CHANGED;
+ }
+ return INTEL_HOTPLUG_UNCHANGED;
+}
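+
+/*
+ * Illustrative sketch only, not part of this patch: an encoder specific
+ * ->hotplug() hook typically wraps intel_encoder_hotplug() and can ask
+ * i915_hotplug_work_func() for a bounded retry when the first probe is
+ * inconclusive. The retry limit of 5 is made up for the example; the
+ * block is guarded out so it is never compiled.
+ */
+#if 0
+static enum intel_hotplug_state
+example_encoder_hotplug(struct intel_encoder *encoder,
+			struct intel_connector *connector)
+{
+	enum intel_hotplug_state state;
+
+	state = intel_encoder_hotplug(encoder, connector);
+
+	/* Schedule another probe attempt via the retry_bits machinery. */
+	if (state == INTEL_HOTPLUG_UNCHANGED && connector->hotplug_retries < 5)
+		state = INTEL_HOTPLUG_RETRY;
+
+	return state;
+}
+#endif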
+
+static bool intel_encoder_has_hpd_pulse(struct intel_encoder *encoder)
+{
+ return intel_encoder_is_dig_port(encoder) &&
+ enc_to_dig_port(encoder)->hpd_pulse != NULL;
+}
+
+static void i915_digport_work_func(struct work_struct *work)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(work, struct drm_i915_private, display.hotplug.dig_port_work);
+ u32 long_port_mask, short_port_mask;
+ struct intel_encoder *encoder;
+ u32 old_bits = 0;
+
+ spin_lock_irq(&dev_priv->irq_lock);
+ long_port_mask = dev_priv->display.hotplug.long_port_mask;
+ dev_priv->display.hotplug.long_port_mask = 0;
+ short_port_mask = dev_priv->display.hotplug.short_port_mask;
+ dev_priv->display.hotplug.short_port_mask = 0;
+ spin_unlock_irq(&dev_priv->irq_lock);
+
+ for_each_intel_encoder(&dev_priv->drm, encoder) {
+ struct intel_digital_port *dig_port;
+ enum port port = encoder->port;
+ bool long_hpd, short_hpd;
+ enum irqreturn ret;
+
+ if (!intel_encoder_has_hpd_pulse(encoder))
+ continue;
+
+ long_hpd = long_port_mask & BIT(port);
+ short_hpd = short_port_mask & BIT(port);
+
+ if (!long_hpd && !short_hpd)
+ continue;
+
+ dig_port = enc_to_dig_port(encoder);
+
+ ret = dig_port->hpd_pulse(dig_port, long_hpd);
+ if (ret == IRQ_NONE) {
+ /* fall back to old school hpd */
+ old_bits |= BIT(encoder->hpd_pin);
+ }
+ }
+
+ if (old_bits) {
+ spin_lock_irq(&dev_priv->irq_lock);
+ dev_priv->display.hotplug.event_bits |= old_bits;
+ spin_unlock_irq(&dev_priv->irq_lock);
+ queue_delayed_work(system_wq, &dev_priv->display.hotplug.hotplug_work, 0);
+ }
+}
+
+/**
+ * intel_hpd_trigger_irq - trigger an hpd irq event for a port
+ * @dig_port: digital port
+ *
+ * Trigger an HPD interrupt event for the given port, emulating a short pulse
+ * generated by the sink, and schedule the dig port work to handle it.
+ */
+void intel_hpd_trigger_irq(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+
+ spin_lock_irq(&i915->irq_lock);
+ i915->display.hotplug.short_port_mask |= BIT(dig_port->base.port);
+ spin_unlock_irq(&i915->irq_lock);
+
+ queue_work(i915->display.hotplug.dp_wq, &i915->display.hotplug.dig_port_work);
+}
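+
+/*
+ * Illustrative sketch only, not part of this patch: a caller that wants
+ * to re-run the short pulse handling for a port, for instance after
+ * updating state that ->hpd_pulse() should pick up, only needs the
+ * digital port:
+ *
+ *	intel_hpd_trigger_irq(dp_to_dig_port(intel_dp));
+ *
+ * dig_port->hpd_pulse() is then invoked from i915_digport_work_func()
+ * with long_hpd == false.
+ */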
+
+/*
+ * Handle hotplug events outside the interrupt handler proper.
+ */
+static void i915_hotplug_work_func(struct work_struct *work)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(work, struct drm_i915_private,
+ display.hotplug.hotplug_work.work);
+ struct drm_device *dev = &dev_priv->drm;
+ struct drm_connector_list_iter conn_iter;
+ struct intel_connector *connector;
+ u32 changed = 0, retry = 0;
+ u32 hpd_event_bits;
+ u32 hpd_retry_bits;
+
+ mutex_lock(&dev->mode_config.mutex);
+ drm_dbg_kms(&dev_priv->drm, "running encoder hotplug functions\n");
+
+ spin_lock_irq(&dev_priv->irq_lock);
+
+ hpd_event_bits = dev_priv->display.hotplug.event_bits;
+ dev_priv->display.hotplug.event_bits = 0;
+ hpd_retry_bits = dev_priv->display.hotplug.retry_bits;
+ dev_priv->display.hotplug.retry_bits = 0;
+
+ /* Enable polling for connectors which had HPD IRQ storms */
+ intel_hpd_irq_storm_switch_to_polling(dev_priv);
+
+ spin_unlock_irq(&dev_priv->irq_lock);
+
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ for_each_intel_connector_iter(connector, &conn_iter) {
+ enum hpd_pin pin;
+ u32 hpd_bit;
+
+ pin = intel_connector_hpd_pin(connector);
+ if (pin == HPD_NONE)
+ continue;
+
+ hpd_bit = BIT(pin);
+ if ((hpd_event_bits | hpd_retry_bits) & hpd_bit) {
+ struct intel_encoder *encoder =
+ intel_attached_encoder(connector);
+
+ if (hpd_event_bits & hpd_bit)
+ connector->hotplug_retries = 0;
+ else
+ connector->hotplug_retries++;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "Connector %s (pin %i) received hotplug event. (retry %d)\n",
+ connector->base.name, pin,
+ connector->hotplug_retries);
+
+ switch (encoder->hotplug(encoder, connector)) {
+ case INTEL_HOTPLUG_UNCHANGED:
+ break;
+ case INTEL_HOTPLUG_CHANGED:
+ changed |= hpd_bit;
+ break;
+ case INTEL_HOTPLUG_RETRY:
+ retry |= hpd_bit;
+ break;
+ }
+ }
+ }
+ drm_connector_list_iter_end(&conn_iter);
+ mutex_unlock(&dev->mode_config.mutex);
+
+ if (changed)
+ drm_kms_helper_hotplug_event(dev);
+
+ /* Remove shared HPD pins that have changed */
+ retry &= ~changed;
+ if (retry) {
+ spin_lock_irq(&dev_priv->irq_lock);
+ dev_priv->display.hotplug.retry_bits |= retry;
+ spin_unlock_irq(&dev_priv->irq_lock);
+
+ mod_delayed_work(system_wq, &dev_priv->display.hotplug.hotplug_work,
+ msecs_to_jiffies(HPD_RETRY_DELAY));
+ }
+}
+
+
+/**
+ * intel_hpd_irq_handler - main hotplug irq handler
+ * @dev_priv: drm_i915_private
+ * @pin_mask: a mask of hpd pins that have triggered the irq
+ * @long_mask: a mask of hpd pins that may be long hpd pulses
+ *
+ * This is the main hotplug irq handler for all platforms. The platform specific
+ * irq handlers call the platform specific hotplug irq handlers, which read and
+ * decode the appropriate registers into bitmasks about hpd pins that have
+ * triggered (@pin_mask), and which of those pins may be long pulses
+ * (@long_mask). The @long_mask is ignored if the port corresponding to the pin
+ * is not a digital port.
+ *
+ * Here, we do hotplug irq storm detection and mitigation, and pass further
+ * processing to appropriate bottom halves.
+ */
+void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
+ u32 pin_mask, u32 long_mask)
+{
+ struct intel_encoder *encoder;
+ bool storm_detected = false;
+ bool queue_dig = false, queue_hp = false;
+ u32 long_hpd_pulse_mask = 0;
+ u32 short_hpd_pulse_mask = 0;
+ enum hpd_pin pin;
+
+ if (!pin_mask)
+ return;
+
+ spin_lock(&dev_priv->irq_lock);
+
+ /*
+ * Determine whether ->hpd_pulse() exists for each pin, and
+ * whether we have a short or a long pulse. This is needed
+ * as each pin may have up to two encoders (HDMI and DP) and
+	 * only one of them (DP) will have ->hpd_pulse().
+ */
+ for_each_intel_encoder(&dev_priv->drm, encoder) {
+ enum port port = encoder->port;
+ bool long_hpd;
+
+ pin = encoder->hpd_pin;
+ if (!(BIT(pin) & pin_mask))
+ continue;
+
+ if (!intel_encoder_has_hpd_pulse(encoder))
+ continue;
+
+ long_hpd = long_mask & BIT(pin);
+
+ drm_dbg(&dev_priv->drm,
+ "digital hpd on [ENCODER:%d:%s] - %s\n",
+ encoder->base.base.id, encoder->base.name,
+ long_hpd ? "long" : "short");
+ queue_dig = true;
+
+ if (long_hpd) {
+ long_hpd_pulse_mask |= BIT(pin);
+ dev_priv->display.hotplug.long_port_mask |= BIT(port);
+ } else {
+ short_hpd_pulse_mask |= BIT(pin);
+ dev_priv->display.hotplug.short_port_mask |= BIT(port);
+ }
+ }
+
+ /* Now process each pin just once */
+ for_each_hpd_pin(pin) {
+ bool long_hpd;
+
+ if (!(BIT(pin) & pin_mask))
+ continue;
+
+ if (dev_priv->display.hotplug.stats[pin].state == HPD_DISABLED) {
+ /*
+ * On GMCH platforms the interrupt mask bits only
+			 * prevent irq generation, not the setting of the
+			 * hotplug bits themselves. So only WARN about unexpected
+ * interrupts on saner platforms.
+ */
+ drm_WARN_ONCE(&dev_priv->drm, !HAS_GMCH(dev_priv),
+ "Received HPD interrupt on pin %d although disabled\n",
+ pin);
+ continue;
+ }
+
+ if (dev_priv->display.hotplug.stats[pin].state != HPD_ENABLED)
+ continue;
+
+ /*
+ * Delegate to ->hpd_pulse() if one of the encoders for this
+ * pin has it, otherwise let the hotplug_work deal with this
+ * pin directly.
+ */
+ if (((short_hpd_pulse_mask | long_hpd_pulse_mask) & BIT(pin))) {
+ long_hpd = long_hpd_pulse_mask & BIT(pin);
+ } else {
+ dev_priv->display.hotplug.event_bits |= BIT(pin);
+ long_hpd = true;
+ queue_hp = true;
+ }
+
+ if (intel_hpd_irq_storm_detect(dev_priv, pin, long_hpd)) {
+ dev_priv->display.hotplug.event_bits &= ~BIT(pin);
+ storm_detected = true;
+ queue_hp = true;
+ }
+ }
+
+ /*
+ * Disable any IRQs that storms were detected on. Polling enablement
+ * happens later in our hotplug work.
+ */
+ if (storm_detected)
+ intel_hpd_irq_setup(dev_priv);
+ spin_unlock(&dev_priv->irq_lock);
+
+ /*
+ * Our hotplug handler can grab modeset locks (by calling down into the
+ * fb helpers). Hence it must not be run on our own dev-priv->wq work
+	 * queue, as otherwise the flush_work in the pageflip code would
+ * deadlock.
+ */
+ if (queue_dig)
+ queue_work(dev_priv->display.hotplug.dp_wq, &dev_priv->display.hotplug.dig_port_work);
+ if (queue_hp)
+ queue_delayed_work(system_wq, &dev_priv->display.hotplug.hotplug_work, 0);
+}
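+
+/*
+ * Illustrative sketch only, not part of this patch: a platform specific
+ * IRQ handler decodes its hotplug registers into the two masks and then
+ * delegates here. The EXAMPLE_* register bits are invented for the
+ * example; the block is guarded out so it is never compiled.
+ */
+#if 0
+static void example_hpd_irq_handler(struct drm_i915_private *dev_priv,
+				    u32 hotplug_trigger)
+{
+	u32 pin_mask = 0, long_mask = 0;
+
+	if (hotplug_trigger & EXAMPLE_PORTB_HOTPLUG) {
+		pin_mask |= BIT(HPD_PORT_B);
+		if (hotplug_trigger & EXAMPLE_PORTB_HOTPLUG_LONG)
+			long_mask |= BIT(HPD_PORT_B);
+	}
+
+	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
+}
+#endif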
+
+/**
+ * intel_hpd_init - initializes and enables hpd support
+ * @dev_priv: i915 device instance
+ *
+ * This function enables the hotplug support. It requires that interrupts have
+ * already been enabled with intel_irq_init_hw(). From this point on hotplug and
 * poll requests can run concurrently with other code, so locking rules must be
+ * obeyed.
+ *
+ * This is a separate step from interrupt enabling to simplify the locking rules
+ * in the driver load and resume code.
+ *
+ * Also see: intel_hpd_poll_enable() and intel_hpd_poll_disable().
+ */
+void intel_hpd_init(struct drm_i915_private *dev_priv)
+{
+ int i;
+
+ if (!HAS_DISPLAY(dev_priv))
+ return;
+
+ for_each_hpd_pin(i) {
+ dev_priv->display.hotplug.stats[i].count = 0;
+ dev_priv->display.hotplug.stats[i].state = HPD_ENABLED;
+ }
+
+ /*
+ * Interrupt setup is already guaranteed to be single-threaded, this is
+ * just to make the assert_spin_locked checks happy.
+ */
+ spin_lock_irq(&dev_priv->irq_lock);
+ intel_hpd_irq_setup(dev_priv);
+ spin_unlock_irq(&dev_priv->irq_lock);
+}
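+
+/*
+ * Illustrative sketch only, not part of this patch: the load/resume
+ * ordering implied by the comment above, simplified. Guarded out so it
+ * is never compiled.
+ */
+#if 0
+static void example_driver_load_irq_and_hpd(struct drm_i915_private *dev_priv)
+{
+	intel_irq_init_hw(dev_priv);	/* enable interrupts first */
+	intel_hpd_init(dev_priv);	/* then arm hotplug support */
+}
+#endif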
+
+static void i915_hpd_poll_init_work(struct work_struct *work)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(work, struct drm_i915_private,
+ display.hotplug.poll_init_work);
+ struct drm_device *dev = &dev_priv->drm;
+ struct drm_connector_list_iter conn_iter;
+ struct intel_connector *connector;
+ bool enabled;
+
+ mutex_lock(&dev->mode_config.mutex);
+
+ enabled = READ_ONCE(dev_priv->display.hotplug.poll_enabled);
+
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ for_each_intel_connector_iter(connector, &conn_iter) {
+ enum hpd_pin pin;
+
+ pin = intel_connector_hpd_pin(connector);
+ if (pin == HPD_NONE)
+ continue;
+
+ connector->base.polled = connector->polled;
+
+ if (enabled && connector->base.polled == DRM_CONNECTOR_POLL_HPD)
+ connector->base.polled = DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT;
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ if (enabled)
+ drm_kms_helper_poll_enable(dev);
+
+ mutex_unlock(&dev->mode_config.mutex);
+
+ /*
+	 * We might have missed hotplug events that happened while we were
+	 * in the middle of disabling polling.
+ */
+ if (!enabled)
+ drm_helper_hpd_irq_event(dev);
+}
+
+/**
+ * intel_hpd_poll_enable - enable polling for connectors with hpd
+ * @dev_priv: i915 device instance
+ *
+ * This function enables polling for all connectors which support HPD.
+ * Under certain conditions HPD may not be functional. On most Intel GPUs,
+ * this happens when we enter runtime suspend.
+ * On Valleyview and Cherryview systems, this also happens when we shut off all
+ * of the powerwells.
+ *
+ * Since this function can get called in contexts where we're already holding
 * dev->mode_config.mutex, we do the actual hotplug enabling in a separate
+ * worker.
+ *
+ * Also see: intel_hpd_init() and intel_hpd_poll_disable().
+ */
+void intel_hpd_poll_enable(struct drm_i915_private *dev_priv)
+{
+ if (!HAS_DISPLAY(dev_priv) ||
+ !INTEL_DISPLAY_ENABLED(dev_priv))
+ return;
+
+ WRITE_ONCE(dev_priv->display.hotplug.poll_enabled, true);
+
+ /*
+	 * We might already be holding dev->mode_config.mutex, so do this in a
+	 * separate worker.
+	 * There's also no issue if we race here, since we always reschedule
+	 * this worker anyway.
+ */
+ schedule_work(&dev_priv->display.hotplug.poll_init_work);
+}
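+
+/*
+ * Illustrative sketch only, not part of this patch: a typical pairing in
+ * the runtime PM paths, where HPD is unusable while suspended. Simplified
+ * for the example and guarded out so it is never compiled.
+ */
+#if 0
+static void example_runtime_suspend(struct drm_i915_private *dev_priv)
+{
+	intel_hpd_poll_enable(dev_priv);	/* HPD is dead: poll instead */
+}
+
+static void example_runtime_resume(struct drm_i915_private *dev_priv)
+{
+	intel_hpd_init(dev_priv);		/* re-arm HPD interrupts */
+	intel_hpd_poll_disable(dev_priv);	/* and stop polling */
+}
+#endif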
+
+/**
+ * intel_hpd_poll_disable - disable polling for connectors with hpd
+ * @dev_priv: i915 device instance
+ *
+ * This function disables polling for all connectors which support HPD.
+ * Under certain conditions HPD may not be functional. On most Intel GPUs,
+ * this happens when we enter runtime suspend.
+ * On Valleyview and Cherryview systems, this also happens when we shut off all
+ * of the powerwells.
+ *
+ * Since this function can get called in contexts where we're already holding
 * dev->mode_config.mutex, we do the actual polling disable in a separate
+ * worker.
+ *
+ * Also used during driver init to initialize connector->polled
+ * appropriately for all connectors.
+ *
+ * Also see: intel_hpd_init() and intel_hpd_poll_enable().
+ */
+void intel_hpd_poll_disable(struct drm_i915_private *dev_priv)
+{
+ if (!HAS_DISPLAY(dev_priv))
+ return;
+
+ WRITE_ONCE(dev_priv->display.hotplug.poll_enabled, false);
+ schedule_work(&dev_priv->display.hotplug.poll_init_work);
+}
+
+void intel_hpd_init_work(struct drm_i915_private *dev_priv)
+{
+ INIT_DELAYED_WORK(&dev_priv->display.hotplug.hotplug_work,
+ i915_hotplug_work_func);
+ INIT_WORK(&dev_priv->display.hotplug.dig_port_work, i915_digport_work_func);
+ INIT_WORK(&dev_priv->display.hotplug.poll_init_work, i915_hpd_poll_init_work);
+ INIT_DELAYED_WORK(&dev_priv->display.hotplug.reenable_work,
+ intel_hpd_irq_storm_reenable_work);
+}
+
+void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
+{
+ if (!HAS_DISPLAY(dev_priv))
+ return;
+
+ spin_lock_irq(&dev_priv->irq_lock);
+
+ dev_priv->display.hotplug.long_port_mask = 0;
+ dev_priv->display.hotplug.short_port_mask = 0;
+ dev_priv->display.hotplug.event_bits = 0;
+ dev_priv->display.hotplug.retry_bits = 0;
+
+ spin_unlock_irq(&dev_priv->irq_lock);
+
+ cancel_work_sync(&dev_priv->display.hotplug.dig_port_work);
+ cancel_delayed_work_sync(&dev_priv->display.hotplug.hotplug_work);
+ cancel_work_sync(&dev_priv->display.hotplug.poll_init_work);
+ cancel_delayed_work_sync(&dev_priv->display.hotplug.reenable_work);
+}
+
+bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
+{
+ bool ret = false;
+
+ if (pin == HPD_NONE)
+ return false;
+
+ spin_lock_irq(&dev_priv->irq_lock);
+ if (dev_priv->display.hotplug.stats[pin].state == HPD_ENABLED) {
+ dev_priv->display.hotplug.stats[pin].state = HPD_DISABLED;
+ ret = true;
+ }
+ spin_unlock_irq(&dev_priv->irq_lock);
+
+ return ret;
+}
+
+void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
+{
+ if (pin == HPD_NONE)
+ return;
+
+ spin_lock_irq(&dev_priv->irq_lock);
+ dev_priv->display.hotplug.stats[pin].state = HPD_ENABLED;
+ spin_unlock_irq(&dev_priv->irq_lock);
+}
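+
+/*
+ * Illustrative sketch only, not part of this patch: intel_hpd_disable()
+ * and intel_hpd_enable() let a caller mask hotplug processing for one pin
+ * around an operation that would otherwise raise spurious events:
+ *
+ *	bool was_enabled = intel_hpd_disable(dev_priv, pin);
+ *	(perform the disruptive operation here)
+ *	if (was_enabled)
+ *		intel_hpd_enable(dev_priv, pin);
+ */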
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.h b/drivers/gpu/drm/i915/display/intel_hotplug.h
new file mode 100644
index 000000000..b87e95d60
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_HOTPLUG_H__
+#define __INTEL_HOTPLUG_H__
+
+#include <linux/types.h>
+
+struct drm_i915_private;
+struct intel_connector;
+struct intel_digital_port;
+struct intel_encoder;
+enum port;
+
+void intel_hpd_poll_enable(struct drm_i915_private *dev_priv);
+void intel_hpd_poll_disable(struct drm_i915_private *dev_priv);
+enum intel_hotplug_state intel_encoder_hotplug(struct intel_encoder *encoder,
+ struct intel_connector *connector);
+void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
+ u32 pin_mask, u32 long_mask);
+void intel_hpd_trigger_irq(struct intel_digital_port *dig_port);
+void intel_hpd_init(struct drm_i915_private *dev_priv);
+void intel_hpd_init_work(struct drm_i915_private *dev_priv);
+void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
+enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
+ enum port port);
+bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
+void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
+
+#endif /* __INTEL_HOTPLUG_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_lpe_audio.c b/drivers/gpu/drm/i915/display/intel_lpe_audio.c
new file mode 100644
index 000000000..dca6003cc
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_lpe_audio.c
@@ -0,0 +1,368 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
+ * Jerome Anand <jerome.anand@intel.com>
+ * based on VED patches
+ *
+ */
+
+/**
+ * DOC: LPE Audio integration for HDMI or DP playback
+ *
+ * Motivation:
 * Atom platforms (e.g. Valleyview and Cherrytrail) integrate a DMA-based
+ * interface as an alternative to the traditional HDaudio path. While this
+ * mode is unrelated to the LPE aka SST audio engine, the documentation refers
+ * to this mode as LPE so we keep this notation for the sake of consistency.
+ *
+ * The interface is handled by a separate standalone driver maintained in the
+ * ALSA subsystem for simplicity. To minimize the interaction between the two
+ * subsystems, a bridge is setup between the hdmi-lpe-audio and i915:
+ * 1. Create a platform device to share MMIO/IRQ resources
+ * 2. Make the platform device child of i915 device for runtime PM.
+ * 3. Create IRQ chip to forward the LPE audio irqs.
 * The hdmi-lpe-audio driver probes the LPE audio device and creates a new
 * sound card.
+ *
+ * Threats:
 * Due to a restriction in the Linux platform device model, the user needs to
 * manually unload the hdmi-lpe-audio driver before unloading the i915 module;
 * otherwise we might run into use-after-free issues after i915 removes the
 * platform device: even though the hdmi-lpe-audio driver is released, the
 * module is still in "installed" status.
+ *
+ * Implementation:
 * The MMIO/REG platform resources are created according to the register
 * specification.
 * When forwarding LPE audio irqs, the flow control handler selection depends
 * on the platform; for example, on Valleyview handle_simple_irq is enough.
+ *
+ */
+
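+/*
+ * Illustrative sketch only, not part of this patch: on the ALSA side the
+ * hdmi-lpe-audio driver is expected to fetch the shared state through the
+ * platform data created below and install its notification callback.
+ * example_notify_audio_lpe() is an invented name standing in for the
+ * audio driver's callback; the block is guarded out so it is never
+ * compiled.
+ */
+#if 0
+static int example_lpe_audio_probe(struct platform_device *pdev)
+{
+	struct intel_hdmi_lpe_audio_pdata *pdata = dev_get_platdata(&pdev->dev);
+
+	if (!pdata)
+		return -EINVAL;
+
+	pdata->notify_audio_lpe = example_notify_audio_lpe;
+	return 0;
+}
+#endif
+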
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/irq.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include <drm/intel_lpe_audio.h>
+
+#include "i915_drv.h"
+#include "intel_de.h"
+#include "intel_lpe_audio.h"
+#include "intel_pci_config.h"
+
+#define HAS_LPE_AUDIO(dev_priv) ((dev_priv)->display.audio.lpe.platdev != NULL)
+
+static struct platform_device *
+lpe_audio_platdev_create(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = &dev_priv->drm;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+ struct platform_device_info pinfo = {};
+ struct resource *rsc;
+ struct platform_device *platdev;
+ struct intel_hdmi_lpe_audio_pdata *pdata;
+
+ pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return ERR_PTR(-ENOMEM);
+
+ rsc = kcalloc(2, sizeof(*rsc), GFP_KERNEL);
+ if (!rsc) {
+ kfree(pdata);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ rsc[0].start = rsc[0].end = dev_priv->display.audio.lpe.irq;
+ rsc[0].flags = IORESOURCE_IRQ;
+ rsc[0].name = "hdmi-lpe-audio-irq";
+
+ rsc[1].start = pci_resource_start(pdev, GTTMMADR_BAR) +
+ I915_HDMI_LPE_AUDIO_BASE;
+ rsc[1].end = pci_resource_start(pdev, GTTMMADR_BAR) +
+ I915_HDMI_LPE_AUDIO_BASE + I915_HDMI_LPE_AUDIO_SIZE - 1;
+ rsc[1].flags = IORESOURCE_MEM;
+ rsc[1].name = "hdmi-lpe-audio-mmio";
+
+ pinfo.parent = dev->dev;
+ pinfo.name = "hdmi-lpe-audio";
+ pinfo.id = -1;
+ pinfo.res = rsc;
+ pinfo.num_res = 2;
+ pinfo.data = pdata;
+ pinfo.size_data = sizeof(*pdata);
+ pinfo.dma_mask = DMA_BIT_MASK(32);
+
+ pdata->num_pipes = INTEL_NUM_PIPES(dev_priv);
+ pdata->num_ports = IS_CHERRYVIEW(dev_priv) ? 3 : 2; /* B,C,D or B,C */
+ pdata->port[0].pipe = -1;
+ pdata->port[1].pipe = -1;
+ pdata->port[2].pipe = -1;
+ spin_lock_init(&pdata->lpe_audio_slock);
+
+ platdev = platform_device_register_full(&pinfo);
+ kfree(rsc);
+ kfree(pdata);
+
+ if (IS_ERR(platdev)) {
+ drm_err(&dev_priv->drm,
+ "Failed to allocate LPE audio platform device\n");
+ return platdev;
+ }
+
+ pm_runtime_no_callbacks(&platdev->dev);
+
+ return platdev;
+}
+
+static void lpe_audio_platdev_destroy(struct drm_i915_private *dev_priv)
+{
+ /* XXX Note that platform_device_register_full() allocates a dma_mask
+ * and never frees it. We can't free it here as we cannot guarantee
+ * this is the last reference (i.e. that the dma_mask will not be
+	 * used after our unregister). So we choose to leak the sizeof(u64)
+	 * allocation here - it should be fixed in the platform_device code
+	 * rather than by us fiddling with its internals.
+ */
+
+ platform_device_unregister(dev_priv->display.audio.lpe.platdev);
+}
+
+static void lpe_audio_irq_unmask(struct irq_data *d)
+{
+}
+
+static void lpe_audio_irq_mask(struct irq_data *d)
+{
+}
+
+static struct irq_chip lpe_audio_irqchip = {
+ .name = "hdmi_lpe_audio_irqchip",
+ .irq_mask = lpe_audio_irq_mask,
+ .irq_unmask = lpe_audio_irq_unmask,
+};
+
+static int lpe_audio_irq_init(struct drm_i915_private *dev_priv)
+{
+ int irq = dev_priv->display.audio.lpe.irq;
+
+ drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));
+ irq_set_chip_and_handler_name(irq,
+ &lpe_audio_irqchip,
+ handle_simple_irq,
+ "hdmi_lpe_audio_irq_handler");
+
+ return irq_set_chip_data(irq, dev_priv);
+}
+
+static bool lpe_audio_detect(struct drm_i915_private *dev_priv)
+{
+	bool lpe_present = false;
+
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ static const struct pci_device_id atom_hdaudio_ids[] = {
+ /* Baytrail */
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f04)},
+ /* Braswell */
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2284)},
+ {}
+ };
+
+ if (!pci_dev_present(atom_hdaudio_ids)) {
+ drm_info(&dev_priv->drm,
+ "HDaudio controller not detected, using LPE audio instead\n");
+ lpe_present = true;
+ }
+ }
+ return lpe_present;
+}
+
+static int lpe_audio_setup(struct drm_i915_private *dev_priv)
+{
+ int ret;
+
+ dev_priv->display.audio.lpe.irq = irq_alloc_desc(0);
+ if (dev_priv->display.audio.lpe.irq < 0) {
+ drm_err(&dev_priv->drm, "Failed to allocate IRQ desc: %d\n",
+ dev_priv->display.audio.lpe.irq);
+ ret = dev_priv->display.audio.lpe.irq;
+ goto err;
+ }
+
+ drm_dbg(&dev_priv->drm, "irq = %d\n", dev_priv->display.audio.lpe.irq);
+
+ ret = lpe_audio_irq_init(dev_priv);
+
+ if (ret) {
+ drm_err(&dev_priv->drm,
+ "Failed to initialize irqchip for lpe audio: %d\n",
+ ret);
+ goto err_free_irq;
+ }
+
+ dev_priv->display.audio.lpe.platdev = lpe_audio_platdev_create(dev_priv);
+
+ if (IS_ERR(dev_priv->display.audio.lpe.platdev)) {
+ ret = PTR_ERR(dev_priv->display.audio.lpe.platdev);
+ drm_err(&dev_priv->drm,
+ "Failed to create lpe audio platform device: %d\n",
+ ret);
+ goto err_free_irq;
+ }
+
+	/* Enable the chicken bit; at least this is required for Dell Wyse 3040
+	 * with DP outputs (but only sometimes, for some reason!)
+ */
+ intel_de_write(dev_priv, VLV_AUD_CHICKEN_BIT_REG,
+ VLV_CHICKEN_BIT_DBG_ENABLE);
+
+ return 0;
+err_free_irq:
+ irq_free_desc(dev_priv->display.audio.lpe.irq);
+err:
+ dev_priv->display.audio.lpe.irq = -1;
+ dev_priv->display.audio.lpe.platdev = NULL;
+ return ret;
+}
+
+/**
+ * intel_lpe_audio_irq_handler() - forwards the LPE audio irq
+ * @dev_priv: the i915 drm device private data
+ *
 * The LPE audio irq is forwarded to the irq handler registered by the LPE
 * audio driver.
+ */
+void intel_lpe_audio_irq_handler(struct drm_i915_private *dev_priv)
+{
+ int ret;
+
+ if (!HAS_LPE_AUDIO(dev_priv))
+ return;
+
+ ret = generic_handle_irq(dev_priv->display.audio.lpe.irq);
+ if (ret)
+ drm_err_ratelimited(&dev_priv->drm,
+ "error handling LPE audio irq: %d\n", ret);
+}
+
+/**
+ * intel_lpe_audio_init() - detect and setup the bridge between HDMI LPE Audio
+ * driver and i915
+ * @dev_priv: the i915 drm device private data
+ *
 * Return: 0 if successful, non-zero if detection or
 * allocation/initialization fails
+ */
+int intel_lpe_audio_init(struct drm_i915_private *dev_priv)
+{
+ int ret = -ENODEV;
+
+ if (lpe_audio_detect(dev_priv)) {
+ ret = lpe_audio_setup(dev_priv);
+ if (ret < 0)
+ drm_err(&dev_priv->drm,
+ "failed to setup LPE Audio bridge\n");
+ }
+ return ret;
+}
+
+/**
+ * intel_lpe_audio_teardown() - destroy the bridge between HDMI LPE
+ * audio driver and i915
+ * @dev_priv: the i915 drm device private data
+ *
+ * release all the resources for LPE audio <-> i915 bridge.
+ */
+void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv)
+{
+ if (!HAS_LPE_AUDIO(dev_priv))
+ return;
+
+ lpe_audio_platdev_destroy(dev_priv);
+
+ irq_free_desc(dev_priv->display.audio.lpe.irq);
+
+ dev_priv->display.audio.lpe.irq = -1;
+ dev_priv->display.audio.lpe.platdev = NULL;
+}
+
+/**
 * intel_lpe_audio_notify() - notify the LPE audio driver of an audio event
+ * @dev_priv: the i915 drm device private data
+ * @pipe: pipe
+ * @port: port
 * @eld: ELD data
+ * @ls_clock: Link symbol clock in kHz
+ * @dp_output: Driving a DP output?
+ *
 * Notify the LPE audio driver of an ELD change.
+ */
+void intel_lpe_audio_notify(struct drm_i915_private *dev_priv,
+ enum pipe pipe, enum port port,
+ const void *eld, int ls_clock, bool dp_output)
+{
+ unsigned long irqflags;
+ struct intel_hdmi_lpe_audio_pdata *pdata;
+ struct intel_hdmi_lpe_audio_port_pdata *ppdata;
+ u32 audio_enable;
+
+ if (!HAS_LPE_AUDIO(dev_priv))
+ return;
+
+ pdata = dev_get_platdata(&dev_priv->display.audio.lpe.platdev->dev);
+ ppdata = &pdata->port[port - PORT_B];
+
+ spin_lock_irqsave(&pdata->lpe_audio_slock, irqflags);
+
+ audio_enable = intel_de_read(dev_priv, VLV_AUD_PORT_EN_DBG(port));
+
+ if (eld != NULL) {
+ memcpy(ppdata->eld, eld, HDMI_MAX_ELD_BYTES);
+ ppdata->pipe = pipe;
+ ppdata->ls_clock = ls_clock;
+ ppdata->dp_output = dp_output;
+
+ /* Unmute the amp for both DP and HDMI */
+ intel_de_write(dev_priv, VLV_AUD_PORT_EN_DBG(port),
+ audio_enable & ~VLV_AMP_MUTE);
+ } else {
+ memset(ppdata->eld, 0, HDMI_MAX_ELD_BYTES);
+ ppdata->pipe = -1;
+ ppdata->ls_clock = 0;
+ ppdata->dp_output = false;
+
+ /* Mute the amp for both DP and HDMI */
+ intel_de_write(dev_priv, VLV_AUD_PORT_EN_DBG(port),
+ audio_enable | VLV_AMP_MUTE);
+ }
+
+ if (pdata->notify_audio_lpe)
+ pdata->notify_audio_lpe(dev_priv->display.audio.lpe.platdev, port - PORT_B);
+
+ spin_unlock_irqrestore(&pdata->lpe_audio_slock, irqflags);
+}
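+
+/*
+ * Illustrative sketch only, not part of this patch: a caller notifies an
+ * ELD change on enable with the port's ELD, and tears it down on disable
+ * with a NULL ELD so the amp is muted and the per-port state cleared:
+ *
+ *	intel_lpe_audio_notify(dev_priv, pipe, port, eld, ls_clock, dp_output);
+ *	intel_lpe_audio_notify(dev_priv, pipe, port, NULL, 0, false);
+ */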
diff --git a/drivers/gpu/drm/i915/display/intel_lpe_audio.h b/drivers/gpu/drm/i915/display/intel_lpe_audio.h
new file mode 100644
index 000000000..f848c5038
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_lpe_audio.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_LPE_AUDIO_H__
+#define __INTEL_LPE_AUDIO_H__
+
+#include <linux/types.h>
+
+enum pipe;
+enum port;
+struct drm_i915_private;
+
+int intel_lpe_audio_init(struct drm_i915_private *dev_priv);
+void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv);
+void intel_lpe_audio_irq_handler(struct drm_i915_private *dev_priv);
+void intel_lpe_audio_notify(struct drm_i915_private *dev_priv,
+ enum pipe pipe, enum port port,
+ const void *eld, int ls_clock, bool dp_output);
+
+#endif /* __INTEL_LPE_AUDIO_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.c b/drivers/gpu/drm/i915/display/intel_lspcon.c
new file mode 100644
index 000000000..15d59de88
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_lspcon.c
@@ -0,0 +1,716 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
+
+#include <drm/display/drm_dp_dual_mode_helper.h>
+#include <drm/display/drm_hdmi_helper.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_edid.h>
+
+#include "intel_de.h"
+#include "intel_display_types.h"
+#include "intel_dp.h"
+#include "intel_lspcon.h"
+#include "intel_hdmi.h"
+
+/* LSPCON OUI Vendor ID(signatures) */
+#define LSPCON_VENDOR_PARADE_OUI 0x001CF8
+#define LSPCON_VENDOR_MCA_OUI 0x0060AD
+
+#define DPCD_MCA_LSPCON_HDR_STATUS 0x70003
+#define DPCD_PARADE_LSPCON_HDR_STATUS 0x00511
+
+/* AUX addresses to write MCA AVI IF */
+#define LSPCON_MCA_AVI_IF_WRITE_OFFSET 0x5C0
+#define LSPCON_MCA_AVI_IF_CTRL 0x5DF
+#define LSPCON_MCA_AVI_IF_KICKOFF (1 << 0)
+#define LSPCON_MCA_AVI_IF_HANDLED (1 << 1)
+
+/* AUX addresses to write Parade AVI IF */
+#define LSPCON_PARADE_AVI_IF_WRITE_OFFSET 0x516
+#define LSPCON_PARADE_AVI_IF_CTRL 0x51E
+#define LSPCON_PARADE_AVI_IF_KICKOFF (1 << 7)
+#define LSPCON_PARADE_AVI_IF_DATA_SIZE 32
+
+static struct intel_dp *lspcon_to_intel_dp(struct intel_lspcon *lspcon)
+{
+ struct intel_digital_port *dig_port =
+ container_of(lspcon, struct intel_digital_port, lspcon);
+
+ return &dig_port->dp;
+}
+
+static const char *lspcon_mode_name(enum drm_lspcon_mode mode)
+{
+ switch (mode) {
+ case DRM_LSPCON_MODE_PCON:
+ return "PCON";
+ case DRM_LSPCON_MODE_LS:
+ return "LS";
+ case DRM_LSPCON_MODE_INVALID:
+ return "INVALID";
+ default:
+ MISSING_CASE(mode);
+ return "INVALID";
+ }
+}
+
+static bool lspcon_detect_vendor(struct intel_lspcon *lspcon)
+{
+ struct intel_dp *dp = lspcon_to_intel_dp(lspcon);
+ struct drm_i915_private *i915 = dp_to_i915(dp);
+ struct drm_dp_dpcd_ident *ident;
+ u32 vendor_oui;
+
+ if (drm_dp_read_desc(&dp->aux, &dp->desc, drm_dp_is_branch(dp->dpcd))) {
+ drm_err(&i915->drm, "Can't read description\n");
+ return false;
+ }
+
+ ident = &dp->desc.ident;
+ vendor_oui = (ident->oui[0] << 16) | (ident->oui[1] << 8) |
+ ident->oui[2];
+
+ switch (vendor_oui) {
+ case LSPCON_VENDOR_MCA_OUI:
+ lspcon->vendor = LSPCON_VENDOR_MCA;
+ drm_dbg_kms(&i915->drm, "Vendor: Mega Chips\n");
+ break;
+
+ case LSPCON_VENDOR_PARADE_OUI:
+ lspcon->vendor = LSPCON_VENDOR_PARADE;
+ drm_dbg_kms(&i915->drm, "Vendor: Parade Tech\n");
+ break;
+
+ default:
+ drm_err(&i915->drm, "Invalid/Unknown vendor OUI\n");
+ return false;
+ }
+
+ return true;
+}
+
+static u32 get_hdr_status_reg(struct intel_lspcon *lspcon)
+{
+ if (lspcon->vendor == LSPCON_VENDOR_MCA)
+ return DPCD_MCA_LSPCON_HDR_STATUS;
+ else
+ return DPCD_PARADE_LSPCON_HDR_STATUS;
+}
+
+void lspcon_detect_hdr_capability(struct intel_lspcon *lspcon)
+{
+ struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon);
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ u8 hdr_caps;
+ int ret;
+
+ ret = drm_dp_dpcd_read(&intel_dp->aux, get_hdr_status_reg(lspcon),
+ &hdr_caps, 1);
+
+ if (ret < 0) {
+ drm_dbg_kms(&i915->drm, "HDR capability detection failed\n");
+ lspcon->hdr_supported = false;
+ } else if (hdr_caps & 0x1) {
+ drm_dbg_kms(&i915->drm, "LSPCON capable of HDR\n");
+ lspcon->hdr_supported = true;
+ }
+}
+
+static enum drm_lspcon_mode lspcon_get_current_mode(struct intel_lspcon *lspcon)
+{
+ struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon);
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ enum drm_lspcon_mode current_mode;
+ struct i2c_adapter *adapter = &intel_dp->aux.ddc;
+
+ if (drm_lspcon_get_mode(intel_dp->aux.drm_dev, adapter, &current_mode)) {
+ drm_dbg_kms(&i915->drm, "Error reading LSPCON mode\n");
+ return DRM_LSPCON_MODE_INVALID;
+ }
+ return current_mode;
+}
+
+static enum drm_lspcon_mode lspcon_wait_mode(struct intel_lspcon *lspcon,
+ enum drm_lspcon_mode mode)
+{
+ struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon);
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ enum drm_lspcon_mode current_mode;
+
+ current_mode = lspcon_get_current_mode(lspcon);
+ if (current_mode == mode)
+ goto out;
+
+ drm_dbg_kms(&i915->drm, "Waiting for LSPCON mode %s to settle\n",
+ lspcon_mode_name(mode));
+
+ wait_for((current_mode = lspcon_get_current_mode(lspcon)) == mode, 400);
+ if (current_mode != mode)
+ drm_err(&i915->drm, "LSPCON mode hasn't settled\n");
+
+out:
+ drm_dbg_kms(&i915->drm, "Current LSPCON mode %s\n",
+ lspcon_mode_name(current_mode));
+
+ return current_mode;
+}
+
+static int lspcon_change_mode(struct intel_lspcon *lspcon,
+ enum drm_lspcon_mode mode)
+{
+ struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon);
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ int err;
+ enum drm_lspcon_mode current_mode;
+ struct i2c_adapter *adapter = &intel_dp->aux.ddc;
+
+ err = drm_lspcon_get_mode(intel_dp->aux.drm_dev, adapter, &current_mode);
+ if (err) {
+ drm_err(&i915->drm, "Error reading LSPCON mode\n");
+ return err;
+ }
+
+ if (current_mode == mode) {
+ drm_dbg_kms(&i915->drm, "Current mode = desired LSPCON mode\n");
+ return 0;
+ }
+
+ err = drm_lspcon_set_mode(intel_dp->aux.drm_dev, adapter, mode);
+ if (err < 0) {
+ drm_err(&i915->drm, "LSPCON mode change failed\n");
+ return err;
+ }
+
+ lspcon->mode = mode;
+ drm_dbg_kms(&i915->drm, "LSPCON mode changed done\n");
+ return 0;
+}
+
+static bool lspcon_wake_native_aux_ch(struct intel_lspcon *lspcon)
+{
+ struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon);
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ u8 rev;
+
+ if (drm_dp_dpcd_readb(&lspcon_to_intel_dp(lspcon)->aux, DP_DPCD_REV,
+ &rev) != 1) {
+ drm_dbg_kms(&i915->drm, "Native AUX CH down\n");
+ return false;
+ }
+
+ drm_dbg_kms(&i915->drm, "Native AUX CH up, DPCD version: %d.%d\n",
+ rev >> 4, rev & 0xf);
+
+ return true;
+}
+
+static bool lspcon_probe(struct intel_lspcon *lspcon)
+{
+ int retry;
+ enum drm_dp_dual_mode_type adaptor_type;
+ struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon);
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct i2c_adapter *adapter = &intel_dp->aux.ddc;
+ enum drm_lspcon_mode expected_mode;
+
+ expected_mode = lspcon_wake_native_aux_ch(lspcon) ?
+ DRM_LSPCON_MODE_PCON : DRM_LSPCON_MODE_LS;
+
+	/* Let's probe the adaptor and check its type */
+ for (retry = 0; retry < 6; retry++) {
+ if (retry)
+ usleep_range(500, 1000);
+
+ adaptor_type = drm_dp_dual_mode_detect(intel_dp->aux.drm_dev, adapter);
+ if (adaptor_type == DRM_DP_DUAL_MODE_LSPCON)
+ break;
+ }
+
+ if (adaptor_type != DRM_DP_DUAL_MODE_LSPCON) {
+ drm_dbg_kms(&i915->drm, "No LSPCON detected, found %s\n",
+ drm_dp_get_dual_mode_type_name(adaptor_type));
+ return false;
+ }
+
+	/* Yay ... got an LSPCON device */
+ drm_dbg_kms(&i915->drm, "LSPCON detected\n");
+ lspcon->mode = lspcon_wait_mode(lspcon, expected_mode);
+
+ /*
+	 * In the SW state machine, let's put LSPCON in PCON mode only.
+	 * This way it will work with HDMI 1.4 sinks as well as HDMI
+	 * 2.0 sinks.
+ */
+ if (lspcon->mode != DRM_LSPCON_MODE_PCON) {
+ if (lspcon_change_mode(lspcon, DRM_LSPCON_MODE_PCON) < 0) {
+ drm_err(&i915->drm, "LSPCON mode change to PCON failed\n");
+ return false;
+ }
+ }
+ return true;
+}
+
+static void lspcon_resume_in_pcon_wa(struct intel_lspcon *lspcon)
+{
+ struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon);
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ unsigned long start = jiffies;
+
+ while (1) {
+ if (intel_digital_port_connected(&dig_port->base)) {
+ drm_dbg_kms(&i915->drm, "LSPCON recovering in PCON mode after %u ms\n",
+ jiffies_to_msecs(jiffies - start));
+ return;
+ }
+
+ if (time_after(jiffies, start + msecs_to_jiffies(1000)))
+ break;
+
+ usleep_range(10000, 15000);
+ }
+
+ drm_dbg_kms(&i915->drm, "LSPCON DP descriptor mismatch after resume\n");
+}
+
+static bool lspcon_parade_fw_ready(struct drm_dp_aux *aux)
+{
+ u8 avi_if_ctrl;
+ u8 retry;
+ ssize_t ret;
+
+ /* Check if LSPCON FW is ready for data */
+ for (retry = 0; retry < 5; retry++) {
+ if (retry)
+ usleep_range(200, 300);
+
+ ret = drm_dp_dpcd_read(aux, LSPCON_PARADE_AVI_IF_CTRL,
+ &avi_if_ctrl, 1);
+ if (ret < 0) {
+ drm_err(aux->drm_dev, "Failed to read AVI IF control\n");
+ return false;
+ }
+
+ if ((avi_if_ctrl & LSPCON_PARADE_AVI_IF_KICKOFF) == 0)
+ return true;
+ }
+
+ drm_err(aux->drm_dev, "Parade FW not ready to accept AVI IF\n");
+ return false;
+}
+
+static bool _lspcon_parade_write_infoframe_blocks(struct drm_dp_aux *aux,
+ u8 *avi_buf)
+{
+ u8 avi_if_ctrl;
+ u8 block_count = 0;
+ u8 *data;
+ u16 reg;
+ ssize_t ret;
+
+ while (block_count < 4) {
+ if (!lspcon_parade_fw_ready(aux)) {
+ drm_dbg_kms(aux->drm_dev, "LSPCON FW not ready, block %d\n",
+ block_count);
+ return false;
+ }
+
+ reg = LSPCON_PARADE_AVI_IF_WRITE_OFFSET;
+ data = avi_buf + block_count * 8;
+ ret = drm_dp_dpcd_write(aux, reg, data, 8);
+ if (ret < 0) {
+ drm_err(aux->drm_dev, "Failed to write AVI IF block %d\n",
+ block_count);
+ return false;
+ }
+
+ /*
+ * Once a block of data is written, we have to inform the FW
+ * about this by writing into avi infoframe control register:
+ * - set the kickoff bit[7] to 1
+ * - write the block no. to bits[1:0]
+ */
+ reg = LSPCON_PARADE_AVI_IF_CTRL;
+ avi_if_ctrl = LSPCON_PARADE_AVI_IF_KICKOFF | block_count;
+ ret = drm_dp_dpcd_write(aux, reg, &avi_if_ctrl, 1);
+ if (ret < 0) {
+ drm_err(aux->drm_dev, "Failed to update (0x%x), block %d\n",
+ reg, block_count);
+ return false;
+ }
+
+ block_count++;
+ }
+
+ drm_dbg_kms(aux->drm_dev, "Wrote AVI IF blocks successfully\n");
+ return true;
+}
+
+static bool _lspcon_write_avi_infoframe_parade(struct drm_dp_aux *aux,
+ const u8 *frame,
+ ssize_t len)
+{
+ u8 avi_if[LSPCON_PARADE_AVI_IF_DATA_SIZE] = {1, };
+
+ /*
+	 * Parade's buffer contains 32 bytes of data, divided
+	 * into 4 blocks of 8 bytes each:
+	 * Token byte (first byte of the first block, must be non-zero)
+	 * HB0 to HB2 from the AVI IF (3 header bytes)
+	 * PB0 to PB27 from the AVI IF (28 data bytes)
+	 * So it should look like this:
+	 * first block:   | <token> <HB0-HB2> <PB0-PB3> |
+	 * next 3 blocks: |<PB4-PB11>|<PB12-PB19>|<PB20-PB27>|
+ */
+
+ if (len > LSPCON_PARADE_AVI_IF_DATA_SIZE - 1) {
+ drm_err(aux->drm_dev, "Invalid length of infoframes\n");
+ return false;
+ }
+
+ memcpy(&avi_if[1], frame, len);
+
+ if (!_lspcon_parade_write_infoframe_blocks(aux, avi_if)) {
+ drm_dbg_kms(aux->drm_dev, "Failed to write infoframe blocks\n");
+ return false;
+ }
+
+ return true;
+}
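+
+/*
+ * Worked example (illustration only): with the maximum payload the layout
+ * comment above describes (3 header bytes plus 28 payload bytes, len == 31),
+ * the leading token byte fills the 32 byte buffer exactly, and the four
+ * 8 byte blocks written above are:
+ *
+ *	avi_if[0]     = token (non-zero, 1 here)
+ *	avi_if[1..3]  = HB0-HB2
+ *	avi_if[4..31] = PB0-PB27
+ *
+ * block 0 = avi_if[0..7], block 1 = avi_if[8..15], block 2 = avi_if[16..23],
+ * block 3 = avi_if[24..31], each followed by a kickoff write of
+ * (LSPCON_PARADE_AVI_IF_KICKOFF | block number).
+ */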
+
+static bool _lspcon_write_avi_infoframe_mca(struct drm_dp_aux *aux,
+ const u8 *buffer, ssize_t len)
+{
+ int ret;
+ u32 val = 0;
+ u32 retry;
+ u16 reg;
+ const u8 *data = buffer;
+
+ reg = LSPCON_MCA_AVI_IF_WRITE_OFFSET;
+ while (val < len) {
+ /* DPCD write for AVI IF can fail on a slow FW day, so retry */
+ for (retry = 0; retry < 5; retry++) {
+ ret = drm_dp_dpcd_write(aux, reg, (void *)data, 1);
+ if (ret == 1) {
+ break;
+ } else if (retry < 4) {
+ mdelay(50);
+ continue;
+ } else {
+ drm_err(aux->drm_dev, "DPCD write failed at:0x%x\n", reg);
+ return false;
+ }
+ }
+ val++; reg++; data++;
+ }
+
+ val = 0;
+ reg = LSPCON_MCA_AVI_IF_CTRL;
+ ret = drm_dp_dpcd_read(aux, reg, &val, 1);
+ if (ret < 0) {
+ drm_err(aux->drm_dev, "DPCD read failed, address 0x%x\n", reg);
+ return false;
+ }
+
+	/* Tell the LSPCON chip about the infoframe: clear bit 1 and set bit 0 */
+ val &= ~LSPCON_MCA_AVI_IF_HANDLED;
+ val |= LSPCON_MCA_AVI_IF_KICKOFF;
+
+ ret = drm_dp_dpcd_write(aux, reg, &val, 1);
+ if (ret < 0) {
+		drm_err(aux->drm_dev, "DPCD write failed, address 0x%x\n", reg);
+ return false;
+ }
+
+ val = 0;
+ ret = drm_dp_dpcd_read(aux, reg, &val, 1);
+ if (ret < 0) {
+ drm_err(aux->drm_dev, "DPCD read failed, address 0x%x\n", reg);
+ return false;
+ }
+
+ if (val == LSPCON_MCA_AVI_IF_HANDLED)
+ drm_dbg_kms(aux->drm_dev, "AVI IF handled by FW\n");
+
+ return true;
+}
+
+void lspcon_write_infoframe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int type,
+ const void *frame, ssize_t len)
+{
+ bool ret = true;
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_lspcon *lspcon = enc_to_intel_lspcon(encoder);
+
+ switch (type) {
+ case HDMI_INFOFRAME_TYPE_AVI:
+ if (lspcon->vendor == LSPCON_VENDOR_MCA)
+ ret = _lspcon_write_avi_infoframe_mca(&intel_dp->aux,
+ frame, len);
+ else
+ ret = _lspcon_write_avi_infoframe_parade(&intel_dp->aux,
+ frame, len);
+ break;
+ case HDMI_PACKET_TYPE_GAMUT_METADATA:
+ drm_dbg_kms(&i915->drm, "Update HDR metadata for lspcon\n");
+		/* Reuse the legacy HSW implementation for this */
+ hsw_write_infoframe(encoder, crtc_state, type, frame, len);
+ break;
+ default:
+ return;
+ }
+
+ if (!ret) {
+ drm_err(&i915->drm, "Failed to write infoframes\n");
+ return;
+ }
+}
+
+void lspcon_read_infoframe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int type,
+ void *frame, ssize_t len)
+{
+ /* FIXME implement for AVI Infoframe as well */
+ if (type == HDMI_PACKET_TYPE_GAMUT_METADATA)
+ hsw_read_infoframe(encoder, crtc_state, type,
+ frame, len);
+}
+
+void lspcon_set_infoframes(struct intel_encoder *encoder,
+ bool enable,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ ssize_t ret;
+ union hdmi_infoframe frame;
+ u8 buf[VIDEO_DIP_DATA_SIZE];
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ struct intel_lspcon *lspcon = &dig_port->lspcon;
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+
+ if (!lspcon->active) {
+ drm_err(&i915->drm, "Writing infoframes while LSPCON disabled ?\n");
+ return;
+ }
+
+ /* FIXME precompute infoframes */
+
+ ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
+ conn_state->connector,
+ adjusted_mode);
+ if (ret < 0) {
+ drm_err(&i915->drm, "couldn't fill AVI infoframe\n");
+ return;
+ }
+
+ /*
+ * Currently there is no interface defined to
+ * check user preference between RGB/YCBCR444
+ * or YCBCR420. So the only possible case for
+ * YCBCR444 usage is driving YCBCR420 output
+ * with LSPCON, when pipe is configured for
+ * YCBCR444 output and LSPCON takes care of
+ * downsampling it.
+ */
+ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
+ frame.avi.colorspace = HDMI_COLORSPACE_YUV420;
+ else
+ frame.avi.colorspace = HDMI_COLORSPACE_RGB;
+
+ /* Set the Colorspace as per the HDMI spec */
+ drm_hdmi_avi_infoframe_colorimetry(&frame.avi, conn_state);
+
+ /* nonsense combination */
+ drm_WARN_ON(encoder->base.dev, crtc_state->limited_color_range &&
+ crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
+
+ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_RGB) {
+ drm_hdmi_avi_infoframe_quant_range(&frame.avi,
+ conn_state->connector,
+ adjusted_mode,
+ crtc_state->limited_color_range ?
+ HDMI_QUANTIZATION_RANGE_LIMITED :
+ HDMI_QUANTIZATION_RANGE_FULL);
+ } else {
+ frame.avi.quantization_range = HDMI_QUANTIZATION_RANGE_DEFAULT;
+ frame.avi.ycc_quantization_range = HDMI_YCC_QUANTIZATION_RANGE_LIMITED;
+ }
+
+ drm_hdmi_avi_infoframe_content_type(&frame.avi, conn_state);
+
+ ret = hdmi_infoframe_pack(&frame, buf, sizeof(buf));
+ if (ret < 0) {
+ drm_err(&i915->drm, "Failed to pack AVI IF\n");
+ return;
+ }
+
+ dig_port->write_infoframe(encoder, crtc_state, HDMI_INFOFRAME_TYPE_AVI,
+ buf, ret);
+}
+
+static bool _lspcon_read_avi_infoframe_enabled_mca(struct drm_dp_aux *aux)
+{
+ int ret;
+ u32 val = 0;
+ u16 reg = LSPCON_MCA_AVI_IF_CTRL;
+
+ ret = drm_dp_dpcd_read(aux, reg, &val, 1);
+ if (ret < 0) {
+ drm_err(aux->drm_dev, "DPCD read failed, address 0x%x\n", reg);
+ return false;
+ }
+
+ return val & LSPCON_MCA_AVI_IF_KICKOFF;
+}
+
+static bool _lspcon_read_avi_infoframe_enabled_parade(struct drm_dp_aux *aux)
+{
+ int ret;
+ u32 val = 0;
+ u16 reg = LSPCON_PARADE_AVI_IF_CTRL;
+
+ ret = drm_dp_dpcd_read(aux, reg, &val, 1);
+ if (ret < 0) {
+ drm_err(aux->drm_dev, "DPCD read failed, address 0x%x\n", reg);
+ return false;
+ }
+
+ return val & LSPCON_PARADE_AVI_IF_KICKOFF;
+}
+
+u32 lspcon_infoframes_enabled(struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_lspcon *lspcon = enc_to_intel_lspcon(encoder);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ bool infoframes_enabled;
+ u32 val = 0;
+ u32 mask, tmp;
+
+ if (lspcon->vendor == LSPCON_VENDOR_MCA)
+ infoframes_enabled = _lspcon_read_avi_infoframe_enabled_mca(&intel_dp->aux);
+ else
+ infoframes_enabled = _lspcon_read_avi_infoframe_enabled_parade(&intel_dp->aux);
+
+ if (infoframes_enabled)
+ val |= intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI);
+
+ if (lspcon->hdr_supported) {
+ tmp = intel_de_read(dev_priv,
+ HSW_TVIDEO_DIP_CTL(pipe_config->cpu_transcoder));
+ mask = VIDEO_DIP_ENABLE_GMP_HSW;
+
+ if (tmp & mask)
+ val |= intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA);
+ }
+
+ return val;
+}
+
+void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon)
+{
+ lspcon_wait_mode(lspcon, DRM_LSPCON_MODE_PCON);
+}
+
+bool lspcon_init(struct intel_digital_port *dig_port)
+{
+ struct intel_dp *intel_dp = &dig_port->dp;
+ struct intel_lspcon *lspcon = &dig_port->lspcon;
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct drm_connector *connector = &intel_dp->attached_connector->base;
+
+ lspcon->active = false;
+ lspcon->mode = DRM_LSPCON_MODE_INVALID;
+
+ if (!lspcon_probe(lspcon)) {
+ drm_err(&i915->drm, "Failed to probe lspcon\n");
+ return false;
+ }
+
+ if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd) != 0) {
+ drm_err(&i915->drm, "LSPCON DPCD read failed\n");
+ return false;
+ }
+
+ if (!lspcon_detect_vendor(lspcon)) {
+ drm_err(&i915->drm, "LSPCON vendor detection failed\n");
+ return false;
+ }
+
+ connector->ycbcr_420_allowed = true;
+ lspcon->active = true;
+ drm_dbg_kms(&i915->drm, "Success: LSPCON init\n");
+ return true;
+}
+
+u32 intel_lspcon_infoframes_enabled(struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config)
+{
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+
+ return dig_port->infoframes_enabled(encoder, pipe_config);
+}
+
+void lspcon_resume(struct intel_digital_port *dig_port)
+{
+ struct intel_lspcon *lspcon = &dig_port->lspcon;
+ struct drm_device *dev = dig_port->base.base.dev;
+ struct drm_i915_private *i915 = to_i915(dev);
+ enum drm_lspcon_mode expected_mode;
+
+ if (!intel_bios_is_lspcon_present(i915, dig_port->base.port))
+ return;
+
+ if (!lspcon->active) {
+ if (!lspcon_init(dig_port)) {
+ drm_err(&i915->drm, "LSPCON init failed on port %c\n",
+ port_name(dig_port->base.port));
+ return;
+ }
+ }
+
+ if (lspcon_wake_native_aux_ch(lspcon)) {
+ expected_mode = DRM_LSPCON_MODE_PCON;
+ lspcon_resume_in_pcon_wa(lspcon);
+ } else {
+ expected_mode = DRM_LSPCON_MODE_LS;
+ }
+
+ if (lspcon_wait_mode(lspcon, expected_mode) == DRM_LSPCON_MODE_PCON)
+ return;
+
+ if (lspcon_change_mode(lspcon, DRM_LSPCON_MODE_PCON))
+ drm_err(&i915->drm, "LSPCON resume failed\n");
+ else
+ drm_dbg_kms(&i915->drm, "LSPCON resume success\n");
+}
diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.h b/drivers/gpu/drm/i915/display/intel_lspcon.h
new file mode 100644
index 000000000..e19e10492
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_lspcon.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_LSPCON_H__
+#define __INTEL_LSPCON_H__
+
+#include <linux/types.h>
+
+struct drm_connector;
+struct drm_connector_state;
+struct intel_crtc_state;
+struct intel_digital_port;
+struct intel_encoder;
+struct intel_lspcon;
+
+bool lspcon_init(struct intel_digital_port *dig_port);
+void lspcon_detect_hdr_capability(struct intel_lspcon *lspcon);
+void lspcon_resume(struct intel_digital_port *dig_port);
+void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon);
+void lspcon_write_infoframe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int type,
+ const void *buf, ssize_t len);
+void lspcon_read_infoframe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int type,
+ void *frame, ssize_t len);
+void lspcon_set_infoframes(struct intel_encoder *encoder,
+ bool enable,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
+u32 lspcon_infoframes_enabled(struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config);
+u32 intel_lspcon_infoframes_enabled(struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config);
+void hsw_write_infoframe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int type,
+ const void *frame, ssize_t len);
+void hsw_read_infoframe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int type,
+ void *frame, ssize_t len);
+
+#endif /* __INTEL_LSPCON_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c
new file mode 100644
index 000000000..40b5d3d3c
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_lvds.c
@@ -0,0 +1,1019 @@
+/*
+ * Copyright © 2006-2007 Intel Corporation
+ * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Dave Airlie <airlied@linux.ie>
+ * Jesse Barnes <jesse.barnes@intel.com>
+ */
+
+#include <acpi/button.h>
+#include <linux/acpi.h>
+#include <linux/dmi.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/vga_switcheroo.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+
+#include "i915_drv.h"
+#include "intel_atomic.h"
+#include "intel_backlight.h"
+#include "intel_connector.h"
+#include "intel_de.h"
+#include "intel_display_types.h"
+#include "intel_dpll.h"
+#include "intel_fdi.h"
+#include "intel_gmbus.h"
+#include "intel_lvds.h"
+#include "intel_panel.h"
+
+/* Private structure for the integrated LVDS support */
+struct intel_lvds_pps {
+ /* 100us units */
+ int t1_t2;
+ int t3;
+ int t4;
+ int t5;
+ int tx;
+
+ int divider;
+
+ int port;
+ bool powerdown_on_reset;
+};
+
+struct intel_lvds_encoder {
+ struct intel_encoder base;
+
+ bool is_dual_link;
+ i915_reg_t reg;
+ u32 a3_power;
+
+ struct intel_lvds_pps init_pps;
+ u32 init_lvds_val;
+
+ struct intel_connector *attached_connector;
+};
+
+static struct intel_lvds_encoder *to_lvds_encoder(struct drm_encoder *encoder)
+{
+ return container_of(encoder, struct intel_lvds_encoder, base.base);
+}
+
+bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv,
+ i915_reg_t lvds_reg, enum pipe *pipe)
+{
+ u32 val;
+
+ val = intel_de_read(dev_priv, lvds_reg);
+
+ /* asserts want to know the pipe even if the port is disabled */
+ if (HAS_PCH_CPT(dev_priv))
+ *pipe = REG_FIELD_GET(LVDS_PIPE_SEL_MASK_CPT, val);
+ else
+ *pipe = REG_FIELD_GET(LVDS_PIPE_SEL_MASK, val);
+
+ return val & LVDS_PORT_EN;
+}
+
+static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
+ intel_wakeref_t wakeref;
+ bool ret;
+
+ wakeref = intel_display_power_get_if_enabled(dev_priv,
+ encoder->power_domain);
+ if (!wakeref)
+ return false;
+
+ ret = intel_lvds_port_enabled(dev_priv, lvds_encoder->reg, pipe);
+
+ intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
+
+ return ret;
+}
+
+static void intel_lvds_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
+ u32 tmp, flags = 0;
+
+ pipe_config->output_types |= BIT(INTEL_OUTPUT_LVDS);
+
+ tmp = intel_de_read(dev_priv, lvds_encoder->reg);
+ if (tmp & LVDS_HSYNC_POLARITY)
+ flags |= DRM_MODE_FLAG_NHSYNC;
+ else
+ flags |= DRM_MODE_FLAG_PHSYNC;
+ if (tmp & LVDS_VSYNC_POLARITY)
+ flags |= DRM_MODE_FLAG_NVSYNC;
+ else
+ flags |= DRM_MODE_FLAG_PVSYNC;
+
+ pipe_config->hw.adjusted_mode.flags |= flags;
+
+ if (DISPLAY_VER(dev_priv) < 5)
+ pipe_config->gmch_pfit.lvds_border_bits =
+ tmp & LVDS_BORDER_ENABLE;
+
+ /* gen2/3 store dither state in pfit control, needs to match */
+ if (DISPLAY_VER(dev_priv) < 4) {
+ tmp = intel_de_read(dev_priv, PFIT_CONTROL);
+
+ pipe_config->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE;
+ }
+
+ pipe_config->hw.adjusted_mode.crtc_clock = pipe_config->port_clock;
+}
+
+static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv,
+ struct intel_lvds_pps *pps)
+{
+ u32 val;
+
+ pps->powerdown_on_reset = intel_de_read(dev_priv, PP_CONTROL(0)) & PANEL_POWER_RESET;
+
+ val = intel_de_read(dev_priv, PP_ON_DELAYS(0));
+ pps->port = REG_FIELD_GET(PANEL_PORT_SELECT_MASK, val);
+ pps->t1_t2 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, val);
+ pps->t5 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, val);
+
+ val = intel_de_read(dev_priv, PP_OFF_DELAYS(0));
+ pps->t3 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, val);
+ pps->tx = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, val);
+
+ val = intel_de_read(dev_priv, PP_DIVISOR(0));
+ pps->divider = REG_FIELD_GET(PP_REFERENCE_DIVIDER_MASK, val);
+ val = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, val);
+ /*
+ * Remove the BSpec specified +1 (100ms) offset that accounts for a
+ * too short power-cycle delay due to the asynchronous programming of
+ * the register.
+ */
+ if (val)
+ val--;
+ /* Convert from 100ms to 100us units */
+ pps->t4 = val * 1000;
+
+ if (DISPLAY_VER(dev_priv) <= 4 &&
+ pps->t1_t2 == 0 && pps->t5 == 0 && pps->t3 == 0 && pps->tx == 0) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Panel power timings uninitialized, "
+ "setting defaults\n");
+ /* Set T2 to 40ms and T5 to 200ms in 100 usec units */
+ pps->t1_t2 = 40 * 10;
+ pps->t5 = 200 * 10;
+ /* Set T3 to 35ms and Tx to 200ms in 100 usec units */
+ pps->t3 = 35 * 10;
+ pps->tx = 200 * 10;
+ }
+
+ drm_dbg(&dev_priv->drm, "LVDS PPS:t1+t2 %d t3 %d t4 %d t5 %d tx %d "
+ "divider %d port %d powerdown_on_reset %d\n",
+ pps->t1_t2, pps->t3, pps->t4, pps->t5, pps->tx,
+ pps->divider, pps->port, pps->powerdown_on_reset);
+}
+
+static void intel_lvds_pps_init_hw(struct drm_i915_private *dev_priv,
+ struct intel_lvds_pps *pps)
+{
+ u32 val;
+
+ val = intel_de_read(dev_priv, PP_CONTROL(0));
+ drm_WARN_ON(&dev_priv->drm,
+ (val & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS);
+ if (pps->powerdown_on_reset)
+ val |= PANEL_POWER_RESET;
+ intel_de_write(dev_priv, PP_CONTROL(0), val);
+
+ intel_de_write(dev_priv, PP_ON_DELAYS(0),
+ REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, pps->port) |
+ REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, pps->t1_t2) |
+ REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, pps->t5));
+
+ intel_de_write(dev_priv, PP_OFF_DELAYS(0),
+ REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, pps->t3) |
+ REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, pps->tx));
+
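+ /* Re-apply the BSpec +1 (100ms) power cycle delay offset removed during readout */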
+ intel_de_write(dev_priv, PP_DIVISOR(0),
+ REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, pps->divider) |
+ REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(pps->t4, 1000) + 1));
+}
+
+static void intel_pre_enable_lvds(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+ const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
+ enum pipe pipe = crtc->pipe;
+ u32 temp;
+
+ if (HAS_PCH_SPLIT(dev_priv)) {
+ assert_fdi_rx_pll_disabled(dev_priv, pipe);
+ assert_shared_dpll_disabled(dev_priv,
+ pipe_config->shared_dpll);
+ } else {
+ assert_pll_disabled(dev_priv, pipe);
+ }
+
+ intel_lvds_pps_init_hw(dev_priv, &lvds_encoder->init_pps);
+
+ temp = lvds_encoder->init_lvds_val;
+ temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
+
+ if (HAS_PCH_CPT(dev_priv)) {
+ temp &= ~LVDS_PIPE_SEL_MASK_CPT;
+ temp |= LVDS_PIPE_SEL_CPT(pipe);
+ } else {
+ temp &= ~LVDS_PIPE_SEL_MASK;
+ temp |= LVDS_PIPE_SEL(pipe);
+ }
+
+ /* set the corresponding LVDS_BORDER bits */
+ temp &= ~LVDS_BORDER_ENABLE;
+ temp |= pipe_config->gmch_pfit.lvds_border_bits;
+
+ /*
+ * Set the B0-B3 data pairs corresponding to whether we're going to
+ * set the DPLLs for dual-channel mode or not.
+ */
+ if (lvds_encoder->is_dual_link)
+ temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
+ else
+ temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
+
+ /*
+ * It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
+ * appropriately here, but we need to look more thoroughly into how
+ * panels behave in the two modes. For now, let's just maintain the
+ * value we got from the BIOS.
+ */
+ temp &= ~LVDS_A3_POWER_MASK;
+ temp |= lvds_encoder->a3_power;
+
+ /*
+ * Set the dithering flag on LVDS as needed. Note that there is no
+ * special lvds dither control bit on pch-split platforms; dithering is
+ * only controlled through the PIPECONF reg.
+ */
+ if (DISPLAY_VER(dev_priv) == 4) {
+ /*
+ * Bspec wording suggests that LVDS port dithering only exists
+ * for 18bpp panels.
+ */
+ if (pipe_config->dither && pipe_config->pipe_bpp == 18)
+ temp |= LVDS_ENABLE_DITHER;
+ else
+ temp &= ~LVDS_ENABLE_DITHER;
+ }
+ temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
+ if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
+ temp |= LVDS_HSYNC_POLARITY;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
+ temp |= LVDS_VSYNC_POLARITY;
+
+ intel_de_write(dev_priv, lvds_encoder->reg, temp);
+}
+
+/*
+ * Sets the power state for the panel.
+ */
+static void intel_enable_lvds(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
+ struct drm_i915_private *dev_priv = to_i915(dev);
+
+ intel_de_write(dev_priv, lvds_encoder->reg,
+ intel_de_read(dev_priv, lvds_encoder->reg) | LVDS_PORT_EN);
+
+ intel_de_write(dev_priv, PP_CONTROL(0),
+ intel_de_read(dev_priv, PP_CONTROL(0)) | PANEL_POWER_ON);
+ intel_de_posting_read(dev_priv, lvds_encoder->reg);
+
+ if (intel_de_wait_for_set(dev_priv, PP_STATUS(0), PP_ON, 5000))
+ drm_err(&dev_priv->drm,
+ "timed out waiting for panel to power on\n");
+
+ intel_backlight_enable(pipe_config, conn_state);
+}
+
+static void intel_disable_lvds(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
+ intel_de_write(dev_priv, PP_CONTROL(0),
+ intel_de_read(dev_priv, PP_CONTROL(0)) & ~PANEL_POWER_ON);
+ if (intel_de_wait_for_clear(dev_priv, PP_STATUS(0), PP_ON, 1000))
+ drm_err(&dev_priv->drm,
+ "timed out waiting for panel to power off\n");
+
+ intel_de_write(dev_priv, lvds_encoder->reg,
+ intel_de_read(dev_priv, lvds_encoder->reg) & ~LVDS_PORT_EN);
+ intel_de_posting_read(dev_priv, lvds_encoder->reg);
+}
+
+static void gmch_disable_lvds(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ intel_backlight_disable(old_conn_state);
+
+ intel_disable_lvds(state, encoder, old_crtc_state, old_conn_state);
+}
+
+static void pch_disable_lvds(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ intel_backlight_disable(old_conn_state);
+}
+
+static void pch_post_disable_lvds(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ intel_disable_lvds(state, encoder, old_crtc_state, old_conn_state);
+}
+
+static void intel_lvds_shutdown(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
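+ /*
+ * Wait out any in-progress panel power cycle so the panel isn't
+ * left mid power cycle across shutdown/reboot.
+ */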
+ if (intel_de_wait_for_clear(dev_priv, PP_STATUS(0), PP_CYCLE_DELAY_ACTIVE, 5000))
+ drm_err(&dev_priv->drm,
+ "timed out waiting for panel power cycle delay\n");
+}
+
+static enum drm_mode_status
+intel_lvds_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct drm_i915_private *i915 = to_i915(intel_connector->base.dev);
+ const struct drm_display_mode *fixed_mode =
+ intel_panel_fixed_mode(intel_connector, mode);
+ int max_pixclk = i915->max_dotclk_freq;
+ enum drm_mode_status status;
+
+ status = intel_cpu_transcoder_mode_valid(i915, mode);
+ if (status != MODE_OK)
+ return status;
+
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
+ status = intel_panel_mode_valid(intel_connector, mode);
+ if (status != MODE_OK)
+ return status;
+
+ if (fixed_mode->clock > max_pixclk)
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
+
+static int intel_lvds_compute_config(struct intel_encoder *intel_encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
+ struct intel_lvds_encoder *lvds_encoder =
+ to_lvds_encoder(&intel_encoder->base);
+ struct intel_connector *intel_connector =
+ lvds_encoder->attached_connector;
+ struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+ unsigned int lvds_bpp;
+ int ret;
+
+ /* Should never happen!! */
+ if (DISPLAY_VER(dev_priv) < 4 && crtc->pipe == 0) {
+ drm_err(&dev_priv->drm, "Can't support LVDS on pipe A\n");
+ return -EINVAL;
+ }
+
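+ /* A3 data pair powered up implies a 24 bpp (3 x 8 bpc) panel, otherwise 18 bpp */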
+ if (lvds_encoder->a3_power == LVDS_A3_POWER_UP)
+ lvds_bpp = 8*3;
+ else
+ lvds_bpp = 6*3;
+
+ if (lvds_bpp != pipe_config->pipe_bpp && !pipe_config->bw_constrained) {
+ drm_dbg_kms(&dev_priv->drm,
+ "forcing display bpp (was %d) to LVDS (%d)\n",
+ pipe_config->pipe_bpp, lvds_bpp);
+ pipe_config->pipe_bpp = lvds_bpp;
+ }
+
+ pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
+
+ /*
+ * We have timings from the BIOS for the panel, put them into
+ * the adjusted mode. The CRTC will be set up for this mode,
+ * with the panel scaling set up to source from the H/VDisplay
+ * of the original mode.
+ */
+ ret = intel_panel_compute_config(intel_connector, adjusted_mode);
+ if (ret)
+ return ret;
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return -EINVAL;
+
+ if (HAS_PCH_SPLIT(dev_priv))
+ pipe_config->has_pch_encoder = true;
+
+ ret = intel_panel_fitting(pipe_config, conn_state);
+ if (ret)
+ return ret;
+
+ /*
+ * XXX: It would be nice to support lower refresh rates on the
+ * panels to reduce power consumption, and perhaps match the
+ * user's requested refresh rate.
+ */
+
+ return 0;
+}
+
+/*
+ * Return the list of DDC modes if available, or the BIOS fixed mode otherwise.
+ */
+static int intel_lvds_get_modes(struct drm_connector *connector)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+
+ /* use cached edid if we have one */
+ if (!IS_ERR_OR_NULL(intel_connector->edid))
+ return drm_add_edid_modes(connector, intel_connector->edid);
+
+ return intel_panel_get_modes(intel_connector);
+}
+
+static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = {
+ .get_modes = intel_lvds_get_modes,
+ .mode_valid = intel_lvds_mode_valid,
+ .atomic_check = intel_digital_connector_atomic_check,
+};
+
+static const struct drm_connector_funcs intel_lvds_connector_funcs = {
+ .detect = intel_panel_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .atomic_get_property = intel_digital_connector_atomic_get_property,
+ .atomic_set_property = intel_digital_connector_atomic_set_property,
+ .late_register = intel_connector_register,
+ .early_unregister = intel_connector_unregister,
+ .destroy = intel_connector_destroy,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_duplicate_state = intel_digital_connector_duplicate_state,
+};
+
+static const struct drm_encoder_funcs intel_lvds_enc_funcs = {
+ .destroy = intel_encoder_destroy,
+};
+
+static int intel_no_lvds_dmi_callback(const struct dmi_system_id *id)
+{
+ DRM_INFO("Skipping LVDS initialization for %s\n", id->ident);
+ return 1;
+}
+
+/* These systems claim to have LVDS, but really don't */
+static const struct dmi_system_id intel_no_lvds[] = {
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Apple Mac Mini (Core series)",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Macmini1,1"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Apple Mac Mini (Core 2 series)",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Macmini2,1"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "MSI IM-945GSE-A",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "MSI"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "A9830IMS"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Dell Studio Hybrid",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Studio Hybrid 140g"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Dell OptiPlex FX170",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex FX170"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "AOpen Mini PC",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "AOpen"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "i965GMx-IF"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "AOpen Mini PC MP915",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"),
+ DMI_MATCH(DMI_BOARD_NAME, "i915GMx-F"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "AOpen i915GMm-HFS",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"),
+ DMI_MATCH(DMI_BOARD_NAME, "i915GMm-HFS"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "AOpen i45GMx-I",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"),
+ DMI_MATCH(DMI_BOARD_NAME, "i45GMx-I"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Aopen i945GTt-VFA",
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_VERSION, "AO00001JW"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Clientron U800",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Clientron"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "U800"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Clientron E830",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Clientron"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "E830"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Asus EeeBox PC EB1007",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "EB1007"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Asus AT5NM10T-I",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "AT5NM10T-I"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Hewlett-Packard HP t5740",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, " t5740"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Hewlett-Packard t5745",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "hp t5745"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Hewlett-Packard st5747",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "hp st5747"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "MSI Wind Box DC500",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"),
+ DMI_MATCH(DMI_BOARD_NAME, "MS-7469"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Gigabyte GA-D525TUD",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
+ DMI_MATCH(DMI_BOARD_NAME, "D525TUD"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Supermicro X7SPA-H",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X7SPA-H"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Fujitsu Esprimo Q900",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ESPRIMO Q900"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Intel D410PT",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Intel"),
+ DMI_MATCH(DMI_BOARD_NAME, "D410PT"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Intel D425KT",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Intel"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "D425KT"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Intel D510MO",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Intel"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "D510MO"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Intel D525MW",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Intel"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "D525MW"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Radiant P845",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Radiant Systems Inc"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "P845"),
+ },
+ },
+
+ { } /* terminating entry */
+};
+
+static int intel_dual_link_lvds_callback(const struct dmi_system_id *id)
+{
+ DRM_INFO("Forcing lvds to dual link mode on %s\n", id->ident);
+ return 1;
+}
+
+static const struct dmi_system_id intel_dual_link_lvds[] = {
+ {
+ .callback = intel_dual_link_lvds_callback,
+ .ident = "Apple MacBook Pro 15\" (2010)",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro6,2"),
+ },
+ },
+ {
+ .callback = intel_dual_link_lvds_callback,
+ .ident = "Apple MacBook Pro 15\" (2011)",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro8,2"),
+ },
+ },
+ {
+ .callback = intel_dual_link_lvds_callback,
+ .ident = "Apple MacBook Pro 15\" (2012)",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro9,1"),
+ },
+ },
+ { } /* terminating entry */
+};
+
+struct intel_encoder *intel_get_lvds_encoder(struct drm_i915_private *dev_priv)
+{
+ struct intel_encoder *encoder;
+
+ for_each_intel_encoder(&dev_priv->drm, encoder) {
+ if (encoder->type == INTEL_OUTPUT_LVDS)
+ return encoder;
+ }
+
+ return NULL;
+}
+
+bool intel_is_dual_link_lvds(struct drm_i915_private *dev_priv)
+{
+ struct intel_encoder *encoder = intel_get_lvds_encoder(dev_priv);
+
+ return encoder && to_lvds_encoder(&encoder->base)->is_dual_link;
+}
+
+static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(lvds_encoder->base.base.dev);
+ struct intel_connector *connector = lvds_encoder->attached_connector;
+ const struct drm_display_mode *fixed_mode =
+ intel_panel_preferred_fixed_mode(connector);
+ unsigned int val;
+
+ /* use the module option value if specified */
+ if (dev_priv->params.lvds_channel_mode > 0)
+ return dev_priv->params.lvds_channel_mode == 2;
+
+ /* single channel LVDS is limited to 112 MHz */
+ if (fixed_mode->clock > 112999)
+ return true;
+
+ if (dmi_check_system(intel_dual_link_lvds))
+ return true;
+
+ /*
+ * BIOS should set the proper LVDS register value at boot, but
+ * in reality, it doesn't set the value when the lid is closed;
+ * we need to check "the value to be set" in the VBT when the
+ * LVDS register is uninitialized.
+ */
+ val = intel_de_read(dev_priv, lvds_encoder->reg);
+ if (HAS_PCH_CPT(dev_priv))
+ val &= ~(LVDS_DETECTED | LVDS_PIPE_SEL_MASK_CPT);
+ else
+ val &= ~(LVDS_DETECTED | LVDS_PIPE_SEL_MASK);
+ if (val == 0)
+ val = connector->panel.vbt.bios_lvds_val;
+
+ return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP;
+}
+
+/**
+ * intel_lvds_init - setup LVDS connectors on this device
+ * @dev_priv: i915 device
+ *
+ * Create the connector, register the LVDS DDC bus, and try to figure out what
+ * modes we can display on the LVDS panel (if present).
+ */
+void intel_lvds_init(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = &dev_priv->drm;
+ struct intel_lvds_encoder *lvds_encoder;
+ struct intel_encoder *intel_encoder;
+ struct intel_connector *intel_connector;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+ struct edid *edid;
+ i915_reg_t lvds_reg;
+ u32 lvds;
+ u8 pin;
+ u32 allowed_scalers;
+
+ /* Skip init on machines we know falsely report LVDS */
+ if (dmi_check_system(intel_no_lvds)) {
+ drm_WARN(dev, !dev_priv->display.vbt.int_lvds_support,
+ "Useless DMI match. Internal LVDS support disabled by VBT\n");
+ return;
+ }
+
+ if (!dev_priv->display.vbt.int_lvds_support) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Internal LVDS support disabled by VBT\n");
+ return;
+ }
+
+ if (HAS_PCH_SPLIT(dev_priv))
+ lvds_reg = PCH_LVDS;
+ else
+ lvds_reg = LVDS;
+
+ lvds = intel_de_read(dev_priv, lvds_reg);
+
+ if (HAS_PCH_SPLIT(dev_priv)) {
+ if ((lvds & LVDS_DETECTED) == 0)
+ return;
+ }
+
+ pin = GMBUS_PIN_PANEL;
+ if (!intel_bios_is_lvds_present(dev_priv, &pin)) {
+ if ((lvds & LVDS_PORT_EN) == 0) {
+ drm_dbg_kms(&dev_priv->drm,
+ "LVDS is not present in VBT\n");
+ return;
+ }
+ drm_dbg_kms(&dev_priv->drm,
+ "LVDS is not present in VBT, but enabled anyway\n");
+ }
+
+ lvds_encoder = kzalloc(sizeof(*lvds_encoder), GFP_KERNEL);
+ if (!lvds_encoder)
+ return;
+
+ intel_connector = intel_connector_alloc();
+ if (!intel_connector) {
+ kfree(lvds_encoder);
+ return;
+ }
+
+ lvds_encoder->attached_connector = intel_connector;
+
+ intel_encoder = &lvds_encoder->base;
+ encoder = &intel_encoder->base;
+ connector = &intel_connector->base;
+ drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs,
+ DRM_MODE_CONNECTOR_LVDS);
+
+ drm_encoder_init(dev, &intel_encoder->base, &intel_lvds_enc_funcs,
+ DRM_MODE_ENCODER_LVDS, "LVDS");
+
+ intel_encoder->enable = intel_enable_lvds;
+ intel_encoder->pre_enable = intel_pre_enable_lvds;
+ intel_encoder->compute_config = intel_lvds_compute_config;
+ if (HAS_PCH_SPLIT(dev_priv)) {
+ intel_encoder->disable = pch_disable_lvds;
+ intel_encoder->post_disable = pch_post_disable_lvds;
+ } else {
+ intel_encoder->disable = gmch_disable_lvds;
+ }
+ intel_encoder->get_hw_state = intel_lvds_get_hw_state;
+ intel_encoder->get_config = intel_lvds_get_config;
+ intel_encoder->update_pipe = intel_backlight_update;
+ intel_encoder->shutdown = intel_lvds_shutdown;
+ intel_connector->get_hw_state = intel_connector_get_hw_state;
+
+ intel_connector_attach_encoder(intel_connector, intel_encoder);
+
+ intel_encoder->type = INTEL_OUTPUT_LVDS;
+ intel_encoder->power_domain = POWER_DOMAIN_PORT_OTHER;
+ intel_encoder->port = PORT_NONE;
+ intel_encoder->cloneable = 0;
+ if (DISPLAY_VER(dev_priv) < 4)
+ intel_encoder->pipe_mask = BIT(PIPE_B);
+ else
+ intel_encoder->pipe_mask = ~0;
+
+ drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs);
+ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
+
+ lvds_encoder->reg = lvds_reg;
+
+ /* create the scaling mode property */
+ allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT);
+ allowed_scalers |= BIT(DRM_MODE_SCALE_FULLSCREEN);
+ allowed_scalers |= BIT(DRM_MODE_SCALE_CENTER);
+ drm_connector_attach_scaling_mode_property(connector, allowed_scalers);
+ connector->state->scaling_mode = DRM_MODE_SCALE_ASPECT;
+
+ intel_lvds_pps_get_hw_state(dev_priv, &lvds_encoder->init_pps);
+ lvds_encoder->init_lvds_val = lvds;
+
+ /*
+ * LVDS discovery:
+ * 1) check for EDID on DDC
+ * 2) check for VBT data
+ * 3) check to see if LVDS is already on
+ * if none of the above, no panel
+ */
+
+ /*
+ * Attempt to get the fixed panel mode from DDC. Assume that the
+ * preferred mode is the right one.
+ */
+ mutex_lock(&dev->mode_config.mutex);
+ if (vga_switcheroo_handler_flags() & VGA_SWITCHEROO_CAN_SWITCH_DDC)
+ edid = drm_get_edid_switcheroo(connector,
+ intel_gmbus_get_adapter(dev_priv, pin));
+ else
+ edid = drm_get_edid(connector,
+ intel_gmbus_get_adapter(dev_priv, pin));
+ if (edid) {
+ if (drm_add_edid_modes(connector, edid)) {
+ drm_connector_update_edid_property(connector,
+ edid);
+ } else {
+ kfree(edid);
+ edid = ERR_PTR(-EINVAL);
+ }
+ } else {
+ edid = ERR_PTR(-ENOENT);
+ }
+ intel_connector->edid = edid;
+
+ intel_bios_init_panel_late(dev_priv, &intel_connector->panel, NULL,
+ IS_ERR(edid) ? NULL : edid);
+
+ /* Try EDID first */
+ intel_panel_add_edid_fixed_modes(intel_connector,
+ intel_connector->panel.vbt.drrs_type != DRRS_TYPE_NONE);
+
+ /* Failed to get EDID, what about VBT? */
+ if (!intel_panel_preferred_fixed_mode(intel_connector))
+ intel_panel_add_vbt_lfp_fixed_mode(intel_connector);
+
+ /*
+ * If we didn't get a fixed mode from EDID or VBT, try checking
+ * if the panel is already turned on. If so, assume that
+ * whatever is currently programmed is the correct mode.
+ */
+ if (!intel_panel_preferred_fixed_mode(intel_connector))
+ intel_panel_add_encoder_fixed_mode(intel_connector, intel_encoder);
+
+ mutex_unlock(&dev->mode_config.mutex);
+
+ /* If we still don't have a mode after all that, give up. */
+ if (!intel_panel_preferred_fixed_mode(intel_connector))
+ goto failed;
+
+ intel_panel_init(intel_connector);
+
+ intel_backlight_setup(intel_connector, INVALID_PIPE);
+
+ lvds_encoder->is_dual_link = compute_is_dual_link_lvds(lvds_encoder);
+ drm_dbg_kms(&dev_priv->drm, "detected %s-link lvds configuration\n",
+ lvds_encoder->is_dual_link ? "dual" : "single");
+
+ lvds_encoder->a3_power = lvds & LVDS_A3_POWER_MASK;
+
+ return;
+
+failed:
+ drm_dbg_kms(&dev_priv->drm, "No LVDS modes found, disabling.\n");
+ drm_connector_cleanup(connector);
+ drm_encoder_cleanup(encoder);
+ kfree(lvds_encoder);
+ intel_connector_free(intel_connector);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_lvds.h b/drivers/gpu/drm/i915/display/intel_lvds.h
new file mode 100644
index 000000000..9d3372dc5
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_lvds.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_LVDS_H__
+#define __INTEL_LVDS_H__
+
+#include <linux/types.h>
+
+#include "i915_reg_defs.h"
+
+enum pipe;
+struct drm_i915_private;
+
+bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv,
+ i915_reg_t lvds_reg, enum pipe *pipe);
+void intel_lvds_init(struct drm_i915_private *dev_priv);
+struct intel_encoder *intel_get_lvds_encoder(struct drm_i915_private *dev_priv);
+bool intel_is_dual_link_lvds(struct drm_i915_private *dev_priv);
+
+#endif /* __INTEL_LVDS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_modeset_setup.c b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
new file mode 100644
index 000000000..cbfabd58b
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
@@ -0,0 +1,735 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ *
+ * Read out the current hardware modeset state, and sanitize it to the current
+ * state.
+ */
+
+#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_atomic_state_helper.h>
+
+#include "i915_drv.h"
+#include "intel_atomic.h"
+#include "intel_bw.h"
+#include "intel_color.h"
+#include "intel_crtc.h"
+#include "intel_crtc_state_dump.h"
+#include "intel_ddi.h"
+#include "intel_de.h"
+#include "intel_display.h"
+#include "intel_display_power.h"
+#include "intel_display_types.h"
+#include "intel_modeset_setup.h"
+#include "intel_pch_display.h"
+#include "intel_pm.h"
+#include "skl_watermark.h"
+
+static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct intel_encoder *encoder;
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_bw_state *bw_state =
+ to_intel_bw_state(i915->display.bw.obj.state);
+ struct intel_cdclk_state *cdclk_state =
+ to_intel_cdclk_state(i915->display.cdclk.obj.state);
+ struct intel_dbuf_state *dbuf_state =
+ to_intel_dbuf_state(i915->display.dbuf.obj.state);
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ struct intel_plane *plane;
+ struct drm_atomic_state *state;
+ struct intel_crtc_state *temp_crtc_state;
+ enum pipe pipe = crtc->pipe;
+ int ret;
+
+ if (!crtc_state->hw.active)
+ return;
+
+ for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
+ const struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
+
+ if (plane_state->uapi.visible)
+ intel_plane_disable_noatomic(crtc, plane);
+ }
+
+ state = drm_atomic_state_alloc(&i915->drm);
+ if (!state) {
+ drm_dbg_kms(&i915->drm,
+ "failed to disable [CRTC:%d:%s], out of memory",
+ crtc->base.base.id, crtc->base.name);
+ return;
+ }
+
+ state->acquire_ctx = ctx;
+
+ /* Everything's already locked, -EDEADLK can't happen. */
+ temp_crtc_state = intel_atomic_get_crtc_state(state, crtc);
+ ret = drm_atomic_add_affected_connectors(state, &crtc->base);
+
+ drm_WARN_ON(&i915->drm, IS_ERR(temp_crtc_state) || ret);
+
+ i915->display.funcs.display->crtc_disable(to_intel_atomic_state(state), crtc);
+
+ drm_atomic_state_put(state);
+
+ drm_dbg_kms(&i915->drm,
+ "[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
+ crtc->base.base.id, crtc->base.name);
+
+ crtc->active = false;
+ crtc->base.enabled = false;
+
+ drm_WARN_ON(&i915->drm,
+ drm_atomic_set_mode_for_crtc(&crtc_state->uapi, NULL) < 0);
+ crtc_state->uapi.active = false;
+ crtc_state->uapi.connector_mask = 0;
+ crtc_state->uapi.encoder_mask = 0;
+ intel_crtc_free_hw_state(crtc_state);
+ memset(&crtc_state->hw, 0, sizeof(crtc_state->hw));
+
+ for_each_encoder_on_crtc(&i915->drm, &crtc->base, encoder)
+ encoder->base.crtc = NULL;
+
+ intel_fbc_disable(crtc);
+ intel_update_watermarks(i915);
+ intel_disable_shared_dpll(crtc_state);
+
+ intel_display_power_put_all_in_set(i915, &crtc->enabled_power_domains);
+
+ cdclk_state->min_cdclk[pipe] = 0;
+ cdclk_state->min_voltage_level[pipe] = 0;
+ cdclk_state->active_pipes &= ~BIT(pipe);
+
+ dbuf_state->active_pipes &= ~BIT(pipe);
+
+ bw_state->data_rate[pipe] = 0;
+ bw_state->num_active_planes[pipe] = 0;
+}
+
+static void intel_modeset_update_connector_atomic_state(struct drm_i915_private *i915)
+{
+ struct intel_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+
+ drm_connector_list_iter_begin(&i915->drm, &conn_iter);
+ for_each_intel_connector_iter(connector, &conn_iter) {
+ struct drm_connector_state *conn_state = connector->base.state;
+ struct intel_encoder *encoder =
+ to_intel_encoder(connector->base.encoder);
+
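+ /*
+ * Rebalance the connector reference held for the crtc link:
+ * drop the old reference here and, if the connector is still
+ * attached to an encoder, take a new one below.
+ */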
+ if (conn_state->crtc)
+ drm_connector_put(&connector->base);
+
+ if (encoder) {
+ struct intel_crtc *crtc =
+ to_intel_crtc(encoder->base.crtc);
+ const struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+
+ conn_state->best_encoder = &encoder->base;
+ conn_state->crtc = &crtc->base;
+ conn_state->max_bpc = (crtc_state->pipe_bpp ?: 24) / 3;
+
+ drm_connector_get(&connector->base);
+ } else {
+ conn_state->best_encoder = NULL;
+ conn_state->crtc = NULL;
+ }
+ }
+ drm_connector_list_iter_end(&conn_iter);
+}
+
+static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state)
+{
+ if (intel_crtc_is_bigjoiner_slave(crtc_state))
+ return;
+
+ crtc_state->uapi.enable = crtc_state->hw.enable;
+ crtc_state->uapi.active = crtc_state->hw.active;
+ drm_WARN_ON(crtc_state->uapi.crtc->dev,
+ drm_atomic_set_mode_for_crtc(&crtc_state->uapi, &crtc_state->hw.mode) < 0);
+
+ crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode;
+ crtc_state->uapi.scaling_filter = crtc_state->hw.scaling_filter;
+
+ drm_property_replace_blob(&crtc_state->uapi.degamma_lut,
+ crtc_state->hw.degamma_lut);
+ drm_property_replace_blob(&crtc_state->uapi.gamma_lut,
+ crtc_state->hw.gamma_lut);
+ drm_property_replace_blob(&crtc_state->uapi.ctm,
+ crtc_state->hw.ctm);
+}
+
+static void
+intel_sanitize_plane_mapping(struct drm_i915_private *i915)
+{
+ struct intel_crtc *crtc;
+
+ if (DISPLAY_VER(i915) >= 4)
+ return;
+
+ for_each_intel_crtc(&i915->drm, crtc) {
+ struct intel_plane *plane =
+ to_intel_plane(crtc->base.primary);
+ struct intel_crtc *plane_crtc;
+ enum pipe pipe;
+
+ if (!plane->get_hw_state(plane, &pipe))
+ continue;
+
+ if (pipe == crtc->pipe)
+ continue;
+
+ drm_dbg_kms(&i915->drm,
+ "[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
+ plane->base.base.id, plane->base.name);
+
+ plane_crtc = intel_crtc_for_pipe(i915, pipe);
+ intel_plane_disable_noatomic(plane_crtc, plane);
+ }
+}
+
+static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct intel_encoder *encoder;
+
+ for_each_encoder_on_crtc(dev, &crtc->base, encoder)
+ return true;
+
+ return false;
+}
+
+static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct intel_connector *connector;
+
+ for_each_connector_on_encoder(dev, &encoder->base, connector)
+ return connector;
+
+ return NULL;
+}
+
+static void intel_sanitize_fifo_underrun_reporting(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+
+ if (!crtc_state->hw.active && !HAS_GMCH(i915))
+ return;
+
+ /*
+ * We start out with underrun reporting disabled to avoid races.
+ * For correct bookkeeping mark this on active crtcs.
+ *
+ * Also, on gmch platforms we don't have any hardware bits to
+ * disable the underrun reporting. Which means we need to start
+ * out with underrun reporting disabled also on inactive pipes,
+ * since otherwise we'll complain about the garbage we read when
+ * e.g. coming up after runtime pm.
+ *
+ * No protection against concurrent access is required - at
+ * worst a fifo underrun happens which also sets this to false.
+ */
+ crtc->cpu_fifo_underrun_disabled = true;
+
+ /*
+ * We track the PCH transcoder underrun reporting state
+ * within the crtc. With crtc for pipe A housing the underrun
+ * reporting state for PCH transcoder A, crtc for pipe B housing
+ * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
+ * and marking underrun reporting as disabled for the non-existing
+ * PCH transcoders B and C would prevent enabling the south
+ * error interrupt (see cpt_can_enable_serr_int()).
+ */
+ if (intel_has_pch_trancoder(i915, crtc->pipe))
+ crtc->pch_fifo_underrun_disabled = true;
+}
+
+static void intel_sanitize_crtc(struct intel_crtc *crtc,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
+
+ if (crtc_state->hw.active) {
+ struct intel_plane *plane;
+
+ /* Disable everything but the primary plane */
+ for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
+ const struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
+
+ if (plane_state->uapi.visible &&
+ plane->base.type != DRM_PLANE_TYPE_PRIMARY)
+ intel_plane_disable_noatomic(crtc, plane);
+ }
+
+ /* Disable any background color/etc. set by the BIOS */
+ intel_color_commit_noarm(crtc_state);
+ intel_color_commit_arm(crtc_state);
+ }
+
+ /*
+ * Adjust the state of the output pipe according to whether we have
+ * active connectors/encoders.
+ */
+ if (crtc_state->hw.active && !intel_crtc_has_encoders(crtc) &&
+ !intel_crtc_is_bigjoiner_slave(crtc_state))
+ intel_crtc_disable_noatomic(crtc, ctx);
+}
+
+static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+
+ /*
+ * Some SNB BIOSen (e.g. ASUS K53SV) are known to misprogram
+ * the hardware when a high res display is plugged in. The DPLL P
+ * divider is zero, and the pipe timings are bonkers. We'll
+ * try to disable everything in that case.
+ *
+ * FIXME would be nice to be able to sanitize this state
+ * without several WARNs, but for now let's take the easy
+ * road.
+ */
+ return IS_SANDYBRIDGE(i915) &&
+ crtc_state->hw.active &&
+ crtc_state->shared_dpll &&
+ crtc_state->port_clock == 0;
+}
+
+static void intel_sanitize_encoder(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_connector *connector;
+ struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+ struct intel_crtc_state *crtc_state = crtc ?
+ to_intel_crtc_state(crtc->base.state) : NULL;
+
+ /*
+ * We need to check both for a crtc link (meaning that the encoder is
+ * active and trying to read from a pipe) and the pipe itself being
+ * active.
+ */
+ bool has_active_crtc = crtc_state &&
+ crtc_state->hw.active;
+
+ if (crtc_state && has_bogus_dpll_config(crtc_state)) {
+ drm_dbg_kms(&i915->drm,
+ "BIOS has misprogrammed the hardware. Disabling pipe %c\n",
+ pipe_name(crtc->pipe));
+ has_active_crtc = false;
+ }
+
+ connector = intel_encoder_find_connector(encoder);
+ if (connector && !has_active_crtc) {
+ drm_dbg_kms(&i915->drm,
+ "[ENCODER:%d:%s] has active connectors but no active pipe!\n",
+ encoder->base.base.id,
+ encoder->base.name);
+
+ /*
+ * Connector is active, but has no active pipe. This is fallout
+ * from our resume register restoring. Disable the encoder
+ * manually again.
+ */
+ if (crtc_state) {
+ struct drm_encoder *best_encoder;
+
+ drm_dbg_kms(&i915->drm,
+ "[ENCODER:%d:%s] manually disabled\n",
+ encoder->base.base.id,
+ encoder->base.name);
+
+ /* avoid oopsing in case the hooks consult best_encoder */
+ best_encoder = connector->base.state->best_encoder;
+ connector->base.state->best_encoder = &encoder->base;
+
+ /* FIXME NULL atomic state passed! */
+ if (encoder->disable)
+ encoder->disable(NULL, encoder, crtc_state,
+ connector->base.state);
+ if (encoder->post_disable)
+ encoder->post_disable(NULL, encoder, crtc_state,
+ connector->base.state);
+
+ connector->base.state->best_encoder = best_encoder;
+ }
+ encoder->base.crtc = NULL;
+
+ /*
+ * Inconsistent output/port/pipe state happens presumably due to
+ * a bug in one of the get_hw_state functions. Or someplace else
+ * in our code, like the register restore mess on resume. Clamp
+ * things to off as a safer default.
+ */
+ connector->base.dpms = DRM_MODE_DPMS_OFF;
+ connector->base.encoder = NULL;
+ }
+
+ /* notify opregion of the sanitized encoder state */
+ intel_opregion_notify_encoder(encoder, connector && has_active_crtc);
+
+ if (HAS_DDI(i915))
+ intel_ddi_sanitize_encoder_pll_mapping(encoder);
+}
+
+/* FIXME read out full plane state for all planes */
+static void readout_plane_state(struct drm_i915_private *i915)
+{
+ struct intel_plane *plane;
+ struct intel_crtc *crtc;
+
+ for_each_intel_plane(&i915->drm, plane) {
+ struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
+ struct intel_crtc_state *crtc_state;
+ enum pipe pipe = PIPE_A;
+ bool visible;
+
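+ /*
+ * pipe defaults to PIPE_A above so the crtc lookup below stays
+ * valid even for a disabled plane.
+ */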
+ visible = plane->get_hw_state(plane, &pipe);
+
+ crtc = intel_crtc_for_pipe(i915, pipe);
+ crtc_state = to_intel_crtc_state(crtc->base.state);
+
+ intel_set_plane_visible(crtc_state, plane_state, visible);
+
+ drm_dbg_kms(&i915->drm,
+ "[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
+ plane->base.base.id, plane->base.name,
+ str_enabled_disabled(visible), pipe_name(pipe));
+ }
+
+ for_each_intel_crtc(&i915->drm, crtc) {
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+
+ intel_plane_fixup_bitmasks(crtc_state);
+ }
+}
+
+static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
+{
+ struct intel_cdclk_state *cdclk_state =
+ to_intel_cdclk_state(i915->display.cdclk.obj.state);
+ struct intel_dbuf_state *dbuf_state =
+ to_intel_dbuf_state(i915->display.dbuf.obj.state);
+ enum pipe pipe;
+ struct intel_crtc *crtc;
+ struct intel_encoder *encoder;
+ struct intel_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+ u8 active_pipes = 0;
+
+ for_each_intel_crtc(&i915->drm, crtc) {
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+
+ __drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi);
+ intel_crtc_free_hw_state(crtc_state);
+ intel_crtc_state_reset(crtc_state, crtc);
+
+ intel_crtc_get_pipe_config(crtc_state);
+
+ crtc_state->hw.enable = crtc_state->hw.active;
+
+ crtc->base.enabled = crtc_state->hw.enable;
+ crtc->active = crtc_state->hw.active;
+
+ if (crtc_state->hw.active)
+ active_pipes |= BIT(crtc->pipe);
+
+ drm_dbg_kms(&i915->drm,
+ "[CRTC:%d:%s] hw state readout: %s\n",
+ crtc->base.base.id, crtc->base.name,
+ str_enabled_disabled(crtc_state->hw.active));
+ }
+
+ cdclk_state->active_pipes = active_pipes;
+ dbuf_state->active_pipes = active_pipes;
+
+ readout_plane_state(i915);
+
+ for_each_intel_encoder(&i915->drm, encoder) {
+ struct intel_crtc_state *crtc_state = NULL;
+
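+ /* default to pipe A so the debug print below stays well-defined */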
+ pipe = 0;
+
+ if (encoder->get_hw_state(encoder, &pipe)) {
+ crtc = intel_crtc_for_pipe(i915, pipe);
+ crtc_state = to_intel_crtc_state(crtc->base.state);
+
+ encoder->base.crtc = &crtc->base;
+ intel_encoder_get_config(encoder, crtc_state);
+
+ /* read out to slave crtc as well for bigjoiner */
+ if (crtc_state->bigjoiner_pipes) {
+ struct intel_crtc *slave_crtc;
+
+ /* the encoder should be linked to the bigjoiner master */
+ WARN_ON(intel_crtc_is_bigjoiner_slave(crtc_state));
+
+ for_each_intel_crtc_in_pipe_mask(&i915->drm, slave_crtc,
+ intel_crtc_bigjoiner_slave_pipes(crtc_state)) {
+ struct intel_crtc_state *slave_crtc_state;
+
+ slave_crtc_state = to_intel_crtc_state(slave_crtc->base.state);
+ intel_encoder_get_config(encoder, slave_crtc_state);
+ }
+ }
+ } else {
+ encoder->base.crtc = NULL;
+ }
+
+ if (encoder->sync_state)
+ encoder->sync_state(encoder, crtc_state);
+
+ drm_dbg_kms(&i915->drm,
+ "[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
+ encoder->base.base.id, encoder->base.name,
+ str_enabled_disabled(encoder->base.crtc),
+ pipe_name(pipe));
+ }
+
+ intel_dpll_readout_hw_state(i915);
+
+ drm_connector_list_iter_begin(&i915->drm, &conn_iter);
+ for_each_intel_connector_iter(connector, &conn_iter) {
+ if (connector->get_hw_state(connector)) {
+ struct intel_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
+
+ connector->base.dpms = DRM_MODE_DPMS_ON;
+
+ encoder = intel_attached_encoder(connector);
+ connector->base.encoder = &encoder->base;
+
+ crtc = to_intel_crtc(encoder->base.crtc);
+ crtc_state = crtc ? to_intel_crtc_state(crtc->base.state) : NULL;
+
+ if (crtc_state && crtc_state->hw.active) {
+ /*
+ * This has to be done during hardware readout
+ * because anything calling .crtc_disable may
+ * rely on the connector_mask being accurate.
+ */
+ crtc_state->uapi.connector_mask |=
+ drm_connector_mask(&connector->base);
+ crtc_state->uapi.encoder_mask |=
+ drm_encoder_mask(&encoder->base);
+ }
+ } else {
+ connector->base.dpms = DRM_MODE_DPMS_OFF;
+ connector->base.encoder = NULL;
+ }
+ drm_dbg_kms(&i915->drm,
+ "[CONNECTOR:%d:%s] hw state readout: %s\n",
+ connector->base.base.id, connector->base.name,
+ str_enabled_disabled(connector->base.encoder));
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ for_each_intel_crtc(&i915->drm, crtc) {
+ struct intel_bw_state *bw_state =
+ to_intel_bw_state(i915->display.bw.obj.state);
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ struct intel_plane *plane;
+ int min_cdclk = 0;
+
+ if (crtc_state->hw.active) {
+ /*
+ * The initial mode needs to be set in order to keep
+ * the atomic core happy. It wants a valid mode if the
+ * crtc is enabled, so we copy the hw mode into the
+ * uapi state below.
+ *
+ * But we don't set all the derived state fully, hence
+ * set a flag to indicate that a full recalculation is
+ * needed on the next commit.
+ */
+ crtc_state->inherited = true;
+
+ intel_crtc_update_active_timings(crtc_state);
+
+ intel_crtc_copy_hw_to_uapi_state(crtc_state);
+ }
+
+ for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
+ const struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
+
+ /*
+ * FIXME don't have the fb yet, so can't
+ * use intel_plane_data_rate() :(
+ */
+ if (plane_state->uapi.visible)
+ crtc_state->data_rate[plane->id] =
+ 4 * crtc_state->pixel_rate;
+ /*
+ * FIXME don't have the fb yet, so can't
+ * use plane->min_cdclk() :(
+ */
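+ /*
+ * Pessimistic estimate: require cdclk >= pixel rate, halved on
+ * hw that can emit two pixels per clock (double wide / gen10+).
+ */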
+ if (plane_state->uapi.visible && plane->min_cdclk) {
+ if (crtc_state->double_wide || DISPLAY_VER(i915) >= 10)
+ crtc_state->min_cdclk[plane->id] =
+ DIV_ROUND_UP(crtc_state->pixel_rate, 2);
+ else
+ crtc_state->min_cdclk[plane->id] =
+ crtc_state->pixel_rate;
+ }
+ drm_dbg_kms(&i915->drm,
+ "[PLANE:%d:%s] min_cdclk %d kHz\n",
+ plane->base.base.id, plane->base.name,
+ crtc_state->min_cdclk[plane->id]);
+ }
+
+ if (crtc_state->hw.active) {
+ min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
+ if (drm_WARN_ON(&i915->drm, min_cdclk < 0))
+ min_cdclk = 0;
+ }
+
+ cdclk_state->min_cdclk[crtc->pipe] = min_cdclk;
+ cdclk_state->min_voltage_level[crtc->pipe] =
+ crtc_state->min_voltage_level;
+
+ intel_bw_crtc_update(bw_state, crtc_state);
+ }
+}
+
+static void
+get_encoder_power_domains(struct drm_i915_private *i915)
+{
+ struct intel_encoder *encoder;
+
+ for_each_intel_encoder(&i915->drm, encoder) {
+ struct intel_crtc_state *crtc_state;
+
+ if (!encoder->get_power_domains)
+ continue;
+
+ /*
+ * MST-primary and inactive encoders don't have a crtc state
+ * and neither of these requires any power domain references.
+ */
+ if (!encoder->base.crtc)
+ continue;
+
+ crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
+ encoder->get_power_domains(encoder, crtc_state);
+ }
+}
+
+static void intel_early_display_was(struct drm_i915_private *i915)
+{
+ /*
+ * Display WA #1185 WaDisableDARBFClkGating:glk,icl,ehl,tgl
+ * Also known as Wa_14010480278.
+ */
+ if (IS_DISPLAY_VER(i915, 10, 12))
+ intel_de_write(i915, GEN9_CLKGATE_DIS_0,
+ intel_de_read(i915, GEN9_CLKGATE_DIS_0) | DARBF_GATING_DIS);
+
+ if (IS_HASWELL(i915)) {
+ /*
+ * WaRsPkgCStateDisplayPMReq:hsw
+ * System hang if this isn't done before disabling all planes!
+ */
+ intel_de_write(i915, CHICKEN_PAR1_1,
+ intel_de_read(i915, CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
+ }
+
+ if (IS_KABYLAKE(i915) || IS_COFFEELAKE(i915) || IS_COMETLAKE(i915)) {
+ /* Display WA #1142:kbl,cfl,cml */
+ intel_de_rmw(i915, CHICKEN_PAR1_1,
+ KBL_ARB_FILL_SPARE_22, KBL_ARB_FILL_SPARE_22);
+ intel_de_rmw(i915, CHICKEN_MISC_2,
+ KBL_ARB_FILL_SPARE_13 | KBL_ARB_FILL_SPARE_14,
+ KBL_ARB_FILL_SPARE_14);
+ }
+}
+
+void intel_modeset_setup_hw_state(struct drm_i915_private *i915,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct intel_encoder *encoder;
+ struct intel_crtc *crtc;
+ intel_wakeref_t wakeref;
+
+ wakeref = intel_display_power_get(i915, POWER_DOMAIN_INIT);
+
+ intel_early_display_was(i915);
+ intel_modeset_readout_hw_state(i915);
+
+ /* HW state is read out, now we need to sanitize this mess. */
+ get_encoder_power_domains(i915);
+
+ intel_pch_sanitize(i915);
+
+ /*
+ * intel_sanitize_plane_mapping() may need to do vblank
+ * waits, so we need vblank interrupts restored beforehand.
+ */
+ for_each_intel_crtc(&i915->drm, crtc) {
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+
+ intel_sanitize_fifo_underrun_reporting(crtc_state);
+
+ drm_crtc_vblank_reset(&crtc->base);
+
+ if (crtc_state->hw.active)
+ intel_crtc_vblank_on(crtc_state);
+ }
+
+ intel_fbc_sanitize(i915);
+
+ intel_sanitize_plane_mapping(i915);
+
+ for_each_intel_encoder(&i915->drm, encoder)
+ intel_sanitize_encoder(encoder);
+
+ for_each_intel_crtc(&i915->drm, crtc) {
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+
+ intel_sanitize_crtc(crtc, ctx);
+ intel_crtc_state_dump(crtc_state, NULL, "setup_hw_state");
+ }
+
+ intel_modeset_update_connector_atomic_state(i915);
+
+ intel_dpll_sanitize_state(i915);
+
+ if (IS_G4X(i915)) {
+ g4x_wm_get_hw_state(i915);
+ g4x_wm_sanitize(i915);
+ } else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
+ vlv_wm_get_hw_state(i915);
+ vlv_wm_sanitize(i915);
+ } else if (DISPLAY_VER(i915) >= 9) {
+ skl_wm_get_hw_state(i915);
+ skl_wm_sanitize(i915);
+ } else if (HAS_PCH_SPLIT(i915)) {
+ ilk_wm_get_hw_state(i915);
+ }
+
+ for_each_intel_crtc(&i915->drm, crtc) {
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ struct intel_power_domain_mask put_domains;
+
+ intel_modeset_get_crtc_power_domains(crtc_state, &put_domains);
+ if (drm_WARN_ON(&i915->drm, !bitmap_empty(put_domains.bits, POWER_DOMAIN_NUM)))
+ intel_modeset_put_crtc_power_domains(crtc, &put_domains);
+ }
+
+ intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
+
+ intel_power_domains_sanitize_state(i915);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_modeset_setup.h b/drivers/gpu/drm/i915/display/intel_modeset_setup.h
new file mode 100644
index 000000000..3beff67b3
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_modeset_setup.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __INTEL_MODESET_SETUP_H__
+#define __INTEL_MODESET_SETUP_H__
+
+struct drm_i915_private;
+struct drm_modeset_acquire_ctx;
+
+void intel_modeset_setup_hw_state(struct drm_i915_private *i915,
+ struct drm_modeset_acquire_ctx *ctx);
+
+#endif /* __INTEL_MODESET_SETUP_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_modeset_verify.c b/drivers/gpu/drm/i915/display/intel_modeset_verify.c
new file mode 100644
index 000000000..0fdcf2e6d
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_modeset_verify.c
@@ -0,0 +1,246 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ *
+ * High level crtc/connector/encoder modeset state verification.
+ */
+
+#include <drm/drm_atomic_state_helper.h>
+
+#include "i915_drv.h"
+#include "intel_atomic.h"
+#include "intel_crtc.h"
+#include "intel_crtc_state_dump.h"
+#include "intel_display.h"
+#include "intel_display_types.h"
+#include "intel_fdi.h"
+#include "intel_modeset_verify.h"
+#include "intel_snps_phy.h"
+#include "skl_watermark.h"
+
+/*
+ * Cross check the actual hw state with our own modeset state tracking (and its
+ * internal consistency).
+ */
+static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
+ connector->base.base.id, connector->base.name);
+
+ if (connector->get_hw_state(connector)) {
+ struct intel_encoder *encoder = intel_attached_encoder(connector);
+
+ I915_STATE_WARN(!crtc_state,
+ "connector enabled without attached crtc\n");
+
+ if (!crtc_state)
+ return;
+
+ I915_STATE_WARN(!crtc_state->hw.active,
+ "connector is active, but attached crtc isn't\n");
+
+ if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
+ return;
+
+ I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
+ "atomic encoder doesn't match attached encoder\n");
+
+ I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
+ "attached encoder crtc differs from connector crtc\n");
+ } else {
+ I915_STATE_WARN(crtc_state && crtc_state->hw.active,
+ "attached crtc is active, but connector isn't\n");
+ I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
+ "best encoder set without crtc!\n");
+ }
+}
+
+static void
+verify_connector_state(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_connector *connector;
+ struct drm_connector_state *new_conn_state;
+ int i;
+
+ for_each_new_connector_in_state(&state->base, connector, new_conn_state, i) {
+ struct drm_encoder *encoder = connector->encoder;
+ struct intel_crtc_state *crtc_state = NULL;
+
+ if (new_conn_state->crtc != &crtc->base)
+ continue;
+
+ if (crtc)
+ crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
+
+ intel_connector_verify_state(crtc_state, new_conn_state);
+
+ I915_STATE_WARN(new_conn_state->best_encoder != encoder,
+ "connector's atomic encoder doesn't match legacy encoder\n");
+ }
+}
+
+static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
+ const struct intel_crtc_state *pipe_config)
+{
+ if (pipe_config->has_pch_encoder) {
+ int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
+ &pipe_config->fdi_m_n);
+ int dotclock = pipe_config->hw.adjusted_mode.crtc_clock;
+
+ /*
+ * FDI already provided one idea for the dotclock.
+ * Yell if the encoder disagrees. Allow for slight
+ * rounding differences.
+ */
+ drm_WARN(&dev_priv->drm, abs(fdi_dotclock - dotclock) > 1,
+ "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
+ fdi_dotclock, dotclock);
+ }
+}
+
+static void
+verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_state *state)
+{
+ struct intel_encoder *encoder;
+ struct drm_connector *connector;
+ struct drm_connector_state *old_conn_state, *new_conn_state;
+ int i;
+
+ for_each_intel_encoder(&dev_priv->drm, encoder) {
+ bool enabled = false, found = false;
+ enum pipe pipe;
+
+ drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s]\n",
+ encoder->base.base.id,
+ encoder->base.name);
+
+ for_each_oldnew_connector_in_state(&state->base, connector, old_conn_state,
+ new_conn_state, i) {
+ if (old_conn_state->best_encoder == &encoder->base)
+ found = true;
+
+ if (new_conn_state->best_encoder != &encoder->base)
+ continue;
+
+ found = true;
+ enabled = true;
+
+ I915_STATE_WARN(new_conn_state->crtc !=
+ encoder->base.crtc,
+ "connector's crtc doesn't match encoder crtc\n");
+ }
+
+ if (!found)
+ continue;
+
+ I915_STATE_WARN(!!encoder->base.crtc != enabled,
+ "encoder's enabled state mismatch (expected %i, found %i)\n",
+ !!encoder->base.crtc, enabled);
+
+ if (!encoder->base.crtc) {
+ bool active;
+
+ active = encoder->get_hw_state(encoder, &pipe);
+ I915_STATE_WARN(active,
+ "encoder detached but still enabled on pipe %c.\n",
+ pipe_name(pipe));
+ }
+ }
+}
+
+static void
+verify_crtc_state(struct intel_crtc *crtc,
+ struct intel_crtc_state *old_crtc_state,
+ struct intel_crtc_state *new_crtc_state)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_encoder *encoder;
+ struct intel_crtc_state *pipe_config = old_crtc_state;
+ struct drm_atomic_state *state = old_crtc_state->uapi.state;
+ struct intel_crtc *master_crtc;
+
+ __drm_atomic_helper_crtc_destroy_state(&old_crtc_state->uapi);
+ intel_crtc_free_hw_state(old_crtc_state);
+ intel_crtc_state_reset(old_crtc_state, crtc);
+ old_crtc_state->uapi.state = state;
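+ /* old_crtc_state is reused as scratch space for the hw readout below */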
+
+ drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s]\n", crtc->base.base.id,
+ crtc->base.name);
+
+ pipe_config->hw.enable = new_crtc_state->hw.enable;
+
+ intel_crtc_get_pipe_config(pipe_config);
+
+ /* we keep both pipes enabled on 830 */
+ if (IS_I830(dev_priv) && pipe_config->hw.active)
+ pipe_config->hw.active = new_crtc_state->hw.active;
+
+ I915_STATE_WARN(new_crtc_state->hw.active != pipe_config->hw.active,
+ "crtc active state doesn't match with hw state (expected %i, found %i)\n",
+ new_crtc_state->hw.active, pipe_config->hw.active);
+
+ I915_STATE_WARN(crtc->active != new_crtc_state->hw.active,
+ "transitional active state does not match atomic hw state (expected %i, found %i)\n",
+ new_crtc_state->hw.active, crtc->active);
+
+ master_crtc = intel_master_crtc(new_crtc_state);
+
+ for_each_encoder_on_crtc(dev, &master_crtc->base, encoder) {
+ enum pipe pipe;
+ bool active;
+
+ active = encoder->get_hw_state(encoder, &pipe);
+ I915_STATE_WARN(active != new_crtc_state->hw.active,
+ "[ENCODER:%i] active %i with crtc active %i\n",
+ encoder->base.base.id, active,
+ new_crtc_state->hw.active);
+
+ I915_STATE_WARN(active && master_crtc->pipe != pipe,
+ "Encoder connected to wrong pipe %c\n",
+ pipe_name(pipe));
+
+ if (active)
+ intel_encoder_get_config(encoder, pipe_config);
+ }
+
+ if (!new_crtc_state->hw.active)
+ return;
+
+ intel_pipe_config_sanity_check(dev_priv, pipe_config);
+
+ if (!intel_pipe_config_compare(new_crtc_state,
+ pipe_config, false)) {
+ I915_STATE_WARN(1, "pipe state doesn't match!\n");
+ intel_crtc_state_dump(pipe_config, NULL, "hw state");
+ intel_crtc_state_dump(new_crtc_state, NULL, "sw state");
+ }
+}
+
+void intel_modeset_verify_crtc(struct intel_crtc *crtc,
+ struct intel_atomic_state *state,
+ struct intel_crtc_state *old_crtc_state,
+ struct intel_crtc_state *new_crtc_state)
+{
+ if (!intel_crtc_needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe)
+ return;
+
+ intel_wm_state_verify(crtc, new_crtc_state);
+ verify_connector_state(state, crtc);
+ verify_crtc_state(crtc, old_crtc_state, new_crtc_state);
+ intel_shared_dpll_state_verify(crtc, old_crtc_state, new_crtc_state);
+ intel_mpllb_state_verify(state, new_crtc_state);
+}
+
+void intel_modeset_verify_disabled(struct drm_i915_private *dev_priv,
+ struct intel_atomic_state *state)
+{
+ verify_encoder_state(dev_priv, state);
+ verify_connector_state(state, NULL);
+ intel_shared_dpll_verify_disabled(dev_priv);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_modeset_verify.h b/drivers/gpu/drm/i915/display/intel_modeset_verify.h
new file mode 100644
index 000000000..2d6fbe4f7
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_modeset_verify.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __INTEL_MODESET_VERIFY_H__
+#define __INTEL_MODESET_VERIFY_H__
+
+struct drm_i915_private;
+struct intel_atomic_state;
+struct intel_crtc;
+struct intel_crtc_state;
+
+void intel_modeset_verify_crtc(struct intel_crtc *crtc,
+ struct intel_atomic_state *state,
+ struct intel_crtc_state *old_crtc_state,
+ struct intel_crtc_state *new_crtc_state);
+void intel_modeset_verify_disabled(struct drm_i915_private *dev_priv,
+ struct intel_atomic_state *state);
+
+#endif /* __INTEL_MODESET_VERIFY_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c
new file mode 100644
index 000000000..caa07ef34
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_opregion.c
@@ -0,0 +1,1250 @@
+/*
+ * Copyright 2008 Intel Corporation <hong.liu@intel.com>
+ * Copyright 2008 Red Hat <mjg@redhat.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. IN NO EVENT SHALL INTEL AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/acpi.h>
+#include <linux/dmi.h>
+#include <linux/firmware.h>
+#include <acpi/video.h>
+
+#include <drm/drm_edid.h>
+
+#include "i915_drv.h"
+#include "intel_acpi.h"
+#include "intel_backlight.h"
+#include "intel_display_types.h"
+#include "intel_opregion.h"
+#include "intel_pci_config.h"
+
+#define OPREGION_HEADER_OFFSET 0
+#define OPREGION_ACPI_OFFSET 0x100
+#define ACPI_CLID 0x01ac /* current lid state indicator */
+#define ACPI_CDCK 0x01b0 /* current docking state indicator */
+#define OPREGION_SWSCI_OFFSET 0x200
+#define OPREGION_ASLE_OFFSET 0x300
+#define OPREGION_VBT_OFFSET 0x400
+#define OPREGION_ASLE_EXT_OFFSET 0x1C00
+
+#define OPREGION_SIGNATURE "IntelGraphicsMem"
+#define MBOX_ACPI BIT(0) /* Mailbox #1 */
+#define MBOX_SWSCI BIT(1) /* Mailbox #2 (obsolete from v2.x) */
+#define MBOX_ASLE BIT(2) /* Mailbox #3 */
+#define MBOX_ASLE_EXT BIT(4) /* Mailbox #5 */
+#define MBOX_BACKLIGHT BIT(5) /* Mailbox #2 (valid from v3.x) */
+
+#define PCON_HEADLESS_SKU BIT(13)
+
+struct opregion_header {
+ u8 signature[16];
+ u32 size;
+ struct {
+ u8 rsvd;
+ u8 revision;
+ u8 minor;
+ u8 major;
+ } __packed over;
+ u8 bios_ver[32];
+ u8 vbios_ver[16];
+ u8 driver_ver[16];
+ u32 mboxes;
+ u32 driver_model;
+ u32 pcon;
+ u8 dver[32];
+ u8 rsvd[124];
+} __packed;
+
+/* OpRegion mailbox #1: public ACPI methods */
+struct opregion_acpi {
+ u32 drdy; /* driver readiness */
+ u32 csts; /* notification status */
+ u32 cevt; /* current event */
+ u8 rsvd1[20];
+ u32 didl[8]; /* supported display devices ID list */
+ u32 cpdl[8]; /* currently presented display list */
+ u32 cadl[8]; /* currently active display list */
+ u32 nadl[8]; /* next active devices list */
+ u32 aslp; /* ASL sleep time-out */
+ u32 tidx; /* toggle table index */
+ u32 chpd; /* current hotplug enable indicator */
+ u32 clid; /* current lid state*/
+ u32 cdck; /* current docking state */
+ u32 sxsw; /* Sx state resume */
+ u32 evts; /* ASL supported events */
+ u32 cnot; /* current OS notification */
+ u32 nrdy; /* driver status */
+ u32 did2[7]; /* extended supported display devices ID list */
+ u32 cpd2[7]; /* extended attached display devices list */
+ u8 rsvd2[4];
+} __packed;
+
+/* OpRegion mailbox #2: SWSCI */
+struct opregion_swsci {
+ u32 scic; /* SWSCI command|status|data */
+ u32 parm; /* command parameters */
+ u32 dslp; /* driver sleep time-out */
+ u8 rsvd[244];
+} __packed;
+
+/* OpRegion mailbox #3: ASLE */
+struct opregion_asle {
+ u32 ardy; /* driver readiness */
+ u32 aslc; /* ASLE interrupt command */
+ u32 tche; /* technology enabled indicator */
+ u32 alsi; /* current ALS illuminance reading */
+ u32 bclp; /* backlight brightness to set */
+ u32 pfit; /* panel fitting state */
+ u32 cblv; /* current brightness level */
+ u16 bclm[20]; /* backlight level duty cycle mapping table */
+ u32 cpfm; /* current panel fitting mode */
+ u32 epfm; /* enabled panel fitting modes */
+ u8 plut[74]; /* panel LUT and identifier */
+ u32 pfmb; /* PWM freq and min brightness */
+ u32 cddv; /* color correction default values */
+ u32 pcft; /* power conservation features */
+ u32 srot; /* supported rotation angles */
+ u32 iuer; /* IUER events */
+ u64 fdss;
+ u32 fdsp;
+ u32 stat;
+ u64 rvda; /* Physical (2.0) or relative from opregion (2.1+)
+ * address of raw VBT data. */
+ u32 rvds; /* Size of raw vbt data */
+ u8 rsvd[58];
+} __packed;
+
+/* OpRegion mailbox #5: ASLE ext */
+struct opregion_asle_ext {
+ u32 phed; /* Panel Header */
+ u8 bddc[256]; /* Panel EDID */
+ u8 rsvd[764];
+} __packed;
+
+/* Driver readiness indicator */
+#define ASLE_ARDY_READY (1 << 0)
+#define ASLE_ARDY_NOT_READY (0 << 0)
+
+/* ASLE Interrupt Command (ASLC) bits */
+#define ASLC_SET_ALS_ILLUM (1 << 0)
+#define ASLC_SET_BACKLIGHT (1 << 1)
+#define ASLC_SET_PFIT (1 << 2)
+#define ASLC_SET_PWM_FREQ (1 << 3)
+#define ASLC_SUPPORTED_ROTATION_ANGLES (1 << 4)
+#define ASLC_BUTTON_ARRAY (1 << 5)
+#define ASLC_CONVERTIBLE_INDICATOR (1 << 6)
+#define ASLC_DOCKING_INDICATOR (1 << 7)
+#define ASLC_ISCT_STATE_CHANGE (1 << 8)
+#define ASLC_REQ_MSK 0x1ff
+/* response bits */
+#define ASLC_ALS_ILLUM_FAILED (1 << 10)
+#define ASLC_BACKLIGHT_FAILED (1 << 12)
+#define ASLC_PFIT_FAILED (1 << 14)
+#define ASLC_PWM_FREQ_FAILED (1 << 16)
+#define ASLC_ROTATION_ANGLES_FAILED (1 << 18)
+#define ASLC_BUTTON_ARRAY_FAILED (1 << 20)
+#define ASLC_CONVERTIBLE_FAILED (1 << 22)
+#define ASLC_DOCKING_FAILED (1 << 24)
+#define ASLC_ISCT_STATE_FAILED (1 << 26)
+
+/* Technology enabled indicator */
+#define ASLE_TCHE_ALS_EN (1 << 0)
+#define ASLE_TCHE_BLC_EN (1 << 1)
+#define ASLE_TCHE_PFIT_EN (1 << 2)
+#define ASLE_TCHE_PFMB_EN (1 << 3)
+
+/* ASLE backlight brightness to set */
+#define ASLE_BCLP_VALID (1<<31)
+#define ASLE_BCLP_MSK (~(1<<31))
+
+/* ASLE panel fitting request */
+#define ASLE_PFIT_VALID (1<<31)
+#define ASLE_PFIT_CENTER (1<<0)
+#define ASLE_PFIT_STRETCH_TEXT (1<<1)
+#define ASLE_PFIT_STRETCH_GFX (1<<2)
+
+/* PWM frequency and minimum brightness */
+#define ASLE_PFMB_BRIGHTNESS_MASK (0xff)
+#define ASLE_PFMB_BRIGHTNESS_VALID (1<<8)
+#define ASLE_PFMB_PWM_MASK (0x7ffffe00)
+#define ASLE_PFMB_PWM_VALID (1<<31)
+
+#define ASLE_CBLV_VALID (1<<31)
+
+/* IUER */
+#define ASLE_IUER_DOCKING (1 << 7)
+#define ASLE_IUER_CONVERTIBLE (1 << 6)
+#define ASLE_IUER_ROTATION_LOCK_BTN (1 << 4)
+#define ASLE_IUER_VOLUME_DOWN_BTN (1 << 3)
+#define ASLE_IUER_VOLUME_UP_BTN (1 << 2)
+#define ASLE_IUER_WINDOWS_BTN (1 << 1)
+#define ASLE_IUER_POWER_BTN (1 << 0)
+
+#define ASLE_PHED_EDID_VALID_MASK 0x3
+
+/* Software System Control Interrupt (SWSCI) */
+#define SWSCI_SCIC_INDICATOR (1 << 0)
+#define SWSCI_SCIC_MAIN_FUNCTION_SHIFT 1
+#define SWSCI_SCIC_MAIN_FUNCTION_MASK (0xf << 1)
+#define SWSCI_SCIC_SUB_FUNCTION_SHIFT 8
+#define SWSCI_SCIC_SUB_FUNCTION_MASK (0xff << 8)
+#define SWSCI_SCIC_EXIT_PARAMETER_SHIFT 8
+#define SWSCI_SCIC_EXIT_PARAMETER_MASK (0xff << 8)
+#define SWSCI_SCIC_EXIT_STATUS_SHIFT 5
+#define SWSCI_SCIC_EXIT_STATUS_MASK (7 << 5)
+#define SWSCI_SCIC_EXIT_STATUS_SUCCESS 1
+
+#define SWSCI_FUNCTION_CODE(main, sub) \
+ ((main) << SWSCI_SCIC_MAIN_FUNCTION_SHIFT | \
+ (sub) << SWSCI_SCIC_SUB_FUNCTION_SHIFT)
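
As an aside, the SCIC encoding above packs the main function into bits 1-4 and the sub-function into bits 8-15, with bit 0 reserved as the in-progress indicator. A minimal standalone sketch of that packing (the shortened macro names here are illustrative, not part of the patch):

#include <assert.h>
#include <stdint.h>

#define MAIN_SHIFT 1	/* SWSCI_SCIC_MAIN_FUNCTION_SHIFT */
#define SUB_SHIFT  8	/* SWSCI_SCIC_SUB_FUNCTION_SHIFT */
#define FUNCTION_CODE(main, sub) \
	(((uint32_t)(main) << MAIN_SHIFT) | ((uint32_t)(sub) << SUB_SHIFT))

int main(void)
{
	/* SWSCI_GBDA_PANEL_DETAILS == SWSCI_FUNCTION_CODE(4, 5) */
	assert(FUNCTION_CODE(4, 5) == 0x508);
	return 0;
}
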
+
+/* SWSCI: Get BIOS Data (GBDA) */
+#define SWSCI_GBDA 4
+#define SWSCI_GBDA_SUPPORTED_CALLS SWSCI_FUNCTION_CODE(SWSCI_GBDA, 0)
+#define SWSCI_GBDA_REQUESTED_CALLBACKS SWSCI_FUNCTION_CODE(SWSCI_GBDA, 1)
+#define SWSCI_GBDA_BOOT_DISPLAY_PREF SWSCI_FUNCTION_CODE(SWSCI_GBDA, 4)
+#define SWSCI_GBDA_PANEL_DETAILS SWSCI_FUNCTION_CODE(SWSCI_GBDA, 5)
+#define SWSCI_GBDA_TV_STANDARD SWSCI_FUNCTION_CODE(SWSCI_GBDA, 6)
+#define SWSCI_GBDA_INTERNAL_GRAPHICS SWSCI_FUNCTION_CODE(SWSCI_GBDA, 7)
+#define SWSCI_GBDA_SPREAD_SPECTRUM SWSCI_FUNCTION_CODE(SWSCI_GBDA, 10)
+
+/* SWSCI: System BIOS Callbacks (SBCB) */
+#define SWSCI_SBCB 6
+#define SWSCI_SBCB_SUPPORTED_CALLBACKS SWSCI_FUNCTION_CODE(SWSCI_SBCB, 0)
+#define SWSCI_SBCB_INIT_COMPLETION SWSCI_FUNCTION_CODE(SWSCI_SBCB, 1)
+#define SWSCI_SBCB_PRE_HIRES_SET_MODE SWSCI_FUNCTION_CODE(SWSCI_SBCB, 3)
+#define SWSCI_SBCB_POST_HIRES_SET_MODE SWSCI_FUNCTION_CODE(SWSCI_SBCB, 4)
+#define SWSCI_SBCB_DISPLAY_SWITCH SWSCI_FUNCTION_CODE(SWSCI_SBCB, 5)
+#define SWSCI_SBCB_SET_TV_FORMAT SWSCI_FUNCTION_CODE(SWSCI_SBCB, 6)
+#define SWSCI_SBCB_ADAPTER_POWER_STATE SWSCI_FUNCTION_CODE(SWSCI_SBCB, 7)
+#define SWSCI_SBCB_DISPLAY_POWER_STATE SWSCI_FUNCTION_CODE(SWSCI_SBCB, 8)
+#define SWSCI_SBCB_SET_BOOT_DISPLAY SWSCI_FUNCTION_CODE(SWSCI_SBCB, 9)
+#define SWSCI_SBCB_SET_PANEL_DETAILS SWSCI_FUNCTION_CODE(SWSCI_SBCB, 10)
+#define SWSCI_SBCB_SET_INTERNAL_GFX SWSCI_FUNCTION_CODE(SWSCI_SBCB, 11)
+#define SWSCI_SBCB_POST_HIRES_TO_DOS_FS SWSCI_FUNCTION_CODE(SWSCI_SBCB, 16)
+#define SWSCI_SBCB_SUSPEND_RESUME SWSCI_FUNCTION_CODE(SWSCI_SBCB, 17)
+#define SWSCI_SBCB_SET_SPREAD_SPECTRUM SWSCI_FUNCTION_CODE(SWSCI_SBCB, 18)
+#define SWSCI_SBCB_POST_VBE_PM SWSCI_FUNCTION_CODE(SWSCI_SBCB, 19)
+#define SWSCI_SBCB_ENABLE_DISABLE_AUDIO SWSCI_FUNCTION_CODE(SWSCI_SBCB, 21)
+
+#define MAX_DSLP 1500
+
+static int check_swsci_function(struct drm_i915_private *i915, u32 function)
+{
+ struct opregion_swsci *swsci = i915->display.opregion.swsci;
+ u32 main_function, sub_function;
+
+ if (!swsci)
+ return -ENODEV;
+
+ main_function = (function & SWSCI_SCIC_MAIN_FUNCTION_MASK) >>
+ SWSCI_SCIC_MAIN_FUNCTION_SHIFT;
+ sub_function = (function & SWSCI_SCIC_SUB_FUNCTION_MASK) >>
+ SWSCI_SCIC_SUB_FUNCTION_SHIFT;
+
+ /* Check if we can call the function. See swsci_setup for details. */
+ if (main_function == SWSCI_SBCB) {
+ if ((i915->display.opregion.swsci_sbcb_sub_functions &
+ (1 << sub_function)) == 0)
+ return -EINVAL;
+ } else if (main_function == SWSCI_GBDA) {
+ if ((i915->display.opregion.swsci_gbda_sub_functions &
+ (1 << sub_function)) == 0)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int swsci(struct drm_i915_private *dev_priv,
+ u32 function, u32 parm, u32 *parm_out)
+{
+ struct opregion_swsci *swsci = dev_priv->display.opregion.swsci;
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
+ u32 scic, dslp;
+ u16 swsci_val;
+ int ret;
+
+ ret = check_swsci_function(dev_priv, function);
+ if (ret)
+ return ret;
+
+ /* Driver sleep timeout in ms. */
+ dslp = swsci->dslp;
+ if (!dslp) {
+ /* The spec says 2ms should be the default, but it's too small
+ * for some machines. */
+ dslp = 50;
+ } else if (dslp > MAX_DSLP) {
+ /* Hey bios, trust must be earned. */
+ DRM_INFO_ONCE("ACPI BIOS requests an excessive sleep of %u ms, "
+ "using %u ms instead\n", dslp, MAX_DSLP);
+ dslp = MAX_DSLP;
+ }
+
+ /* The spec tells us to do this, but we are the only user... */
+ scic = swsci->scic;
+ if (scic & SWSCI_SCIC_INDICATOR) {
+ drm_dbg(&dev_priv->drm, "SWSCI request already in progress\n");
+ return -EBUSY;
+ }
+
+ scic = function | SWSCI_SCIC_INDICATOR;
+
+ swsci->parm = parm;
+ swsci->scic = scic;
+
+ /* Ensure SCI event is selected and event trigger is cleared. */
+ pci_read_config_word(pdev, SWSCI, &swsci_val);
+ if (!(swsci_val & SWSCI_SCISEL) || (swsci_val & SWSCI_GSSCIE)) {
+ swsci_val |= SWSCI_SCISEL;
+ swsci_val &= ~SWSCI_GSSCIE;
+ pci_write_config_word(pdev, SWSCI, swsci_val);
+ }
+
+ /* Use event trigger to tell bios to check the mail. */
+ swsci_val |= SWSCI_GSSCIE;
+ pci_write_config_word(pdev, SWSCI, swsci_val);
+
+ /* Poll for the result. */
+#define C (((scic = swsci->scic) & SWSCI_SCIC_INDICATOR) == 0)
+ if (wait_for(C, dslp)) {
+ drm_dbg(&dev_priv->drm, "SWSCI request timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ scic = (scic & SWSCI_SCIC_EXIT_STATUS_MASK) >>
+ SWSCI_SCIC_EXIT_STATUS_SHIFT;
+
+ /* Note: scic == 0 is an error! */
+ if (scic != SWSCI_SCIC_EXIT_STATUS_SUCCESS) {
+ drm_dbg(&dev_priv->drm, "SWSCI request error %u\n", scic);
+ return -EIO;
+ }
+
+ if (parm_out)
+ *parm_out = swsci->parm;
+
+ return 0;
+
+#undef C
+}
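
The handshake above is: write parm and scic with the indicator bit set, ring the doorbell through the SWSCI PCI config register, poll until the BIOS clears the indicator, then decode the exit status from bits 5-7. A standalone sketch of just the status decode, with a hypothetical helper name:

#include <assert.h>
#include <stdint.h>

#define EXIT_STATUS_SHIFT 5
#define EXIT_STATUS_MASK  (7u << EXIT_STATUS_SHIFT)
#define EXIT_SUCCESS_CODE 1

/* Decode the SCIC exit status the way swsci() does after the
 * indicator bit clears; 0 is an error, 1 is success. */
static int scic_status_ok(uint32_t scic)
{
	return ((scic & EXIT_STATUS_MASK) >> EXIT_STATUS_SHIFT) ==
		EXIT_SUCCESS_CODE;
}

int main(void)
{
	assert(scic_status_ok(1u << 5));	/* success */
	assert(!scic_status_ok(0));		/* scic == 0 is an error */
	return 0;
}
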
+
+#define DISPLAY_TYPE_CRT 0
+#define DISPLAY_TYPE_TV 1
+#define DISPLAY_TYPE_EXTERNAL_FLAT_PANEL 2
+#define DISPLAY_TYPE_INTERNAL_FLAT_PANEL 3
+
+int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
+ bool enable)
+{
+ struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
+ u32 parm = 0;
+ u32 type = 0;
+ u32 port;
+ int ret;
+
+ /* don't care about old stuff for now */
+ if (!HAS_DDI(dev_priv))
+ return 0;
+
+ /* Avoid port out of bounds checks if SWSCI isn't there. */
+ ret = check_swsci_function(dev_priv, SWSCI_SBCB_DISPLAY_POWER_STATE);
+ if (ret)
+ return ret;
+
+ if (intel_encoder->type == INTEL_OUTPUT_DSI)
+ port = 0;
+ else
+ port = intel_encoder->port;
+
+ if (port == PORT_E) {
+ port = 0;
+ } else {
+ parm |= 1 << port;
+ port++;
+ }
+
+ /*
+ * The port numbering and mapping here is bizarre. The now-obsolete
+ * swsci spec supports ports numbered [0..4]. Port E is handled as a
+ * special case, but port F and beyond are not. The functionality is
+ * supposed to be obsolete for new platforms. Just bail out if the port
+ * number is out of bounds after mapping.
+ */
+ if (port > 4) {
+ drm_dbg_kms(&dev_priv->drm,
+ "[ENCODER:%d:%s] port %c (index %u) out of bounds for display power state notification\n",
+ intel_encoder->base.base.id, intel_encoder->base.name,
+ port_name(intel_encoder->port), port);
+ return -EINVAL;
+ }
+
+ if (!enable)
+ parm |= 4 << 8;
+
+ switch (intel_encoder->type) {
+ case INTEL_OUTPUT_ANALOG:
+ type = DISPLAY_TYPE_CRT;
+ break;
+ case INTEL_OUTPUT_DDI:
+ case INTEL_OUTPUT_DP:
+ case INTEL_OUTPUT_HDMI:
+ case INTEL_OUTPUT_DP_MST:
+ type = DISPLAY_TYPE_EXTERNAL_FLAT_PANEL;
+ break;
+ case INTEL_OUTPUT_EDP:
+ case INTEL_OUTPUT_DSI:
+ type = DISPLAY_TYPE_INTERNAL_FLAT_PANEL;
+ break;
+ default:
+ drm_WARN_ONCE(&dev_priv->drm, 1,
+ "unsupported intel_encoder type %d\n",
+ intel_encoder->type);
+ return -EINVAL;
+ }
+
+ parm |= type << (16 + port * 3);
+
+ return swsci(dev_priv, SWSCI_SBCB_DISPLAY_POWER_STATE, parm, NULL);
+}
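
To make the parm layout concrete: the low byte carries one presence bit per mapped port, bits 8-15 carry the display state (4 meaning disable), and each port gets a 3-bit display-type field starting at bit 16. A hypothetical worked example, not taken from the patch:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* Encoder on port B (index 1), external flat panel (type 2),
	 * being disabled. */
	uint32_t port = 1, type = 2, parm = 0;

	parm |= 1u << port;		/* presence bit in the low byte */
	port++;				/* non-PORT_E ports shift up by one */
	parm |= 4u << 8;		/* display state: disable */
	parm |= type << (16 + port * 3);

	assert(parm == 0x800402);
	return 0;
}
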
+
+static const struct {
+ pci_power_t pci_power_state;
+ u32 parm;
+} power_state_map[] = {
+ { PCI_D0, 0x00 },
+ { PCI_D1, 0x01 },
+ { PCI_D2, 0x02 },
+ { PCI_D3hot, 0x04 },
+ { PCI_D3cold, 0x04 },
+};
+
+int intel_opregion_notify_adapter(struct drm_i915_private *dev_priv,
+ pci_power_t state)
+{
+ int i;
+
+ if (!HAS_DDI(dev_priv))
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(power_state_map); i++) {
+ if (state == power_state_map[i].pci_power_state)
+ return swsci(dev_priv, SWSCI_SBCB_ADAPTER_POWER_STATE,
+ power_state_map[i].parm, NULL);
+ }
+
+ return -EINVAL;
+}
+
+static u32 asle_set_backlight(struct drm_i915_private *dev_priv, u32 bclp)
+{
+ struct intel_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+ struct opregion_asle *asle = dev_priv->display.opregion.asle;
+ struct drm_device *dev = &dev_priv->drm;
+
+ drm_dbg(&dev_priv->drm, "bclp = 0x%08x\n", bclp);
+
+ if (acpi_video_get_backlight_type() == acpi_backlight_native) {
+ drm_dbg_kms(&dev_priv->drm,
+ "opregion backlight request ignored\n");
+ return 0;
+ }
+
+ if (!(bclp & ASLE_BCLP_VALID))
+ return ASLC_BACKLIGHT_FAILED;
+
+ bclp &= ASLE_BCLP_MSK;
+ if (bclp > 255)
+ return ASLC_BACKLIGHT_FAILED;
+
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+
+ /*
+ * Update backlight on all connectors that support backlight (usually
+ * only one).
+ */
+ drm_dbg_kms(&dev_priv->drm, "updating opregion backlight %d/255\n",
+ bclp);
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ for_each_intel_connector_iter(connector, &conn_iter)
+ intel_backlight_set_acpi(connector->base.state, bclp, 255);
+ drm_connector_list_iter_end(&conn_iter);
+ asle->cblv = DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID;
+
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
+
+ return 0;
+}
+
+static u32 asle_set_als_illum(struct drm_i915_private *dev_priv, u32 alsi)
+{
+ /* alsi is the current ALS reading in lux. 0 indicates below sensor
+ range, 0xffff indicates above sensor range. 1-0xfffe are valid */
+ drm_dbg(&dev_priv->drm, "Illum is not supported\n");
+ return ASLC_ALS_ILLUM_FAILED;
+}
+
+static u32 asle_set_pwm_freq(struct drm_i915_private *dev_priv, u32 pfmb)
+{
+ drm_dbg(&dev_priv->drm, "PWM freq is not supported\n");
+ return ASLC_PWM_FREQ_FAILED;
+}
+
+static u32 asle_set_pfit(struct drm_i915_private *dev_priv, u32 pfit)
+{
+ /* Panel fitting is currently controlled by the X code, so this is a
+ noop until modesetting support works fully */
+ drm_dbg(&dev_priv->drm, "Pfit is not supported\n");
+ return ASLC_PFIT_FAILED;
+}
+
+static u32 asle_set_supported_rotation_angles(struct drm_i915_private *dev_priv, u32 srot)
+{
+ drm_dbg(&dev_priv->drm, "SROT is not supported\n");
+ return ASLC_ROTATION_ANGLES_FAILED;
+}
+
+static u32 asle_set_button_array(struct drm_i915_private *dev_priv, u32 iuer)
+{
+ if (!iuer)
+ drm_dbg(&dev_priv->drm,
+ "Button array event is not supported (nothing)\n");
+ if (iuer & ASLE_IUER_ROTATION_LOCK_BTN)
+ drm_dbg(&dev_priv->drm,
+ "Button array event is not supported (rotation lock)\n");
+ if (iuer & ASLE_IUER_VOLUME_DOWN_BTN)
+ drm_dbg(&dev_priv->drm,
+ "Button array event is not supported (volume down)\n");
+ if (iuer & ASLE_IUER_VOLUME_UP_BTN)
+ drm_dbg(&dev_priv->drm,
+ "Button array event is not supported (volume up)\n");
+ if (iuer & ASLE_IUER_WINDOWS_BTN)
+ drm_dbg(&dev_priv->drm,
+ "Button array event is not supported (windows)\n");
+ if (iuer & ASLE_IUER_POWER_BTN)
+ drm_dbg(&dev_priv->drm,
+ "Button array event is not supported (power)\n");
+
+ return ASLC_BUTTON_ARRAY_FAILED;
+}
+
+static u32 asle_set_convertible(struct drm_i915_private *dev_priv, u32 iuer)
+{
+ if (iuer & ASLE_IUER_CONVERTIBLE)
+ drm_dbg(&dev_priv->drm,
+ "Convertible is not supported (clamshell)\n");
+ else
+ drm_dbg(&dev_priv->drm,
+ "Convertible is not supported (slate)\n");
+
+ return ASLC_CONVERTIBLE_FAILED;
+}
+
+static u32 asle_set_docking(struct drm_i915_private *dev_priv, u32 iuer)
+{
+ if (iuer & ASLE_IUER_DOCKING)
+ drm_dbg(&dev_priv->drm, "Docking is not supported (docked)\n");
+ else
+ drm_dbg(&dev_priv->drm,
+ "Docking is not supported (undocked)\n");
+
+ return ASLC_DOCKING_FAILED;
+}
+
+static u32 asle_isct_state(struct drm_i915_private *dev_priv)
+{
+ drm_dbg(&dev_priv->drm, "ISCT is not supported\n");
+ return ASLC_ISCT_STATE_FAILED;
+}
+
+static void asle_work(struct work_struct *work)
+{
+ struct intel_opregion *opregion =
+ container_of(work, struct intel_opregion, asle_work);
+ struct drm_i915_private *dev_priv =
+ container_of(opregion, struct drm_i915_private, display.opregion);
+ struct opregion_asle *asle = dev_priv->display.opregion.asle;
+ u32 aslc_stat = 0;
+ u32 aslc_req;
+
+ if (!asle)
+ return;
+
+ aslc_req = asle->aslc;
+
+ if (!(aslc_req & ASLC_REQ_MSK)) {
+ drm_dbg(&dev_priv->drm,
+ "No request on ASLC interrupt 0x%08x\n", aslc_req);
+ return;
+ }
+
+ if (aslc_req & ASLC_SET_ALS_ILLUM)
+ aslc_stat |= asle_set_als_illum(dev_priv, asle->alsi);
+
+ if (aslc_req & ASLC_SET_BACKLIGHT)
+ aslc_stat |= asle_set_backlight(dev_priv, asle->bclp);
+
+ if (aslc_req & ASLC_SET_PFIT)
+ aslc_stat |= asle_set_pfit(dev_priv, asle->pfit);
+
+ if (aslc_req & ASLC_SET_PWM_FREQ)
+ aslc_stat |= asle_set_pwm_freq(dev_priv, asle->pfmb);
+
+ if (aslc_req & ASLC_SUPPORTED_ROTATION_ANGLES)
+ aslc_stat |= asle_set_supported_rotation_angles(dev_priv,
+ asle->srot);
+
+ if (aslc_req & ASLC_BUTTON_ARRAY)
+ aslc_stat |= asle_set_button_array(dev_priv, asle->iuer);
+
+ if (aslc_req & ASLC_CONVERTIBLE_INDICATOR)
+ aslc_stat |= asle_set_convertible(dev_priv, asle->iuer);
+
+ if (aslc_req & ASLC_DOCKING_INDICATOR)
+ aslc_stat |= asle_set_docking(dev_priv, asle->iuer);
+
+ if (aslc_req & ASLC_ISCT_STATE_CHANGE)
+ aslc_stat |= asle_isct_state(dev_priv);
+
+ asle->aslc = aslc_stat;
+}
+
+void intel_opregion_asle_intr(struct drm_i915_private *dev_priv)
+{
+ if (dev_priv->display.opregion.asle)
+ schedule_work(&dev_priv->display.opregion.asle_work);
+}
+
+#define ACPI_EV_DISPLAY_SWITCH (1<<0)
+#define ACPI_EV_LID (1<<1)
+#define ACPI_EV_DOCK (1<<2)
+
+/*
+ * The only video event type relevant to opregion is 0x80. It indicates either a
+ * docking event, a lid switch, or a display switch request. In Linux, these are
+ * handled by the dock, button and video drivers.
+ */
+static int intel_opregion_video_event(struct notifier_block *nb,
+ unsigned long val, void *data)
+{
+ struct intel_opregion *opregion = container_of(nb, struct intel_opregion,
+ acpi_notifier);
+ struct acpi_bus_event *event = data;
+ struct opregion_acpi *acpi;
+ int ret = NOTIFY_OK;
+
+ if (strcmp(event->device_class, ACPI_VIDEO_CLASS) != 0)
+ return NOTIFY_DONE;
+
+ acpi = opregion->acpi;
+
+ if (event->type == 0x80 && ((acpi->cevt & 1) == 0))
+ ret = NOTIFY_BAD;
+
+ acpi->csts = 0;
+
+ return ret;
+}
+
+/*
+ * Initialise the DIDL field in opregion. This passes a list of devices to
+ * the firmware. Values are defined by section B.4.2 of the ACPI specification
+ * (version 3)
+ */
+
+static void set_did(struct intel_opregion *opregion, int i, u32 val)
+{
+ if (i < ARRAY_SIZE(opregion->acpi->didl)) {
+ opregion->acpi->didl[i] = val;
+ } else {
+ i -= ARRAY_SIZE(opregion->acpi->didl);
+
+ if (WARN_ON(i >= ARRAY_SIZE(opregion->acpi->did2)))
+ return;
+
+ opregion->acpi->did2[i] = val;
+ }
+}
+
+static void intel_didl_outputs(struct drm_i915_private *dev_priv)
+{
+ struct intel_opregion *opregion = &dev_priv->display.opregion;
+ struct intel_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+ int i = 0, max_outputs;
+
+ /*
+ * In theory, did2, the extended didl, gets added at opregion version
+ * 3.0. In practice, however, we're supposed to set it for earlier
+ * versions as well, since a BIOS that doesn't understand did2 should
+ * not look at it anyway. Use a variable so we can tweak this if a need
+ * arises later.
+ */
+ max_outputs = ARRAY_SIZE(opregion->acpi->didl) +
+ ARRAY_SIZE(opregion->acpi->did2);
+
+ intel_acpi_device_id_update(dev_priv);
+
+ drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
+ for_each_intel_connector_iter(connector, &conn_iter) {
+ if (i < max_outputs)
+ set_did(opregion, i, connector->acpi_device_id);
+ i++;
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ drm_dbg_kms(&dev_priv->drm, "%d outputs detected\n", i);
+
+ if (i > max_outputs)
+ drm_err(&dev_priv->drm,
+ "More than %d outputs in connector list\n",
+ max_outputs);
+
+ /* If fewer than max outputs, the list must be null terminated */
+ if (i < max_outputs)
+ set_did(opregion, i, 0);
+}
+
+static void intel_setup_cadls(struct drm_i915_private *dev_priv)
+{
+ struct intel_opregion *opregion = &dev_priv->display.opregion;
+ struct intel_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+ int i = 0;
+
+ /*
+ * Initialize the CADL field from the connector device ids. This is
+ * essentially the same as copying from the DIDL. Technically, this is
+ * not always correct, as display outputs may exist but not be active. This
+ * initialization is necessary for some Clevo laptops that check this
+ * field before processing the brightness and display switching hotkeys.
+ *
+ * Note that internal panels should be at the front of the connector
+ * list already, ensuring they're not left out.
+ */
+ drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
+ for_each_intel_connector_iter(connector, &conn_iter) {
+ if (i >= ARRAY_SIZE(opregion->acpi->cadl))
+ break;
+ opregion->acpi->cadl[i++] = connector->acpi_device_id;
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ /* If fewer than 8 active devices, the list must be null terminated */
+ if (i < ARRAY_SIZE(opregion->acpi->cadl))
+ opregion->acpi->cadl[i] = 0;
+}
+
+static void swsci_setup(struct drm_i915_private *dev_priv)
+{
+ struct intel_opregion *opregion = &dev_priv->display.opregion;
+ bool requested_callbacks = false;
+ u32 tmp;
+
+ /* Sub-function code 0 is okay, let's allow them. */
+ opregion->swsci_gbda_sub_functions = 1;
+ opregion->swsci_sbcb_sub_functions = 1;
+
+ /* We use GBDA to ask for supported GBDA calls. */
+ if (swsci(dev_priv, SWSCI_GBDA_SUPPORTED_CALLS, 0, &tmp) == 0) {
+ /* make the bits match the sub-function codes */
+ tmp <<= 1;
+ opregion->swsci_gbda_sub_functions |= tmp;
+ }
+
+ /*
+ * We also use GBDA to ask for _requested_ SBCB callbacks. The driver
+ * must not call interfaces that are not specifically requested by the
+ * bios.
+ */
+ if (swsci(dev_priv, SWSCI_GBDA_REQUESTED_CALLBACKS, 0, &tmp) == 0) {
+ /* here, the bits already match sub-function codes */
+ opregion->swsci_sbcb_sub_functions |= tmp;
+ requested_callbacks = true;
+ }
+
+ /*
+ * But we use SBCB to ask for _supported_ SBCB calls. This does not mean
+ * the callback is _requested_. But we still can't call interfaces that
+ * are not requested.
+ */
+ if (swsci(dev_priv, SWSCI_SBCB_SUPPORTED_CALLBACKS, 0, &tmp) == 0) {
+ /* make the bits match the sub-function codes */
+ u32 low = tmp & 0x7ff;
+ u32 high = tmp & ~0xfff; /* bit 11 is reserved */
+ tmp = (high << 4) | (low << 1) | 1;
+
+ /* best guess what to do with supported wrt requested */
+ if (requested_callbacks) {
+ u32 req = opregion->swsci_sbcb_sub_functions;
+ if ((req & tmp) != req)
+ drm_dbg(&dev_priv->drm,
+ "SWSCI BIOS requested (%08x) SBCB callbacks that are not supported (%08x)\n",
+ req, tmp);
+ /* XXX: for now, trust the requested callbacks */
+ /* opregion->swsci_sbcb_sub_functions &= tmp; */
+ } else {
+ opregion->swsci_sbcb_sub_functions |= tmp;
+ }
+ }
+
+ drm_dbg(&dev_priv->drm,
+ "SWSCI GBDA callbacks %08x, SBCB callbacks %08x\n",
+ opregion->swsci_gbda_sub_functions,
+ opregion->swsci_sbcb_sub_functions);
+}
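
The bit surgery on the SBCB supported-callbacks word deserves a worked example: bits 0-10 of the response map to sub-functions 1-11, bit 11 is reserved, and bits 12 and up map to sub-functions 16 and up, which is where the extra shift by 4 comes from. A standalone sketch under those assumptions:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical response advertising callbacks 1 and 16. */
	uint32_t tmp = (1u << 0) | (1u << 12);
	uint32_t low = tmp & 0x7ff;
	uint32_t high = tmp & ~0xfff;	/* bit 11 is reserved */
	uint32_t mask = (high << 4) | (low << 1) | 1;

	assert(mask & (1u << 0));	/* sub-function 0 always allowed */
	assert(mask & (1u << 1));	/* SWSCI_SBCB_INIT_COMPLETION */
	assert(mask & (1u << 16));	/* SWSCI_SBCB_POST_HIRES_TO_DOS_FS */
	return 0;
}
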
+
+static int intel_no_opregion_vbt_callback(const struct dmi_system_id *id)
+{
+ DRM_DEBUG_KMS("Falling back to manually reading VBT from "
+ "VBIOS ROM for %s\n", id->ident);
+ return 1;
+}
+
+static const struct dmi_system_id intel_no_opregion_vbt[] = {
+ {
+ .callback = intel_no_opregion_vbt_callback,
+ .ident = "ThinkCentre A57",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "97027RG"),
+ },
+ },
+ { }
+};
+
+static int intel_load_vbt_firmware(struct drm_i915_private *dev_priv)
+{
+ struct intel_opregion *opregion = &dev_priv->display.opregion;
+ const struct firmware *fw = NULL;
+ const char *name = dev_priv->params.vbt_firmware;
+ int ret;
+
+ if (!name || !*name)
+ return -ENOENT;
+
+ ret = request_firmware(&fw, name, dev_priv->drm.dev);
+ if (ret) {
+ drm_err(&dev_priv->drm,
+ "Requesting VBT firmware \"%s\" failed (%d)\n",
+ name, ret);
+ return ret;
+ }
+
+ if (intel_bios_is_valid_vbt(fw->data, fw->size)) {
+ opregion->vbt_firmware = kmemdup(fw->data, fw->size, GFP_KERNEL);
+ if (opregion->vbt_firmware) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Found valid VBT firmware \"%s\"\n", name);
+ opregion->vbt = opregion->vbt_firmware;
+ opregion->vbt_size = fw->size;
+ ret = 0;
+ } else {
+ ret = -ENOMEM;
+ }
+ } else {
+ drm_dbg_kms(&dev_priv->drm, "Invalid VBT firmware \"%s\"\n",
+ name);
+ ret = -EINVAL;
+ }
+
+ release_firmware(fw);
+
+ return ret;
+}
+
+int intel_opregion_setup(struct drm_i915_private *dev_priv)
+{
+ struct intel_opregion *opregion = &dev_priv->display.opregion;
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
+ u32 asls, mboxes;
+ char buf[sizeof(OPREGION_SIGNATURE)];
+ int err = 0;
+ void *base;
+ const void *vbt;
+ u32 vbt_size;
+
+ BUILD_BUG_ON(sizeof(struct opregion_header) != 0x100);
+ BUILD_BUG_ON(sizeof(struct opregion_acpi) != 0x100);
+ BUILD_BUG_ON(sizeof(struct opregion_swsci) != 0x100);
+ BUILD_BUG_ON(sizeof(struct opregion_asle) != 0x100);
+ BUILD_BUG_ON(sizeof(struct opregion_asle_ext) != 0x400);
+
+ pci_read_config_dword(pdev, ASLS, &asls);
+ drm_dbg(&dev_priv->drm, "graphic opregion physical addr: 0x%x\n",
+ asls);
+ if (asls == 0) {
+ drm_dbg(&dev_priv->drm, "ACPI OpRegion not supported!\n");
+ return -ENOTSUPP;
+ }
+
+ INIT_WORK(&opregion->asle_work, asle_work);
+
+ base = memremap(asls, OPREGION_SIZE, MEMREMAP_WB);
+ if (!base)
+ return -ENOMEM;
+
+ memcpy(buf, base, sizeof(buf));
+
+ if (memcmp(buf, OPREGION_SIGNATURE, 16)) {
+ drm_dbg(&dev_priv->drm, "opregion signature mismatch\n");
+ err = -EINVAL;
+ goto err_out;
+ }
+ opregion->header = base;
+ opregion->lid_state = base + ACPI_CLID;
+
+ drm_dbg(&dev_priv->drm, "ACPI OpRegion version %u.%u.%u\n",
+ opregion->header->over.major,
+ opregion->header->over.minor,
+ opregion->header->over.revision);
+
+ mboxes = opregion->header->mboxes;
+ if (mboxes & MBOX_ACPI) {
+ drm_dbg(&dev_priv->drm, "Public ACPI methods supported\n");
+ opregion->acpi = base + OPREGION_ACPI_OFFSET;
+ /*
+ * Indicate we handle monitor hotplug events ourselves so we do
+ * not need ACPI notifications for them. Disabling these avoids
+ * triggering the AML code doing the notification, which may be
+ * broken as Windows also seems to disable these.
+ */
+ opregion->acpi->chpd = 1;
+ }
+
+ if (mboxes & MBOX_SWSCI) {
+ u8 major = opregion->header->over.major;
+
+ if (major >= 3) {
+ drm_err(&dev_priv->drm, "SWSCI Mailbox #2 present for opregion v3.x, ignoring\n");
+ } else {
+ if (major >= 2)
+ drm_dbg(&dev_priv->drm, "SWSCI Mailbox #2 present for opregion v2.x\n");
+ drm_dbg(&dev_priv->drm, "SWSCI supported\n");
+ opregion->swsci = base + OPREGION_SWSCI_OFFSET;
+ swsci_setup(dev_priv);
+ }
+ }
+
+ if (mboxes & MBOX_ASLE) {
+ drm_dbg(&dev_priv->drm, "ASLE supported\n");
+ opregion->asle = base + OPREGION_ASLE_OFFSET;
+
+ opregion->asle->ardy = ASLE_ARDY_NOT_READY;
+ }
+
+ if (mboxes & MBOX_ASLE_EXT) {
+ drm_dbg(&dev_priv->drm, "ASLE extension supported\n");
+ opregion->asle_ext = base + OPREGION_ASLE_EXT_OFFSET;
+ }
+
+ if (mboxes & MBOX_BACKLIGHT)
+ drm_dbg(&dev_priv->drm, "Mailbox #2 for backlight present\n");
+
+ if (intel_load_vbt_firmware(dev_priv) == 0)
+ goto out;
+
+ if (dmi_check_system(intel_no_opregion_vbt))
+ goto out;
+
+ if (opregion->header->over.major >= 2 && opregion->asle &&
+ opregion->asle->rvda && opregion->asle->rvds) {
+ resource_size_t rvda = opregion->asle->rvda;
+
+ /*
+ * opregion 2.0: rvda is the physical VBT address.
+ *
+ * opregion 2.1+: rvda is unsigned, relative offset from
+ * opregion base, and should never point within opregion.
+ */
+ if (opregion->header->over.major > 2 ||
+ opregion->header->over.minor >= 1) {
+ drm_WARN_ON(&dev_priv->drm, rvda < OPREGION_SIZE);
+
+ rvda += asls;
+ }
+
+ opregion->rvda = memremap(rvda, opregion->asle->rvds,
+ MEMREMAP_WB);
+
+ vbt = opregion->rvda;
+ vbt_size = opregion->asle->rvds;
+ if (intel_bios_is_valid_vbt(vbt, vbt_size)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Found valid VBT in ACPI OpRegion (RVDA)\n");
+ opregion->vbt = vbt;
+ opregion->vbt_size = vbt_size;
+ goto out;
+ } else {
+ drm_dbg_kms(&dev_priv->drm,
+ "Invalid VBT in ACPI OpRegion (RVDA)\n");
+ memunmap(opregion->rvda);
+ opregion->rvda = NULL;
+ }
+ }
+
+ vbt = base + OPREGION_VBT_OFFSET;
+ /*
+ * The VBT specification says that if the ASLE ext mailbox is not used
+ * its area is reserved, but on some CHT boards the VBT extends into the
+ * ASLE ext area. Allow this even though it is against the spec, so we
+ * do not end up rejecting the VBT on those boards (and end up not
+ * finding the LCD panel because of this).
+ */
+ vbt_size = (mboxes & MBOX_ASLE_EXT) ?
+ OPREGION_ASLE_EXT_OFFSET : OPREGION_SIZE;
+ vbt_size -= OPREGION_VBT_OFFSET;
+ if (intel_bios_is_valid_vbt(vbt, vbt_size)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Found valid VBT in ACPI OpRegion (Mailbox #4)\n");
+ opregion->vbt = vbt;
+ opregion->vbt_size = vbt_size;
+ } else {
+ drm_dbg_kms(&dev_priv->drm,
+ "Invalid VBT in ACPI OpRegion (Mailbox #4)\n");
+ }
+
+out:
+ return 0;
+
+err_out:
+ memunmap(base);
+ return err;
+}
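
A small sketch of the RVDA interpretation used above, with an illustrative helper name: opregion 2.0 publishes a physical VBT address, while 2.1+ publishes an offset relative to the opregion base read from ASLS (and hence pointing past the opregion itself):

#include <assert.h>
#include <stdint.h>

#define OPREGION_SIZE (8 * 1024)

static uint64_t rvda_to_phys(uint8_t major, uint8_t minor,
			     uint64_t rvda, uint64_t asls)
{
	if (major > 2 || (major == 2 && minor >= 1))
		return asls + rvda;	/* 2.1+: relative offset */
	return rvda;			/* 2.0: already physical */
}

int main(void)
{
	assert(rvda_to_phys(2, 0, 0x80000000, 0x7000) == 0x80000000);
	assert(rvda_to_phys(2, 1, OPREGION_SIZE, 0x7000) ==
	       0x7000 + OPREGION_SIZE);
	return 0;
}
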
+
+static int intel_use_opregion_panel_type_callback(const struct dmi_system_id *id)
+{
+ DRM_INFO("Using panel type from OpRegion on %s\n", id->ident);
+ return 1;
+}
+
+static const struct dmi_system_id intel_use_opregion_panel_type[] = {
+ {
+ .callback = intel_use_opregion_panel_type_callback,
+ .ident = "Conrac GmbH IX45GM2",
+ .matches = {DMI_MATCH(DMI_SYS_VENDOR, "Conrac GmbH"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "IX45GM2"),
+ },
+ },
+ { }
+};
+
+int
+intel_opregion_get_panel_type(struct drm_i915_private *dev_priv)
+{
+ u32 panel_details;
+ int ret;
+
+ ret = swsci(dev_priv, SWSCI_GBDA_PANEL_DETAILS, 0x0, &panel_details);
+ if (ret)
+ return ret;
+
+ ret = (panel_details >> 8) & 0xff;
+ if (ret > 0x10) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Invalid OpRegion panel type 0x%x\n", ret);
+ return -EINVAL;
+ }
+
+ /* fall back to VBT panel type? */
+ if (ret == 0x0) {
+ drm_dbg_kms(&dev_priv->drm, "No panel type in OpRegion\n");
+ return -ENODEV;
+ }
+
+ /*
+ * So far we know that some machines must use it, others must not use it.
+ * There doesn't seem to be any way to determine which way to go, except
+ * via a quirk list :(
+ */
+ if (!dmi_check_system(intel_use_opregion_panel_type)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Ignoring OpRegion panel type (%d)\n", ret - 1);
+ return -ENODEV;
+ }
+
+ return ret - 1;
+}
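
A standalone sketch of the panel-details decode above (the helper name is illustrative): the type lives in bits 8-15, zero means unset, anything above 0x10 is invalid, and the usable VBT panel index is the decoded value minus one:

#include <assert.h>
#include <stdint.h>

static int decode_panel_type(uint32_t panel_details)
{
	int type = (panel_details >> 8) & 0xff;

	if (type == 0 || type > 0x10)
		return -1;	/* fall back to the VBT panel type */
	return type - 1;
}

int main(void)
{
	assert(decode_panel_type(0x0100) == 0);		/* type 1 -> index 0 */
	assert(decode_panel_type(0x1100) == -1);	/* 0x11 is invalid */
	return 0;
}
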
+
+/**
+ * intel_opregion_get_edid - Fetch EDID from ACPI OpRegion mailbox #5
+ * @intel_connector: eDP connector
+ *
+ * This reads ACPI OpRegion mailbox #5 to extract the EDID that is passed
+ * to it.
+ *
+ * Returns:
+ * The EDID in the OpRegion, or NULL if there is none or it's invalid.
+ */
+struct edid *intel_opregion_get_edid(struct intel_connector *intel_connector)
+{
+ struct drm_connector *connector = &intel_connector->base;
+ struct drm_i915_private *i915 = to_i915(connector->dev);
+ struct intel_opregion *opregion = &i915->display.opregion;
+ const void *in_edid;
+ const struct edid *edid;
+ struct edid *new_edid;
+ int len;
+
+ if (!opregion->asle_ext)
+ return NULL;
+
+ in_edid = opregion->asle_ext->bddc;
+
+ /* Validity corresponds to number of 128-byte blocks */
+ len = (opregion->asle_ext->phed & ASLE_PHED_EDID_VALID_MASK) * 128;
+ if (!len || !memchr_inv(in_edid, 0, len))
+ return NULL;
+
+ edid = in_edid;
+
+ if (len < EDID_LENGTH * (1 + edid->extensions)) {
+ drm_dbg_kms(&i915->drm, "Invalid EDID in ACPI OpRegion (Mailbox #5): too short\n");
+ return NULL;
+ }
+ new_edid = drm_edid_duplicate(edid);
+ if (!new_edid)
+ return NULL;
+ if (!drm_edid_is_valid(new_edid)) {
+ kfree(new_edid);
+ drm_dbg_kms(&i915->drm, "Invalid EDID in ACPI OpRegion (Mailbox #5)\n");
+ return NULL;
+ }
+ return new_edid;
+}
+
+bool intel_opregion_headless_sku(struct drm_i915_private *i915)
+{
+ struct intel_opregion *opregion = &i915->display.opregion;
+ struct opregion_header *header = opregion->header;
+
+ if (!header || header->over.major < 2 ||
+ (header->over.major == 2 && header->over.minor < 3))
+ return false;
+
+ return opregion->header->pcon & PCON_HEADLESS_SKU;
+}
+
+void intel_opregion_register(struct drm_i915_private *i915)
+{
+ struct intel_opregion *opregion = &i915->display.opregion;
+
+ if (!opregion->header)
+ return;
+
+ if (opregion->acpi) {
+ opregion->acpi_notifier.notifier_call =
+ intel_opregion_video_event;
+ register_acpi_notifier(&opregion->acpi_notifier);
+ }
+
+ intel_opregion_resume(i915);
+}
+
+void intel_opregion_resume(struct drm_i915_private *i915)
+{
+ struct intel_opregion *opregion = &i915->display.opregion;
+
+ if (!opregion->header)
+ return;
+
+ if (opregion->acpi) {
+ intel_didl_outputs(i915);
+ intel_setup_cadls(i915);
+
+ /*
+ * Notify BIOS we are ready to handle ACPI video ext notifs.
+ * Right now, all the events are handled by the ACPI video
+ * module. We don't actually need to do anything with them.
+ */
+ opregion->acpi->csts = 0;
+ opregion->acpi->drdy = 1;
+ }
+
+ if (opregion->asle) {
+ opregion->asle->tche = ASLE_TCHE_BLC_EN;
+ opregion->asle->ardy = ASLE_ARDY_READY;
+ }
+
+ /* Some platforms abuse the _DSM to enable MUX */
+ intel_dsm_get_bios_data_funcs_supported(i915);
+
+ intel_opregion_notify_adapter(i915, PCI_D0);
+}
+
+void intel_opregion_suspend(struct drm_i915_private *i915, pci_power_t state)
+{
+ struct intel_opregion *opregion = &i915->display.opregion;
+
+ if (!opregion->header)
+ return;
+
+ intel_opregion_notify_adapter(i915, state);
+
+ if (opregion->asle)
+ opregion->asle->ardy = ASLE_ARDY_NOT_READY;
+
+ cancel_work_sync(&i915->display.opregion.asle_work);
+
+ if (opregion->acpi)
+ opregion->acpi->drdy = 0;
+}
+
+void intel_opregion_unregister(struct drm_i915_private *i915)
+{
+ struct intel_opregion *opregion = &i915->display.opregion;
+
+ intel_opregion_suspend(i915, PCI_D1);
+
+ if (!opregion->header)
+ return;
+
+ if (opregion->acpi_notifier.notifier_call) {
+ unregister_acpi_notifier(&opregion->acpi_notifier);
+ opregion->acpi_notifier.notifier_call = NULL;
+ }
+
+ /* just clear all opregion memory pointers now */
+ memunmap(opregion->header);
+ if (opregion->rvda) {
+ memunmap(opregion->rvda);
+ opregion->rvda = NULL;
+ }
+ if (opregion->vbt_firmware) {
+ kfree(opregion->vbt_firmware);
+ opregion->vbt_firmware = NULL;
+ }
+ opregion->header = NULL;
+ opregion->acpi = NULL;
+ opregion->swsci = NULL;
+ opregion->asle = NULL;
+ opregion->asle_ext = NULL;
+ opregion->vbt = NULL;
+ opregion->lid_state = NULL;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_opregion.h b/drivers/gpu/drm/i915/display/intel_opregion.h
new file mode 100644
index 000000000..2f261f985
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_opregion.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright © 2008-2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _INTEL_OPREGION_H_
+#define _INTEL_OPREGION_H_
+
+#include <linux/workqueue.h>
+#include <linux/pci.h>
+
+struct drm_i915_private;
+struct intel_connector;
+struct intel_encoder;
+
+struct opregion_header;
+struct opregion_acpi;
+struct opregion_swsci;
+struct opregion_asle;
+struct opregion_asle_ext;
+
+struct intel_opregion {
+ struct opregion_header *header;
+ struct opregion_acpi *acpi;
+ struct opregion_swsci *swsci;
+ u32 swsci_gbda_sub_functions;
+ u32 swsci_sbcb_sub_functions;
+ struct opregion_asle *asle;
+ struct opregion_asle_ext *asle_ext;
+ void *rvda;
+ void *vbt_firmware;
+ const void *vbt;
+ u32 vbt_size;
+ u32 *lid_state;
+ struct work_struct asle_work;
+ struct notifier_block acpi_notifier;
+};
+
+#define OPREGION_SIZE (8 * 1024)
+
+#ifdef CONFIG_ACPI
+
+int intel_opregion_setup(struct drm_i915_private *dev_priv);
+
+void intel_opregion_register(struct drm_i915_private *dev_priv);
+void intel_opregion_unregister(struct drm_i915_private *dev_priv);
+
+void intel_opregion_resume(struct drm_i915_private *dev_priv);
+void intel_opregion_suspend(struct drm_i915_private *dev_priv,
+ pci_power_t state);
+
+void intel_opregion_asle_intr(struct drm_i915_private *dev_priv);
+int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
+ bool enable);
+int intel_opregion_notify_adapter(struct drm_i915_private *dev_priv,
+ pci_power_t state);
+int intel_opregion_get_panel_type(struct drm_i915_private *dev_priv);
+struct edid *intel_opregion_get_edid(struct intel_connector *connector);
+
+bool intel_opregion_headless_sku(struct drm_i915_private *i915);
+
+#else /* CONFIG_ACPI*/
+
+static inline int intel_opregion_setup(struct drm_i915_private *dev_priv)
+{
+ return 0;
+}
+
+static inline void intel_opregion_register(struct drm_i915_private *dev_priv)
+{
+}
+
+static inline void intel_opregion_unregister(struct drm_i915_private *dev_priv)
+{
+}
+
+static inline void intel_opregion_resume(struct drm_i915_private *dev_priv)
+{
+}
+
+static inline void intel_opregion_suspend(struct drm_i915_private *dev_priv,
+ pci_power_t state)
+{
+}
+
+static inline void intel_opregion_asle_intr(struct drm_i915_private *dev_priv)
+{
+}
+
+static inline int
+intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable)
+{
+ return 0;
+}
+
+static inline int
+intel_opregion_notify_adapter(struct drm_i915_private *dev, pci_power_t state)
+{
+ return 0;
+}
+
+static inline int intel_opregion_get_panel_type(struct drm_i915_private *dev)
+{
+ return -ENODEV;
+}
+
+static inline struct edid *
+intel_opregion_get_edid(struct intel_connector *connector)
+{
+ return NULL;
+}
+
+static inline bool intel_opregion_headless_sku(struct drm_i915_private *i915)
+{
+ return false;
+}
+
+#endif /* CONFIG_ACPI */
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
new file mode 100644
index 000000000..c12bdca8d
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -0,0 +1,1533 @@
+/*
+ * Copyright © 2009
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Daniel Vetter <daniel@ffwll.ch>
+ *
+ * Derived from Xorg ddx, xf86-video-intel, src/i830_video.c
+ */
+
+#include <drm/drm_fourcc.h>
+
+#include "gem/i915_gem_internal.h"
+#include "gem/i915_gem_pm.h"
+#include "gt/intel_gpu_commands.h"
+#include "gt/intel_ring.h"
+
+#include "i915_drv.h"
+#include "i915_reg.h"
+#include "intel_de.h"
+#include "intel_display_types.h"
+#include "intel_frontbuffer.h"
+#include "intel_overlay.h"
+#include "intel_pci_config.h"
+
+/* Limits for overlay size. According to intel doc, the real limits are:
+ * Y width: 4095, UV width (planar): 2047, Y height: 2047,
+ * UV height (planar): 1023. But Xorg assumes 2048 for height and width. Use
+ * the minimum of both. */
+#define IMAGE_MAX_WIDTH 2048
+#define IMAGE_MAX_HEIGHT 2046 /* 2 * 1023 */
+/* on 830 and 845 these large limits result in the card hanging */
+#define IMAGE_MAX_WIDTH_LEGACY 1024
+#define IMAGE_MAX_HEIGHT_LEGACY 1088
+
+/* overlay register definitions */
+/* OCMD register */
+#define OCMD_TILED_SURFACE (0x1<<19)
+#define OCMD_MIRROR_MASK (0x3<<17)
+#define OCMD_MIRROR_MODE (0x3<<17)
+#define OCMD_MIRROR_HORIZONTAL (0x1<<17)
+#define OCMD_MIRROR_VERTICAL (0x2<<17)
+#define OCMD_MIRROR_BOTH (0x3<<17)
+#define OCMD_BYTEORDER_MASK (0x3<<14) /* zero for YUYV or FOURCC YUY2 */
+#define OCMD_UV_SWAP (0x1<<14) /* YVYU */
+#define OCMD_Y_SWAP (0x2<<14) /* UYVY or FOURCC UYVY */
+#define OCMD_Y_AND_UV_SWAP (0x3<<14) /* VYUY */
+#define OCMD_SOURCE_FORMAT_MASK (0xf<<10)
+#define OCMD_RGB_888 (0x1<<10) /* not in i965 Intel docs */
+#define OCMD_RGB_555 (0x2<<10) /* not in i965 Intel docs */
+#define OCMD_RGB_565 (0x3<<10) /* not in i965 Intel docs */
+#define OCMD_YUV_422_PACKED (0x8<<10)
+#define OCMD_YUV_411_PACKED (0x9<<10) /* not in i965 Intel docs */
+#define OCMD_YUV_420_PLANAR (0xc<<10)
+#define OCMD_YUV_422_PLANAR (0xd<<10)
+#define OCMD_YUV_410_PLANAR (0xe<<10) /* also 411 */
+#define OCMD_TVSYNCFLIP_PARITY (0x1<<9)
+#define OCMD_TVSYNCFLIP_ENABLE (0x1<<7)
+#define OCMD_BUF_TYPE_MASK (0x1<<5)
+#define OCMD_BUF_TYPE_FRAME (0x0<<5)
+#define OCMD_BUF_TYPE_FIELD (0x1<<5)
+#define OCMD_TEST_MODE (0x1<<4)
+#define OCMD_BUFFER_SELECT (0x3<<2)
+#define OCMD_BUFFER0 (0x0<<2)
+#define OCMD_BUFFER1 (0x1<<2)
+#define OCMD_FIELD_SELECT (0x1<<2)
+#define OCMD_FIELD0 (0x0<<1)
+#define OCMD_FIELD1 (0x1<<1)
+#define OCMD_ENABLE (0x1<<0)
+
+/* OCONFIG register */
+#define OCONF_PIPE_MASK (0x1<<18)
+#define OCONF_PIPE_A (0x0<<18)
+#define OCONF_PIPE_B (0x1<<18)
+#define OCONF_GAMMA2_ENABLE (0x1<<16)
+#define OCONF_CSC_MODE_BT601 (0x0<<5)
+#define OCONF_CSC_MODE_BT709 (0x1<<5)
+#define OCONF_CSC_BYPASS (0x1<<4)
+#define OCONF_CC_OUT_8BIT (0x1<<3)
+#define OCONF_TEST_MODE (0x1<<2)
+#define OCONF_THREE_LINE_BUFFER (0x1<<0)
+#define OCONF_TWO_LINE_BUFFER (0x0<<0)
+
+/* DCLRKM (dst-key) register */
+#define DST_KEY_ENABLE (0x1<<31)
+#define CLK_RGB24_MASK 0x0
+#define CLK_RGB16_MASK 0x070307
+#define CLK_RGB15_MASK 0x070707
+
+#define RGB30_TO_COLORKEY(c) \
+ ((((c) & 0x3fc00000) >> 6) | (((c) & 0x000ff000) >> 4) | (((c) & 0x000003fc) >> 2))
+#define RGB16_TO_COLORKEY(c) \
+ ((((c) & 0xf800) << 8) | (((c) & 0x07e0) << 5) | (((c) & 0x001f) << 3))
+#define RGB15_TO_COLORKEY(c) \
+ ((((c) & 0x7c00) << 9) | (((c) & 0x03e0) << 6) | (((c) & 0x001f) << 3))
+#define RGB8I_TO_COLORKEY(c) \
+ ((((c) & 0xff) << 16) | (((c) & 0xff) << 8) | (((c) & 0xff) << 0))
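
These macros expand a packed pixel value into the 8:8:8 colorkey layout by left-justifying each channel within its byte, so the low bits of each 8-bit field read as zero. A quick standalone check for the RGB565 case:

#include <assert.h>
#include <stdint.h>

#define RGB16_TO_COLORKEY(c) \
	((((c) & 0xf800) << 8) | (((c) & 0x07e0) << 5) | (((c) & 0x001f) << 3))

int main(void)
{
	/* RGB565 pure green (0x07e0) lands in bits 8-15 of the key. */
	assert(RGB16_TO_COLORKEY(0x07e0) == 0x00fc00);
	return 0;
}
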
+
+/* overlay flip addr flag */
+#define OFC_UPDATE 0x1
+
+/* polyphase filter coefficients */
+#define N_HORIZ_Y_TAPS 5
+#define N_VERT_Y_TAPS 3
+#define N_HORIZ_UV_TAPS 3
+#define N_VERT_UV_TAPS 3
+#define N_PHASES 17
+#define MAX_TAPS 5
+
+/* memory-buffered overlay registers */
+struct overlay_registers {
+ u32 OBUF_0Y;
+ u32 OBUF_1Y;
+ u32 OBUF_0U;
+ u32 OBUF_0V;
+ u32 OBUF_1U;
+ u32 OBUF_1V;
+ u32 OSTRIDE;
+ u32 YRGB_VPH;
+ u32 UV_VPH;
+ u32 HORZ_PH;
+ u32 INIT_PHS;
+ u32 DWINPOS;
+ u32 DWINSZ;
+ u32 SWIDTH;
+ u32 SWIDTHSW;
+ u32 SHEIGHT;
+ u32 YRGBSCALE;
+ u32 UVSCALE;
+ u32 OCLRC0;
+ u32 OCLRC1;
+ u32 DCLRKV;
+ u32 DCLRKM;
+ u32 SCLRKVH;
+ u32 SCLRKVL;
+ u32 SCLRKEN;
+ u32 OCONFIG;
+ u32 OCMD;
+ u32 RESERVED1; /* 0x6C */
+ u32 OSTART_0Y;
+ u32 OSTART_1Y;
+ u32 OSTART_0U;
+ u32 OSTART_0V;
+ u32 OSTART_1U;
+ u32 OSTART_1V;
+ u32 OTILEOFF_0Y;
+ u32 OTILEOFF_1Y;
+ u32 OTILEOFF_0U;
+ u32 OTILEOFF_0V;
+ u32 OTILEOFF_1U;
+ u32 OTILEOFF_1V;
+ u32 FASTHSCALE; /* 0xA0 */
+ u32 UVSCALEV; /* 0xA4 */
+ u32 RESERVEDC[(0x200 - 0xA8) / 4]; /* 0xA8 - 0x1FC */
+ u16 Y_VCOEFS[N_VERT_Y_TAPS * N_PHASES]; /* 0x200 */
+ u16 RESERVEDD[0x100 / 2 - N_VERT_Y_TAPS * N_PHASES];
+ u16 Y_HCOEFS[N_HORIZ_Y_TAPS * N_PHASES]; /* 0x300 */
+ u16 RESERVEDE[0x200 / 2 - N_HORIZ_Y_TAPS * N_PHASES];
+ u16 UV_VCOEFS[N_VERT_UV_TAPS * N_PHASES]; /* 0x500 */
+ u16 RESERVEDF[0x100 / 2 - N_VERT_UV_TAPS * N_PHASES];
+ u16 UV_HCOEFS[N_HORIZ_UV_TAPS * N_PHASES]; /* 0x600 */
+ u16 RESERVEDG[0x100 / 2 - N_HORIZ_UV_TAPS * N_PHASES];
+};
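
The annotated offsets in this struct can be sanity-checked with offsetof(); an abridged standalone mirror of the layout (the field grouping here is illustrative, only the annotated offsets are checked):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct regs_layout {
	uint32_t dwords[27];		/* OBUF_0Y .. OCMD */
	uint32_t RESERVED1;		/* expected at 0x6C */
	uint32_t starts_and_tiles[12];	/* OSTART_*, OTILEOFF_* */
	uint32_t FASTHSCALE;		/* expected at 0xA0 */
	uint32_t UVSCALEV;		/* expected at 0xA4 */
	uint32_t RESERVEDC[(0x200 - 0xA8) / 4];
	uint16_t Y_VCOEFS[3 * 17];	/* expected at 0x200 */
};

int main(void)
{
	assert(offsetof(struct regs_layout, RESERVED1) == 0x6c);
	assert(offsetof(struct regs_layout, FASTHSCALE) == 0xa0);
	assert(offsetof(struct regs_layout, Y_VCOEFS) == 0x200);
	return 0;
}
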
+
+struct intel_overlay {
+ struct drm_i915_private *i915;
+ struct intel_context *context;
+ struct intel_crtc *crtc;
+ struct i915_vma *vma;
+ struct i915_vma *old_vma;
+ struct intel_frontbuffer *frontbuffer;
+ bool active;
+ bool pfit_active;
+ u32 pfit_vscale_ratio; /* shifted-point number, (1<<12) == 1.0 */
+ u32 color_key:24;
+ u32 color_key_enabled:1;
+ u32 brightness, contrast, saturation;
+ u32 old_xscale, old_yscale;
+ /* register access */
+ struct drm_i915_gem_object *reg_bo;
+ struct overlay_registers __iomem *regs;
+ u32 flip_addr;
+ /* flip handling */
+ struct i915_active last_flip;
+ void (*flip_complete)(struct intel_overlay *ovl);
+};
+
+static void i830_overlay_clock_gating(struct drm_i915_private *dev_priv,
+ bool enable)
+{
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
+ u8 val;
+
+ /* WA_OVERLAY_CLKGATE:alm */
+ if (enable)
+ intel_de_write(dev_priv, DSPCLK_GATE_D(dev_priv), 0);
+ else
+ intel_de_write(dev_priv, DSPCLK_GATE_D(dev_priv),
+ OVRUNIT_CLOCK_GATE_DISABLE);
+
+ /* WA_DISABLE_L2CACHE_CLOCK_GATING:alm */
+ pci_bus_read_config_byte(pdev->bus,
+ PCI_DEVFN(0, 0), I830_CLOCK_GATE, &val);
+ if (enable)
+ val &= ~I830_L2_CACHE_CLOCK_GATE_DISABLE;
+ else
+ val |= I830_L2_CACHE_CLOCK_GATE_DISABLE;
+ pci_bus_write_config_byte(pdev->bus,
+ PCI_DEVFN(0, 0), I830_CLOCK_GATE, val);
+}
+
+static struct i915_request *
+alloc_request(struct intel_overlay *overlay, void (*fn)(struct intel_overlay *))
+{
+ struct i915_request *rq;
+ int err;
+
+ overlay->flip_complete = fn;
+
+ rq = i915_request_create(overlay->context);
+ if (IS_ERR(rq))
+ return rq;
+
+ err = i915_active_add_request(&overlay->last_flip, rq);
+ if (err) {
+ i915_request_add(rq);
+ return ERR_PTR(err);
+ }
+
+ return rq;
+}
+
+/* overlay needs to be disabled in OCMD reg */
+static int intel_overlay_on(struct intel_overlay *overlay)
+{
+ struct drm_i915_private *dev_priv = overlay->i915;
+ struct i915_request *rq;
+ u32 *cs;
+
+ drm_WARN_ON(&dev_priv->drm, overlay->active);
+
+ rq = alloc_request(overlay, NULL);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ return PTR_ERR(cs);
+ }
+
+ overlay->active = true;
+
+ if (IS_I830(dev_priv))
+ i830_overlay_clock_gating(dev_priv, false);
+
+ *cs++ = MI_OVERLAY_FLIP | MI_OVERLAY_ON;
+ *cs++ = overlay->flip_addr | OFC_UPDATE;
+ *cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
+ *cs++ = MI_NOOP;
+ intel_ring_advance(rq, cs);
+
+ i915_request_add(rq);
+
+ return i915_active_wait(&overlay->last_flip);
+}
+
+static void intel_overlay_flip_prepare(struct intel_overlay *overlay,
+ struct i915_vma *vma)
+{
+ enum pipe pipe = overlay->crtc->pipe;
+ struct intel_frontbuffer *frontbuffer = NULL;
+
+ drm_WARN_ON(&overlay->i915->drm, overlay->old_vma);
+
+ if (vma)
+ frontbuffer = intel_frontbuffer_get(vma->obj);
+
+ intel_frontbuffer_track(overlay->frontbuffer, frontbuffer,
+ INTEL_FRONTBUFFER_OVERLAY(pipe));
+
+ if (overlay->frontbuffer)
+ intel_frontbuffer_put(overlay->frontbuffer);
+ overlay->frontbuffer = frontbuffer;
+
+ intel_frontbuffer_flip_prepare(overlay->i915,
+ INTEL_FRONTBUFFER_OVERLAY(pipe));
+
+ overlay->old_vma = overlay->vma;
+ if (vma)
+ overlay->vma = i915_vma_get(vma);
+ else
+ overlay->vma = NULL;
+}
+
+/* overlay needs to be enabled in OCMD reg */
+static int intel_overlay_continue(struct intel_overlay *overlay,
+ struct i915_vma *vma,
+ bool load_polyphase_filter)
+{
+ struct drm_i915_private *dev_priv = overlay->i915;
+ struct i915_request *rq;
+ u32 flip_addr = overlay->flip_addr;
+ u32 tmp, *cs;
+
+ drm_WARN_ON(&dev_priv->drm, !overlay->active);
+
+ if (load_polyphase_filter)
+ flip_addr |= OFC_UPDATE;
+
+ /* check for underruns */
+ tmp = intel_de_read(dev_priv, DOVSTA);
+ if (tmp & (1 << 17))
+ drm_dbg(&dev_priv->drm, "overlay underrun, DOVSTA: %x\n", tmp);
+
+ rq = alloc_request(overlay, NULL);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ cs = intel_ring_begin(rq, 2);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ return PTR_ERR(cs);
+ }
+
+ *cs++ = MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE;
+ *cs++ = flip_addr;
+ intel_ring_advance(rq, cs);
+
+ intel_overlay_flip_prepare(overlay, vma);
+ i915_request_add(rq);
+
+ return 0;
+}
+
+static void intel_overlay_release_old_vma(struct intel_overlay *overlay)
+{
+ struct i915_vma *vma;
+
+ vma = fetch_and_zero(&overlay->old_vma);
+ if (drm_WARN_ON(&overlay->i915->drm, !vma))
+ return;
+
+ intel_frontbuffer_flip_complete(overlay->i915,
+ INTEL_FRONTBUFFER_OVERLAY(overlay->crtc->pipe));
+
+ i915_vma_unpin(vma);
+ i915_vma_put(vma);
+}
+
+static void
+intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
+{
+ intel_overlay_release_old_vma(overlay);
+}
+
+static void intel_overlay_off_tail(struct intel_overlay *overlay)
+{
+ struct drm_i915_private *dev_priv = overlay->i915;
+
+ intel_overlay_release_old_vma(overlay);
+
+ overlay->crtc->overlay = NULL;
+ overlay->crtc = NULL;
+ overlay->active = false;
+
+ if (IS_I830(dev_priv))
+ i830_overlay_clock_gating(dev_priv, true);
+}
+
+static void intel_overlay_last_flip_retire(struct i915_active *active)
+{
+ struct intel_overlay *overlay =
+ container_of(active, typeof(*overlay), last_flip);
+
+ if (overlay->flip_complete)
+ overlay->flip_complete(overlay);
+}
+
+/* overlay needs to be disabled in OCMD reg */
+static int intel_overlay_off(struct intel_overlay *overlay)
+{
+ struct i915_request *rq;
+ u32 *cs, flip_addr = overlay->flip_addr;
+
+ drm_WARN_ON(&overlay->i915->drm, !overlay->active);
+
+ /* According to intel docs the overlay hw may hang (when switching
+ * off) without loading the filter coeffs. It is however unclear whether
+ * this applies to the disabling of the overlay or to the switching off
+ * of the hw. Do it in both cases */
+ flip_addr |= OFC_UPDATE;
+
+ rq = alloc_request(overlay, intel_overlay_off_tail);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ cs = intel_ring_begin(rq, 6);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ return PTR_ERR(cs);
+ }
+
+ /* wait for overlay to go idle */
+ *cs++ = MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE;
+ *cs++ = flip_addr;
+ *cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
+
+ /* turn overlay off */
+ *cs++ = MI_OVERLAY_FLIP | MI_OVERLAY_OFF;
+ *cs++ = flip_addr;
+ *cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
+
+ intel_ring_advance(rq, cs);
+
+ intel_overlay_flip_prepare(overlay, NULL);
+ i915_request_add(rq);
+
+ return i915_active_wait(&overlay->last_flip);
+}
+
+/* recover from an interruption due to a signal
+ * We have to be careful not to repeat work forever and to keep making forward progress. */
+static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
+{
+ return i915_active_wait(&overlay->last_flip);
+}
+
+/* Wait for pending overlay flip and release old frame.
+ * Needs to be called before the overlay registers are changed
+ * via intel_overlay_(un)map_regs
+ */
+static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
+{
+ struct drm_i915_private *dev_priv = overlay->i915;
+ struct i915_request *rq;
+ u32 *cs;
+
+ /*
+ * Only wait if there is actually an old frame to release to
+ * guarantee forward progress.
+ */
+ if (!overlay->old_vma)
+ return 0;
+
+ if (!(intel_de_read(dev_priv, GEN2_ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT)) {
+ intel_overlay_release_old_vid_tail(overlay);
+ return 0;
+ }
+
+ rq = alloc_request(overlay, intel_overlay_release_old_vid_tail);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ cs = intel_ring_begin(rq, 2);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ return PTR_ERR(cs);
+ }
+
+ *cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
+ *cs++ = MI_NOOP;
+ intel_ring_advance(rq, cs);
+
+ i915_request_add(rq);
+
+ return i915_active_wait(&overlay->last_flip);
+}
+
+void intel_overlay_reset(struct drm_i915_private *dev_priv)
+{
+ struct intel_overlay *overlay = dev_priv->display.overlay;
+
+ if (!overlay)
+ return;
+
+ overlay->old_xscale = 0;
+ overlay->old_yscale = 0;
+ overlay->crtc = NULL;
+ overlay->active = false;
+}
+
+static int packed_depth_bytes(u32 format)
+{
+ switch (format & I915_OVERLAY_DEPTH_MASK) {
+ case I915_OVERLAY_YUV422:
+ return 4;
+ case I915_OVERLAY_YUV411:
+ /* return 6; not implemented */
+ default:
+ return -EINVAL;
+ }
+}
+
+static int packed_width_bytes(u32 format, short width)
+{
+ switch (format & I915_OVERLAY_DEPTH_MASK) {
+ case I915_OVERLAY_YUV422:
+ return width << 1;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int uv_hsubsampling(u32 format)
+{
+ switch (format & I915_OVERLAY_DEPTH_MASK) {
+ case I915_OVERLAY_YUV422:
+ case I915_OVERLAY_YUV420:
+ return 2;
+ case I915_OVERLAY_YUV411:
+ case I915_OVERLAY_YUV410:
+ return 4;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int uv_vsubsampling(u32 format)
+{
+ switch (format & I915_OVERLAY_DEPTH_MASK) {
+ case I915_OVERLAY_YUV420:
+ case I915_OVERLAY_YUV410:
+ return 2;
+ case I915_OVERLAY_YUV422:
+ case I915_OVERLAY_YUV411:
+ return 1;
+ default:
+ return -EINVAL;
+ }
+}
+
+static u32 calc_swidthsw(struct drm_i915_private *dev_priv, u32 offset, u32 width)
+{
+ u32 sw;
+
+ if (DISPLAY_VER(dev_priv) == 2)
+ sw = ALIGN((offset & 31) + width, 32);
+ else
+ sw = ALIGN((offset & 63) + width, 64);
+
+ if (sw == 0)
+ return 0;
+
+ return (sw - 32) >> 3;
+}
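
calc_swidthsw() rounds the span covered by the surface (the sub-cache-line part of the offset plus the width) up to a cache line, drops one 32-byte unit, and expresses the result in 8-byte units for the SWIDTHSW register. A standalone sketch of the gen2 branch:

#include <assert.h>
#include <stdint.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((uint32_t)(a) - 1))

static uint32_t swidthsw_gen2(uint32_t offset, uint32_t width)
{
	uint32_t sw = ALIGN_UP((offset & 31) + width, 32);

	return sw ? (sw - 32) >> 3 : 0;
}

int main(void)
{
	assert(swidthsw_gen2(0, 720) == 88);	/* ALIGN(720, 32) = 736 */
	return 0;
}
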
+
+static const u16 y_static_hcoeffs[N_PHASES][N_HORIZ_Y_TAPS] = {
+ [ 0] = { 0x3000, 0xb4a0, 0x1930, 0x1920, 0xb4a0, },
+ [ 1] = { 0x3000, 0xb500, 0x19d0, 0x1880, 0xb440, },
+ [ 2] = { 0x3000, 0xb540, 0x1a88, 0x2f80, 0xb3e0, },
+ [ 3] = { 0x3000, 0xb580, 0x1b30, 0x2e20, 0xb380, },
+ [ 4] = { 0x3000, 0xb5c0, 0x1bd8, 0x2cc0, 0xb320, },
+ [ 5] = { 0x3020, 0xb5e0, 0x1c60, 0x2b80, 0xb2c0, },
+ [ 6] = { 0x3020, 0xb5e0, 0x1cf8, 0x2a20, 0xb260, },
+ [ 7] = { 0x3020, 0xb5e0, 0x1d80, 0x28e0, 0xb200, },
+ [ 8] = { 0x3020, 0xb5c0, 0x1e08, 0x3f40, 0xb1c0, },
+ [ 9] = { 0x3020, 0xb580, 0x1e78, 0x3ce0, 0xb160, },
+ [10] = { 0x3040, 0xb520, 0x1ed8, 0x3aa0, 0xb120, },
+ [11] = { 0x3040, 0xb4a0, 0x1f30, 0x3880, 0xb0e0, },
+ [12] = { 0x3040, 0xb400, 0x1f78, 0x3680, 0xb0a0, },
+ [13] = { 0x3020, 0xb340, 0x1fb8, 0x34a0, 0xb060, },
+ [14] = { 0x3020, 0xb240, 0x1fe0, 0x32e0, 0xb040, },
+ [15] = { 0x3020, 0xb140, 0x1ff8, 0x3160, 0xb020, },
+ [16] = { 0xb000, 0x3000, 0x0800, 0x3000, 0xb000, },
+};
+
+static const u16 uv_static_hcoeffs[N_PHASES][N_HORIZ_UV_TAPS] = {
+ [ 0] = { 0x3000, 0x1800, 0x1800, },
+ [ 1] = { 0xb000, 0x18d0, 0x2e60, },
+ [ 2] = { 0xb000, 0x1990, 0x2ce0, },
+ [ 3] = { 0xb020, 0x1a68, 0x2b40, },
+ [ 4] = { 0xb040, 0x1b20, 0x29e0, },
+ [ 5] = { 0xb060, 0x1bd8, 0x2880, },
+ [ 6] = { 0xb080, 0x1c88, 0x3e60, },
+ [ 7] = { 0xb0a0, 0x1d28, 0x3c00, },
+ [ 8] = { 0xb0c0, 0x1db8, 0x39e0, },
+ [ 9] = { 0xb0e0, 0x1e40, 0x37e0, },
+ [10] = { 0xb100, 0x1eb8, 0x3620, },
+ [11] = { 0xb100, 0x1f18, 0x34a0, },
+ [12] = { 0xb100, 0x1f68, 0x3360, },
+ [13] = { 0xb0e0, 0x1fa8, 0x3240, },
+ [14] = { 0xb0c0, 0x1fe0, 0x3140, },
+ [15] = { 0xb060, 0x1ff0, 0x30a0, },
+ [16] = { 0x3000, 0x0800, 0x3000, },
+};
+
+static void update_polyphase_filter(struct overlay_registers __iomem *regs)
+{
+ memcpy_toio(regs->Y_HCOEFS, y_static_hcoeffs, sizeof(y_static_hcoeffs));
+ memcpy_toio(regs->UV_HCOEFS, uv_static_hcoeffs,
+ sizeof(uv_static_hcoeffs));
+}
+
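+/*
+ * Worked example for the 4.12 fixed-point math below (illustrative
+ * numbers only): scaling a 1280 pixel source scan width down to a
+ * 640 pixel destination gives
+ *   xscale = ((1280 - 1) << 12) / 640 = 8185 = 0x1ff9,
+ * i.e. just under 2.0. With YUV422 (uv_hscale == 2) this becomes
+ *   xscale_UV = 8185 / 2 = 4092, xscale = 4092 * 2 = 8184,
+ * rounding the Y scale down so it is an exact multiple of the UV
+ * scale.
+ */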
+static bool update_scaling_factors(struct intel_overlay *overlay,
+ struct overlay_registers __iomem *regs,
+ struct drm_intel_overlay_put_image *params)
+{
+ /* fixed point with a 12 bit shift */
+ u32 xscale, yscale, xscale_UV, yscale_UV;
+#define FP_SHIFT 12
+#define FRACT_MASK 0xfff
+ bool scale_changed = false;
+ int uv_hscale = uv_hsubsampling(params->flags);
+ int uv_vscale = uv_vsubsampling(params->flags);
+
+ if (params->dst_width > 1)
+ xscale = ((params->src_scan_width - 1) << FP_SHIFT) /
+ params->dst_width;
+ else
+ xscale = 1 << FP_SHIFT;
+
+ if (params->dst_height > 1)
+ yscale = ((params->src_scan_height - 1) << FP_SHIFT) /
+ params->dst_height;
+ else
+ yscale = 1 << FP_SHIFT;
+
+ /*if (params->format & I915_OVERLAY_YUV_PLANAR) {*/
+ xscale_UV = xscale/uv_hscale;
+ yscale_UV = yscale/uv_vscale;
+	/* make the Y scale to UV scale ratio an exact multiple */
+ xscale = xscale_UV * uv_hscale;
+ yscale = yscale_UV * uv_vscale;
+ /*} else {
+ xscale_UV = 0;
+ yscale_UV = 0;
+ }*/
+
+ if (xscale != overlay->old_xscale || yscale != overlay->old_yscale)
+ scale_changed = true;
+ overlay->old_xscale = xscale;
+ overlay->old_yscale = yscale;
+
+ iowrite32(((yscale & FRACT_MASK) << 20) |
+ ((xscale >> FP_SHIFT) << 16) |
+ ((xscale & FRACT_MASK) << 3),
+ &regs->YRGBSCALE);
+
+ iowrite32(((yscale_UV & FRACT_MASK) << 20) |
+ ((xscale_UV >> FP_SHIFT) << 16) |
+ ((xscale_UV & FRACT_MASK) << 3),
+ &regs->UVSCALE);
+
+ iowrite32((((yscale >> FP_SHIFT) << 16) |
+ ((yscale_UV >> FP_SHIFT) << 0)),
+ &regs->UVSCALEV);
+
+ if (scale_changed)
+ update_polyphase_filter(regs);
+
+ return scale_changed;
+}
+
+static void update_colorkey(struct intel_overlay *overlay,
+ struct overlay_registers __iomem *regs)
+{
+ const struct intel_plane_state *state =
+ to_intel_plane_state(overlay->crtc->base.primary->state);
+ u32 key = overlay->color_key;
+ u32 format = 0;
+ u32 flags = 0;
+
+ if (overlay->color_key_enabled)
+ flags |= DST_KEY_ENABLE;
+
+ if (state->uapi.visible)
+ format = state->hw.fb->format->format;
+
+ switch (format) {
+ case DRM_FORMAT_C8:
+ key = RGB8I_TO_COLORKEY(key);
+ flags |= CLK_RGB24_MASK;
+ break;
+ case DRM_FORMAT_XRGB1555:
+ key = RGB15_TO_COLORKEY(key);
+ flags |= CLK_RGB15_MASK;
+ break;
+ case DRM_FORMAT_RGB565:
+ key = RGB16_TO_COLORKEY(key);
+ flags |= CLK_RGB16_MASK;
+ break;
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_XBGR2101010:
+ key = RGB30_TO_COLORKEY(key);
+ flags |= CLK_RGB24_MASK;
+ break;
+ default:
+ flags |= CLK_RGB24_MASK;
+ break;
+ }
+
+ iowrite32(key, &regs->DCLRKV);
+ iowrite32(flags, &regs->DCLRKM);
+}
+
+static u32 overlay_cmd_reg(struct drm_intel_overlay_put_image *params)
+{
+ u32 cmd = OCMD_ENABLE | OCMD_BUF_TYPE_FRAME | OCMD_BUFFER0;
+
+ if (params->flags & I915_OVERLAY_YUV_PLANAR) {
+ switch (params->flags & I915_OVERLAY_DEPTH_MASK) {
+ case I915_OVERLAY_YUV422:
+ cmd |= OCMD_YUV_422_PLANAR;
+ break;
+ case I915_OVERLAY_YUV420:
+ cmd |= OCMD_YUV_420_PLANAR;
+ break;
+ case I915_OVERLAY_YUV411:
+ case I915_OVERLAY_YUV410:
+ cmd |= OCMD_YUV_410_PLANAR;
+ break;
+ }
+ } else { /* YUV packed */
+ switch (params->flags & I915_OVERLAY_DEPTH_MASK) {
+ case I915_OVERLAY_YUV422:
+ cmd |= OCMD_YUV_422_PACKED;
+ break;
+ case I915_OVERLAY_YUV411:
+ cmd |= OCMD_YUV_411_PACKED;
+ break;
+ }
+
+ switch (params->flags & I915_OVERLAY_SWAP_MASK) {
+ case I915_OVERLAY_NO_SWAP:
+ break;
+ case I915_OVERLAY_UV_SWAP:
+ cmd |= OCMD_UV_SWAP;
+ break;
+ case I915_OVERLAY_Y_SWAP:
+ cmd |= OCMD_Y_SWAP;
+ break;
+ case I915_OVERLAY_Y_AND_UV_SWAP:
+ cmd |= OCMD_Y_AND_UV_SWAP;
+ break;
+ }
+ }
+
+ return cmd;
+}
+
+static struct i915_vma *intel_overlay_pin_fb(struct drm_i915_gem_object *new_bo)
+{
+ struct i915_gem_ww_ctx ww;
+ struct i915_vma *vma;
+ int ret;
+
+ i915_gem_ww_ctx_init(&ww, true);
+retry:
+ ret = i915_gem_object_lock(new_bo, &ww);
+ if (!ret) {
+ vma = i915_gem_object_pin_to_display_plane(new_bo, &ww, 0,
+ NULL, PIN_MAPPABLE);
+ ret = PTR_ERR_OR_ZERO(vma);
+ }
+ if (ret == -EDEADLK) {
+ ret = i915_gem_ww_ctx_backoff(&ww);
+ if (!ret)
+ goto retry;
+ }
+ i915_gem_ww_ctx_fini(&ww);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return vma;
+}
+
+static int intel_overlay_do_put_image(struct intel_overlay *overlay,
+ struct drm_i915_gem_object *new_bo,
+ struct drm_intel_overlay_put_image *params)
+{
+ struct overlay_registers __iomem *regs = overlay->regs;
+ struct drm_i915_private *dev_priv = overlay->i915;
+ u32 swidth, swidthsw, sheight, ostride;
+ enum pipe pipe = overlay->crtc->pipe;
+ bool scale_changed = false;
+ struct i915_vma *vma;
+ int ret, tmp_width;
+
+ drm_WARN_ON(&dev_priv->drm,
+ !drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
+
+ ret = intel_overlay_release_old_vid(overlay);
+ if (ret != 0)
+ return ret;
+
+ atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
+
+ vma = intel_overlay_pin_fb(new_bo);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto out_pin_section;
+ }
+
+ i915_gem_object_flush_frontbuffer(new_bo, ORIGIN_DIRTYFB);
+
+ if (!overlay->active) {
+ const struct intel_crtc_state *crtc_state =
+ overlay->crtc->config;
+ u32 oconfig = 0;
+
+ if (crtc_state->gamma_enable &&
+ crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
+ oconfig |= OCONF_CC_OUT_8BIT;
+ if (crtc_state->gamma_enable)
+ oconfig |= OCONF_GAMMA2_ENABLE;
+ if (DISPLAY_VER(dev_priv) == 4)
+ oconfig |= OCONF_CSC_MODE_BT709;
+ oconfig |= pipe == 0 ?
+ OCONF_PIPE_A : OCONF_PIPE_B;
+ iowrite32(oconfig, &regs->OCONFIG);
+
+ ret = intel_overlay_on(overlay);
+ if (ret != 0)
+ goto out_unpin;
+ }
+
+ iowrite32(params->dst_y << 16 | params->dst_x, &regs->DWINPOS);
+ iowrite32(params->dst_height << 16 | params->dst_width, &regs->DWINSZ);
+
+ if (params->flags & I915_OVERLAY_YUV_PACKED)
+ tmp_width = packed_width_bytes(params->flags,
+ params->src_width);
+ else
+ tmp_width = params->src_width;
+
+ swidth = params->src_width;
+ swidthsw = calc_swidthsw(dev_priv, params->offset_Y, tmp_width);
+ sheight = params->src_height;
+ iowrite32(i915_ggtt_offset(vma) + params->offset_Y, &regs->OBUF_0Y);
+ ostride = params->stride_Y;
+
+ if (params->flags & I915_OVERLAY_YUV_PLANAR) {
+ int uv_hscale = uv_hsubsampling(params->flags);
+ int uv_vscale = uv_vsubsampling(params->flags);
+ u32 tmp_U, tmp_V;
+
+ swidth |= (params->src_width / uv_hscale) << 16;
+ sheight |= (params->src_height / uv_vscale) << 16;
+
+ tmp_U = calc_swidthsw(dev_priv, params->offset_U,
+ params->src_width / uv_hscale);
+ tmp_V = calc_swidthsw(dev_priv, params->offset_V,
+ params->src_width / uv_hscale);
+ swidthsw |= max(tmp_U, tmp_V) << 16;
+
+ iowrite32(i915_ggtt_offset(vma) + params->offset_U,
+ &regs->OBUF_0U);
+ iowrite32(i915_ggtt_offset(vma) + params->offset_V,
+ &regs->OBUF_0V);
+
+ ostride |= params->stride_UV << 16;
+ }
+
+ iowrite32(swidth, &regs->SWIDTH);
+ iowrite32(swidthsw, &regs->SWIDTHSW);
+ iowrite32(sheight, &regs->SHEIGHT);
+ iowrite32(ostride, &regs->OSTRIDE);
+
+ scale_changed = update_scaling_factors(overlay, regs, params);
+
+ update_colorkey(overlay, regs);
+
+ iowrite32(overlay_cmd_reg(params), &regs->OCMD);
+
+ ret = intel_overlay_continue(overlay, vma, scale_changed);
+ if (ret)
+ goto out_unpin;
+
+ return 0;
+
+out_unpin:
+ i915_vma_unpin(vma);
+out_pin_section:
+ atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
+
+ return ret;
+}
+
+int intel_overlay_switch_off(struct intel_overlay *overlay)
+{
+ struct drm_i915_private *dev_priv = overlay->i915;
+ int ret;
+
+ drm_WARN_ON(&dev_priv->drm,
+ !drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
+
+ ret = intel_overlay_recover_from_interrupt(overlay);
+ if (ret != 0)
+ return ret;
+
+ if (!overlay->active)
+ return 0;
+
+ ret = intel_overlay_release_old_vid(overlay);
+ if (ret != 0)
+ return ret;
+
+ iowrite32(0, &overlay->regs->OCMD);
+
+ return intel_overlay_off(overlay);
+}
+
+static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
+ struct intel_crtc *crtc)
+{
+ if (!crtc->active)
+ return -EINVAL;
+
+	/* can't use the overlay with a double wide pipe */
+ if (crtc->config->double_wide)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void update_pfit_vscale_ratio(struct intel_overlay *overlay)
+{
+ struct drm_i915_private *dev_priv = overlay->i915;
+ u32 pfit_control = intel_de_read(dev_priv, PFIT_CONTROL);
+ u32 ratio;
+
+ /* XXX: This is not the same logic as in the xorg driver, but more in
+ * line with the intel documentation for the i965
+ */
+ if (DISPLAY_VER(dev_priv) >= 4) {
+ /* on i965 use the PGM reg to read out the autoscaler values */
+ ratio = intel_de_read(dev_priv, PFIT_PGM_RATIOS) >> PFIT_VERT_SCALE_SHIFT_965;
+ } else {
+ if (pfit_control & VERT_AUTO_SCALE)
+ ratio = intel_de_read(dev_priv, PFIT_AUTO_RATIOS);
+ else
+ ratio = intel_de_read(dev_priv, PFIT_PGM_RATIOS);
+ ratio >>= PFIT_VERT_SCALE_SHIFT;
+ }
+
+ overlay->pfit_vscale_ratio = ratio;
+}
+
+static int check_overlay_dst(struct intel_overlay *overlay,
+ struct drm_intel_overlay_put_image *rec)
+{
+ const struct intel_crtc_state *crtc_state =
+ overlay->crtc->config;
+ struct drm_rect req, clipped;
+
+ drm_rect_init(&req, rec->dst_x, rec->dst_y,
+ rec->dst_width, rec->dst_height);
+
+ clipped = req;
+ drm_rect_intersect(&clipped, &crtc_state->pipe_src);
+
+ if (!drm_rect_visible(&clipped) ||
+ !drm_rect_equals(&clipped, &req))
+ return -EINVAL;
+
+ return 0;
+}
+
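+/*
+ * Worked example (illustrative): a 4096 line source scan into a 512
+ * line destination yields ((4096 << 16) / 512) >> 16 = 8, which the
+ * check below rejects, so the ratio must stay strictly below 8.0.
+ */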
+static int check_overlay_scaling(struct drm_intel_overlay_put_image *rec)
+{
+ u32 tmp;
+
+ /* downscaling limit is 8.0 */
+ tmp = ((rec->src_scan_height << 16) / rec->dst_height) >> 16;
+ if (tmp > 7)
+ return -EINVAL;
+
+ tmp = ((rec->src_scan_width << 16) / rec->dst_width) >> 16;
+ if (tmp > 7)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int check_overlay_src(struct drm_i915_private *dev_priv,
+ struct drm_intel_overlay_put_image *rec,
+ struct drm_i915_gem_object *new_bo)
+{
+ int uv_hscale = uv_hsubsampling(rec->flags);
+ int uv_vscale = uv_vsubsampling(rec->flags);
+ u32 stride_mask;
+ int depth;
+ u32 tmp;
+
+ /* check src dimensions */
+ if (IS_I845G(dev_priv) || IS_I830(dev_priv)) {
+ if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY ||
+ rec->src_width > IMAGE_MAX_WIDTH_LEGACY)
+ return -EINVAL;
+ } else {
+ if (rec->src_height > IMAGE_MAX_HEIGHT ||
+ rec->src_width > IMAGE_MAX_WIDTH)
+ return -EINVAL;
+ }
+
+ /* better safe than sorry, use 4 as the maximal subsampling ratio */
+ if (rec->src_height < N_VERT_Y_TAPS*4 ||
+ rec->src_width < N_HORIZ_Y_TAPS*4)
+ return -EINVAL;
+
+ /* check alignment constraints */
+ switch (rec->flags & I915_OVERLAY_TYPE_MASK) {
+ case I915_OVERLAY_RGB:
+ /* not implemented */
+ return -EINVAL;
+
+ case I915_OVERLAY_YUV_PACKED:
+ if (uv_vscale != 1)
+ return -EINVAL;
+
+ depth = packed_depth_bytes(rec->flags);
+ if (depth < 0)
+ return depth;
+
+ /* ignore UV planes */
+ rec->stride_UV = 0;
+ rec->offset_U = 0;
+ rec->offset_V = 0;
+ /* check pixel alignment */
+ if (rec->offset_Y % depth)
+ return -EINVAL;
+ break;
+
+ case I915_OVERLAY_YUV_PLANAR:
+ if (uv_vscale < 0 || uv_hscale < 0)
+ return -EINVAL;
+ /* no offset restrictions for planar formats */
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (rec->src_width % uv_hscale)
+ return -EINVAL;
+
+ /* stride checking */
+ if (IS_I830(dev_priv) || IS_I845G(dev_priv))
+ stride_mask = 255;
+ else
+ stride_mask = 63;
+
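+	/*
+	 * i.e. strides must be multiples of 256 bytes on i830/i845 and
+	 * of 64 bytes elsewhere (illustration of the masks above).
+	 */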
+ if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask)
+ return -EINVAL;
+ if (DISPLAY_VER(dev_priv) == 4 && rec->stride_Y < 512)
+ return -EINVAL;
+
+ tmp = (rec->flags & I915_OVERLAY_TYPE_MASK) == I915_OVERLAY_YUV_PLANAR ?
+ 4096 : 8192;
+ if (rec->stride_Y > tmp || rec->stride_UV > 2*1024)
+ return -EINVAL;
+
+ /* check buffer dimensions */
+ switch (rec->flags & I915_OVERLAY_TYPE_MASK) {
+ case I915_OVERLAY_RGB:
+ case I915_OVERLAY_YUV_PACKED:
+ /* always 4 Y values per depth pixels */
+ if (packed_width_bytes(rec->flags, rec->src_width) > rec->stride_Y)
+ return -EINVAL;
+
+ tmp = rec->stride_Y*rec->src_height;
+ if (rec->offset_Y + tmp > new_bo->base.size)
+ return -EINVAL;
+ break;
+
+ case I915_OVERLAY_YUV_PLANAR:
+ if (rec->src_width > rec->stride_Y)
+ return -EINVAL;
+ if (rec->src_width/uv_hscale > rec->stride_UV)
+ return -EINVAL;
+
+ tmp = rec->stride_Y * rec->src_height;
+ if (rec->offset_Y + tmp > new_bo->base.size)
+ return -EINVAL;
+
+ tmp = rec->stride_UV * (rec->src_height / uv_vscale);
+ if (rec->offset_U + tmp > new_bo->base.size ||
+ rec->offset_V + tmp > new_bo->base.size)
+ return -EINVAL;
+ break;
+ }
+
+ return 0;
+}
+
+int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_intel_overlay_put_image *params = data;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_overlay *overlay;
+ struct drm_crtc *drmmode_crtc;
+ struct intel_crtc *crtc;
+ struct drm_i915_gem_object *new_bo;
+ int ret;
+
+ overlay = dev_priv->display.overlay;
+ if (!overlay) {
+ drm_dbg(&dev_priv->drm, "userspace bug: no overlay\n");
+ return -ENODEV;
+ }
+
+ if (!(params->flags & I915_OVERLAY_ENABLE)) {
+ drm_modeset_lock_all(dev);
+ ret = intel_overlay_switch_off(overlay);
+ drm_modeset_unlock_all(dev);
+
+ return ret;
+ }
+
+ drmmode_crtc = drm_crtc_find(dev, file_priv, params->crtc_id);
+ if (!drmmode_crtc)
+ return -ENOENT;
+ crtc = to_intel_crtc(drmmode_crtc);
+
+ new_bo = i915_gem_object_lookup(file_priv, params->bo_handle);
+ if (!new_bo)
+ return -ENOENT;
+
+ drm_modeset_lock_all(dev);
+
+ if (i915_gem_object_is_tiled(new_bo)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "buffer used for overlay image can not be tiled\n");
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ ret = intel_overlay_recover_from_interrupt(overlay);
+ if (ret != 0)
+ goto out_unlock;
+
+ if (overlay->crtc != crtc) {
+ ret = intel_overlay_switch_off(overlay);
+ if (ret != 0)
+ goto out_unlock;
+
+ ret = check_overlay_possible_on_crtc(overlay, crtc);
+ if (ret != 0)
+ goto out_unlock;
+
+ overlay->crtc = crtc;
+ crtc->overlay = overlay;
+
+ /* line too wide, i.e. one-line-mode */
+ if (drm_rect_width(&crtc->config->pipe_src) > 1024 &&
+ crtc->config->gmch_pfit.control & PFIT_ENABLE) {
+ overlay->pfit_active = true;
+ update_pfit_vscale_ratio(overlay);
+ } else
+ overlay->pfit_active = false;
+ }
+
+ ret = check_overlay_dst(overlay, params);
+ if (ret != 0)
+ goto out_unlock;
+
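+	/*
+	 * Illustrative example of the 4.12 fixed-point compensation
+	 * below (made-up numbers): with pfit_vscale_ratio == 3072,
+	 * i.e. 0.75 for a 768 line source stretched to a 1024 line
+	 * panel, dst_height == 600 becomes (600 << 12) / 3072 + 1 = 801.
+	 */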
+ if (overlay->pfit_active) {
+ params->dst_y = (((u32)params->dst_y << 12) /
+ overlay->pfit_vscale_ratio);
+ /* shifting right rounds downwards, so add 1 */
+ params->dst_height = (((u32)params->dst_height << 12) /
+ overlay->pfit_vscale_ratio) + 1;
+ }
+
+ if (params->src_scan_height > params->src_height ||
+ params->src_scan_width > params->src_width) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ ret = check_overlay_src(dev_priv, params, new_bo);
+ if (ret != 0)
+ goto out_unlock;
+
+ /* Check scaling after src size to prevent a divide-by-zero. */
+ ret = check_overlay_scaling(params);
+ if (ret != 0)
+ goto out_unlock;
+
+ ret = intel_overlay_do_put_image(overlay, new_bo, params);
+ if (ret != 0)
+ goto out_unlock;
+
+ drm_modeset_unlock_all(dev);
+ i915_gem_object_put(new_bo);
+
+ return 0;
+
+out_unlock:
+ drm_modeset_unlock_all(dev);
+ i915_gem_object_put(new_bo);
+
+ return ret;
+}
+
+static void update_reg_attrs(struct intel_overlay *overlay,
+ struct overlay_registers __iomem *regs)
+{
+ iowrite32((overlay->contrast << 18) | (overlay->brightness & 0xff),
+ &regs->OCLRC0);
+ iowrite32(overlay->saturation, &regs->OCLRC1);
+}
+
+static bool check_gamma_bounds(u32 gamma1, u32 gamma2)
+{
+ int i;
+
+ if (gamma1 & 0xff000000 || gamma2 & 0xff000000)
+ return false;
+
+ for (i = 0; i < 3; i++) {
+ if (((gamma1 >> i*8) & 0xff) >= ((gamma2 >> i*8) & 0xff))
+ return false;
+ }
+
+ return true;
+}
+
+static bool check_gamma5_errata(u32 gamma5)
+{
+ int i;
+
+ for (i = 0; i < 3; i++) {
+ if (((gamma5 >> i*8) & 0xff) == 0x80)
+ return false;
+ }
+
+ return true;
+}
+
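+/*
+ * Example of a ramp that passes the checks below (made-up values):
+ * gamma0..gamma5 = 0x101010, 0x303030, 0x505050, 0x707070, 0x909090,
+ * 0xb0b0b0. Each per-channel byte increases strictly from one entry
+ * to the next, stays below 0x01000000, and no gamma5 byte equals the
+ * 0x80 value rejected by the errata check.
+ */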
+static int check_gamma(struct drm_intel_overlay_attrs *attrs)
+{
+ if (!check_gamma_bounds(0, attrs->gamma0) ||
+ !check_gamma_bounds(attrs->gamma0, attrs->gamma1) ||
+ !check_gamma_bounds(attrs->gamma1, attrs->gamma2) ||
+ !check_gamma_bounds(attrs->gamma2, attrs->gamma3) ||
+ !check_gamma_bounds(attrs->gamma3, attrs->gamma4) ||
+ !check_gamma_bounds(attrs->gamma4, attrs->gamma5) ||
+ !check_gamma_bounds(attrs->gamma5, 0x00ffffff))
+ return -EINVAL;
+
+ if (!check_gamma5_errata(attrs->gamma5))
+ return -EINVAL;
+
+ return 0;
+}
+
+int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_intel_overlay_attrs *attrs = data;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_overlay *overlay;
+ int ret;
+
+ overlay = dev_priv->display.overlay;
+ if (!overlay) {
+ drm_dbg(&dev_priv->drm, "userspace bug: no overlay\n");
+ return -ENODEV;
+ }
+
+ drm_modeset_lock_all(dev);
+
+ ret = -EINVAL;
+ if (!(attrs->flags & I915_OVERLAY_UPDATE_ATTRS)) {
+ attrs->color_key = overlay->color_key;
+ attrs->brightness = overlay->brightness;
+ attrs->contrast = overlay->contrast;
+ attrs->saturation = overlay->saturation;
+
+ if (DISPLAY_VER(dev_priv) != 2) {
+ attrs->gamma0 = intel_de_read(dev_priv, OGAMC0);
+ attrs->gamma1 = intel_de_read(dev_priv, OGAMC1);
+ attrs->gamma2 = intel_de_read(dev_priv, OGAMC2);
+ attrs->gamma3 = intel_de_read(dev_priv, OGAMC3);
+ attrs->gamma4 = intel_de_read(dev_priv, OGAMC4);
+ attrs->gamma5 = intel_de_read(dev_priv, OGAMC5);
+ }
+ } else {
+ if (attrs->brightness < -128 || attrs->brightness > 127)
+ goto out_unlock;
+ if (attrs->contrast > 255)
+ goto out_unlock;
+ if (attrs->saturation > 1023)
+ goto out_unlock;
+
+ overlay->color_key = attrs->color_key;
+ overlay->brightness = attrs->brightness;
+ overlay->contrast = attrs->contrast;
+ overlay->saturation = attrs->saturation;
+
+ update_reg_attrs(overlay, overlay->regs);
+
+ if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) {
+ if (DISPLAY_VER(dev_priv) == 2)
+ goto out_unlock;
+
+ if (overlay->active) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+
+ ret = check_gamma(attrs);
+ if (ret)
+ goto out_unlock;
+
+ intel_de_write(dev_priv, OGAMC0, attrs->gamma0);
+ intel_de_write(dev_priv, OGAMC1, attrs->gamma1);
+ intel_de_write(dev_priv, OGAMC2, attrs->gamma2);
+ intel_de_write(dev_priv, OGAMC3, attrs->gamma3);
+ intel_de_write(dev_priv, OGAMC4, attrs->gamma4);
+ intel_de_write(dev_priv, OGAMC5, attrs->gamma5);
+ }
+ }
+ overlay->color_key_enabled = (attrs->flags & I915_OVERLAY_DISABLE_DEST_COLORKEY) == 0;
+
+ ret = 0;
+out_unlock:
+ drm_modeset_unlock_all(dev);
+
+ return ret;
+}
+
+static int get_registers(struct intel_overlay *overlay, bool use_phys)
+{
+ struct drm_i915_private *i915 = overlay->i915;
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int err;
+
+ obj = i915_gem_object_create_stolen(i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_put_bo;
+ }
+
+ if (use_phys)
+ overlay->flip_addr = sg_dma_address(obj->mm.pages->sgl);
+ else
+ overlay->flip_addr = i915_ggtt_offset(vma);
+ overlay->regs = i915_vma_pin_iomap(vma);
+ i915_vma_unpin(vma);
+
+ if (IS_ERR(overlay->regs)) {
+ err = PTR_ERR(overlay->regs);
+ goto err_put_bo;
+ }
+
+ overlay->reg_bo = obj;
+ return 0;
+
+err_put_bo:
+ i915_gem_object_put(obj);
+ return err;
+}
+
+void intel_overlay_setup(struct drm_i915_private *dev_priv)
+{
+ struct intel_overlay *overlay;
+ struct intel_engine_cs *engine;
+ int ret;
+
+ if (!HAS_OVERLAY(dev_priv))
+ return;
+
+ engine = to_gt(dev_priv)->engine[RCS0];
+ if (!engine || !engine->kernel_context)
+ return;
+
+ overlay = kzalloc(sizeof(*overlay), GFP_KERNEL);
+ if (!overlay)
+ return;
+
+ overlay->i915 = dev_priv;
+ overlay->context = engine->kernel_context;
+ overlay->color_key = 0x0101fe;
+ overlay->color_key_enabled = true;
+ overlay->brightness = -19;
+ overlay->contrast = 75;
+ overlay->saturation = 146;
+
+ i915_active_init(&overlay->last_flip,
+ NULL, intel_overlay_last_flip_retire, 0);
+
+ ret = get_registers(overlay, OVERLAY_NEEDS_PHYSICAL(dev_priv));
+ if (ret)
+ goto out_free;
+
+ memset_io(overlay->regs, 0, sizeof(struct overlay_registers));
+ update_polyphase_filter(overlay->regs);
+ update_reg_attrs(overlay, overlay->regs);
+
+ dev_priv->display.overlay = overlay;
+ drm_info(&dev_priv->drm, "Initialized overlay support.\n");
+ return;
+
+out_free:
+ kfree(overlay);
+}
+
+void intel_overlay_cleanup(struct drm_i915_private *dev_priv)
+{
+ struct intel_overlay *overlay;
+
+ overlay = fetch_and_zero(&dev_priv->display.overlay);
+ if (!overlay)
+ return;
+
+ /*
+	 * The BOs should already be freed by the generic code.
+	 * Furthermore, modesetting teardown happens beforehand, so the
+	 * hardware should already be off.
+ */
+ drm_WARN_ON(&dev_priv->drm, overlay->active);
+
+ i915_gem_object_put(overlay->reg_bo);
+ i915_active_fini(&overlay->last_flip);
+
+ kfree(overlay);
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
+
+struct intel_overlay_error_state {
+ struct overlay_registers regs;
+ unsigned long base;
+ u32 dovsta;
+ u32 isr;
+};
+
+struct intel_overlay_error_state *
+intel_overlay_capture_error_state(struct drm_i915_private *dev_priv)
+{
+ struct intel_overlay *overlay = dev_priv->display.overlay;
+ struct intel_overlay_error_state *error;
+
+ if (!overlay || !overlay->active)
+ return NULL;
+
+ error = kmalloc(sizeof(*error), GFP_ATOMIC);
+ if (error == NULL)
+ return NULL;
+
+ error->dovsta = intel_de_read(dev_priv, DOVSTA);
+ error->isr = intel_de_read(dev_priv, GEN2_ISR);
+ error->base = overlay->flip_addr;
+
+ memcpy_fromio(&error->regs, overlay->regs, sizeof(error->regs));
+
+ return error;
+}
+
+void
+intel_overlay_print_error_state(struct drm_i915_error_state_buf *m,
+ struct intel_overlay_error_state *error)
+{
+ i915_error_printf(m, "Overlay, status: 0x%08x, interrupt: 0x%08x\n",
+ error->dovsta, error->isr);
+ i915_error_printf(m, " Register file at 0x%08lx:\n",
+ error->base);
+
+#define P(x) i915_error_printf(m, " " #x ": 0x%08x\n", error->regs.x)
+ P(OBUF_0Y);
+ P(OBUF_1Y);
+ P(OBUF_0U);
+ P(OBUF_0V);
+ P(OBUF_1U);
+ P(OBUF_1V);
+ P(OSTRIDE);
+ P(YRGB_VPH);
+ P(UV_VPH);
+ P(HORZ_PH);
+ P(INIT_PHS);
+ P(DWINPOS);
+ P(DWINSZ);
+ P(SWIDTH);
+ P(SWIDTHSW);
+ P(SHEIGHT);
+ P(YRGBSCALE);
+ P(UVSCALE);
+ P(OCLRC0);
+ P(OCLRC1);
+ P(DCLRKV);
+ P(DCLRKM);
+ P(SCLRKVH);
+ P(SCLRKVL);
+ P(SCLRKEN);
+ P(OCONFIG);
+ P(OCMD);
+ P(OSTART_0Y);
+ P(OSTART_1Y);
+ P(OSTART_0U);
+ P(OSTART_0V);
+ P(OSTART_1U);
+ P(OSTART_1V);
+ P(OTILEOFF_0Y);
+ P(OTILEOFF_1Y);
+ P(OTILEOFF_0U);
+ P(OTILEOFF_0V);
+ P(OTILEOFF_1U);
+ P(OTILEOFF_1V);
+ P(FASTHSCALE);
+ P(UVSCALEV);
+#undef P
+}
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.h b/drivers/gpu/drm/i915/display/intel_overlay.h
new file mode 100644
index 000000000..a167c28ac
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_overlay.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_OVERLAY_H__
+#define __INTEL_OVERLAY_H__
+
+struct drm_device;
+struct drm_file;
+struct drm_i915_error_state_buf;
+struct drm_i915_private;
+struct intel_overlay;
+struct intel_overlay_error_state;
+
+void intel_overlay_setup(struct drm_i915_private *dev_priv);
+void intel_overlay_cleanup(struct drm_i915_private *dev_priv);
+int intel_overlay_switch_off(struct intel_overlay *overlay);
+int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+void intel_overlay_reset(struct drm_i915_private *dev_priv);
+struct intel_overlay_error_state *
+intel_overlay_capture_error_state(struct drm_i915_private *dev_priv);
+void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
+ struct intel_overlay_error_state *error);
+
+#endif /* __INTEL_OVERLAY_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c
new file mode 100644
index 000000000..b50db0dd2
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_panel.c
@@ -0,0 +1,686 @@
+/*
+ * Copyright © 2006-2010 Intel Corporation
+ * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Dave Airlie <airlied@linux.ie>
+ * Jesse Barnes <jesse.barnes@intel.com>
+ * Chris Wilson <chris@chris-wilson.co.uk>
+ */
+
+#include <linux/kernel.h>
+#include <linux/pwm.h>
+
+#include "intel_backlight.h"
+#include "intel_connector.h"
+#include "intel_de.h"
+#include "intel_display_types.h"
+#include "intel_drrs.h"
+#include "intel_panel.h"
+#include "intel_quirks.h"
+
+bool intel_panel_use_ssc(struct drm_i915_private *i915)
+{
+ if (i915->params.panel_use_ssc >= 0)
+ return i915->params.panel_use_ssc != 0;
+ return i915->display.vbt.lvds_use_ssc &&
+ !intel_has_quirk(i915, QUIRK_LVDS_SSC_DISABLE);
+}
+
+const struct drm_display_mode *
+intel_panel_preferred_fixed_mode(struct intel_connector *connector)
+{
+ return list_first_entry_or_null(&connector->panel.fixed_modes,
+ struct drm_display_mode, head);
+}
+
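+/*
+ * For instance (hypothetical panel), with fixed modes at 60 Hz and
+ * 48 Hz, a requested 50 Hz user mode selects the 48 Hz entry below,
+ * since |48 - 50| < |60 - 50|.
+ */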
+const struct drm_display_mode *
+intel_panel_fixed_mode(struct intel_connector *connector,
+ const struct drm_display_mode *mode)
+{
+ const struct drm_display_mode *fixed_mode, *best_mode = NULL;
+ int vrefresh = drm_mode_vrefresh(mode);
+
+ /* pick the fixed_mode that is closest in terms of vrefresh */
+ list_for_each_entry(fixed_mode, &connector->panel.fixed_modes, head) {
+ if (!best_mode ||
+ abs(drm_mode_vrefresh(fixed_mode) - vrefresh) <
+ abs(drm_mode_vrefresh(best_mode) - vrefresh))
+ best_mode = fixed_mode;
+ }
+
+ return best_mode;
+}
+
+static bool is_alt_drrs_mode(const struct drm_display_mode *mode,
+ const struct drm_display_mode *preferred_mode)
+{
+ return drm_mode_match(mode, preferred_mode,
+ DRM_MODE_MATCH_TIMINGS |
+ DRM_MODE_MATCH_FLAGS |
+ DRM_MODE_MATCH_3D_FLAGS) &&
+ mode->clock != preferred_mode->clock;
+}
+
+static bool is_alt_fixed_mode(const struct drm_display_mode *mode,
+ const struct drm_display_mode *preferred_mode)
+{
+ u32 sync_flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC |
+ DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC;
+
+ return (mode->flags & ~sync_flags) == (preferred_mode->flags & ~sync_flags) &&
+ mode->hdisplay == preferred_mode->hdisplay &&
+ mode->vdisplay == preferred_mode->vdisplay;
+}
+
+const struct drm_display_mode *
+intel_panel_downclock_mode(struct intel_connector *connector,
+ const struct drm_display_mode *adjusted_mode)
+{
+ const struct drm_display_mode *fixed_mode, *best_mode = NULL;
+ int min_vrefresh = connector->panel.vbt.seamless_drrs_min_refresh_rate;
+ int max_vrefresh = drm_mode_vrefresh(adjusted_mode);
+
+ /* pick the fixed_mode with the lowest refresh rate */
+ list_for_each_entry(fixed_mode, &connector->panel.fixed_modes, head) {
+ int vrefresh = drm_mode_vrefresh(fixed_mode);
+
+ if (is_alt_drrs_mode(fixed_mode, adjusted_mode) &&
+ vrefresh >= min_vrefresh && vrefresh < max_vrefresh) {
+ max_vrefresh = vrefresh;
+ best_mode = fixed_mode;
+ }
+ }
+
+ return best_mode;
+}
+
+const struct drm_display_mode *
+intel_panel_highest_mode(struct intel_connector *connector,
+ const struct drm_display_mode *adjusted_mode)
+{
+ const struct drm_display_mode *fixed_mode, *best_mode = adjusted_mode;
+
+ /* pick the fixed_mode that has the highest clock */
+ list_for_each_entry(fixed_mode, &connector->panel.fixed_modes, head) {
+ if (fixed_mode->clock > best_mode->clock)
+ best_mode = fixed_mode;
+ }
+
+ return best_mode;
+}
+
+int intel_panel_get_modes(struct intel_connector *connector)
+{
+ const struct drm_display_mode *fixed_mode;
+ int num_modes = 0;
+
+ list_for_each_entry(fixed_mode, &connector->panel.fixed_modes, head) {
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->base.dev, fixed_mode);
+ if (mode) {
+ drm_mode_probed_add(&connector->base, mode);
+ num_modes++;
+ }
+ }
+
+ return num_modes;
+}
+
+enum drrs_type intel_panel_drrs_type(struct intel_connector *connector)
+{
+ if (list_empty(&connector->panel.fixed_modes) ||
+ list_is_singular(&connector->panel.fixed_modes))
+ return DRRS_TYPE_NONE;
+
+ return connector->panel.vbt.drrs_type;
+}
+
+int intel_panel_compute_config(struct intel_connector *connector,
+ struct drm_display_mode *adjusted_mode)
+{
+ const struct drm_display_mode *fixed_mode =
+ intel_panel_fixed_mode(connector, adjusted_mode);
+
+ if (!fixed_mode)
+ return 0;
+
+ /*
+ * We don't want to lie too much to the user about the refresh
+ * rate they're going to get. But we have to allow a bit of latitude
+ * for Xorg since it likes to automagically cook up modes with slightly
+ * off refresh rates.
+ */
+ if (abs(drm_mode_vrefresh(adjusted_mode) - drm_mode_vrefresh(fixed_mode)) > 1) {
+ drm_dbg_kms(connector->base.dev,
+ "[CONNECTOR:%d:%s] Requested mode vrefresh (%d Hz) does not match fixed mode vrefresh (%d Hz)\n",
+ connector->base.base.id, connector->base.name,
+ drm_mode_vrefresh(adjusted_mode), drm_mode_vrefresh(fixed_mode));
+
+ return -EINVAL;
+ }
+
+ drm_mode_copy(adjusted_mode, fixed_mode);
+
+ drm_mode_set_crtcinfo(adjusted_mode, 0);
+
+ return 0;
+}
+
+static void intel_panel_add_edid_alt_fixed_modes(struct intel_connector *connector)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ const struct drm_display_mode *preferred_mode =
+ intel_panel_preferred_fixed_mode(connector);
+ struct drm_display_mode *mode, *next;
+
+ list_for_each_entry_safe(mode, next, &connector->base.probed_modes, head) {
+ if (!is_alt_fixed_mode(mode, preferred_mode))
+ continue;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "[CONNECTOR:%d:%s] using alternate EDID fixed mode: " DRM_MODE_FMT "\n",
+ connector->base.base.id, connector->base.name,
+ DRM_MODE_ARG(mode));
+
+ list_move_tail(&mode->head, &connector->panel.fixed_modes);
+ }
+}
+
+static void intel_panel_add_edid_preferred_mode(struct intel_connector *connector)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_display_mode *scan, *fixed_mode = NULL;
+
+ if (list_empty(&connector->base.probed_modes))
+ return;
+
+ /* make sure the preferred mode is first */
+ list_for_each_entry(scan, &connector->base.probed_modes, head) {
+ if (scan->type & DRM_MODE_TYPE_PREFERRED) {
+ fixed_mode = scan;
+ break;
+ }
+ }
+
+ if (!fixed_mode)
+ fixed_mode = list_first_entry(&connector->base.probed_modes,
+ typeof(*fixed_mode), head);
+
+ drm_dbg_kms(&dev_priv->drm,
+ "[CONNECTOR:%d:%s] using %s EDID fixed mode: " DRM_MODE_FMT "\n",
+ connector->base.base.id, connector->base.name,
+ fixed_mode->type & DRM_MODE_TYPE_PREFERRED ? "preferred" : "first",
+ DRM_MODE_ARG(fixed_mode));
+
+ fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
+
+ list_move_tail(&fixed_mode->head, &connector->panel.fixed_modes);
+}
+
+static void intel_panel_destroy_probed_modes(struct intel_connector *connector)
+{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct drm_display_mode *mode, *next;
+
+ list_for_each_entry_safe(mode, next, &connector->base.probed_modes, head) {
+ drm_dbg_kms(&i915->drm,
+ "[CONNECTOR:%d:%s] not using EDID mode: " DRM_MODE_FMT "\n",
+ connector->base.base.id, connector->base.name,
+ DRM_MODE_ARG(mode));
+ list_del(&mode->head);
+ drm_mode_destroy(&i915->drm, mode);
+ }
+}
+
+void intel_panel_add_edid_fixed_modes(struct intel_connector *connector,
+ bool use_alt_fixed_modes)
+{
+ intel_panel_add_edid_preferred_mode(connector);
+ if (intel_panel_preferred_fixed_mode(connector) && use_alt_fixed_modes)
+ intel_panel_add_edid_alt_fixed_modes(connector);
+ intel_panel_destroy_probed_modes(connector);
+}
+
+static void intel_panel_add_fixed_mode(struct intel_connector *connector,
+ struct drm_display_mode *fixed_mode,
+ const char *type)
+{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct drm_display_info *info = &connector->base.display_info;
+
+ if (!fixed_mode)
+ return;
+
+ fixed_mode->type |= DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
+
+ info->width_mm = fixed_mode->width_mm;
+ info->height_mm = fixed_mode->height_mm;
+
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] using %s fixed mode: " DRM_MODE_FMT "\n",
+ connector->base.base.id, connector->base.name, type,
+ DRM_MODE_ARG(fixed_mode));
+
+ list_add_tail(&fixed_mode->head, &connector->panel.fixed_modes);
+}
+
+void intel_panel_add_vbt_lfp_fixed_mode(struct intel_connector *connector)
+{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ const struct drm_display_mode *mode;
+
+ mode = connector->panel.vbt.lfp_lvds_vbt_mode;
+ if (!mode)
+ return;
+
+ intel_panel_add_fixed_mode(connector,
+ drm_mode_duplicate(&i915->drm, mode),
+ "VBT LFP");
+}
+
+void intel_panel_add_vbt_sdvo_fixed_mode(struct intel_connector *connector)
+{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ const struct drm_display_mode *mode;
+
+ mode = connector->panel.vbt.sdvo_lvds_vbt_mode;
+ if (!mode)
+ return;
+
+ intel_panel_add_fixed_mode(connector,
+ drm_mode_duplicate(&i915->drm, mode),
+ "VBT SDVO");
+}
+
+void intel_panel_add_encoder_fixed_mode(struct intel_connector *connector,
+ struct intel_encoder *encoder)
+{
+ intel_panel_add_fixed_mode(connector,
+ intel_encoder_current_mode(encoder),
+ "current (BIOS)");
+}
+
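+/*
+ * Example of the aspect-preserving case handled below (illustrative
+ * numbers): a 1920x1080 adjusted mode with a 1440x1080 pipe source
+ * gives scaled_width = 1920 * 1080 > scaled_height = 1440 * 1080,
+ * so the image is pillarboxed to width = (1440 * 1080) / 1080 = 1440
+ * with x = (1920 - 1440 + 1) / 2 = 240.
+ */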
+/* adjusted_mode has been preset to be the panel's fixed mode */
+static int pch_panel_fitting(struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+ int pipe_src_w = drm_rect_width(&crtc_state->pipe_src);
+ int pipe_src_h = drm_rect_height(&crtc_state->pipe_src);
+ int x, y, width, height;
+
+ /* Native modes don't need fitting */
+ if (adjusted_mode->crtc_hdisplay == pipe_src_w &&
+ adjusted_mode->crtc_vdisplay == pipe_src_h &&
+ crtc_state->output_format != INTEL_OUTPUT_FORMAT_YCBCR420)
+ return 0;
+
+ switch (conn_state->scaling_mode) {
+ case DRM_MODE_SCALE_CENTER:
+ width = pipe_src_w;
+ height = pipe_src_h;
+ x = (adjusted_mode->crtc_hdisplay - width + 1)/2;
+ y = (adjusted_mode->crtc_vdisplay - height + 1)/2;
+ break;
+
+ case DRM_MODE_SCALE_ASPECT:
+ /* Scale but preserve the aspect ratio */
+ {
+ u32 scaled_width = adjusted_mode->crtc_hdisplay * pipe_src_h;
+ u32 scaled_height = pipe_src_w * adjusted_mode->crtc_vdisplay;
+ if (scaled_width > scaled_height) { /* pillar */
+ width = scaled_height / pipe_src_h;
+ if (width & 1)
+ width++;
+ x = (adjusted_mode->crtc_hdisplay - width + 1) / 2;
+ y = 0;
+ height = adjusted_mode->crtc_vdisplay;
+ } else if (scaled_width < scaled_height) { /* letter */
+ height = scaled_width / pipe_src_w;
+ if (height & 1)
+ height++;
+ y = (adjusted_mode->crtc_vdisplay - height + 1) / 2;
+ x = 0;
+ width = adjusted_mode->crtc_hdisplay;
+ } else {
+ x = y = 0;
+ width = adjusted_mode->crtc_hdisplay;
+ height = adjusted_mode->crtc_vdisplay;
+ }
+ }
+ break;
+
+ case DRM_MODE_SCALE_NONE:
+ WARN_ON(adjusted_mode->crtc_hdisplay != pipe_src_w);
+ WARN_ON(adjusted_mode->crtc_vdisplay != pipe_src_h);
+ fallthrough;
+ case DRM_MODE_SCALE_FULLSCREEN:
+ x = y = 0;
+ width = adjusted_mode->crtc_hdisplay;
+ height = adjusted_mode->crtc_vdisplay;
+ break;
+
+ default:
+ MISSING_CASE(conn_state->scaling_mode);
+ return -EINVAL;
+ }
+
+ drm_rect_init(&crtc_state->pch_pfit.dst,
+ x, y, width, height);
+ crtc_state->pch_pfit.enabled = true;
+
+ return 0;
+}
+
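+/*
+ * Worked example (illustrative): centring an 800 pixel wide image in
+ * a 1024 pixel wide mode gives border = (1024 - 800 + 1) / 2 = 112
+ * (already even), so crtc_hdisplay becomes 800 and crtc_hblank_start
+ * moves to 800 + 112 = 912, with the original blank and sync widths
+ * preserved.
+ */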
+static void
+centre_horizontally(struct drm_display_mode *adjusted_mode,
+ int width)
+{
+ u32 border, sync_pos, blank_width, sync_width;
+
+ /* keep the hsync and hblank widths constant */
+ sync_width = adjusted_mode->crtc_hsync_end - adjusted_mode->crtc_hsync_start;
+ blank_width = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
+ sync_pos = (blank_width - sync_width + 1) / 2;
+
+ border = (adjusted_mode->crtc_hdisplay - width + 1) / 2;
+ border += border & 1; /* make the border even */
+
+ adjusted_mode->crtc_hdisplay = width;
+ adjusted_mode->crtc_hblank_start = width + border;
+ adjusted_mode->crtc_hblank_end = adjusted_mode->crtc_hblank_start + blank_width;
+
+ adjusted_mode->crtc_hsync_start = adjusted_mode->crtc_hblank_start + sync_pos;
+ adjusted_mode->crtc_hsync_end = adjusted_mode->crtc_hsync_start + sync_width;
+}
+
+static void
+centre_vertically(struct drm_display_mode *adjusted_mode,
+ int height)
+{
+ u32 border, sync_pos, blank_width, sync_width;
+
+ /* keep the vsync and vblank widths constant */
+ sync_width = adjusted_mode->crtc_vsync_end - adjusted_mode->crtc_vsync_start;
+ blank_width = adjusted_mode->crtc_vblank_end - adjusted_mode->crtc_vblank_start;
+ sync_pos = (blank_width - sync_width + 1) / 2;
+
+ border = (adjusted_mode->crtc_vdisplay - height + 1) / 2;
+
+ adjusted_mode->crtc_vdisplay = height;
+ adjusted_mode->crtc_vblank_start = height + border;
+ adjusted_mode->crtc_vblank_end = adjusted_mode->crtc_vblank_start + blank_width;
+
+ adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vblank_start + sync_pos;
+ adjusted_mode->crtc_vsync_end = adjusted_mode->crtc_vsync_start + sync_width;
+}
+
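+/*
+ * Worked example (illustrative): upscaling 768 source lines to a
+ * 1024 line panel gives ratio = 768 * 4096 / 1024 = 3072, i.e. 0.75
+ * in 4.12 fixed point. Note that for integer inputs the final
+ * rounding expression returns that same ratio unchanged, since the
+ * added FACTOR/2 is lost to the integer division.
+ */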
+static u32 panel_fitter_scaling(u32 source, u32 target)
+{
+ /*
+	 * Floating point is not available here, so FACTOR provides a
+	 * fixed-point scale that avoids floating point computation
+	 * when calculating the panel fitter ratio.
+ */
+#define ACCURACY 12
+#define FACTOR (1 << ACCURACY)
+ u32 ratio = source * FACTOR / target;
+ return (FACTOR * ratio + FACTOR/2) / FACTOR;
+}
+
+static void i965_scale_aspect(struct intel_crtc_state *crtc_state,
+ u32 *pfit_control)
+{
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+ int pipe_src_w = drm_rect_width(&crtc_state->pipe_src);
+ int pipe_src_h = drm_rect_height(&crtc_state->pipe_src);
+ u32 scaled_width = adjusted_mode->crtc_hdisplay * pipe_src_h;
+ u32 scaled_height = pipe_src_w * adjusted_mode->crtc_vdisplay;
+
+ /* 965+ is easy, it does everything in hw */
+ if (scaled_width > scaled_height)
+ *pfit_control |= PFIT_ENABLE |
+ PFIT_SCALING_PILLAR;
+ else if (scaled_width < scaled_height)
+ *pfit_control |= PFIT_ENABLE |
+ PFIT_SCALING_LETTER;
+ else if (adjusted_mode->crtc_hdisplay != pipe_src_w)
+ *pfit_control |= PFIT_ENABLE | PFIT_SCALING_AUTO;
+}
+
+static void i9xx_scale_aspect(struct intel_crtc_state *crtc_state,
+ u32 *pfit_control, u32 *pfit_pgm_ratios,
+ u32 *border)
+{
+ struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
+ int pipe_src_w = drm_rect_width(&crtc_state->pipe_src);
+ int pipe_src_h = drm_rect_height(&crtc_state->pipe_src);
+ u32 scaled_width = adjusted_mode->crtc_hdisplay * pipe_src_h;
+ u32 scaled_height = pipe_src_w * adjusted_mode->crtc_vdisplay;
+ u32 bits;
+
+ /*
+ * For earlier chips we have to calculate the scaling
+ * ratio by hand and program it into the
+ * PFIT_PGM_RATIO register
+ */
+ if (scaled_width > scaled_height) { /* pillar */
+ centre_horizontally(adjusted_mode,
+ scaled_height / pipe_src_h);
+
+ *border = LVDS_BORDER_ENABLE;
+ if (pipe_src_h != adjusted_mode->crtc_vdisplay) {
+ bits = panel_fitter_scaling(pipe_src_h,
+ adjusted_mode->crtc_vdisplay);
+
+ *pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
+ bits << PFIT_VERT_SCALE_SHIFT);
+ *pfit_control |= (PFIT_ENABLE |
+ VERT_INTERP_BILINEAR |
+ HORIZ_INTERP_BILINEAR);
+ }
+ } else if (scaled_width < scaled_height) { /* letter */
+ centre_vertically(adjusted_mode,
+ scaled_width / pipe_src_w);
+
+ *border = LVDS_BORDER_ENABLE;
+ if (pipe_src_w != adjusted_mode->crtc_hdisplay) {
+ bits = panel_fitter_scaling(pipe_src_w,
+ adjusted_mode->crtc_hdisplay);
+
+ *pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
+ bits << PFIT_VERT_SCALE_SHIFT);
+ *pfit_control |= (PFIT_ENABLE |
+ VERT_INTERP_BILINEAR |
+ HORIZ_INTERP_BILINEAR);
+ }
+ } else {
+		/* Aspects match, let hw scale both directions */
+ *pfit_control |= (PFIT_ENABLE |
+ VERT_AUTO_SCALE | HORIZ_AUTO_SCALE |
+ VERT_INTERP_BILINEAR |
+ HORIZ_INTERP_BILINEAR);
+ }
+}
+
+static int gmch_panel_fitting(struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
+ struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
+ int pipe_src_w = drm_rect_width(&crtc_state->pipe_src);
+ int pipe_src_h = drm_rect_height(&crtc_state->pipe_src);
+
+ /* Native modes don't need fitting */
+ if (adjusted_mode->crtc_hdisplay == pipe_src_w &&
+ adjusted_mode->crtc_vdisplay == pipe_src_h)
+ goto out;
+
+ switch (conn_state->scaling_mode) {
+ case DRM_MODE_SCALE_CENTER:
+ /*
+ * For centered modes, we have to calculate border widths &
+ * heights and modify the values programmed into the CRTC.
+ */
+ centre_horizontally(adjusted_mode, pipe_src_w);
+ centre_vertically(adjusted_mode, pipe_src_h);
+ border = LVDS_BORDER_ENABLE;
+ break;
+ case DRM_MODE_SCALE_ASPECT:
+ /* Scale but preserve the aspect ratio */
+ if (DISPLAY_VER(dev_priv) >= 4)
+ i965_scale_aspect(crtc_state, &pfit_control);
+ else
+ i9xx_scale_aspect(crtc_state, &pfit_control,
+ &pfit_pgm_ratios, &border);
+ break;
+ case DRM_MODE_SCALE_FULLSCREEN:
+ /*
+ * Full scaling, even if it changes the aspect ratio.
+ * Fortunately this is all done for us in hw.
+ */
+ if (pipe_src_h != adjusted_mode->crtc_vdisplay ||
+ pipe_src_w != adjusted_mode->crtc_hdisplay) {
+ pfit_control |= PFIT_ENABLE;
+ if (DISPLAY_VER(dev_priv) >= 4)
+ pfit_control |= PFIT_SCALING_AUTO;
+ else
+ pfit_control |= (VERT_AUTO_SCALE |
+ VERT_INTERP_BILINEAR |
+ HORIZ_AUTO_SCALE |
+ HORIZ_INTERP_BILINEAR);
+ }
+ break;
+ default:
+ MISSING_CASE(conn_state->scaling_mode);
+ return -EINVAL;
+ }
+
+ /* 965+ wants fuzzy fitting */
+ /* FIXME: handle multiple panels by failing gracefully */
+ if (DISPLAY_VER(dev_priv) >= 4)
+ pfit_control |= PFIT_PIPE(crtc->pipe) | PFIT_FILTER_FUZZY;
+
+out:
+ if ((pfit_control & PFIT_ENABLE) == 0) {
+ pfit_control = 0;
+ pfit_pgm_ratios = 0;
+ }
+
+ /* Make sure pre-965 set dither correctly for 18bpp panels. */
+ if (DISPLAY_VER(dev_priv) < 4 && crtc_state->pipe_bpp == 18)
+ pfit_control |= PANEL_8TO6_DITHER_ENABLE;
+
+ crtc_state->gmch_pfit.control = pfit_control;
+ crtc_state->gmch_pfit.pgm_ratios = pfit_pgm_ratios;
+ crtc_state->gmch_pfit.lvds_border_bits = border;
+
+ return 0;
+}
+
+int intel_panel_fitting(struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+
+ if (HAS_GMCH(i915))
+ return gmch_panel_fitting(crtc_state, conn_state);
+ else
+ return pch_panel_fitting(crtc_state, conn_state);
+}
+
+enum drm_connector_status
+intel_panel_detect(struct drm_connector *connector, bool force)
+{
+ struct drm_i915_private *i915 = to_i915(connector->dev);
+
+ if (!INTEL_DISPLAY_ENABLED(i915))
+ return connector_status_disconnected;
+
+ return connector_status_connected;
+}
+
+enum drm_mode_status
+intel_panel_mode_valid(struct intel_connector *connector,
+ const struct drm_display_mode *mode)
+{
+ const struct drm_display_mode *fixed_mode =
+ intel_panel_fixed_mode(connector, mode);
+
+ if (!fixed_mode)
+ return MODE_OK;
+
+ if (mode->hdisplay != fixed_mode->hdisplay)
+ return MODE_PANEL;
+
+ if (mode->vdisplay != fixed_mode->vdisplay)
+ return MODE_PANEL;
+
+ if (drm_mode_vrefresh(mode) != drm_mode_vrefresh(fixed_mode))
+ return MODE_PANEL;
+
+ return MODE_OK;
+}
+
+void intel_panel_init_alloc(struct intel_connector *connector)
+{
+ struct intel_panel *panel = &connector->panel;
+
+ connector->panel.vbt.panel_type = -1;
+ INIT_LIST_HEAD(&panel->fixed_modes);
+}
+
+int intel_panel_init(struct intel_connector *connector)
+{
+ struct intel_panel *panel = &connector->panel;
+
+ intel_backlight_init_funcs(panel);
+
+ drm_dbg_kms(connector->base.dev,
+ "[CONNECTOR:%d:%s] DRRS type: %s\n",
+ connector->base.base.id, connector->base.name,
+ intel_drrs_type_str(intel_panel_drrs_type(connector)));
+
+ return 0;
+}
+
+void intel_panel_fini(struct intel_connector *connector)
+{
+ struct intel_panel *panel = &connector->panel;
+ struct drm_display_mode *fixed_mode, *next;
+
+ intel_backlight_destroy(panel);
+
+ intel_bios_fini_panel(panel);
+
+ list_for_each_entry_safe(fixed_mode, next, &panel->fixed_modes, head) {
+ list_del(&fixed_mode->head);
+ drm_mode_destroy(connector->base.dev, fixed_mode);
+ }
+}
diff --git a/drivers/gpu/drm/i915/display/intel_panel.h b/drivers/gpu/drm/i915/display/intel_panel.h
new file mode 100644
index 000000000..4b51e1c51
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_panel.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_PANEL_H__
+#define __INTEL_PANEL_H__
+
+#include <linux/types.h>
+
+enum drm_connector_status;
+enum drrs_type;
+struct drm_connector;
+struct drm_connector_state;
+struct drm_display_mode;
+struct drm_i915_private;
+struct intel_connector;
+struct intel_crtc_state;
+struct intel_encoder;
+
+void intel_panel_init_alloc(struct intel_connector *connector);
+int intel_panel_init(struct intel_connector *connector);
+void intel_panel_fini(struct intel_connector *connector);
+enum drm_connector_status
+intel_panel_detect(struct drm_connector *connector, bool force);
+bool intel_panel_use_ssc(struct drm_i915_private *i915);
+const struct drm_display_mode *
+intel_panel_preferred_fixed_mode(struct intel_connector *connector);
+const struct drm_display_mode *
+intel_panel_fixed_mode(struct intel_connector *connector,
+ const struct drm_display_mode *mode);
+const struct drm_display_mode *
+intel_panel_downclock_mode(struct intel_connector *connector,
+ const struct drm_display_mode *adjusted_mode);
+const struct drm_display_mode *
+intel_panel_highest_mode(struct intel_connector *connector,
+ const struct drm_display_mode *adjusted_mode);
+int intel_panel_get_modes(struct intel_connector *connector);
+enum drrs_type intel_panel_drrs_type(struct intel_connector *connector);
+enum drm_mode_status
+intel_panel_mode_valid(struct intel_connector *connector,
+ const struct drm_display_mode *mode);
+int intel_panel_fitting(struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
+int intel_panel_compute_config(struct intel_connector *connector,
+ struct drm_display_mode *adjusted_mode);
+void intel_panel_add_edid_fixed_modes(struct intel_connector *connector,
+ bool use_alt_fixed_modes);
+void intel_panel_add_vbt_lfp_fixed_mode(struct intel_connector *connector);
+void intel_panel_add_vbt_sdvo_fixed_mode(struct intel_connector *connector);
+void intel_panel_add_encoder_fixed_mode(struct intel_connector *connector,
+ struct intel_encoder *encoder);
+
+#endif /* __INTEL_PANEL_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_pch_display.c b/drivers/gpu/drm/i915/display/intel_pch_display.c
new file mode 100644
index 000000000..837152dca
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_pch_display.c
@@ -0,0 +1,650 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include "g4x_dp.h"
+#include "intel_crt.h"
+#include "intel_de.h"
+#include "intel_display_types.h"
+#include "intel_fdi.h"
+#include "intel_lvds.h"
+#include "intel_pch_display.h"
+#include "intel_pch_refclk.h"
+#include "intel_pps.h"
+#include "intel_sdvo.h"
+
+bool intel_has_pch_trancoder(struct drm_i915_private *i915,
+ enum pipe pch_transcoder)
+{
+ return HAS_PCH_IBX(i915) || HAS_PCH_CPT(i915) ||
+ (HAS_PCH_LPT_H(i915) && pch_transcoder == PIPE_A);
+}
+
+enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+
+ if (HAS_PCH_LPT(i915))
+ return PIPE_A;
+ else
+ return crtc->pipe;
+}
+
+static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe, enum port port,
+ i915_reg_t dp_reg)
+{
+ enum pipe port_pipe;
+ bool state;
+
+ state = g4x_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);
+
+ I915_STATE_WARN(state && port_pipe == pipe,
+ "PCH DP %c enabled on transcoder %c, should be disabled\n",
+ port_name(port), pipe_name(pipe));
+
+ I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
+ "IBX PCH DP %c still using transcoder B\n",
+ port_name(port));
+}
+
+static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe, enum port port,
+ i915_reg_t hdmi_reg)
+{
+ enum pipe port_pipe;
+ bool state;
+
+ state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);
+
+ I915_STATE_WARN(state && port_pipe == pipe,
+ "PCH HDMI %c enabled on transcoder %c, should be disabled\n",
+ port_name(port), pipe_name(pipe));
+
+ I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
+ "IBX PCH HDMI %c still using transcoder B\n",
+ port_name(port));
+}
+
+static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ enum pipe port_pipe;
+
+ assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
+ assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
+ assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);
+
+ I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
+ port_pipe == pipe,
+ "PCH VGA enabled on transcoder %c, should be disabled\n",
+ pipe_name(pipe));
+
+ I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
+ port_pipe == pipe,
+ "PCH LVDS enabled on transcoder %c, should be disabled\n",
+ pipe_name(pipe));
+
+ /* PCH SDVOB multiplex with HDMIB */
+ assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
+ assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
+ assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
+}
+
+static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ u32 val;
+ bool enabled;
+
+ val = intel_de_read(dev_priv, PCH_TRANSCONF(pipe));
+ enabled = !!(val & TRANS_ENABLE);
+ I915_STATE_WARN(enabled,
+ "transcoder assertion failed, should be off on pipe %c but is still active\n",
+ pipe_name(pipe));
+}
+
+static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
+ enum port port, i915_reg_t hdmi_reg)
+{
+ u32 val = intel_de_read(dev_priv, hdmi_reg);
+
+ if (val & SDVO_ENABLE ||
+ (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
+ return;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "Sanitizing transcoder select for HDMI %c\n",
+ port_name(port));
+
+ val &= ~SDVO_PIPE_SEL_MASK;
+ val |= SDVO_PIPE_SEL(PIPE_A);
+
+ intel_de_write(dev_priv, hdmi_reg, val);
+}
+
+static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
+ enum port port, i915_reg_t dp_reg)
+{
+ u32 val = intel_de_read(dev_priv, dp_reg);
+
+ if (val & DP_PORT_EN ||
+ (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
+ return;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "Sanitizing transcoder select for DP %c\n",
+ port_name(port));
+
+ val &= ~DP_PIPE_SEL_MASK;
+ val |= DP_PIPE_SEL(PIPE_A);
+
+ intel_de_write(dev_priv, dp_reg, val);
+}
+
+static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
+{
+ /*
+ * The BIOS may select transcoder B on some of the PCH
+	 * ports even if it doesn't enable the port. This would trip
+ * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
+ * Sanitize the transcoder select bits to prevent that. We
+ * assume that the BIOS never actually enabled the port,
+ * because if it did we'd actually have to toggle the port
+ * on and back off to make the transcoder A select stick
+	 * (see intel_dp_link_down(), intel_disable_hdmi(),
+ * intel_disable_sdvo()).
+ */
+ ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
+ ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
+ ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);
+
+ /* PCH SDVOB multiplex with HDMIB */
+ ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
+ ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
+ ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
+}
+
+static void intel_pch_transcoder_set_m1_n1(struct intel_crtc *crtc,
+ const struct intel_link_m_n *m_n)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+
+ intel_set_m_n(dev_priv, m_n,
+ PCH_TRANS_DATA_M1(pipe), PCH_TRANS_DATA_N1(pipe),
+ PCH_TRANS_LINK_M1(pipe), PCH_TRANS_LINK_N1(pipe));
+}
+
+static void intel_pch_transcoder_set_m2_n2(struct intel_crtc *crtc,
+ const struct intel_link_m_n *m_n)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+
+ intel_set_m_n(dev_priv, m_n,
+ PCH_TRANS_DATA_M2(pipe), PCH_TRANS_DATA_N2(pipe),
+ PCH_TRANS_LINK_M2(pipe), PCH_TRANS_LINK_N2(pipe));
+}
+
+void intel_pch_transcoder_get_m1_n1(struct intel_crtc *crtc,
+ struct intel_link_m_n *m_n)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+
+ intel_get_m_n(dev_priv, m_n,
+ PCH_TRANS_DATA_M1(pipe), PCH_TRANS_DATA_N1(pipe),
+ PCH_TRANS_LINK_M1(pipe), PCH_TRANS_LINK_N1(pipe));
+}
+
+void intel_pch_transcoder_get_m2_n2(struct intel_crtc *crtc,
+ struct intel_link_m_n *m_n)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+
+ intel_get_m_n(dev_priv, m_n,
+ PCH_TRANS_DATA_M2(pipe), PCH_TRANS_DATA_N2(pipe),
+ PCH_TRANS_LINK_M2(pipe), PCH_TRANS_LINK_N2(pipe));
+}
+
+static void ilk_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
+ enum pipe pch_transcoder)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+
+ intel_de_write(dev_priv, PCH_TRANS_HTOTAL(pch_transcoder),
+ intel_de_read(dev_priv, HTOTAL(cpu_transcoder)));
+ intel_de_write(dev_priv, PCH_TRANS_HBLANK(pch_transcoder),
+ intel_de_read(dev_priv, HBLANK(cpu_transcoder)));
+ intel_de_write(dev_priv, PCH_TRANS_HSYNC(pch_transcoder),
+ intel_de_read(dev_priv, HSYNC(cpu_transcoder)));
+
+ intel_de_write(dev_priv, PCH_TRANS_VTOTAL(pch_transcoder),
+ intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));
+ intel_de_write(dev_priv, PCH_TRANS_VBLANK(pch_transcoder),
+ intel_de_read(dev_priv, VBLANK(cpu_transcoder)));
+ intel_de_write(dev_priv, PCH_TRANS_VSYNC(pch_transcoder),
+ intel_de_read(dev_priv, VSYNC(cpu_transcoder)));
+ intel_de_write(dev_priv, PCH_TRANS_VSYNCSHIFT(pch_transcoder),
+ intel_de_read(dev_priv, VSYNCSHIFT(cpu_transcoder)));
+}
+
+static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ i915_reg_t reg;
+ u32 val, pipeconf_val;
+
+ /* Make sure PCH DPLL is enabled */
+ assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);
+
+ /* FDI must be feeding us bits for PCH ports */
+ assert_fdi_tx_enabled(dev_priv, pipe);
+ assert_fdi_rx_enabled(dev_priv, pipe);
+
+ if (HAS_PCH_CPT(dev_priv)) {
+ reg = TRANS_CHICKEN2(pipe);
+ val = intel_de_read(dev_priv, reg);
+ /*
+ * Workaround: Set the timing override bit
+ * before enabling the pch transcoder.
+ */
+ val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
+ /* Configure frame start delay to match the CPU */
+ val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
+ val |= TRANS_CHICKEN2_FRAME_START_DELAY(crtc_state->framestart_delay - 1);
+ intel_de_write(dev_priv, reg, val);
+ }
+
+ reg = PCH_TRANSCONF(pipe);
+ val = intel_de_read(dev_priv, reg);
+ pipeconf_val = intel_de_read(dev_priv, PIPECONF(pipe));
+
+ if (HAS_PCH_IBX(dev_priv)) {
+ /* Configure frame start delay to match the CPU */
+ val &= ~TRANS_FRAME_START_DELAY_MASK;
+ val |= TRANS_FRAME_START_DELAY(crtc_state->framestart_delay - 1);
+
+ /*
+ * Make the BPC in the transcoder consistent with
+ * that in the pipeconf reg. For HDMI we must use 8bpc
+ * here for both 8bpc and 12bpc.
+ */
+ val &= ~PIPECONF_BPC_MASK;
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ val |= PIPECONF_BPC_8;
+ else
+ val |= pipeconf_val & PIPECONF_BPC_MASK;
+ }
+
+ val &= ~TRANS_INTERLACE_MASK;
+ if ((pipeconf_val & PIPECONF_INTERLACE_MASK_ILK) == PIPECONF_INTERLACE_IF_ID_ILK) {
+ if (HAS_PCH_IBX(dev_priv) &&
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
+ val |= TRANS_INTERLACE_LEGACY_VSYNC_IBX;
+ else
+ val |= TRANS_INTERLACE_INTERLACED;
+ } else {
+ val |= TRANS_INTERLACE_PROGRESSIVE;
+ }
+
+ intel_de_write(dev_priv, reg, val | TRANS_ENABLE);
+ if (intel_de_wait_for_set(dev_priv, reg, TRANS_STATE_ENABLE, 100))
+ drm_err(&dev_priv->drm, "failed to enable transcoder %c\n",
+ pipe_name(pipe));
+}
+
+static void ilk_disable_pch_transcoder(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ i915_reg_t reg;
+ u32 val;
+
+ /* FDI relies on the transcoder */
+ assert_fdi_tx_disabled(dev_priv, pipe);
+ assert_fdi_rx_disabled(dev_priv, pipe);
+
+ /* Ports must be off as well */
+ assert_pch_ports_disabled(dev_priv, pipe);
+
+ reg = PCH_TRANSCONF(pipe);
+ val = intel_de_read(dev_priv, reg);
+ val &= ~TRANS_ENABLE;
+ intel_de_write(dev_priv, reg, val);
+ /* wait for PCH transcoder off, transcoder state */
+ if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50))
+ drm_err(&dev_priv->drm, "failed to disable transcoder %c\n",
+ pipe_name(pipe));
+
+ if (HAS_PCH_CPT(dev_priv)) {
+ /* Workaround: Clear the timing override chicken bit again. */
+ reg = TRANS_CHICKEN2(pipe);
+ val = intel_de_read(dev_priv, reg);
+ val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
+ intel_de_write(dev_priv, reg, val);
+ }
+}
+
+void ilk_pch_pre_enable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ /*
+ * Note: FDI PLL enabling _must_ be done before we enable the
+ * cpu pipes, hence this is separate from all the other fdi/pch
+ * enabling.
+ */
+ ilk_fdi_pll_enable(crtc_state);
+}
+
+/*
+ * Enable PCH resources required for PCH ports:
+ * - PCH PLLs
+ * - FDI training & RX/TX
+ * - update transcoder timings
+ * - DP transcoding bits
+ * - transcoder
+ */
+void ilk_pch_enable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ enum pipe pipe = crtc->pipe;
+ u32 temp;
+
+ assert_pch_transcoder_disabled(dev_priv, pipe);
+
+ /* For PCH output, train the FDI link */
+ intel_fdi_link_train(crtc, crtc_state);
+
+ /*
+ * We need to program the right clock selection
+ * before writing the pixel multiplier into the DPLL.
+ */
+ if (HAS_PCH_CPT(dev_priv)) {
+ u32 sel;
+
+ temp = intel_de_read(dev_priv, PCH_DPLL_SEL);
+ temp |= TRANS_DPLL_ENABLE(pipe);
+ sel = TRANS_DPLLB_SEL(pipe);
+ if (crtc_state->shared_dpll ==
+ intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
+ temp |= sel;
+ else
+ temp &= ~sel;
+ intel_de_write(dev_priv, PCH_DPLL_SEL, temp);
+ }
+
+ /*
+ * XXX: PCH PLLs can be enabled any time before we enable the PCH
+ * transcoder, and we actually should do this to not upset any PCH
+ * transcoder that already uses the clock when we share it.
+ *
+ * Note that enable_shared_dpll tries to do the right thing, but
+ * get_shared_dpll unconditionally resets the pll - we need that
+ * to have the right LVDS enable sequence.
+ */
+ intel_enable_shared_dpll(crtc_state);
+
+ /* set transcoder timing, panel must allow it */
+ assert_pps_unlocked(dev_priv, pipe);
+ if (intel_crtc_has_dp_encoder(crtc_state)) {
+ intel_pch_transcoder_set_m1_n1(crtc, &crtc_state->dp_m_n);
+ intel_pch_transcoder_set_m2_n2(crtc, &crtc_state->dp_m2_n2);
+ }
+ ilk_pch_transcoder_set_timings(crtc_state, pipe);
+
+ intel_fdi_normal_train(crtc);
+
+ /* For PCH DP, enable TRANS_DP_CTL */
+ if (HAS_PCH_CPT(dev_priv) &&
+ intel_crtc_has_dp_encoder(crtc_state)) {
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+ u32 bpc = (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
+ i915_reg_t reg = TRANS_DP_CTL(pipe);
+ enum port port;
+
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~(TRANS_DP_PORT_SEL_MASK |
+ TRANS_DP_VSYNC_ACTIVE_HIGH |
+ TRANS_DP_HSYNC_ACTIVE_HIGH |
+ TRANS_DP_BPC_MASK);
+ temp |= TRANS_DP_OUTPUT_ENABLE;
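+ /*
+ * PIPECONF carries the BPC selection at bits 7:5 (hence the
+ * ">> 5" above); TRANS_DP_CTL uses the same encoding at 11:9.
+ */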
+ temp |= bpc << 9; /* same format but at 11:9 */
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
+
+ port = intel_get_crtc_new_encoder(state, crtc_state)->port;
+ drm_WARN_ON(&dev_priv->drm, port < PORT_B || port > PORT_D);
+ temp |= TRANS_DP_PORT_SEL(port);
+
+ intel_de_write(dev_priv, reg, temp);
+ }
+
+ ilk_enable_pch_transcoder(crtc_state);
+}
+
+void ilk_pch_disable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ ilk_fdi_disable(crtc);
+}
+
+void ilk_pch_post_disable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+
+ ilk_disable_pch_transcoder(crtc);
+
+ if (HAS_PCH_CPT(dev_priv)) {
+ i915_reg_t reg;
+ u32 temp;
+
+ /* disable TRANS_DP_CTL */
+ reg = TRANS_DP_CTL(pipe);
+ temp = intel_de_read(dev_priv, reg);
+ temp &= ~(TRANS_DP_OUTPUT_ENABLE |
+ TRANS_DP_PORT_SEL_MASK);
+ temp |= TRANS_DP_PORT_SEL_NONE;
+ intel_de_write(dev_priv, reg, temp);
+
+ /* disable DPLL_SEL */
+ temp = intel_de_read(dev_priv, PCH_DPLL_SEL);
+ temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
+ intel_de_write(dev_priv, PCH_DPLL_SEL, temp);
+ }
+
+ ilk_fdi_pll_disable(crtc);
+}
+
+static void ilk_pch_clock_get(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+ /* read out port_clock from the DPLL */
+ i9xx_crtc_clock_get(crtc, crtc_state);
+
+ /*
+ * In case there is an active pipe without active ports,
+ * we may still need some idea of the dotclock.
+ * Calculate one based on the FDI configuration.
+ */
+ crtc_state->hw.adjusted_mode.crtc_clock =
+ intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, crtc_state),
+ &crtc_state->fdi_m_n);
+}
+
+void ilk_pch_get_config(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_shared_dpll *pll;
+ enum pipe pipe = crtc->pipe;
+ enum intel_dpll_id pll_id;
+ bool pll_active;
+ u32 tmp;
+
+ if ((intel_de_read(dev_priv, PCH_TRANSCONF(pipe)) & TRANS_ENABLE) == 0)
+ return;
+
+ crtc_state->has_pch_encoder = true;
+
+ tmp = intel_de_read(dev_priv, FDI_RX_CTL(pipe));
+ crtc_state->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
+ FDI_DP_PORT_WIDTH_SHIFT) + 1;
+
+ intel_cpu_transcoder_get_m1_n1(crtc, crtc_state->cpu_transcoder,
+ &crtc_state->fdi_m_n);
+
+ if (HAS_PCH_IBX(dev_priv)) {
+ /*
+ * The pipe->pch transcoder and pch transcoder->pll
+ * mapping is fixed.
+ */
+ pll_id = (enum intel_dpll_id) pipe;
+ } else {
+ tmp = intel_de_read(dev_priv, PCH_DPLL_SEL);
+ if (tmp & TRANS_DPLLB_SEL(pipe))
+ pll_id = DPLL_ID_PCH_PLL_B;
+ else
+ pll_id = DPLL_ID_PCH_PLL_A;
+ }
+
+ crtc_state->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, pll_id);
+ pll = crtc_state->shared_dpll;
+
+ pll_active = intel_dpll_get_hw_state(dev_priv, pll,
+ &crtc_state->dpll_hw_state);
+ drm_WARN_ON(&dev_priv->drm, !pll_active);
+
+ tmp = crtc_state->dpll_hw_state.dpll;
+ crtc_state->pixel_multiplier =
+ ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
+ >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
+
+ ilk_pch_clock_get(crtc_state);
+}
+
+static void lpt_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ u32 val, pipeconf_val;
+
+ /* FDI must be feeding us bits for PCH ports */
+ assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
+ assert_fdi_rx_enabled(dev_priv, PIPE_A);
+
+ val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
+ /* Workaround: set timing override bit. */
+ val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
+ /* Configure frame start delay to match the CPU */
+ val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
+ val |= TRANS_CHICKEN2_FRAME_START_DELAY(crtc_state->framestart_delay - 1);
+ intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);
+
+ val = TRANS_ENABLE;
+ pipeconf_val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));
+
+ if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) == PIPECONF_INTERLACE_IF_ID_ILK)
+ val |= TRANS_INTERLACE_INTERLACED;
+ else
+ val |= TRANS_INTERLACE_PROGRESSIVE;
+
+ intel_de_write(dev_priv, LPT_TRANSCONF, val);
+ if (intel_de_wait_for_set(dev_priv, LPT_TRANSCONF,
+ TRANS_STATE_ENABLE, 100))
+ drm_err(&dev_priv->drm, "Failed to enable PCH transcoder\n");
+}
+
+static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
+{
+ u32 val;
+
+ val = intel_de_read(dev_priv, LPT_TRANSCONF);
+ val &= ~TRANS_ENABLE;
+ intel_de_write(dev_priv, LPT_TRANSCONF, val);
+ /* wait for PCH transcoder off, transcoder state */
+ if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF,
+ TRANS_STATE_ENABLE, 50))
+ drm_err(&dev_priv->drm, "Failed to disable PCH transcoder\n");
+
+ /* Workaround: clear timing override bit. */
+ val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
+ val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
+ intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);
+}
+
+void lpt_pch_enable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ assert_pch_transcoder_disabled(dev_priv, PIPE_A);
+
+ lpt_program_iclkip(crtc_state);
+
+ /* Set transcoder timing. */
+ ilk_pch_transcoder_set_timings(crtc_state, PIPE_A);
+
+ lpt_enable_pch_transcoder(crtc_state);
+}
+
+void lpt_pch_disable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+ lpt_disable_pch_transcoder(dev_priv);
+
+ lpt_disable_iclkip(dev_priv);
+}
+
+void lpt_pch_get_config(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 tmp;
+
+ if ((intel_de_read(dev_priv, LPT_TRANSCONF) & TRANS_ENABLE) == 0)
+ return;
+
+ crtc_state->has_pch_encoder = true;
+
+ tmp = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
+ crtc_state->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
+ FDI_DP_PORT_WIDTH_SHIFT) + 1;
+
+ intel_cpu_transcoder_get_m1_n1(crtc, crtc_state->cpu_transcoder,
+ &crtc_state->fdi_m_n);
+
+ crtc_state->hw.adjusted_mode.crtc_clock = lpt_get_iclkip(dev_priv);
+}
+
+void intel_pch_sanitize(struct drm_i915_private *i915)
+{
+ if (HAS_PCH_IBX(i915))
+ ibx_sanitize_pch_ports(i915);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_pch_display.h b/drivers/gpu/drm/i915/display/intel_pch_display.h
new file mode 100644
index 000000000..41a63413c
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_pch_display.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef _INTEL_PCH_DISPLAY_H_
+#define _INTEL_PCH_DISPLAY_H_
+
+#include <linux/types.h>
+
+enum pipe;
+struct drm_i915_private;
+struct intel_atomic_state;
+struct intel_crtc;
+struct intel_crtc_state;
+struct intel_link_m_n;
+
+bool intel_has_pch_trancoder(struct drm_i915_private *i915,
+ enum pipe pch_transcoder);
+enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc);
+
+void ilk_pch_pre_enable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+void ilk_pch_enable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+void ilk_pch_disable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+void ilk_pch_post_disable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+void ilk_pch_get_config(struct intel_crtc_state *crtc_state);
+
+void lpt_pch_enable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+void lpt_pch_disable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+void lpt_pch_get_config(struct intel_crtc_state *crtc_state);
+
+void intel_pch_transcoder_get_m1_n1(struct intel_crtc *crtc,
+ struct intel_link_m_n *m_n);
+void intel_pch_transcoder_get_m2_n2(struct intel_crtc *crtc,
+ struct intel_link_m_n *m_n);
+
+void intel_pch_sanitize(struct drm_i915_private *i915);
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/intel_pch_refclk.c b/drivers/gpu/drm/i915/display/intel_pch_refclk.c
new file mode 100644
index 000000000..a66097cdc
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_pch_refclk.c
@@ -0,0 +1,679 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include "intel_de.h"
+#include "intel_display_types.h"
+#include "intel_panel.h"
+#include "intel_pch_refclk.h"
+#include "intel_sbi.h"
+
+static void lpt_fdi_reset_mphy(struct drm_i915_private *dev_priv)
+{
+ u32 tmp;
+
+ tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
+ tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
+ intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);
+
+ if (wait_for_us(intel_de_read(dev_priv, SOUTH_CHICKEN2) &
+ FDI_MPHY_IOSFSB_RESET_STATUS, 100))
+ drm_err(&dev_priv->drm, "FDI mPHY reset assert timeout\n");
+
+ tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
+ tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
+ intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);
+
+ if (wait_for_us((intel_de_read(dev_priv, SOUTH_CHICKEN2) &
+ FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
+ drm_err(&dev_priv->drm, "FDI mPHY reset de-assert timeout\n");
+}
+
+/* WaMPhyProgramming:hsw */
+static void lpt_fdi_program_mphy(struct drm_i915_private *dev_priv)
+{
+ u32 tmp;
+
+ lpt_fdi_reset_mphy(dev_priv);
+
+ tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
+ tmp &= ~(0xFF << 24);
+ tmp |= (0x12 << 24);
+ intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
+ tmp |= (1 << 11);
+ intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
+ tmp |= (1 << 11);
+ intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
+ tmp |= (1 << 24) | (1 << 21) | (1 << 18);
+ intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
+ tmp |= (1 << 24) | (1 << 21) | (1 << 18);
+ intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
+ tmp &= ~(7 << 13);
+ tmp |= (5 << 13);
+ intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
+ tmp &= ~(7 << 13);
+ tmp |= (5 << 13);
+ intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
+ tmp &= ~0xFF;
+ tmp |= 0x1C;
+ intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
+ tmp &= ~0xFF;
+ tmp |= 0x1C;
+ intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
+ tmp &= ~(0xFF << 16);
+ tmp |= (0x1C << 16);
+ intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
+ tmp &= ~(0xFF << 16);
+ tmp |= (0x1C << 16);
+ intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
+ tmp |= (1 << 27);
+ intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
+ tmp |= (1 << 27);
+ intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
+ tmp &= ~(0xF << 28);
+ tmp |= (4 << 28);
+ intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
+ tmp &= ~(0xF << 28);
+ tmp |= (4 << 28);
+ intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
+}
+
+void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
+{
+ u32 temp;
+
+ intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_GATE);
+
+ mutex_lock(&dev_priv->sb_lock);
+
+ temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
+ temp |= SBI_SSCCTL_DISABLE;
+ intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
+
+ mutex_unlock(&dev_priv->sb_lock);
+}
+
+struct iclkip_params {
+ u32 iclk_virtual_root_freq;
+ u32 iclk_pi_range;
+ u32 divsel, phaseinc, auxdiv, phasedir, desired_divisor;
+};
+
+static void iclkip_params_init(struct iclkip_params *p)
+{
+ memset(p, 0, sizeof(*p));
+
+ p->iclk_virtual_root_freq = 172800 * 1000;
+ p->iclk_pi_range = 64;
+}
+
+static int lpt_iclkip_freq(struct iclkip_params *p)
+{
+ return DIV_ROUND_CLOSEST(p->iclk_virtual_root_freq,
+ p->desired_divisor << p->auxdiv);
+}
+
+static void lpt_compute_iclkip(struct iclkip_params *p, int clock)
+{
+ iclkip_params_init(p);
+
+ /* The iCLK virtual clock root frequency is in MHz,
+ * but the adjusted_mode->crtc_clock is in kHz. To get the
+ * divisors, it is necessary to divide one by the other, so we
+ * convert the virtual clock to kHz here for higher
+ * precision.
+ */
+ for (p->auxdiv = 0; p->auxdiv < 2; p->auxdiv++) {
+ p->desired_divisor = DIV_ROUND_CLOSEST(p->iclk_virtual_root_freq,
+ clock << p->auxdiv);
+ p->divsel = (p->desired_divisor / p->iclk_pi_range) - 2;
+ p->phaseinc = p->desired_divisor % p->iclk_pi_range;
+
+ /*
+ * Near 20MHz is a corner case which is
+ * out of range for the 7-bit divisor
+ */
+ if (p->divsel <= 0x7f)
+ break;
+ }
+}
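+
+/*
+ * Worked example (illustrative): for clock = 108000 kHz, the auxdiv = 0
+ * iteration gives desired_divisor = DIV_ROUND_CLOSEST(172800000, 108000)
+ * = 1600, hence divsel = 1600 / 64 - 2 = 23 and phaseinc = 1600 % 64 = 0;
+ * lpt_iclkip_freq() then returns 172800000 / 1600 = 108000 kHz exactly.
+ */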
+
+int lpt_iclkip(const struct intel_crtc_state *crtc_state)
+{
+ struct iclkip_params p;
+
+ lpt_compute_iclkip(&p, crtc_state->hw.adjusted_mode.crtc_clock);
+
+ return lpt_iclkip_freq(&p);
+}
+
+/* Program iCLKIP clock to the desired frequency */
+void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ int clock = crtc_state->hw.adjusted_mode.crtc_clock;
+ struct iclkip_params p;
+ u32 temp;
+
+ lpt_disable_iclkip(dev_priv);
+
+ lpt_compute_iclkip(&p, clock);
+ drm_WARN_ON(&dev_priv->drm, lpt_iclkip_freq(&p) != clock);
+
+ /* This should not happen with any sane values */
+ drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIVSEL(p.divsel) &
+ ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
+ drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIR(p.phasedir) &
+ ~SBI_SSCDIVINTPHASE_INCVAL_MASK);
+
+ drm_dbg_kms(&dev_priv->drm,
+ "iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
+ clock, p.auxdiv, p.divsel, p.phasedir, p.phaseinc);
+
+ mutex_lock(&dev_priv->sb_lock);
+
+ /* Program SSCDIVINTPHASE6 */
+ temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
+ temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
+ temp |= SBI_SSCDIVINTPHASE_DIVSEL(p.divsel);
+ temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
+ temp |= SBI_SSCDIVINTPHASE_INCVAL(p.phaseinc);
+ temp |= SBI_SSCDIVINTPHASE_DIR(p.phasedir);
+ temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
+ intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
+
+ /* Program SSCAUXDIV */
+ temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
+ temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
+ temp |= SBI_SSCAUXDIV_FINALDIV2SEL(p.auxdiv);
+ intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
+
+ /* Enable modulator and associated divider */
+ temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
+ temp &= ~SBI_SSCCTL_DISABLE;
+ intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
+
+ mutex_unlock(&dev_priv->sb_lock);
+
+ /* Wait for initialization time */
+ udelay(24);
+
+ intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_UNGATE);
+}
+
+int lpt_get_iclkip(struct drm_i915_private *dev_priv)
+{
+ struct iclkip_params p;
+ u32 temp;
+
+ if ((intel_de_read(dev_priv, PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
+ return 0;
+
+ iclkip_params_init(&p);
+
+ mutex_lock(&dev_priv->sb_lock);
+
+ temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
+ if (temp & SBI_SSCCTL_DISABLE) {
+ mutex_unlock(&dev_priv->sb_lock);
+ return 0;
+ }
+
+ temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
+ p.divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
+ SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
+ p.phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
+ SBI_SSCDIVINTPHASE_INCVAL_SHIFT;
+
+ temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
+ p.auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
+ SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;
+
+ mutex_unlock(&dev_priv->sb_lock);
+
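+ /* Invert lpt_compute_iclkip(): rebuild the divisor from divsel/phaseinc. */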
+ p.desired_divisor = (p.divsel + 2) * p.iclk_pi_range + p.phaseinc;
+
+ return lpt_iclkip_freq(&p);
+}
+
+/* Implements 3 different sequences from BSpec chapter "Display iCLK
+ * Programming" based on the parameters passed:
+ * - Sequence to enable CLKOUT_DP
+ * - Sequence to enable CLKOUT_DP without spread
+ * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
+ */
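+/*
+ * Parameter mapping (sketch): with_spread selects the spread-enabled
+ * variant, and with_fdi additionally programs the PCH FDI mPHY;
+ * with_fdi requires with_spread, as the warnings below enforce.
+ */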
+static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
+ bool with_spread, bool with_fdi)
+{
+ u32 reg, tmp;
+
+ if (drm_WARN(&dev_priv->drm, with_fdi && !with_spread,
+ "FDI requires downspread\n"))
+ with_spread = true;
+ if (drm_WARN(&dev_priv->drm, HAS_PCH_LPT_LP(dev_priv) &&
+ with_fdi, "LP PCH doesn't have FDI\n"))
+ with_fdi = false;
+
+ mutex_lock(&dev_priv->sb_lock);
+
+ tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
+ tmp &= ~SBI_SSCCTL_DISABLE;
+ tmp |= SBI_SSCCTL_PATHALT;
+ intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+
+ udelay(24);
+
+ if (with_spread) {
+ tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
+ tmp &= ~SBI_SSCCTL_PATHALT;
+ intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+
+ if (with_fdi)
+ lpt_fdi_program_mphy(dev_priv);
+ }
+
+ reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
+ tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
+ tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
+ intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
+
+ mutex_unlock(&dev_priv->sb_lock);
+}
+
+/* Sequence to disable CLKOUT_DP */
+void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
+{
+ u32 reg, tmp;
+
+ mutex_lock(&dev_priv->sb_lock);
+
+ reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
+ tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
+ tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
+ intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
+
+ tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
+ if (!(tmp & SBI_SSCCTL_DISABLE)) {
+ if (!(tmp & SBI_SSCCTL_PATHALT)) {
+ tmp |= SBI_SSCCTL_PATHALT;
+ intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+ udelay(32);
+ }
+ tmp |= SBI_SSCCTL_DISABLE;
+ intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+ }
+
+ mutex_unlock(&dev_priv->sb_lock);
+}
+
+#define BEND_IDX(steps) ((50 + (steps)) / 5)
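+/* e.g. BEND_IDX(-50) = 0, BEND_IDX(0) = 10, BEND_IDX(50) = 20 */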
+
+static const u16 sscdivintphase[] = {
+ [BEND_IDX( 50)] = 0x3B23,
+ [BEND_IDX( 45)] = 0x3B23,
+ [BEND_IDX( 40)] = 0x3C23,
+ [BEND_IDX( 35)] = 0x3C23,
+ [BEND_IDX( 30)] = 0x3D23,
+ [BEND_IDX( 25)] = 0x3D23,
+ [BEND_IDX( 20)] = 0x3E23,
+ [BEND_IDX( 15)] = 0x3E23,
+ [BEND_IDX( 10)] = 0x3F23,
+ [BEND_IDX( 5)] = 0x3F23,
+ [BEND_IDX( 0)] = 0x0025,
+ [BEND_IDX( -5)] = 0x0025,
+ [BEND_IDX(-10)] = 0x0125,
+ [BEND_IDX(-15)] = 0x0125,
+ [BEND_IDX(-20)] = 0x0225,
+ [BEND_IDX(-25)] = 0x0225,
+ [BEND_IDX(-30)] = 0x0325,
+ [BEND_IDX(-35)] = 0x0325,
+ [BEND_IDX(-40)] = 0x0425,
+ [BEND_IDX(-45)] = 0x0425,
+ [BEND_IDX(-50)] = 0x0525,
+};
+
+/*
+ * Bend CLKOUT_DP
+ * steps -50 to 50 inclusive, in steps of 5
+ * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
+ * change in clock period = -(steps / 10) * 5.787 ps
+ */
+static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
+{
+ u32 tmp;
+ int idx = BEND_IDX(steps);
+
+ if (drm_WARN_ON(&dev_priv->drm, steps % 5 != 0))
+ return;
+
+ if (drm_WARN_ON(&dev_priv->drm, idx >= ARRAY_SIZE(sscdivintphase)))
+ return;
+
+ mutex_lock(&dev_priv->sb_lock);
+
+ if (steps % 10 != 0)
+ tmp = 0xAAAAAAAB;
+ else
+ tmp = 0x00000000;
+ intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
+
+ tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
+ tmp &= 0xffff0000;
+ tmp |= sscdivintphase[idx];
+ intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
+
+ mutex_unlock(&dev_priv->sb_lock);
+}
+
+#undef BEND_IDX
+
+static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv)
+{
+ u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
+ u32 ctl = intel_de_read(dev_priv, SPLL_CTL);
+
+ if ((ctl & SPLL_PLL_ENABLE) == 0)
+ return false;
+
+ if ((ctl & SPLL_REF_MASK) == SPLL_REF_MUXED_SSC &&
+ (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
+ return true;
+
+ if (IS_BROADWELL(dev_priv) &&
+ (ctl & SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW)
+ return true;
+
+ return false;
+}
+
+static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
+ enum intel_dpll_id id)
+{
+ u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
+ u32 ctl = intel_de_read(dev_priv, WRPLL_CTL(id));
+
+ if ((ctl & WRPLL_PLL_ENABLE) == 0)
+ return false;
+
+ if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC)
+ return true;
+
+ if ((IS_BROADWELL(dev_priv) || IS_HSW_ULT(dev_priv)) &&
+ (ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW &&
+ (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
+ return true;
+
+ return false;
+}
+
+static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
+{
+ struct intel_encoder *encoder;
+ bool has_fdi = false;
+
+ for_each_intel_encoder(&dev_priv->drm, encoder) {
+ switch (encoder->type) {
+ case INTEL_OUTPUT_ANALOG:
+ has_fdi = true;
+ break;
+ default:
+ break;
+ }
+ }
+
+ /*
+ * The BIOS may have decided to use the PCH SSC
+ * reference so we must not disable it until the
+ * relevant PLLs have stopped relying on it. We'll
+ * just leave the PCH SSC reference enabled in case
+ * any active PLL is using it. It will get disabled
+ * after runtime suspend if we don't have FDI.
+ *
+ * TODO: Move the whole reference clock handling
+ * to the modeset sequence proper so that we can
+ * actually enable/disable/reconfigure these things
+ * safely. To do that we need to introduce a real
+ * clock hierarchy. That would also allow us to do
+ * clock bending finally.
+ */
+ dev_priv->pch_ssc_use = 0;
+
+ if (spll_uses_pch_ssc(dev_priv)) {
+ drm_dbg_kms(&dev_priv->drm, "SPLL using PCH SSC\n");
+ dev_priv->pch_ssc_use |= BIT(DPLL_ID_SPLL);
+ }
+
+ if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) {
+ drm_dbg_kms(&dev_priv->drm, "WRPLL1 using PCH SSC\n");
+ dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL1);
+ }
+
+ if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) {
+ drm_dbg_kms(&dev_priv->drm, "WRPLL2 using PCH SSC\n");
+ dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL2);
+ }
+
+ if (dev_priv->pch_ssc_use)
+ return;
+
+ if (has_fdi) {
+ lpt_bend_clkout_dp(dev_priv, 0);
+ lpt_enable_clkout_dp(dev_priv, true, true);
+ } else {
+ lpt_disable_clkout_dp(dev_priv);
+ }
+}
+
+static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
+{
+ struct intel_encoder *encoder;
+ int i;
+ u32 val, final;
+ bool has_lvds = false;
+ bool has_cpu_edp = false;
+ bool has_panel = false;
+ bool has_ck505 = false;
+ bool can_ssc = false;
+ bool using_ssc_source = false;
+
+ /* We need to take the global config into account */
+ for_each_intel_encoder(&dev_priv->drm, encoder) {
+ switch (encoder->type) {
+ case INTEL_OUTPUT_LVDS:
+ has_panel = true;
+ has_lvds = true;
+ break;
+ case INTEL_OUTPUT_EDP:
+ has_panel = true;
+ if (encoder->port == PORT_A)
+ has_cpu_edp = true;
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (HAS_PCH_IBX(dev_priv)) {
+ has_ck505 = dev_priv->display.vbt.display_clock_mode;
+ can_ssc = has_ck505;
+ } else {
+ has_ck505 = false;
+ can_ssc = true;
+ }
+
+ /* Check if any DPLLs are using the SSC source */
+ for (i = 0; i < dev_priv->display.dpll.num_shared_dpll; i++) {
+ u32 temp = intel_de_read(dev_priv, PCH_DPLL(i));
+
+ if (!(temp & DPLL_VCO_ENABLE))
+ continue;
+
+ if ((temp & PLL_REF_INPUT_MASK) ==
+ PLLB_REF_INPUT_SPREADSPECTRUMIN) {
+ using_ssc_source = true;
+ break;
+ }
+ }
+
+ drm_dbg_kms(&dev_priv->drm,
+ "has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
+ has_panel, has_lvds, has_ck505, using_ssc_source);
+
+ /* Ironlake: try to set up the display ref clock before DPLL
+ * enabling. This is only under the driver's control after
+ * the PCH B stepping; previous chipset steppings should
+ * ignore this setting.
+ */
+ val = intel_de_read(dev_priv, PCH_DREF_CONTROL);
+
+ /* As we must carefully and slowly disable/enable each source in turn,
+ * compute the final state we want first and check if we need to
+ * make any changes at all.
+ */
+ final = val;
+ final &= ~DREF_NONSPREAD_SOURCE_MASK;
+ if (has_ck505)
+ final |= DREF_NONSPREAD_CK505_ENABLE;
+ else
+ final |= DREF_NONSPREAD_SOURCE_ENABLE;
+
+ final &= ~DREF_SSC_SOURCE_MASK;
+ final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
+ final &= ~DREF_SSC1_ENABLE;
+
+ if (has_panel) {
+ final |= DREF_SSC_SOURCE_ENABLE;
+
+ if (intel_panel_use_ssc(dev_priv) && can_ssc)
+ final |= DREF_SSC1_ENABLE;
+
+ if (has_cpu_edp) {
+ if (intel_panel_use_ssc(dev_priv) && can_ssc)
+ final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
+ else
+ final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
+ } else {
+ final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
+ }
+ } else if (using_ssc_source) {
+ final |= DREF_SSC_SOURCE_ENABLE;
+ final |= DREF_SSC1_ENABLE;
+ }
+
+ if (final == val)
+ return;
+
+ /* Always enable nonspread source */
+ val &= ~DREF_NONSPREAD_SOURCE_MASK;
+
+ if (has_ck505)
+ val |= DREF_NONSPREAD_CK505_ENABLE;
+ else
+ val |= DREF_NONSPREAD_SOURCE_ENABLE;
+
+ if (has_panel) {
+ val &= ~DREF_SSC_SOURCE_MASK;
+ val |= DREF_SSC_SOURCE_ENABLE;
+
+ /* SSC must be turned on before enabling the CPU output */
+ if (intel_panel_use_ssc(dev_priv) && can_ssc) {
+ drm_dbg_kms(&dev_priv->drm, "Using SSC on panel\n");
+ val |= DREF_SSC1_ENABLE;
+ } else {
+ val &= ~DREF_SSC1_ENABLE;
+ }
+
+ /* Get SSC going before enabling the outputs */
+ intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
+ intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
+ udelay(200);
+
+ val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
+
+ /* Enable CPU source on CPU attached eDP */
+ if (has_cpu_edp) {
+ if (intel_panel_use_ssc(dev_priv) && can_ssc) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Using SSC on eDP\n");
+ val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
+ } else {
+ val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
+ }
+ } else {
+ val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
+ }
+
+ intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
+ intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
+ udelay(200);
+ } else {
+ drm_dbg_kms(&dev_priv->drm, "Disabling CPU source output\n");
+
+ val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
+
+ /* Turn off CPU output */
+ val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
+
+ intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
+ intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
+ udelay(200);
+
+ if (!using_ssc_source) {
+ drm_dbg_kms(&dev_priv->drm, "Disabling SSC source\n");
+
+ /* Turn off the SSC source */
+ val &= ~DREF_SSC_SOURCE_MASK;
+ val |= DREF_SSC_SOURCE_DISABLE;
+
+ /* Turn off SSC1 */
+ val &= ~DREF_SSC1_ENABLE;
+
+ intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
+ intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
+ udelay(200);
+ }
+ }
+
+ drm_WARN_ON(&dev_priv->drm, val != final);
+}
+
+/*
+ * Initialize reference clocks when the driver loads
+ */
+void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
+{
+ if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
+ ilk_init_pch_refclk(dev_priv);
+ else if (HAS_PCH_LPT(dev_priv))
+ lpt_init_pch_refclk(dev_priv);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_pch_refclk.h b/drivers/gpu/drm/i915/display/intel_pch_refclk.h
new file mode 100644
index 000000000..9bcf56629
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_pch_refclk.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef _INTEL_PCH_REFCLK_H_
+#define _INTEL_PCH_REFCLK_H_
+
+#include <linux/types.h>
+
+struct drm_i915_private;
+struct intel_crtc_state;
+
+void lpt_program_iclkip(const struct intel_crtc_state *crtc_state);
+void lpt_disable_iclkip(struct drm_i915_private *dev_priv);
+int lpt_get_iclkip(struct drm_i915_private *dev_priv);
+int lpt_iclkip(const struct intel_crtc_state *crtc_state);
+
+void intel_init_pch_refclk(struct drm_i915_private *dev_priv);
+void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv);
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/intel_pipe_crc.c b/drivers/gpu/drm/i915/display/intel_pipe_crc.c
new file mode 100644
index 000000000..8ac263f47
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_pipe_crc.c
@@ -0,0 +1,672 @@
+/*
+ * Copyright © 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Author: Damien Lespiau <damien.lespiau@intel.com>
+ *
+ */
+
+#include <linux/circ_buf.h>
+#include <linux/ctype.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include "intel_atomic.h"
+#include "intel_de.h"
+#include "intel_display_types.h"
+#include "intel_pipe_crc.h"
+
+static const char * const pipe_crc_sources[] = {
+ [INTEL_PIPE_CRC_SOURCE_NONE] = "none",
+ [INTEL_PIPE_CRC_SOURCE_PLANE1] = "plane1",
+ [INTEL_PIPE_CRC_SOURCE_PLANE2] = "plane2",
+ [INTEL_PIPE_CRC_SOURCE_PLANE3] = "plane3",
+ [INTEL_PIPE_CRC_SOURCE_PLANE4] = "plane4",
+ [INTEL_PIPE_CRC_SOURCE_PLANE5] = "plane5",
+ [INTEL_PIPE_CRC_SOURCE_PLANE6] = "plane6",
+ [INTEL_PIPE_CRC_SOURCE_PLANE7] = "plane7",
+ [INTEL_PIPE_CRC_SOURCE_PIPE] = "pipe",
+ [INTEL_PIPE_CRC_SOURCE_TV] = "TV",
+ [INTEL_PIPE_CRC_SOURCE_DP_B] = "DP-B",
+ [INTEL_PIPE_CRC_SOURCE_DP_C] = "DP-C",
+ [INTEL_PIPE_CRC_SOURCE_DP_D] = "DP-D",
+ [INTEL_PIPE_CRC_SOURCE_AUTO] = "auto",
+};
+
+static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
+ u32 *val)
+{
+ if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
+ *source = INTEL_PIPE_CRC_SOURCE_PIPE;
+
+ switch (*source) {
+ case INTEL_PIPE_CRC_SOURCE_PIPE:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_INCLUDE_BORDER_I8XX;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_NONE:
+ *val = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv,
+ enum pipe pipe,
+ enum intel_pipe_crc_source *source)
+{
+ struct drm_device *dev = &dev_priv->drm;
+ struct intel_encoder *encoder;
+ struct intel_crtc *crtc;
+ struct intel_digital_port *dig_port;
+ int ret = 0;
+
+ *source = INTEL_PIPE_CRC_SOURCE_PIPE;
+
+ drm_modeset_lock_all(dev);
+ for_each_intel_encoder(dev, encoder) {
+ if (!encoder->base.crtc)
+ continue;
+
+ crtc = to_intel_crtc(encoder->base.crtc);
+
+ if (crtc->pipe != pipe)
+ continue;
+
+ switch (encoder->type) {
+ case INTEL_OUTPUT_TVOUT:
+ *source = INTEL_PIPE_CRC_SOURCE_TV;
+ break;
+ case INTEL_OUTPUT_DP:
+ case INTEL_OUTPUT_EDP:
+ dig_port = enc_to_dig_port(encoder);
+ switch (dig_port->base.port) {
+ case PORT_B:
+ *source = INTEL_PIPE_CRC_SOURCE_DP_B;
+ break;
+ case PORT_C:
+ *source = INTEL_PIPE_CRC_SOURCE_DP_C;
+ break;
+ case PORT_D:
+ *source = INTEL_PIPE_CRC_SOURCE_DP_D;
+ break;
+ default:
+ drm_WARN(dev, 1, "nonexistent DP port %c\n",
+ port_name(dig_port->base.port));
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ drm_modeset_unlock_all(dev);
+
+ return ret;
+}
+
+static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
+ enum pipe pipe,
+ enum intel_pipe_crc_source *source,
+ u32 *val)
+{
+ bool need_stable_symbols = false;
+
+ if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
+ int ret = i9xx_pipe_crc_auto_source(dev_priv, pipe, source);
+ if (ret)
+ return ret;
+ }
+
+ switch (*source) {
+ case INTEL_PIPE_CRC_SOURCE_PIPE:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_VLV;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_DP_B:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_VLV;
+ need_stable_symbols = true;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_DP_C:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_VLV;
+ need_stable_symbols = true;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_DP_D:
+ if (!IS_CHERRYVIEW(dev_priv))
+ return -EINVAL;
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_VLV;
+ need_stable_symbols = true;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_NONE:
+ *val = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /*
+ * When the pipe CRC tap point is after the transcoders we need
+ * to tweak symbol-level features to produce a deterministic series of
+ * symbols for a given frame. We need to reset those features only
+ * once per frame (instead of every nth symbol):
+ * - DC-balance: used to ensure a better clock recovery from the data
+ * link (SDVO)
+ * - DisplayPort scrambling: used for EMI reduction
+ */
+ if (need_stable_symbols) {
+ u32 tmp = intel_de_read(dev_priv, PORT_DFT2_G4X);
+
+ tmp |= DC_BALANCE_RESET_VLV;
+ switch (pipe) {
+ case PIPE_A:
+ tmp |= PIPE_A_SCRAMBLE_RESET;
+ break;
+ case PIPE_B:
+ tmp |= PIPE_B_SCRAMBLE_RESET;
+ break;
+ case PIPE_C:
+ tmp |= PIPE_C_SCRAMBLE_RESET;
+ break;
+ default:
+ return -EINVAL;
+ }
+ intel_de_write(dev_priv, PORT_DFT2_G4X, tmp);
+ }
+
+ return 0;
+}
+
+static int i9xx_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
+ enum pipe pipe,
+ enum intel_pipe_crc_source *source,
+ u32 *val)
+{
+ if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
+ int ret = i9xx_pipe_crc_auto_source(dev_priv, pipe, source);
+ if (ret)
+ return ret;
+ }
+
+ switch (*source) {
+ case INTEL_PIPE_CRC_SOURCE_PIPE:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_I9XX;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_TV:
+ if (!SUPPORTS_TV(dev_priv))
+ return -EINVAL;
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_NONE:
+ *val = 0;
+ break;
+ default:
+ /*
+ * The DP CRC source doesn't work on g4x.
+ * It can be made to work to some degree by selecting
+ * the correct CRC source before the port is enabled,
+ * and not touching the CRC source bits again until
+ * the port is disabled. But even then the bits
+ * eventually get stuck and a reboot is needed to get
+ * working CRCs on the pipe again. Let's simply
+ * refuse to use DP CRCs on g4x.
+ */
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void vlv_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ u32 tmp = intel_de_read(dev_priv, PORT_DFT2_G4X);
+
+ switch (pipe) {
+ case PIPE_A:
+ tmp &= ~PIPE_A_SCRAMBLE_RESET;
+ break;
+ case PIPE_B:
+ tmp &= ~PIPE_B_SCRAMBLE_RESET;
+ break;
+ case PIPE_C:
+ tmp &= ~PIPE_C_SCRAMBLE_RESET;
+ break;
+ default:
+ return;
+ }
+ if (!(tmp & PIPE_SCRAMBLE_RESET_MASK))
+ tmp &= ~DC_BALANCE_RESET_VLV;
+ intel_de_write(dev_priv, PORT_DFT2_G4X, tmp);
+}
+
+static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
+ u32 *val)
+{
+ if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
+ *source = INTEL_PIPE_CRC_SOURCE_PIPE;
+
+ switch (*source) {
+ case INTEL_PIPE_CRC_SOURCE_PLANE1:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_ILK;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_PLANE2:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_ILK;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_PIPE:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_ILK;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_NONE:
+ *val = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void
+intel_crtc_crc_setup_workarounds(struct intel_crtc *crtc, bool enable)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_crtc_state *pipe_config;
+ struct drm_atomic_state *state;
+ struct drm_modeset_acquire_ctx ctx;
+ int ret;
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+ state = drm_atomic_state_alloc(&dev_priv->drm);
+ if (!state) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ state->acquire_ctx = &ctx;
+
+retry:
+ pipe_config = intel_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(pipe_config)) {
+ ret = PTR_ERR(pipe_config);
+ goto put_state;
+ }
+
+ pipe_config->uapi.mode_changed = pipe_config->has_psr;
+ pipe_config->crc_enabled = enable;
+
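+ /*
+ * HSW pipe A + eDP transcoder needs a full modeset here so that
+ * IPS gets disabled for the duration of CRC capture (IPS and CRC
+ * capture don't play well together on HSW).
+ */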
+ if (IS_HASWELL(dev_priv) &&
+ pipe_config->hw.active && crtc->pipe == PIPE_A &&
+ pipe_config->cpu_transcoder == TRANSCODER_EDP)
+ pipe_config->uapi.mode_changed = true;
+
+ ret = drm_atomic_commit(state);
+
+put_state:
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ drm_modeset_backoff(&ctx);
+ goto retry;
+ }
+
+ drm_atomic_state_put(state);
+unlock:
+ drm_WARN(&dev_priv->drm, ret,
+ "Toggling workaround to %i returns %i\n", enable, ret);
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+}
+
+static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
+ enum pipe pipe,
+ enum intel_pipe_crc_source *source,
+ u32 *val)
+{
+ if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
+ *source = INTEL_PIPE_CRC_SOURCE_PIPE;
+
+ switch (*source) {
+ case INTEL_PIPE_CRC_SOURCE_PLANE1:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_IVB;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_PLANE2:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_PIPE:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_NONE:
+ *val = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int skl_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
+ enum pipe pipe,
+ enum intel_pipe_crc_source *source,
+ u32 *val)
+{
+ if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
+ *source = INTEL_PIPE_CRC_SOURCE_PIPE;
+
+ switch (*source) {
+ case INTEL_PIPE_CRC_SOURCE_PLANE1:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PLANE_1_SKL;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_PLANE2:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PLANE_2_SKL;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_PLANE3:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PLANE_3_SKL;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_PLANE4:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PLANE_4_SKL;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_PLANE5:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PLANE_5_SKL;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_PLANE6:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PLANE_6_SKL;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_PLANE7:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PLANE_7_SKL;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_PIPE:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DMUX_SKL;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_NONE:
+ *val = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int get_new_crc_ctl_reg(struct drm_i915_private *dev_priv,
+ enum pipe pipe,
+ enum intel_pipe_crc_source *source, u32 *val)
+{
+ if (DISPLAY_VER(dev_priv) == 2)
+ return i8xx_pipe_crc_ctl_reg(source, val);
+ else if (DISPLAY_VER(dev_priv) < 5)
+ return i9xx_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
+ else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ return vlv_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
+ else if (IS_IRONLAKE(dev_priv) || IS_SANDYBRIDGE(dev_priv))
+ return ilk_pipe_crc_ctl_reg(source, val);
+ else if (DISPLAY_VER(dev_priv) < 9)
+ return ivb_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
+ else
+ return skl_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
+}
+
+static int
+display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s)
+{
+ int i;
+
+ if (!buf) {
+ *s = INTEL_PIPE_CRC_SOURCE_NONE;
+ return 0;
+ }
+
+ i = match_string(pipe_crc_sources, ARRAY_SIZE(pipe_crc_sources), buf);
+ if (i < 0)
+ return i;
+
+ *s = i;
+ return 0;
+}
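+
+/*
+ * These names form the userspace ABI: they are what gets written to the
+ * generic DRM CRC control file in debugfs, e.g. (path for illustration)
+ * "echo pipe > /sys/kernel/debug/dri/0/crtc-0/crc/control".
+ */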
+
+void intel_crtc_crc_init(struct intel_crtc *crtc)
+{
+ struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
+
+ spin_lock_init(&pipe_crc->lock);
+}
+
+static int i8xx_crc_source_valid(struct drm_i915_private *dev_priv,
+ const enum intel_pipe_crc_source source)
+{
+ switch (source) {
+ case INTEL_PIPE_CRC_SOURCE_PIPE:
+ case INTEL_PIPE_CRC_SOURCE_NONE:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int i9xx_crc_source_valid(struct drm_i915_private *dev_priv,
+ const enum intel_pipe_crc_source source)
+{
+ switch (source) {
+ case INTEL_PIPE_CRC_SOURCE_PIPE:
+ case INTEL_PIPE_CRC_SOURCE_TV:
+ case INTEL_PIPE_CRC_SOURCE_NONE:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int vlv_crc_source_valid(struct drm_i915_private *dev_priv,
+ const enum intel_pipe_crc_source source)
+{
+ switch (source) {
+ case INTEL_PIPE_CRC_SOURCE_PIPE:
+ case INTEL_PIPE_CRC_SOURCE_DP_B:
+ case INTEL_PIPE_CRC_SOURCE_DP_C:
+ case INTEL_PIPE_CRC_SOURCE_DP_D:
+ case INTEL_PIPE_CRC_SOURCE_NONE:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ilk_crc_source_valid(struct drm_i915_private *dev_priv,
+ const enum intel_pipe_crc_source source)
+{
+ switch (source) {
+ case INTEL_PIPE_CRC_SOURCE_PIPE:
+ case INTEL_PIPE_CRC_SOURCE_PLANE1:
+ case INTEL_PIPE_CRC_SOURCE_PLANE2:
+ case INTEL_PIPE_CRC_SOURCE_NONE:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ivb_crc_source_valid(struct drm_i915_private *dev_priv,
+ const enum intel_pipe_crc_source source)
+{
+ switch (source) {
+ case INTEL_PIPE_CRC_SOURCE_PIPE:
+ case INTEL_PIPE_CRC_SOURCE_PLANE1:
+ case INTEL_PIPE_CRC_SOURCE_PLANE2:
+ case INTEL_PIPE_CRC_SOURCE_NONE:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int skl_crc_source_valid(struct drm_i915_private *dev_priv,
+ const enum intel_pipe_crc_source source)
+{
+ switch (source) {
+ case INTEL_PIPE_CRC_SOURCE_PIPE:
+ case INTEL_PIPE_CRC_SOURCE_PLANE1:
+ case INTEL_PIPE_CRC_SOURCE_PLANE2:
+ case INTEL_PIPE_CRC_SOURCE_PLANE3:
+ case INTEL_PIPE_CRC_SOURCE_PLANE4:
+ case INTEL_PIPE_CRC_SOURCE_PLANE5:
+ case INTEL_PIPE_CRC_SOURCE_PLANE6:
+ case INTEL_PIPE_CRC_SOURCE_PLANE7:
+ case INTEL_PIPE_CRC_SOURCE_NONE:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int
+intel_is_valid_crc_source(struct drm_i915_private *dev_priv,
+ const enum intel_pipe_crc_source source)
+{
+ if (DISPLAY_VER(dev_priv) == 2)
+ return i8xx_crc_source_valid(dev_priv, source);
+ else if (DISPLAY_VER(dev_priv) < 5)
+ return i9xx_crc_source_valid(dev_priv, source);
+ else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ return vlv_crc_source_valid(dev_priv, source);
+ else if (IS_IRONLAKE(dev_priv) || IS_SANDYBRIDGE(dev_priv))
+ return ilk_crc_source_valid(dev_priv, source);
+ else if (DISPLAY_VER(dev_priv) < 9)
+ return ivb_crc_source_valid(dev_priv, source);
+ else
+ return skl_crc_source_valid(dev_priv, source);
+}
+
+const char *const *intel_crtc_get_crc_sources(struct drm_crtc *crtc,
+ size_t *count)
+{
+ *count = ARRAY_SIZE(pipe_crc_sources);
+ return pipe_crc_sources;
+}
+
+int intel_crtc_verify_crc_source(struct drm_crtc *crtc, const char *source_name,
+ size_t *values_cnt)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ enum intel_pipe_crc_source source;
+
+ if (display_crc_ctl_parse_source(source_name, &source) < 0) {
+ drm_dbg(&dev_priv->drm, "unknown source %s\n", source_name);
+ return -EINVAL;
+ }
+
+ if (source == INTEL_PIPE_CRC_SOURCE_AUTO ||
+ intel_is_valid_crc_source(dev_priv, source) == 0) {
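+ /* i915 exposes 5 CRC result values per frame */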
+ *values_cnt = 5;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+int intel_crtc_set_crc_source(struct drm_crtc *_crtc, const char *source_name)
+{
+ struct intel_crtc *crtc = to_intel_crtc(_crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
+ enum intel_display_power_domain power_domain;
+ enum intel_pipe_crc_source source;
+ enum pipe pipe = crtc->pipe;
+ intel_wakeref_t wakeref;
+ u32 val = 0; /* shut up gcc */
+ int ret = 0;
+ bool enable;
+
+ if (display_crc_ctl_parse_source(source_name, &source) < 0) {
+ drm_dbg(&dev_priv->drm, "unknown source %s\n", source_name);
+ return -EINVAL;
+ }
+
+ power_domain = POWER_DOMAIN_PIPE(pipe);
+ wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ if (!wakeref) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Trying to capture CRC while pipe is off\n");
+ return -EIO;
+ }
+
+ enable = source != INTEL_PIPE_CRC_SOURCE_NONE;
+ if (enable)
+ intel_crtc_crc_setup_workarounds(crtc, true);
+
+ ret = get_new_crc_ctl_reg(dev_priv, pipe, &source, &val);
+ if (ret != 0)
+ goto out;
+
+ pipe_crc->source = source;
+ intel_de_write(dev_priv, PIPE_CRC_CTL(pipe), val);
+ intel_de_posting_read(dev_priv, PIPE_CRC_CTL(pipe));
+
+ if (!source) {
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ vlv_undo_pipe_scramble_reset(dev_priv, pipe);
+ }
+
+ pipe_crc->skipped = 0;
+
+out:
+ if (!enable)
+ intel_crtc_crc_setup_workarounds(crtc, false);
+
+ intel_display_power_put(dev_priv, power_domain, wakeref);
+
+ return ret;
+}
+
+void intel_crtc_enable_pipe_crc(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
+ enum pipe pipe = crtc->pipe;
+ u32 val = 0;
+
+ if (!crtc->base.crc.opened)
+ return;
+
+ if (get_new_crc_ctl_reg(dev_priv, pipe, &pipe_crc->source, &val) < 0)
+ return;
+
+ /* Don't need pipe_crc->lock here, IRQs are not generated. */
+ pipe_crc->skipped = 0;
+
+ intel_de_write(dev_priv, PIPE_CRC_CTL(pipe), val);
+ intel_de_posting_read(dev_priv, PIPE_CRC_CTL(pipe));
+}
+
+void intel_crtc_disable_pipe_crc(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
+ enum pipe pipe = crtc->pipe;
+
+ /* Swallow CRCs until we stop generating them. */
+ spin_lock_irq(&pipe_crc->lock);
+ pipe_crc->skipped = INT_MIN;
+ spin_unlock_irq(&pipe_crc->lock);
+
+ intel_de_write(dev_priv, PIPE_CRC_CTL(pipe), 0);
+ intel_de_posting_read(dev_priv, PIPE_CRC_CTL(pipe));
+ intel_synchronize_irq(dev_priv);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_pipe_crc.h b/drivers/gpu/drm/i915/display/intel_pipe_crc.h
new file mode 100644
index 000000000..43012b189
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_pipe_crc.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_PIPE_CRC_H__
+#define __INTEL_PIPE_CRC_H__
+
+#include <linux/types.h>
+
+struct drm_crtc;
+struct drm_i915_private;
+struct intel_crtc;
+
+#ifdef CONFIG_DEBUG_FS
+void intel_crtc_crc_init(struct intel_crtc *crtc);
+int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name);
+int intel_crtc_verify_crc_source(struct drm_crtc *crtc,
+ const char *source_name, size_t *values_cnt);
+const char *const *intel_crtc_get_crc_sources(struct drm_crtc *crtc,
+ size_t *count);
+void intel_crtc_disable_pipe_crc(struct intel_crtc *crtc);
+void intel_crtc_enable_pipe_crc(struct intel_crtc *crtc);
+#else
+static inline void intel_crtc_crc_init(struct intel_crtc *crtc) {}
+#define intel_crtc_set_crc_source NULL
+#define intel_crtc_verify_crc_source NULL
+#define intel_crtc_get_crc_sources NULL
+static inline void intel_crtc_disable_pipe_crc(struct intel_crtc *crtc)
+{
+}
+
+static inline void intel_crtc_enable_pipe_crc(struct intel_crtc *crtc)
+{
+}
+#endif
+
+#endif /* __INTEL_PIPE_CRC_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_plane_initial.c b/drivers/gpu/drm/i915/display/intel_plane_initial.c
new file mode 100644
index 000000000..76be796df
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_plane_initial.c
@@ -0,0 +1,323 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include "gem/i915_gem_region.h"
+#include "i915_drv.h"
+#include "intel_atomic_plane.h"
+#include "intel_display.h"
+#include "intel_display_types.h"
+#include "intel_fb.h"
+#include "intel_plane_initial.h"
+
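+/*
+ * If the BIOS lit several pipes from the same surface address (e.g.
+ * cloned outputs), reuse the framebuffer already taken over for an
+ * earlier CRTC instead of wrapping the same memory twice.
+ */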
+static bool
+intel_reuse_initial_plane_obj(struct drm_i915_private *i915,
+ const struct intel_initial_plane_config *plane_config,
+ struct drm_framebuffer **fb,
+ struct i915_vma **vma)
+{
+ struct intel_crtc *crtc;
+
+ for_each_intel_crtc(&i915->drm, crtc) {
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ struct intel_plane *plane =
+ to_intel_plane(crtc->base.primary);
+ struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
+
+ if (!crtc_state->uapi.active)
+ continue;
+
+ if (!plane_state->ggtt_vma)
+ continue;
+
+ if (intel_plane_ggtt_offset(plane_state) == plane_config->base) {
+ *fb = plane_state->hw.fb;
+ *vma = plane_state->ggtt_vma;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static struct i915_vma *
+initial_plane_vma(struct drm_i915_private *i915,
+ struct intel_initial_plane_config *plane_config)
+{
+ struct intel_memory_region *mem;
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ resource_size_t phys_base;
+ u32 base, size;
+ u64 pinctl;
+
+ if (plane_config->size == 0)
+ return NULL;
+
+ base = round_down(plane_config->base, I915_GTT_MIN_ALIGNMENT);
+ if (IS_DGFX(i915)) {
+ gen8_pte_t __iomem *gte = to_gt(i915)->ggtt->gsm;
+ gen8_pte_t pte;
+
+ gte += base / I915_GTT_PAGE_SIZE;
+
+ pte = ioread64(gte);
+ if (!(pte & GEN12_GGTT_PTE_LM)) {
+ drm_err(&i915->drm,
+ "Initial plane programming missing PTE_LM bit\n");
+ return NULL;
+ }
+
+ phys_base = pte & I915_GTT_PAGE_MASK;
+ mem = i915->mm.regions[INTEL_REGION_LMEM_0];
+
+ /*
+ * We don't currently expect this to ever be placed in the
+ * stolen portion.
+ */
+ if (phys_base >= resource_size(&mem->region)) {
+ drm_err(&i915->drm,
+ "Initial plane programming using invalid range, phys_base=%pa\n",
+ &phys_base);
+ return NULL;
+ }
+
+ drm_dbg(&i915->drm,
+ "Using phys_base=%pa, based on initial plane programming\n",
+ &phys_base);
+ } else {
+ phys_base = base;
+ mem = i915->mm.stolen_region;
+ }
+
+ if (!mem)
+ return NULL;
+
+ size = round_up(plane_config->base + plane_config->size,
+ mem->min_page_size);
+ size -= base;
+
+ /*
+ * If the FB is too big, just don't use it since fbdev is not very
+ * important and we should probably use that space with FBC or other
+ * features.
+ */
+ if (IS_ENABLED(CONFIG_FRAMEBUFFER_CONSOLE) &&
+ mem == i915->mm.stolen_region &&
+ size * 2 > i915->stolen_usable_size)
+ return NULL;
+
+ obj = i915_gem_object_create_region_at(mem, phys_base, size, 0);
+ if (IS_ERR(obj))
+ return NULL;
+
+ /*
+ * Mark it WT ahead of time to avoid changing the
+ * cache_level during fbdev initialization. The
+ * unbind there would get stuck waiting for rcu.
+ */
+ i915_gem_object_set_cache_coherency(obj, HAS_WT(i915) ?
+ I915_CACHE_WT : I915_CACHE_NONE);
+
+ switch (plane_config->tiling) {
+ case I915_TILING_NONE:
+ break;
+ case I915_TILING_X:
+ case I915_TILING_Y:
+ obj->tiling_and_stride =
+ plane_config->fb->base.pitches[0] |
+ plane_config->tiling;
+ break;
+ default:
+ MISSING_CASE(plane_config->tiling);
+ goto err_obj;
+ }
+
+ vma = i915_vma_instance(obj, &to_gt(i915)->ggtt->vm, NULL);
+ if (IS_ERR(vma))
+ goto err_obj;
+
+ pinctl = PIN_GLOBAL | PIN_OFFSET_FIXED | base;
+ if (HAS_GMCH(i915))
+ pinctl |= PIN_MAPPABLE;
+ if (i915_vma_pin(vma, 0, 0, pinctl))
+ goto err_obj;
+
+ if (i915_gem_object_is_tiled(obj) &&
+ !i915_vma_is_map_and_fenceable(vma))
+ goto err_obj;
+
+ return vma;
+
+err_obj:
+ i915_gem_object_put(obj);
+ return NULL;
+}
+
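+/*
+ * Reconstruct a DRM framebuffer around the vma inherited from the
+ * BIOS, bailing out on modifiers the initial plane code can't handle.
+ */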
+static bool
+intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
+ struct intel_initial_plane_config *plane_config)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_mode_fb_cmd2 mode_cmd = { 0 };
+ struct drm_framebuffer *fb = &plane_config->fb->base;
+ struct i915_vma *vma;
+
+ switch (fb->modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ case I915_FORMAT_MOD_X_TILED:
+ case I915_FORMAT_MOD_Y_TILED:
+ case I915_FORMAT_MOD_4_TILED:
+ break;
+ default:
+ drm_dbg(&dev_priv->drm,
+ "Unsupported modifier for initial FB: 0x%llx\n",
+ fb->modifier);
+ return false;
+ }
+
+ vma = initial_plane_vma(dev_priv, plane_config);
+ if (!vma)
+ return false;
+
+ mode_cmd.pixel_format = fb->format->format;
+ mode_cmd.width = fb->width;
+ mode_cmd.height = fb->height;
+ mode_cmd.pitches[0] = fb->pitches[0];
+ mode_cmd.modifier[0] = fb->modifier;
+ mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
+
+ if (intel_framebuffer_init(to_intel_framebuffer(fb),
+ vma->obj, &mode_cmd)) {
+ drm_dbg_kms(&dev_priv->drm, "intel fb init failed\n");
+ goto err_vma;
+ }
+
+ plane_config->vma = vma;
+ return true;
+
+err_vma:
+ i915_vma_put(vma);
+ return false;
+}
+
+static void
+intel_find_initial_plane_obj(struct intel_crtc *crtc,
+ struct intel_initial_plane_config *plane_config)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_plane *plane =
+ to_intel_plane(crtc->base.primary);
+ struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
+ struct drm_framebuffer *fb;
+ struct i915_vma *vma;
+
+ /*
+ * TODO:
+ * Disable planes if get_initial_plane_config() failed.
+ * Make sure things work if the surface base is not page aligned.
+ */
+ if (!plane_config->fb)
+ return;
+
+ if (intel_alloc_initial_plane_obj(crtc, plane_config)) {
+ fb = &plane_config->fb->base;
+ vma = plane_config->vma;
+ goto valid_fb;
+ }
+
+ /*
+ * Failed to alloc the obj, check to see if we should share
+ * an fb with another CRTC instead
+ */
+ if (intel_reuse_initial_plane_obj(dev_priv, plane_config, &fb, &vma))
+ goto valid_fb;
+
+ /*
+ * We've failed to reconstruct the BIOS FB. Current display state
+ * indicates that the primary plane is visible, but has a NULL FB,
+ * which will lead to problems later if we don't fix it up. The
+ * simplest solution is to just disable the primary plane now and
+ * pretend the BIOS never had it enabled.
+ */
+ intel_plane_disable_noatomic(crtc, plane);
+
+ return;
+
+valid_fb:
+ plane_state->uapi.rotation = plane_config->rotation;
+ intel_fb_fill_view(to_intel_framebuffer(fb),
+ plane_state->uapi.rotation, &plane_state->view);
+
+ __i915_vma_pin(vma);
+ plane_state->ggtt_vma = i915_vma_get(vma);
+ if (intel_plane_uses_fence(plane_state) &&
+ i915_vma_pin_fence(vma) == 0 && vma->fence)
+ plane_state->flags |= PLANE_HAS_FENCE;
+
+ plane_state->uapi.src_x = 0;
+ plane_state->uapi.src_y = 0;
+ plane_state->uapi.src_w = fb->width << 16;
+ plane_state->uapi.src_h = fb->height << 16;
+
+ plane_state->uapi.crtc_x = 0;
+ plane_state->uapi.crtc_y = 0;
+ plane_state->uapi.crtc_w = fb->width;
+ plane_state->uapi.crtc_h = fb->height;
+
+ if (plane_config->tiling)
+ dev_priv->preserve_bios_swizzle = true;
+
+ plane_state->uapi.fb = fb;
+ drm_framebuffer_get(fb);
+
+ plane_state->uapi.crtc = &crtc->base;
+ intel_plane_copy_uapi_to_hw_state(plane_state, plane_state, crtc);
+
+ atomic_or(plane->frontbuffer_bit, &to_intel_frontbuffer(fb)->bits);
+}
+
+static void plane_config_fini(struct intel_initial_plane_config *plane_config)
+{
+ if (plane_config->fb) {
+ struct drm_framebuffer *fb = &plane_config->fb->base;
+
+ /* We may only have the stub and not a full framebuffer */
+ if (drm_framebuffer_read_refcount(fb))
+ drm_framebuffer_put(fb);
+ else
+ kfree(fb);
+ }
+
+ if (plane_config->vma)
+ i915_vma_put(plane_config->vma);
+}
+
+void intel_crtc_initial_plane_config(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_initial_plane_config plane_config = {};
+
+ /*
+ * Note that reserving the BIOS fb up front prevents us
+ * from stuffing other stolen allocations like the ring
+ * on top. This prevents some ugliness at boot time, and
+ * can even allow for smooth boot transitions if the BIOS
+ * fb is large enough for the active pipe configuration.
+ */
+ dev_priv->display.funcs.display->get_initial_plane_config(crtc, &plane_config);
+
+ /*
+ * If the fb is shared between multiple heads, we'll
+ * just get the first one.
+ */
+ intel_find_initial_plane_obj(crtc, &plane_config);
+
+ plane_config_fini(&plane_config);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_plane_initial.h b/drivers/gpu/drm/i915/display/intel_plane_initial.h
new file mode 100644
index 000000000..c7e35ab31
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_plane_initial.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef __INTEL_PLANE_INITIAL_H__
+#define __INTEL_PLANE_INITIAL_H__
+
+struct intel_crtc;
+
+void intel_crtc_initial_plane_config(struct intel_crtc *crtc);
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c
new file mode 100644
index 000000000..21944f5bf
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_pps.c
@@ -0,0 +1,1554 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include "g4x_dp.h"
+#include "i915_drv.h"
+#include "intel_de.h"
+#include "intel_display_power_well.h"
+#include "intel_display_types.h"
+#include "intel_dp.h"
+#include "intel_dpll.h"
+#include "intel_lvds.h"
+#include "intel_pps.h"
+#include "intel_quirks.h"
+
+static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
+ enum pipe pipe);
+
+static void pps_init_delays(struct intel_dp *intel_dp);
+static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd);
+
+intel_wakeref_t intel_pps_lock(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ intel_wakeref_t wakeref;
+
+ /*
+	 * See intel_pps_reset_all() for why we need a power domain reference here.
+ */
+ wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DISPLAY_CORE);
+ mutex_lock(&dev_priv->display.pps.mutex);
+
+ return wakeref;
+}
+
+intel_wakeref_t intel_pps_unlock(struct intel_dp *intel_dp,
+ intel_wakeref_t wakeref)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ mutex_unlock(&dev_priv->display.pps.mutex);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
+
+ return 0;
+}
+
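+/*
+ * The VLV/CHV power sequencer only latches onto a port after that
+ * port has been enabled at least once. Kick it by briefly enabling
+ * and disabling the DP port (forcing the pipe's DPLL on if needed)
+ * so that the VDD force bit and panel power control start working.
+ */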
+static void
+vlv_power_sequencer_kick(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum pipe pipe = intel_dp->pps.pps_pipe;
+ bool pll_enabled, release_cl_override = false;
+ enum dpio_phy phy = DPIO_PHY(pipe);
+ enum dpio_channel ch = vlv_pipe_to_channel(pipe);
+ u32 DP;
+
+ if (drm_WARN(&dev_priv->drm,
+ intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN,
+ "skipping pipe %c power sequencer kick due to [ENCODER:%d:%s] being active\n",
+ pipe_name(pipe), dig_port->base.base.base.id,
+ dig_port->base.base.name))
+ return;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "kicking pipe %c power sequencer for [ENCODER:%d:%s]\n",
+ pipe_name(pipe), dig_port->base.base.base.id,
+ dig_port->base.base.name);
+
+ /* Preserve the BIOS-computed detected bit. This is
+ * supposed to be read-only.
+ */
+ DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED;
+ DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
+ DP |= DP_PORT_WIDTH(1);
+ DP |= DP_LINK_TRAIN_PAT_1;
+
+ if (IS_CHERRYVIEW(dev_priv))
+ DP |= DP_PIPE_SEL_CHV(pipe);
+ else
+ DP |= DP_PIPE_SEL(pipe);
+
+ pll_enabled = intel_de_read(dev_priv, DPLL(pipe)) & DPLL_VCO_ENABLE;
+
+ /*
+ * The DPLL for the pipe must be enabled for this to work.
+	 * So enable it temporarily if it's not already enabled.
+ */
+ if (!pll_enabled) {
+ release_cl_override = IS_CHERRYVIEW(dev_priv) &&
+ !chv_phy_powergate_ch(dev_priv, phy, ch, true);
+
+ if (vlv_force_pll_on(dev_priv, pipe, vlv_get_dpll(dev_priv))) {
+ drm_err(&dev_priv->drm,
+ "Failed to force on pll for pipe %c!\n",
+ pipe_name(pipe));
+ return;
+ }
+ }
+
+ /*
+ * Similar magic as in intel_dp_enable_port().
+ * We _must_ do this port enable + disable trick
+ * to make this power sequencer lock onto the port.
+ * Otherwise even VDD force bit won't work.
+ */
+ intel_de_write(dev_priv, intel_dp->output_reg, DP);
+ intel_de_posting_read(dev_priv, intel_dp->output_reg);
+
+ intel_de_write(dev_priv, intel_dp->output_reg, DP | DP_PORT_EN);
+ intel_de_posting_read(dev_priv, intel_dp->output_reg);
+
+ intel_de_write(dev_priv, intel_dp->output_reg, DP & ~DP_PORT_EN);
+ intel_de_posting_read(dev_priv, intel_dp->output_reg);
+
+ if (!pll_enabled) {
+ vlv_force_pll_off(dev_priv, pipe);
+
+ if (release_cl_override)
+ chv_phy_powergate_ch(dev_priv, phy, ch, false);
+ }
+}
+
+static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
+{
+ struct intel_encoder *encoder;
+ unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
+
+ /*
+	 * We don't have a power sequencer currently.
+ * Pick one that's not used by other ports.
+ */
+ for_each_intel_dp(&dev_priv->drm, encoder) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ if (encoder->type == INTEL_OUTPUT_EDP) {
+ drm_WARN_ON(&dev_priv->drm,
+ intel_dp->pps.active_pipe != INVALID_PIPE &&
+ intel_dp->pps.active_pipe !=
+ intel_dp->pps.pps_pipe);
+
+ if (intel_dp->pps.pps_pipe != INVALID_PIPE)
+ pipes &= ~(1 << intel_dp->pps.pps_pipe);
+ } else {
+ drm_WARN_ON(&dev_priv->drm,
+ intel_dp->pps.pps_pipe != INVALID_PIPE);
+
+ if (intel_dp->pps.active_pipe != INVALID_PIPE)
+ pipes &= ~(1 << intel_dp->pps.active_pipe);
+ }
+ }
+
+ if (pipes == 0)
+ return INVALID_PIPE;
+
+ return ffs(pipes) - 1;
+}
+
+static enum pipe
+vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum pipe pipe;
+
+ lockdep_assert_held(&dev_priv->display.pps.mutex);
+
+ /* We should never land here with regular DP ports */
+ drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));
+
+ drm_WARN_ON(&dev_priv->drm, intel_dp->pps.active_pipe != INVALID_PIPE &&
+ intel_dp->pps.active_pipe != intel_dp->pps.pps_pipe);
+
+ if (intel_dp->pps.pps_pipe != INVALID_PIPE)
+ return intel_dp->pps.pps_pipe;
+
+ pipe = vlv_find_free_pps(dev_priv);
+
+ /*
+ * Didn't find one. This should not happen since there
+ * are two power sequencers and up to two eDP ports.
+ */
+ if (drm_WARN_ON(&dev_priv->drm, pipe == INVALID_PIPE))
+ pipe = PIPE_A;
+
+ vlv_steal_power_sequencer(dev_priv, pipe);
+ intel_dp->pps.pps_pipe = pipe;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "picked pipe %c power sequencer for [ENCODER:%d:%s]\n",
+ pipe_name(intel_dp->pps.pps_pipe),
+ dig_port->base.base.base.id,
+ dig_port->base.base.name);
+
+ /* init power sequencer on this pipe and port */
+ pps_init_delays(intel_dp);
+ pps_init_registers(intel_dp, true);
+
+ /*
+ * Even vdd force doesn't work until we've made
+ * the power sequencer lock in on the port.
+ */
+ vlv_power_sequencer_kick(intel_dp);
+
+ return intel_dp->pps.pps_pipe;
+}
+
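+/*
+ * On BXT/GLK the power sequencer instance is tied to the VBT
+ * backlight controller index rather than to a pipe, so only the
+ * registers need reprogramming after a PPS reset.
+ */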
+static int
+bxt_power_sequencer_idx(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_connector *connector = intel_dp->attached_connector;
+ int backlight_controller = connector->panel.vbt.backlight.controller;
+
+ lockdep_assert_held(&dev_priv->display.pps.mutex);
+
+ /* We should never land here with regular DP ports */
+ drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));
+
+ if (!intel_dp->pps.pps_reset)
+ return backlight_controller;
+
+ intel_dp->pps.pps_reset = false;
+
+ /*
+	 * Only the HW needs to be reprogrammed; the SW state is fixed and
+	 * has been set up during connector init.
+ */
+ pps_init_registers(intel_dp, false);
+
+ return backlight_controller;
+}
+
+typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
+ enum pipe pipe);
+
+static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ return intel_de_read(dev_priv, PP_STATUS(pipe)) & PP_ON;
+}
+
+static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ return intel_de_read(dev_priv, PP_CONTROL(pipe)) & EDP_FORCE_VDD;
+}
+
+static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ return true;
+}
+
+static enum pipe
+vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
+ enum port port,
+ vlv_pipe_check pipe_check)
+{
+ enum pipe pipe;
+
+ for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
+ u32 port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(pipe)) &
+ PANEL_PORT_SELECT_MASK;
+
+ if (port_sel != PANEL_PORT_SELECT_VLV(port))
+ continue;
+
+ if (!pipe_check(dev_priv, pipe))
+ continue;
+
+ return pipe;
+ }
+
+ return INVALID_PIPE;
+}
+
+static void
+vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum port port = dig_port->base.port;
+
+ lockdep_assert_held(&dev_priv->display.pps.mutex);
+
+ /* try to find a pipe with this port selected */
+ /* first pick one where the panel is on */
+ intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
+ vlv_pipe_has_pp_on);
+ /* didn't find one? pick one where vdd is on */
+ if (intel_dp->pps.pps_pipe == INVALID_PIPE)
+ intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
+ vlv_pipe_has_vdd_on);
+ /* didn't find one? pick one with just the correct port */
+ if (intel_dp->pps.pps_pipe == INVALID_PIPE)
+ intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
+ vlv_pipe_any);
+
+ /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
+ if (intel_dp->pps.pps_pipe == INVALID_PIPE) {
+ drm_dbg_kms(&dev_priv->drm,
+ "no initial power sequencer for [ENCODER:%d:%s]\n",
+ dig_port->base.base.base.id,
+ dig_port->base.base.name);
+ return;
+ }
+
+ drm_dbg_kms(&dev_priv->drm,
+ "initial power sequencer for [ENCODER:%d:%s]: pipe %c\n",
+ dig_port->base.base.base.id,
+ dig_port->base.base.name,
+ pipe_name(intel_dp->pps.pps_pipe));
+}
+
+void intel_pps_reset_all(struct drm_i915_private *dev_priv)
+{
+ struct intel_encoder *encoder;
+
+ if (drm_WARN_ON(&dev_priv->drm, !IS_LP(dev_priv)))
+ return;
+
+ if (!HAS_DISPLAY(dev_priv))
+ return;
+
+ /*
+ * We can't grab pps_mutex here due to deadlock with power_domain
+ * mutex when power_domain functions are called while holding pps_mutex.
+ * That also means that in order to use pps_pipe the code needs to
+ * hold both a power domain reference and pps_mutex, and the power domain
+ * reference get/put must be done while _not_ holding pps_mutex.
+	 * pps_{lock,unlock}() do these steps in the correct order, so they
+	 * should always be used.
+ */
+
+ for_each_intel_dp(&dev_priv->drm, encoder) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ drm_WARN_ON(&dev_priv->drm,
+ intel_dp->pps.active_pipe != INVALID_PIPE);
+
+ if (encoder->type != INTEL_OUTPUT_EDP)
+ continue;
+
+ if (DISPLAY_VER(dev_priv) >= 9)
+ intel_dp->pps.pps_reset = true;
+ else
+ intel_dp->pps.pps_pipe = INVALID_PIPE;
+ }
+}
+
+struct pps_registers {
+ i915_reg_t pp_ctrl;
+ i915_reg_t pp_stat;
+ i915_reg_t pp_on;
+ i915_reg_t pp_off;
+ i915_reg_t pp_div;
+};
+
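+/*
+ * Resolve the PPS register offsets for this panel: BXT/GLK index the
+ * sequencer by backlight controller, VLV/CHV by pipe, and everything
+ * else uses instance 0. pp_div is left invalid on platforms where the
+ * cycle delay lives in PP_CONTROL instead of PP_DIVISOR.
+ */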
+static void intel_pps_get_registers(struct intel_dp *intel_dp,
+ struct pps_registers *regs)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ int pps_idx = 0;
+
+ memset(regs, 0, sizeof(*regs));
+
+ if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ pps_idx = bxt_power_sequencer_idx(intel_dp);
+ else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ pps_idx = vlv_power_sequencer_pipe(intel_dp);
+
+ regs->pp_ctrl = PP_CONTROL(pps_idx);
+ regs->pp_stat = PP_STATUS(pps_idx);
+ regs->pp_on = PP_ON_DELAYS(pps_idx);
+ regs->pp_off = PP_OFF_DELAYS(pps_idx);
+
+ /* Cycle delay moved from PP_DIVISOR to PP_CONTROL */
+ if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ||
+ INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
+ regs->pp_div = INVALID_MMIO_REG;
+ else
+ regs->pp_div = PP_DIVISOR(pps_idx);
+}
+
+static i915_reg_t
+_pp_ctrl_reg(struct intel_dp *intel_dp)
+{
+ struct pps_registers regs;
+
+ intel_pps_get_registers(intel_dp, &regs);
+
+ return regs.pp_ctrl;
+}
+
+static i915_reg_t
+_pp_stat_reg(struct intel_dp *intel_dp)
+{
+ struct pps_registers regs;
+
+ intel_pps_get_registers(intel_dp, &regs);
+
+ return regs.pp_stat;
+}
+
+static bool edp_have_panel_power(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ lockdep_assert_held(&dev_priv->display.pps.mutex);
+
+ if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ intel_dp->pps.pps_pipe == INVALID_PIPE)
+ return false;
+
+ return (intel_de_read(dev_priv, _pp_stat_reg(intel_dp)) & PP_ON) != 0;
+}
+
+static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ lockdep_assert_held(&dev_priv->display.pps.mutex);
+
+ if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ intel_dp->pps.pps_pipe == INVALID_PIPE)
+ return false;
+
+ return intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
+}
+
+void intel_pps_check_power_unlocked(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ if (!intel_dp_is_edp(intel_dp))
+ return;
+
+ if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
+ drm_WARN(&dev_priv->drm, 1,
+ "eDP powered off while attempting aux channel communication.\n");
+ drm_dbg_kms(&dev_priv->drm, "Status 0x%08x Control 0x%08x\n",
+ intel_de_read(dev_priv, _pp_stat_reg(intel_dp)),
+ intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)));
+ }
+}
+
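+/*
+ * Mask/value pairs for wait_panel_status(), describing the sequencer
+ * "panel on and idle", "panel off" and "power cycle done" states.
+ */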
+#define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
+#define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
+
+#define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
+#define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
+
+#define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
+#define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
+
+static void intel_pps_verify_state(struct intel_dp *intel_dp);
+
+static void wait_panel_status(struct intel_dp *intel_dp,
+ u32 mask,
+ u32 value)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ i915_reg_t pp_stat_reg, pp_ctrl_reg;
+
+ lockdep_assert_held(&dev_priv->display.pps.mutex);
+
+ intel_pps_verify_state(intel_dp);
+
+ pp_stat_reg = _pp_stat_reg(intel_dp);
+ pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
+
+ drm_dbg_kms(&dev_priv->drm,
+ "mask %08x value %08x status %08x control %08x\n",
+ mask, value,
+ intel_de_read(dev_priv, pp_stat_reg),
+ intel_de_read(dev_priv, pp_ctrl_reg));
+
+ if (intel_de_wait_for_register(dev_priv, pp_stat_reg,
+ mask, value, 5000))
+ drm_err(&dev_priv->drm,
+ "Panel status timeout: status %08x control %08x\n",
+ intel_de_read(dev_priv, pp_stat_reg),
+ intel_de_read(dev_priv, pp_ctrl_reg));
+
+ drm_dbg_kms(&dev_priv->drm, "Wait complete\n");
+}
+
+static void wait_panel_on(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+ drm_dbg_kms(&i915->drm, "Wait for panel power on\n");
+ wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
+}
+
+static void wait_panel_off(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+ drm_dbg_kms(&i915->drm, "Wait for panel power off time\n");
+ wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
+}
+
+static void wait_panel_power_cycle(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ ktime_t panel_power_on_time;
+ s64 panel_power_off_duration;
+
+ drm_dbg_kms(&i915->drm, "Wait for panel power cycle\n");
+
+	/* Take the difference between the current time and the panel power off
+	 * time, and then make the panel wait out the rest of t11_t12 if needed. */
+ panel_power_on_time = ktime_get_boottime();
+ panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->pps.panel_power_off_time);
+
+	/* When we disable the VDD override bit last, the hardware doesn't run
+	 * the power cycle delay for us, so we have to do the manual wait. */
+ if (panel_power_off_duration < (s64)intel_dp->pps.panel_power_cycle_delay)
+ wait_remaining_ms_from_jiffies(jiffies,
+ intel_dp->pps.panel_power_cycle_delay - panel_power_off_duration);
+
+ wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
+}
+
+void intel_pps_wait_power_cycle(struct intel_dp *intel_dp)
+{
+ intel_wakeref_t wakeref;
+
+ if (!intel_dp_is_edp(intel_dp))
+ return;
+
+ with_intel_pps_lock(intel_dp, wakeref)
+ wait_panel_power_cycle(intel_dp);
+}
+
+static void wait_backlight_on(struct intel_dp *intel_dp)
+{
+ wait_remaining_ms_from_jiffies(intel_dp->pps.last_power_on,
+ intel_dp->pps.backlight_on_delay);
+}
+
+static void edp_wait_backlight_off(struct intel_dp *intel_dp)
+{
+ wait_remaining_ms_from_jiffies(intel_dp->pps.last_backlight_off,
+ intel_dp->pps.backlight_off_delay);
+}
+
+/*
+ * Read the current pp_control value, unlocking the register if it
+ * is locked.
+ */
+static u32 ilk_get_pp_control(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ u32 control;
+
+ lockdep_assert_held(&dev_priv->display.pps.mutex);
+
+ control = intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp));
+ if (drm_WARN_ON(&dev_priv->drm, !HAS_DDI(dev_priv) &&
+ (control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
+ control &= ~PANEL_UNLOCK_MASK;
+ control |= PANEL_UNLOCK_REGS;
+ }
+ return control;
+}
+
+/*
+ * Must be paired with intel_pps_vdd_off_unlocked().
+ * Must hold pps_mutex around the whole on/off sequence.
+ * Can be nested with intel_pps_vdd_{on,off}() calls.
+ */
+bool intel_pps_vdd_on_unlocked(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ u32 pp;
+ i915_reg_t pp_stat_reg, pp_ctrl_reg;
+ bool need_to_disable = !intel_dp->pps.want_panel_vdd;
+
+ lockdep_assert_held(&dev_priv->display.pps.mutex);
+
+ if (!intel_dp_is_edp(intel_dp))
+ return false;
+
+ cancel_delayed_work(&intel_dp->pps.panel_vdd_work);
+ intel_dp->pps.want_panel_vdd = true;
+
+ if (edp_have_panel_vdd(intel_dp))
+ return need_to_disable;
+
+ drm_WARN_ON(&dev_priv->drm, intel_dp->pps.vdd_wakeref);
+ intel_dp->pps.vdd_wakeref = intel_display_power_get(dev_priv,
+ intel_aux_power_domain(dig_port));
+
+ drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD on\n",
+ dig_port->base.base.base.id,
+ dig_port->base.base.name);
+
+ if (!edp_have_panel_power(intel_dp))
+ wait_panel_power_cycle(intel_dp);
+
+ pp = ilk_get_pp_control(intel_dp);
+ pp |= EDP_FORCE_VDD;
+
+ pp_stat_reg = _pp_stat_reg(intel_dp);
+ pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
+
+ intel_de_write(dev_priv, pp_ctrl_reg, pp);
+ intel_de_posting_read(dev_priv, pp_ctrl_reg);
+ drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
+ intel_de_read(dev_priv, pp_stat_reg),
+ intel_de_read(dev_priv, pp_ctrl_reg));
+ /*
+	 * If the panel wasn't on, delay before accessing the aux channel.
+ */
+ if (!edp_have_panel_power(intel_dp)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "[ENCODER:%d:%s] panel power wasn't enabled\n",
+ dig_port->base.base.base.id,
+ dig_port->base.base.name);
+ msleep(intel_dp->pps.panel_power_up_delay);
+ }
+
+ return need_to_disable;
+}
+
+/*
+ * Must be paired with intel_pps_off().
+ * Nested calls to these functions are not allowed since
+ * we drop the lock. Caller must use some higher level
+ * locking to prevent nested calls from other threads.
+ */
+void intel_pps_vdd_on(struct intel_dp *intel_dp)
+{
+ intel_wakeref_t wakeref;
+ bool vdd;
+
+ if (!intel_dp_is_edp(intel_dp))
+ return;
+
+ vdd = false;
+ with_intel_pps_lock(intel_dp, wakeref)
+ vdd = intel_pps_vdd_on_unlocked(intel_dp);
+ I915_STATE_WARN(!vdd, "[ENCODER:%d:%s] VDD already requested on\n",
+ dp_to_dig_port(intel_dp)->base.base.base.id,
+ dp_to_dig_port(intel_dp)->base.base.name);
+}
+
+static void intel_pps_vdd_off_sync_unlocked(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port =
+ dp_to_dig_port(intel_dp);
+ u32 pp;
+ i915_reg_t pp_stat_reg, pp_ctrl_reg;
+
+ lockdep_assert_held(&dev_priv->display.pps.mutex);
+
+ drm_WARN_ON(&dev_priv->drm, intel_dp->pps.want_panel_vdd);
+
+ if (!edp_have_panel_vdd(intel_dp))
+ return;
+
+ drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD off\n",
+ dig_port->base.base.base.id,
+ dig_port->base.base.name);
+
+ pp = ilk_get_pp_control(intel_dp);
+ pp &= ~EDP_FORCE_VDD;
+
+ pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
+ pp_stat_reg = _pp_stat_reg(intel_dp);
+
+ intel_de_write(dev_priv, pp_ctrl_reg, pp);
+ intel_de_posting_read(dev_priv, pp_ctrl_reg);
+
+ /* Make sure sequencer is idle before allowing subsequent activity */
+ drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
+ intel_de_read(dev_priv, pp_stat_reg),
+ intel_de_read(dev_priv, pp_ctrl_reg));
+
+ if ((pp & PANEL_POWER_ON) == 0)
+ intel_dp->pps.panel_power_off_time = ktime_get_boottime();
+
+ intel_display_power_put(dev_priv,
+ intel_aux_power_domain(dig_port),
+ fetch_and_zero(&intel_dp->pps.vdd_wakeref));
+}
+
+void intel_pps_vdd_off_sync(struct intel_dp *intel_dp)
+{
+ intel_wakeref_t wakeref;
+
+ if (!intel_dp_is_edp(intel_dp))
+ return;
+
+ cancel_delayed_work_sync(&intel_dp->pps.panel_vdd_work);
+ /*
+ * vdd might still be enabled due to the delayed vdd off.
+ * Make sure vdd is actually turned off here.
+ */
+ with_intel_pps_lock(intel_dp, wakeref)
+ intel_pps_vdd_off_sync_unlocked(intel_dp);
+}
+
+static void edp_panel_vdd_work(struct work_struct *__work)
+{
+ struct intel_pps *pps = container_of(to_delayed_work(__work),
+ struct intel_pps, panel_vdd_work);
+ struct intel_dp *intel_dp = container_of(pps, struct intel_dp, pps);
+ intel_wakeref_t wakeref;
+
+ with_intel_pps_lock(intel_dp, wakeref) {
+ if (!intel_dp->pps.want_panel_vdd)
+ intel_pps_vdd_off_sync_unlocked(intel_dp);
+ }
+}
+
+static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
+{
+ unsigned long delay;
+
+ /*
+ * We may not yet know the real power sequencing delays,
+ * so keep VDD enabled until we're done with init.
+ */
+ if (intel_dp->pps.initializing)
+ return;
+
+ /*
+ * Queue the timer to fire a long time from now (relative to the power
+ * down delay) to keep the panel power up across a sequence of
+ * operations.
+ */
+ delay = msecs_to_jiffies(intel_dp->pps.panel_power_cycle_delay * 5);
+ schedule_delayed_work(&intel_dp->pps.panel_vdd_work, delay);
+}
+
+/*
+ * Must be paired with intel_pps_vdd_on_unlocked().
+ * Must hold pps_mutex around the whole on/off sequence.
+ * Can be nested with intel_pps_vdd_{on,off}() calls.
+ */
+void intel_pps_vdd_off_unlocked(struct intel_dp *intel_dp, bool sync)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ lockdep_assert_held(&dev_priv->display.pps.mutex);
+
+ if (!intel_dp_is_edp(intel_dp))
+ return;
+
+ I915_STATE_WARN(!intel_dp->pps.want_panel_vdd, "[ENCODER:%d:%s] VDD not forced on",
+ dp_to_dig_port(intel_dp)->base.base.base.id,
+ dp_to_dig_port(intel_dp)->base.base.name);
+
+ intel_dp->pps.want_panel_vdd = false;
+
+ if (sync)
+ intel_pps_vdd_off_sync_unlocked(intel_dp);
+ else
+ edp_panel_vdd_schedule_off(intel_dp);
+}
+
+void intel_pps_on_unlocked(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ u32 pp;
+ i915_reg_t pp_ctrl_reg;
+
+ lockdep_assert_held(&dev_priv->display.pps.mutex);
+
+ if (!intel_dp_is_edp(intel_dp))
+ return;
+
+ drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power on\n",
+ dp_to_dig_port(intel_dp)->base.base.base.id,
+ dp_to_dig_port(intel_dp)->base.base.name);
+
+ if (drm_WARN(&dev_priv->drm, edp_have_panel_power(intel_dp),
+ "[ENCODER:%d:%s] panel power already on\n",
+ dp_to_dig_port(intel_dp)->base.base.base.id,
+ dp_to_dig_port(intel_dp)->base.base.name))
+ return;
+
+ wait_panel_power_cycle(intel_dp);
+
+ pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
+ pp = ilk_get_pp_control(intel_dp);
+ if (IS_IRONLAKE(dev_priv)) {
+ /* ILK workaround: disable reset around power sequence */
+ pp &= ~PANEL_POWER_RESET;
+ intel_de_write(dev_priv, pp_ctrl_reg, pp);
+ intel_de_posting_read(dev_priv, pp_ctrl_reg);
+ }
+
+ pp |= PANEL_POWER_ON;
+ if (!IS_IRONLAKE(dev_priv))
+ pp |= PANEL_POWER_RESET;
+
+ intel_de_write(dev_priv, pp_ctrl_reg, pp);
+ intel_de_posting_read(dev_priv, pp_ctrl_reg);
+
+ wait_panel_on(intel_dp);
+ intel_dp->pps.last_power_on = jiffies;
+
+ if (IS_IRONLAKE(dev_priv)) {
+ pp |= PANEL_POWER_RESET; /* restore panel reset bit */
+ intel_de_write(dev_priv, pp_ctrl_reg, pp);
+ intel_de_posting_read(dev_priv, pp_ctrl_reg);
+ }
+}
+
+void intel_pps_on(struct intel_dp *intel_dp)
+{
+ intel_wakeref_t wakeref;
+
+ if (!intel_dp_is_edp(intel_dp))
+ return;
+
+ with_intel_pps_lock(intel_dp, wakeref)
+ intel_pps_on_unlocked(intel_dp);
+}
+
+void intel_pps_off_unlocked(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ u32 pp;
+ i915_reg_t pp_ctrl_reg;
+
+ lockdep_assert_held(&dev_priv->display.pps.mutex);
+
+ if (!intel_dp_is_edp(intel_dp))
+ return;
+
+ drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power off\n",
+ dig_port->base.base.base.id, dig_port->base.base.name);
+
+ drm_WARN(&dev_priv->drm, !intel_dp->pps.want_panel_vdd,
+ "Need [ENCODER:%d:%s] VDD to turn off panel\n",
+ dig_port->base.base.base.id, dig_port->base.base.name);
+
+ pp = ilk_get_pp_control(intel_dp);
+ /* We need to switch off panel power _and_ force vdd, for otherwise some
+ * panels get very unhappy and cease to work. */
+ pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
+ EDP_BLC_ENABLE);
+
+ pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
+
+ intel_dp->pps.want_panel_vdd = false;
+
+ intel_de_write(dev_priv, pp_ctrl_reg, pp);
+ intel_de_posting_read(dev_priv, pp_ctrl_reg);
+
+ wait_panel_off(intel_dp);
+ intel_dp->pps.panel_power_off_time = ktime_get_boottime();
+
+ /* We got a reference when we enabled the VDD. */
+ intel_display_power_put(dev_priv,
+ intel_aux_power_domain(dig_port),
+ fetch_and_zero(&intel_dp->pps.vdd_wakeref));
+}
+
+void intel_pps_off(struct intel_dp *intel_dp)
+{
+ intel_wakeref_t wakeref;
+
+ if (!intel_dp_is_edp(intel_dp))
+ return;
+
+ with_intel_pps_lock(intel_dp, wakeref)
+ intel_pps_off_unlocked(intel_dp);
+}
+
+/* Enable backlight in the panel power control. */
+void intel_pps_backlight_on(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ intel_wakeref_t wakeref;
+
+ /*
+ * If we enable the backlight right away following a panel power
+ * on, we may see slight flicker as the panel syncs with the eDP
+ * link. So delay a bit to make sure the image is solid before
+ * allowing it to appear.
+ */
+ wait_backlight_on(intel_dp);
+
+ with_intel_pps_lock(intel_dp, wakeref) {
+ i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
+ u32 pp;
+
+ pp = ilk_get_pp_control(intel_dp);
+ pp |= EDP_BLC_ENABLE;
+
+ intel_de_write(dev_priv, pp_ctrl_reg, pp);
+ intel_de_posting_read(dev_priv, pp_ctrl_reg);
+ }
+}
+
+/* Disable backlight in the panel power control. */
+void intel_pps_backlight_off(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ intel_wakeref_t wakeref;
+
+ if (!intel_dp_is_edp(intel_dp))
+ return;
+
+ with_intel_pps_lock(intel_dp, wakeref) {
+ i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
+ u32 pp;
+
+ pp = ilk_get_pp_control(intel_dp);
+ pp &= ~EDP_BLC_ENABLE;
+
+ intel_de_write(dev_priv, pp_ctrl_reg, pp);
+ intel_de_posting_read(dev_priv, pp_ctrl_reg);
+ }
+
+ intel_dp->pps.last_backlight_off = jiffies;
+ edp_wait_backlight_off(intel_dp);
+}
+
+/*
+ * Hook for controlling the panel power control backlight through the bl_power
+ * sysfs attribute. Take care to handle multiple calls.
+ */
+void intel_pps_backlight_power(struct intel_connector *connector, bool enable)
+{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+ intel_wakeref_t wakeref;
+ bool is_enabled;
+
+ is_enabled = false;
+ with_intel_pps_lock(intel_dp, wakeref)
+ is_enabled = ilk_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
+ if (is_enabled == enable)
+ return;
+
+ drm_dbg_kms(&i915->drm, "panel power control backlight %s\n",
+ enable ? "enable" : "disable");
+
+ if (enable)
+ intel_pps_backlight_on(intel_dp);
+ else
+ intel_pps_backlight_off(intel_dp);
+}
+
+static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+ enum pipe pipe = intel_dp->pps.pps_pipe;
+ i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe);
+
+ drm_WARN_ON(&dev_priv->drm, intel_dp->pps.active_pipe != INVALID_PIPE);
+
+ if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B))
+ return;
+
+ intel_pps_vdd_off_sync_unlocked(intel_dp);
+
+ /*
+ * VLV seems to get confused when multiple power sequencers
+ * have the same port selected (even if only one has power/vdd
+	 * enabled). The failure manifests as vlv_wait_port_ready() failing.
+	 * CHV, on the other hand, doesn't seem to mind having the same port
+	 * selected in multiple power sequencers, but let's always clear the
+	 * port select when logically disconnecting a power sequencer
+ * from a port.
+ */
+ drm_dbg_kms(&dev_priv->drm,
+ "detaching pipe %c power sequencer from [ENCODER:%d:%s]\n",
+ pipe_name(pipe), dig_port->base.base.base.id,
+ dig_port->base.base.name);
+ intel_de_write(dev_priv, pp_on_reg, 0);
+ intel_de_posting_read(dev_priv, pp_on_reg);
+
+ intel_dp->pps.pps_pipe = INVALID_PIPE;
+}
+
+static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ struct intel_encoder *encoder;
+
+ lockdep_assert_held(&dev_priv->display.pps.mutex);
+
+ for_each_intel_dp(&dev_priv->drm, encoder) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ drm_WARN(&dev_priv->drm, intel_dp->pps.active_pipe == pipe,
+ "stealing pipe %c power sequencer from active [ENCODER:%d:%s]\n",
+ pipe_name(pipe), encoder->base.base.id,
+ encoder->base.name);
+
+ if (intel_dp->pps.pps_pipe != pipe)
+ continue;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "stealing pipe %c power sequencer from [ENCODER:%d:%s]\n",
+ pipe_name(pipe), encoder->base.base.id,
+ encoder->base.name);
+
+ /* make sure vdd is off before we steal it */
+ vlv_detach_power_sequencer(intel_dp);
+ }
+}
+
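+/*
+ * Called during VLV/CHV port enable: detach any power sequencer that
+ * was previously bound to this port, steal the one matching the new
+ * pipe from other ports, and (for eDP) reinitialize it for this
+ * pipe/port combination.
+ */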
+void vlv_pps_init(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ lockdep_assert_held(&dev_priv->display.pps.mutex);
+
+ drm_WARN_ON(&dev_priv->drm, intel_dp->pps.active_pipe != INVALID_PIPE);
+
+ if (intel_dp->pps.pps_pipe != INVALID_PIPE &&
+ intel_dp->pps.pps_pipe != crtc->pipe) {
+ /*
+ * If another power sequencer was being used on this
+ * port previously make sure to turn off vdd there while
+ * we still have control of it.
+ */
+ vlv_detach_power_sequencer(intel_dp);
+ }
+
+ /*
+ * We may be stealing the power
+ * sequencer from another port.
+ */
+ vlv_steal_power_sequencer(dev_priv, crtc->pipe);
+
+ intel_dp->pps.active_pipe = crtc->pipe;
+
+ if (!intel_dp_is_edp(intel_dp))
+ return;
+
+ /* now it's all ours */
+ intel_dp->pps.pps_pipe = crtc->pipe;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "initializing pipe %c power sequencer for [ENCODER:%d:%s]\n",
+ pipe_name(intel_dp->pps.pps_pipe), encoder->base.base.id,
+ encoder->base.name);
+
+ /* init power sequencer on this pipe and port */
+ pps_init_delays(intel_dp);
+ pps_init_registers(intel_dp, true);
+}
+
+static void pps_vdd_init(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+
+ lockdep_assert_held(&dev_priv->display.pps.mutex);
+
+ if (!edp_have_panel_vdd(intel_dp))
+ return;
+
+ /*
+ * The VDD bit needs a power domain reference, so if the bit is
+ * already enabled when we boot or resume, grab this reference and
+ * schedule a vdd off, so we don't hold on to the reference
+ * indefinitely.
+ */
+ drm_dbg_kms(&dev_priv->drm,
+ "VDD left on by BIOS, adjusting state tracking\n");
+ drm_WARN_ON(&dev_priv->drm, intel_dp->pps.vdd_wakeref);
+ intel_dp->pps.vdd_wakeref = intel_display_power_get(dev_priv,
+ intel_aux_power_domain(dig_port));
+}
+
+bool intel_pps_have_panel_power_or_vdd(struct intel_dp *intel_dp)
+{
+ intel_wakeref_t wakeref;
+ bool have_power = false;
+
+ with_intel_pps_lock(intel_dp, wakeref) {
+ have_power = edp_have_panel_power(intel_dp) ||
+ edp_have_panel_vdd(intel_dp);
+ }
+
+ return have_power;
+}
+
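+/*
+ * Initialize the PPS timestamps as if the panel was powered off just
+ * now, so the very first power on honours the full power cycle delay.
+ */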
+static void pps_init_timestamps(struct intel_dp *intel_dp)
+{
+ intel_dp->pps.panel_power_off_time = ktime_get_boottime();
+ intel_dp->pps.last_power_on = jiffies;
+ intel_dp->pps.last_backlight_off = jiffies;
+}
+
+static void
+intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ u32 pp_on, pp_off, pp_ctl;
+ struct pps_registers regs;
+
+ intel_pps_get_registers(intel_dp, &regs);
+
+ pp_ctl = ilk_get_pp_control(intel_dp);
+
+ /* Ensure PPS is unlocked */
+ if (!HAS_DDI(dev_priv))
+ intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);
+
+ pp_on = intel_de_read(dev_priv, regs.pp_on);
+ pp_off = intel_de_read(dev_priv, regs.pp_off);
+
+ /* Pull timing values out of registers */
+ seq->t1_t3 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on);
+ seq->t8 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, pp_on);
+ seq->t9 = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, pp_off);
+ seq->t10 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, pp_off);
+
+ if (i915_mmio_reg_valid(regs.pp_div)) {
+ u32 pp_div;
+
+ pp_div = intel_de_read(dev_priv, regs.pp_div);
+
+ seq->t11_t12 = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div) * 1000;
+ } else {
+ seq->t11_t12 = REG_FIELD_GET(BXT_POWER_CYCLE_DELAY_MASK, pp_ctl) * 1000;
+ }
+}
+
+static void
+intel_pps_dump_state(struct intel_dp *intel_dp, const char *state_name,
+ const struct edp_power_seq *seq)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+ drm_dbg_kms(&i915->drm, "%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
+ state_name,
+ seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12);
+}
+
+static void
+intel_pps_verify_state(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct edp_power_seq hw;
+ struct edp_power_seq *sw = &intel_dp->pps.pps_delays;
+
+ intel_pps_readout_hw_state(intel_dp, &hw);
+
+ if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 ||
+ hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) {
+ drm_err(&i915->drm, "PPS state mismatch\n");
+ intel_pps_dump_state(intel_dp, "sw", sw);
+ intel_pps_dump_state(intel_dp, "hw", &hw);
+ }
+}
+
+static bool pps_delays_valid(struct edp_power_seq *delays)
+{
+ return delays->t1_t3 || delays->t8 || delays->t9 ||
+ delays->t10 || delays->t11_t12;
+}
+
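+/*
+ * Snapshot the delays the BIOS left in the hardware. The first readout
+ * is cached so that later reinits keep using the original BIOS values.
+ */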
+static void pps_init_delays_bios(struct intel_dp *intel_dp,
+ struct edp_power_seq *bios)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ lockdep_assert_held(&dev_priv->display.pps.mutex);
+
+ if (!pps_delays_valid(&intel_dp->pps.bios_pps_delays))
+ intel_pps_readout_hw_state(intel_dp, &intel_dp->pps.bios_pps_delays);
+
+ *bios = intel_dp->pps.bios_pps_delays;
+
+ intel_pps_dump_state(intel_dp, "bios", bios);
+}
+
+static void pps_init_delays_vbt(struct intel_dp *intel_dp,
+ struct edp_power_seq *vbt)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_connector *connector = intel_dp->attached_connector;
+
+ *vbt = connector->panel.vbt.edp.pps;
+
+ if (!pps_delays_valid(vbt))
+ return;
+
+	/* On the Toshiba Satellite P50-C-18C the VBT T12 delay
+	 * of 500ms appears to be too short. Occasionally the panel
+	 * just fails to power back on. Increasing the delay to 1300ms
+	 * seems sufficient to avoid this problem.
+	 */
+ if (intel_has_quirk(dev_priv, QUIRK_INCREASE_T12_DELAY)) {
+ vbt->t11_t12 = max_t(u16, vbt->t11_t12, 1300 * 10);
+ drm_dbg_kms(&dev_priv->drm,
+ "Increasing T12 panel delay as per the quirk to %d\n",
+ vbt->t11_t12);
+ }
+
+ /* T11_T12 delay is special and actually in units of 100ms, but zero
+ * based in the hw (so we need to add 100 ms). But the sw vbt
+	 * table multiplies it by 1000 to put it in units of 100usec,
+ * too. */
+ vbt->t11_t12 += 100 * 10;
+
+ intel_pps_dump_state(intel_dp, "vbt", vbt);
+}
+
+static void pps_init_delays_spec(struct intel_dp *intel_dp,
+ struct edp_power_seq *spec)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ lockdep_assert_held(&dev_priv->display.pps.mutex);
+
+ /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
+ * our hw here, which are all in 100usec. */
+ spec->t1_t3 = 210 * 10;
+ spec->t8 = 50 * 10; /* no limit for t8, use t7 instead */
+ spec->t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
+ spec->t10 = 500 * 10;
+ /* This one is special and actually in units of 100ms, but zero
+ * based in the hw (so we need to add 100 ms). But the sw vbt
+	 * table multiplies it by 1000 to put it in units of 100usec,
+ * too. */
+ spec->t11_t12 = (510 + 100) * 10;
+
+ intel_pps_dump_state(intel_dp, "spec", spec);
+}
+
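+/*
+ * Compute the final power sequencer delays from the BIOS, VBT and eDP
+ * spec values: use the max of the BIOS and VBT delays, falling back to
+ * the spec limits for any field where both are zero.
+ */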
+static void pps_init_delays(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct edp_power_seq cur, vbt, spec,
+ *final = &intel_dp->pps.pps_delays;
+
+ lockdep_assert_held(&dev_priv->display.pps.mutex);
+
+ /* already initialized? */
+ if (pps_delays_valid(final))
+ return;
+
+ pps_init_delays_bios(intel_dp, &cur);
+ pps_init_delays_vbt(intel_dp, &vbt);
+ pps_init_delays_spec(intel_dp, &spec);
+
+ /* Use the max of the register settings and vbt. If both are
+ * unset, fall back to the spec limits. */
+#define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
+ spec.field : \
+ max(cur.field, vbt.field))
+ assign_final(t1_t3);
+ assign_final(t8);
+ assign_final(t9);
+ assign_final(t10);
+ assign_final(t11_t12);
+#undef assign_final
+
+#define get_delay(field) (DIV_ROUND_UP(final->field, 10))
+ intel_dp->pps.panel_power_up_delay = get_delay(t1_t3);
+ intel_dp->pps.backlight_on_delay = get_delay(t8);
+ intel_dp->pps.backlight_off_delay = get_delay(t9);
+ intel_dp->pps.panel_power_down_delay = get_delay(t10);
+ intel_dp->pps.panel_power_cycle_delay = get_delay(t11_t12);
+#undef get_delay
+
+ drm_dbg_kms(&dev_priv->drm,
+ "panel power up delay %d, power down delay %d, power cycle delay %d\n",
+ intel_dp->pps.panel_power_up_delay,
+ intel_dp->pps.panel_power_down_delay,
+ intel_dp->pps.panel_power_cycle_delay);
+
+ drm_dbg_kms(&dev_priv->drm, "backlight on delay %d, off delay %d\n",
+ intel_dp->pps.backlight_on_delay,
+ intel_dp->pps.backlight_off_delay);
+
+ /*
+ * We override the HW backlight delays to 1 because we do manual waits
+ * on them. For T8, even BSpec recommends doing it. For T9, if we
+ * don't do this, we'll end up waiting for the backlight off delay
+ * twice: once when we do the manual sleep, and once when we disable
+ * the panel and wait for the PP_STATUS bit to become zero.
+ */
+ final->t8 = 1;
+ final->t9 = 1;
+
+ /*
+ * HW has only a 100msec granularity for t11_t12 so round it up
+ * accordingly.
+ */
+ final->t11_t12 = roundup(final->t11_t12, 100 * 10);
+}
+
+static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ u32 pp_on, pp_off, port_sel = 0;
+ int div = RUNTIME_INFO(dev_priv)->rawclk_freq / 1000;
+ struct pps_registers regs;
+ enum port port = dp_to_dig_port(intel_dp)->base.port;
+ const struct edp_power_seq *seq = &intel_dp->pps.pps_delays;
+
+ lockdep_assert_held(&dev_priv->display.pps.mutex);
+
+ intel_pps_get_registers(intel_dp, &regs);
+
+ /*
+ * On some VLV machines the BIOS can leave the VDD
+ * enabled even on power sequencers which aren't
+ * hooked up to any port. This would mess up the
+ * power domain tracking the first time we pick
+ * one of these power sequencers for use since
+ * intel_pps_vdd_on_unlocked() would notice that the VDD was
+ * already on and therefore wouldn't grab the power
+ * domain reference. Disable VDD first to avoid this.
+ * This also avoids spuriously turning the VDD on as
+ * soon as the new power sequencer gets initialized.
+ */
+ if (force_disable_vdd) {
+ u32 pp = ilk_get_pp_control(intel_dp);
+
+ drm_WARN(&dev_priv->drm, pp & PANEL_POWER_ON,
+ "Panel power already on\n");
+
+ if (pp & EDP_FORCE_VDD)
+ drm_dbg_kms(&dev_priv->drm,
+ "VDD already on, disabling first\n");
+
+ pp &= ~EDP_FORCE_VDD;
+
+ intel_de_write(dev_priv, regs.pp_ctrl, pp);
+ }
+
+ pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->t1_t3) |
+ REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, seq->t8);
+ pp_off = REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, seq->t9) |
+ REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, seq->t10);
+
+ /* Haswell doesn't have any port selection bits for the panel
+ * power sequencer any more. */
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ port_sel = PANEL_PORT_SELECT_VLV(port);
+ } else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
+ switch (port) {
+ case PORT_A:
+ port_sel = PANEL_PORT_SELECT_DPA;
+ break;
+ case PORT_C:
+ port_sel = PANEL_PORT_SELECT_DPC;
+ break;
+ case PORT_D:
+ port_sel = PANEL_PORT_SELECT_DPD;
+ break;
+ default:
+ MISSING_CASE(port);
+ break;
+ }
+ }
+
+ pp_on |= port_sel;
+
+ intel_de_write(dev_priv, regs.pp_on, pp_on);
+ intel_de_write(dev_priv, regs.pp_off, pp_off);
+
+ /*
+	 * Compute the divisor for the pp clock; simply match the Bspec formula.
+ */
+ if (i915_mmio_reg_valid(regs.pp_div)) {
+ intel_de_write(dev_priv, regs.pp_div,
+ REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) | REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)));
+ } else {
+ u32 pp_ctl;
+
+ pp_ctl = intel_de_read(dev_priv, regs.pp_ctrl);
+ pp_ctl &= ~BXT_POWER_CYCLE_DELAY_MASK;
+ pp_ctl |= REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000));
+ intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);
+ }
+
+ drm_dbg_kms(&dev_priv->drm,
+ "panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
+ intel_de_read(dev_priv, regs.pp_on),
+ intel_de_read(dev_priv, regs.pp_off),
+ i915_mmio_reg_valid(regs.pp_div) ?
+ intel_de_read(dev_priv, regs.pp_div) :
+ (intel_de_read(dev_priv, regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK));
+}
+
+void intel_pps_encoder_reset(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ intel_wakeref_t wakeref;
+
+ if (!intel_dp_is_edp(intel_dp))
+ return;
+
+ with_intel_pps_lock(intel_dp, wakeref) {
+ /*
+ * Reinit the power sequencer also on the resume path, in case
+		 * the BIOS did something nasty with it.
+ */
+ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+ vlv_initial_power_sequencer_setup(intel_dp);
+
+ pps_init_delays(intel_dp);
+ pps_init_registers(intel_dp, false);
+ pps_vdd_init(intel_dp);
+
+ if (edp_have_panel_vdd(intel_dp))
+ edp_panel_vdd_schedule_off(intel_dp);
+ }
+}
+
+void intel_pps_init(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ intel_wakeref_t wakeref;
+
+ intel_dp->pps.initializing = true;
+ INIT_DELAYED_WORK(&intel_dp->pps.panel_vdd_work, edp_panel_vdd_work);
+
+ pps_init_timestamps(intel_dp);
+
+ with_intel_pps_lock(intel_dp, wakeref) {
+ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+ vlv_initial_power_sequencer_setup(intel_dp);
+
+ pps_init_delays(intel_dp);
+ pps_init_registers(intel_dp, false);
+ pps_vdd_init(intel_dp);
+ }
+}
+
+void intel_pps_init_late(struct intel_dp *intel_dp)
+{
+ intel_wakeref_t wakeref;
+
+ with_intel_pps_lock(intel_dp, wakeref) {
+ /* Reinit delays after per-panel info has been parsed from VBT */
+ memset(&intel_dp->pps.pps_delays, 0, sizeof(intel_dp->pps.pps_delays));
+ pps_init_delays(intel_dp);
+ pps_init_registers(intel_dp, false);
+
+ intel_dp->pps.initializing = false;
+
+ if (edp_have_panel_vdd(intel_dp))
+ edp_panel_vdd_schedule_off(intel_dp);
+ }
+}
+
+void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
+{
+ int pps_num;
+ int pps_idx;
+
+ if (!HAS_DISPLAY(dev_priv) || HAS_DDI(dev_priv))
+ return;
+ /*
+ * This w/a is needed at least on CPT/PPT, but to be sure apply it
+	 * everywhere registers can be write protected.
+ */
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ pps_num = 2;
+ else
+ pps_num = 1;
+
+ for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
+ u32 val = intel_de_read(dev_priv, PP_CONTROL(pps_idx));
+
+ val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
+ intel_de_write(dev_priv, PP_CONTROL(pps_idx), val);
+ }
+}
+
+void intel_pps_setup(struct drm_i915_private *i915)
+{
+ if (HAS_PCH_SPLIT(i915) || IS_GEMINILAKE(i915) || IS_BROXTON(i915))
+ i915->display.pps.mmio_base = PCH_PPS_BASE;
+ else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+ i915->display.pps.mmio_base = VLV_PPS_BASE;
+ else
+ i915->display.pps.mmio_base = PPS_BASE;
+}
+
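+/*
+ * Warn if the power sequencer registers driving the given pipe's panel
+ * are write protected while the panel is powered on, since the modeset
+ * code would then be unable to control panel power.
+ */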
+void assert_pps_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+ i915_reg_t pp_reg;
+ u32 val;
+ enum pipe panel_pipe = INVALID_PIPE;
+ bool locked = true;
+
+ if (drm_WARN_ON(&dev_priv->drm, HAS_DDI(dev_priv)))
+ return;
+
+ if (HAS_PCH_SPLIT(dev_priv)) {
+ u32 port_sel;
+
+ pp_reg = PP_CONTROL(0);
+ port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
+
+ switch (port_sel) {
+ case PANEL_PORT_SELECT_LVDS:
+ intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
+ break;
+ case PANEL_PORT_SELECT_DPA:
+ g4x_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
+ break;
+ case PANEL_PORT_SELECT_DPC:
+ g4x_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
+ break;
+ case PANEL_PORT_SELECT_DPD:
+ g4x_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
+ break;
+ default:
+ MISSING_CASE(port_sel);
+ break;
+ }
+ } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ /* presumably write lock depends on pipe, not port select */
+ pp_reg = PP_CONTROL(pipe);
+ panel_pipe = pipe;
+ } else {
+ u32 port_sel;
+
+ pp_reg = PP_CONTROL(0);
+ port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
+
+ drm_WARN_ON(&dev_priv->drm,
+ port_sel != PANEL_PORT_SELECT_LVDS);
+ intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
+ }
+
+ val = intel_de_read(dev_priv, pp_reg);
+ if (!(val & PANEL_POWER_ON) ||
+ ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
+ locked = false;
+
+ I915_STATE_WARN(panel_pipe == pipe && locked,
+ "panel assertion failure, pipe %c regs locked\n",
+ pipe_name(pipe));
+}
diff --git a/drivers/gpu/drm/i915/display/intel_pps.h b/drivers/gpu/drm/i915/display/intel_pps.h
new file mode 100644
index 000000000..a3a56f903
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_pps.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef __INTEL_PPS_H__
+#define __INTEL_PPS_H__
+
+#include <linux/types.h>
+
+#include "intel_wakeref.h"
+
+enum pipe;
+struct drm_i915_private;
+struct intel_connector;
+struct intel_crtc_state;
+struct intel_dp;
+struct intel_encoder;
+
+intel_wakeref_t intel_pps_lock(struct intel_dp *intel_dp);
+intel_wakeref_t intel_pps_unlock(struct intel_dp *intel_dp, intel_wakeref_t wakeref);
+
+#define with_intel_pps_lock(dp, wf) \
+ for ((wf) = intel_pps_lock(dp); (wf); (wf) = intel_pps_unlock((dp), (wf)))
+
+void intel_pps_backlight_on(struct intel_dp *intel_dp);
+void intel_pps_backlight_off(struct intel_dp *intel_dp);
+void intel_pps_backlight_power(struct intel_connector *connector, bool enable);
+
+bool intel_pps_vdd_on_unlocked(struct intel_dp *intel_dp);
+void intel_pps_vdd_off_unlocked(struct intel_dp *intel_dp, bool sync);
+void intel_pps_on_unlocked(struct intel_dp *intel_dp);
+void intel_pps_off_unlocked(struct intel_dp *intel_dp);
+void intel_pps_check_power_unlocked(struct intel_dp *intel_dp);
+
+void intel_pps_vdd_on(struct intel_dp *intel_dp);
+void intel_pps_on(struct intel_dp *intel_dp);
+void intel_pps_off(struct intel_dp *intel_dp);
+void intel_pps_vdd_off_sync(struct intel_dp *intel_dp);
+bool intel_pps_have_panel_power_or_vdd(struct intel_dp *intel_dp);
+void intel_pps_wait_power_cycle(struct intel_dp *intel_dp);
+
+void intel_pps_init(struct intel_dp *intel_dp);
+void intel_pps_init_late(struct intel_dp *intel_dp);
+void intel_pps_encoder_reset(struct intel_dp *intel_dp);
+void intel_pps_reset_all(struct drm_i915_private *i915);
+
+void vlv_pps_init(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+
+void intel_pps_unlock_regs_wa(struct drm_i915_private *i915);
+void intel_pps_setup(struct drm_i915_private *i915);
+
+void assert_pps_unlocked(struct drm_i915_private *i915, enum pipe pipe);
+
+#endif /* __INTEL_PPS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
new file mode 100644
index 000000000..e2d7c0a68
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -0,0 +1,2656 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_damage_helper.h>
+
+#include "display/intel_dp.h"
+
+#include "i915_drv.h"
+#include "intel_atomic.h"
+#include "intel_crtc.h"
+#include "intel_de.h"
+#include "intel_display_types.h"
+#include "intel_dp_aux.h"
+#include "intel_hdmi.h"
+#include "intel_psr.h"
+#include "intel_snps_phy.h"
+#include "skl_universal_plane.h"
+
+/**
+ * DOC: Panel Self Refresh (PSR/SRD)
+ *
+ * Since Haswell, the display controller supports Panel Self-Refresh on
+ * display panels which have a remote frame buffer (RFB) implemented
+ * according to the PSR spec in eDP 1.3. The PSR feature allows the display
+ * to go to lower standby states when the system is idle but the display is
+ * on, as it completely eliminates display refresh requests to DDR memory
+ * as long as the frame buffer for that display is unchanged.
+ *
+ * Panel Self Refresh must be supported by both Hardware (source) and
+ * Panel (sink).
+ *
+ * PSR saves power by caching the framebuffer in the panel RFB, which allows us
+ * to power down the link and memory controller. For DSI panels the same idea
+ * is called "manual mode".
+ *
+ * The implementation uses the hardware-based PSR support which automatically
+ * enters/exits self-refresh mode. The hardware takes care of sending the
+ * required DP aux message and could even retrain the link (that part isn't
+ * enabled yet though). The hardware also keeps track of any frontbuffer
+ * changes to know when to exit self-refresh mode again. Unfortunately that
+ * part doesn't work too well, which is why the i915 PSR support uses
+ * software frontbuffer tracking to make sure it doesn't miss a screen
+ * update. For this integration intel_psr_invalidate() and intel_psr_flush()
+ * get called by the frontbuffer tracking code. Note that because of locking
+ * issues the self-refresh re-enable code is done from a work queue, which
+ * must be correctly synchronized/cancelled when shutting down the pipe.
+ *
+ * DC3CO (DC3 clock off)
+ *
+ * On top of PSR2, GEN12 adds an intermediate power savings state that turns
+ * the clock off automatically during PSR2 idle state.
+ * The smaller overhead of DC3CO entry/exit vs. the overhead of PSR2 deep sleep
+ * entry/exit allows the HW to enter a low-power state even when page flipping
+ * periodically (for instance in a 30fps video playback scenario).
+ *
+ * Every time a flip occurs PSR2 will get out of deep sleep state (if it was
+ * in it), so DC3CO is enabled and tgl_dc3co_disable_work is scheduled to run
+ * after 6 frames; if no other flip occurs and that function is executed,
+ * DC3CO is disabled and PSR2 is configured to enter deep sleep again,
+ * resetting the cycle in case of another flip.
+ * Front buffer modifications do not trigger DC3CO activation on purpose as
+ * that would bring a lot of complexity and most modern systems will only
+ * use page flips.
+ */
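+
+/*
+ * Illustrative DC3CO timing sketch (not from Bspec; the numbers assume a
+ * hypothetical 60 Hz panel): one frame lasts roughly 16667 us, so the
+ * "6 idle frames" delay used for dc3co_exit_delay below works out to about
+ * 100 ms. A page flip arriving within that window keeps DC3CO armed; once
+ * the delayed work runs with no pending flip, tgl_psr2_disable_dc3co()
+ * restores the DC6/deep-sleep behaviour.
+ */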
+
+static bool psr_global_enabled(struct intel_dp *intel_dp)
+{
+ struct intel_connector *connector = intel_dp->attached_connector;
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+ switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
+ case I915_PSR_DEBUG_DEFAULT:
+ if (i915->params.enable_psr == -1)
+ return connector->panel.vbt.psr.enable;
+ return i915->params.enable_psr;
+ case I915_PSR_DEBUG_DISABLE:
+ return false;
+ default:
+ return true;
+ }
+}
+
+static bool psr2_global_enabled(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+ switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
+ case I915_PSR_DEBUG_DISABLE:
+ case I915_PSR_DEBUG_FORCE_PSR1:
+ return false;
+ default:
+ if (i915->params.enable_psr == 1)
+ return false;
+ return true;
+ }
+}
+
+static u32 psr_irq_psr_error_bit_get(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_ERROR :
+ EDP_PSR_ERROR(intel_dp->psr.transcoder);
+}
+
+static u32 psr_irq_post_exit_bit_get(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_POST_EXIT :
+ EDP_PSR_POST_EXIT(intel_dp->psr.transcoder);
+}
+
+static u32 psr_irq_pre_entry_bit_get(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_PRE_ENTRY :
+ EDP_PSR_PRE_ENTRY(intel_dp->psr.transcoder);
+}
+
+static u32 psr_irq_mask_get(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_MASK :
+ EDP_PSR_MASK(intel_dp->psr.transcoder);
+}
+
+static void psr_irq_control(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ i915_reg_t imr_reg;
+ u32 mask, val;
+
+ if (DISPLAY_VER(dev_priv) >= 12)
+ imr_reg = TRANS_PSR_IMR(intel_dp->psr.transcoder);
+ else
+ imr_reg = EDP_PSR_IMR;
+
+ mask = psr_irq_psr_error_bit_get(intel_dp);
+ if (intel_dp->psr.debug & I915_PSR_DEBUG_IRQ)
+ mask |= psr_irq_post_exit_bit_get(intel_dp) |
+ psr_irq_pre_entry_bit_get(intel_dp);
+
+ val = intel_de_read(dev_priv, imr_reg);
+ val &= ~psr_irq_mask_get(intel_dp);
+ val |= ~mask;
+ intel_de_write(dev_priv, imr_reg, val);
+}
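+
+/*
+ * Worked trace of the IMR update above (i915 IMR semantics: a set bit masks
+ * that interrupt). With I915_PSR_DEBUG_IRQ unset, mask holds only the PSR
+ * error bit, so "val &= ~psr_irq_mask_get()" first clears this transcoder's
+ * PSR field and "val |= ~mask" then sets every other bit in the register,
+ * leaving only the PSR error interrupt unmasked.
+ */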
+
+static void psr_event_print(struct drm_i915_private *i915,
+ u32 val, bool psr2_enabled)
+{
+ drm_dbg_kms(&i915->drm, "PSR exit events: 0x%x\n", val);
+ if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
+ drm_dbg_kms(&i915->drm, "\tPSR2 watchdog timer expired\n");
+ if ((val & PSR_EVENT_PSR2_DISABLED) && psr2_enabled)
+ drm_dbg_kms(&i915->drm, "\tPSR2 disabled\n");
+ if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
+ drm_dbg_kms(&i915->drm, "\tSU dirty FIFO underrun\n");
+ if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
+ drm_dbg_kms(&i915->drm, "\tSU CRC FIFO underrun\n");
+ if (val & PSR_EVENT_GRAPHICS_RESET)
+ drm_dbg_kms(&i915->drm, "\tGraphics reset\n");
+ if (val & PSR_EVENT_PCH_INTERRUPT)
+ drm_dbg_kms(&i915->drm, "\tPCH interrupt\n");
+ if (val & PSR_EVENT_MEMORY_UP)
+ drm_dbg_kms(&i915->drm, "\tMemory up\n");
+ if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
+ drm_dbg_kms(&i915->drm, "\tFront buffer modification\n");
+ if (val & PSR_EVENT_WD_TIMER_EXPIRE)
+ drm_dbg_kms(&i915->drm, "\tPSR watchdog timer expired\n");
+ if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
+ drm_dbg_kms(&i915->drm, "\tPIPE registers updated\n");
+ if (val & PSR_EVENT_REGISTER_UPDATE)
+ drm_dbg_kms(&i915->drm, "\tRegister updated\n");
+ if (val & PSR_EVENT_HDCP_ENABLE)
+ drm_dbg_kms(&i915->drm, "\tHDCP enabled\n");
+ if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
+ drm_dbg_kms(&i915->drm, "\tKVMR session enabled\n");
+ if (val & PSR_EVENT_VBI_ENABLE)
+ drm_dbg_kms(&i915->drm, "\tVBI enabled\n");
+ if (val & PSR_EVENT_LPSP_MODE_EXIT)
+ drm_dbg_kms(&i915->drm, "\tLPSP mode exited\n");
+ if ((val & PSR_EVENT_PSR_DISABLE) && !psr2_enabled)
+ drm_dbg_kms(&i915->drm, "\tPSR disabled\n");
+}
+
+void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
+{
+ enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ ktime_t time_ns = ktime_get();
+ i915_reg_t imr_reg;
+
+ if (DISPLAY_VER(dev_priv) >= 12)
+ imr_reg = TRANS_PSR_IMR(intel_dp->psr.transcoder);
+ else
+ imr_reg = EDP_PSR_IMR;
+
+ if (psr_iir & psr_irq_pre_entry_bit_get(intel_dp)) {
+ intel_dp->psr.last_entry_attempt = time_ns;
+ drm_dbg_kms(&dev_priv->drm,
+ "[transcoder %s] PSR entry attempt in 2 vblanks\n",
+ transcoder_name(cpu_transcoder));
+ }
+
+ if (psr_iir & psr_irq_post_exit_bit_get(intel_dp)) {
+ intel_dp->psr.last_exit = time_ns;
+ drm_dbg_kms(&dev_priv->drm,
+ "[transcoder %s] PSR exit completed\n",
+ transcoder_name(cpu_transcoder));
+
+ if (DISPLAY_VER(dev_priv) >= 9) {
+ u32 val = intel_de_read(dev_priv,
+ PSR_EVENT(cpu_transcoder));
+ bool psr2_enabled = intel_dp->psr.psr2_enabled;
+
+ intel_de_write(dev_priv, PSR_EVENT(cpu_transcoder),
+ val);
+ psr_event_print(dev_priv, val, psr2_enabled);
+ }
+ }
+
+ if (psr_iir & psr_irq_psr_error_bit_get(intel_dp)) {
+ u32 val;
+
+ drm_warn(&dev_priv->drm, "[transcoder %s] PSR aux error\n",
+ transcoder_name(cpu_transcoder));
+
+ intel_dp->psr.irq_aux_error = true;
+
+ /*
+ * If this interrupt is not masked it will keep firing so fast
+ * that it prevents the scheduled work from running.
+ * Also, after a PSR error, we don't want to arm PSR again, so
+ * there is no need to unmask the interrupt or to clear
+ * irq_aux_error.
+ */
+ val = intel_de_read(dev_priv, imr_reg);
+ val |= psr_irq_psr_error_bit_get(intel_dp);
+ intel_de_write(dev_priv, imr_reg, val);
+
+ schedule_work(&intel_dp->psr.work);
+ }
+}
+
+static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
+{
+ u8 alpm_caps = 0;
+
+ if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
+ &alpm_caps) != 1)
+ return false;
+ return alpm_caps & DP_ALPM_CAP;
+}
+
+static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ u8 val = 8; /* assume the worst if we can't read the value */
+
+ if (drm_dp_dpcd_readb(&intel_dp->aux,
+ DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
+ val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
+ else
+ drm_dbg_kms(&i915->drm,
+ "Unable to get sink synchronization latency, assuming 8 frames\n");
+ return val;
+}
+
+static void intel_dp_get_su_granularity(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ ssize_t r;
+ u16 w;
+ u8 y;
+
+ /* If the sink doesn't have specific granularity requirements, set legacy ones */
+ if (!(intel_dp->psr_dpcd[1] & DP_PSR2_SU_GRANULARITY_REQUIRED)) {
+ /* As PSR2 HW sends full lines, we do not care about x granularity */
+ w = 4;
+ y = 4;
+ goto exit;
+ }
+
+ r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &w, 2);
+ if (r != 2)
+ drm_dbg_kms(&i915->drm,
+ "Unable to read DP_PSR2_SU_X_GRANULARITY\n");
+ /*
+ * Spec says that if the value read is 0 the default granularity should
+ * be used instead.
+ */
+ if (r != 2 || w == 0)
+ w = 4;
+
+ r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_Y_GRANULARITY, &y, 1);
+ if (r != 1) {
+ drm_dbg_kms(&i915->drm,
+ "Unable to read DP_PSR2_SU_Y_GRANULARITY\n");
+ y = 4;
+ }
+ if (y == 0)
+ y = 1;
+
+exit:
+ intel_dp->psr.su_w_granularity = w;
+ intel_dp->psr.su_y_granularity = y;
+}
+
+void intel_psr_init_dpcd(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
+
+ drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
+ sizeof(intel_dp->psr_dpcd));
+
+ if (!intel_dp->psr_dpcd[0])
+ return;
+ drm_dbg_kms(&dev_priv->drm, "eDP panel supports PSR version %x\n",
+ intel_dp->psr_dpcd[0]);
+
+ if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR support not currently available for this panel\n");
+ return;
+ }
+
+ if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Panel lacks power state control, PSR cannot be enabled\n");
+ return;
+ }
+
+ intel_dp->psr.sink_support = true;
+ intel_dp->psr.sink_sync_latency =
+ intel_dp_get_sink_sync_latency(intel_dp);
+
+ if (DISPLAY_VER(dev_priv) >= 9 &&
+ (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) {
+ bool y_req = intel_dp->psr_dpcd[1] &
+ DP_PSR2_SU_Y_COORDINATE_REQUIRED;
+ bool alpm = intel_dp_get_alpm_status(intel_dp);
+
+ /*
+ * All panels that support PSR version 03h (PSR2 +
+ * Y-coordinate) can handle Y-coordinates in VSC, but we are
+ * only sure that it is going to be used when required by the
+ * panel. This way the panel is capable of doing selective
+ * updates without an aux frame sync.
+ *
+ * To support PSR version 02h and PSR version 03h panels
+ * without the Y-coordinate requirement we would need to
+ * enable GTC first.
+ */
+ intel_dp->psr.sink_psr2_support = y_req && alpm;
+ drm_dbg_kms(&dev_priv->drm, "PSR2 %ssupported\n",
+ intel_dp->psr.sink_psr2_support ? "" : "not ");
+
+ if (intel_dp->psr.sink_psr2_support) {
+ intel_dp->psr.colorimetry_support =
+ intel_dp_get_colorimetry_status(intel_dp);
+ intel_dp_get_su_granularity(intel_dp);
+ }
+ }
+}
+
+static void intel_psr_enable_sink(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ u8 dpcd_val = DP_PSR_ENABLE;
+
+ /* Enable ALPM at sink for psr2 */
+ if (intel_dp->psr.psr2_enabled) {
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
+ DP_ALPM_ENABLE |
+ DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE);
+
+ dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
+ } else {
+ if (intel_dp->psr.link_standby)
+ dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;
+
+ if (DISPLAY_VER(dev_priv) >= 8)
+ dpcd_val |= DP_PSR_CRC_VERIFICATION;
+ }
+
+ if (intel_dp->psr.req_psr2_sdp_prior_scanline)
+ dpcd_val |= DP_PSR_SU_REGION_SCANLINE_CAPTURE;
+
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);
+
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
+}
+
+static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
+{
+ struct intel_connector *connector = intel_dp->attached_connector;
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ u32 val = 0;
+
+ if (DISPLAY_VER(dev_priv) >= 11)
+ val |= EDP_PSR_TP4_TIME_0US;
+
+ if (dev_priv->params.psr_safest_params) {
+ val |= EDP_PSR_TP1_TIME_2500us;
+ val |= EDP_PSR_TP2_TP3_TIME_2500us;
+ goto check_tp3_sel;
+ }
+
+ if (connector->panel.vbt.psr.tp1_wakeup_time_us == 0)
+ val |= EDP_PSR_TP1_TIME_0us;
+ else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 100)
+ val |= EDP_PSR_TP1_TIME_100us;
+ else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 500)
+ val |= EDP_PSR_TP1_TIME_500us;
+ else
+ val |= EDP_PSR_TP1_TIME_2500us;
+
+ if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
+ val |= EDP_PSR_TP2_TP3_TIME_0us;
+ else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 100)
+ val |= EDP_PSR_TP2_TP3_TIME_100us;
+ else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 500)
+ val |= EDP_PSR_TP2_TP3_TIME_500us;
+ else
+ val |= EDP_PSR_TP2_TP3_TIME_2500us;
+
+check_tp3_sel:
+ if (intel_dp_source_supports_tps3(dev_priv) &&
+ drm_dp_tps3_supported(intel_dp->dpcd))
+ val |= EDP_PSR_TP1_TP3_SEL;
+ else
+ val |= EDP_PSR_TP1_TP2_SEL;
+
+ return val;
+}
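+
+/*
+ * Illustrative mapping for the VBT wakeup-time bucketing above (the value is
+ * a made-up example, not from any particular VBT): a tp1_wakeup_time_us of
+ * 80 falls into the "<= 100" branch and selects EDP_PSR_TP1_TIME_100us, the
+ * smallest programmable time at least as long as the requested one; anything
+ * above 500 us falls through to the 2500 us catch-all.
+ */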
+
+static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
+{
+ struct intel_connector *connector = intel_dp->attached_connector;
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ int idle_frames;
+
+ /* Let's use 6 as the minimum to cover all known cases including the
+ * off-by-one issue that HW has in some cases.
+ */
+ idle_frames = max(6, connector->panel.vbt.psr.idle_frames);
+ idle_frames = max(idle_frames, intel_dp->psr.sink_sync_latency + 1);
+
+ if (drm_WARN_ON(&dev_priv->drm, idle_frames > 0xf))
+ idle_frames = 0xf;
+
+ return idle_frames;
+}
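+
+/*
+ * Worked example for the idle-frame computation above (hypothetical values):
+ * with a VBT value of 2 idle frames and a sink_sync_latency of 3, the result
+ * is max(max(6, 2), 3 + 1) = 6 frames; the register field is 4 bits wide,
+ * hence the clamp to 0xf.
+ */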
+
+static void hsw_activate_psr1(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ u32 max_sleep_time = 0x1f;
+ u32 val = EDP_PSR_ENABLE;
+
+ val |= psr_compute_idle_frames(intel_dp) << EDP_PSR_IDLE_FRAME_SHIFT;
+
+ val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
+ if (IS_HASWELL(dev_priv))
+ val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
+
+ if (intel_dp->psr.link_standby)
+ val |= EDP_PSR_LINK_STANDBY;
+
+ val |= intel_psr1_get_tp_time(intel_dp);
+
+ if (DISPLAY_VER(dev_priv) >= 8)
+ val |= EDP_PSR_CRC_ENABLE;
+
+ val |= (intel_de_read(dev_priv, EDP_PSR_CTL(intel_dp->psr.transcoder)) &
+ EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK);
+ intel_de_write(dev_priv, EDP_PSR_CTL(intel_dp->psr.transcoder), val);
+}
+
+static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
+{
+ struct intel_connector *connector = intel_dp->attached_connector;
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ u32 val = 0;
+
+ if (dev_priv->params.psr_safest_params)
+ return EDP_PSR2_TP2_TIME_2500us;
+
+ if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
+ connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
+ val |= EDP_PSR2_TP2_TIME_50us;
+ else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
+ val |= EDP_PSR2_TP2_TIME_100us;
+ else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
+ val |= EDP_PSR2_TP2_TIME_500us;
+ else
+ val |= EDP_PSR2_TP2_TIME_2500us;
+
+ return val;
+}
+
+static void hsw_activate_psr2(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ u32 val = EDP_PSR2_ENABLE;
+
+ val |= psr_compute_idle_frames(intel_dp) << EDP_PSR2_IDLE_FRAME_SHIFT;
+
+ if (!IS_ALDERLAKE_P(dev_priv))
+ val |= EDP_SU_TRACK_ENABLE;
+
+ if (DISPLAY_VER(dev_priv) >= 10 && DISPLAY_VER(dev_priv) <= 12)
+ val |= EDP_Y_COORDINATE_ENABLE;
+
+ val |= EDP_PSR2_FRAME_BEFORE_SU(max_t(u8, intel_dp->psr.sink_sync_latency + 1, 2));
+ val |= intel_psr2_get_tp_time(intel_dp);
+
+ if (DISPLAY_VER(dev_priv) >= 12) {
+ if (intel_dp->psr.io_wake_lines < 9 &&
+ intel_dp->psr.fast_wake_lines < 9)
+ val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
+ else
+ val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_3;
+ }
+
+ /* Wa_22012278275:adl-p */
+ if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_E0)) {
+ static const u8 map[] = {
+ 2, /* 5 lines */
+ 1, /* 6 lines */
+ 0, /* 7 lines */
+ 3, /* 8 lines */
+ 6, /* 9 lines */
+ 5, /* 10 lines */
+ 4, /* 11 lines */
+ 7, /* 12 lines */
+ };
+ /*
+ * Still using the default IO_BUFFER_WAKE and FAST_WAKE, see
+ * comments below for more information
+ */
+ u32 tmp;
+
+ tmp = map[intel_dp->psr.io_wake_lines - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
+ tmp = tmp << TGL_EDP_PSR2_IO_BUFFER_WAKE_SHIFT;
+ val |= tmp;
+
+ tmp = map[intel_dp->psr.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
+ tmp = tmp << TGL_EDP_PSR2_FAST_WAKE_MIN_SHIFT;
+ val |= tmp;
+ } else if (DISPLAY_VER(dev_priv) >= 12) {
+ val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
+ val |= TGL_EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
+ } else if (DISPLAY_VER(dev_priv) >= 9) {
+ val |= EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
+ val |= EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
+ }
+
+ if (intel_dp->psr.req_psr2_sdp_prior_scanline)
+ val |= EDP_PSR2_SU_SDP_SCANLINE;
+
+ if (intel_dp->psr.psr2_sel_fetch_enabled) {
+ u32 tmp;
+
+ /* Wa_1408330847 */
+ if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
+ intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
+ DIS_RAM_BYPASS_PSR2_MAN_TRACK,
+ DIS_RAM_BYPASS_PSR2_MAN_TRACK);
+
+ tmp = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder));
+ drm_WARN_ON(&dev_priv->drm, !(tmp & PSR2_MAN_TRK_CTL_ENABLE));
+ } else if (HAS_PSR2_SEL_FETCH(dev_priv)) {
+ intel_de_write(dev_priv,
+ PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder), 0);
+ }
+
+ /*
+ * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec
+ * recommends keeping this bit unset while PSR2 is enabled.
+ */
+ intel_de_write(dev_priv, EDP_PSR_CTL(intel_dp->psr.transcoder), 0);
+
+ intel_de_write(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder), val);
+}
+
+static bool
+transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder trans)
+{
+ if (IS_ALDERLAKE_P(dev_priv))
+ return trans == TRANSCODER_A || trans == TRANSCODER_B;
+ else if (DISPLAY_VER(dev_priv) >= 12)
+ return trans == TRANSCODER_A;
+ else
+ return trans == TRANSCODER_EDP;
+}
+
+static u32 intel_get_frame_time_us(const struct intel_crtc_state *cstate)
+{
+ if (!cstate || !cstate->hw.active)
+ return 0;
+
+ return DIV_ROUND_UP(1000 * 1000,
+ drm_mode_vrefresh(&cstate->hw.adjusted_mode));
+}
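+
+/*
+ * Worked example for the frame time above: for a 60 Hz mode,
+ * DIV_ROUND_UP(1000 * 1000, 60) = 16667 us per frame; an inactive CRTC
+ * reports 0, so callers multiplying by a frame count get a zero delay.
+ */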
+
+static void psr2_program_idle_frames(struct intel_dp *intel_dp,
+ u32 idle_frames)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ u32 val;
+
+ idle_frames <<= EDP_PSR2_IDLE_FRAME_SHIFT;
+ val = intel_de_read(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder));
+ val &= ~EDP_PSR2_IDLE_FRAME_MASK;
+ val |= idle_frames;
+ intel_de_write(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder), val);
+}
+
+static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ psr2_program_idle_frames(intel_dp, 0);
+ intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);
+}
+
+static void tgl_psr2_disable_dc3co(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
+ psr2_program_idle_frames(intel_dp, psr_compute_idle_frames(intel_dp));
+}
+
+static void tgl_dc3co_disable_work(struct work_struct *work)
+{
+ struct intel_dp *intel_dp =
+ container_of(work, typeof(*intel_dp), psr.dc3co_work.work);
+
+ mutex_lock(&intel_dp->psr.lock);
+ /* If delayed work is pending, it is not idle */
+ if (delayed_work_pending(&intel_dp->psr.dc3co_work))
+ goto unlock;
+
+ tgl_psr2_disable_dc3co(intel_dp);
+unlock:
+ mutex_unlock(&intel_dp->psr.lock);
+}
+
+static void tgl_disallow_dc3co_on_psr2_exit(struct intel_dp *intel_dp)
+{
+ if (!intel_dp->psr.dc3co_exitline)
+ return;
+
+ cancel_delayed_work(&intel_dp->psr.dc3co_work);
+ /* Before PSR2 exit disallow dc3co */
+ tgl_psr2_disable_dc3co(intel_dp);
+}
+
+static bool
+dc3co_is_pipe_port_compatible(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ enum port port = dig_port->base.port;
+
+ if (IS_ALDERLAKE_P(dev_priv))
+ return pipe <= PIPE_B && port <= PORT_B;
+ else
+ return pipe == PIPE_A && port == PORT_A;
+}
+
+static void
+tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state)
+{
+ const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ u32 exit_scanlines;
+
+ /*
+ * FIXME: Due to the changed sequence of activating/deactivating DC3CO,
+ * disable DC3CO until the changed dc3co activating/deactivating sequence
+ * is applied. B.Specs:49196
+ */
+ return;
+
+ /*
+ * DMC's DC3CO exit mechanism has an issue with Selective Fetch.
+ * TODO: when the issue is addressed, this restriction should be removed.
+ */
+ if (crtc_state->enable_psr2_sel_fetch)
+ return;
+
+ if (!(dev_priv->display.dmc.allowed_dc_mask & DC_STATE_EN_DC3CO))
+ return;
+
+ if (!dc3co_is_pipe_port_compatible(intel_dp, crtc_state))
+ return;
+
+ /* Wa_16011303918:adl-p */
+ if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
+ return;
+
+ /*
+ * DC3CO Exit time 200us B.Spec 49196
+ * PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1
+ */
+ exit_scanlines =
+ intel_usecs_to_scanlines(&crtc_state->uapi.adjusted_mode, 200) + 1;
+
+ if (drm_WARN_ON(&dev_priv->drm, exit_scanlines > crtc_vdisplay))
+ return;
+
+ crtc_state->dc3co_exitline = crtc_vdisplay - exit_scanlines;
+}
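+
+/*
+ * Worked exitline example for the math above (illustrative mode, not from
+ * Bspec): assuming 1920x1080 with an htotal of 2200 and a 148.5 MHz pixel
+ * clock, one scanline lasts ~14.8 us, so 200 us rounds up to 14 lines and
+ * exit_scanlines = 14 + 1 = 15, giving dc3co_exitline = 1080 - 15 = 1065.
+ * (The early return above keeps this path disabled for now.)
+ */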
+
+static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ if (!dev_priv->params.enable_psr2_sel_fetch &&
+ intel_dp->psr.debug != I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR2 sel fetch not enabled, disabled by parameter\n");
+ return false;
+ }
+
+ if (crtc_state->uapi.async_flip) {
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR2 sel fetch not enabled, async flip enabled\n");
+ return false;
+ }
+
+ /* Wa_14010254185 Wa_14010103792 */
+ if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR2 sel fetch not enabled, missing the implementation of WAs\n");
+ return false;
+ }
+
+ return crtc_state->enable_psr2_sel_fetch = true;
+}
+
+static bool psr2_granularity_check(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ const int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
+ const int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
+ u16 y_granularity = 0;
+
+ /* PSR2 HW only sends full lines, so we only need to validate the width */
+ if (crtc_hdisplay % intel_dp->psr.su_w_granularity)
+ return false;
+
+ if (crtc_vdisplay % intel_dp->psr.su_y_granularity)
+ return false;
+
+ /* HW tracking is only aligned to 4 lines */
+ if (!crtc_state->enable_psr2_sel_fetch)
+ return intel_dp->psr.su_y_granularity == 4;
+
+ /*
+ * adl_p has 1 line granularity. For other platforms with SW tracking we
+ * can adjust the y coordinates to match the sink requirement if it is
+ * a multiple of 4.
+ */
+ if (IS_ALDERLAKE_P(dev_priv))
+ y_granularity = intel_dp->psr.su_y_granularity;
+ else if (intel_dp->psr.su_y_granularity <= 2)
+ y_granularity = 4;
+ else if ((intel_dp->psr.su_y_granularity % 4) == 0)
+ y_granularity = intel_dp->psr.su_y_granularity;
+
+ if (y_granularity == 0 || crtc_vdisplay % y_granularity)
+ return false;
+
+ crtc_state->su_y_granularity = y_granularity;
+ return true;
+}
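+
+/*
+ * Worked example for the granularity check above (hypothetical sink): a sink
+ * reporting su_y_granularity = 2 with selective fetch enabled gets rounded up
+ * to y_granularity = 4, so a 1080-line mode passes (1080 % 4 == 0) while a
+ * 1050-line mode would be rejected and fall back from PSR2.
+ */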
+
+static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state)
+{
+ const struct drm_display_mode *adjusted_mode = &crtc_state->uapi.adjusted_mode;
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ u32 hblank_total, hblank_ns, req_ns;
+
+ hblank_total = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
+ hblank_ns = div_u64(1000000ULL * hblank_total, adjusted_mode->crtc_clock);
+
+ /* From spec: ((60 / number of lanes) + 11) * 1000 / symbol clock frequency MHz */
+ req_ns = ((60 / crtc_state->lane_count) + 11) * 1000 / (crtc_state->port_clock / 1000);
+
+ if ((hblank_ns - req_ns) > 100)
+ return true;
+
+ /* Not supported <13 / Wa_22012279113:adl-p */
+ if (DISPLAY_VER(dev_priv) <= 13 || intel_dp->edp_dpcd[0] < DP_EDP_14b)
+ return false;
+
+ crtc_state->req_psr2_sdp_prior_scanline = true;
+ return true;
+}
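+
+/*
+ * Worked example for the hblank check above (illustrative link config): with
+ * 280 hblank pixels at a 148.5 MHz pixel clock, hblank_ns ~= 1885 ns; with
+ * 4 lanes and a 270000 kHz port clock, req_ns = ((60 / 4) + 11) * 1000 / 270
+ * ~= 96 ns, so the margin is well above 100 ns and no SDP-prior-scanline
+ * indication is needed.
+ */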
+
+static bool _compute_psr2_wake_times(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ int io_wake_lines, io_wake_time, fast_wake_lines, fast_wake_time;
+ u8 max_wake_lines;
+
+ if (DISPLAY_VER(i915) >= 12) {
+ io_wake_time = 42;
+ /*
+ * According to Bspec it's 42us, but based on testing
+ * it is not enough -> use 45 us.
+ */
+ fast_wake_time = 45;
+ max_wake_lines = 12;
+ } else {
+ io_wake_time = 50;
+ fast_wake_time = 32;
+ max_wake_lines = 8;
+ }
+
+ io_wake_lines = intel_usecs_to_scanlines(
+ &crtc_state->hw.adjusted_mode, io_wake_time);
+ fast_wake_lines = intel_usecs_to_scanlines(
+ &crtc_state->hw.adjusted_mode, fast_wake_time);
+
+ if (io_wake_lines > max_wake_lines ||
+ fast_wake_lines > max_wake_lines)
+ return false;
+
+ if (i915->params.psr_safest_params)
+ io_wake_lines = fast_wake_lines = max_wake_lines;
+
+ /* According to Bspec the lower limit should be set as 7 lines. */
+ intel_dp->psr.io_wake_lines = max(io_wake_lines, 7);
+ intel_dp->psr.fast_wake_lines = max(fast_wake_lines, 7);
+
+ return true;
+}
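+
+/*
+ * Worked wake-time example for the function above (illustrative mode with
+ * ~14.8 us scanlines): on display version 12+ the 42 us IO wake time spans
+ * 3 lines and the 45 us fast wake time spans 4, both under the 12-line
+ * limit; the Bspec floor then raises each to the 7-line minimum.
+ */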
+
+static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
+ int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
+ int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;
+
+ if (!intel_dp->psr.sink_psr2_support)
+ return false;
+
+ /* JSL and EHL only support eDP 1.3 */
+ if (IS_JSL_EHL(dev_priv)) {
+ drm_dbg_kms(&dev_priv->drm, "PSR2 not supported by phy\n");
+ return false;
+ }
+
+ /* Wa_16011181250 */
+ if (IS_ROCKETLAKE(dev_priv) || IS_ALDERLAKE_S(dev_priv) ||
+ IS_DG2(dev_priv)) {
+ drm_dbg_kms(&dev_priv->drm, "PSR2 is defeatured for this platform\n");
+ return false;
+ }
+
+ if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
+ drm_dbg_kms(&dev_priv->drm, "PSR2 not completely functional in this stepping\n");
+ return false;
+ }
+
+ if (!transcoder_has_psr2(dev_priv, crtc_state->cpu_transcoder)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR2 not supported in transcoder %s\n",
+ transcoder_name(crtc_state->cpu_transcoder));
+ return false;
+ }
+
+ if (!psr2_global_enabled(intel_dp)) {
+ drm_dbg_kms(&dev_priv->drm, "PSR2 disabled by flag\n");
+ return false;
+ }
+
+ /*
+ * DSC and PSR2 cannot be enabled simultaneously. If a requested
+ * resolution requires DSC to be enabled, priority is given to DSC
+ * over PSR2.
+ */
+ if (crtc_state->dsc.compression_enable) {
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR2 cannot be enabled since DSC is enabled\n");
+ return false;
+ }
+
+ if (crtc_state->crc_enabled) {
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR2 not enabled because it would inhibit pipe CRC calculation\n");
+ return false;
+ }
+
+ if (DISPLAY_VER(dev_priv) >= 12) {
+ psr_max_h = 5120;
+ psr_max_v = 3200;
+ max_bpp = 30;
+ } else if (DISPLAY_VER(dev_priv) >= 10) {
+ psr_max_h = 4096;
+ psr_max_v = 2304;
+ max_bpp = 24;
+ } else if (DISPLAY_VER(dev_priv) == 9) {
+ psr_max_h = 3640;
+ psr_max_v = 2304;
+ max_bpp = 24;
+ }
+
+ if (crtc_state->pipe_bpp > max_bpp) {
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR2 not enabled, pipe bpp %d > max supported %d\n",
+ crtc_state->pipe_bpp, max_bpp);
+ return false;
+ }
+
+ /* Wa_16011303918:adl-p */
+ if (crtc_state->vrr.enable &&
+ IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR2 not enabled, not compatible with HW stepping + VRR\n");
+ return false;
+ }
+
+ if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR2 not enabled, PSR2 SDP indication do not fit in hblank\n");
+ return false;
+ }
+
+ if (!_compute_psr2_wake_times(intel_dp, crtc_state)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR2 not enabled, Unable to use long enough wake times\n");
+ return false;
+ }
+
+ if (HAS_PSR2_SEL_FETCH(dev_priv)) {
+ if (!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
+ !HAS_PSR_HW_TRACKING(dev_priv)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR2 not enabled, selective fetch not valid and no HW tracking available\n");
+ return false;
+ }
+ }
+
+ /* Wa_2209313811 */
+ if (!crtc_state->enable_psr2_sel_fetch &&
+ IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0)) {
+ drm_dbg_kms(&dev_priv->drm, "PSR2 HW tracking is not supported this Display stepping\n");
+ goto unsupported;
+ }
+
+ if (!psr2_granularity_check(intel_dp, crtc_state)) {
+ drm_dbg_kms(&dev_priv->drm, "PSR2 not enabled, SU granularity not compatible\n");
+ goto unsupported;
+ }
+
+ if (!crtc_state->enable_psr2_sel_fetch &&
+ (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
+ crtc_hdisplay, crtc_vdisplay,
+ psr_max_h, psr_max_v);
+ goto unsupported;
+ }
+
+ tgl_dc3co_exitline_compute_config(intel_dp, crtc_state);
+ return true;
+
+unsupported:
+ crtc_state->enable_psr2_sel_fetch = false;
+ return false;
+}
+
+void intel_psr_compute_config(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+ int psr_setup_time;
+
+ /*
+ * Current PSR panels don't work reliably with VRR enabled, so if
+ * VRR is enabled, do not enable PSR.
+ */
+ if (crtc_state->vrr.enable)
+ return;
+
+ if (!CAN_PSR(intel_dp))
+ return;
+
+ if (!psr_global_enabled(intel_dp)) {
+ drm_dbg_kms(&dev_priv->drm, "PSR disabled by flag\n");
+ return;
+ }
+
+ if (intel_dp->psr.sink_not_reliable) {
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR sink implementation is not reliable\n");
+ return;
+ }
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR condition failed: Interlaced mode enabled\n");
+ return;
+ }
+
+ psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
+ if (psr_setup_time < 0) {
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR condition failed: Invalid PSR setup time (0x%02x)\n",
+ intel_dp->psr_dpcd[1]);
+ return;
+ }
+
+ if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
+ adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR condition failed: PSR setup time (%d us) too long\n",
+ psr_setup_time);
+ return;
+ }
+
+ crtc_state->has_psr = true;
+ crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
+
+ crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
+ intel_dp_compute_psr_vsc_sdp(intel_dp, crtc_state, conn_state,
+ &crtc_state->psr_vsc);
+}
+
+void intel_psr_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ struct intel_dp *intel_dp;
+ u32 val;
+
+ if (!dig_port)
+ return;
+
+ intel_dp = &dig_port->dp;
+ if (!CAN_PSR(intel_dp))
+ return;
+
+ mutex_lock(&intel_dp->psr.lock);
+ if (!intel_dp->psr.enabled)
+ goto unlock;
+
+ /*
+ * Not possible to read the EDP_PSR/PSR2_CTL registers as they get
+ * enabled/disabled because of frontbuffer tracking and others.
+ */
+ pipe_config->has_psr = true;
+ pipe_config->has_psr2 = intel_dp->psr.psr2_enabled;
+ pipe_config->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
+
+ if (!intel_dp->psr.psr2_enabled)
+ goto unlock;
+
+ if (HAS_PSR2_SEL_FETCH(dev_priv)) {
+ val = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder));
+ if (val & PSR2_MAN_TRK_CTL_ENABLE)
+ pipe_config->enable_psr2_sel_fetch = true;
+ }
+
+ if (DISPLAY_VER(dev_priv) >= 12) {
+ val = intel_de_read(dev_priv, EXITLINE(intel_dp->psr.transcoder));
+ val &= EXITLINE_MASK;
+ pipe_config->dc3co_exitline = val;
+ }
+unlock:
+ mutex_unlock(&intel_dp->psr.lock);
+}
+
+static void intel_psr_activate(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ enum transcoder transcoder = intel_dp->psr.transcoder;
+
+ if (transcoder_has_psr2(dev_priv, transcoder))
+ drm_WARN_ON(&dev_priv->drm,
+ intel_de_read(dev_priv, EDP_PSR2_CTL(transcoder)) & EDP_PSR2_ENABLE);
+
+ drm_WARN_ON(&dev_priv->drm,
+ intel_de_read(dev_priv, EDP_PSR_CTL(transcoder)) & EDP_PSR_ENABLE);
+ drm_WARN_ON(&dev_priv->drm, intel_dp->psr.active);
+ lockdep_assert_held(&intel_dp->psr.lock);
+
+ /* psr1 and psr2 are mutually exclusive. */
+ if (intel_dp->psr.psr2_enabled)
+ hsw_activate_psr2(intel_dp);
+ else
+ hsw_activate_psr1(intel_dp);
+
+ intel_dp->psr.active = true;
+}
+
+static u32 wa_16013835468_bit_get(struct intel_dp *intel_dp)
+{
+ switch (intel_dp->psr.pipe) {
+ case PIPE_A:
+ return LATENCY_REPORTING_REMOVED_PIPE_A;
+ case PIPE_B:
+ return LATENCY_REPORTING_REMOVED_PIPE_B;
+ case PIPE_C:
+ return LATENCY_REPORTING_REMOVED_PIPE_C;
+ default:
+ MISSING_CASE(intel_dp->psr.pipe);
+ return 0;
+ }
+}
+
+static void intel_psr_enable_source(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
+ u32 mask;
+
+ /*
+ * Per spec: avoid continuous PSR exit by masking MEMUP and HPD. Also
+ * mask LPSP to avoid a dependency on other drivers that might block
+ * runtime_pm, besides preventing other hw tracking issues, now that
+ * we can rely on frontbuffer tracking.
+ */
+ mask = EDP_PSR_DEBUG_MASK_MEMUP |
+ EDP_PSR_DEBUG_MASK_HPD |
+ EDP_PSR_DEBUG_MASK_LPSP |
+ EDP_PSR_DEBUG_MASK_MAX_SLEEP;
+
+ if (DISPLAY_VER(dev_priv) < 11)
+ mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
+
+ intel_de_write(dev_priv, EDP_PSR_DEBUG(intel_dp->psr.transcoder),
+ mask);
+
+ psr_irq_control(intel_dp);
+
+ if (intel_dp->psr.dc3co_exitline) {
+ u32 val;
+
+ /*
+ * TODO: if future platforms support DC3CO in more than one
+ * transcoder, EXITLINE will need to be unset when disabling PSR
+ */
+ val = intel_de_read(dev_priv, EXITLINE(cpu_transcoder));
+ val &= ~EXITLINE_MASK;
+ val |= intel_dp->psr.dc3co_exitline << EXITLINE_SHIFT;
+ val |= EXITLINE_ENABLE;
+ intel_de_write(dev_priv, EXITLINE(cpu_transcoder), val);
+ }
+
+ if (HAS_PSR_HW_TRACKING(dev_priv) && HAS_PSR2_SEL_FETCH(dev_priv))
+ intel_de_rmw(dev_priv, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING,
+ intel_dp->psr.psr2_sel_fetch_enabled ?
+ IGNORE_PSR2_HW_TRACKING : 0);
+
+ if (intel_dp->psr.psr2_enabled) {
+ if (DISPLAY_VER(dev_priv) == 9)
+ intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0,
+ PSR2_VSC_ENABLE_PROG_HEADER |
+ PSR2_ADD_VERTICAL_LINE_COUNT);
+
+ /*
+ * Wa_16014451276:adlp
+ * All supported adlp panels have 1-based X granularity; this may
+ * cause issues if non-supported panels are used.
+ */
+ if (IS_ALDERLAKE_P(dev_priv))
+ intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0,
+ ADLP_1_BASED_X_GRANULARITY);
+
+ /* Wa_16011168373:adl-p */
+ if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
+ intel_de_rmw(dev_priv,
+ TRANS_SET_CONTEXT_LATENCY(intel_dp->psr.transcoder),
+ TRANS_SET_CONTEXT_LATENCY_MASK,
+ TRANS_SET_CONTEXT_LATENCY_VALUE(1));
+
+ /* Wa_16012604467:adlp */
+ if (IS_ALDERLAKE_P(dev_priv))
+ intel_de_rmw(dev_priv, CLKGATE_DIS_MISC, 0,
+ CLKGATE_DIS_MISC_DMASC_GATING_DIS);
+
+ /* Wa_16013835468:tgl[b0+], dg1 */
+ if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_B0, STEP_FOREVER) ||
+ IS_DG1(dev_priv)) {
+ u16 vtotal, vblank;
+
+ vtotal = crtc_state->uapi.adjusted_mode.crtc_vtotal -
+ crtc_state->uapi.adjusted_mode.crtc_vdisplay;
+ vblank = crtc_state->uapi.adjusted_mode.crtc_vblank_end -
+ crtc_state->uapi.adjusted_mode.crtc_vblank_start;
+ if (vblank > vtotal)
+ intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, 0,
+ wa_16013835468_bit_get(intel_dp));
+ }
+ }
+}
+
+static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ u32 val;
+
+ /*
+ * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
+ * will still keep the error set even after the reset done in the
+ * irq_preinstall and irq_uninstall hooks.
+ * Enabling in this situation causes the screen to freeze the first
+ * time that the PSR HW tries to activate, so let's keep PSR disabled
+ * to avoid any rendering problems.
+ */
+ if (DISPLAY_VER(dev_priv) >= 12)
+ val = intel_de_read(dev_priv,
+ TRANS_PSR_IIR(intel_dp->psr.transcoder));
+ else
+ val = intel_de_read(dev_priv, EDP_PSR_IIR);
+ val &= psr_irq_psr_error_bit_get(intel_dp);
+ if (val) {
+ intel_dp->psr.sink_not_reliable = true;
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR interruption error set, not enabling PSR\n");
+ return false;
+ }
+
+ return true;
+}
+
+static void intel_psr_enable_locked(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
+ struct intel_encoder *encoder = &dig_port->base;
+ u32 val;
+
+ drm_WARN_ON(&dev_priv->drm, intel_dp->psr.enabled);
+
+ intel_dp->psr.psr2_enabled = crtc_state->has_psr2;
+ intel_dp->psr.busy_frontbuffer_bits = 0;
+ intel_dp->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
+ intel_dp->psr.transcoder = crtc_state->cpu_transcoder;
+ /* DC5/DC6 requires at least 6 idle frames */
+ val = usecs_to_jiffies(intel_get_frame_time_us(crtc_state) * 6);
+ intel_dp->psr.dc3co_exit_delay = val;
+ intel_dp->psr.dc3co_exitline = crtc_state->dc3co_exitline;
+ intel_dp->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch;
+ intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
+ intel_dp->psr.req_psr2_sdp_prior_scanline =
+ crtc_state->req_psr2_sdp_prior_scanline;
+
+ if (!psr_interrupt_error_check(intel_dp))
+ return;
+
+ drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n",
+ intel_dp->psr.psr2_enabled ? "2" : "1");
+ intel_write_dp_vsc_sdp(encoder, crtc_state, &crtc_state->psr_vsc);
+ intel_snps_phy_update_psr_power_state(dev_priv, phy, true);
+ intel_psr_enable_sink(intel_dp);
+ intel_psr_enable_source(intel_dp, crtc_state);
+ intel_dp->psr.enabled = true;
+ intel_dp->psr.paused = false;
+
+ intel_psr_activate(intel_dp);
+}
+
+static void intel_psr_exit(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ u32 val;
+
+ if (!intel_dp->psr.active) {
+ if (transcoder_has_psr2(dev_priv, intel_dp->psr.transcoder)) {
+ val = intel_de_read(dev_priv,
+ EDP_PSR2_CTL(intel_dp->psr.transcoder));
+ drm_WARN_ON(&dev_priv->drm, val & EDP_PSR2_ENABLE);
+ }
+
+ val = intel_de_read(dev_priv,
+ EDP_PSR_CTL(intel_dp->psr.transcoder));
+ drm_WARN_ON(&dev_priv->drm, val & EDP_PSR_ENABLE);
+
+ return;
+ }
+
+ if (intel_dp->psr.psr2_enabled) {
+ tgl_disallow_dc3co_on_psr2_exit(intel_dp);
+ val = intel_de_read(dev_priv,
+ EDP_PSR2_CTL(intel_dp->psr.transcoder));
+ drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR2_ENABLE));
+ val &= ~EDP_PSR2_ENABLE;
+ intel_de_write(dev_priv,
+ EDP_PSR2_CTL(intel_dp->psr.transcoder), val);
+ } else {
+ val = intel_de_read(dev_priv,
+ EDP_PSR_CTL(intel_dp->psr.transcoder));
+ drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR_ENABLE));
+ val &= ~EDP_PSR_ENABLE;
+ intel_de_write(dev_priv,
+ EDP_PSR_CTL(intel_dp->psr.transcoder), val);
+ }
+ intel_dp->psr.active = false;
+}
+
+static void intel_psr_wait_exit_locked(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ i915_reg_t psr_status;
+ u32 psr_status_mask;
+
+ if (intel_dp->psr.psr2_enabled) {
+ psr_status = EDP_PSR2_STATUS(intel_dp->psr.transcoder);
+ psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
+ } else {
+ psr_status = EDP_PSR_STATUS(intel_dp->psr.transcoder);
+ psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
+ }
+
+ /* Wait till PSR is idle */
+ if (intel_de_wait_for_clear(dev_priv, psr_status,
+ psr_status_mask, 2000))
+ drm_err(&dev_priv->drm, "Timed out waiting PSR idle state\n");
+}
+
+static void intel_psr_disable_locked(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ enum phy phy = intel_port_to_phy(dev_priv,
+ dp_to_dig_port(intel_dp)->base.port);
+
+ lockdep_assert_held(&intel_dp->psr.lock);
+
+ if (!intel_dp->psr.enabled)
+ return;
+
+ drm_dbg_kms(&dev_priv->drm, "Disabling PSR%s\n",
+ intel_dp->psr.psr2_enabled ? "2" : "1");
+
+ intel_psr_exit(intel_dp);
+ intel_psr_wait_exit_locked(intel_dp);
+
+ /* Wa_1408330847 */
+ if (intel_dp->psr.psr2_sel_fetch_enabled &&
+ IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
+ intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
+ DIS_RAM_BYPASS_PSR2_MAN_TRACK, 0);
+
+ if (intel_dp->psr.psr2_enabled) {
+ /* Wa_16011168373:adl-p */
+ if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
+ intel_de_rmw(dev_priv,
+ TRANS_SET_CONTEXT_LATENCY(intel_dp->psr.transcoder),
+ TRANS_SET_CONTEXT_LATENCY_MASK, 0);
+
+ /* Wa_16012604467:adlp */
+ if (IS_ALDERLAKE_P(dev_priv))
+ intel_de_rmw(dev_priv, CLKGATE_DIS_MISC,
+ CLKGATE_DIS_MISC_DMASC_GATING_DIS, 0);
+
+ /* Wa_16013835468:tgl[b0+], dg1 */
+ if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_B0, STEP_FOREVER) ||
+ IS_DG1(dev_priv))
+ intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
+ wa_16013835468_bit_get(intel_dp), 0);
+ }
+
+ intel_snps_phy_update_psr_power_state(dev_priv, phy, false);
+
+ /* Disable PSR on Sink */
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
+
+ if (intel_dp->psr.psr2_enabled)
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, 0);
+
+ intel_dp->psr.enabled = false;
+ intel_dp->psr.psr2_enabled = false;
+ intel_dp->psr.psr2_sel_fetch_enabled = false;
+ intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
+}
+
+/**
+ * intel_psr_disable - Disable PSR
+ * @intel_dp: Intel DP
+ * @old_crtc_state: old CRTC state
+ *
+ * This function needs to be called before disabling pipe.
+ */
+void intel_psr_disable(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *old_crtc_state)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ if (!old_crtc_state->has_psr)
+ return;
+
+ if (drm_WARN_ON(&dev_priv->drm, !CAN_PSR(intel_dp)))
+ return;
+
+ mutex_lock(&intel_dp->psr.lock);
+
+ intel_psr_disable_locked(intel_dp);
+
+ mutex_unlock(&intel_dp->psr.lock);
+ cancel_work_sync(&intel_dp->psr.work);
+ cancel_delayed_work_sync(&intel_dp->psr.dc3co_work);
+}
+
+/**
+ * intel_psr_pause - Pause PSR
+ * @intel_dp: Intel DP
+ *
+ * This function needs to be called after enabling PSR.
+ */
+void intel_psr_pause(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_psr *psr = &intel_dp->psr;
+
+ if (!CAN_PSR(intel_dp))
+ return;
+
+ mutex_lock(&psr->lock);
+
+ if (!psr->enabled) {
+ mutex_unlock(&psr->lock);
+ return;
+ }
+
+ /* If we ever hit this, we will need to add refcount to pause/resume */
+ drm_WARN_ON(&dev_priv->drm, psr->paused);
+
+ intel_psr_exit(intel_dp);
+ intel_psr_wait_exit_locked(intel_dp);
+ psr->paused = true;
+
+ mutex_unlock(&psr->lock);
+
+ cancel_work_sync(&psr->work);
+ cancel_delayed_work_sync(&psr->dc3co_work);
+}
+
+/**
+ * intel_psr_resume - Resume PSR
+ * @intel_dp: Intel DP
+ *
+ * This function needs to be called after pausing PSR.
+ */
+void intel_psr_resume(struct intel_dp *intel_dp)
+{
+ struct intel_psr *psr = &intel_dp->psr;
+
+ if (!CAN_PSR(intel_dp))
+ return;
+
+ mutex_lock(&psr->lock);
+
+ if (!psr->paused)
+ goto unlock;
+
+ psr->paused = false;
+ intel_psr_activate(intel_dp);
+
+unlock:
+ mutex_unlock(&psr->lock);
+}
+
+static u32 man_trk_ctl_enable_bit_get(struct drm_i915_private *dev_priv)
+{
+ return IS_ALDERLAKE_P(dev_priv) ? 0 : PSR2_MAN_TRK_CTL_ENABLE;
+}
+
+static u32 man_trk_ctl_single_full_frame_bit_get(struct drm_i915_private *dev_priv)
+{
+ return IS_ALDERLAKE_P(dev_priv) ?
+ ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME :
+ PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
+}
+
+static u32 man_trk_ctl_partial_frame_bit_get(struct drm_i915_private *dev_priv)
+{
+ return IS_ALDERLAKE_P(dev_priv) ?
+ ADLP_PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE :
+ PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
+}
+
+static u32 man_trk_ctl_continuos_full_frame(struct drm_i915_private *dev_priv)
+{
+ return IS_ALDERLAKE_P(dev_priv) ?
+ ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME :
+ PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME;
+}
+
+static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ if (intel_dp->psr.psr2_sel_fetch_enabled)
+ intel_de_write(dev_priv,
+ PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder),
+ man_trk_ctl_enable_bit_get(dev_priv) |
+ man_trk_ctl_partial_frame_bit_get(dev_priv) |
+ man_trk_ctl_single_full_frame_bit_get(dev_priv));
+
+ /*
+ * Display WA #0884: skl+
+ * This documented WA for bxt can be safely applied
+ * broadly so we can force HW tracking to exit PSR
+ * instead of disabling and re-enabling.
+ * The workaround tells us to write 0 to CUR_SURFLIVE_A,
+ * but it makes more sense to write to the currently
+ * active pipe.
+ *
+ * This workaround does not exist for platforms with display 10
+ * or newer, but testing proved that it works up to display 13;
+ * for anything newer, testing will be needed.
+ */
+ intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
+}
+
+void intel_psr2_disable_plane_sel_fetch(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+
+ if (!crtc_state->enable_psr2_sel_fetch)
+ return;
+
+ intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id), 0);
+}
+
+void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ int color_plane)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+ const struct drm_rect *clip;
+ u32 val;
+ int x, y;
+
+ if (!crtc_state->enable_psr2_sel_fetch)
+ return;
+
+ if (plane->id == PLANE_CURSOR) {
+ intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id),
+ plane_state->ctl);
+ return;
+ }
+
+ clip = &plane_state->psr2_sel_fetch_area;
+
+ val = (clip->y1 + plane_state->uapi.dst.y1) << 16;
+ val |= plane_state->uapi.dst.x1;
+ intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_POS(pipe, plane->id), val);
+
+ x = plane_state->view.color_plane[color_plane].x;
+
+ /*
+ * From Bspec: UV surface Start Y Position = half of Y plane Y
+ * start position.
+ */
+ if (!color_plane)
+ y = plane_state->view.color_plane[color_plane].y + clip->y1;
+ else
+ y = plane_state->view.color_plane[color_plane].y + clip->y1 / 2;
+
+ val = y << 16 | x;
+
+ intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_OFFSET(pipe, plane->id),
+ val);
+
+ /* Sizes are 0 based */
+ val = (drm_rect_height(clip) - 1) << 16;
+ val |= (drm_rect_width(&plane_state->uapi.src) >> 16) - 1;
+ intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_SIZE(pipe, plane->id), val);
+
+ intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id),
+ PLANE_SEL_FETCH_CTL_ENABLE);
+}
+
+void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_encoder *encoder;
+
+ if (!crtc_state->enable_psr2_sel_fetch)
+ return;
+
+ for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
+ crtc_state->uapi.encoder_mask) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ lockdep_assert_held(&intel_dp->psr.lock);
+ if (intel_dp->psr.psr2_sel_fetch_cff_enabled)
+ return;
+ break;
+ }
+
+ intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(crtc_state->cpu_transcoder),
+ crtc_state->psr2_man_track_ctl);
+}
+
+static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
+ struct drm_rect *clip, bool full_update)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 val = man_trk_ctl_enable_bit_get(dev_priv);
+
+ /* SF partial frame enable has to be set even on full update */
+ val |= man_trk_ctl_partial_frame_bit_get(dev_priv);
+
+ if (full_update) {
+ /*
+ * Not applying Wa_14014971508:adlp as we do not support the
+ * feature that requires this workaround.
+ */
+ val |= man_trk_ctl_single_full_frame_bit_get(dev_priv);
+ goto exit;
+ }
+
+ if (clip->y1 == -1)
+ goto exit;
+
+ if (IS_ALDERLAKE_P(dev_priv)) {
+ val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1);
+ val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 - 1);
+ } else {
+ drm_WARN_ON(crtc_state->uapi.crtc->dev, clip->y1 % 4 || clip->y2 % 4);
+
+ val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1 / 4 + 1);
+ val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 / 4 + 1);
+ }
+exit:
+ crtc_state->psr2_man_track_ctl = val;
+}
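+
+/*
+ * Worked example for the SU region programming above (hypothetical clip): on
+ * a non-ADLP platform with a 4-line-aligned clip of y1 = 8, y2 = 24, the
+ * register gets START = 8 / 4 + 1 = 3 and END = 24 / 4 + 1 = 7, i.e. the
+ * addresses are in 4-line blocks and 1-based; ADLP takes raw scanlines with
+ * an inclusive END of y2 - 1.
+ */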
+
+static void clip_area_update(struct drm_rect *overlap_damage_area,
+ struct drm_rect *damage_area,
+ struct drm_rect *pipe_src)
+{
+ if (!drm_rect_intersect(damage_area, pipe_src))
+ return;
+
+ if (overlap_damage_area->y1 == -1) {
+ overlap_damage_area->y1 = damage_area->y1;
+ overlap_damage_area->y2 = damage_area->y2;
+ return;
+ }
+
+ if (damage_area->y1 < overlap_damage_area->y1)
+ overlap_damage_area->y1 = damage_area->y1;
+
+ if (damage_area->y2 > overlap_damage_area->y2)
+ overlap_damage_area->y2 = damage_area->y2;
+}
+
+static void intel_psr2_sel_fetch_pipe_alignment(const struct intel_crtc_state *crtc_state,
+ struct drm_rect *pipe_clip)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ const u16 y_alignment = crtc_state->su_y_granularity;
+
+ pipe_clip->y1 -= pipe_clip->y1 % y_alignment;
+ if (pipe_clip->y2 % y_alignment)
+ pipe_clip->y2 = ((pipe_clip->y2 / y_alignment) + 1) * y_alignment;
+
+ if (IS_ALDERLAKE_P(dev_priv) && crtc_state->dsc.compression_enable)
+ drm_warn(&dev_priv->drm, "Missing PSR2 sel fetch alignment with DSC\n");
+}
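+
+/*
+ * Worked alignment example for the function above (hypothetical clip): with
+ * y_alignment = 4 and a damaged area of y1 = 5, y2 = 10, y1 is rounded down
+ * to 4 (5 - 5 % 4) and y2 is rounded up to 12 ((10 / 4 + 1) * 4), so the
+ * fetched region fully covers the damage at the sink's granularity.
+ */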
+
+/*
+ * TODO: Not clear how to handle planes with negative position;
+ * also planes are not updated if they have a negative X
+ * position, so for now we do a full update in these cases.
+ *
+ * Plane scaling and rotation are not supported by selective fetch and
+ * both properties can change without a modeset, so they need to be
+ * checked at every atomic commit.
+ */
+static bool psr2_sel_fetch_plane_state_supported(const struct intel_plane_state *plane_state)
+{
+ if (plane_state->uapi.dst.y1 < 0 ||
+ plane_state->uapi.dst.x1 < 0 ||
+ plane_state->scaler_id >= 0 ||
+ plane_state->uapi.rotation != DRM_MODE_ROTATE_0)
+ return false;
+
+ return true;
+}
+
+/*
+ * Check for pipe properties that are not supported by selective fetch.
+ *
+ * TODO: pipe scaling causes a modeset but skl_update_scaler_crtc() is executed
+ * after intel_psr_compute_config(), so for now keeping PSR2 selective fetch
+ * enabled and going to the full update path.
+ */
+static bool psr2_sel_fetch_pipe_state_supported(const struct intel_crtc_state *crtc_state)
+{
+ if (crtc_state->scaler_state.scaler_id >= 0)
+ return false;
+
+ return true;
+}
+
+int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
+ struct drm_rect pipe_clip = { .x1 = 0, .y1 = -1, .x2 = INT_MAX, .y2 = -1 };
+ struct intel_plane_state *new_plane_state, *old_plane_state;
+ struct intel_plane *plane;
+ bool full_update = false;
+ int i, ret;
+
+ if (!crtc_state->enable_psr2_sel_fetch)
+ return 0;
+
+ if (!psr2_sel_fetch_pipe_state_supported(crtc_state)) {
+ full_update = true;
+ goto skip_sel_fetch_set_loop;
+ }
+
+ /*
+ * Calculate the minimal selective fetch area of each plane and the
+ * pipe damaged area.
+ * In the next loop the plane selective fetch area will actually be
+ * set using the whole pipe damaged area.
+ */
+ for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
+ new_plane_state, i) {
+ struct drm_rect src, damaged_area = { .x1 = 0, .y1 = -1,
+ .x2 = INT_MAX };
+
+ if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
+ continue;
+
+ if (!new_plane_state->uapi.visible &&
+ !old_plane_state->uapi.visible)
+ continue;
+
+ if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
+ full_update = true;
+ break;
+ }
+
+ /*
+ * If visibility changed or the plane moved, mark the whole plane
+ * area as damaged, as it needs to be completely redrawn in both
+ * the new and old positions.
+ */
+ if (new_plane_state->uapi.visible != old_plane_state->uapi.visible ||
+ !drm_rect_equals(&new_plane_state->uapi.dst,
+ &old_plane_state->uapi.dst)) {
+ if (old_plane_state->uapi.visible) {
+ damaged_area.y1 = old_plane_state->uapi.dst.y1;
+ damaged_area.y2 = old_plane_state->uapi.dst.y2;
+ clip_area_update(&pipe_clip, &damaged_area,
+ &crtc_state->pipe_src);
+ }
+
+ if (new_plane_state->uapi.visible) {
+ damaged_area.y1 = new_plane_state->uapi.dst.y1;
+ damaged_area.y2 = new_plane_state->uapi.dst.y2;
+ clip_area_update(&pipe_clip, &damaged_area,
+ &crtc_state->pipe_src);
+ }
+ continue;
+ } else if (new_plane_state->uapi.alpha != old_plane_state->uapi.alpha) {
+ /* If alpha changed mark the whole plane area as damaged */
+ damaged_area.y1 = new_plane_state->uapi.dst.y1;
+ damaged_area.y2 = new_plane_state->uapi.dst.y2;
+ clip_area_update(&pipe_clip, &damaged_area,
+ &crtc_state->pipe_src);
+ continue;
+ }
+
+ src = drm_plane_state_src(&new_plane_state->uapi);
+ drm_rect_fp_to_int(&src, &src);
+
+ if (!drm_atomic_helper_damage_merged(&old_plane_state->uapi,
+ &new_plane_state->uapi, &damaged_area))
+ continue;
+
+ damaged_area.y1 += new_plane_state->uapi.dst.y1 - src.y1;
+ damaged_area.y2 += new_plane_state->uapi.dst.y1 - src.y1;
+ damaged_area.x1 += new_plane_state->uapi.dst.x1 - src.x1;
+ damaged_area.x2 += new_plane_state->uapi.dst.x1 - src.x1;
+
+ clip_area_update(&pipe_clip, &damaged_area, &crtc_state->pipe_src);
+ }
+
+ /*
+ * TODO: For now we are just using full update in case
+ * selective fetch area calculation fails. To optimize this we
+ * should identify cases where this happens and fix the area
+ * calculation for those.
+ */
+ if (pipe_clip.y1 == -1) {
+ drm_info_once(&dev_priv->drm,
+ "Selective fetch area calculation failed in pipe %c\n",
+ pipe_name(crtc->pipe));
+ full_update = true;
+ }
+
+ if (full_update)
+ goto skip_sel_fetch_set_loop;
+
+ ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
+ if (ret)
+ return ret;
+
+ intel_psr2_sel_fetch_pipe_alignment(crtc_state, &pipe_clip);
+
+ /*
+ * Now that we have the pipe damaged area, check if it intersects
+ * with every plane; if it does, set the plane selective fetch area.
+ */
+ for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
+ new_plane_state, i) {
+ struct drm_rect *sel_fetch_area, inter;
+ struct intel_plane *linked = new_plane_state->planar_linked_plane;
+
+ if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc ||
+ !new_plane_state->uapi.visible)
+ continue;
+
+ inter = pipe_clip;
+ if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst))
+ continue;
+
+ if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
+ full_update = true;
+ break;
+ }
+
+ sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
+ sel_fetch_area->y1 = inter.y1 - new_plane_state->uapi.dst.y1;
+ sel_fetch_area->y2 = inter.y2 - new_plane_state->uapi.dst.y1;
+ crtc_state->update_planes |= BIT(plane->id);
+
+ /*
+ * Sel_fetch_area is calculated for the UV plane. Use the
+ * same area for the Y plane as well.
+ */
+ if (linked) {
+ struct intel_plane_state *linked_new_plane_state;
+ struct drm_rect *linked_sel_fetch_area;
+
+ linked_new_plane_state = intel_atomic_get_plane_state(state, linked);
+ if (IS_ERR(linked_new_plane_state))
+ return PTR_ERR(linked_new_plane_state);
+
+ linked_sel_fetch_area = &linked_new_plane_state->psr2_sel_fetch_area;
+ linked_sel_fetch_area->y1 = sel_fetch_area->y1;
+ linked_sel_fetch_area->y2 = sel_fetch_area->y2;
+ crtc_state->update_planes |= BIT(linked->id);
+ }
+ }
+
+skip_sel_fetch_set_loop:
+ psr2_man_trk_ctl_calc(crtc_state, &pipe_clip, full_update);
+ return 0;
+}
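+
+/*
+ * Coordinate-translation sketch for the update above (hypothetical plane): a
+ * plane whose framebuffer source starts at src.y1 = 0 but is placed at
+ * dst.y1 = 100 on the pipe has its merged damage shifted by +100 into pipe
+ * space before clip_area_update(); the per-plane sel_fetch_area is then the
+ * intersection translated back to be relative to dst.y1.
+ */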
+
+void intel_psr_pre_plane_update(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct intel_encoder *encoder;
+
+ if (!HAS_PSR(i915))
+ return;
+
+ for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
+ old_crtc_state->uapi.encoder_mask) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_psr *psr = &intel_dp->psr;
+ bool needs_to_disable = false;
+
+ mutex_lock(&psr->lock);
+
+ /*
+ * Reasons to disable:
+ * - CRTC will do a modeset
+ * - PSR disabled in new state
+ * - All planes will go inactive
+ * - Changing between PSR versions
+ */
+ needs_to_disable |= intel_crtc_needs_modeset(new_crtc_state);
+ needs_to_disable |= !new_crtc_state->has_psr;
+ needs_to_disable |= !new_crtc_state->active_planes;
+ needs_to_disable |= new_crtc_state->has_psr2 != psr->psr2_enabled;
+
+ if (psr->enabled && needs_to_disable)
+ intel_psr_disable_locked(intel_dp);
+
+ mutex_unlock(&psr->lock);
+ }
+}
+
+static void _intel_psr_post_plane_update(const struct intel_atomic_state *state,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_encoder *encoder;
+
+ if (!crtc_state->has_psr)
+ return;
+
+ for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
+ crtc_state->uapi.encoder_mask) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_psr *psr = &intel_dp->psr;
+
+ mutex_lock(&psr->lock);
+
+ if (psr->sink_not_reliable)
+ goto exit;
+
+ drm_WARN_ON(&dev_priv->drm, psr->enabled && !crtc_state->active_planes);
+
+ /* Only enable if there are active planes */
+ if (!psr->enabled && crtc_state->active_planes)
+ intel_psr_enable_locked(intel_dp, crtc_state);
+
+ /* Force a PSR exit when enabling CRC to avoid CRC timeouts */
+ if (crtc_state->crc_enabled && psr->enabled)
+ psr_force_hw_tracking_exit(intel_dp);
+
+exit:
+ mutex_unlock(&psr->lock);
+ }
+}
+
+void intel_psr_post_plane_update(const struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
+ int i;
+
+ if (!HAS_PSR(dev_priv))
+ return;
+
+ for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i)
+ _intel_psr_post_plane_update(state, crtc_state);
+}
+
+static int _psr2_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ /*
+ * Any state lower than EDP_PSR2_STATUS_STATE_DEEP_SLEEP is enough.
+ * As all higher states have bit 4 of the PSR2 state set, we can just wait for
+ * EDP_PSR2_STATUS_STATE_DEEP_SLEEP to be cleared.
+ */
+ return intel_de_wait_for_clear(dev_priv,
+ EDP_PSR2_STATUS(intel_dp->psr.transcoder),
+ EDP_PSR2_STATUS_STATE_DEEP_SLEEP, 50);
+}
+
+static int _psr1_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ /*
+ * From bspec: Panel Self Refresh (BDW+)
+ * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of
+ * exit training time + 1.5 ms of aux channel handshake. 50 ms is
+ * defensive enough to cover everything.
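+ * E.g. for a 60 Hz panel that is ~16.7 ms + 6 ms + 1.5 ms (~24 ms),
+ * so 50 ms leaves about a 2x margin.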
+ */
+ return intel_de_wait_for_clear(dev_priv,
+ EDP_PSR_STATUS(intel_dp->psr.transcoder),
+ EDP_PSR_STATUS_STATE_MASK, 50);
+}
+
+/**
+ * intel_psr_wait_for_idle_locked - wait for PSR to be ready for a pipe update
+ * @new_crtc_state: new CRTC state
+ *
+ * This function is expected to be called from pipe_update_start(), where
+ * it will not race with PSR enable or disable.
+ */
+void intel_psr_wait_for_idle_locked(const struct intel_crtc_state *new_crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev);
+ struct intel_encoder *encoder;
+
+ if (!new_crtc_state->has_psr)
+ return;
+
+ for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
+ new_crtc_state->uapi.encoder_mask) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ int ret;
+
+ lockdep_assert_held(&intel_dp->psr.lock);
+
+ if (!intel_dp->psr.enabled)
+ continue;
+
+ if (intel_dp->psr.psr2_enabled)
+ ret = _psr2_ready_for_pipe_update_locked(intel_dp);
+ else
+ ret = _psr1_ready_for_pipe_update_locked(intel_dp);
+
+ if (ret)
+ drm_err(&dev_priv->drm, "PSR wait timed out, atomic update may fail\n");
+ }
+}
+
+static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ i915_reg_t reg;
+ u32 mask;
+ int err;
+
+ if (!intel_dp->psr.enabled)
+ return false;
+
+ if (intel_dp->psr.psr2_enabled) {
+ reg = EDP_PSR2_STATUS(intel_dp->psr.transcoder);
+ mask = EDP_PSR2_STATUS_STATE_MASK;
+ } else {
+ reg = EDP_PSR_STATUS(intel_dp->psr.transcoder);
+ mask = EDP_PSR_STATUS_STATE_MASK;
+ }
+
+ mutex_unlock(&intel_dp->psr.lock);
+
+ err = intel_de_wait_for_clear(dev_priv, reg, mask, 50);
+ if (err)
+ drm_err(&dev_priv->drm,
+ "Timed out waiting for PSR Idle for re-enable\n");
+
+ /* After the unlocked wait, verify that PSR is still wanted! */
+ mutex_lock(&intel_dp->psr.lock);
+ return err == 0 && intel_dp->psr.enabled;
+}
+
+static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
+{
+ struct drm_connector_list_iter conn_iter;
+ struct drm_device *dev = &dev_priv->drm;
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_atomic_state *state;
+ struct drm_connector *conn;
+ int err = 0;
+
+ state = drm_atomic_state_alloc(dev);
+ if (!state)
+ return -ENOMEM;
+
+ drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
+ state->acquire_ctx = &ctx;
+
+retry:
+
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(conn, &conn_iter) {
+ struct drm_connector_state *conn_state;
+ struct drm_crtc_state *crtc_state;
+
+ if (conn->connector_type != DRM_MODE_CONNECTOR_eDP)
+ continue;
+
+ conn_state = drm_atomic_get_connector_state(state, conn);
+ if (IS_ERR(conn_state)) {
+ err = PTR_ERR(conn_state);
+ break;
+ }
+
+ if (!conn_state->crtc)
+ continue;
+
+ crtc_state = drm_atomic_get_crtc_state(state, conn_state->crtc);
+ if (IS_ERR(crtc_state)) {
+ err = PTR_ERR(crtc_state);
+ break;
+ }
+
+ /* Mark mode as changed to trigger a pipe->update() */
+ crtc_state->mode_changed = true;
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ if (err == 0)
+ err = drm_atomic_commit(state);
+
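+ /*
+ * Standard deadlock backoff dance: on lock contention drop the
+ * atomic state and all held locks, wait for the contended lock
+ * and redo the connector walk from scratch.
+ */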
+ if (err == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ err = drm_modeset_backoff(&ctx);
+ if (!err)
+ goto retry;
+ }
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+ drm_atomic_state_put(state);
+
+ return err;
+}
+
+int intel_psr_debug_set(struct intel_dp *intel_dp, u64 val)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
+ u32 old_mode;
+ int ret;
+
+ if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) ||
+ mode > I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
+ drm_dbg_kms(&dev_priv->drm, "Invalid debug mask %llx\n", val);
+ return -EINVAL;
+ }
+
+ ret = mutex_lock_interruptible(&intel_dp->psr.lock);
+ if (ret)
+ return ret;
+
+ old_mode = intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK;
+ intel_dp->psr.debug = val;
+
+ /*
+ * Do it right away if it's already enabled, otherwise it will be done
+ * when enabling the source.
+ */
+ if (intel_dp->psr.enabled)
+ psr_irq_control(intel_dp);
+
+ mutex_unlock(&intel_dp->psr.lock);
+
+ if (old_mode != mode)
+ ret = intel_psr_fastset_force(dev_priv);
+
+ return ret;
+}
+
+static void intel_psr_handle_irq(struct intel_dp *intel_dp)
+{
+ struct intel_psr *psr = &intel_dp->psr;
+
+ intel_psr_disable_locked(intel_dp);
+ psr->sink_not_reliable = true;
+ /* let's make sure that sink is awake */
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
+}
+
+static void intel_psr_work(struct work_struct *work)
+{
+ struct intel_dp *intel_dp =
+ container_of(work, typeof(*intel_dp), psr.work);
+
+ mutex_lock(&intel_dp->psr.lock);
+
+ if (!intel_dp->psr.enabled)
+ goto unlock;
+
+ if (READ_ONCE(intel_dp->psr.irq_aux_error))
+ intel_psr_handle_irq(intel_dp);
+
+ /*
+ * We have to make sure PSR is ready for re-enable,
+ * otherwise it stays disabled until the next full enable/disable cycle.
+ * PSR might take some time to get fully disabled
+ * and be ready for re-enable.
+ */
+ if (!__psr_wait_for_idle_locked(intel_dp))
+ goto unlock;
+
+ /*
+ * The delayed work can race with an invalidate hence we need to
+ * recheck. Since psr_flush first clears this and then reschedules we
+ * won't ever miss a flush when bailing out here.
+ */
+ if (intel_dp->psr.busy_frontbuffer_bits || intel_dp->psr.active)
+ goto unlock;
+
+ intel_psr_activate(intel_dp);
+unlock:
+ mutex_unlock(&intel_dp->psr.lock);
+}
+
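+/*
+ * With PSR2 selective fetch an invalidate switches the hardware to
+ * continuous full frame (CFF) fetching, so frontbuffer rendering keeps
+ * reaching the panel; without selective fetch we simply exit PSR.
+ */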
+static void _psr_invalidate_handle(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ if (intel_dp->psr.psr2_sel_fetch_enabled) {
+ u32 val;
+
+ if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
+ /* Send one update, otherwise lag is observed on screen */
+ intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
+ return;
+ }
+
+ val = man_trk_ctl_enable_bit_get(dev_priv) |
+ man_trk_ctl_partial_frame_bit_get(dev_priv) |
+ man_trk_ctl_continuos_full_frame(dev_priv);
+ intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder), val);
+ intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
+ intel_dp->psr.psr2_sel_fetch_cff_enabled = true;
+ } else {
+ intel_psr_exit(intel_dp);
+ }
+}
+
+/**
+ * intel_psr_invalidate - Invalidate PSR
+ * @dev_priv: i915 device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ * @origin: which operation caused the invalidate
+ *
+ * Since the hardware frontbuffer tracking has gaps we need to integrate
+ * with the software frontbuffer tracking. This function gets called every
+ * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
+ * disabled if the frontbuffer mask contains a buffer relevant to PSR.
+ *
+ * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
+ */
+void intel_psr_invalidate(struct drm_i915_private *dev_priv,
+ unsigned frontbuffer_bits, enum fb_op_origin origin)
+{
+ struct intel_encoder *encoder;
+
+ if (origin == ORIGIN_FLIP)
+ return;
+
+ for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
+ unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ mutex_lock(&intel_dp->psr.lock);
+ if (!intel_dp->psr.enabled) {
+ mutex_unlock(&intel_dp->psr.lock);
+ continue;
+ }
+
+ pipe_frontbuffer_bits &=
+ INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
+ intel_dp->psr.busy_frontbuffer_bits |= pipe_frontbuffer_bits;
+
+ if (pipe_frontbuffer_bits)
+ _psr_invalidate_handle(intel_dp);
+
+ mutex_unlock(&intel_dp->psr.lock);
+ }
+}
+
+/*
+ * Once we completely rely on PSR2 S/W tracking in the future,
+ * intel_psr_flush() will invalidate and flush the PSR for ORIGIN_FLIP
+ * events as well, so tgl_dc3co_flush_locked() will need to be changed
+ * accordingly.
+ */
+static void
+tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
+ enum fb_op_origin origin)
+{
+ if (!intel_dp->psr.dc3co_exitline || !intel_dp->psr.psr2_enabled ||
+ !intel_dp->psr.active)
+ return;
+
+ /*
+ * Every frontbuffer flush for a flip event pushes back the delayed work;
+ * when the delayed work finally runs, it means the display has been idle.
+ */
+ if (!(frontbuffer_bits &
+ INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe)))
+ return;
+
+ tgl_psr2_enable_dc3co(intel_dp);
+ mod_delayed_work(system_wq, &intel_dp->psr.dc3co_work,
+ intel_dp->psr.dc3co_exit_delay);
+}
+
+static void _psr_flush_handle(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ if (intel_dp->psr.psr2_sel_fetch_enabled) {
+ if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
+ /* can we turn CFF off? */
+ if (intel_dp->psr.busy_frontbuffer_bits == 0) {
+ u32 val = man_trk_ctl_enable_bit_get(dev_priv) |
+ man_trk_ctl_partial_frame_bit_get(dev_priv) |
+ man_trk_ctl_single_full_frame_bit_get(dev_priv);
+
+ /*
+ * turn continuous full frame off and do a single
+ * full frame
+ */
+ intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder),
+ val);
+ intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
+ intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
+ }
+ } else {
+ /*
+ * continuous full frame is disabled, only a single full
+ * frame is required
+ */
+ psr_force_hw_tracking_exit(intel_dp);
+ }
+ } else {
+ psr_force_hw_tracking_exit(intel_dp);
+
+ if (!intel_dp->psr.active && !intel_dp->psr.busy_frontbuffer_bits)
+ schedule_work(&intel_dp->psr.work);
+ }
+}
+
+/**
+ * intel_psr_flush - Flush PSR
+ * @dev_priv: i915 device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ * @origin: which operation caused the flush
+ *
+ * Since the hardware frontbuffer tracking has gaps we need to integrate
+ * with the software frontbuffer tracking. This function gets called every
+ * time frontbuffer rendering has completed and flushed out to memory. PSR
+ * can be enabled again if no other frontbuffer relevant to PSR is dirty.
+ *
+ * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
+ */
+void intel_psr_flush(struct drm_i915_private *dev_priv,
+ unsigned frontbuffer_bits, enum fb_op_origin origin)
+{
+ struct intel_encoder *encoder;
+
+ for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
+ unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ mutex_lock(&intel_dp->psr.lock);
+ if (!intel_dp->psr.enabled) {
+ mutex_unlock(&intel_dp->psr.lock);
+ continue;
+ }
+
+ pipe_frontbuffer_bits &=
+ INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
+ intel_dp->psr.busy_frontbuffer_bits &= ~pipe_frontbuffer_bits;
+
+ /*
+ * If the PSR is paused by an explicit intel_psr_pause() call,
+ * we have to ensure that the PSR is not activated until
+ * intel_psr_resume() is called.
+ */
+ if (intel_dp->psr.paused)
+ goto unlock;
+
+ if (origin == ORIGIN_FLIP ||
+ (origin == ORIGIN_CURSOR_UPDATE &&
+ !intel_dp->psr.psr2_sel_fetch_enabled)) {
+ tgl_dc3co_flush_locked(intel_dp, frontbuffer_bits, origin);
+ goto unlock;
+ }
+
+ if (pipe_frontbuffer_bits == 0)
+ goto unlock;
+
+ /* By definition flush = invalidate + flush */
+ _psr_flush_handle(intel_dp);
+unlock:
+ mutex_unlock(&intel_dp->psr.lock);
+ }
+}
+
+/**
+ * intel_psr_init - Init basic PSR work and mutex.
+ * @intel_dp: Intel DP
+ *
+ * This function is called after connector initialization (which handles
+ * the connector capabilities) and initializes the basic PSR state for
+ * each DP encoder.
+ */
+void intel_psr_init(struct intel_dp *intel_dp)
+{
+ struct intel_connector *connector = intel_dp->attached_connector;
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ if (!HAS_PSR(dev_priv))
+ return;
+
+ /*
+ * HSW spec explicitly says PSR is tied to port A.
+ * BDW+ platforms have an instance of the PSR registers per transcoder,
+ * but BDW, GEN9 and GEN11 are not validated by the HW team on any
+ * transcoder other than the eDP one. So for now only one instance of
+ * PSR is supported on those platforms, hardcoded to PORT_A.
+ * GEN12 supports an instance of the PSR registers per transcoder.
+ */
+ if (DISPLAY_VER(dev_priv) < 12 && dig_port->base.port != PORT_A) {
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR condition failed: Port not supported\n");
+ return;
+ }
+
+ intel_dp->psr.source_support = true;
+
+ /* Set link_standby vs. link_off defaults */
+ if (DISPLAY_VER(dev_priv) < 12)
+ /* For platforms up to TGL, respect the VBT setting again */
+ intel_dp->psr.link_standby = connector->panel.vbt.psr.full_link;
+
+ INIT_WORK(&intel_dp->psr.work, intel_psr_work);
+ INIT_DELAYED_WORK(&intel_dp->psr.dc3co_work, tgl_dc3co_disable_work);
+ mutex_init(&intel_dp->psr.lock);
+}
+
+static int psr_get_status_and_error_status(struct intel_dp *intel_dp,
+ u8 *status, u8 *error_status)
+{
+ struct drm_dp_aux *aux = &intel_dp->aux;
+ int ret;
+
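+ /* drm_dp_dpcd_readb() returns the number of bytes read on success */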
+ ret = drm_dp_dpcd_readb(aux, DP_PSR_STATUS, status);
+ if (ret != 1)
+ return ret;
+
+ ret = drm_dp_dpcd_readb(aux, DP_PSR_ERROR_STATUS, error_status);
+ if (ret != 1)
+ return ret;
+
+ *status = *status & DP_PSR_SINK_STATE_MASK;
+
+ return 0;
+}
+
+static void psr_alpm_check(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct drm_dp_aux *aux = &intel_dp->aux;
+ struct intel_psr *psr = &intel_dp->psr;
+ u8 val;
+ int r;
+
+ if (!psr->psr2_enabled)
+ return;
+
+ r = drm_dp_dpcd_readb(aux, DP_RECEIVER_ALPM_STATUS, &val);
+ if (r != 1) {
+ drm_err(&dev_priv->drm, "Error reading ALPM status\n");
+ return;
+ }
+
+ if (val & DP_ALPM_LOCK_TIMEOUT_ERROR) {
+ intel_psr_disable_locked(intel_dp);
+ psr->sink_not_reliable = true;
+ drm_dbg_kms(&dev_priv->drm,
+ "ALPM lock timeout error, disabling PSR\n");
+
+ /* Clearing error */
+ drm_dp_dpcd_writeb(aux, DP_RECEIVER_ALPM_STATUS, val);
+ }
+}
+
+static void psr_capability_changed_check(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_psr *psr = &intel_dp->psr;
+ u8 val;
+ int r;
+
+ r = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ESI, &val);
+ if (r != 1) {
+ drm_err(&dev_priv->drm, "Error reading DP_PSR_ESI\n");
+ return;
+ }
+
+ if (val & DP_PSR_CAPS_CHANGE) {
+ intel_psr_disable_locked(intel_dp);
+ psr->sink_not_reliable = true;
+ drm_dbg_kms(&dev_priv->drm,
+ "Sink PSR capability changed, disabling PSR\n");
+
+ /* Clearing it */
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ESI, val);
+ }
+}
+
+void intel_psr_short_pulse(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_psr *psr = &intel_dp->psr;
+ u8 status, error_status;
+ const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
+ DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
+ DP_PSR_LINK_CRC_ERROR;
+
+ if (!CAN_PSR(intel_dp))
+ return;
+
+ mutex_lock(&psr->lock);
+
+ if (!psr->enabled)
+ goto exit;
+
+ if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) {
+ drm_err(&dev_priv->drm,
+ "Error reading PSR status or error status\n");
+ goto exit;
+ }
+
+ if (status == DP_PSR_SINK_INTERNAL_ERROR || (error_status & errors)) {
+ intel_psr_disable_locked(intel_dp);
+ psr->sink_not_reliable = true;
+ }
+
+ if (status == DP_PSR_SINK_INTERNAL_ERROR && !error_status)
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR sink internal error, disabling PSR\n");
+ if (error_status & DP_PSR_RFB_STORAGE_ERROR)
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR RFB storage error, disabling PSR\n");
+ if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR VSC SDP uncorrectable error, disabling PSR\n");
+ if (error_status & DP_PSR_LINK_CRC_ERROR)
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR Link CRC error, disabling PSR\n");
+
+ if (error_status & ~errors)
+ drm_err(&dev_priv->drm,
+ "PSR_ERROR_STATUS unhandled errors %x\n",
+ error_status & ~errors);
+ /* clear status register */
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, error_status);
+
+ psr_alpm_check(intel_dp);
+ psr_capability_changed_check(intel_dp);
+
+exit:
+ mutex_unlock(&psr->lock);
+}
+
+bool intel_psr_enabled(struct intel_dp *intel_dp)
+{
+ bool ret;
+
+ if (!CAN_PSR(intel_dp))
+ return false;
+
+ mutex_lock(&intel_dp->psr.lock);
+ ret = intel_dp->psr.enabled;
+ mutex_unlock(&intel_dp->psr.lock);
+
+ return ret;
+}
+
+/**
+ * intel_psr_lock - grab PSR lock
+ * @crtc_state: the crtc state
+ *
+ * This is initially meant to be used around the CRTC update, when
+ * vblank-sensitive registers are updated and we need to grab the lock
+ * before that to avoid vblank evasion.
+ */
+void intel_psr_lock(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_encoder *encoder;
+
+ if (!crtc_state->has_psr)
+ return;
+
+ for_each_intel_encoder_mask_with_psr(&i915->drm, encoder,
+ crtc_state->uapi.encoder_mask) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ mutex_lock(&intel_dp->psr.lock);
+ break;
+ }
+}
+
+/**
+ * intel_psr_unlock - release PSR lock
+ * @crtc_state: the crtc state
+ *
+ * Release the PSR lock that was held during pipe update.
+ */
+void intel_psr_unlock(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_encoder *encoder;
+
+ if (!crtc_state->has_psr)
+ return;
+
+ for_each_intel_encoder_mask_with_psr(&i915->drm, encoder,
+ crtc_state->uapi.encoder_mask) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ mutex_unlock(&intel_dp->psr.lock);
+ break;
+ }
+}
diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h
new file mode 100644
index 000000000..2ac3a46cc
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_psr.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_PSR_H__
+#define __INTEL_PSR_H__
+
+#include <linux/types.h>
+
+enum fb_op_origin;
+struct drm_connector;
+struct drm_connector_state;
+struct drm_i915_private;
+struct intel_atomic_state;
+struct intel_crtc;
+struct intel_crtc_state;
+struct intel_dp;
+struct intel_encoder;
+struct intel_plane;
+struct intel_plane_state;
+
+void intel_psr_init_dpcd(struct intel_dp *intel_dp);
+void intel_psr_pre_plane_update(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+void intel_psr_post_plane_update(const struct intel_atomic_state *state);
+void intel_psr_disable(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *old_crtc_state);
+int intel_psr_debug_set(struct intel_dp *intel_dp, u64 value);
+void intel_psr_invalidate(struct drm_i915_private *dev_priv,
+ unsigned frontbuffer_bits,
+ enum fb_op_origin origin);
+void intel_psr_flush(struct drm_i915_private *dev_priv,
+ unsigned frontbuffer_bits,
+ enum fb_op_origin origin);
+void intel_psr_init(struct intel_dp *intel_dp);
+void intel_psr_compute_config(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state);
+void intel_psr_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config);
+void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir);
+void intel_psr_short_pulse(struct intel_dp *intel_dp);
+void intel_psr_wait_for_idle_locked(const struct intel_crtc_state *new_crtc_state);
+bool intel_psr_enabled(struct intel_dp *intel_dp);
+int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state);
+void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ int color_plane);
+void intel_psr2_disable_plane_sel_fetch(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state);
+void intel_psr_pause(struct intel_dp *intel_dp);
+void intel_psr_resume(struct intel_dp *intel_dp);
+
+void intel_psr_lock(const struct intel_crtc_state *crtc_state);
+void intel_psr_unlock(const struct intel_crtc_state *crtc_state);
+
+#endif /* __INTEL_PSR_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_qp_tables.c b/drivers/gpu/drm/i915/display/intel_qp_tables.c
new file mode 100644
index 000000000..6f8e4ec5c
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_qp_tables.c
@@ -0,0 +1,309 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include <drm/display/drm_dsc.h>
+
+#include "i915_utils.h"
+#include "intel_qp_tables.h"
+
+/* from BPP 6 to 24 in steps of 0.5: (24 - 6) * 2 + 1 = 37 entries */
+#define RC_RANGE_QP444_8BPC_MAX_NUM_BPP 37
+
+/* from BPP 6 to 30 in steps of 0.5: (30 - 6) * 2 + 1 = 49 entries */
+#define RC_RANGE_QP444_10BPC_MAX_NUM_BPP 49
+
+/* from BPP 6 to 36 in steps of 0.5: (36 - 6) * 2 + 1 = 61 entries */
+#define RC_RANGE_QP444_12BPC_MAX_NUM_BPP 61
+
+/*
+ * These QP tables are as per the C model, with the rows pointing to
+ * bpps which increment in steps of 0.5.
+ * We do not support fractional bpps as of today, hence we skip the
+ * fractional bpp columns when referencing the tables for QP
+ * calculations.
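+ *
+ * Given the 6 bpp origin and 0.5 bpp step, an integer bpp value
+ * presumably maps to column index bpp_i = (bpp - 6) * 2 when callers
+ * index these tables.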
+ */
+static const u8 rc_range_minqp444_8bpc[DSC_NUM_BUF_RANGES][RC_RANGE_QP444_8BPC_MAX_NUM_BPP] = {
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 3, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 3, 3, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 5, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 5, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0 },
+ { 5, 5, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0 },
+ { 5, 5, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0 },
+ { 5, 5, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 0, 0, 0 },
+ { 6, 5, 5, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 2, 2, 1, 1, 1, 1, 1, 1, 0, 0, 0 },
+ { 6, 6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3,
+ 3, 3, 3, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 0 },
+ { 6, 6, 6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0 },
+ { 6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 4,
+ 4, 4, 4, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0 },
+ { 9, 9, 9, 9, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 5,
+ 5, 5, 5, 4, 4, 3, 3, 3, 3, 2, 2, 1, 1, 1 },
+ { 14, 14, 13, 13, 12, 12, 12, 12, 11, 11, 10, 10, 10, 10, 9, 9, 9, 8, 8,
+ 8, 7, 7, 7, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 3, 3, 3, 3 }
+};
+
+static const u8 rc_range_maxqp444_8bpc[DSC_NUM_BUF_RANGES][RC_RANGE_QP444_8BPC_MAX_NUM_BPP] = {
+ { 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 6, 6, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 2, 2, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0 },
+ { 8, 7, 7, 6, 5, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 3, 2, 2, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0 },
+ { 8, 8, 7, 7, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 4, 3, 3, 2, 2, 2, 2, 2,
+ 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0 },
+ { 9, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 5, 4, 4, 3, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 0 },
+ { 9, 8, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 6, 5, 4, 4, 3, 3, 3, 3, 3,
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1 },
+ { 9, 9, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 5, 4, 4, 3, 3, 3, 3, 3,
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1 },
+ { 10, 10, 9, 9, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7, 7, 6, 5, 5, 4, 4, 4, 4, 3,
+ 3, 3, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1 },
+ { 11, 11, 10, 10, 9, 9, 9, 9, 9, 9, 8, 8, 8, 7, 7, 6, 6, 5, 5, 5, 5, 5,
+ 4, 4, 4, 4, 3, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
+ { 12, 11, 11, 10, 10, 10, 9, 9, 9, 9, 9, 9, 9, 8, 8, 7, 6, 6, 5, 5, 5,
+ 5, 4, 4, 4, 4, 3, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
+ { 12, 12, 11, 11, 10, 10, 10, 10, 10, 10, 9, 9, 9, 8, 8, 7, 7, 6, 6, 6,
+ 5, 5, 4, 4, 4, 4, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 1 },
+ { 12, 12, 12, 11, 11, 11, 10, 10, 10, 10, 9, 9, 9, 9, 8, 8, 8, 7, 7, 7,
+ 6, 6, 5, 5, 5, 5, 4, 4, 3, 3, 3, 3, 2, 2, 2, 2, 1 },
+ { 12, 12, 12, 12, 11, 11, 11, 11, 11, 10, 10, 9, 9, 9, 8, 8, 8, 7, 7, 7,
+ 6, 6, 5, 5, 5, 5, 4, 4, 3, 3, 3, 3, 2, 2, 2, 2, 1 },
+ { 13, 13, 13, 13, 12, 12, 11, 11, 11, 11, 10, 10, 10, 10, 9, 9, 8, 8, 8,
+ 8, 7, 7, 6, 6, 6, 6, 5, 5, 4, 4, 4, 4, 3, 3, 2, 2, 2 },
+ { 15, 15, 14, 14, 13, 13, 13, 13, 12, 12, 11, 11, 11, 11, 10, 10, 10, 9,
+ 9, 9, 8, 8, 8, 8, 7, 7, 6, 6, 6, 6, 5, 5, 5, 4, 4, 4, 4 }
+};
+
+static const u8 rc_range_minqp444_10bpc[DSC_NUM_BUF_RANGES][RC_RANGE_QP444_10BPC_MAX_NUM_BPP] = {
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0 },
+ { 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0 },
+ { 7, 6, 6, 6, 5, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 3, 3, 3, 3, 3, 3, 2, 2,
+ 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0 },
+ { 7, 7, 6, 6, 5, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3,
+ 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0 },
+ { 9, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 3,
+ 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0 },
+ { 9, 8, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 4,
+ 4, 4, 4, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 0, 0, 0,
+ 0, 0, 0 },
+ { 9, 9, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 6, 5, 5, 5, 5, 5,
+ 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1, 1, 0, 0,
+ 0, 0, 0 },
+ { 9, 9, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 6, 6, 6, 6, 5,
+ 5, 5, 5, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 0,
+ 0, 0, 0 },
+ { 9, 9, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 6, 6, 6, 5,
+ 5, 5, 5, 5, 5, 5, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1,
+ 1, 0, 0 },
+ { 10, 9, 9, 8, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 6, 6, 6,
+ 6, 6, 6, 6, 6, 5, 5, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1,
+ 1, 1, 0 },
+ { 10, 10, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 8, 8, 8, 8, 7, 7, 7, 7, 7, 6,
+ 6, 6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 3, 3, 3, 2, 2, 2, 1,
+ 1, 1, 1 },
+ { 10, 10, 10, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 8, 8, 8, 8, 8,
+ 7, 7, 7, 7, 7, 7, 6, 6, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 3, 3, 3, 3, 2,
+ 2, 1, 1, 1 },
+ { 10, 10, 10, 10, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 8, 8,
+ 8, 8, 8, 8, 7, 7, 6, 6, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 3, 3, 3, 3, 2,
+ 2, 2, 2, 1 },
+ { 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 11, 11, 11, 11, 11, 11, 11,
+ 11, 11, 11, 10, 10, 9, 9, 9, 9, 8, 8, 7, 7, 7, 7, 6, 6, 5, 5, 5, 5, 4,
+ 4, 3, 3, 3, 3, 2, 2, 2, 2, 1 },
+ { 18, 18, 17, 17, 16, 16, 16, 16, 15, 15, 14, 14, 14, 14, 13, 13, 13,
+ 12, 12, 12, 11, 11, 11, 11, 10, 10, 9, 9, 9, 9, 9, 8, 8, 7, 7, 7, 7,
+ 7, 6, 6, 5, 5, 5, 5, 4, 4, 3, 3, 3 }
+};
+
+static const u8 rc_range_maxqp444_10bpc[DSC_NUM_BUF_RANGES][RC_RANGE_QP444_10BPC_MAX_NUM_BPP] = {
+ { 8, 8, 8, 8, 8, 8, 7, 7, 7, 6, 5, 5, 4, 4, 3, 3, 3, 2, 2, 2, 2, 2, 2,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0 },
+ { 10, 10, 9, 9, 8, 8, 8, 8, 8, 8, 7, 7, 6, 6, 6, 5, 5, 4, 4, 4, 4, 3, 3,
+ 3, 3, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0 },
+ { 12, 11, 11, 10, 9, 9, 9, 9, 9, 9, 9, 9, 8, 8, 8, 7, 6, 6, 5, 5, 5, 4,
+ 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0,
+ 0, 0, 0, 0 },
+ { 12, 12, 11, 11, 10, 10, 10, 10, 10, 10, 10, 10, 9, 9, 9, 8, 7, 7, 6,
+ 6, 6, 5, 5, 5, 5, 5, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 1, 1, 1, 1,
+ 1, 0, 0, 0, 0, 0, 0 },
+ { 13, 12, 12, 11, 11, 11, 11, 11, 11, 11, 11, 11, 10, 10, 9, 8, 8, 7,
+ 6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 4, 4, 4, 3, 3, 3, 3, 3, 2, 2, 2, 2, 1,
+ 1, 1, 1, 0, 0, 0, 0, 0 },
+ { 13, 12, 12, 12, 11, 11, 11, 11, 11, 11, 11, 11, 10, 10, 10, 9, 8, 8,
+ 7, 7, 7, 7, 6, 6, 6, 6, 5, 5, 5, 5, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2,
+ 2, 2, 1, 1, 1, 1, 0, 0 },
+ { 13, 13, 12, 12, 11, 11, 11, 11, 11, 11, 11, 11, 11, 10, 10, 9, 8, 8,
+ 7, 7, 7, 7, 7, 6, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 2,
+ 2, 2, 2, 1, 1, 1, 1, 1 },
+ { 14, 14, 13, 13, 12, 12, 12, 12, 12, 12, 12, 12, 12, 11, 11, 10, 9, 9,
+ 8, 8, 8, 8, 7, 7, 7, 7, 6, 6, 6, 5, 5, 5, 4, 4, 4, 4, 4, 4, 3, 3, 3,
+ 3, 2, 2, 2, 1, 1, 1, 1 },
+ { 15, 15, 14, 14, 13, 13, 13, 13, 13, 13, 12, 12, 12, 11, 11, 10, 10, 9,
+ 9, 9, 9, 9, 8, 8, 8, 8, 7, 7, 6, 5, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 3,
+ 3, 3, 2, 2, 2, 2, 1, 1 },
+ { 16, 15, 15, 14, 14, 14, 13, 13, 13, 13, 13, 13, 13, 12, 12, 11, 10,
+ 10, 9, 9, 9, 9, 8, 8, 8, 8, 7, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4,
+ 4, 3, 3, 3, 2, 2, 2, 2, 1 },
+ { 16, 16, 15, 15, 14, 14, 14, 14, 14, 14, 13, 13, 13, 12, 12, 11, 11,
+ 10, 10, 10, 9, 9, 8, 8, 8, 8, 7, 7, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5,
+ 4, 4, 4, 3, 3, 3, 2, 2, 2, 2 },
+ { 16, 16, 16, 15, 15, 15, 14, 14, 14, 14, 13, 13, 13, 13, 12, 12, 12,
+ 11, 11, 11, 10, 10, 9, 9, 9, 9, 8, 8, 7, 7, 7, 7, 6, 6, 6, 6, 5, 5, 5,
+ 5, 4, 4, 4, 4, 3, 3, 2, 2, 2 },
+ { 16, 16, 16, 16, 15, 15, 15, 15, 15, 14, 14, 13, 13, 13, 12, 12, 12,
+ 11, 11, 11, 10, 10, 9, 9, 9, 9, 8, 8, 7, 7, 7, 7, 6, 6, 6, 6, 5, 5, 5,
+ 5, 4, 4, 4, 4, 3, 3, 3, 3, 2 },
+ { 17, 17, 17, 17, 16, 16, 15, 15, 15, 15, 14, 14, 14, 14, 13, 13, 12,
+ 12, 12, 12, 11, 11, 10, 10, 10, 10, 9, 9, 8, 8, 8, 8, 7, 7, 6, 6, 6,
+ 6, 5, 5, 4, 4, 4, 4, 3, 3, 3, 3, 2 },
+ { 19, 19, 18, 18, 17, 17, 17, 17, 16, 16, 15, 15, 15, 15, 14, 14, 14,
+ 13, 13, 13, 12, 12, 12, 12, 11, 11, 10, 10, 10, 10, 10, 9, 9, 8, 8, 8,
+ 8, 8, 7, 7, 6, 6, 6, 6, 5, 5, 4, 4, 4 }
+};
+
+static const u8 rc_range_minqp444_12bpc[DSC_NUM_BUF_RANGES][RC_RANGE_QP444_12BPC_MAX_NUM_BPP] = {
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 11, 10, 10, 9, 8, 8, 8, 8, 8, 8, 8, 8, 7, 7, 7, 6, 5, 5, 4, 4, 4, 3, 3, 3, 3,
+ 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 11, 11, 10, 10, 9, 9, 9, 9, 9, 9, 9, 9, 8, 8, 8, 7, 6, 6, 6, 6, 6, 5, 5, 5,
+ 5, 5, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 13, 12, 12, 11, 11, 11, 11, 11, 11, 11, 11, 11, 10, 10, 9, 9, 9, 8, 7, 7, 7,
+ 7, 5, 5, 5, 5, 5, 5, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 1, 1,
+ 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 13, 12, 12, 12, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 10, 9, 9, 8, 8,
+ 8, 8, 6, 6, 6, 6, 5, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 2, 2, 2,
+ 2, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 13, 13, 12, 12, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 10, 9, 9, 9, 9,
+ 9, 9, 9, 8, 8, 8, 7, 7, 7, 6, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 3, 3,
+ 3, 2, 2, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0 },
+ { 13, 13, 12, 12, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 10, 10, 10,
+ 10, 10, 10, 9, 9, 9, 9, 8, 8, 8, 7, 7, 7, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5, 4, 4,
+ 4, 3, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0 },
+ { 13, 13, 12, 12, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+ 11, 11, 11, 10, 10, 10, 10, 9, 9, 8, 7, 7, 7, 7, 7, 6, 6, 6, 6, 6, 6, 5, 5,
+ 5, 4, 4, 4, 4, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 0, 0 },
+ { 14, 13, 13, 12, 12, 12, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+ 11, 11, 11, 10, 10, 10, 10, 9, 9, 8, 8, 8, 8, 8, 8, 7, 7, 7, 7, 7, 6, 6, 5,
+ 5, 5, 4, 4, 4, 4, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 1, 1, 0 },
+ { 14, 14, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 12, 12, 12, 12,
+ 12, 11, 11, 11, 11, 11, 11, 10, 10, 9, 9, 9, 9, 9, 9, 9, 9, 8, 8, 8, 7, 7, 7,
+ 6, 6, 6, 5, 5, 5, 5, 4, 4, 3, 3, 3, 2, 2, 2, 2, 1, 1, 1 },
+ { 14, 14, 14, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
+ 13, 12, 12, 11, 11, 11, 11, 11, 11, 10, 10, 10, 10, 9, 9, 9, 9, 8, 8, 8, 8,
+ 7, 7, 7, 7, 6, 6, 5, 5, 5, 4, 4, 4, 3, 3, 3, 3, 2, 2, 1, 1, 1 },
+ { 14, 14, 14, 14, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
+ 13, 12, 12, 12, 12, 12, 12, 11, 11, 10, 10, 10, 10, 9, 9, 9, 9, 8, 8, 8, 8,
+ 7, 7, 7, 7, 6, 6, 6, 6, 5, 4, 4, 4, 3, 3, 3, 3, 2, 2, 1, 1, 1 },
+ { 17, 17, 17, 17, 16, 16, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
+ 15, 14, 14, 13, 13, 13, 13, 12, 12, 11, 11, 11, 11, 10, 10, 9, 9, 9, 9, 8, 8,
+ 7, 7, 7, 7, 7, 6, 6, 6, 5, 5, 5, 5, 4, 4, 3, 3, 3, 3, 2, 2, 1 },
+ { 22, 22, 21, 21, 20, 20, 20, 20, 19, 19, 18, 18, 18, 18, 17, 17, 17, 16, 16,
+ 16, 15, 15, 15, 15, 14, 14, 13, 13, 13, 13, 13, 12, 12, 11, 11, 11, 11, 11,
+ 10, 10, 9, 9, 9, 9, 9, 8, 8, 7, 7, 7, 7, 7, 6, 6, 5, 5, 5, 5, 4, 4, 3 }
+};
+
+static const u8 rc_range_maxqp444_12bpc[DSC_NUM_BUF_RANGES][RC_RANGE_QP444_12BPC_MAX_NUM_BPP] = {
+ { 12, 12, 12, 12, 12, 12, 11, 11, 11, 10, 9, 9, 6, 6, 5, 5, 5, 4, 4, 4, 4, 4,
+ 4, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 14, 14, 13, 13, 12, 12, 12, 12, 12, 12, 11, 11, 9, 9, 9, 8, 8, 7, 7, 7, 7, 5,
+ 5, 5, 5, 5, 4, 4, 4, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 16, 15, 15, 14, 13, 13, 13, 13, 13, 13, 13, 13, 12, 12, 12, 11, 10, 10, 9, 9,
+ 9, 7, 7, 7, 7, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 3, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1,
+ 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 16, 16, 15, 15, 14, 14, 14, 14, 14, 14, 14, 14, 13, 13, 13, 12, 11, 11, 10,
+ 10, 10, 8, 8, 8, 8, 8, 7, 7, 6, 5, 5, 5, 5, 5, 5, 5, 4, 4, 3, 3, 3, 3, 3, 2,
+ 2, 2, 2, 2, 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 17, 16, 16, 15, 15, 15, 15, 15, 15, 15, 15, 15, 14, 14, 13, 12, 12, 11, 10,
+ 10, 10, 10, 8, 8, 8, 8, 8, 8, 7, 7, 7, 6, 6, 5, 5, 5, 4, 4, 4, 4, 3, 3, 3, 3,
+ 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0 },
+ { 17, 16, 16, 16, 15, 15, 15, 15, 15, 15, 15, 15, 14, 14, 14, 13, 12, 12, 11,
+ 11, 11, 11, 9, 9, 9, 9, 8, 8, 8, 8, 7, 6, 6, 6, 6, 6, 5, 5, 5, 5, 4, 4, 4, 3,
+ 3, 3, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 0 },
+ { 17, 17, 16, 16, 15, 15, 15, 15, 15, 15, 15, 15, 15, 14, 14, 13, 12, 12, 11,
+ 11, 11, 11, 11, 10, 10, 10, 9, 9, 9, 8, 7, 7, 7, 7, 7, 7, 7, 6, 6, 6, 5, 5,
+ 5, 5, 4, 4, 4, 3, 3, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 0 },
+ { 18, 18, 17, 17, 16, 16, 16, 16, 16, 16, 16, 16, 16, 15, 15, 14, 13, 13, 12,
+ 12, 12, 12, 11, 11, 11, 11, 10, 10, 10, 8, 8, 8, 7, 7, 7, 7, 7, 7, 6, 6, 6,
+ 6, 5, 5, 5, 4, 4, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1 },
+ { 19, 19, 18, 18, 17, 17, 17, 17, 17, 17, 16, 16, 16, 15, 15, 14, 14, 13, 13,
+ 13, 13, 13, 12, 12, 12, 12, 11, 11, 10, 9, 8, 8, 8, 8, 7, 7, 7, 7, 7, 7, 6,
+ 6, 6, 5, 5, 5, 5, 4, 4, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 1, 1 },
+ { 20, 19, 19, 18, 18, 18, 17, 17, 17, 17, 17, 17, 17, 16, 16, 15, 14, 14, 13,
+ 13, 13, 13, 12, 12, 12, 12, 11, 11, 10, 10, 9, 9, 9, 9, 8, 8, 8, 8, 8, 7, 7,
+ 6, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 2, 2, 1 },
+ { 20, 20, 19, 19, 18, 18, 18, 18, 18, 18, 17, 17, 17, 16, 16, 15, 15, 14, 14,
+ 14, 13, 13, 12, 12, 12, 12, 11, 11, 10, 10, 10, 10, 10, 10, 10, 10, 9, 9, 9,
+ 8, 8, 8, 7, 7, 7, 6, 6, 6, 6, 5, 5, 4, 4, 4, 3, 3, 3, 3, 2, 2, 2 },
+ { 20, 20, 20, 19, 19, 19, 18, 18, 18, 18, 17, 17, 17, 17, 16, 16, 16, 15, 15,
+ 15, 14, 14, 13, 13, 13, 13, 12, 12, 11, 11, 11, 11, 10, 10, 10, 10, 9, 9, 9,
+ 9, 8, 8, 8, 8, 7, 7, 6, 6, 6, 5, 5, 5, 4, 4, 4, 4, 3, 3, 2, 2, 2 },
+ { 20, 20, 20, 20, 19, 19, 19, 19, 19, 18, 18, 17, 17, 17, 16, 16, 16, 15, 15,
+ 15, 14, 14, 13, 13, 13, 13, 12, 12, 11, 11, 11, 11, 10, 10, 10, 10, 9, 9, 9,
+ 9, 8, 8, 8, 8, 7, 7, 7, 7, 6, 5, 5, 5, 4, 4, 4, 4, 3, 3, 2, 2, 2 },
+ { 21, 21, 21, 21, 20, 20, 19, 19, 19, 19, 18, 18, 18, 18, 17, 17, 16, 16, 16,
+ 16, 15, 15, 14, 14, 14, 14, 13, 13, 12, 12, 12, 12, 11, 11, 10, 10, 10, 10,
+ 9, 9, 8, 8, 8, 8, 8, 7, 7, 7, 6, 6, 6, 6, 5, 5, 4, 4, 4, 4, 3, 3, 2 },
+ { 23, 23, 22, 22, 21, 21, 21, 21, 20, 20, 19, 19, 19, 19, 18, 18, 18, 17, 17,
+ 17, 16, 16, 16, 16, 15, 15, 14, 14, 14, 14, 14, 13, 13, 12, 12, 12, 12, 12,
+ 11, 11, 10, 10, 10, 10, 10, 9, 9, 8, 8, 8, 8, 8, 7, 7, 6, 6, 6, 6, 5, 5, 4 }
+};
+
+#define PARAM_TABLE(_minmax, _bpc, _row, _col) do { \
+ if (bpc == (_bpc)) \
+ return rc_range_##_minmax##qp444_##_bpc##bpc[_row][_col]; \
+} while (0)
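+
+/*
+ * Each PARAM_TABLE() invocation expands to
+ * "if (bpc == N) return rc_range_<minmax>qp444_<N>bpc[row][col];", so the
+ * lookup helpers below fall through to MISSING_CASE() for an unsupported bpc.
+ */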
+
+u8 intel_lookup_range_min_qp(int bpc, int buf_i, int bpp_i)
+{
+ PARAM_TABLE(min, 8, buf_i, bpp_i);
+ PARAM_TABLE(min, 10, buf_i, bpp_i);
+ PARAM_TABLE(min, 12, buf_i, bpp_i);
+
+ MISSING_CASE(bpc);
+ return 0;
+}
+
+u8 intel_lookup_range_max_qp(int bpc, int buf_i, int bpp_i)
+{
+ PARAM_TABLE(max, 8, buf_i, bpp_i);
+ PARAM_TABLE(max, 10, buf_i, bpp_i);
+ PARAM_TABLE(max, 12, buf_i, bpp_i);
+
+ MISSING_CASE(bpc);
+ return 0;
+}
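+
+/*
+ * Illustrative use (hypothetical caller, values for example only):
+ * filling one DSC rate-control range for 10 bpc at an integer 12 bpp:
+ *
+ * bpp_i = (12 - 6) * 2;
+ * range_min_qp = intel_lookup_range_min_qp(10, buf_i, bpp_i);
+ * range_max_qp = intel_lookup_range_max_qp(10, buf_i, bpp_i);
+ */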
diff --git a/drivers/gpu/drm/i915/display/intel_qp_tables.h b/drivers/gpu/drm/i915/display/intel_qp_tables.h
new file mode 100644
index 000000000..9fb3c36bd
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_qp_tables.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef _INTEL_QP_TABLES_H_
+#define _INTEL_QP_TABLES_H_
+
+#include <linux/types.h>
+
+u8 intel_lookup_range_min_qp(int bpc, int buf_i, int bpp_i);
+u8 intel_lookup_range_max_qp(int bpc, int buf_i, int bpp_i);
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/intel_quirks.c b/drivers/gpu/drm/i915/display/intel_quirks.c
new file mode 100644
index 000000000..a280448df
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_quirks.c
@@ -0,0 +1,230 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include <linux/dmi.h>
+
+#include "i915_drv.h"
+#include "intel_display_types.h"
+#include "intel_quirks.h"
+
+static void intel_set_quirk(struct drm_i915_private *i915, enum intel_quirk_id quirk)
+{
+ i915->display.quirks.mask |= BIT(quirk);
+}
+
+/*
+ * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
+ */
+static void quirk_ssc_force_disable(struct drm_i915_private *i915)
+{
+ intel_set_quirk(i915, QUIRK_LVDS_SSC_DISABLE);
+ drm_info(&i915->drm, "applying lvds SSC disable quirk\n");
+}
+
+/*
+ * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
+ * brightness value
+ */
+static void quirk_invert_brightness(struct drm_i915_private *i915)
+{
+ intel_set_quirk(i915, QUIRK_INVERT_BRIGHTNESS);
+ drm_info(&i915->drm, "applying inverted panel brightness quirk\n");
+}
+
+/* Some VBT's incorrectly indicate no backlight is present */
+static void quirk_backlight_present(struct drm_i915_private *i915)
+{
+ intel_set_quirk(i915, QUIRK_BACKLIGHT_PRESENT);
+ drm_info(&i915->drm, "applying backlight present quirk\n");
+}
+
+/*
+ * Toshiba Satellite P50-C-18C requires the T12 delay to be min 800ms,
+ * which is 300 ms greater than the eDP spec T12 min.
+ */
+static void quirk_increase_t12_delay(struct drm_i915_private *i915)
+{
+ intel_set_quirk(i915, QUIRK_INCREASE_T12_DELAY);
+ drm_info(&i915->drm, "Applying T12 delay quirk\n");
+}
+
+/*
+ * GeminiLake NUC HDMI outputs require additional off time;
+ * this allows the onboard retimer to correctly sync to the signal
+ */
+static void quirk_increase_ddi_disabled_time(struct drm_i915_private *i915)
+{
+ intel_set_quirk(i915, QUIRK_INCREASE_DDI_DISABLED_TIME);
+ drm_info(&i915->drm, "Applying Increase DDI Disabled quirk\n");
+}
+
+static void quirk_no_pps_backlight_power_hook(struct drm_i915_private *i915)
+{
+ intel_set_quirk(i915, QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK);
+ drm_info(&i915->drm, "Applying no pps backlight power quirk\n");
+}
+
+struct intel_quirk {
+ int device;
+ int subsystem_vendor;
+ int subsystem_device;
+ void (*hook)(struct drm_i915_private *i915);
+};
+
+/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
+struct intel_dmi_quirk {
+ void (*hook)(struct drm_i915_private *i915);
+ const struct dmi_system_id (*dmi_id_list)[];
+};
+
+static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
+{
+ DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
+ return 1;
+}
+
+static int intel_dmi_no_pps_backlight(const struct dmi_system_id *id)
+{
+ DRM_INFO("No pps backlight support on %s\n", id->ident);
+ return 1;
+}
+
+static const struct intel_dmi_quirk intel_dmi_quirks[] = {
+ {
+ .dmi_id_list = &(const struct dmi_system_id[]) {
+ {
+ .callback = intel_dmi_reverse_brightness,
+ .ident = "NCR Corporation",
+ .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, ""),
+ },
+ },
+ {
+ .callback = intel_dmi_reverse_brightness,
+ .ident = "Thundersoft TST178 tablet",
+ /* DMI strings are too generic, also match on BIOS date */
+ .matches = {DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "Aptio CRB"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "To be filled by O.E.M."),
+ DMI_EXACT_MATCH(DMI_BIOS_DATE, "04/15/2014"),
+ },
+ },
+ { } /* terminating entry */
+ },
+ .hook = quirk_invert_brightness,
+ },
+ {
+ .dmi_id_list = &(const struct dmi_system_id[]) {
+ {
+ .callback = intel_dmi_no_pps_backlight,
+ .ident = "Google Lillipup sku524294",
+ .matches = {DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Google"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "Lindar"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "sku524294"),
+ },
+ },
+ {
+ .callback = intel_dmi_no_pps_backlight,
+ .ident = "Google Lillipup sku524295",
+ .matches = {DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Google"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "Lindar"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "sku524295"),
+ },
+ },
+ { }
+ },
+ .hook = quirk_no_pps_backlight_power_hook,
+ },
+};
+
+static struct intel_quirk intel_quirks[] = {
+ /* Lenovo U160 cannot use SSC on LVDS */
+ { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
+
+ /* Sony Vaio Y cannot use SSC on LVDS */
+ { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
+
+ /* Acer Aspire 5734Z must invert backlight brightness */
+ { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
+
+ /* Acer/eMachines G725 */
+ { 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },
+
+ /* Acer/eMachines e725 */
+ { 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },
+
+ /* Acer/Packard Bell NCL20 */
+ { 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
+
+ /* Acer Aspire 4736Z */
+ { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
+
+ /* Acer Aspire 5336 */
+ { 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },
+
+ /* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
+ { 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },
+
+ /* Acer C720 Chromebook (Core i3 4005U) */
+ { 0x0a16, 0x1025, 0x0a11, quirk_backlight_present },
+
+ /* Apple Macbook 2,1 (Core 2 T7400) */
+ { 0x27a2, 0x8086, 0x7270, quirk_backlight_present },
+
+ /* Apple Macbook 4,1 */
+ { 0x2a02, 0x106b, 0x00a1, quirk_backlight_present },
+
+ /* Toshiba CB35 Chromebook (Celeron 2955U) */
+ { 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },
+
+ /* HP Chromebook 14 (Celeron 2955U) */
+ { 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },
+
+ /* Dell Chromebook 11 */
+ { 0x0a06, 0x1028, 0x0a35, quirk_backlight_present },
+
+ /* Dell Chromebook 11 (2015 version) */
+ { 0x0a16, 0x1028, 0x0a35, quirk_backlight_present },
+
+ /* Toshiba Satellite P50-C-18C */
+ { 0x191B, 0x1179, 0xF840, quirk_increase_t12_delay },
+
+ /* GeminiLake NUC */
+ { 0x3185, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
+ { 0x3184, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
+ /* ASRock ITX */
+ { 0x3185, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
+ { 0x3184, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
+ /* ECS Liva Q2 */
+ { 0x3185, 0x1019, 0xa94d, quirk_increase_ddi_disabled_time },
+ { 0x3184, 0x1019, 0xa94d, quirk_increase_ddi_disabled_time },
+ /* HP Notebook - 14-r206nv */
+ { 0x0f31, 0x103c, 0x220f, quirk_invert_brightness },
+};
+
+void intel_init_quirks(struct drm_i915_private *i915)
+{
+ struct pci_dev *d = to_pci_dev(i915->drm.dev);
+ int i;
+
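+ /*
+ * A PCI quirk entry matches on exact device ID, while the subsystem
+ * vendor/device must either match exactly or be PCI_ANY_ID wildcards.
+ */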
+ for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
+ struct intel_quirk *q = &intel_quirks[i];
+
+ if (d->device == q->device &&
+ (d->subsystem_vendor == q->subsystem_vendor ||
+ q->subsystem_vendor == PCI_ANY_ID) &&
+ (d->subsystem_device == q->subsystem_device ||
+ q->subsystem_device == PCI_ANY_ID))
+ q->hook(i915);
+ }
+ for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
+ if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
+ intel_dmi_quirks[i].hook(i915);
+ }
+}
+
+bool intel_has_quirk(struct drm_i915_private *i915, enum intel_quirk_id quirk)
+{
+ return i915->display.quirks.mask & BIT(quirk);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_quirks.h b/drivers/gpu/drm/i915/display/intel_quirks.h
new file mode 100644
index 000000000..10a4d1631
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_quirks.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_QUIRKS_H__
+#define __INTEL_QUIRKS_H__
+
+#include <linux/types.h>
+
+struct drm_i915_private;
+
+enum intel_quirk_id {
+ QUIRK_BACKLIGHT_PRESENT,
+ QUIRK_INCREASE_DDI_DISABLED_TIME,
+ QUIRK_INCREASE_T12_DELAY,
+ QUIRK_INVERT_BRIGHTNESS,
+ QUIRK_LVDS_SSC_DISABLE,
+ QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK,
+};
+
+void intel_init_quirks(struct drm_i915_private *i915);
+bool intel_has_quirk(struct drm_i915_private *i915, enum intel_quirk_id quirk);
+
+#endif /* __INTEL_QUIRKS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
new file mode 100644
index 000000000..8294dddfd
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -0,0 +1,3446 @@
+/*
+ * Copyright 2006 Dave Airlie <airlied@linux.ie>
+ * Copyright © 2006-2007 Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ */
+
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+
+#include <drm/display/drm_hdmi_helper.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+
+#include "i915_drv.h"
+#include "intel_atomic.h"
+#include "intel_connector.h"
+#include "intel_crtc.h"
+#include "intel_de.h"
+#include "intel_display_types.h"
+#include "intel_fifo_underrun.h"
+#include "intel_gmbus.h"
+#include "intel_hdmi.h"
+#include "intel_hotplug.h"
+#include "intel_panel.h"
+#include "intel_sdvo.h"
+#include "intel_sdvo_regs.h"
+
+#define SDVO_TMDS_MASK (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)
+#define SDVO_RGB_MASK (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1)
+#define SDVO_LVDS_MASK (SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1)
+#define SDVO_TV_MASK (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_YPRPB0)
+
+#define SDVO_OUTPUT_MASK (SDVO_TMDS_MASK | SDVO_RGB_MASK | SDVO_LVDS_MASK |\
+ SDVO_TV_MASK)
+
+#define IS_TV(c) (c->output_flag & SDVO_TV_MASK)
+#define IS_TMDS(c) (c->output_flag & SDVO_TMDS_MASK)
+#define IS_LVDS(c) (c->output_flag & SDVO_LVDS_MASK)
+#define IS_TV_OR_LVDS(c) (c->output_flag & (SDVO_TV_MASK | SDVO_LVDS_MASK))
+#define IS_DIGITAL(c) (c->output_flag & (SDVO_TMDS_MASK | SDVO_LVDS_MASK))
+
+
+static const char * const tv_format_names[] = {
+ "NTSC_M" , "NTSC_J" , "NTSC_443",
+ "PAL_B" , "PAL_D" , "PAL_G" ,
+ "PAL_H" , "PAL_I" , "PAL_M" ,
+ "PAL_N" , "PAL_NC" , "PAL_60" ,
+ "SECAM_B" , "SECAM_D" , "SECAM_G" ,
+ "SECAM_K" , "SECAM_K1", "SECAM_L" ,
+ "SECAM_60"
+};
+
+#define TV_FORMAT_NUM ARRAY_SIZE(tv_format_names)
+
+struct intel_sdvo {
+ struct intel_encoder base;
+
+ struct i2c_adapter *i2c;
+ u8 slave_addr;
+
+ struct i2c_adapter ddc;
+
+ /* Register for the SDVO device: SDVOB or SDVOC */
+ i915_reg_t sdvo_reg;
+
+ /* Active outputs controlled by this SDVO output */
+ u16 controlled_output;
+
+ /*
+ * Capabilities of the SDVO device returned by
+ * intel_sdvo_get_capabilities()
+ */
+ struct intel_sdvo_caps caps;
+
+ u8 colorimetry_cap;
+
+ /* Pixel clock limitations reported by the SDVO device, in kHz */
+ int pixel_clock_min, pixel_clock_max;
+
+ /*
+ * For a multi-function SDVO device,
+ * this tracks the currently attached outputs.
+ */
+ u16 attached_output;
+
+ /*
+ * Hotplug activation bits for this device
+ */
+ u16 hotplug_active;
+
+ enum port port;
+
+ bool has_hdmi_audio;
+
+ /* DDC bus used by this SDVO encoder */
+ u8 ddc_bus;
+
+ /*
+ * the sdvo flag gets lost in round trip: dtd->adjusted_mode->dtd
+ */
+ u8 dtd_sdvo_flags;
+};
+
+struct intel_sdvo_connector {
+ struct intel_connector base;
+
+ /* Mark the type of connector */
+ u16 output_flag;
+
+ /* This contains all currently supported TV formats */
+ u8 tv_format_supported[TV_FORMAT_NUM];
+ int format_supported_num;
+ struct drm_property *tv_format;
+
+ /* add the property for the SDVO-TV */
+ struct drm_property *left;
+ struct drm_property *right;
+ struct drm_property *top;
+ struct drm_property *bottom;
+ struct drm_property *hpos;
+ struct drm_property *vpos;
+ struct drm_property *contrast;
+ struct drm_property *saturation;
+ struct drm_property *hue;
+ struct drm_property *sharpness;
+ struct drm_property *flicker_filter;
+ struct drm_property *flicker_filter_adaptive;
+ struct drm_property *flicker_filter_2d;
+ struct drm_property *tv_chroma_filter;
+ struct drm_property *tv_luma_filter;
+ struct drm_property *dot_crawl;
+
+ /* add the property for the SDVO-TV/LVDS */
+ struct drm_property *brightness;
+
+ /* This is to get the range of the margin. */
+ u32 max_hscan, max_vscan;
+
+ /*
+ * This is set if we treat the device as HDMI, instead of DVI.
+ */
+ bool is_hdmi;
+};
+
+struct intel_sdvo_connector_state {
+ /* base.base: tv.saturation/contrast/hue/brightness */
+ struct intel_digital_connector_state base;
+
+ struct {
+ unsigned overscan_h, overscan_v, hpos, vpos, sharpness;
+ unsigned flicker_filter, flicker_filter_2d, flicker_filter_adaptive;
+ unsigned chroma_filter, luma_filter, dot_crawl;
+ } tv;
+};
+
+static struct intel_sdvo *to_sdvo(struct intel_encoder *encoder)
+{
+ return container_of(encoder, struct intel_sdvo, base);
+}
+
+static struct intel_sdvo *intel_attached_sdvo(struct intel_connector *connector)
+{
+ return to_sdvo(intel_attached_encoder(connector));
+}
+
+static struct intel_sdvo_connector *
+to_intel_sdvo_connector(struct drm_connector *connector)
+{
+ return container_of(connector, struct intel_sdvo_connector, base.base);
+}
+
+#define to_intel_sdvo_connector_state(conn_state) \
+ container_of((conn_state), struct intel_sdvo_connector_state, base.base)
+
+static bool
+intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, u16 flags);
+static bool
+intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
+ struct intel_sdvo_connector *intel_sdvo_connector,
+ int type);
+static bool
+intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
+ struct intel_sdvo_connector *intel_sdvo_connector);
+
+/*
+ * Writes the SDVOB or SDVOC with the given value, but always writes both
+ * SDVOB and SDVOC to work around apparent hardware issues (according to
+ * comments in the BIOS).
+ */
+static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
+{
+ struct drm_device *dev = intel_sdvo->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ u32 bval = val, cval = val;
+ int i;
+
+ if (HAS_PCH_SPLIT(dev_priv)) {
+ intel_de_write(dev_priv, intel_sdvo->sdvo_reg, val);
+ intel_de_posting_read(dev_priv, intel_sdvo->sdvo_reg);
+ /*
+ * HW workaround: need to write this twice, as the
+ * first write may otherwise get masked.
+ */
+ if (HAS_PCH_IBX(dev_priv)) {
+ intel_de_write(dev_priv, intel_sdvo->sdvo_reg, val);
+ intel_de_posting_read(dev_priv, intel_sdvo->sdvo_reg);
+ }
+ return;
+ }
+
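+ /*
+ * Read back the sibling port's current value so that the loop
+ * below rewrites it unchanged while updating our own register.
+ */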
+ if (intel_sdvo->port == PORT_B)
+ cval = intel_de_read(dev_priv, GEN3_SDVOC);
+ else
+ bval = intel_de_read(dev_priv, GEN3_SDVOB);
+
+ /*
+ * Write the registers twice for luck. Sometimes,
+ * writing them only once doesn't appear to 'stick'.
+ * The BIOS does this too. Yay, magic
+ */
+ for (i = 0; i < 2; i++) {
+ intel_de_write(dev_priv, GEN3_SDVOB, bval);
+ intel_de_posting_read(dev_priv, GEN3_SDVOB);
+
+ intel_de_write(dev_priv, GEN3_SDVOC, cval);
+ intel_de_posting_read(dev_priv, GEN3_SDVOC);
+ }
+}
+
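+/*
+ * Read a single byte from an SDVO register: one i2c message writes the
+ * register address, a second reads the value back.
+ */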
+static bool intel_sdvo_read_byte(struct intel_sdvo *intel_sdvo, u8 addr, u8 *ch)
+{
+ struct i2c_msg msgs[] = {
+ {
+ .addr = intel_sdvo->slave_addr,
+ .flags = 0,
+ .len = 1,
+ .buf = &addr,
+ },
+ {
+ .addr = intel_sdvo->slave_addr,
+ .flags = I2C_M_RD,
+ .len = 1,
+ .buf = ch,
+ }
+ };
+ int ret;
+
+ ret = i2c_transfer(intel_sdvo->i2c, msgs, 2);
+ if (ret == 2)
+ return true;
+
+ DRM_DEBUG_KMS("i2c transfer returned %d\n", ret);
+ return false;
+}
+
+#define SDVO_CMD_NAME_ENTRY(cmd_) { .cmd = SDVO_CMD_ ## cmd_, .name = #cmd_ }
+
+/* Mapping of command numbers to names, for debug output. */
+static const struct {
+ u8 cmd;
+ const char *name;
+} __packed sdvo_cmd_names[] = {
+ SDVO_CMD_NAME_ENTRY(RESET),
+ SDVO_CMD_NAME_ENTRY(GET_DEVICE_CAPS),
+ SDVO_CMD_NAME_ENTRY(GET_FIRMWARE_REV),
+ SDVO_CMD_NAME_ENTRY(GET_TRAINED_INPUTS),
+ SDVO_CMD_NAME_ENTRY(GET_ACTIVE_OUTPUTS),
+ SDVO_CMD_NAME_ENTRY(SET_ACTIVE_OUTPUTS),
+ SDVO_CMD_NAME_ENTRY(GET_IN_OUT_MAP),
+ SDVO_CMD_NAME_ENTRY(SET_IN_OUT_MAP),
+ SDVO_CMD_NAME_ENTRY(GET_ATTACHED_DISPLAYS),
+ SDVO_CMD_NAME_ENTRY(GET_HOT_PLUG_SUPPORT),
+ SDVO_CMD_NAME_ENTRY(SET_ACTIVE_HOT_PLUG),
+ SDVO_CMD_NAME_ENTRY(GET_ACTIVE_HOT_PLUG),
+ SDVO_CMD_NAME_ENTRY(GET_INTERRUPT_EVENT_SOURCE),
+ SDVO_CMD_NAME_ENTRY(SET_TARGET_INPUT),
+ SDVO_CMD_NAME_ENTRY(SET_TARGET_OUTPUT),
+ SDVO_CMD_NAME_ENTRY(GET_INPUT_TIMINGS_PART1),
+ SDVO_CMD_NAME_ENTRY(GET_INPUT_TIMINGS_PART2),
+ SDVO_CMD_NAME_ENTRY(SET_INPUT_TIMINGS_PART1),
+ SDVO_CMD_NAME_ENTRY(SET_INPUT_TIMINGS_PART2),
+ SDVO_CMD_NAME_ENTRY(SET_OUTPUT_TIMINGS_PART1),
+ SDVO_CMD_NAME_ENTRY(SET_OUTPUT_TIMINGS_PART2),
+ SDVO_CMD_NAME_ENTRY(GET_OUTPUT_TIMINGS_PART1),
+ SDVO_CMD_NAME_ENTRY(GET_OUTPUT_TIMINGS_PART2),
+ SDVO_CMD_NAME_ENTRY(CREATE_PREFERRED_INPUT_TIMING),
+ SDVO_CMD_NAME_ENTRY(GET_PREFERRED_INPUT_TIMING_PART1),
+ SDVO_CMD_NAME_ENTRY(GET_PREFERRED_INPUT_TIMING_PART2),
+ SDVO_CMD_NAME_ENTRY(GET_INPUT_PIXEL_CLOCK_RANGE),
+ SDVO_CMD_NAME_ENTRY(GET_OUTPUT_PIXEL_CLOCK_RANGE),
+ SDVO_CMD_NAME_ENTRY(GET_SUPPORTED_CLOCK_RATE_MULTS),
+ SDVO_CMD_NAME_ENTRY(GET_CLOCK_RATE_MULT),
+ SDVO_CMD_NAME_ENTRY(SET_CLOCK_RATE_MULT),
+ SDVO_CMD_NAME_ENTRY(GET_SUPPORTED_TV_FORMATS),
+ SDVO_CMD_NAME_ENTRY(GET_TV_FORMAT),
+ SDVO_CMD_NAME_ENTRY(SET_TV_FORMAT),
+ SDVO_CMD_NAME_ENTRY(GET_SUPPORTED_POWER_STATES),
+ SDVO_CMD_NAME_ENTRY(GET_POWER_STATE),
+ SDVO_CMD_NAME_ENTRY(SET_ENCODER_POWER_STATE),
+ SDVO_CMD_NAME_ENTRY(SET_DISPLAY_POWER_STATE),
+ SDVO_CMD_NAME_ENTRY(SET_CONTROL_BUS_SWITCH),
+ SDVO_CMD_NAME_ENTRY(GET_SDTV_RESOLUTION_SUPPORT),
+ SDVO_CMD_NAME_ENTRY(GET_SCALED_HDTV_RESOLUTION_SUPPORT),
+ SDVO_CMD_NAME_ENTRY(GET_SUPPORTED_ENHANCEMENTS),
+
+ /* Add the op code for SDVO enhancements */
+ SDVO_CMD_NAME_ENTRY(GET_MAX_HPOS),
+ SDVO_CMD_NAME_ENTRY(GET_HPOS),
+ SDVO_CMD_NAME_ENTRY(SET_HPOS),
+ SDVO_CMD_NAME_ENTRY(GET_MAX_VPOS),
+ SDVO_CMD_NAME_ENTRY(GET_VPOS),
+ SDVO_CMD_NAME_ENTRY(SET_VPOS),
+ SDVO_CMD_NAME_ENTRY(GET_MAX_SATURATION),
+ SDVO_CMD_NAME_ENTRY(GET_SATURATION),
+ SDVO_CMD_NAME_ENTRY(SET_SATURATION),
+ SDVO_CMD_NAME_ENTRY(GET_MAX_HUE),
+ SDVO_CMD_NAME_ENTRY(GET_HUE),
+ SDVO_CMD_NAME_ENTRY(SET_HUE),
+ SDVO_CMD_NAME_ENTRY(GET_MAX_CONTRAST),
+ SDVO_CMD_NAME_ENTRY(GET_CONTRAST),
+ SDVO_CMD_NAME_ENTRY(SET_CONTRAST),
+ SDVO_CMD_NAME_ENTRY(GET_MAX_BRIGHTNESS),
+ SDVO_CMD_NAME_ENTRY(GET_BRIGHTNESS),
+ SDVO_CMD_NAME_ENTRY(SET_BRIGHTNESS),
+ SDVO_CMD_NAME_ENTRY(GET_MAX_OVERSCAN_H),
+ SDVO_CMD_NAME_ENTRY(GET_OVERSCAN_H),
+ SDVO_CMD_NAME_ENTRY(SET_OVERSCAN_H),
+ SDVO_CMD_NAME_ENTRY(GET_MAX_OVERSCAN_V),
+ SDVO_CMD_NAME_ENTRY(GET_OVERSCAN_V),
+ SDVO_CMD_NAME_ENTRY(SET_OVERSCAN_V),
+ SDVO_CMD_NAME_ENTRY(GET_MAX_FLICKER_FILTER),
+ SDVO_CMD_NAME_ENTRY(GET_FLICKER_FILTER),
+ SDVO_CMD_NAME_ENTRY(SET_FLICKER_FILTER),
+ SDVO_CMD_NAME_ENTRY(GET_MAX_FLICKER_FILTER_ADAPTIVE),
+ SDVO_CMD_NAME_ENTRY(GET_FLICKER_FILTER_ADAPTIVE),
+ SDVO_CMD_NAME_ENTRY(SET_FLICKER_FILTER_ADAPTIVE),
+ SDVO_CMD_NAME_ENTRY(GET_MAX_FLICKER_FILTER_2D),
+ SDVO_CMD_NAME_ENTRY(GET_FLICKER_FILTER_2D),
+ SDVO_CMD_NAME_ENTRY(SET_FLICKER_FILTER_2D),
+ SDVO_CMD_NAME_ENTRY(GET_MAX_SHARPNESS),
+ SDVO_CMD_NAME_ENTRY(GET_SHARPNESS),
+ SDVO_CMD_NAME_ENTRY(SET_SHARPNESS),
+ SDVO_CMD_NAME_ENTRY(GET_DOT_CRAWL),
+ SDVO_CMD_NAME_ENTRY(SET_DOT_CRAWL),
+ SDVO_CMD_NAME_ENTRY(GET_MAX_TV_CHROMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(GET_TV_CHROMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(SET_TV_CHROMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(GET_MAX_TV_LUMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(GET_TV_LUMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(SET_TV_LUMA_FILTER),
+
+ /* HDMI op code */
+ SDVO_CMD_NAME_ENTRY(GET_SUPP_ENCODE),
+ SDVO_CMD_NAME_ENTRY(GET_ENCODE),
+ SDVO_CMD_NAME_ENTRY(SET_ENCODE),
+ SDVO_CMD_NAME_ENTRY(SET_PIXEL_REPLI),
+ SDVO_CMD_NAME_ENTRY(GET_PIXEL_REPLI),
+ SDVO_CMD_NAME_ENTRY(GET_COLORIMETRY_CAP),
+ SDVO_CMD_NAME_ENTRY(SET_COLORIMETRY),
+ SDVO_CMD_NAME_ENTRY(GET_COLORIMETRY),
+ SDVO_CMD_NAME_ENTRY(GET_AUDIO_ENCRYPT_PREFER),
+ SDVO_CMD_NAME_ENTRY(SET_AUDIO_STAT),
+ SDVO_CMD_NAME_ENTRY(GET_AUDIO_STAT),
+ SDVO_CMD_NAME_ENTRY(GET_HBUF_INDEX),
+ SDVO_CMD_NAME_ENTRY(SET_HBUF_INDEX),
+ SDVO_CMD_NAME_ENTRY(GET_HBUF_INFO),
+ SDVO_CMD_NAME_ENTRY(GET_HBUF_AV_SPLIT),
+ SDVO_CMD_NAME_ENTRY(SET_HBUF_AV_SPLIT),
+ SDVO_CMD_NAME_ENTRY(GET_HBUF_TXRATE),
+ SDVO_CMD_NAME_ENTRY(SET_HBUF_TXRATE),
+ SDVO_CMD_NAME_ENTRY(SET_HBUF_DATA),
+ SDVO_CMD_NAME_ENTRY(GET_HBUF_DATA),
+};
+
+#undef SDVO_CMD_NAME_ENTRY
+
+static const char *sdvo_cmd_name(u8 cmd)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sdvo_cmd_names); i++) {
+ if (cmd == sdvo_cmd_names[i].cmd)
+ return sdvo_cmd_names[i].name;
+ }
+
+ return NULL;
+}
+
+#define SDVO_NAME(svdo) ((svdo)->port == PORT_B ? "SDVOB" : "SDVOC")
+
+static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
+ const void *args, int args_len)
+{
+ struct drm_i915_private *dev_priv = to_i915(intel_sdvo->base.base.dev);
+ const char *cmd_name;
+ int i, pos = 0;
+ char buffer[64];
+
+#define BUF_PRINT(args...) \
+ pos += snprintf(buffer + pos, max_t(int, sizeof(buffer) - pos, 0), args)
+
+ for (i = 0; i < args_len; i++) {
+ BUF_PRINT("%02X ", ((u8 *)args)[i]);
+ }
+ for (; i < 8; i++) {
+ BUF_PRINT(" ");
+ }
+
+ cmd_name = sdvo_cmd_name(cmd);
+ if (cmd_name)
+ BUF_PRINT("(%s)", cmd_name);
+ else
+ BUF_PRINT("(%02X)", cmd);
+
+ drm_WARN_ON(&dev_priv->drm, pos >= sizeof(buffer) - 1);
+#undef BUF_PRINT
+
+ DRM_DEBUG_KMS("%s: W: %02X %s\n", SDVO_NAME(intel_sdvo), cmd, buffer);
+}
+
+static const char * const cmd_status_names[] = {
+ [SDVO_CMD_STATUS_POWER_ON] = "Power on",
+ [SDVO_CMD_STATUS_SUCCESS] = "Success",
+ [SDVO_CMD_STATUS_NOTSUPP] = "Not supported",
+ [SDVO_CMD_STATUS_INVALID_ARG] = "Invalid arg",
+ [SDVO_CMD_STATUS_PENDING] = "Pending",
+ [SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED] = "Target not specified",
+ [SDVO_CMD_STATUS_SCALING_NOT_SUPP] = "Scaling not supported",
+};
+
+static const char *sdvo_cmd_status(u8 status)
+{
+ if (status < ARRAY_SIZE(cmd_status_names))
+ return cmd_status_names[status];
+ else
+ return NULL;
+}
+
+static bool __intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
+ const void *args, int args_len,
+ bool unlocked)
+{
+ u8 *buf, status;
+ struct i2c_msg *msgs;
+ int i, ret = true;
+
+ /* Would be simpler to allocate both in one go? */
+ buf = kzalloc(args_len * 2 + 2, GFP_KERNEL);
+ if (!buf)
+ return false;
+
+ msgs = kcalloc(args_len + 3, sizeof(*msgs), GFP_KERNEL);
+ if (!msgs) {
+ kfree(buf);
+ return false;
+ }
+
+ intel_sdvo_debug_write(intel_sdvo, cmd, args, args_len);
+
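+ /*
+ * Wire format: each argument byte becomes its own two-byte
+ * (register, value) write, with the registers counting down from
+ * SDVO_I2C_ARG_0, followed by a write of the opcode and a
+ * read-back of the status byte.
+ */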
+ for (i = 0; i < args_len; i++) {
+ msgs[i].addr = intel_sdvo->slave_addr;
+ msgs[i].flags = 0;
+ msgs[i].len = 2;
+ msgs[i].buf = buf + 2 *i;
+ buf[2*i + 0] = SDVO_I2C_ARG_0 - i;
+ buf[2*i + 1] = ((u8*)args)[i];
+ }
+ msgs[i].addr = intel_sdvo->slave_addr;
+ msgs[i].flags = 0;
+ msgs[i].len = 2;
+ msgs[i].buf = buf + 2*i;
+ buf[2*i + 0] = SDVO_I2C_OPCODE;
+ buf[2*i + 1] = cmd;
+
+ /* The following two messages read back the command status. */
+ status = SDVO_I2C_CMD_STATUS;
+ msgs[i+1].addr = intel_sdvo->slave_addr;
+ msgs[i+1].flags = 0;
+ msgs[i+1].len = 1;
+ msgs[i+1].buf = &status;
+
+ msgs[i+2].addr = intel_sdvo->slave_addr;
+ msgs[i+2].flags = I2C_M_RD;
+ msgs[i+2].len = 1;
+ msgs[i+2].buf = &status;
+
+ if (unlocked)
+ ret = i2c_transfer(intel_sdvo->i2c, msgs, i+3);
+ else
+ ret = __i2c_transfer(intel_sdvo->i2c, msgs, i+3);
+ if (ret < 0) {
+ DRM_DEBUG_KMS("I2c transfer returned %d\n", ret);
+ ret = false;
+ goto out;
+ }
+ if (ret != i+3) {
+ /* failure in I2C transfer */
+ DRM_DEBUG_KMS("I2c transfer returned %d/%d\n", ret, i+3);
+ ret = false;
+ }
+
+out:
+ kfree(msgs);
+ kfree(buf);
+ return ret;
+}
+
+static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
+ const void *args, int args_len)
+{
+ return __intel_sdvo_write_cmd(intel_sdvo, cmd, args, args_len, true);
+}
+
+static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
+ void *response, int response_len)
+{
+ struct drm_i915_private *dev_priv = to_i915(intel_sdvo->base.base.dev);
+ const char *cmd_status;
+ u8 retry = 15; /* 5 quick checks, followed by 10 long checks */
+ u8 status;
+ int i, pos = 0;
+ char buffer[64];
+
+ buffer[0] = '\0';
+
+ /*
+ * The documentation states that all commands will be
+ * processed within 15µs, and that we need only poll
+ * the status byte a maximum of 3 times in order for the
+ * command to be complete.
+ *
+ * Check 5 times in case the hardware failed to read the docs.
+ *
+ * Also beware that the first response by many devices is to
+ * reply PENDING and stall for time. TVs are notorious for
+ * requiring longer than specified to complete their replies.
+ * Originally (in the DDX long ago) the delay was only ever 15 µs,
+ * with an additional 30 ms delay added for TVs after much
+ * experimentation. To accommodate both sets of delays, we fall back
+ * to a sequence of slow checks if the device fails to reply within
+ * 5*15 µs.
+ */
+ if (!intel_sdvo_read_byte(intel_sdvo,
+ SDVO_I2C_CMD_STATUS,
+ &status))
+ goto log_fail;
+
+ while ((status == SDVO_CMD_STATUS_PENDING ||
+ status == SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED) && --retry) {
+ if (retry < 10)
+ msleep(15);
+ else
+ udelay(15);
+
+ if (!intel_sdvo_read_byte(intel_sdvo,
+ SDVO_I2C_CMD_STATUS,
+ &status))
+ goto log_fail;
+ }
+
+#define BUF_PRINT(args...) \
+ pos += snprintf(buffer + pos, max_t(int, sizeof(buffer) - pos, 0), args)
+
+ cmd_status = sdvo_cmd_status(status);
+ if (cmd_status)
+ BUF_PRINT("(%s)", cmd_status);
+ else
+ BUF_PRINT("(??? %d)", status);
+
+ if (status != SDVO_CMD_STATUS_SUCCESS)
+ goto log_fail;
+
+ /* Read the command response */
+ for (i = 0; i < response_len; i++) {
+ if (!intel_sdvo_read_byte(intel_sdvo,
+ SDVO_I2C_RETURN_0 + i,
+ &((u8 *)response)[i]))
+ goto log_fail;
+ BUF_PRINT(" %02X", ((u8 *)response)[i]);
+ }
+
+ drm_WARN_ON(&dev_priv->drm, pos >= sizeof(buffer) - 1);
+#undef BUF_PRINT
+
+ DRM_DEBUG_KMS("%s: R: %s\n", SDVO_NAME(intel_sdvo), buffer);
+ return true;
+
+log_fail:
+ DRM_DEBUG_KMS("%s: R: ... failed %s\n",
+ SDVO_NAME(intel_sdvo), buffer);
+ return false;
+}
+
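+/*
+ * Slow modes are sent with each pixel repeated 2x or 4x, presumably to
+ * keep the data rate on the SDVO bus within its working range; the
+ * device factors the multiplier back out. E.g. a 25.2 MHz mode (below
+ * 50 MHz) gets a 4x multiplier and so runs at 100.8 MHz on the bus.
+ */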
+static int intel_sdvo_get_pixel_multiplier(const struct drm_display_mode *adjusted_mode)
+{
+ if (adjusted_mode->crtc_clock >= 100000)
+ return 1;
+ else if (adjusted_mode->crtc_clock >= 50000)
+ return 2;
+ else
+ return 4;
+}
+
+static bool __intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo,
+ u8 ddc_bus)
+{
+ /* This must be the immediately preceding write before the i2c xfer */
+ return __intel_sdvo_write_cmd(intel_sdvo,
+ SDVO_CMD_SET_CONTROL_BUS_SWITCH,
+ &ddc_bus, 1, false);
+}
+
+static bool intel_sdvo_set_value(struct intel_sdvo *intel_sdvo, u8 cmd, const void *data, int len)
+{
+ if (!intel_sdvo_write_cmd(intel_sdvo, cmd, data, len))
+ return false;
+
+ return intel_sdvo_read_response(intel_sdvo, NULL, 0);
+}
+
+static bool
+intel_sdvo_get_value(struct intel_sdvo *intel_sdvo, u8 cmd, void *value, int len)
+{
+ if (!intel_sdvo_write_cmd(intel_sdvo, cmd, NULL, 0))
+ return false;
+
+ return intel_sdvo_read_response(intel_sdvo, value, len);
+}
+
+static bool intel_sdvo_set_target_input(struct intel_sdvo *intel_sdvo)
+{
+ struct intel_sdvo_set_target_input_args targets = {0};
+ return intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_TARGET_INPUT,
+ &targets, sizeof(targets));
+}
+
+/*
+ * Return whether each input is trained.
+ *
+ * This function is making an assumption about the layout of the response,
+ * which should be checked against the docs.
+ */
+static bool intel_sdvo_get_trained_inputs(struct intel_sdvo *intel_sdvo, bool *input_1, bool *input_2)
+{
+ struct intel_sdvo_get_trained_inputs_response response;
+
+ BUILD_BUG_ON(sizeof(response) != 1);
+ if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_TRAINED_INPUTS,
+ &response, sizeof(response)))
+ return false;
+
+ *input_1 = response.input0_trained;
+ *input_2 = response.input1_trained;
+ return true;
+}
+
+static bool intel_sdvo_set_active_outputs(struct intel_sdvo *intel_sdvo,
+ u16 outputs)
+{
+ return intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_ACTIVE_OUTPUTS,
+ &outputs, sizeof(outputs));
+}
+
+static bool intel_sdvo_get_active_outputs(struct intel_sdvo *intel_sdvo,
+ u16 *outputs)
+{
+ return intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_ACTIVE_OUTPUTS,
+ outputs, sizeof(*outputs));
+}
+
+static bool intel_sdvo_set_encoder_power_state(struct intel_sdvo *intel_sdvo,
+ int mode)
+{
+ u8 state = SDVO_ENCODER_STATE_ON;
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ state = SDVO_ENCODER_STATE_ON;
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ state = SDVO_ENCODER_STATE_STANDBY;
+ break;
+ case DRM_MODE_DPMS_SUSPEND:
+ state = SDVO_ENCODER_STATE_SUSPEND;
+ break;
+ case DRM_MODE_DPMS_OFF:
+ state = SDVO_ENCODER_STATE_OFF;
+ break;
+ }
+
+ return intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_ENCODER_POWER_STATE, &state, sizeof(state));
+}
+
+static bool intel_sdvo_get_input_pixel_clock_range(struct intel_sdvo *intel_sdvo,
+ int *clock_min,
+ int *clock_max)
+{
+ struct intel_sdvo_pixel_clock_range clocks;
+
+ BUILD_BUG_ON(sizeof(clocks) != 4);
+ if (!intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE,
+ &clocks, sizeof(clocks)))
+ return false;
+
+ /* Convert the values from units of 10 kHz to kHz. */
+ *clock_min = clocks.min * 10;
+ *clock_max = clocks.max * 10;
+ return true;
+}
+
+static bool intel_sdvo_set_target_output(struct intel_sdvo *intel_sdvo,
+ u16 outputs)
+{
+ return intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_TARGET_OUTPUT,
+ &outputs, sizeof(outputs));
+}
+
+static bool intel_sdvo_set_timing(struct intel_sdvo *intel_sdvo, u8 cmd,
+ struct intel_sdvo_dtd *dtd)
+{
+ return intel_sdvo_set_value(intel_sdvo, cmd, &dtd->part1, sizeof(dtd->part1)) &&
+ intel_sdvo_set_value(intel_sdvo, cmd + 1, &dtd->part2, sizeof(dtd->part2));
+}
+
+static bool intel_sdvo_get_timing(struct intel_sdvo *intel_sdvo, u8 cmd,
+ struct intel_sdvo_dtd *dtd)
+{
+ return intel_sdvo_get_value(intel_sdvo, cmd, &dtd->part1, sizeof(dtd->part1)) &&
+ intel_sdvo_get_value(intel_sdvo, cmd + 1, &dtd->part2, sizeof(dtd->part2));
+}
+
+static bool intel_sdvo_set_input_timing(struct intel_sdvo *intel_sdvo,
+ struct intel_sdvo_dtd *dtd)
+{
+ return intel_sdvo_set_timing(intel_sdvo,
+ SDVO_CMD_SET_INPUT_TIMINGS_PART1, dtd);
+}
+
+static bool intel_sdvo_set_output_timing(struct intel_sdvo *intel_sdvo,
+ struct intel_sdvo_dtd *dtd)
+{
+ return intel_sdvo_set_timing(intel_sdvo,
+ SDVO_CMD_SET_OUTPUT_TIMINGS_PART1, dtd);
+}
+
+static bool intel_sdvo_get_input_timing(struct intel_sdvo *intel_sdvo,
+ struct intel_sdvo_dtd *dtd)
+{
+ return intel_sdvo_get_timing(intel_sdvo,
+ SDVO_CMD_GET_INPUT_TIMINGS_PART1, dtd);
+}
+
+static bool
+intel_sdvo_create_preferred_input_timing(struct intel_sdvo *intel_sdvo,
+ struct intel_sdvo_connector *intel_sdvo_connector,
+ const struct drm_display_mode *mode)
+{
+ struct intel_sdvo_preferred_input_timing_args args;
+
+ memset(&args, 0, sizeof(args));
+ args.clock = mode->clock / 10;
+ args.width = mode->hdisplay;
+ args.height = mode->vdisplay;
+ args.interlace = 0;
+
+ if (IS_LVDS(intel_sdvo_connector)) {
+ const struct drm_display_mode *fixed_mode =
+ intel_panel_fixed_mode(&intel_sdvo_connector->base, mode);
+
+ if (fixed_mode->hdisplay != args.width ||
+ fixed_mode->vdisplay != args.height)
+ args.scaled = 1;
+ }
+
+ return intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING,
+ &args, sizeof(args));
+}
+
+static bool intel_sdvo_get_preferred_input_timing(struct intel_sdvo *intel_sdvo,
+ struct intel_sdvo_dtd *dtd)
+{
+ BUILD_BUG_ON(sizeof(dtd->part1) != 8);
+ BUILD_BUG_ON(sizeof(dtd->part2) != 8);
+ return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1,
+ &dtd->part1, sizeof(dtd->part1)) &&
+ intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2,
+ &dtd->part2, sizeof(dtd->part2));
+}
+
+static bool intel_sdvo_set_clock_rate_mult(struct intel_sdvo *intel_sdvo, u8 val)
+{
+ return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_CLOCK_RATE_MULT, &val, 1);
+}
+
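+/*
+ * The DTD follows the EDID detailed timing layout, split into two 8-byte
+ * halves for the wire protocol: the clock is stored in units of 10 kHz,
+ * and each 12-bit active/blanking value is split into a low byte plus a
+ * high nibble packed into the *_high fields.
+ */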
+static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
+ const struct drm_display_mode *mode)
+{
+ u16 width, height;
+ u16 h_blank_len, h_sync_len, v_blank_len, v_sync_len;
+ u16 h_sync_offset, v_sync_offset;
+ int mode_clock;
+
+ memset(dtd, 0, sizeof(*dtd));
+
+ width = mode->hdisplay;
+ height = mode->vdisplay;
+
+ /* do some mode translations */
+ h_blank_len = mode->htotal - mode->hdisplay;
+ h_sync_len = mode->hsync_end - mode->hsync_start;
+
+ v_blank_len = mode->vtotal - mode->vdisplay;
+ v_sync_len = mode->vsync_end - mode->vsync_start;
+
+ h_sync_offset = mode->hsync_start - mode->hdisplay;
+ v_sync_offset = mode->vsync_start - mode->vdisplay;
+
+ mode_clock = mode->clock;
+ mode_clock /= 10;
+ dtd->part1.clock = mode_clock;
+
+ dtd->part1.h_active = width & 0xff;
+ dtd->part1.h_blank = h_blank_len & 0xff;
+ dtd->part1.h_high = (((width >> 8) & 0xf) << 4) |
+ ((h_blank_len >> 8) & 0xf);
+ dtd->part1.v_active = height & 0xff;
+ dtd->part1.v_blank = v_blank_len & 0xff;
+ dtd->part1.v_high = (((height >> 8) & 0xf) << 4) |
+ ((v_blank_len >> 8) & 0xf);
+
+ dtd->part2.h_sync_off = h_sync_offset & 0xff;
+ dtd->part2.h_sync_width = h_sync_len & 0xff;
+ dtd->part2.v_sync_off_width = (v_sync_offset & 0xf) << 4 |
+ (v_sync_len & 0xf);
+ dtd->part2.sync_off_width_high = ((h_sync_offset & 0x300) >> 2) |
+ ((h_sync_len & 0x300) >> 4) | ((v_sync_offset & 0x30) >> 2) |
+ ((v_sync_len & 0x30) >> 4);
+
+ dtd->part2.dtd_flags = 0x18;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ dtd->part2.dtd_flags |= DTD_FLAG_INTERLACE;
+ if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+ dtd->part2.dtd_flags |= DTD_FLAG_HSYNC_POSITIVE;
+ if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+ dtd->part2.dtd_flags |= DTD_FLAG_VSYNC_POSITIVE;
+
+ dtd->part2.v_sync_off_high = v_sync_offset & 0xc0;
+}
+
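+/*
+ * Inverse of intel_sdvo_get_dtd_from_mode(): reassemble the split timing
+ * fields and convert the clock back from 10 kHz units.
+ */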
+static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode *pmode,
+ const struct intel_sdvo_dtd *dtd)
+{
+ struct drm_display_mode mode = {};
+
+ mode.hdisplay = dtd->part1.h_active;
+ mode.hdisplay += ((dtd->part1.h_high >> 4) & 0x0f) << 8;
+ mode.hsync_start = mode.hdisplay + dtd->part2.h_sync_off;
+ mode.hsync_start += (dtd->part2.sync_off_width_high & 0xc0) << 2;
+ mode.hsync_end = mode.hsync_start + dtd->part2.h_sync_width;
+ mode.hsync_end += (dtd->part2.sync_off_width_high & 0x30) << 4;
+ mode.htotal = mode.hdisplay + dtd->part1.h_blank;
+ mode.htotal += (dtd->part1.h_high & 0xf) << 8;
+
+ mode.vdisplay = dtd->part1.v_active;
+ mode.vdisplay += ((dtd->part1.v_high >> 4) & 0x0f) << 8;
+ mode.vsync_start = mode.vdisplay;
+ mode.vsync_start += (dtd->part2.v_sync_off_width >> 4) & 0xf;
+ mode.vsync_start += (dtd->part2.sync_off_width_high & 0x0c) << 2;
+ mode.vsync_start += dtd->part2.v_sync_off_high & 0xc0;
+ mode.vsync_end = mode.vsync_start +
+ (dtd->part2.v_sync_off_width & 0xf);
+ mode.vsync_end += (dtd->part2.sync_off_width_high & 0x3) << 4;
+ mode.vtotal = mode.vdisplay + dtd->part1.v_blank;
+ mode.vtotal += (dtd->part1.v_high & 0xf) << 8;
+
+ mode.clock = dtd->part1.clock * 10;
+
+ if (dtd->part2.dtd_flags & DTD_FLAG_INTERLACE)
+ mode.flags |= DRM_MODE_FLAG_INTERLACE;
+ if (dtd->part2.dtd_flags & DTD_FLAG_HSYNC_POSITIVE)
+ mode.flags |= DRM_MODE_FLAG_PHSYNC;
+ else
+ mode.flags |= DRM_MODE_FLAG_NHSYNC;
+ if (dtd->part2.dtd_flags & DTD_FLAG_VSYNC_POSITIVE)
+ mode.flags |= DRM_MODE_FLAG_PVSYNC;
+ else
+ mode.flags |= DRM_MODE_FLAG_NVSYNC;
+
+ drm_mode_set_crtcinfo(&mode, 0);
+
+ drm_mode_copy(pmode, &mode);
+}
+
+static bool intel_sdvo_check_supp_encode(struct intel_sdvo *intel_sdvo)
+{
+ struct intel_sdvo_encode encode;
+
+ BUILD_BUG_ON(sizeof(encode) != 2);
+ return intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_SUPP_ENCODE,
+ &encode, sizeof(encode));
+}
+
+static bool intel_sdvo_set_encode(struct intel_sdvo *intel_sdvo,
+ u8 mode)
+{
+ return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_ENCODE, &mode, 1);
+}
+
+static bool intel_sdvo_set_colorimetry(struct intel_sdvo *intel_sdvo,
+ u8 mode)
+{
+ return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_COLORIMETRY, &mode, 1);
+}
+
+static bool intel_sdvo_set_pixel_replication(struct intel_sdvo *intel_sdvo,
+ u8 pixel_repeat)
+{
+ return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_PIXEL_REPLI,
+ &pixel_repeat, 1);
+}
+
+static bool intel_sdvo_set_audio_state(struct intel_sdvo *intel_sdvo,
+ u8 audio_state)
+{
+ return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_AUDIO_STAT,
+ &audio_state, 1);
+}
+
+static bool intel_sdvo_get_hbuf_size(struct intel_sdvo *intel_sdvo,
+ u8 *hbuf_size)
+{
+ if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HBUF_INFO,
+ hbuf_size, 1))
+ return false;
+
+ /* Buffer size is 0 based, hooray! However zero means zero. */
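+ /* E.g. a reported 0x1f therefore means a 32 byte buffer. */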
+ if (*hbuf_size)
+ (*hbuf_size)++;
+
+ return true;
+}
+
+#if 0
+static void intel_sdvo_dump_hdmi_buf(struct intel_sdvo *intel_sdvo)
+{
+ int i, j;
+ u8 set_buf_index[2];
+ u8 av_split;
+ u8 buf_size;
+ u8 buf[48];
+ u8 *pos;
+
+ intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HBUF_AV_SPLIT, &av_split, 1);
+
+ for (i = 0; i <= av_split; i++) {
+ set_buf_index[0] = i;
+ set_buf_index[1] = 0;
+ intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_INDEX,
+ set_buf_index, 2);
+ intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_HBUF_INFO, NULL, 0);
+ intel_sdvo_read_response(intel_sdvo, &buf_size, 1);
+
+ pos = buf;
+ for (j = 0; j <= buf_size; j += 8) {
+ intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_HBUF_DATA,
+ NULL, 0);
+ intel_sdvo_read_response(intel_sdvo, pos, 8);
+ pos += 8;
+ }
+ }
+}
+#endif
+
+static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo,
+ unsigned int if_index, u8 tx_rate,
+ const u8 *data, unsigned int length)
+{
+ u8 set_buf_index[2] = { if_index, 0 };
+ u8 hbuf_size, tmp[8];
+ int i;
+
+ if (!intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_HBUF_INDEX,
+ set_buf_index, 2))
+ return false;
+
+ if (!intel_sdvo_get_hbuf_size(intel_sdvo, &hbuf_size))
+ return false;
+
+ DRM_DEBUG_KMS("writing sdvo hbuf: %i, length %u, hbuf_size: %i\n",
+ if_index, length, hbuf_size);
+
+ if (hbuf_size < length)
+ return false;
+
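+ /*
+ * Write the device's entire buffer in 8-byte SET_HBUF_DATA
+ * chunks, zero-padding everything beyond 'length'.
+ */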
+ for (i = 0; i < hbuf_size; i += 8) {
+ memset(tmp, 0, 8);
+ if (i < length)
+ memcpy(tmp, data + i, min_t(unsigned, 8, length - i));
+
+ if (!intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_HBUF_DATA,
+ tmp, 8))
+ return false;
+ }
+
+ return intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_HBUF_TXRATE,
+ &tx_rate, 1);
+}
+
+static ssize_t intel_sdvo_read_infoframe(struct intel_sdvo *intel_sdvo,
+ unsigned int if_index,
+ u8 *data, unsigned int length)
+{
+ u8 set_buf_index[2] = { if_index, 0 };
+ u8 hbuf_size, tx_rate, av_split;
+ int i;
+
+ if (!intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_HBUF_AV_SPLIT,
+ &av_split, 1))
+ return -ENXIO;
+
+ if (av_split < if_index)
+ return 0;
+
+ if (!intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_HBUF_INDEX,
+ set_buf_index, 2))
+ return -ENXIO;
+
+ if (!intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_HBUF_TXRATE,
+ &tx_rate, 1))
+ return -ENXIO;
+
+ if (tx_rate == SDVO_HBUF_TX_DISABLED)
+ return 0;
+
+ if (!intel_sdvo_get_hbuf_size(intel_sdvo, &hbuf_size))
+ return -ENXIO;
+
+ DRM_DEBUG_KMS("reading sdvo hbuf: %i, length %u, hbuf_size: %i\n",
+ if_index, length, hbuf_size);
+
+ hbuf_size = min_t(unsigned int, length, hbuf_size);
+
+ for (i = 0; i < hbuf_size; i += 8) {
+ if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_HBUF_DATA, NULL, 0))
+ return -ENXIO;
+ if (!intel_sdvo_read_response(intel_sdvo, &data[i],
+ min_t(unsigned int, 8, hbuf_size - i)))
+ return -ENXIO;
+ }
+
+ return hbuf_size;
+}
+
+static bool intel_sdvo_compute_avi_infoframe(struct intel_sdvo *intel_sdvo,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(intel_sdvo->base.base.dev);
+ struct hdmi_avi_infoframe *frame = &crtc_state->infoframes.avi.avi;
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+ int ret;
+
+ if (!crtc_state->has_hdmi_sink)
+ return true;
+
+ crtc_state->infoframes.enable |=
+ intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI);
+
+ ret = drm_hdmi_avi_infoframe_from_display_mode(frame,
+ conn_state->connector,
+ adjusted_mode);
+ if (ret)
+ return false;
+
+ drm_hdmi_avi_infoframe_quant_range(frame,
+ conn_state->connector,
+ adjusted_mode,
+ crtc_state->limited_color_range ?
+ HDMI_QUANTIZATION_RANGE_LIMITED :
+ HDMI_QUANTIZATION_RANGE_FULL);
+
+ ret = hdmi_avi_infoframe_check(frame);
+ if (drm_WARN_ON(&dev_priv->drm, ret))
+ return false;
+
+ return true;
+}
+
+static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(intel_sdvo->base.base.dev);
+ u8 sdvo_data[HDMI_INFOFRAME_SIZE(AVI)];
+ const union hdmi_infoframe *frame = &crtc_state->infoframes.avi;
+ ssize_t len;
+
+ if ((crtc_state->infoframes.enable &
+ intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI)) == 0)
+ return true;
+
+ if (drm_WARN_ON(&dev_priv->drm,
+ frame->any.type != HDMI_INFOFRAME_TYPE_AVI))
+ return false;
+
+ len = hdmi_infoframe_pack_only(frame, sdvo_data, sizeof(sdvo_data));
+ if (drm_WARN_ON(&dev_priv->drm, len < 0))
+ return false;
+
+ return intel_sdvo_write_infoframe(intel_sdvo, SDVO_HBUF_INDEX_AVI_IF,
+ SDVO_HBUF_TX_VSYNC,
+ sdvo_data, len);
+}
+
+static void intel_sdvo_get_avi_infoframe(struct intel_sdvo *intel_sdvo,
+ struct intel_crtc_state *crtc_state)
+{
+ u8 sdvo_data[HDMI_INFOFRAME_SIZE(AVI)];
+ union hdmi_infoframe *frame = &crtc_state->infoframes.avi;
+ ssize_t len;
+ int ret;
+
+ if (!crtc_state->has_hdmi_sink)
+ return;
+
+ len = intel_sdvo_read_infoframe(intel_sdvo, SDVO_HBUF_INDEX_AVI_IF,
+ sdvo_data, sizeof(sdvo_data));
+ if (len < 0) {
+ DRM_DEBUG_KMS("failed to read AVI infoframe\n");
+ return;
+ } else if (len == 0) {
+ return;
+ }
+
+ crtc_state->infoframes.enable |=
+ intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI);
+
+ ret = hdmi_infoframe_unpack(frame, sdvo_data, len);
+ if (ret) {
+ DRM_DEBUG_KMS("Failed to unpack AVI infoframe\n");
+ return;
+ }
+
+ if (frame->any.type != HDMI_INFOFRAME_TYPE_AVI)
+ DRM_DEBUG_KMS("Found the wrong infoframe type 0x%x (expected 0x%02x)\n",
+ frame->any.type, HDMI_INFOFRAME_TYPE_AVI);
+}
+
+static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_sdvo_tv_format format;
+ u32 format_map;
+
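+ /*
+ * The format is selected by setting a single bit in a six-byte
+ * mask; only the low 32 bits are reachable through the u32 here.
+ */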
+ format_map = 1 << conn_state->tv.mode;
+ memset(&format, 0, sizeof(format));
+ memcpy(&format, &format_map, min(sizeof(format), sizeof(format_map)));
+
+ BUILD_BUG_ON(sizeof(format) != 6);
+ return intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_TV_FORMAT,
+ &format, sizeof(format));
+}
+
+static bool
+intel_sdvo_set_output_timings_from_mode(struct intel_sdvo *intel_sdvo,
+ const struct drm_display_mode *mode)
+{
+ struct intel_sdvo_dtd output_dtd;
+
+ if (!intel_sdvo_set_target_output(intel_sdvo,
+ intel_sdvo->attached_output))
+ return false;
+
+ intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
+ if (!intel_sdvo_set_output_timing(intel_sdvo, &output_dtd))
+ return false;
+
+ return true;
+}
+
+/*
+ * Asks the sdvo controller for the preferred input mode given the output mode.
+ * Unfortunately we have to set up the full output mode to do that.
+ */
+static bool
+intel_sdvo_get_preferred_input_mode(struct intel_sdvo *intel_sdvo,
+ struct intel_sdvo_connector *intel_sdvo_connector,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct intel_sdvo_dtd input_dtd;
+
+ /* Reset the input timing to the screen. Always assume input 0. */
+ if (!intel_sdvo_set_target_input(intel_sdvo))
+ return false;
+
+ if (!intel_sdvo_create_preferred_input_timing(intel_sdvo,
+ intel_sdvo_connector,
+ mode))
+ return false;
+
+ if (!intel_sdvo_get_preferred_input_timing(intel_sdvo,
+ &input_dtd))
+ return false;
+
+ intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd);
+ intel_sdvo->dtd_sdvo_flags = input_dtd.part2.sdvo_flags;
+
+ return true;
+}
+
+static void i9xx_adjust_sdvo_tv_clock(struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = to_i915(pipe_config->uapi.crtc->dev);
+ unsigned dotclock = pipe_config->port_clock;
+ struct dpll *clock = &pipe_config->dpll;
+
+ /*
+ * SDVO TV has fixed PLL values that depend on its clock range;
+ * this mirrors the VBIOS setting.
+ */
+ if (dotclock >= 100000 && dotclock < 140500) {
+ clock->p1 = 2;
+ clock->p2 = 10;
+ clock->n = 3;
+ clock->m1 = 16;
+ clock->m2 = 8;
+ } else if (dotclock >= 140500 && dotclock <= 200000) {
+ clock->p1 = 1;
+ clock->p2 = 10;
+ clock->n = 6;
+ clock->m1 = 12;
+ clock->m2 = 8;
+ } else {
+ drm_WARN(&dev_priv->drm, 1,
+ "SDVO TV clock out of range: %i\n", dotclock);
+ }
+
+ pipe_config->clock_set = true;
+}
+
+static bool intel_has_hdmi_sink(struct intel_sdvo_connector *intel_sdvo_connector,
+ const struct drm_connector_state *conn_state)
+{
+ struct drm_connector *connector = conn_state->connector;
+
+ return intel_sdvo_connector->is_hdmi &&
+ connector->display_info.is_hdmi &&
+ READ_ONCE(to_intel_digital_connector_state(conn_state)->force_audio) != HDMI_AUDIO_OFF_DVI;
+}
+
+static bool intel_sdvo_limited_color_range(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
+
+ if ((intel_sdvo->colorimetry_cap & SDVO_COLORIMETRY_RGB220) == 0)
+ return false;
+
+ return intel_hdmi_limited_color_range(crtc_state, conn_state);
+}
+
+static int intel_sdvo_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
+{
+ struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
+ struct intel_sdvo_connector_state *intel_sdvo_state =
+ to_intel_sdvo_connector_state(conn_state);
+ struct intel_sdvo_connector *intel_sdvo_connector =
+ to_intel_sdvo_connector(conn_state->connector);
+ struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
+ struct drm_display_mode *mode = &pipe_config->hw.mode;
+
+ DRM_DEBUG_KMS("forcing bpc to 8 for SDVO\n");
+ pipe_config->pipe_bpp = 8*3;
+ pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
+
+ if (HAS_PCH_SPLIT(to_i915(encoder->base.dev)))
+ pipe_config->has_pch_encoder = true;
+
+ /*
+ * We need to construct preferred input timings based on our
+ * output timings. To do that, we have to set the output
+ * timings, even though this isn't really the right place in
+ * the sequence to do it. Oh well.
+ */
+ if (IS_TV(intel_sdvo_connector)) {
+ if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo, mode))
+ return -EINVAL;
+
+ (void) intel_sdvo_get_preferred_input_mode(intel_sdvo,
+ intel_sdvo_connector,
+ mode,
+ adjusted_mode);
+ pipe_config->sdvo_tv_clock = true;
+ } else if (IS_LVDS(intel_sdvo_connector)) {
+ const struct drm_display_mode *fixed_mode =
+ intel_panel_fixed_mode(&intel_sdvo_connector->base, mode);
+ int ret;
+
+ ret = intel_panel_compute_config(&intel_sdvo_connector->base,
+ adjusted_mode);
+ if (ret)
+ return ret;
+
+ if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo, fixed_mode))
+ return -EINVAL;
+
+ (void) intel_sdvo_get_preferred_input_mode(intel_sdvo,
+ intel_sdvo_connector,
+ mode,
+ adjusted_mode);
+ }
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return -EINVAL;
+
+ /*
+ * Make the CRTC code factor in the SDVO pixel multiplier. The
+ * SDVO device will factor out the multiplier during mode_set.
+ */
+ pipe_config->pixel_multiplier =
+ intel_sdvo_get_pixel_multiplier(adjusted_mode);
+
+ pipe_config->has_hdmi_sink = intel_has_hdmi_sink(intel_sdvo_connector, conn_state);
+
+ if (pipe_config->has_hdmi_sink) {
+ if (intel_sdvo_state->base.force_audio == HDMI_AUDIO_AUTO)
+ pipe_config->has_audio = intel_sdvo->has_hdmi_audio;
+ else
+ pipe_config->has_audio =
+ intel_sdvo_state->base.force_audio == HDMI_AUDIO_ON;
+ }
+
+ pipe_config->limited_color_range =
+ intel_sdvo_limited_color_range(encoder, pipe_config,
+ conn_state);
+
+ /* Clock computation needs to happen after pixel multiplier. */
+ if (IS_TV(intel_sdvo_connector))
+ i9xx_adjust_sdvo_tv_clock(pipe_config);
+
+ if (conn_state->picture_aspect_ratio)
+ adjusted_mode->picture_aspect_ratio =
+ conn_state->picture_aspect_ratio;
+
+ if (!intel_sdvo_compute_avi_infoframe(intel_sdvo,
+ pipe_config, conn_state)) {
+ DRM_DEBUG_KMS("bad AVI infoframe\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+#define UPDATE_PROPERTY(input, NAME) \
+ do { \
+ val = input; \
+ intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_##NAME, &val, sizeof(val)); \
+ } while (0)
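+
+/* The caller's val is u16, so each SET_* command sends a two-byte value. */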
+
+static void intel_sdvo_update_props(struct intel_sdvo *intel_sdvo,
+ const struct intel_sdvo_connector_state *sdvo_state)
+{
+ const struct drm_connector_state *conn_state = &sdvo_state->base.base;
+ struct intel_sdvo_connector *intel_sdvo_conn =
+ to_intel_sdvo_connector(conn_state->connector);
+ u16 val;
+
+ if (intel_sdvo_conn->left)
+ UPDATE_PROPERTY(sdvo_state->tv.overscan_h, OVERSCAN_H);
+
+ if (intel_sdvo_conn->top)
+ UPDATE_PROPERTY(sdvo_state->tv.overscan_v, OVERSCAN_V);
+
+ if (intel_sdvo_conn->hpos)
+ UPDATE_PROPERTY(sdvo_state->tv.hpos, HPOS);
+
+ if (intel_sdvo_conn->vpos)
+ UPDATE_PROPERTY(sdvo_state->tv.vpos, VPOS);
+
+ if (intel_sdvo_conn->saturation)
+ UPDATE_PROPERTY(conn_state->tv.saturation, SATURATION);
+
+ if (intel_sdvo_conn->contrast)
+ UPDATE_PROPERTY(conn_state->tv.contrast, CONTRAST);
+
+ if (intel_sdvo_conn->hue)
+ UPDATE_PROPERTY(conn_state->tv.hue, HUE);
+
+ if (intel_sdvo_conn->brightness)
+ UPDATE_PROPERTY(conn_state->tv.brightness, BRIGHTNESS);
+
+ if (intel_sdvo_conn->sharpness)
+ UPDATE_PROPERTY(sdvo_state->tv.sharpness, SHARPNESS);
+
+ if (intel_sdvo_conn->flicker_filter)
+ UPDATE_PROPERTY(sdvo_state->tv.flicker_filter, FLICKER_FILTER);
+
+ if (intel_sdvo_conn->flicker_filter_2d)
+ UPDATE_PROPERTY(sdvo_state->tv.flicker_filter_2d, FLICKER_FILTER_2D);
+
+ if (intel_sdvo_conn->flicker_filter_adaptive)
+ UPDATE_PROPERTY(sdvo_state->tv.flicker_filter_adaptive, FLICKER_FILTER_ADAPTIVE);
+
+ if (intel_sdvo_conn->tv_chroma_filter)
+ UPDATE_PROPERTY(sdvo_state->tv.chroma_filter, TV_CHROMA_FILTER);
+
+ if (intel_sdvo_conn->tv_luma_filter)
+ UPDATE_PROPERTY(sdvo_state->tv.luma_filter, TV_LUMA_FILTER);
+
+ if (intel_sdvo_conn->dot_crawl)
+ UPDATE_PROPERTY(sdvo_state->tv.dot_crawl, DOT_CRAWL);
+
+#undef UPDATE_PROPERTY
+}
+
+static void intel_sdvo_pre_enable(struct intel_atomic_state *state,
+ struct intel_encoder *intel_encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
+ const struct intel_sdvo_connector_state *sdvo_state =
+ to_intel_sdvo_connector_state(conn_state);
+ struct intel_sdvo_connector *intel_sdvo_connector =
+ to_intel_sdvo_connector(conn_state->connector);
+ const struct drm_display_mode *mode = &crtc_state->hw.mode;
+ struct intel_sdvo *intel_sdvo = to_sdvo(intel_encoder);
+ u32 sdvox;
+ struct intel_sdvo_in_out_map in_out;
+ struct intel_sdvo_dtd input_dtd, output_dtd;
+ int rate;
+
+ intel_sdvo_update_props(intel_sdvo, sdvo_state);
+
+ /*
+ * First, set the input mapping for the first input to our controlled
+ * output. This is only correct if we're a single-input device, in
+ * which case the first input is the output from the appropriate SDVO
+ * channel on the motherboard. In a two-input device, the first input
+ * will be SDVOB and the second SDVOC.
+ */
+ in_out.in0 = intel_sdvo->attached_output;
+ in_out.in1 = 0;
+
+ intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_IN_OUT_MAP,
+ &in_out, sizeof(in_out));
+
+ /* Set the output timings to the screen */
+ if (!intel_sdvo_set_target_output(intel_sdvo,
+ intel_sdvo->attached_output))
+ return;
+
+ /* lvds has a special fixed output timing. */
+ if (IS_LVDS(intel_sdvo_connector)) {
+ const struct drm_display_mode *fixed_mode =
+ intel_panel_fixed_mode(&intel_sdvo_connector->base, mode);
+
+ intel_sdvo_get_dtd_from_mode(&output_dtd, fixed_mode);
+ } else {
+ intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
+ }
+ if (!intel_sdvo_set_output_timing(intel_sdvo, &output_dtd))
+ drm_info(&dev_priv->drm,
+ "Setting output timings on %s failed\n",
+ SDVO_NAME(intel_sdvo));
+
+ /* Set the input timing to the screen. Always assume input 0. */
+ if (!intel_sdvo_set_target_input(intel_sdvo))
+ return;
+
+ if (crtc_state->has_hdmi_sink) {
+ intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI);
+ intel_sdvo_set_colorimetry(intel_sdvo,
+ crtc_state->limited_color_range ?
+ SDVO_COLORIMETRY_RGB220 :
+ SDVO_COLORIMETRY_RGB256);
+ intel_sdvo_set_avi_infoframe(intel_sdvo, crtc_state);
+ intel_sdvo_set_pixel_replication(intel_sdvo,
+ !!(adjusted_mode->flags &
+ DRM_MODE_FLAG_DBLCLK));
+ } else
+ intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_DVI);
+
+ if (IS_TV(intel_sdvo_connector) &&
+ !intel_sdvo_set_tv_format(intel_sdvo, conn_state))
+ return;
+
+ intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
+
+ if (IS_TV(intel_sdvo_connector) || IS_LVDS(intel_sdvo_connector))
+ input_dtd.part2.sdvo_flags = intel_sdvo->dtd_sdvo_flags;
+ if (!intel_sdvo_set_input_timing(intel_sdvo, &input_dtd))
+ drm_info(&dev_priv->drm,
+ "Setting input timings on %s failed\n",
+ SDVO_NAME(intel_sdvo));
+
+ switch (crtc_state->pixel_multiplier) {
+ default:
+ drm_WARN(&dev_priv->drm, 1,
+ "unknown pixel multiplier specified\n");
+ fallthrough;
+ case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break;
+ case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break;
+ case 4: rate = SDVO_CLOCK_RATE_MULT_4X; break;
+ }
+ if (!intel_sdvo_set_clock_rate_mult(intel_sdvo, rate))
+ return;
+
+ /* Set the SDVO control regs. */
+ if (DISPLAY_VER(dev_priv) >= 4) {
+ /*
+ * The real mode polarity is set by the SDVO commands, using
+ * struct intel_sdvo_dtd.
+ */
+ sdvox = SDVO_VSYNC_ACTIVE_HIGH | SDVO_HSYNC_ACTIVE_HIGH;
+ if (DISPLAY_VER(dev_priv) < 5)
+ sdvox |= SDVO_BORDER_ENABLE;
+ } else {
+ sdvox = intel_de_read(dev_priv, intel_sdvo->sdvo_reg);
+ if (intel_sdvo->port == PORT_B)
+ sdvox &= SDVOB_PRESERVE_MASK;
+ else
+ sdvox &= SDVOC_PRESERVE_MASK;
+ sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
+ }
+
+ if (HAS_PCH_CPT(dev_priv))
+ sdvox |= SDVO_PIPE_SEL_CPT(crtc->pipe);
+ else
+ sdvox |= SDVO_PIPE_SEL(crtc->pipe);
+
+ if (DISPLAY_VER(dev_priv) >= 4) {
+ /* done in crtc_mode_set as the dpll_md reg must be written early */
+ } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
+ IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
+ /* done in crtc_mode_set as it lives inside the dpll register */
+ } else {
+ sdvox |= (crtc_state->pixel_multiplier - 1)
+ << SDVO_PORT_MULTIPLY_SHIFT;
+ }
+
+ if (input_dtd.part2.sdvo_flags & SDVO_NEED_TO_STALL &&
+ DISPLAY_VER(dev_priv) < 5)
+ sdvox |= SDVO_STALL_SELECT;
+ intel_sdvo_write_sdvox(intel_sdvo, sdvox);
+}
+
+static bool intel_sdvo_connector_get_hw_state(struct intel_connector *connector)
+{
+ struct intel_sdvo_connector *intel_sdvo_connector =
+ to_intel_sdvo_connector(&connector->base);
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+ u16 active_outputs = 0;
+
+ intel_sdvo_get_active_outputs(intel_sdvo, &active_outputs);
+
+ return active_outputs & intel_sdvo_connector->output_flag;
+}
+
+bool intel_sdvo_port_enabled(struct drm_i915_private *dev_priv,
+ i915_reg_t sdvo_reg, enum pipe *pipe)
+{
+ u32 val;
+
+ val = intel_de_read(dev_priv, sdvo_reg);
+
+ /* asserts want to know the pipe even if the port is disabled */
+ if (HAS_PCH_CPT(dev_priv))
+ *pipe = (val & SDVO_PIPE_SEL_MASK_CPT) >> SDVO_PIPE_SEL_SHIFT_CPT;
+ else if (IS_CHERRYVIEW(dev_priv))
+ *pipe = (val & SDVO_PIPE_SEL_MASK_CHV) >> SDVO_PIPE_SEL_SHIFT_CHV;
+ else
+ *pipe = (val & SDVO_PIPE_SEL_MASK) >> SDVO_PIPE_SEL_SHIFT;
+
+ return val & SDVO_ENABLE;
+}
+
+static bool intel_sdvo_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
+ u16 active_outputs = 0;
+ bool ret;
+
+ intel_sdvo_get_active_outputs(intel_sdvo, &active_outputs);
+
+ ret = intel_sdvo_port_enabled(dev_priv, intel_sdvo->sdvo_reg, pipe);
+
+ return ret || active_outputs;
+}
+
+static void intel_sdvo_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
+ struct intel_sdvo_dtd dtd;
+ int encoder_pixel_multiplier = 0;
+ int dotclock;
+ u32 flags = 0, sdvox;
+ u8 val;
+ bool ret;
+
+ pipe_config->output_types |= BIT(INTEL_OUTPUT_SDVO);
+
+ sdvox = intel_de_read(dev_priv, intel_sdvo->sdvo_reg);
+
+ ret = intel_sdvo_get_input_timing(intel_sdvo, &dtd);
+ if (!ret) {
+ /*
+ * Some sdvo encoders are not spec compliant and don't
+ * implement the mandatory get_timings function.
+ */
+ drm_dbg(&dev_priv->drm, "failed to retrieve SDVO DTD\n");
+ pipe_config->quirks |= PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS;
+ } else {
+ if (dtd.part2.dtd_flags & DTD_FLAG_HSYNC_POSITIVE)
+ flags |= DRM_MODE_FLAG_PHSYNC;
+ else
+ flags |= DRM_MODE_FLAG_NHSYNC;
+
+ if (dtd.part2.dtd_flags & DTD_FLAG_VSYNC_POSITIVE)
+ flags |= DRM_MODE_FLAG_PVSYNC;
+ else
+ flags |= DRM_MODE_FLAG_NVSYNC;
+ }
+
+ pipe_config->hw.adjusted_mode.flags |= flags;
+
+ /*
+ * Pixel multiplier readout is tricky: only on i915g/gm is it stored
+ * in the sdvo port register; on all other platforms it is part of the
+ * dpll state. Since the general pipe state readout happens before
+ * encoder->get_config, we already have a valid pixel multiplier on
+ * all other platforms.
+ */
+ if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
+ pipe_config->pixel_multiplier =
+ ((sdvox & SDVO_PORT_MULTIPLY_MASK)
+ >> SDVO_PORT_MULTIPLY_SHIFT) + 1;
+ }
+
+ dotclock = pipe_config->port_clock;
+
+ if (pipe_config->pixel_multiplier)
+ dotclock /= pipe_config->pixel_multiplier;
+
+ pipe_config->hw.adjusted_mode.crtc_clock = dotclock;
+
+ /* Cross check the port pixel multiplier with the sdvo encoder state. */
+ if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_CLOCK_RATE_MULT,
+ &val, 1)) {
+ switch (val) {
+ case SDVO_CLOCK_RATE_MULT_1X:
+ encoder_pixel_multiplier = 1;
+ break;
+ case SDVO_CLOCK_RATE_MULT_2X:
+ encoder_pixel_multiplier = 2;
+ break;
+ case SDVO_CLOCK_RATE_MULT_4X:
+ encoder_pixel_multiplier = 4;
+ break;
+ }
+ }
+
+ drm_WARN(dev,
+ encoder_pixel_multiplier != pipe_config->pixel_multiplier,
+ "SDVO pixel multiplier mismatch, port: %i, encoder: %i\n",
+ pipe_config->pixel_multiplier, encoder_pixel_multiplier);
+
+ if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_COLORIMETRY,
+ &val, 1)) {
+ if (val == SDVO_COLORIMETRY_RGB220)
+ pipe_config->limited_color_range = true;
+ }
+
+ if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_AUDIO_STAT,
+ &val, 1)) {
+ u8 mask = SDVO_AUDIO_ELD_VALID | SDVO_AUDIO_PRESENCE_DETECT;
+
+ if ((val & mask) == mask)
+ pipe_config->has_audio = true;
+ }
+
+ if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ENCODE,
+ &val, 1)) {
+ if (val == SDVO_ENCODE_HDMI)
+ pipe_config->has_hdmi_sink = true;
+ }
+
+ intel_sdvo_get_avi_infoframe(intel_sdvo, pipe_config);
+}
+
+static void intel_sdvo_disable_audio(struct intel_sdvo *intel_sdvo)
+{
+ intel_sdvo_set_audio_state(intel_sdvo, 0);
+}
+
+static void intel_sdvo_enable_audio(struct intel_sdvo *intel_sdvo,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+ struct drm_connector *connector = conn_state->connector;
+ u8 *eld = connector->eld;
+
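+ /* ELD byte 6 is the A/V sync delay in units of 2 ms, hence the halving. */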
+ eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
+
+ intel_sdvo_set_audio_state(intel_sdvo, 0);
+
+ intel_sdvo_write_infoframe(intel_sdvo, SDVO_HBUF_INDEX_ELD,
+ SDVO_HBUF_TX_DISABLED,
+ eld, drm_eld_size(eld));
+
+ intel_sdvo_set_audio_state(intel_sdvo, SDVO_AUDIO_ELD_VALID |
+ SDVO_AUDIO_PRESENCE_DETECT);
+}
+
+static void intel_disable_sdvo(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
+ u32 temp;
+
+ if (old_crtc_state->has_audio)
+ intel_sdvo_disable_audio(intel_sdvo);
+
+ intel_sdvo_set_active_outputs(intel_sdvo, 0);
+ if (0)
+ intel_sdvo_set_encoder_power_state(intel_sdvo,
+ DRM_MODE_DPMS_OFF);
+
+ temp = intel_de_read(dev_priv, intel_sdvo->sdvo_reg);
+
+ temp &= ~SDVO_ENABLE;
+ intel_sdvo_write_sdvox(intel_sdvo, temp);
+
+ /*
+ * HW workaround for IBX, we need to move the port
+ * to transcoder A after disabling it to allow the
+ * matching DP port to be enabled on transcoder A.
+ */
+ if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B) {
+ /*
+ * We get CPU/PCH FIFO underruns on the other pipe when
+ * doing the workaround. Sweep them under the rug.
+ */
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
+ intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
+
+ temp &= ~SDVO_PIPE_SEL_MASK;
+ temp |= SDVO_ENABLE | SDVO_PIPE_SEL(PIPE_A);
+ intel_sdvo_write_sdvox(intel_sdvo, temp);
+
+ temp &= ~SDVO_ENABLE;
+ intel_sdvo_write_sdvox(intel_sdvo, temp);
+
+ intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
+ intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
+ }
+}
+
+static void pch_disable_sdvo(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+}
+
+static void pch_post_disable_sdvo(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ intel_disable_sdvo(state, encoder, old_crtc_state, old_conn_state);
+}
+
+static void intel_enable_sdvo(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+ u32 temp;
+ bool input1, input2;
+ int i;
+ bool success;
+
+ temp = intel_de_read(dev_priv, intel_sdvo->sdvo_reg);
+ temp |= SDVO_ENABLE;
+ intel_sdvo_write_sdvox(intel_sdvo, temp);
+
+ for (i = 0; i < 2; i++)
+ intel_crtc_wait_for_next_vblank(crtc);
+
+ success = intel_sdvo_get_trained_inputs(intel_sdvo, &input1, &input2);
+ /*
+ * Warn if the device reported failure to sync.
+ *
+ * A lot of SDVO devices fail to notify of sync, but given that
+ * the status is a success, we did succeed.
+ */
+ if (success && !input1) {
+ drm_dbg_kms(&dev_priv->drm,
+ "First %s output reported failure to "
+ "sync\n", SDVO_NAME(intel_sdvo));
+ }
+
+ if (0)
+ intel_sdvo_set_encoder_power_state(intel_sdvo,
+ DRM_MODE_DPMS_ON);
+ intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output);
+
+ if (pipe_config->has_audio)
+ intel_sdvo_enable_audio(intel_sdvo, pipe_config, conn_state);
+}
+
+static enum drm_mode_status
+intel_sdvo_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct drm_i915_private *i915 = to_i915(connector->dev);
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(to_intel_connector(connector));
+ struct intel_sdvo_connector *intel_sdvo_connector =
+ to_intel_sdvo_connector(connector);
+ bool has_hdmi_sink = intel_has_hdmi_sink(intel_sdvo_connector, connector->state);
+ int max_dotclk = i915->max_dotclk_freq;
+ enum drm_mode_status status;
+ int clock = mode->clock;
+
+ status = intel_cpu_transcoder_mode_valid(i915, mode);
+ if (status != MODE_OK)
+ return status;
+
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
+ if (clock > max_dotclk)
+ return MODE_CLOCK_HIGH;
+
+ if (mode->flags & DRM_MODE_FLAG_DBLCLK) {
+ if (!has_hdmi_sink)
+ return MODE_CLOCK_LOW;
+ clock *= 2;
+ }
+
+ if (intel_sdvo->pixel_clock_min > clock)
+ return MODE_CLOCK_LOW;
+
+ if (intel_sdvo->pixel_clock_max < clock)
+ return MODE_CLOCK_HIGH;
+
+ if (IS_LVDS(intel_sdvo_connector)) {
+ status = intel_panel_mode_valid(&intel_sdvo_connector->base, mode);
+ if (status != MODE_OK)
+ return status;
+ }
+
+ return MODE_OK;
+}
+
+static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct intel_sdvo_caps *caps)
+{
+ BUILD_BUG_ON(sizeof(*caps) != 8);
+ if (!intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_DEVICE_CAPS,
+ caps, sizeof(*caps)))
+ return false;
+
+ DRM_DEBUG_KMS("SDVO capabilities:\n"
+ " vendor_id: %d\n"
+ " device_id: %d\n"
+ " device_rev_id: %d\n"
+ " sdvo_version_major: %d\n"
+ " sdvo_version_minor: %d\n"
+ " sdvo_inputs_mask: %d\n"
+ " smooth_scaling: %d\n"
+ " sharp_scaling: %d\n"
+ " up_scaling: %d\n"
+ " down_scaling: %d\n"
+ " stall_support: %d\n"
+ " output_flags: %d\n",
+ caps->vendor_id,
+ caps->device_id,
+ caps->device_rev_id,
+ caps->sdvo_version_major,
+ caps->sdvo_version_minor,
+ caps->sdvo_inputs_mask,
+ caps->smooth_scaling,
+ caps->sharp_scaling,
+ caps->up_scaling,
+ caps->down_scaling,
+ caps->stall_support,
+ caps->output_flags);
+
+ return true;
+}
+
+static u8 intel_sdvo_get_colorimetry_cap(struct intel_sdvo *intel_sdvo)
+{
+ u8 cap;
+
+ if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_COLORIMETRY_CAP,
+ &cap, sizeof(cap)))
+ return SDVO_COLORIMETRY_RGB256;
+
+ return cap;
+}
+
+static u16 intel_sdvo_get_hotplug_support(struct intel_sdvo *intel_sdvo)
+{
+ struct drm_i915_private *dev_priv = to_i915(intel_sdvo->base.base.dev);
+ u16 hotplug;
+
+ if (!I915_HAS_HOTPLUG(dev_priv))
+ return 0;
+
+ /*
+ * HW erratum: SDVO hotplug is broken on all i945G chips; there's noise
+ * on the line.
+ */
+ if (IS_I945G(dev_priv) || IS_I945GM(dev_priv))
+ return 0;
+
+ if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
+ &hotplug, sizeof(hotplug)))
+ return 0;
+
+ return hotplug;
+}
+
+static void intel_sdvo_enable_hotplug(struct intel_encoder *encoder)
+{
+ struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
+
+ intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG,
+ &intel_sdvo->hotplug_active, 2);
+}
+
+static enum intel_hotplug_state
+intel_sdvo_hotplug(struct intel_encoder *encoder,
+ struct intel_connector *connector)
+{
+ intel_sdvo_enable_hotplug(encoder);
+
+ return intel_encoder_hotplug(encoder, connector);
+}
+
+static bool
+intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo)
+{
+ /* Is there more than one type of output? */
+ return hweight16(intel_sdvo->caps.output_flags) > 1;
+}
+
+static struct edid *
+intel_sdvo_get_edid(struct drm_connector *connector)
+{
+ struct intel_sdvo *sdvo = intel_attached_sdvo(to_intel_connector(connector));
+ return drm_get_edid(connector, &sdvo->ddc);
+}
+
+/* Mac mini hack -- use the same DDC as the analog connector */
+static struct edid *
+intel_sdvo_get_analog_edid(struct drm_connector *connector)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
+
+ return drm_get_edid(connector,
+ intel_gmbus_get_adapter(dev_priv,
+ dev_priv->display.vbt.crt_ddc_pin));
+}
+
+static enum drm_connector_status
+intel_sdvo_tmds_sink_detect(struct drm_connector *connector)
+{
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(to_intel_connector(connector));
+ struct intel_sdvo_connector *intel_sdvo_connector =
+ to_intel_sdvo_connector(connector);
+ enum drm_connector_status status;
+ struct edid *edid;
+
+ edid = intel_sdvo_get_edid(connector);
+
+ if (edid == NULL && intel_sdvo_multifunc_encoder(intel_sdvo)) {
+ u8 ddc, saved_ddc = intel_sdvo->ddc_bus;
+
+ /*
+ * Don't pass 1 as the DDC bus switch argument when fetching
+ * the EDID; that bus is reserved for the SDVO SPD ROM.
+ */
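+ /*
+ * E.g. a ddc_bus of 0x08 probes selectors 0x04 and then 0x02,
+ * stopping before bit 0.
+ */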
+ for (ddc = intel_sdvo->ddc_bus >> 1; ddc > 1; ddc >>= 1) {
+ intel_sdvo->ddc_bus = ddc;
+ edid = intel_sdvo_get_edid(connector);
+ if (edid)
+ break;
+ }
+ /*
+ * If we found the EDID on the other bus,
+ * assume that is the correct DDC bus.
+ */
+ if (edid == NULL)
+ intel_sdvo->ddc_bus = saved_ddc;
+ }
+
+ /*
+ * When there is no EDID and no monitor is connected to the VGA
+ * port, try the CRT DDC to read the EDID for the DVI connector.
+ */
+ if (edid == NULL)
+ edid = intel_sdvo_get_analog_edid(connector);
+
+ status = connector_status_unknown;
+ if (edid != NULL) {
+ /* DDC bus is shared, match EDID to connector type */
+ if (edid->input & DRM_EDID_INPUT_DIGITAL) {
+ status = connector_status_connected;
+ if (intel_sdvo_connector->is_hdmi) {
+ intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid);
+ }
+ } else
+ status = connector_status_disconnected;
+ kfree(edid);
+ }
+
+ return status;
+}
+
+static bool
+intel_sdvo_connector_matches_edid(struct intel_sdvo_connector *sdvo,
+ struct edid *edid)
+{
+ bool monitor_is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
+ bool connector_is_digital = !!IS_DIGITAL(sdvo);
+
+ DRM_DEBUG_KMS("connector_is_digital? %d, monitor_is_digital? %d\n",
+ connector_is_digital, monitor_is_digital);
+ return connector_is_digital == monitor_is_digital;
+}
+
+static enum drm_connector_status
+intel_sdvo_detect(struct drm_connector *connector, bool force)
+{
+ struct drm_i915_private *i915 = to_i915(connector->dev);
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(to_intel_connector(connector));
+ struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
+ enum drm_connector_status ret;
+ u16 response;
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+ connector->base.id, connector->name);
+
+ if (!INTEL_DISPLAY_ENABLED(i915))
+ return connector_status_disconnected;
+
+ if (!intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_ATTACHED_DISPLAYS,
+ &response, 2))
+ return connector_status_unknown;
+
+ DRM_DEBUG_KMS("SDVO response %d %d [%x]\n",
+ response & 0xff, response >> 8,
+ intel_sdvo_connector->output_flag);
+
+ if (response == 0)
+ return connector_status_disconnected;
+
+ intel_sdvo->attached_output = response;
+
+ intel_sdvo->has_hdmi_audio = false;
+
+ if ((intel_sdvo_connector->output_flag & response) == 0)
+ ret = connector_status_disconnected;
+ else if (IS_TMDS(intel_sdvo_connector))
+ ret = intel_sdvo_tmds_sink_detect(connector);
+ else {
+ struct edid *edid;
+
+ /* if we have an edid check it matches the connection */
+ edid = intel_sdvo_get_edid(connector);
+ if (edid == NULL)
+ edid = intel_sdvo_get_analog_edid(connector);
+ if (edid != NULL) {
+ if (intel_sdvo_connector_matches_edid(intel_sdvo_connector,
+ edid))
+ ret = connector_status_connected;
+ else
+ ret = connector_status_disconnected;
+
+ kfree(edid);
+ } else
+ ret = connector_status_connected;
+ }
+
+ return ret;
+}
+
+static int intel_sdvo_get_ddc_modes(struct drm_connector *connector)
+{
+ int num_modes = 0;
+ struct edid *edid;
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+ connector->base.id, connector->name);
+
+ /* set the bus switch and get the modes */
+ edid = intel_sdvo_get_edid(connector);
+
+ /*
+ * Mac mini hack. On this device, the DVI-I connector shares one DDC
+ * link between analog and digital outputs. So, if the regular SDVO
+ * DDC fails, check to see if the analog output is disconnected, in
+ * which case we'll look there for the digital DDC data.
+ */
+ if (!edid)
+ edid = intel_sdvo_get_analog_edid(connector);
+
+ if (!edid)
+ return 0;
+
+ if (intel_sdvo_connector_matches_edid(to_intel_sdvo_connector(connector),
+ edid))
+ num_modes += intel_connector_update_modes(connector, edid);
+
+ kfree(edid);
+
+ return num_modes;
+}
+
+/*
+ * Set of SDVO TV modes.
+ * Note! This is in reply order (see loop in get_tv_modes).
+ * XXX: all 60Hz refresh?
+ */
+static const struct drm_display_mode sdvo_tv_modes[] = {
+ { DRM_MODE("320x200", DRM_MODE_TYPE_DRIVER, 5815, 320, 321, 384,
+ 416, 0, 200, 201, 232, 233, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("320x240", DRM_MODE_TYPE_DRIVER, 6814, 320, 321, 384,
+ 416, 0, 240, 241, 272, 273, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("400x300", DRM_MODE_TYPE_DRIVER, 9910, 400, 401, 464,
+ 496, 0, 300, 301, 332, 333, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("640x350", DRM_MODE_TYPE_DRIVER, 16913, 640, 641, 704,
+ 736, 0, 350, 351, 382, 383, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("640x400", DRM_MODE_TYPE_DRIVER, 19121, 640, 641, 704,
+ 736, 0, 400, 401, 432, 433, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 22654, 640, 641, 704,
+ 736, 0, 480, 481, 512, 513, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("704x480", DRM_MODE_TYPE_DRIVER, 24624, 704, 705, 768,
+ 800, 0, 480, 481, 512, 513, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("704x576", DRM_MODE_TYPE_DRIVER, 29232, 704, 705, 768,
+ 800, 0, 576, 577, 608, 609, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("720x350", DRM_MODE_TYPE_DRIVER, 18751, 720, 721, 784,
+ 816, 0, 350, 351, 382, 383, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 21199, 720, 721, 784,
+ 816, 0, 400, 401, 432, 433, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 25116, 720, 721, 784,
+ 816, 0, 480, 481, 512, 513, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("720x540", DRM_MODE_TYPE_DRIVER, 28054, 720, 721, 784,
+ 816, 0, 540, 541, 572, 573, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 29816, 720, 721, 784,
+ 816, 0, 576, 577, 608, 609, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("768x576", DRM_MODE_TYPE_DRIVER, 31570, 768, 769, 832,
+ 864, 0, 576, 577, 608, 609, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 34030, 800, 801, 864,
+ 896, 0, 600, 601, 632, 633, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 36581, 832, 833, 896,
+ 928, 0, 624, 625, 656, 657, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("920x766", DRM_MODE_TYPE_DRIVER, 48707, 920, 921, 984,
+ 1016, 0, 766, 767, 798, 799, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 53827, 1024, 1025, 1088,
+ 1120, 0, 768, 769, 800, 801, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 87265, 1280, 1281, 1344,
+ 1376, 0, 1024, 1025, 1056, 1057, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+};
+
+static int intel_sdvo_get_tv_modes(struct drm_connector *connector)
+{
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(to_intel_connector(connector));
+ const struct drm_connector_state *conn_state = connector->state;
+ struct intel_sdvo_sdtv_resolution_request tv_res;
+ u32 reply = 0, format_map = 0;
+ int num_modes = 0;
+ int i;
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+ connector->base.id, connector->name);
+
+ /*
+ * Read the list of supported input resolutions for the selected TV
+ * format.
+ */
+ format_map = 1 << conn_state->tv.mode;
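+	/*
+	 * The request is a 3-byte bitmask with one bit per TV format, so
+	 * only copy as many bytes as the request structure holds.
+	 */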
+ memcpy(&tv_res, &format_map,
+ min(sizeof(format_map), sizeof(struct intel_sdvo_sdtv_resolution_request)));
+
+ if (!intel_sdvo_set_target_output(intel_sdvo, intel_sdvo->attached_output))
+ return 0;
+
+ BUILD_BUG_ON(sizeof(tv_res) != 3);
+ if (!intel_sdvo_write_cmd(intel_sdvo,
+ SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT,
+ &tv_res, sizeof(tv_res)))
+ return 0;
+ if (!intel_sdvo_read_response(intel_sdvo, &reply, 3))
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(sdvo_tv_modes); i++) {
+ if (reply & (1 << i)) {
+			struct drm_display_mode *nmode;
+
+			nmode = drm_mode_duplicate(connector->dev,
+ &sdvo_tv_modes[i]);
+ if (nmode) {
+ drm_mode_probed_add(connector, nmode);
+ num_modes++;
+ }
+ }
+ }
+
+ return num_modes;
+}
+
+static int intel_sdvo_get_lvds_modes(struct drm_connector *connector)
+{
+ struct intel_sdvo *intel_sdvo = intel_attached_sdvo(to_intel_connector(connector));
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
+ int num_modes = 0;
+
+ drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
+ connector->base.id, connector->name);
+
+ num_modes += intel_panel_get_modes(to_intel_connector(connector));
+ num_modes += intel_ddc_get_modes(connector, &intel_sdvo->ddc);
+
+ return num_modes;
+}
+
+static int intel_sdvo_get_modes(struct drm_connector *connector)
+{
+ struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
+
+ if (IS_TV(intel_sdvo_connector))
+ return intel_sdvo_get_tv_modes(connector);
+ else if (IS_LVDS(intel_sdvo_connector))
+ return intel_sdvo_get_lvds_modes(connector);
+ else
+ return intel_sdvo_get_ddc_modes(connector);
+}
+
+static int
+intel_sdvo_connector_atomic_get_property(struct drm_connector *connector,
+ const struct drm_connector_state *state,
+ struct drm_property *property,
+ u64 *val)
+{
+ struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
+ const struct intel_sdvo_connector_state *sdvo_state = to_intel_sdvo_connector_state((void *)state);
+
+ if (property == intel_sdvo_connector->tv_format) {
+ int i;
+
+ for (i = 0; i < intel_sdvo_connector->format_supported_num; i++)
+ if (state->tv.mode == intel_sdvo_connector->tv_format_supported[i]) {
+ *val = i;
+
+ return 0;
+ }
+
+ drm_WARN_ON(connector->dev, 1);
+ *val = 0;
+ } else if (property == intel_sdvo_connector->top ||
+ property == intel_sdvo_connector->bottom)
+ *val = intel_sdvo_connector->max_vscan - sdvo_state->tv.overscan_v;
+ else if (property == intel_sdvo_connector->left ||
+ property == intel_sdvo_connector->right)
+ *val = intel_sdvo_connector->max_hscan - sdvo_state->tv.overscan_h;
+ else if (property == intel_sdvo_connector->hpos)
+ *val = sdvo_state->tv.hpos;
+ else if (property == intel_sdvo_connector->vpos)
+ *val = sdvo_state->tv.vpos;
+ else if (property == intel_sdvo_connector->saturation)
+ *val = state->tv.saturation;
+ else if (property == intel_sdvo_connector->contrast)
+ *val = state->tv.contrast;
+ else if (property == intel_sdvo_connector->hue)
+ *val = state->tv.hue;
+ else if (property == intel_sdvo_connector->brightness)
+ *val = state->tv.brightness;
+ else if (property == intel_sdvo_connector->sharpness)
+ *val = sdvo_state->tv.sharpness;
+ else if (property == intel_sdvo_connector->flicker_filter)
+ *val = sdvo_state->tv.flicker_filter;
+ else if (property == intel_sdvo_connector->flicker_filter_2d)
+ *val = sdvo_state->tv.flicker_filter_2d;
+ else if (property == intel_sdvo_connector->flicker_filter_adaptive)
+ *val = sdvo_state->tv.flicker_filter_adaptive;
+ else if (property == intel_sdvo_connector->tv_chroma_filter)
+ *val = sdvo_state->tv.chroma_filter;
+ else if (property == intel_sdvo_connector->tv_luma_filter)
+ *val = sdvo_state->tv.luma_filter;
+ else if (property == intel_sdvo_connector->dot_crawl)
+ *val = sdvo_state->tv.dot_crawl;
+ else
+ return intel_digital_connector_atomic_get_property(connector, state, property, val);
+
+ return 0;
+}
+
+static int
+intel_sdvo_connector_atomic_set_property(struct drm_connector *connector,
+ struct drm_connector_state *state,
+ struct drm_property *property,
+ u64 val)
+{
+ struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
+ struct intel_sdvo_connector_state *sdvo_state = to_intel_sdvo_connector_state(state);
+
+ if (property == intel_sdvo_connector->tv_format) {
+ state->tv.mode = intel_sdvo_connector->tv_format_supported[val];
+
+ if (state->crtc) {
+ struct drm_crtc_state *crtc_state =
+ drm_atomic_get_new_crtc_state(state->state, state->crtc);
+
+ crtc_state->connectors_changed = true;
+ }
+ } else if (property == intel_sdvo_connector->top ||
+ property == intel_sdvo_connector->bottom)
+		/* These cannot be set independently of each other. */
+ sdvo_state->tv.overscan_v = intel_sdvo_connector->max_vscan - val;
+ else if (property == intel_sdvo_connector->left ||
+ property == intel_sdvo_connector->right)
+		/* These cannot be set independently of each other. */
+ sdvo_state->tv.overscan_h = intel_sdvo_connector->max_hscan - val;
+ else if (property == intel_sdvo_connector->hpos)
+ sdvo_state->tv.hpos = val;
+ else if (property == intel_sdvo_connector->vpos)
+ sdvo_state->tv.vpos = val;
+ else if (property == intel_sdvo_connector->saturation)
+ state->tv.saturation = val;
+ else if (property == intel_sdvo_connector->contrast)
+ state->tv.contrast = val;
+ else if (property == intel_sdvo_connector->hue)
+ state->tv.hue = val;
+ else if (property == intel_sdvo_connector->brightness)
+ state->tv.brightness = val;
+ else if (property == intel_sdvo_connector->sharpness)
+ sdvo_state->tv.sharpness = val;
+ else if (property == intel_sdvo_connector->flicker_filter)
+ sdvo_state->tv.flicker_filter = val;
+ else if (property == intel_sdvo_connector->flicker_filter_2d)
+ sdvo_state->tv.flicker_filter_2d = val;
+ else if (property == intel_sdvo_connector->flicker_filter_adaptive)
+ sdvo_state->tv.flicker_filter_adaptive = val;
+ else if (property == intel_sdvo_connector->tv_chroma_filter)
+ sdvo_state->tv.chroma_filter = val;
+ else if (property == intel_sdvo_connector->tv_luma_filter)
+ sdvo_state->tv.luma_filter = val;
+ else if (property == intel_sdvo_connector->dot_crawl)
+ sdvo_state->tv.dot_crawl = val;
+ else
+ return intel_digital_connector_atomic_set_property(connector, state, property, val);
+
+ return 0;
+}
+
+static int
+intel_sdvo_connector_register(struct drm_connector *connector)
+{
+ struct intel_sdvo *sdvo = intel_attached_sdvo(to_intel_connector(connector));
+ int ret;
+
+ ret = intel_connector_register(connector);
+ if (ret)
+ return ret;
+
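+	/* Link the connector's sysfs node to its SDVO DDC proxy adapter. */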
+ return sysfs_create_link(&connector->kdev->kobj,
+ &sdvo->ddc.dev.kobj,
+ sdvo->ddc.dev.kobj.name);
+}
+
+static void
+intel_sdvo_connector_unregister(struct drm_connector *connector)
+{
+ struct intel_sdvo *sdvo = intel_attached_sdvo(to_intel_connector(connector));
+
+ sysfs_remove_link(&connector->kdev->kobj,
+ sdvo->ddc.dev.kobj.name);
+ intel_connector_unregister(connector);
+}
+
+static struct drm_connector_state *
+intel_sdvo_connector_duplicate_state(struct drm_connector *connector)
+{
+ struct intel_sdvo_connector_state *state;
+
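+	/* Copy the full subclassed state, including the SDVO TV settings. */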
+ state = kmemdup(connector->state, sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
+
+ __drm_atomic_helper_connector_duplicate_state(connector, &state->base.base);
+ return &state->base.base;
+}
+
+static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
+ .detect = intel_sdvo_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .atomic_get_property = intel_sdvo_connector_atomic_get_property,
+ .atomic_set_property = intel_sdvo_connector_atomic_set_property,
+ .late_register = intel_sdvo_connector_register,
+ .early_unregister = intel_sdvo_connector_unregister,
+ .destroy = intel_connector_destroy,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_duplicate_state = intel_sdvo_connector_duplicate_state,
+};
+
+static int intel_sdvo_atomic_check(struct drm_connector *conn,
+ struct drm_atomic_state *state)
+{
+ struct drm_connector_state *new_conn_state =
+ drm_atomic_get_new_connector_state(state, conn);
+ struct drm_connector_state *old_conn_state =
+ drm_atomic_get_old_connector_state(state, conn);
+ struct intel_sdvo_connector_state *old_state =
+ to_intel_sdvo_connector_state(old_conn_state);
+ struct intel_sdvo_connector_state *new_state =
+ to_intel_sdvo_connector_state(new_conn_state);
+
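+	/*
+	 * Any change to the TV enhancement or margin state must be
+	 * reprogrammed into the encoder, so force a modeset.
+	 */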
+ if (new_conn_state->crtc &&
+ (memcmp(&old_state->tv, &new_state->tv, sizeof(old_state->tv)) ||
+ memcmp(&old_conn_state->tv, &new_conn_state->tv, sizeof(old_conn_state->tv)))) {
+ struct drm_crtc_state *crtc_state =
+ drm_atomic_get_new_crtc_state(state,
+ new_conn_state->crtc);
+
+ crtc_state->connectors_changed = true;
+ }
+
+ return intel_digital_connector_atomic_check(conn, state);
+}
+
+static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs = {
+ .get_modes = intel_sdvo_get_modes,
+ .mode_valid = intel_sdvo_mode_valid,
+ .atomic_check = intel_sdvo_atomic_check,
+};
+
+static void intel_sdvo_enc_destroy(struct drm_encoder *encoder)
+{
+ struct intel_sdvo *intel_sdvo = to_sdvo(to_intel_encoder(encoder));
+
+ i2c_del_adapter(&intel_sdvo->ddc);
+ intel_encoder_destroy(encoder);
+}
+
+static const struct drm_encoder_funcs intel_sdvo_enc_funcs = {
+ .destroy = intel_sdvo_enc_destroy,
+};
+
+static void
+intel_sdvo_guess_ddc_bus(struct intel_sdvo *sdvo)
+{
+ u16 mask = 0;
+ unsigned int num_bits;
+
+ /*
+ * Make a mask of outputs less than or equal to our own priority in the
+ * list.
+ */
+ switch (sdvo->controlled_output) {
+ case SDVO_OUTPUT_LVDS1:
+ mask |= SDVO_OUTPUT_LVDS1;
+ fallthrough;
+ case SDVO_OUTPUT_LVDS0:
+ mask |= SDVO_OUTPUT_LVDS0;
+ fallthrough;
+ case SDVO_OUTPUT_TMDS1:
+ mask |= SDVO_OUTPUT_TMDS1;
+ fallthrough;
+ case SDVO_OUTPUT_TMDS0:
+ mask |= SDVO_OUTPUT_TMDS0;
+ fallthrough;
+ case SDVO_OUTPUT_RGB1:
+ mask |= SDVO_OUTPUT_RGB1;
+ fallthrough;
+ case SDVO_OUTPUT_RGB0:
+ mask |= SDVO_OUTPUT_RGB0;
+ break;
+ }
+
+ /* Count bits to find what number we are in the priority list. */
+ mask &= sdvo->caps.output_flags;
+ num_bits = hweight16(mask);
+ /* If more than 3 outputs, default to DDC bus 3 for now. */
+ if (num_bits > 3)
+ num_bits = 3;
+
+ /* Corresponds to SDVO_CONTROL_BUS_DDCx */
+ sdvo->ddc_bus = 1 << num_bits;
+}
+
+/*
+ * Choose the appropriate DDC bus for control bus switch command for this
+ * SDVO output based on the controlled output.
+ *
+ * DDC bus number assignment is in a priority order of RGB outputs, then TMDS
+ * outputs, then LVDS outputs.
+ */
+static void
+intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv,
+ struct intel_sdvo *sdvo)
+{
+ struct sdvo_device_mapping *mapping;
+
+ if (sdvo->port == PORT_B)
+ mapping = &dev_priv->display.vbt.sdvo_mappings[0];
+ else
+ mapping = &dev_priv->display.vbt.sdvo_mappings[1];
+
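+	/* The VBT stores the DDC bus number in the high nibble of ddc_pin. */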
+ if (mapping->initialized)
+ sdvo->ddc_bus = 1 << ((mapping->ddc_pin & 0xf0) >> 4);
+ else
+ intel_sdvo_guess_ddc_bus(sdvo);
+}
+
+static void
+intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
+ struct intel_sdvo *sdvo)
+{
+ struct sdvo_device_mapping *mapping;
+ u8 pin;
+
+ if (sdvo->port == PORT_B)
+ mapping = &dev_priv->display.vbt.sdvo_mappings[0];
+ else
+ mapping = &dev_priv->display.vbt.sdvo_mappings[1];
+
+ if (mapping->initialized &&
+ intel_gmbus_is_valid_pin(dev_priv, mapping->i2c_pin))
+ pin = mapping->i2c_pin;
+ else
+ pin = GMBUS_PIN_DPB;
+
+ sdvo->i2c = intel_gmbus_get_adapter(dev_priv, pin);
+
+ /*
+ * With gmbus we should be able to drive sdvo i2c at 2MHz, but somehow
+ * our code totally fails once we start using gmbus. Hence fall back to
+ * bit banging for now.
+ */
+ intel_gmbus_force_bit(sdvo->i2c, true);
+}
+
+/* undo any changes intel_sdvo_select_i2c_bus() did to sdvo->i2c */
+static void
+intel_sdvo_unselect_i2c_bus(struct intel_sdvo *sdvo)
+{
+ intel_gmbus_force_bit(sdvo->i2c, false);
+}
+
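+/* HDMI capability is per-encoder; the TMDS output number is not used. */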
+static bool
+intel_sdvo_is_hdmi_connector(struct intel_sdvo *intel_sdvo, int device)
+{
+ return intel_sdvo_check_supp_encode(intel_sdvo);
+}
+
+static u8
+intel_sdvo_get_slave_addr(struct drm_i915_private *dev_priv,
+ struct intel_sdvo *sdvo)
+{
+ struct sdvo_device_mapping *my_mapping, *other_mapping;
+
+ if (sdvo->port == PORT_B) {
+ my_mapping = &dev_priv->display.vbt.sdvo_mappings[0];
+ other_mapping = &dev_priv->display.vbt.sdvo_mappings[1];
+ } else {
+ my_mapping = &dev_priv->display.vbt.sdvo_mappings[1];
+ other_mapping = &dev_priv->display.vbt.sdvo_mappings[0];
+ }
+
+ /* If the BIOS described our SDVO device, take advantage of it. */
+ if (my_mapping->slave_addr)
+ return my_mapping->slave_addr;
+
+ /*
+ * If the BIOS only described a different SDVO device, use the
+ * address that it isn't using.
+ */
+ if (other_mapping->slave_addr) {
+ if (other_mapping->slave_addr == 0x70)
+ return 0x72;
+ else
+ return 0x70;
+ }
+
+	/*
+	 * No SDVO device info was found for the other DVO port, so use
+	 * the mapping assumption we had before BIOS parsing.
+	 */
+ */
+ if (sdvo->port == PORT_B)
+ return 0x70;
+ else
+ return 0x72;
+}
+
+static int
+intel_sdvo_connector_init(struct intel_sdvo_connector *connector,
+ struct intel_sdvo *encoder)
+{
+ struct drm_connector *drm_connector;
+ int ret;
+
+ drm_connector = &connector->base.base;
+ ret = drm_connector_init(encoder->base.base.dev,
+ drm_connector,
+ &intel_sdvo_connector_funcs,
+ connector->base.base.connector_type);
+ if (ret < 0)
+ return ret;
+
+ drm_connector_helper_add(drm_connector,
+ &intel_sdvo_connector_helper_funcs);
+
+	connector->base.base.interlace_allowed = true;
+	connector->base.base.doublescan_allowed = false;
+ connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB;
+ connector->base.get_hw_state = intel_sdvo_connector_get_hw_state;
+
+ intel_connector_attach_encoder(&connector->base, &encoder->base);
+
+ return 0;
+}
+
+static void
+intel_sdvo_add_hdmi_properties(struct intel_sdvo *intel_sdvo,
+ struct intel_sdvo_connector *connector)
+{
+ intel_attach_force_audio_property(&connector->base.base);
+ if (intel_sdvo->colorimetry_cap & SDVO_COLORIMETRY_RGB220)
+ intel_attach_broadcast_rgb_property(&connector->base.base);
+ intel_attach_aspect_ratio_property(&connector->base.base);
+}
+
+static struct intel_sdvo_connector *intel_sdvo_connector_alloc(void)
+{
+ struct intel_sdvo_connector *sdvo_connector;
+ struct intel_sdvo_connector_state *conn_state;
+
+ sdvo_connector = kzalloc(sizeof(*sdvo_connector), GFP_KERNEL);
+ if (!sdvo_connector)
+ return NULL;
+
+ conn_state = kzalloc(sizeof(*conn_state), GFP_KERNEL);
+ if (!conn_state) {
+ kfree(sdvo_connector);
+ return NULL;
+ }
+
+ __drm_atomic_helper_connector_reset(&sdvo_connector->base.base,
+ &conn_state->base.base);
+
+ intel_panel_init_alloc(&sdvo_connector->base);
+
+ return sdvo_connector;
+}
+
+static bool
+intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
+{
+ struct drm_encoder *encoder = &intel_sdvo->base.base;
+ struct drm_connector *connector;
+ struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+ struct intel_connector *intel_connector;
+ struct intel_sdvo_connector *intel_sdvo_connector;
+
+ DRM_DEBUG_KMS("initialising DVI device %d\n", device);
+
+ intel_sdvo_connector = intel_sdvo_connector_alloc();
+ if (!intel_sdvo_connector)
+ return false;
+
+ if (device == 0)
+ intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS0;
+ else if (device == 1)
+ intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS1;
+
+ intel_connector = &intel_sdvo_connector->base;
+ connector = &intel_connector->base;
+ if (intel_sdvo_get_hotplug_support(intel_sdvo) &
+ intel_sdvo_connector->output_flag) {
+ intel_sdvo->hotplug_active |= intel_sdvo_connector->output_flag;
+ /*
+ * Some SDVO devices have one-shot hotplug interrupts.
+ * Ensure that they get re-enabled when an interrupt happens.
+ */
+ intel_connector->polled = DRM_CONNECTOR_POLL_HPD;
+ intel_encoder->hotplug = intel_sdvo_hotplug;
+ intel_sdvo_enable_hotplug(intel_encoder);
+ } else {
+ intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
+ }
+ encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
+ connector->connector_type = DRM_MODE_CONNECTOR_DVID;
+
+ if (intel_sdvo_is_hdmi_connector(intel_sdvo, device)) {
+ connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
+ intel_sdvo_connector->is_hdmi = true;
+ }
+
+ if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) {
+ kfree(intel_sdvo_connector);
+ return false;
+ }
+
+ if (intel_sdvo_connector->is_hdmi)
+ intel_sdvo_add_hdmi_properties(intel_sdvo, intel_sdvo_connector);
+
+ return true;
+}
+
+static bool
+intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
+{
+ struct drm_encoder *encoder = &intel_sdvo->base.base;
+ struct drm_connector *connector;
+ struct intel_connector *intel_connector;
+ struct intel_sdvo_connector *intel_sdvo_connector;
+
+ DRM_DEBUG_KMS("initialising TV type %d\n", type);
+
+ intel_sdvo_connector = intel_sdvo_connector_alloc();
+ if (!intel_sdvo_connector)
+ return false;
+
+ intel_connector = &intel_sdvo_connector->base;
+ connector = &intel_connector->base;
+ encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
+ connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
+
+ intel_sdvo_connector->output_flag = type;
+
+ if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) {
+ kfree(intel_sdvo_connector);
+ return false;
+ }
+
+ if (!intel_sdvo_tv_create_property(intel_sdvo, intel_sdvo_connector, type))
+ goto err;
+
+ if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
+ goto err;
+
+ return true;
+
+err:
+ intel_connector_destroy(connector);
+ return false;
+}
+
+static bool
+intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device)
+{
+ struct drm_encoder *encoder = &intel_sdvo->base.base;
+ struct drm_connector *connector;
+ struct intel_connector *intel_connector;
+ struct intel_sdvo_connector *intel_sdvo_connector;
+
+ DRM_DEBUG_KMS("initialising analog device %d\n", device);
+
+ intel_sdvo_connector = intel_sdvo_connector_alloc();
+ if (!intel_sdvo_connector)
+ return false;
+
+ intel_connector = &intel_sdvo_connector->base;
+ connector = &intel_connector->base;
+ intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ encoder->encoder_type = DRM_MODE_ENCODER_DAC;
+ connector->connector_type = DRM_MODE_CONNECTOR_VGA;
+
+ if (device == 0)
+ intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB0;
+ else if (device == 1)
+ intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
+
+ if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) {
+ kfree(intel_sdvo_connector);
+ return false;
+ }
+
+ return true;
+}
+
+static bool
+intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
+{
+ struct drm_encoder *encoder = &intel_sdvo->base.base;
+ struct drm_i915_private *i915 = to_i915(encoder->dev);
+ struct drm_connector *connector;
+ struct intel_connector *intel_connector;
+ struct intel_sdvo_connector *intel_sdvo_connector;
+
+ DRM_DEBUG_KMS("initialising LVDS device %d\n", device);
+
+ intel_sdvo_connector = intel_sdvo_connector_alloc();
+ if (!intel_sdvo_connector)
+ return false;
+
+ intel_connector = &intel_sdvo_connector->base;
+ connector = &intel_connector->base;
+ encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
+ connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
+
+ if (device == 0)
+ intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0;
+ else if (device == 1)
+ intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
+
+ if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) {
+ kfree(intel_sdvo_connector);
+ return false;
+ }
+
+ if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
+ goto err;
+
+ intel_bios_init_panel_late(i915, &intel_connector->panel, NULL, NULL);
+
+ /*
+ * Fetch modes from VBT. For SDVO prefer the VBT mode since some
+ * SDVO->LVDS transcoders can't cope with the EDID mode.
+ */
+ intel_panel_add_vbt_sdvo_fixed_mode(intel_connector);
+
+ if (!intel_panel_preferred_fixed_mode(intel_connector)) {
+ mutex_lock(&i915->drm.mode_config.mutex);
+
+ intel_ddc_get_modes(connector, &intel_sdvo->ddc);
+ intel_panel_add_edid_fixed_modes(intel_connector, false);
+
+ mutex_unlock(&i915->drm.mode_config.mutex);
+ }
+
+ intel_panel_init(intel_connector);
+
+ if (!intel_panel_preferred_fixed_mode(intel_connector))
+ goto err;
+
+ return true;
+
+err:
+ intel_connector_destroy(connector);
+ return false;
+}
+
+static u16 intel_sdvo_filter_output_flags(u16 flags)
+{
+ flags &= SDVO_OUTPUT_MASK;
+
+	/* SDVO requires that an XXX1 function may only exist if the matching XXX0 function does. */
+ if (!(flags & SDVO_OUTPUT_TMDS0))
+ flags &= ~SDVO_OUTPUT_TMDS1;
+
+ if (!(flags & SDVO_OUTPUT_RGB0))
+ flags &= ~SDVO_OUTPUT_RGB1;
+
+ if (!(flags & SDVO_OUTPUT_LVDS0))
+ flags &= ~SDVO_OUTPUT_LVDS1;
+
+ return flags;
+}
+
+static bool
+intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, u16 flags)
+{
+ struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev);
+
+ flags = intel_sdvo_filter_output_flags(flags);
+
+ intel_sdvo->controlled_output = flags;
+
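+	/* Pick the DDC bus before connector init; LVDS setup may read the EDID. */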
+ intel_sdvo_select_ddc_bus(i915, intel_sdvo);
+
+ if (flags & SDVO_OUTPUT_TMDS0)
+ if (!intel_sdvo_dvi_init(intel_sdvo, 0))
+ return false;
+
+ if (flags & SDVO_OUTPUT_TMDS1)
+ if (!intel_sdvo_dvi_init(intel_sdvo, 1))
+ return false;
+
+ /* TV has no XXX1 function block */
+ if (flags & SDVO_OUTPUT_SVID0)
+ if (!intel_sdvo_tv_init(intel_sdvo, SDVO_OUTPUT_SVID0))
+ return false;
+
+ if (flags & SDVO_OUTPUT_CVBS0)
+ if (!intel_sdvo_tv_init(intel_sdvo, SDVO_OUTPUT_CVBS0))
+ return false;
+
+ if (flags & SDVO_OUTPUT_YPRPB0)
+ if (!intel_sdvo_tv_init(intel_sdvo, SDVO_OUTPUT_YPRPB0))
+ return false;
+
+ if (flags & SDVO_OUTPUT_RGB0)
+ if (!intel_sdvo_analog_init(intel_sdvo, 0))
+ return false;
+
+ if (flags & SDVO_OUTPUT_RGB1)
+ if (!intel_sdvo_analog_init(intel_sdvo, 1))
+ return false;
+
+ if (flags & SDVO_OUTPUT_LVDS0)
+ if (!intel_sdvo_lvds_init(intel_sdvo, 0))
+ return false;
+
+ if (flags & SDVO_OUTPUT_LVDS1)
+ if (!intel_sdvo_lvds_init(intel_sdvo, 1))
+ return false;
+
+ if (flags == 0) {
+ unsigned char bytes[2];
+
+ memcpy(bytes, &intel_sdvo->caps.output_flags, 2);
+ DRM_DEBUG_KMS("%s: Unknown SDVO output type (0x%02x%02x)\n",
+ SDVO_NAME(intel_sdvo),
+ bytes[0], bytes[1]);
+ return false;
+ }
+ intel_sdvo->base.pipe_mask = ~0;
+
+ return true;
+}
+
+static void intel_sdvo_output_cleanup(struct intel_sdvo *intel_sdvo)
+{
+ struct drm_device *dev = intel_sdvo->base.base.dev;
+ struct drm_connector *connector, *tmp;
+
+ list_for_each_entry_safe(connector, tmp,
+ &dev->mode_config.connector_list, head) {
+ if (intel_attached_encoder(to_intel_connector(connector)) == &intel_sdvo->base) {
+ drm_connector_unregister(connector);
+ intel_connector_destroy(connector);
+ }
+ }
+}
+
+static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
+ struct intel_sdvo_connector *intel_sdvo_connector,
+ int type)
+{
+ struct drm_device *dev = intel_sdvo->base.base.dev;
+ struct intel_sdvo_tv_format format;
+ u32 format_map, i;
+
+ if (!intel_sdvo_set_target_output(intel_sdvo, type))
+ return false;
+
+ BUILD_BUG_ON(sizeof(format) != 6);
+ if (!intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_SUPPORTED_TV_FORMATS,
+ &format, sizeof(format)))
+ return false;
+
+ memcpy(&format_map, &format, min(sizeof(format_map), sizeof(format)));
+
+ if (format_map == 0)
+ return false;
+
+ intel_sdvo_connector->format_supported_num = 0;
+	for (i = 0; i < TV_FORMAT_NUM; i++)
+		if (format_map & (1 << i))
+			intel_sdvo_connector->tv_format_supported[intel_sdvo_connector->format_supported_num++] = i;
+
+ intel_sdvo_connector->tv_format =
+ drm_property_create(dev, DRM_MODE_PROP_ENUM,
+ "mode", intel_sdvo_connector->format_supported_num);
+ if (!intel_sdvo_connector->tv_format)
+ return false;
+
+ for (i = 0; i < intel_sdvo_connector->format_supported_num; i++)
+ drm_property_add_enum(intel_sdvo_connector->tv_format, i,
+ tv_format_names[intel_sdvo_connector->tv_format_supported[i]]);
+
+ intel_sdvo_connector->base.base.state->tv.mode = intel_sdvo_connector->tv_format_supported[0];
+ drm_object_attach_property(&intel_sdvo_connector->base.base.base,
+ intel_sdvo_connector->tv_format, 0);
+	return true;
+}
+
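+/*
+ * Query the max and current value of one picture enhancement, create a
+ * matching range property, seed the connector state with the current
+ * value, and attach the property to the connector.
+ */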
+#define _ENHANCEMENT(state_assignment, name, NAME) do { \
+ if (enhancements.name) { \
+ if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_MAX_##NAME, &data_value, 4) || \
+ !intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_##NAME, &response, 2)) \
+ return false; \
+ intel_sdvo_connector->name = \
+ drm_property_create_range(dev, 0, #name, 0, data_value[0]); \
+ if (!intel_sdvo_connector->name) return false; \
+ state_assignment = response; \
+ drm_object_attach_property(&connector->base, \
+ intel_sdvo_connector->name, 0); \
+ DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \
+ data_value[0], data_value[1], response); \
+ } \
+} while (0)
+
+#define ENHANCEMENT(state, name, NAME) _ENHANCEMENT((state)->name, name, NAME)
+
+static bool
+intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
+ struct intel_sdvo_connector *intel_sdvo_connector,
+ struct intel_sdvo_enhancements_reply enhancements)
+{
+ struct drm_device *dev = intel_sdvo->base.base.dev;
+ struct drm_connector *connector = &intel_sdvo_connector->base.base;
+ struct drm_connector_state *conn_state = connector->state;
+ struct intel_sdvo_connector_state *sdvo_state =
+ to_intel_sdvo_connector_state(conn_state);
+ u16 response, data_value[2];
+
+	/* When horizontal overscan is supported, add the left/right properties. */
+ if (enhancements.overscan_h) {
+ if (!intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_MAX_OVERSCAN_H,
+ &data_value, 4))
+ return false;
+
+ if (!intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_OVERSCAN_H,
+ &response, 2))
+ return false;
+
+ sdvo_state->tv.overscan_h = response;
+
+ intel_sdvo_connector->max_hscan = data_value[0];
+ intel_sdvo_connector->left =
+ drm_property_create_range(dev, 0, "left_margin", 0, data_value[0]);
+ if (!intel_sdvo_connector->left)
+ return false;
+
+ drm_object_attach_property(&connector->base,
+ intel_sdvo_connector->left, 0);
+
+ intel_sdvo_connector->right =
+ drm_property_create_range(dev, 0, "right_margin", 0, data_value[0]);
+ if (!intel_sdvo_connector->right)
+ return false;
+
+ drm_object_attach_property(&connector->base,
+ intel_sdvo_connector->right, 0);
+ DRM_DEBUG_KMS("h_overscan: max %d, "
+ "default %d, current %d\n",
+ data_value[0], data_value[1], response);
+ }
+
+ if (enhancements.overscan_v) {
+ if (!intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_MAX_OVERSCAN_V,
+ &data_value, 4))
+ return false;
+
+ if (!intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_OVERSCAN_V,
+ &response, 2))
+ return false;
+
+ sdvo_state->tv.overscan_v = response;
+
+ intel_sdvo_connector->max_vscan = data_value[0];
+ intel_sdvo_connector->top =
+ drm_property_create_range(dev, 0,
+ "top_margin", 0, data_value[0]);
+ if (!intel_sdvo_connector->top)
+ return false;
+
+ drm_object_attach_property(&connector->base,
+ intel_sdvo_connector->top, 0);
+
+ intel_sdvo_connector->bottom =
+ drm_property_create_range(dev, 0,
+ "bottom_margin", 0, data_value[0]);
+ if (!intel_sdvo_connector->bottom)
+ return false;
+
+ drm_object_attach_property(&connector->base,
+ intel_sdvo_connector->bottom, 0);
+ DRM_DEBUG_KMS("v_overscan: max %d, "
+ "default %d, current %d\n",
+ data_value[0], data_value[1], response);
+ }
+
+ ENHANCEMENT(&sdvo_state->tv, hpos, HPOS);
+ ENHANCEMENT(&sdvo_state->tv, vpos, VPOS);
+ ENHANCEMENT(&conn_state->tv, saturation, SATURATION);
+ ENHANCEMENT(&conn_state->tv, contrast, CONTRAST);
+ ENHANCEMENT(&conn_state->tv, hue, HUE);
+ ENHANCEMENT(&conn_state->tv, brightness, BRIGHTNESS);
+ ENHANCEMENT(&sdvo_state->tv, sharpness, SHARPNESS);
+ ENHANCEMENT(&sdvo_state->tv, flicker_filter, FLICKER_FILTER);
+ ENHANCEMENT(&sdvo_state->tv, flicker_filter_adaptive, FLICKER_FILTER_ADAPTIVE);
+ ENHANCEMENT(&sdvo_state->tv, flicker_filter_2d, FLICKER_FILTER_2D);
+ _ENHANCEMENT(sdvo_state->tv.chroma_filter, tv_chroma_filter, TV_CHROMA_FILTER);
+ _ENHANCEMENT(sdvo_state->tv.luma_filter, tv_luma_filter, TV_LUMA_FILTER);
+
+ if (enhancements.dot_crawl) {
+ if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_DOT_CRAWL, &response, 2))
+ return false;
+
+ sdvo_state->tv.dot_crawl = response & 0x1;
+ intel_sdvo_connector->dot_crawl =
+ drm_property_create_range(dev, 0, "dot_crawl", 0, 1);
+ if (!intel_sdvo_connector->dot_crawl)
+ return false;
+
+ drm_object_attach_property(&connector->base,
+ intel_sdvo_connector->dot_crawl, 0);
+ DRM_DEBUG_KMS("dot crawl: current %d\n", response);
+ }
+
+ return true;
+}
+
+static bool
+intel_sdvo_create_enhance_property_lvds(struct intel_sdvo *intel_sdvo,
+ struct intel_sdvo_connector *intel_sdvo_connector,
+ struct intel_sdvo_enhancements_reply enhancements)
+{
+ struct drm_device *dev = intel_sdvo->base.base.dev;
+ struct drm_connector *connector = &intel_sdvo_connector->base.base;
+ u16 response, data_value[2];
+
+ ENHANCEMENT(&connector->state->tv, brightness, BRIGHTNESS);
+
+ return true;
+}
+#undef ENHANCEMENT
+#undef _ENHANCEMENT
+
+static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
+ struct intel_sdvo_connector *intel_sdvo_connector)
+{
+ union {
+ struct intel_sdvo_enhancements_reply reply;
+ u16 response;
+ } enhancements;
+
+ BUILD_BUG_ON(sizeof(enhancements) != 2);
+
+ if (!intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS,
+ &enhancements, sizeof(enhancements)) ||
+ enhancements.response == 0) {
+ DRM_DEBUG_KMS("No enhancement is supported\n");
+ return true;
+ }
+
+ if (IS_TV(intel_sdvo_connector))
+ return intel_sdvo_create_enhance_property_tv(intel_sdvo, intel_sdvo_connector, enhancements.reply);
+ else if (IS_LVDS(intel_sdvo_connector))
+ return intel_sdvo_create_enhance_property_lvds(intel_sdvo, intel_sdvo_connector, enhancements.reply);
+ else
+ return true;
+}
+
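+/*
+ * DDC on SDVO is reached through the device's control bus switch: select
+ * the current DDC bus before each transfer, then forward the messages to
+ * the underlying i2c adapter.
+ */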
+static int intel_sdvo_ddc_proxy_xfer(struct i2c_adapter *adapter,
+ struct i2c_msg *msgs,
+ int num)
+{
+ struct intel_sdvo *sdvo = adapter->algo_data;
+
+ if (!__intel_sdvo_set_control_bus_switch(sdvo, sdvo->ddc_bus))
+ return -EIO;
+
+ return sdvo->i2c->algo->master_xfer(sdvo->i2c, msgs, num);
+}
+
+static u32 intel_sdvo_ddc_proxy_func(struct i2c_adapter *adapter)
+{
+	struct intel_sdvo *sdvo = adapter->algo_data;
+
+	return sdvo->i2c->algo->functionality(sdvo->i2c);
+}
+
+static const struct i2c_algorithm intel_sdvo_ddc_proxy = {
+ .master_xfer = intel_sdvo_ddc_proxy_xfer,
+ .functionality = intel_sdvo_ddc_proxy_func
+};
+
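+/* Forward i2c bus locking to the underlying adapter. */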
+static void proxy_lock_bus(struct i2c_adapter *adapter,
+ unsigned int flags)
+{
+	struct intel_sdvo *sdvo = adapter->algo_data;
+
+	sdvo->i2c->lock_ops->lock_bus(sdvo->i2c, flags);
+}
+
+static int proxy_trylock_bus(struct i2c_adapter *adapter,
+ unsigned int flags)
+{
+	struct intel_sdvo *sdvo = adapter->algo_data;
+
+	return sdvo->i2c->lock_ops->trylock_bus(sdvo->i2c, flags);
+}
+
+static void proxy_unlock_bus(struct i2c_adapter *adapter,
+ unsigned int flags)
+{
+	struct intel_sdvo *sdvo = adapter->algo_data;
+
+	sdvo->i2c->lock_ops->unlock_bus(sdvo->i2c, flags);
+}
+
+static const struct i2c_lock_operations proxy_lock_ops = {
+ .lock_bus = proxy_lock_bus,
+ .trylock_bus = proxy_trylock_bus,
+ .unlock_bus = proxy_unlock_bus,
+};
+
+static bool
+intel_sdvo_init_ddc_proxy(struct intel_sdvo *sdvo,
+ struct drm_i915_private *dev_priv)
+{
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
+
+ sdvo->ddc.owner = THIS_MODULE;
+ sdvo->ddc.class = I2C_CLASS_DDC;
+ snprintf(sdvo->ddc.name, I2C_NAME_SIZE, "SDVO DDC proxy");
+ sdvo->ddc.dev.parent = &pdev->dev;
+ sdvo->ddc.algo_data = sdvo;
+ sdvo->ddc.algo = &intel_sdvo_ddc_proxy;
+ sdvo->ddc.lock_ops = &proxy_lock_ops;
+
+ return i2c_add_adapter(&sdvo->ddc) == 0;
+}
+
+static void assert_sdvo_port_valid(const struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ if (HAS_PCH_SPLIT(dev_priv))
+ drm_WARN_ON(&dev_priv->drm, port != PORT_B);
+ else
+ drm_WARN_ON(&dev_priv->drm, port != PORT_B && port != PORT_C);
+}
+
+bool intel_sdvo_init(struct drm_i915_private *dev_priv,
+ i915_reg_t sdvo_reg, enum port port)
+{
+ struct intel_encoder *intel_encoder;
+ struct intel_sdvo *intel_sdvo;
+ int i;
+
+ assert_sdvo_port_valid(dev_priv, port);
+
+ intel_sdvo = kzalloc(sizeof(*intel_sdvo), GFP_KERNEL);
+ if (!intel_sdvo)
+ return false;
+
+ intel_sdvo->sdvo_reg = sdvo_reg;
+ intel_sdvo->port = port;
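+	/* The VBT uses 8-bit addresses (0x70/0x72); i2c expects 7-bit. */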
+ intel_sdvo->slave_addr =
+ intel_sdvo_get_slave_addr(dev_priv, intel_sdvo) >> 1;
+ intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo);
+ if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev_priv))
+ goto err_i2c_bus;
+
+ /* encoder type will be decided later */
+ intel_encoder = &intel_sdvo->base;
+ intel_encoder->type = INTEL_OUTPUT_SDVO;
+ intel_encoder->power_domain = POWER_DOMAIN_PORT_OTHER;
+ intel_encoder->port = port;
+ drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
+ &intel_sdvo_enc_funcs, 0,
+ "SDVO %c", port_name(port));
+
+ /* Read the regs to test if we can talk to the device */
+ for (i = 0; i < 0x40; i++) {
+ u8 byte;
+
+ if (!intel_sdvo_read_byte(intel_sdvo, i, &byte)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "No SDVO device found on %s\n",
+ SDVO_NAME(intel_sdvo));
+ goto err;
+ }
+ }
+
+ intel_encoder->compute_config = intel_sdvo_compute_config;
+ if (HAS_PCH_SPLIT(dev_priv)) {
+ intel_encoder->disable = pch_disable_sdvo;
+ intel_encoder->post_disable = pch_post_disable_sdvo;
+ } else {
+ intel_encoder->disable = intel_disable_sdvo;
+ }
+ intel_encoder->pre_enable = intel_sdvo_pre_enable;
+ intel_encoder->enable = intel_enable_sdvo;
+ intel_encoder->get_hw_state = intel_sdvo_get_hw_state;
+ intel_encoder->get_config = intel_sdvo_get_config;
+
+	/* In the default case, SDVO LVDS is false */
+ if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps))
+ goto err;
+
+ intel_sdvo->colorimetry_cap =
+ intel_sdvo_get_colorimetry_cap(intel_sdvo);
+
+ if (intel_sdvo_output_setup(intel_sdvo,
+ intel_sdvo->caps.output_flags) != true) {
+ drm_dbg_kms(&dev_priv->drm,
+ "SDVO output failed to setup on %s\n",
+ SDVO_NAME(intel_sdvo));
+ /* Output_setup can leave behind connectors! */
+ goto err_output;
+ }
+
+ /*
+ * Only enable the hotplug irq if we need it, to work around noisy
+ * hotplug lines.
+ */
+ if (intel_sdvo->hotplug_active) {
+ if (intel_sdvo->port == PORT_B)
+ intel_encoder->hpd_pin = HPD_SDVO_B;
+ else
+ intel_encoder->hpd_pin = HPD_SDVO_C;
+ }
+
+ /*
+ * Cloning SDVO with anything is often impossible, since the SDVO
+ * encoder can request a special input timing mode. And even if that's
+ * not the case we have evidence that cloning a plain unscaled mode with
+ * VGA doesn't really work. Furthermore the cloning flags are way too
+ * simplistic anyway to express such constraints, so just give up on
+ * cloning for SDVO encoders.
+ */
+ intel_sdvo->base.cloneable = 0;
+
+	/* Set the input timing to the screen. Assume input 0 is always used. */
+ if (!intel_sdvo_set_target_input(intel_sdvo))
+ goto err_output;
+
+ if (!intel_sdvo_get_input_pixel_clock_range(intel_sdvo,
+ &intel_sdvo->pixel_clock_min,
+ &intel_sdvo->pixel_clock_max))
+ goto err_output;
+
+ drm_dbg_kms(&dev_priv->drm, "%s device VID/DID: %02X:%02X.%02X, "
+ "clock range %dMHz - %dMHz, "
+ "input 1: %c, input 2: %c, "
+ "output 1: %c, output 2: %c\n",
+ SDVO_NAME(intel_sdvo),
+ intel_sdvo->caps.vendor_id, intel_sdvo->caps.device_id,
+ intel_sdvo->caps.device_rev_id,
+ intel_sdvo->pixel_clock_min / 1000,
+ intel_sdvo->pixel_clock_max / 1000,
+ (intel_sdvo->caps.sdvo_inputs_mask & 0x1) ? 'Y' : 'N',
+ (intel_sdvo->caps.sdvo_inputs_mask & 0x2) ? 'Y' : 'N',
+ /* check currently supported outputs */
+ intel_sdvo->caps.output_flags &
+ (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB0) ? 'Y' : 'N',
+ intel_sdvo->caps.output_flags &
+ (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N');
+ return true;
+
+err_output:
+ intel_sdvo_output_cleanup(intel_sdvo);
+
+err:
+ drm_encoder_cleanup(&intel_encoder->base);
+ i2c_del_adapter(&intel_sdvo->ddc);
+err_i2c_bus:
+ intel_sdvo_unselect_i2c_bus(intel_sdvo);
+ kfree(intel_sdvo);
+
+ return false;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.h b/drivers/gpu/drm/i915/display/intel_sdvo.h
new file mode 100644
index 000000000..2868852c8
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_SDVO_H__
+#define __INTEL_SDVO_H__
+
+#include <linux/types.h>
+
+#include "i915_reg_defs.h"
+
+struct drm_i915_private;
+enum pipe;
+enum port;
+
+bool intel_sdvo_port_enabled(struct drm_i915_private *dev_priv,
+ i915_reg_t sdvo_reg, enum pipe *pipe);
+bool intel_sdvo_init(struct drm_i915_private *dev_priv,
+ i915_reg_t reg, enum port port);
+
+#endif /* __INTEL_SDVO_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo_regs.h b/drivers/gpu/drm/i915/display/intel_sdvo_regs.h
new file mode 100644
index 000000000..74dc6c042
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_sdvo_regs.h
@@ -0,0 +1,741 @@
+/*
+ * Copyright © 2006-2007 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ */
+
+#ifndef __INTEL_SDVO_REGS_H__
+#define __INTEL_SDVO_REGS_H__
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+/*
+ * SDVO command definitions and structures.
+ */
+
+#define SDVO_OUTPUT_FIRST (0)
+#define SDVO_OUTPUT_TMDS0 (1 << 0)
+#define SDVO_OUTPUT_RGB0 (1 << 1)
+#define SDVO_OUTPUT_CVBS0 (1 << 2)
+#define SDVO_OUTPUT_SVID0 (1 << 3)
+#define SDVO_OUTPUT_YPRPB0 (1 << 4)
+#define SDVO_OUTPUT_SCART0 (1 << 5)
+#define SDVO_OUTPUT_LVDS0 (1 << 6)
+#define SDVO_OUTPUT_TMDS1 (1 << 8)
+#define SDVO_OUTPUT_RGB1 (1 << 9)
+#define SDVO_OUTPUT_CVBS1 (1 << 10)
+#define SDVO_OUTPUT_SVID1 (1 << 11)
+#define SDVO_OUTPUT_YPRPB1 (1 << 12)
+#define SDVO_OUTPUT_SCART1 (1 << 13)
+#define SDVO_OUTPUT_LVDS1 (1 << 14)
+#define SDVO_OUTPUT_LAST (14)
+
+struct intel_sdvo_caps {
+ u8 vendor_id;
+ u8 device_id;
+ u8 device_rev_id;
+ u8 sdvo_version_major;
+ u8 sdvo_version_minor;
+ unsigned int sdvo_inputs_mask:2;
+ unsigned int smooth_scaling:1;
+ unsigned int sharp_scaling:1;
+ unsigned int up_scaling:1;
+ unsigned int down_scaling:1;
+ unsigned int stall_support:1;
+ unsigned int pad:1;
+ u16 output_flags;
+} __packed;
+
+/* Note: SDVO detailed timing flags match EDID misc flags. */
+#define DTD_FLAG_HSYNC_POSITIVE (1 << 1)
+#define DTD_FLAG_VSYNC_POSITIVE (1 << 2)
+#define DTD_FLAG_INTERLACE (1 << 7)
+
+/* This matches the EDID DTD structure, more or less */
+struct intel_sdvo_dtd {
+ struct {
+ u16 clock; /* pixel clock, in 10kHz units */
+ u8 h_active; /* lower 8 bits (pixels) */
+ u8 h_blank; /* lower 8 bits (pixels) */
+ u8 h_high; /* upper 4 bits each h_active, h_blank */
+ u8 v_active; /* lower 8 bits (lines) */
+ u8 v_blank; /* lower 8 bits (lines) */
+ u8 v_high; /* upper 4 bits each v_active, v_blank */
+ } part1;
+
+ struct {
+ u8 h_sync_off; /* lower 8 bits, from hblank start */
+ u8 h_sync_width; /* lower 8 bits (pixels) */
+ /* lower 4 bits each vsync offset, vsync width */
+ u8 v_sync_off_width;
+ /*
+ * 2 high bits of hsync offset, 2 high bits of hsync width,
+ * bits 4-5 of vsync offset, and 2 high bits of vsync width.
+ */
+ u8 sync_off_width_high;
+ u8 dtd_flags;
+ u8 sdvo_flags;
+		/* bits 6-7 of the vsync offset, stored at bits 6-7 */
+ u8 v_sync_off_high;
+ u8 reserved;
+ } part2;
+} __packed;
+
+struct intel_sdvo_pixel_clock_range {
+ u16 min; /* pixel clock, in 10kHz units */
+ u16 max; /* pixel clock, in 10kHz units */
+} __packed;
+
+struct intel_sdvo_preferred_input_timing_args {
+ u16 clock;
+ u16 width;
+ u16 height;
+ u8 interlace:1;
+ u8 scaled:1;
+ u8 pad:6;
+} __packed;
+
+/* I2C registers for SDVO */
+#define SDVO_I2C_ARG_0 0x07
+#define SDVO_I2C_ARG_1 0x06
+#define SDVO_I2C_ARG_2 0x05
+#define SDVO_I2C_ARG_3 0x04
+#define SDVO_I2C_ARG_4 0x03
+#define SDVO_I2C_ARG_5 0x02
+#define SDVO_I2C_ARG_6 0x01
+#define SDVO_I2C_ARG_7 0x00
+#define SDVO_I2C_OPCODE 0x08
+#define SDVO_I2C_CMD_STATUS 0x09
+#define SDVO_I2C_RETURN_0 0x0a
+#define SDVO_I2C_RETURN_1 0x0b
+#define SDVO_I2C_RETURN_2 0x0c
+#define SDVO_I2C_RETURN_3 0x0d
+#define SDVO_I2C_RETURN_4 0x0e
+#define SDVO_I2C_RETURN_5 0x0f
+#define SDVO_I2C_RETURN_6 0x10
+#define SDVO_I2C_RETURN_7 0x11
+#define SDVO_I2C_VENDOR_BEGIN 0x20
+
+/* Status results */
+#define SDVO_CMD_STATUS_POWER_ON 0x0
+#define SDVO_CMD_STATUS_SUCCESS 0x1
+#define SDVO_CMD_STATUS_NOTSUPP 0x2
+#define SDVO_CMD_STATUS_INVALID_ARG 0x3
+#define SDVO_CMD_STATUS_PENDING 0x4
+#define SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED 0x5
+#define SDVO_CMD_STATUS_SCALING_NOT_SUPP 0x6
+
+/* SDVO commands, argument/result registers */
+
+#define SDVO_CMD_RESET 0x01
+
+/* Returns a struct intel_sdvo_caps */
+#define SDVO_CMD_GET_DEVICE_CAPS 0x02
+
+#define SDVO_CMD_GET_FIRMWARE_REV 0x86
+# define SDVO_DEVICE_FIRMWARE_MINOR SDVO_I2C_RETURN_0
+# define SDVO_DEVICE_FIRMWARE_MAJOR SDVO_I2C_RETURN_1
+# define SDVO_DEVICE_FIRMWARE_PATCH SDVO_I2C_RETURN_2
+
+/*
+ * Reports which inputs are trained (managed to sync).
+ *
+ * Devices must have trained within 2 vsyncs of a mode change.
+ */
+#define SDVO_CMD_GET_TRAINED_INPUTS 0x03
+struct intel_sdvo_get_trained_inputs_response {
+ unsigned int input0_trained:1;
+ unsigned int input1_trained:1;
+ unsigned int pad:6;
+} __packed;
+
+/* Returns a struct intel_sdvo_output_flags of active outputs. */
+#define SDVO_CMD_GET_ACTIVE_OUTPUTS 0x04
+
+/*
+ * Sets the current set of active outputs.
+ *
+ * Takes a struct intel_sdvo_output_flags. Must be preceded by a SET_IN_OUT_MAP
+ * on multi-output devices.
+ */
+#define SDVO_CMD_SET_ACTIVE_OUTPUTS 0x05
+
+/*
+ * Returns the current mapping of SDVO inputs to outputs on the device.
+ *
+ * Returns two struct intel_sdvo_output_flags structures.
+ */
+#define SDVO_CMD_GET_IN_OUT_MAP 0x06
+struct intel_sdvo_in_out_map {
+ u16 in0, in1;
+};
+
+/*
+ * Sets the current mapping of SDVO inputs to outputs on the device.
+ *
+ * Takes two struct intel_sdvo_output_flags structures.
+ */
+#define SDVO_CMD_SET_IN_OUT_MAP 0x07
+
+/*
+ * Returns a struct intel_sdvo_output_flags of attached displays.
+ */
+#define SDVO_CMD_GET_ATTACHED_DISPLAYS 0x0b
+
+/*
+ * Returns a struct intel_sdvo_output_flags of displays supporting hot plugging.
+ */
+#define SDVO_CMD_GET_HOT_PLUG_SUPPORT 0x0c
+
+/*
+ * Takes a struct intel_sdvo_output_flags.
+ */
+#define SDVO_CMD_SET_ACTIVE_HOT_PLUG 0x0d
+
+/*
+ * Returns a struct intel_sdvo_output_flags of displays with hot plug
+ * interrupts enabled.
+ */
+#define SDVO_CMD_GET_ACTIVE_HOT_PLUG 0x0e
+
+#define SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE 0x0f
+struct intel_sdvo_get_interrupt_event_source_response {
+ u16 interrupt_status;
+ unsigned int ambient_light_interrupt:1;
+ unsigned int hdmi_audio_encrypt_change:1;
+ unsigned int pad:6;
+} __packed;
+
+/*
+ * Selects which input is affected by future input commands.
+ *
+ * Commands affected include SET_INPUT_TIMINGS_PART[12],
+ * GET_INPUT_TIMINGS_PART[12], GET_PREFERRED_INPUT_TIMINGS_PART[12],
+ * GET_INPUT_PIXEL_CLOCK_RANGE, and CREATE_PREFERRED_INPUT_TIMINGS.
+ */
+#define SDVO_CMD_SET_TARGET_INPUT 0x10
+struct intel_sdvo_set_target_input_args {
+ unsigned int target_1:1;
+ unsigned int pad:7;
+} __packed;
+
+/*
+ * Takes a struct intel_sdvo_output_flags of which outputs are targeted by
+ * future output commands.
+ *
+ * Affected commands include SET_OUTPUT_TIMINGS_PART[12],
+ * GET_OUTPUT_TIMINGS_PART[12], and GET_OUTPUT_PIXEL_CLOCK_RANGE.
+ */
+#define SDVO_CMD_SET_TARGET_OUTPUT 0x11
+
+#define SDVO_CMD_GET_INPUT_TIMINGS_PART1 0x12
+#define SDVO_CMD_GET_INPUT_TIMINGS_PART2 0x13
+#define SDVO_CMD_SET_INPUT_TIMINGS_PART1 0x14
+#define SDVO_CMD_SET_INPUT_TIMINGS_PART2 0x15
+#define SDVO_CMD_SET_OUTPUT_TIMINGS_PART1 0x16
+#define SDVO_CMD_SET_OUTPUT_TIMINGS_PART2 0x17
+#define SDVO_CMD_GET_OUTPUT_TIMINGS_PART1 0x18
+#define SDVO_CMD_GET_OUTPUT_TIMINGS_PART2 0x19
+/* Part 1 */
+# define SDVO_DTD_CLOCK_LOW SDVO_I2C_ARG_0
+# define SDVO_DTD_CLOCK_HIGH SDVO_I2C_ARG_1
+# define SDVO_DTD_H_ACTIVE SDVO_I2C_ARG_2
+# define SDVO_DTD_H_BLANK SDVO_I2C_ARG_3
+# define SDVO_DTD_H_HIGH SDVO_I2C_ARG_4
+# define SDVO_DTD_V_ACTIVE SDVO_I2C_ARG_5
+# define SDVO_DTD_V_BLANK SDVO_I2C_ARG_6
+# define SDVO_DTD_V_HIGH SDVO_I2C_ARG_7
+/* Part 2 */
+# define SDVO_DTD_HSYNC_OFF SDVO_I2C_ARG_0
+# define SDVO_DTD_HSYNC_WIDTH SDVO_I2C_ARG_1
+# define SDVO_DTD_VSYNC_OFF_WIDTH SDVO_I2C_ARG_2
+# define SDVO_DTD_SYNC_OFF_WIDTH_HIGH SDVO_I2C_ARG_3
+# define SDVO_DTD_DTD_FLAGS SDVO_I2C_ARG_4
+# define SDVO_DTD_DTD_FLAG_INTERLACED (1 << 7)
+# define SDVO_DTD_DTD_FLAG_STEREO_MASK (3 << 5)
+# define SDVO_DTD_DTD_FLAG_INPUT_MASK (3 << 3)
+# define SDVO_DTD_DTD_FLAG_SYNC_MASK (3 << 1)
+# define SDVO_DTD_SDVO_FLAS SDVO_I2C_ARG_5
+# define SDVO_DTD_SDVO_FLAG_STALL (1 << 7)
+# define SDVO_DTD_SDVO_FLAG_CENTERED (0 << 6)
+# define SDVO_DTD_SDVO_FLAG_UPPER_LEFT (1 << 6)
+# define SDVO_DTD_SDVO_FLAG_SCALING_MASK (3 << 4)
+# define SDVO_DTD_SDVO_FLAG_SCALING_NONE (0 << 4)
+# define SDVO_DTD_SDVO_FLAG_SCALING_SHARP (1 << 4)
+# define SDVO_DTD_SDVO_FLAG_SCALING_SMOOTH (2 << 4)
+# define SDVO_DTD_VSYNC_OFF_HIGH SDVO_I2C_ARG_6
+
+/*
+ * Generates a DTD based on the given width, height, and flags.
+ *
+ * This will be supported by any device supporting scaling or interlaced
+ * modes.
+ */
+#define SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING 0x1a
+# define SDVO_PREFERRED_INPUT_TIMING_CLOCK_LOW SDVO_I2C_ARG_0
+# define SDVO_PREFERRED_INPUT_TIMING_CLOCK_HIGH SDVO_I2C_ARG_1
+# define SDVO_PREFERRED_INPUT_TIMING_WIDTH_LOW SDVO_I2C_ARG_2
+# define SDVO_PREFERRED_INPUT_TIMING_WIDTH_HIGH SDVO_I2C_ARG_3
+# define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_LOW SDVO_I2C_ARG_4
+# define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_HIGH SDVO_I2C_ARG_5
+# define SDVO_PREFERRED_INPUT_TIMING_FLAGS SDVO_I2C_ARG_6
+# define SDVO_PREFERRED_INPUT_TIMING_FLAGS_INTERLACED (1 << 0)
+# define SDVO_PREFERRED_INPUT_TIMING_FLAGS_SCALED (1 << 1)
+
+#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1 0x1b
+#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2 0x1c
+
+/* Returns a struct intel_sdvo_pixel_clock_range */
+#define SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE 0x1d
+/* Returns a struct intel_sdvo_pixel_clock_range */
+#define SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE 0x1e
+
+/* Returns a byte bitfield containing SDVO_CLOCK_RATE_MULT_* flags */
+#define SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS 0x1f
+
+/* Returns a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
+#define SDVO_CMD_GET_CLOCK_RATE_MULT 0x20
+/* Takes a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
+#define SDVO_CMD_SET_CLOCK_RATE_MULT 0x21
+# define SDVO_CLOCK_RATE_MULT_1X (1 << 0)
+# define SDVO_CLOCK_RATE_MULT_2X (1 << 1)
+# define SDVO_CLOCK_RATE_MULT_4X (1 << 3)
+
+#define SDVO_CMD_GET_SUPPORTED_TV_FORMATS 0x27
+/* 6 bytes of bit flags for TV formats shared by all TV format functions */
+struct intel_sdvo_tv_format {
+ unsigned int ntsc_m:1;
+ unsigned int ntsc_j:1;
+ unsigned int ntsc_443:1;
+ unsigned int pal_b:1;
+ unsigned int pal_d:1;
+ unsigned int pal_g:1;
+ unsigned int pal_h:1;
+ unsigned int pal_i:1;
+
+ unsigned int pal_m:1;
+ unsigned int pal_n:1;
+ unsigned int pal_nc:1;
+ unsigned int pal_60:1;
+ unsigned int secam_b:1;
+ unsigned int secam_d:1;
+ unsigned int secam_g:1;
+ unsigned int secam_k:1;
+
+ unsigned int secam_k1:1;
+ unsigned int secam_l:1;
+ unsigned int secam_60:1;
+ unsigned int hdtv_std_smpte_240m_1080i_59:1;
+ unsigned int hdtv_std_smpte_240m_1080i_60:1;
+ unsigned int hdtv_std_smpte_260m_1080i_59:1;
+ unsigned int hdtv_std_smpte_260m_1080i_60:1;
+ unsigned int hdtv_std_smpte_274m_1080i_50:1;
+
+ unsigned int hdtv_std_smpte_274m_1080i_59:1;
+ unsigned int hdtv_std_smpte_274m_1080i_60:1;
+ unsigned int hdtv_std_smpte_274m_1080p_23:1;
+ unsigned int hdtv_std_smpte_274m_1080p_24:1;
+ unsigned int hdtv_std_smpte_274m_1080p_25:1;
+ unsigned int hdtv_std_smpte_274m_1080p_29:1;
+ unsigned int hdtv_std_smpte_274m_1080p_30:1;
+ unsigned int hdtv_std_smpte_274m_1080p_50:1;
+
+ unsigned int hdtv_std_smpte_274m_1080p_59:1;
+ unsigned int hdtv_std_smpte_274m_1080p_60:1;
+ unsigned int hdtv_std_smpte_295m_1080i_50:1;
+ unsigned int hdtv_std_smpte_295m_1080p_50:1;
+ unsigned int hdtv_std_smpte_296m_720p_59:1;
+ unsigned int hdtv_std_smpte_296m_720p_60:1;
+ unsigned int hdtv_std_smpte_296m_720p_50:1;
+ unsigned int hdtv_std_smpte_293m_480p_59:1;
+
+ unsigned int hdtv_std_smpte_170m_480i_59:1;
+ unsigned int hdtv_std_iturbt601_576i_50:1;
+ unsigned int hdtv_std_iturbt601_576p_50:1;
+ unsigned int hdtv_std_eia_7702a_480i_60:1;
+ unsigned int hdtv_std_eia_7702a_480p_60:1;
+ unsigned int pad:3;
+} __packed;
+
+#define SDVO_CMD_GET_TV_FORMAT 0x28
+
+#define SDVO_CMD_SET_TV_FORMAT 0x29
+
+/* Returns the resolutions that can be used with the given TV format */
+#define SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT 0x83
+struct intel_sdvo_sdtv_resolution_request {
+ unsigned int ntsc_m:1;
+ unsigned int ntsc_j:1;
+ unsigned int ntsc_443:1;
+ unsigned int pal_b:1;
+ unsigned int pal_d:1;
+ unsigned int pal_g:1;
+ unsigned int pal_h:1;
+ unsigned int pal_i:1;
+
+ unsigned int pal_m:1;
+ unsigned int pal_n:1;
+ unsigned int pal_nc:1;
+ unsigned int pal_60:1;
+ unsigned int secam_b:1;
+ unsigned int secam_d:1;
+ unsigned int secam_g:1;
+ unsigned int secam_k:1;
+
+ unsigned int secam_k1:1;
+ unsigned int secam_l:1;
+ unsigned int secam_60:1;
+ unsigned int pad:5;
+} __packed;
+
+struct intel_sdvo_sdtv_resolution_reply {
+ unsigned int res_320x200:1;
+ unsigned int res_320x240:1;
+ unsigned int res_400x300:1;
+ unsigned int res_640x350:1;
+ unsigned int res_640x400:1;
+ unsigned int res_640x480:1;
+ unsigned int res_704x480:1;
+ unsigned int res_704x576:1;
+
+ unsigned int res_720x350:1;
+ unsigned int res_720x400:1;
+ unsigned int res_720x480:1;
+ unsigned int res_720x540:1;
+ unsigned int res_720x576:1;
+ unsigned int res_768x576:1;
+ unsigned int res_800x600:1;
+ unsigned int res_832x624:1;
+
+ unsigned int res_920x766:1;
+ unsigned int res_1024x768:1;
+ unsigned int res_1280x1024:1;
+ unsigned int pad:5;
+} __packed;
+
+/*
+ * Get the supported resolutions with a square pixel aspect ratio that can
+ * be scaled for the requested HDTV format.
+ */
+#define SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT 0x85
+
+struct intel_sdvo_hdtv_resolution_request {
+ unsigned int hdtv_std_smpte_240m_1080i_59:1;
+ unsigned int hdtv_std_smpte_240m_1080i_60:1;
+ unsigned int hdtv_std_smpte_260m_1080i_59:1;
+ unsigned int hdtv_std_smpte_260m_1080i_60:1;
+ unsigned int hdtv_std_smpte_274m_1080i_50:1;
+ unsigned int hdtv_std_smpte_274m_1080i_59:1;
+ unsigned int hdtv_std_smpte_274m_1080i_60:1;
+ unsigned int hdtv_std_smpte_274m_1080p_23:1;
+
+ unsigned int hdtv_std_smpte_274m_1080p_24:1;
+ unsigned int hdtv_std_smpte_274m_1080p_25:1;
+ unsigned int hdtv_std_smpte_274m_1080p_29:1;
+ unsigned int hdtv_std_smpte_274m_1080p_30:1;
+ unsigned int hdtv_std_smpte_274m_1080p_50:1;
+ unsigned int hdtv_std_smpte_274m_1080p_59:1;
+ unsigned int hdtv_std_smpte_274m_1080p_60:1;
+ unsigned int hdtv_std_smpte_295m_1080i_50:1;
+
+ unsigned int hdtv_std_smpte_295m_1080p_50:1;
+ unsigned int hdtv_std_smpte_296m_720p_59:1;
+ unsigned int hdtv_std_smpte_296m_720p_60:1;
+ unsigned int hdtv_std_smpte_296m_720p_50:1;
+ unsigned int hdtv_std_smpte_293m_480p_59:1;
+ unsigned int hdtv_std_smpte_170m_480i_59:1;
+ unsigned int hdtv_std_iturbt601_576i_50:1;
+ unsigned int hdtv_std_iturbt601_576p_50:1;
+
+ unsigned int hdtv_std_eia_7702a_480i_60:1;
+ unsigned int hdtv_std_eia_7702a_480p_60:1;
+ unsigned int pad:6;
+} __packed;
+
+struct intel_sdvo_hdtv_resolution_reply {
+ unsigned int res_640x480:1;
+ unsigned int res_800x600:1;
+ unsigned int res_1024x768:1;
+ unsigned int res_1280x960:1;
+ unsigned int res_1400x1050:1;
+ unsigned int res_1600x1200:1;
+ unsigned int res_1920x1440:1;
+ unsigned int res_2048x1536:1;
+
+ unsigned int res_2560x1920:1;
+ unsigned int res_3200x2400:1;
+ unsigned int res_3840x2880:1;
+ unsigned int pad1:5;
+
+ unsigned int res_848x480:1;
+ unsigned int res_1064x600:1;
+ unsigned int res_1280x720:1;
+ unsigned int res_1360x768:1;
+ unsigned int res_1704x960:1;
+ unsigned int res_1864x1050:1;
+ unsigned int res_1920x1080:1;
+ unsigned int res_2128x1200:1;
+
+ unsigned int res_2560x1400:1;
+ unsigned int res_2728x1536:1;
+ unsigned int res_3408x1920:1;
+ unsigned int res_4264x2400:1;
+ unsigned int res_5120x2880:1;
+ unsigned int pad2:3;
+
+ unsigned int res_768x480:1;
+ unsigned int res_960x600:1;
+ unsigned int res_1152x720:1;
+ unsigned int res_1124x768:1;
+ unsigned int res_1536x960:1;
+ unsigned int res_1680x1050:1;
+ unsigned int res_1728x1080:1;
+ unsigned int res_1920x1200:1;
+
+ unsigned int res_2304x1440:1;
+ unsigned int res_2456x1536:1;
+ unsigned int res_3072x1920:1;
+ unsigned int res_3840x2400:1;
+ unsigned int res_4608x2880:1;
+ unsigned int pad3:3;
+
+ unsigned int res_1280x1024:1;
+ unsigned int pad4:7;
+
+ unsigned int res_1280x768:1;
+ unsigned int pad5:7;
+} __packed;
+
+/* Get supported power state returns info for the encoder and monitor; it
+   relies on the last SetTargetInput and SetTargetOutput calls */
+#define SDVO_CMD_GET_SUPPORTED_POWER_STATES 0x2a
+/* Get power state returns info for the encoder and monitor; it relies on
+   the last SetTargetInput and SetTargetOutput calls */
+#define SDVO_CMD_GET_POWER_STATE 0x2b
+#define SDVO_CMD_GET_ENCODER_POWER_STATE 0x2b
+#define SDVO_CMD_SET_ENCODER_POWER_STATE 0x2c
+# define SDVO_ENCODER_STATE_ON (1 << 0)
+# define SDVO_ENCODER_STATE_STANDBY (1 << 1)
+# define SDVO_ENCODER_STATE_SUSPEND (1 << 2)
+# define SDVO_ENCODER_STATE_OFF (1 << 3)
+# define SDVO_MONITOR_STATE_ON (1 << 4)
+# define SDVO_MONITOR_STATE_STANDBY (1 << 5)
+# define SDVO_MONITOR_STATE_SUSPEND (1 << 6)
+# define SDVO_MONITOR_STATE_OFF (1 << 7)
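+
+/*
+ * Illustrative only (not part of the SDVO protocol definitions): the
+ * reply byte combines encoder and monitor bits, so e.g. a value of 0x11,
+ * i.e. (SDVO_ENCODER_STATE_ON | SDVO_MONITOR_STATE_ON), means both the
+ * encoder and the attached monitor are powered on.
+ */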
+
+#define SDVO_CMD_GET_MAX_PANEL_POWER_SEQUENCING 0x2d
+#define SDVO_CMD_GET_PANEL_POWER_SEQUENCING 0x2e
+#define SDVO_CMD_SET_PANEL_POWER_SEQUENCING 0x2f
+/*
+ * The panel power sequencing parameters are in units of milliseconds.
+ * The high fields are bits 9:8 of the 10-bit values.
+ */
+struct sdvo_panel_power_sequencing {
+ u8 t0;
+ u8 t1;
+ u8 t2;
+ u8 t3;
+ u8 t4;
+
+ unsigned int t0_high:2;
+ unsigned int t1_high:2;
+ unsigned int t2_high:2;
+ unsigned int t3_high:2;
+
+ unsigned int t4_high:2;
+ unsigned int pad:6;
+} __packed;
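+
+/*
+ * Illustrative only: each 10-bit timing value is reassembled from the
+ * low byte plus the two high bits, e.g.
+ *
+ *	t0 = ((u16)seq->t0_high << 8) | seq->t0;
+ *
+ * where "seq" is assumed to point at a struct sdvo_panel_power_sequencing.
+ */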
+
+#define SDVO_CMD_GET_MAX_BACKLIGHT_LEVEL 0x30
+struct sdvo_max_backlight_reply {
+ u8 max_value;
+ u8 default_value;
+} __packed;
+
+#define SDVO_CMD_GET_BACKLIGHT_LEVEL 0x31
+#define SDVO_CMD_SET_BACKLIGHT_LEVEL 0x32
+
+#define SDVO_CMD_GET_AMBIENT_LIGHT 0x33
+struct sdvo_get_ambient_light_reply {
+ u16 trip_low;
+ u16 trip_high;
+ u16 value;
+} __packed;
+#define SDVO_CMD_SET_AMBIENT_LIGHT 0x34
+struct sdvo_set_ambient_light_reply {
+ u16 trip_low;
+ u16 trip_high;
+ unsigned int enable:1;
+ unsigned int pad:7;
+} __packed;
+
+/* Set display power state */
+#define SDVO_CMD_SET_DISPLAY_POWER_STATE 0x7d
+# define SDVO_DISPLAY_STATE_ON (1 << 0)
+# define SDVO_DISPLAY_STATE_STANDBY (1 << 1)
+# define SDVO_DISPLAY_STATE_SUSPEND (1 << 2)
+# define SDVO_DISPLAY_STATE_OFF (1 << 3)
+
+#define SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS 0x84
+struct intel_sdvo_enhancements_reply {
+ unsigned int flicker_filter:1;
+ unsigned int flicker_filter_adaptive:1;
+ unsigned int flicker_filter_2d:1;
+ unsigned int saturation:1;
+ unsigned int hue:1;
+ unsigned int brightness:1;
+ unsigned int contrast:1;
+ unsigned int overscan_h:1;
+
+ unsigned int overscan_v:1;
+ unsigned int hpos:1;
+ unsigned int vpos:1;
+ unsigned int sharpness:1;
+ unsigned int dot_crawl:1;
+ unsigned int dither:1;
+ unsigned int tv_chroma_filter:1;
+ unsigned int tv_luma_filter:1;
+} __packed;
+
+/* The picture enhancement limits below depend on the current TV format,
+ * and thus need to be queried and set after the TV format has been set.
+ */
+#define SDVO_CMD_GET_MAX_FLICKER_FILTER 0x4d
+#define SDVO_CMD_GET_MAX_FLICKER_FILTER_ADAPTIVE 0x7b
+#define SDVO_CMD_GET_MAX_FLICKER_FILTER_2D 0x52
+#define SDVO_CMD_GET_MAX_SATURATION 0x55
+#define SDVO_CMD_GET_MAX_HUE 0x58
+#define SDVO_CMD_GET_MAX_BRIGHTNESS 0x5b
+#define SDVO_CMD_GET_MAX_CONTRAST 0x5e
+#define SDVO_CMD_GET_MAX_OVERSCAN_H 0x61
+#define SDVO_CMD_GET_MAX_OVERSCAN_V 0x64
+#define SDVO_CMD_GET_MAX_HPOS 0x67
+#define SDVO_CMD_GET_MAX_VPOS 0x6a
+#define SDVO_CMD_GET_MAX_SHARPNESS 0x6d
+#define SDVO_CMD_GET_MAX_TV_CHROMA_FILTER 0x74
+#define SDVO_CMD_GET_MAX_TV_LUMA_FILTER 0x77
+struct intel_sdvo_enhancement_limits_reply {
+ u16 max_value;
+ u16 default_value;
+} __packed;
+
+#define SDVO_CMD_GET_LVDS_PANEL_INFORMATION 0x7f
+#define SDVO_CMD_SET_LVDS_PANEL_INFORMATION 0x80
+# define SDVO_LVDS_COLOR_DEPTH_18 (0 << 0)
+# define SDVO_LVDS_COLOR_DEPTH_24 (1 << 0)
+# define SDVO_LVDS_CONNECTOR_SPWG (0 << 2)
+# define SDVO_LVDS_CONNECTOR_OPENLDI (1 << 2)
+# define SDVO_LVDS_SINGLE_CHANNEL (0 << 4)
+# define SDVO_LVDS_DUAL_CHANNEL (1 << 4)
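+
+/*
+ * Illustrative only: a 24bpp OpenLDI dual-channel panel would be described
+ * as (SDVO_LVDS_COLOR_DEPTH_24 | SDVO_LVDS_CONNECTOR_OPENLDI |
+ * SDVO_LVDS_DUAL_CHANNEL), i.e. 0x15.
+ */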
+
+#define SDVO_CMD_GET_FLICKER_FILTER 0x4e
+#define SDVO_CMD_SET_FLICKER_FILTER 0x4f
+#define SDVO_CMD_GET_FLICKER_FILTER_ADAPTIVE 0x50
+#define SDVO_CMD_SET_FLICKER_FILTER_ADAPTIVE 0x51
+#define SDVO_CMD_GET_FLICKER_FILTER_2D 0x53
+#define SDVO_CMD_SET_FLICKER_FILTER_2D 0x54
+#define SDVO_CMD_GET_SATURATION 0x56
+#define SDVO_CMD_SET_SATURATION 0x57
+#define SDVO_CMD_GET_HUE 0x59
+#define SDVO_CMD_SET_HUE 0x5a
+#define SDVO_CMD_GET_BRIGHTNESS 0x5c
+#define SDVO_CMD_SET_BRIGHTNESS 0x5d
+#define SDVO_CMD_GET_CONTRAST 0x5f
+#define SDVO_CMD_SET_CONTRAST 0x60
+#define SDVO_CMD_GET_OVERSCAN_H 0x62
+#define SDVO_CMD_SET_OVERSCAN_H 0x63
+#define SDVO_CMD_GET_OVERSCAN_V 0x65
+#define SDVO_CMD_SET_OVERSCAN_V 0x66
+#define SDVO_CMD_GET_HPOS 0x68
+#define SDVO_CMD_SET_HPOS 0x69
+#define SDVO_CMD_GET_VPOS 0x6b
+#define SDVO_CMD_SET_VPOS 0x6c
+#define SDVO_CMD_GET_SHARPNESS 0x6e
+#define SDVO_CMD_SET_SHARPNESS 0x6f
+#define SDVO_CMD_GET_TV_CHROMA_FILTER 0x75
+#define SDVO_CMD_SET_TV_CHROMA_FILTER 0x76
+#define SDVO_CMD_GET_TV_LUMA_FILTER 0x78
+#define SDVO_CMD_SET_TV_LUMA_FILTER 0x79
+struct intel_sdvo_enhancements_arg {
+ u16 value;
+} __packed;
+
+#define SDVO_CMD_GET_DOT_CRAWL 0x70
+#define SDVO_CMD_SET_DOT_CRAWL 0x71
+# define SDVO_DOT_CRAWL_ON (1 << 0)
+# define SDVO_DOT_CRAWL_DEFAULT_ON (1 << 1)
+
+#define SDVO_CMD_GET_DITHER 0x72
+#define SDVO_CMD_SET_DITHER 0x73
+# define SDVO_DITHER_ON (1 << 0)
+# define SDVO_DITHER_DEFAULT_ON (1 << 1)
+
+#define SDVO_CMD_SET_CONTROL_BUS_SWITCH 0x7a
+# define SDVO_CONTROL_BUS_PROM (1 << 0)
+# define SDVO_CONTROL_BUS_DDC1 (1 << 1)
+# define SDVO_CONTROL_BUS_DDC2 (1 << 2)
+# define SDVO_CONTROL_BUS_DDC3 (1 << 3)
+
+/* HDMI op codes */
+#define SDVO_CMD_GET_SUPP_ENCODE 0x9d
+#define SDVO_CMD_GET_ENCODE 0x9e
+#define SDVO_CMD_SET_ENCODE 0x9f
+ #define SDVO_ENCODE_DVI 0x0
+ #define SDVO_ENCODE_HDMI 0x1
+#define SDVO_CMD_SET_PIXEL_REPLI 0x8b
+#define SDVO_CMD_GET_PIXEL_REPLI 0x8c
+#define SDVO_CMD_GET_COLORIMETRY_CAP 0x8d
+#define SDVO_CMD_SET_COLORIMETRY 0x8e
+ #define SDVO_COLORIMETRY_RGB256 (1 << 0)
+ #define SDVO_COLORIMETRY_RGB220 (1 << 1)
+ #define SDVO_COLORIMETRY_YCrCb422 (1 << 2)
+ #define SDVO_COLORIMETRY_YCrCb444 (1 << 3)
+#define SDVO_CMD_GET_COLORIMETRY 0x8f
+#define SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER 0x90
+#define SDVO_CMD_SET_AUDIO_STAT 0x91
+#define SDVO_CMD_GET_AUDIO_STAT 0x92
+ #define SDVO_AUDIO_ELD_VALID (1 << 0)
+ #define SDVO_AUDIO_PRESENCE_DETECT (1 << 1)
+ #define SDVO_AUDIO_CP_READY (1 << 2)
+#define SDVO_CMD_SET_HBUF_INDEX 0x93
+ #define SDVO_HBUF_INDEX_ELD 0
+ #define SDVO_HBUF_INDEX_AVI_IF 1
+#define SDVO_CMD_GET_HBUF_INDEX 0x94
+#define SDVO_CMD_GET_HBUF_INFO 0x95
+#define SDVO_CMD_SET_HBUF_AV_SPLIT 0x96
+#define SDVO_CMD_GET_HBUF_AV_SPLIT 0x97
+#define SDVO_CMD_SET_HBUF_DATA 0x98
+#define SDVO_CMD_GET_HBUF_DATA 0x99
+#define SDVO_CMD_SET_HBUF_TXRATE 0x9a
+#define SDVO_CMD_GET_HBUF_TXRATE 0x9b
+ #define SDVO_HBUF_TX_DISABLED (0 << 6)
+ #define SDVO_HBUF_TX_ONCE (2 << 6)
+ #define SDVO_HBUF_TX_VSYNC (3 << 6)
+#define SDVO_CMD_GET_AUDIO_TX_INFO 0x9c
+#define SDVO_NEED_TO_STALL (1 << 7)
+
+struct intel_sdvo_encode {
+ u8 dvi_rev;
+ u8 hdmi_rev;
+} __packed;
+
+#endif /* __INTEL_SDVO_REGS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.c b/drivers/gpu/drm/i915/display/intel_snps_phy.c
new file mode 100644
index 000000000..3326c79c7
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_snps_phy.c
@@ -0,0 +1,2036 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/util_macros.h>
+
+#include "intel_ddi.h"
+#include "intel_ddi_buf_trans.h"
+#include "intel_de.h"
+#include "intel_display_types.h"
+#include "intel_snps_phy.h"
+#include "intel_snps_phy_regs.h"
+
+/**
+ * DOC: Synopsys PHY support
+ *
+ * Synopsys PHYs are primarily programmed by looking up magic register values
+ * in tables rather than calculating the necessary values at runtime.
+ *
+ * Of special note is that the SNPS PHYs include a dedicated port PLL, known
+ * as an "MPLLB". The MPLLB replaces the shared DPLL functionality used on
+ * other platforms and must be programmed directly during the modeset
+ * sequence, since it is not handled by the shared DPLL framework.
+ */
+
+void intel_snps_phy_wait_for_calibration(struct drm_i915_private *i915)
+{
+ enum phy phy;
+
+ for_each_phy_masked(phy, ~0) {
+ if (!intel_phy_is_snps(i915, phy))
+ continue;
+
+ /*
+ * If calibration does not complete successfully, we'll remember
+ * which phy was affected and skip setup of the corresponding
+ * output later.
+ */
+ if (intel_de_wait_for_clear(i915, DG2_PHY_MISC(phy),
+ DG2_PHY_DP_TX_ACK_MASK, 25))
+ i915->snps_phy_failed_calibration |= BIT(phy);
+ }
+}
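+
+/*
+ * Sketch only: a later consumer of the bitmask set above can skip output
+ * setup for an affected phy with a check along the lines of
+ *
+ *	if (i915->snps_phy_failed_calibration & BIT(phy))
+ *		return;
+ */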
+
+void intel_snps_phy_update_psr_power_state(struct drm_i915_private *dev_priv,
+ enum phy phy, bool enable)
+{
+ u32 val;
+
+ if (!intel_phy_is_snps(dev_priv, phy))
+ return;
+
+ val = REG_FIELD_PREP(SNPS_PHY_TX_REQ_LN_DIS_PWR_STATE_PSR,
+ enable ? 2 : 3);
+ intel_uncore_rmw(&dev_priv->uncore, SNPS_PHY_TX_REQ(phy),
+ SNPS_PHY_TX_REQ_LN_DIS_PWR_STATE_PSR, val);
+}
+
+void intel_snps_phy_set_signal_levels(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ const struct intel_ddi_buf_trans *trans;
+ enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
+ int n_entries, ln;
+
+ trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans))
+ return;
+
+ for (ln = 0; ln < 4; ln++) {
+ int level = intel_ddi_level(encoder, crtc_state, ln);
+ u32 val = 0;
+
+ val |= REG_FIELD_PREP(SNPS_PHY_TX_EQ_MAIN, trans->entries[level].snps.vswing);
+ val |= REG_FIELD_PREP(SNPS_PHY_TX_EQ_PRE, trans->entries[level].snps.pre_cursor);
+ val |= REG_FIELD_PREP(SNPS_PHY_TX_EQ_POST, trans->entries[level].snps.post_cursor);
+
+ intel_de_write(dev_priv, SNPS_PHY_TX_EQ(ln, phy), val);
+ }
+}
+
+/*
+ * Basic DP link rates with 100 MHz reference clock.
+ */
+
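+/*
+ * A note on notation: REG_FIELD_PREP(), like the generic FIELD_PREP(),
+ * shifts a value into the bit positions selected by the mask, so e.g.
+ * REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 4) yields the register bits
+ * encoding a CP_INT field value of 4.
+ */
+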
+static const struct intel_mpllb_state dg2_dp_rbr_100 = {
+ .clock = 162000,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 4) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 20) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 65) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 127),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 2),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 226),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 5),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 39321) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 3),
+};
+
+static const struct intel_mpllb_state dg2_dp_hbr1_100 = {
+ .clock = 270000,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 4) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 20) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 65) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 127),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 184),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 1),
+};
+
+static const struct intel_mpllb_state dg2_dp_hbr2_100 = {
+ .clock = 540000,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 4) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 20) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 65) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 127),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 184),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 1),
+};
+
+static const struct intel_mpllb_state dg2_dp_hbr3_100 = {
+ .clock = 810000,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 4) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 19) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 65) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 127),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 292),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 1),
+};
+
+static const struct intel_mpllb_state dg2_dp_uhbr10_100 = {
+ .clock = 1000000,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 4) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 21) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 65) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 127),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV_MULTIPLIER, 8) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_WORD_DIV2_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DP2_MODE, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SHIM_DIV32_CLK_SEL, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 368),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 1),
+
+ /*
+ * SSC will be enabled; DP UHBR has a minimum SSC requirement.
+ */
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_PEAK, 58982),
+ .mpllb_sscstep =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_STEPSIZE, 76101),
+};
+
+static const struct intel_mpllb_state dg2_dp_uhbr13_100 = {
+ .clock = 1350000,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 5) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 45) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 65) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 127),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV_MULTIPLIER, 8) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_WORD_DIV2_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DP2_MODE, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 3),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 508),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 1),
+
+ /*
+ * SSC will be enabled; DP UHBR has a minimum SSC requirement.
+ */
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_PEAK, 79626),
+ .mpllb_sscstep =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_STEPSIZE, 102737),
+};
+
+static const struct intel_mpllb_state * const dg2_dp_100_tables[] = {
+ &dg2_dp_rbr_100,
+ &dg2_dp_hbr1_100,
+ &dg2_dp_hbr2_100,
+ &dg2_dp_hbr3_100,
+ &dg2_dp_uhbr10_100,
+ &dg2_dp_uhbr13_100,
+ NULL,
+};
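+
+/*
+ * Sketch only (names assumed for illustration): a NULL-terminated table
+ * like the one above lends itself to a linear search for a matching link
+ * rate:
+ *
+ *	for (i = 0; tables[i]; i++)
+ *		if (tables[i]->clock == clock)
+ *			return tables[i];
+ */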
+
+/*
+ * eDP link rates with 100 MHz reference clock.
+ */
+
+static const struct intel_mpllb_state dg2_edp_r216 = {
+ .clock = 216000,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 4) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 19) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 65) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 127),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 312),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 5),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 52428) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 4),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_PEAK, 50961),
+ .mpllb_sscstep =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_STEPSIZE, 65752),
+};
+
+static const struct intel_mpllb_state dg2_edp_r243 = {
+ .clock = 243000,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 4) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 20) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 65) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 127),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 356),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 5),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 26214) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 2),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_PEAK, 57331),
+ .mpllb_sscstep =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_STEPSIZE, 73971),
+};
+
+static const struct intel_mpllb_state dg2_edp_r324 = {
+ .clock = 324000,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 4) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 20) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 65) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 127),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 2),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 226),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 5),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 39321) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 3),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_PEAK, 38221),
+ .mpllb_sscstep =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_STEPSIZE, 49314),
+};
+
+static const struct intel_mpllb_state dg2_edp_r432 = {
+ .clock = 432000,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 4) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 19) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 65) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 127),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 312),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 5),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 52428) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 4),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_PEAK, 50961),
+ .mpllb_sscstep =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_STEPSIZE, 65752),
+};
+
+static const struct intel_mpllb_state * const dg2_edp_tables[] = {
+ &dg2_dp_rbr_100,
+ &dg2_edp_r216,
+ &dg2_edp_r243,
+ &dg2_dp_hbr1_100,
+ &dg2_edp_r324,
+ &dg2_edp_r432,
+ &dg2_dp_hbr2_100,
+ &dg2_dp_hbr3_100,
+ NULL,
+};
+
+/*
+ * HDMI link rates with 100 MHz reference clock.
+ */
+
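+/*
+ * Hedged observation rather than a guaranteed formula: the HDMI entries
+ * below appear to satisfy
+ *
+ *	clock = refclk * (MULTIPLIER / 2 + 16 + frac) / (10 << TX_CLK_DIV)
+ *
+ * with frac = (FRACN_QUOT + FRACN_REM / FRACN_DEN) / 65536 when FRACN_EN
+ * is set. E.g. for the 27 MHz entry: 100000 * (140 / 2 + 16 +
+ * 26214.4 / 65536) / (10 << 5) = 100000 * 86.4 / 320 = 27000 kHz.
+ */
+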
+static const struct intel_mpllb_state dg2_hdmi_25_175 = {
+ .clock = 25175,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 5) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 15) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 5) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 128) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 143),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 36663) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 71),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_27_0 = {
+ .clock = 27000,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 5) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 15) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 5) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 140) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 5),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 26214) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 2),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_74_25 = {
+ .clock = 74250,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 4) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 15) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 3) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 86) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 5),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 26214) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 2),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_148_5 = {
+ .clock = 148500,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 4) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 15) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 86) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 5),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 26214) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 2),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+/* The values in the tables below are calculated using the algorithm */
+static const struct intel_mpllb_state dg2_hdmi_25200 = {
+ .clock = 25200,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 7) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 5) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 128) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 41943) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 2621),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_27027 = {
+ .clock = 27027,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 5) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 140) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 31876) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 46555),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_28320 = {
+ .clock = 28320,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 5) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 148) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 40894) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 30408),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_30240 = {
+ .clock = 30240,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 5) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 160) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 50331) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 42466),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_31500 = {
+ .clock = 31500,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 7) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 4) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 68) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 26214) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 26214),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_36000 = {
+ .clock = 36000,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 4) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 82) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 39321) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 39320),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_40000 = {
+ .clock = 40000,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 4) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 2),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 96) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 0),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_49500 = {
+ .clock = 49500,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 4) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 1),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 126) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 13107) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 13107),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_50000 = {
+ .clock = 50000,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 4) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 1),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 128) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 0),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_57284 = {
+ .clock = 57284,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 4) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 150) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 42886) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 49701),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_58000 = {
+ .clock = 58000,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 4) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 152) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 52428) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 52427),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_65000 = {
+ .clock = 65000,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 7) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 3) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 72) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 0),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_71000 = {
+ .clock = 71000,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 3) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 80) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 52428) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 52427),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_74176 = {
+ .clock = 74176,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 3) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 86) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 22334) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 43829),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_75000 = {
+ .clock = 75000,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 3) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 88) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 0),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_78750 = {
+ .clock = 78750,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 3) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 2),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 94) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 0),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_85500 = {
+ .clock = 85500,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 3) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 2),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 104) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 26214) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 26214),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_88750 = {
+ .clock = 88750,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 7) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 15) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 3) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 1),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 110) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 0),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_106500 = {
+ .clock = 106500,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 3) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 138) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 13107) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 13107),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_108000 = {
+ .clock = 108000,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 3) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 140) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 26214) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 26214),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_115500 = {
+ .clock = 115500,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 3) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 152) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 26214) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 26214),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_119000 = {
+ .clock = 119000,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 3) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 158) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 13107) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 13107),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_135000 = {
+ .clock = 135000,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 7) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 15) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 76) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 0),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_138500 = {
+ .clock = 138500,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 78) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 26214) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 26214),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_147160 = {
+ .clock = 147160,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 84) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 56623) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 6815),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_148352 = {
+ .clock = 148352,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 86) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 22334) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 43829),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_154000 = {
+ .clock = 154000,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 13) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 2),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 90) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 39321) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 39320),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_162000 = {
+ .clock = 162000,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 2),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 96) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 52428) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 52427),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_209800 = {
+ .clock = 209800,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 7) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 134) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 60293) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 7864),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_262750 = {
+ .clock = 262750,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 7) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 72) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 36044) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 52427),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_267300 = {
+ .clock = 267300,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 7) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 74) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 30146) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 36699),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_268500 = {
+ .clock = 268500,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 7) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 74) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 45875) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 13107),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_296703 = {
+ .clock = 296703,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 86) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 22321) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 36804),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_241500 = {
+ .clock = 241500,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 160) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 39321) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 39320),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_319890 = {
+ .clock = 319890,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 2),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 94) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 64094) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 13631),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_497750 = {
+ .clock = 497750,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 15) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 166) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 36044) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 52427),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_592000 = {
+ .clock = 592000,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 86) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 13107) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 13107),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_593407 = {
+ .clock = 593407,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 86) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 22328) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 7549),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_297 = {
+ .clock = 297000,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 86) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 26214) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 26214),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_594 = {
+ .clock = 594000,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 4) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 15) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 86) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 5),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 26214) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 2),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
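+/*
+ * NULL-terminated list of the fixed HDMI link rates (in kHz) that can be
+ * programmed; intel_snps_phy_check_hdmi_link_rate() only accepts exact
+ * matches from this list.
+ */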
+static const struct intel_mpllb_state * const dg2_hdmi_tables[] = {
+ &dg2_hdmi_25_175,
+ &dg2_hdmi_27_0,
+ &dg2_hdmi_74_25,
+ &dg2_hdmi_148_5,
+ &dg2_hdmi_297,
+ &dg2_hdmi_594,
+ &dg2_hdmi_25200,
+ &dg2_hdmi_27027,
+ &dg2_hdmi_28320,
+ &dg2_hdmi_30240,
+ &dg2_hdmi_31500,
+ &dg2_hdmi_36000,
+ &dg2_hdmi_40000,
+ &dg2_hdmi_49500,
+ &dg2_hdmi_50000,
+ &dg2_hdmi_57284,
+ &dg2_hdmi_58000,
+ &dg2_hdmi_65000,
+ &dg2_hdmi_71000,
+ &dg2_hdmi_74176,
+ &dg2_hdmi_75000,
+ &dg2_hdmi_78750,
+ &dg2_hdmi_85500,
+ &dg2_hdmi_88750,
+ &dg2_hdmi_106500,
+ &dg2_hdmi_108000,
+ &dg2_hdmi_115500,
+ &dg2_hdmi_119000,
+ &dg2_hdmi_135000,
+ &dg2_hdmi_138500,
+ &dg2_hdmi_147160,
+ &dg2_hdmi_148352,
+ &dg2_hdmi_154000,
+ &dg2_hdmi_162000,
+ &dg2_hdmi_209800,
+ &dg2_hdmi_241500,
+ &dg2_hdmi_262750,
+ &dg2_hdmi_267300,
+ &dg2_hdmi_268500,
+ &dg2_hdmi_296703,
+ &dg2_hdmi_319890,
+ &dg2_hdmi_497750,
+ &dg2_hdmi_592000,
+ &dg2_hdmi_593407,
+ NULL,
+};
+
+static const struct intel_mpllb_state * const *
+intel_mpllb_tables_get(struct intel_crtc_state *crtc_state,
+ struct intel_encoder *encoder)
+{
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) {
+ return dg2_edp_tables;
+ } else if (intel_crtc_has_dp_encoder(crtc_state)) {
+ return dg2_dp_100_tables;
+ } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
+ return dg2_hdmi_tables;
+ }
+
+ MISSING_CASE(encoder->type);
+ return NULL;
+}
+
+int intel_mpllb_calc_state(struct intel_crtc_state *crtc_state,
+ struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ const struct intel_mpllb_state * const *tables;
+ int i;
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
+ if (intel_snps_phy_check_hdmi_link_rate(crtc_state->port_clock)
+ != MODE_OK) {
+ /*
+ * FIXME: Can only support fixed HDMI frequencies
+ * until we have a proper algorithm under a valid
+ * license.
+ */
+ drm_dbg_kms(&i915->drm, "Can't support HDMI link rate %d\n",
+ crtc_state->port_clock);
+ return -EINVAL;
+ }
+ }
+
+ tables = intel_mpllb_tables_get(crtc_state, encoder);
+ if (!tables)
+ return -EINVAL;
+
+ for (i = 0; tables[i]; i++) {
+ if (crtc_state->port_clock == tables[i]->clock) {
+ crtc_state->mpllb_state = *tables[i];
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+void intel_mpllb_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ const struct intel_mpllb_state *pll_state = &crtc_state->mpllb_state;
+ enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
+ i915_reg_t enable_reg = (phy <= PHY_D ?
+ DG2_PLL_ENABLE(phy) : MG_PLL_ENABLE(0));
+
+ /*
+ * 3. Software programs the following PLL registers for the desired
+ * frequency.
+ */
+ intel_de_write(dev_priv, SNPS_PHY_MPLLB_CP(phy), pll_state->mpllb_cp);
+ intel_de_write(dev_priv, SNPS_PHY_MPLLB_DIV(phy), pll_state->mpllb_div);
+ intel_de_write(dev_priv, SNPS_PHY_MPLLB_DIV2(phy), pll_state->mpllb_div2);
+ intel_de_write(dev_priv, SNPS_PHY_MPLLB_SSCEN(phy), pll_state->mpllb_sscen);
+ intel_de_write(dev_priv, SNPS_PHY_MPLLB_SSCSTEP(phy), pll_state->mpllb_sscstep);
+ intel_de_write(dev_priv, SNPS_PHY_MPLLB_FRACN1(phy), pll_state->mpllb_fracn1);
+ intel_de_write(dev_priv, SNPS_PHY_MPLLB_FRACN2(phy), pll_state->mpllb_fracn2);
+
+ /*
+ * 4. If the frequency will result in a change to the voltage
+ * requirement, follow the Display Voltage Frequency Switching -
+ * Sequence Before Frequency Change.
+ *
+ * We handle this step in bxt_set_cdclk().
+ */
+
+ /* 5. Software sets DPLL_ENABLE [PLL Enable] to "1". */
+ intel_uncore_rmw(&dev_priv->uncore, enable_reg, 0, PLL_ENABLE);
+
+ /*
+ * 9. Software sets SNPS_PHY_MPLLB_DIV dp_mpllb_force_en to "1". This
+ * will keep the PLL running during the DDI lane programming and any
+ * typeC DP cable disconnect. Do not set the force before enabling the
+ * PLL because that will start the PLL before it has sampled the
+ * divider values.
+ */
+ intel_de_write(dev_priv, SNPS_PHY_MPLLB_DIV(phy),
+ pll_state->mpllb_div | SNPS_PHY_MPLLB_FORCE_EN);
+
+ /*
+ * 10. Software polls on register DPLL_ENABLE [PLL Lock] to confirm PLL
+ * is locked at new settings. This register bit is sampling PHY
+ * dp_mpllb_state interface signal.
+ */
+ if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_LOCK, 5))
+ drm_dbg_kms(&dev_priv->drm, "Port %c PLL not locked\n", phy_name(phy));
+
+ /*
+ * 11. If the frequency will result in a change to the voltage
+ * requirement, follow the Display Voltage Frequency Switching -
+ * Sequence After Frequency Change.
+ *
+ * We handle this step in bxt_set_cdclk().
+ */
+}
+
+void intel_mpllb_disable(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+ i915_reg_t enable_reg = (phy <= PHY_D ?
+ DG2_PLL_ENABLE(phy) : MG_PLL_ENABLE(0));
+
+ /*
+ * 1. If the frequency will result in a change to the voltage
+ * requirement, follow the Display Voltage Frequency Switching -
+ * Sequence Before Frequency Change.
+ *
+ * We handle this step in bxt_set_cdclk().
+ */
+
+ /* 2. Software programs DPLL_ENABLE [PLL Enable] to "0" */
+ intel_uncore_rmw(&i915->uncore, enable_reg, PLL_ENABLE, 0);
+
+ /*
+ * 4. Software programs SNPS_PHY_MPLLB_DIV dp_mpllb_force_en to "0".
+ * This will allow the PLL to stop running.
+ */
+ intel_uncore_rmw(&i915->uncore, SNPS_PHY_MPLLB_DIV(phy),
+ SNPS_PHY_MPLLB_FORCE_EN, 0);
+
+ /*
+ * 5. Software polls DPLL_ENABLE [PLL Lock] for PHY acknowledgment
+ * (dp_txX_ack) that the new transmitter setting request is completed.
+ */
+ if (intel_de_wait_for_clear(i915, enable_reg, PLL_LOCK, 5))
+ drm_err(&i915->drm, "Port %c PLL not locked\n", phy_name(phy));
+
+ /*
+ * 6. If the frequency will result in a change to the voltage
+ * requirement, follow the Display Voltage Frequency Switching -
+ * Sequence After Frequency Change.
+ *
+ * We handle this step in bxt_set_cdclk().
+ */
+}
+
+int intel_mpllb_calc_port_clock(struct intel_encoder *encoder,
+ const struct intel_mpllb_state *pll_state)
+{
+ unsigned int frac_quot = 0, frac_rem = 0, frac_den = 1;
+ unsigned int multiplier, tx_clk_div, refclk;
+ bool frac_en;
+
+	/*
+	 * DG2 always uses a 100 MHz reference clock; the 38400 kHz
+	 * reference is never selected on any supported platform.
+	 */
+	refclk = 100000;
+
+ refclk >>= REG_FIELD_GET(SNPS_PHY_MPLLB_REF_CLK_DIV, pll_state->mpllb_div2) - 1;
+
+ frac_en = REG_FIELD_GET(SNPS_PHY_MPLLB_FRACN_EN, pll_state->mpllb_fracn1);
+
+ if (frac_en) {
+ frac_quot = REG_FIELD_GET(SNPS_PHY_MPLLB_FRACN_QUOT, pll_state->mpllb_fracn2);
+ frac_rem = REG_FIELD_GET(SNPS_PHY_MPLLB_FRACN_REM, pll_state->mpllb_fracn2);
+ frac_den = REG_FIELD_GET(SNPS_PHY_MPLLB_FRACN_DEN, pll_state->mpllb_fracn1);
+ }
+
+ multiplier = REG_FIELD_GET(SNPS_PHY_MPLLB_MULTIPLIER, pll_state->mpllb_div2) / 2 + 16;
+
+ tx_clk_div = REG_FIELD_GET(SNPS_PHY_MPLLB_TX_CLK_DIV, pll_state->mpllb_div);
+
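+	/*
+	 * Port clock = refclk * (multiplier + fraction) / (10 << tx_clk_div),
+	 * with the fraction carried in 1/2^16 units. Worked example for
+	 * dg2_hdmi_594 above: multiplier = 86 / 2 + 16 = 59,
+	 * frac = (26214 + 2/5) / 65536, tx_clk_div = 0, refclk = 100000,
+	 * giving 100000 * (59 + 26214.4 / 65536) / 10 = 594000 kHz.
+	 */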
+ return DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, (multiplier << 16) + frac_quot) +
+ DIV_ROUND_CLOSEST(refclk * frac_rem, frac_den),
+ 10 << (tx_clk_div + 16));
+}
+
+void intel_mpllb_readout_hw_state(struct intel_encoder *encoder,
+ struct intel_mpllb_state *pll_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
+
+ pll_state->mpllb_cp = intel_de_read(dev_priv, SNPS_PHY_MPLLB_CP(phy));
+ pll_state->mpllb_div = intel_de_read(dev_priv, SNPS_PHY_MPLLB_DIV(phy));
+ pll_state->mpllb_div2 = intel_de_read(dev_priv, SNPS_PHY_MPLLB_DIV2(phy));
+ pll_state->mpllb_sscen = intel_de_read(dev_priv, SNPS_PHY_MPLLB_SSCEN(phy));
+ pll_state->mpllb_sscstep = intel_de_read(dev_priv, SNPS_PHY_MPLLB_SSCSTEP(phy));
+ pll_state->mpllb_fracn1 = intel_de_read(dev_priv, SNPS_PHY_MPLLB_FRACN1(phy));
+ pll_state->mpllb_fracn2 = intel_de_read(dev_priv, SNPS_PHY_MPLLB_FRACN2(phy));
+
+ /*
+ * REF_CONTROL is under firmware control and never programmed by the
+ * driver; we read it only for sanity checking purposes. The bspec
+ * only tells us the expected value for one field in this register,
+ * so we'll only read out those specific bits here.
+ */
+ pll_state->ref_control = intel_de_read(dev_priv, SNPS_PHY_REF_CONTROL(phy)) &
+ SNPS_PHY_REF_CONTROL_REF_RANGE;
+
+ /*
+ * MPLLB_DIV is programmed twice, once with the software-computed
+ * state, then again with the MPLLB_FORCE_EN bit added. Drop that
+ * extra bit during readout so that we return the actual expected
+ * software state.
+ */
+ pll_state->mpllb_div &= ~SNPS_PHY_MPLLB_FORCE_EN;
+}
+
+int intel_snps_phy_check_hdmi_link_rate(int clock)
+{
+ const struct intel_mpllb_state * const *tables = dg2_hdmi_tables;
+ int i;
+
+ for (i = 0; tables[i]; i++) {
+ if (clock == tables[i]->clock)
+ return MODE_OK;
+ }
+
+ return MODE_CLOCK_RANGE;
+}
+
+void intel_mpllb_state_verify(struct intel_atomic_state *state,
+ struct intel_crtc_state *new_crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_mpllb_state mpllb_hw_state = { 0 };
+ struct intel_mpllb_state *mpllb_sw_state = &new_crtc_state->mpllb_state;
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
+ struct intel_encoder *encoder;
+
+ if (!IS_DG2(i915))
+ return;
+
+ if (!new_crtc_state->hw.active)
+ return;
+
+ encoder = intel_get_crtc_new_encoder(state, new_crtc_state);
+ intel_mpllb_readout_hw_state(encoder, &mpllb_hw_state);
+
+#define MPLLB_CHECK(__name) \
+ I915_STATE_WARN(mpllb_sw_state->__name != mpllb_hw_state.__name, \
+ "[CRTC:%d:%s] mismatch in MPLLB: %s (expected 0x%08x, found 0x%08x)", \
+ crtc->base.base.id, crtc->base.name, \
+ __stringify(__name), \
+ mpllb_sw_state->__name, mpllb_hw_state.__name)
+
+ MPLLB_CHECK(mpllb_cp);
+ MPLLB_CHECK(mpllb_div);
+ MPLLB_CHECK(mpllb_div2);
+ MPLLB_CHECK(mpllb_fracn1);
+ MPLLB_CHECK(mpllb_fracn2);
+ MPLLB_CHECK(mpllb_sscen);
+ MPLLB_CHECK(mpllb_sscstep);
+
+ /*
+	 * ref_control is handled by hardware/firmware and never
+	 * programmed by the driver, but the proper values are supplied
+ * in the bspec for verification purposes.
+ */
+ MPLLB_CHECK(ref_control);
+
+#undef MPLLB_CHECK
+}
diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.h b/drivers/gpu/drm/i915/display/intel_snps_phy.h
new file mode 100644
index 000000000..557ef820b
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_snps_phy.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_SNPS_PHY_H__
+#define __INTEL_SNPS_PHY_H__
+
+#include <linux/types.h>
+
+struct drm_i915_private;
+struct intel_atomic_state;
+struct intel_crtc_state;
+struct intel_encoder;
+struct intel_mpllb_state;
+enum phy;
+
+void intel_snps_phy_wait_for_calibration(struct drm_i915_private *dev_priv);
+void intel_snps_phy_update_psr_power_state(struct drm_i915_private *dev_priv,
+ enum phy phy, bool enable);
+
+int intel_mpllb_calc_state(struct intel_crtc_state *crtc_state,
+ struct intel_encoder *encoder);
+void intel_mpllb_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+void intel_mpllb_disable(struct intel_encoder *encoder);
+void intel_mpllb_readout_hw_state(struct intel_encoder *encoder,
+ struct intel_mpllb_state *pll_state);
+int intel_mpllb_calc_port_clock(struct intel_encoder *encoder,
+ const struct intel_mpllb_state *pll_state);
+
+int intel_snps_phy_check_hdmi_link_rate(int clock);
+void intel_snps_phy_set_signal_levels(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+void intel_mpllb_state_verify(struct intel_atomic_state *state,
+ struct intel_crtc_state *new_crtc_state);
+
+#endif /* __INTEL_SNPS_PHY_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy_regs.h b/drivers/gpu/drm/i915/display/intel_snps_phy_regs.h
new file mode 100644
index 000000000..0543465aa
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_snps_phy_regs.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __INTEL_SNPS_PHY_REGS__
+#define __INTEL_SNPS_PHY_REGS__
+
+#include "i915_reg_defs.h"
+
+#define _SNPS_PHY_A_BASE 0x168000
+#define _SNPS_PHY_B_BASE 0x169000
+#define _SNPS_PHY(phy) _PHY(phy, \
+ _SNPS_PHY_A_BASE, \
+ _SNPS_PHY_B_BASE)
+#define _SNPS2(phy, reg) (_SNPS_PHY(phy) - \
+ _SNPS_PHY_A_BASE + (reg))
+#define _MMIO_SNPS(phy, reg) _MMIO(_SNPS2(phy, reg))
+#define _MMIO_SNPS_LN(ln, phy, reg) _MMIO(_SNPS2(phy, \
+ (reg) + (ln) * 0x10))
+
+#define SNPS_PHY_MPLLB_CP(phy) _MMIO_SNPS(phy, 0x168000)
+#define SNPS_PHY_MPLLB_CP_INT REG_GENMASK(31, 25)
+#define SNPS_PHY_MPLLB_CP_INT_GS REG_GENMASK(23, 17)
+#define SNPS_PHY_MPLLB_CP_PROP REG_GENMASK(15, 9)
+#define SNPS_PHY_MPLLB_CP_PROP_GS REG_GENMASK(7, 1)
+
+#define SNPS_PHY_MPLLB_DIV(phy) _MMIO_SNPS(phy, 0x168004)
+#define SNPS_PHY_MPLLB_FORCE_EN REG_BIT(31)
+#define SNPS_PHY_MPLLB_DIV_CLK_EN REG_BIT(30)
+#define SNPS_PHY_MPLLB_DIV5_CLK_EN REG_BIT(29)
+#define SNPS_PHY_MPLLB_V2I REG_GENMASK(27, 26)
+#define SNPS_PHY_MPLLB_FREQ_VCO REG_GENMASK(25, 24)
+#define SNPS_PHY_MPLLB_DIV_MULTIPLIER REG_GENMASK(23, 16)
+#define SNPS_PHY_MPLLB_PMIX_EN REG_BIT(10)
+#define SNPS_PHY_MPLLB_DP2_MODE REG_BIT(9)
+#define SNPS_PHY_MPLLB_WORD_DIV2_EN REG_BIT(8)
+#define SNPS_PHY_MPLLB_TX_CLK_DIV REG_GENMASK(7, 5)
+#define SNPS_PHY_MPLLB_SHIM_DIV32_CLK_SEL REG_BIT(0)
+
+#define SNPS_PHY_MPLLB_FRACN1(phy) _MMIO_SNPS(phy, 0x168008)
+#define SNPS_PHY_MPLLB_FRACN_EN REG_BIT(31)
+#define SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN REG_BIT(30)
+#define SNPS_PHY_MPLLB_FRACN_DEN REG_GENMASK(15, 0)
+
+#define SNPS_PHY_MPLLB_FRACN2(phy) _MMIO_SNPS(phy, 0x16800C)
+#define SNPS_PHY_MPLLB_FRACN_REM REG_GENMASK(31, 16)
+#define SNPS_PHY_MPLLB_FRACN_QUOT REG_GENMASK(15, 0)
+
+#define SNPS_PHY_MPLLB_SSCEN(phy) _MMIO_SNPS(phy, 0x168014)
+#define SNPS_PHY_MPLLB_SSC_EN REG_BIT(31)
+#define SNPS_PHY_MPLLB_SSC_UP_SPREAD REG_BIT(30)
+#define SNPS_PHY_MPLLB_SSC_PEAK REG_GENMASK(29, 10)
+
+#define SNPS_PHY_MPLLB_SSCSTEP(phy) _MMIO_SNPS(phy, 0x168018)
+#define SNPS_PHY_MPLLB_SSC_STEPSIZE REG_GENMASK(31, 11)
+
+#define SNPS_PHY_MPLLB_DIV2(phy) _MMIO_SNPS(phy, 0x16801C)
+#define SNPS_PHY_MPLLB_HDMI_PIXEL_CLK_DIV REG_GENMASK(19, 18)
+#define SNPS_PHY_MPLLB_HDMI_DIV REG_GENMASK(17, 15)
+#define SNPS_PHY_MPLLB_REF_CLK_DIV REG_GENMASK(14, 12)
+#define SNPS_PHY_MPLLB_MULTIPLIER REG_GENMASK(11, 0)
+
+#define SNPS_PHY_REF_CONTROL(phy) _MMIO_SNPS(phy, 0x168188)
+#define SNPS_PHY_REF_CONTROL_REF_RANGE REG_GENMASK(31, 27)
+
+#define SNPS_PHY_TX_REQ(phy) _MMIO_SNPS(phy, 0x168200)
+#define SNPS_PHY_TX_REQ_LN_DIS_PWR_STATE_PSR REG_GENMASK(31, 30)
+
+#define SNPS_PHY_TX_EQ(ln, phy) _MMIO_SNPS_LN(ln, phy, 0x168300)
+#define SNPS_PHY_TX_EQ_MAIN REG_GENMASK(23, 18)
+#define SNPS_PHY_TX_EQ_POST REG_GENMASK(15, 10)
+#define SNPS_PHY_TX_EQ_PRE REG_GENMASK(7, 2)
+
+#endif /* __INTEL_SNPS_PHY_REGS__ */
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c
new file mode 100644
index 000000000..7649c50b5
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_sprite.c
@@ -0,0 +1,1848 @@
+/*
+ * Copyright © 2011 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Jesse Barnes <jbarnes@virtuousgeek.org>
+ *
+ * New plane/sprite handling.
+ *
+ * The older chips had a separate interface for programming plane related
+ * registers; newer ones are much simpler and we can use the new DRM plane
+ * support.
+ */
+
+#include <linux/string_helpers.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_blend.h>
+#include <drm/drm_color_mgmt.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_rect.h>
+
+#include "i915_drv.h"
+#include "i915_vgpu.h"
+#include "i9xx_plane.h"
+#include "intel_atomic_plane.h"
+#include "intel_crtc.h"
+#include "intel_de.h"
+#include "intel_display_types.h"
+#include "intel_fb.h"
+#include "intel_frontbuffer.h"
+#include "intel_sprite.h"
+#include "intel_vrr.h"
+
+int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ struct drm_rect *src = &plane_state->uapi.src;
+ u32 src_x, src_y, src_w, src_h, hsub, vsub;
+ bool rotated = drm_rotation_90_or_270(plane_state->hw.rotation);
+
+ /*
+ * FIXME hsub/vsub vs. block size is a mess. Pre-tgl CCS
+ * abuses hsub/vsub so we can't use them here. But as they
+ * are limited to 32bpp RGB formats we don't actually need
+ * to check anything.
+ */
+ if (fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
+ fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS)
+ return 0;
+
+ /*
+ * Hardware doesn't handle subpixel coordinates.
+ * Adjust to (macro)pixel boundary, but be careful not to
+ * increase the source viewport size, because that could
+ * push the downscaling factor out of bounds.
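+	 *
+	 * E.g. a 100.25 pixel wide source is truncated to 100 pixels:
+	 * both the position and the size are rounded down, never up.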
+ */
+ src_x = src->x1 >> 16;
+ src_w = drm_rect_width(src) >> 16;
+ src_y = src->y1 >> 16;
+ src_h = drm_rect_height(src) >> 16;
+
+ drm_rect_init(src, src_x << 16, src_y << 16,
+ src_w << 16, src_h << 16);
+
+ if (fb->format->format == DRM_FORMAT_RGB565 && rotated) {
+ hsub = 2;
+ vsub = 2;
+ } else {
+ hsub = fb->format->hsub;
+ vsub = fb->format->vsub;
+ }
+
+ if (rotated)
+ hsub = vsub = max(hsub, vsub);
+
+ if (src_x % hsub || src_w % hsub) {
+ drm_dbg_kms(&i915->drm, "src x/w (%u, %u) must be a multiple of %u (rotated: %s)\n",
+ src_x, src_w, hsub, str_yes_no(rotated));
+ return -EINVAL;
+ }
+
+ if (src_y % vsub || src_h % vsub) {
+ drm_dbg_kms(&i915->drm, "src y/h (%u, %u) must be a multiple of %u (rotated: %s)\n",
+ src_y, src_h, vsub, str_yes_no(rotated));
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void i9xx_plane_linear_gamma(u16 gamma[8])
+{
+ /* The points are not evenly spaced. */
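+	/*
+	 * (in[i] << 8) / 32 yields { 0, 8, 16, 32, 64, 128, 192, 256 },
+	 * i.e. an identity ramp sampled at those input points, with 256
+	 * representing 1.0.
+	 */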
+ static const u8 in[8] = { 0, 1, 2, 4, 8, 16, 24, 32 };
+ int i;
+
+ for (i = 0; i < 8; i++)
+ gamma[i] = (in[i] << 8) / 32;
+}
+
+static void
+chv_sprite_update_csc(const struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ enum plane_id plane_id = plane->id;
+ /*
+ * |r| | c0 c1 c2 | |cr|
+ * |g| = | c3 c4 c5 | x |y |
+ * |b| | c6 c7 c8 | |cb|
+ *
+ * Coefficients are s3.12.
+ *
+ * Cb and Cr apparently come in as signed already, and
+ * we always get full range data in on account of CLRC0/1.
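+	 *
+	 * For reference, 4096 = 1.0 in s3.12 fixed point, so the BT.601
+	 * Cr->R coefficient below is 5743/4096 ~= 1.402 and the BT.709
+	 * one is 6450/4096 ~= 1.575, matching the standard conversions.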
+ */
+ static const s16 csc_matrix[][9] = {
+ /* BT.601 full range YCbCr -> full range RGB */
+ [DRM_COLOR_YCBCR_BT601] = {
+ 5743, 4096, 0,
+ -2925, 4096, -1410,
+ 0, 4096, 7258,
+ },
+ /* BT.709 full range YCbCr -> full range RGB */
+ [DRM_COLOR_YCBCR_BT709] = {
+ 6450, 4096, 0,
+ -1917, 4096, -767,
+ 0, 4096, 7601,
+ },
+ };
+ const s16 *csc = csc_matrix[plane_state->hw.color_encoding];
+
+	/* RGB data always seems to bypass the CSC */
+ if (!fb->format->is_yuv)
+ return;
+
+ intel_de_write_fw(dev_priv, SPCSCYGOFF(plane_id),
+ SPCSC_OOFF(0) | SPCSC_IOFF(0));
+ intel_de_write_fw(dev_priv, SPCSCCBOFF(plane_id),
+ SPCSC_OOFF(0) | SPCSC_IOFF(0));
+ intel_de_write_fw(dev_priv, SPCSCCROFF(plane_id),
+ SPCSC_OOFF(0) | SPCSC_IOFF(0));
+
+ intel_de_write_fw(dev_priv, SPCSCC01(plane_id),
+ SPCSC_C1(csc[1]) | SPCSC_C0(csc[0]));
+ intel_de_write_fw(dev_priv, SPCSCC23(plane_id),
+ SPCSC_C1(csc[3]) | SPCSC_C0(csc[2]));
+ intel_de_write_fw(dev_priv, SPCSCC45(plane_id),
+ SPCSC_C1(csc[5]) | SPCSC_C0(csc[4]));
+ intel_de_write_fw(dev_priv, SPCSCC67(plane_id),
+ SPCSC_C1(csc[7]) | SPCSC_C0(csc[6]));
+ intel_de_write_fw(dev_priv, SPCSCC8(plane_id), SPCSC_C0(csc[8]));
+
+ intel_de_write_fw(dev_priv, SPCSCYGICLAMP(plane_id),
+ SPCSC_IMAX(1023) | SPCSC_IMIN(0));
+ intel_de_write_fw(dev_priv, SPCSCCBICLAMP(plane_id),
+ SPCSC_IMAX(512) | SPCSC_IMIN(-512));
+ intel_de_write_fw(dev_priv, SPCSCCRICLAMP(plane_id),
+ SPCSC_IMAX(512) | SPCSC_IMIN(-512));
+
+ intel_de_write_fw(dev_priv, SPCSCYGOCLAMP(plane_id),
+ SPCSC_OMAX(1023) | SPCSC_OMIN(0));
+ intel_de_write_fw(dev_priv, SPCSCCBOCLAMP(plane_id),
+ SPCSC_OMAX(1023) | SPCSC_OMIN(0));
+ intel_de_write_fw(dev_priv, SPCSCCROCLAMP(plane_id),
+ SPCSC_OMAX(1023) | SPCSC_OMIN(0));
+}
+
+#define SIN_0 0
+#define COS_0 1
+
+static void
+vlv_sprite_update_clrc(const struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ enum pipe pipe = plane->pipe;
+ enum plane_id plane_id = plane->id;
+ int contrast, brightness, sh_scale, sh_sin, sh_cos;
+
+ if (fb->format->is_yuv &&
+ plane_state->hw.color_range == DRM_COLOR_YCBCR_LIMITED_RANGE) {
+ /*
+ * Expand limited range to full range:
+ * Contrast is applied first and is used to expand Y range.
+ * Brightness is applied second and is used to remove the
+ * offset from Y. Saturation/hue is used to expand CbCr range.
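+		 *
+		 * This works out to contrast = 75 (~255/219 in 1/64
+		 * units), brightness = -19 and sh_scale = 146 (~128/112
+		 * in 1/128 units).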
+ */
+ contrast = DIV_ROUND_CLOSEST(255 << 6, 235 - 16);
+ brightness = -DIV_ROUND_CLOSEST(16 * 255, 235 - 16);
+ sh_scale = DIV_ROUND_CLOSEST(128 << 7, 240 - 128);
+ sh_sin = SIN_0 * sh_scale;
+ sh_cos = COS_0 * sh_scale;
+ } else {
+ /* Pass-through everything. */
+ contrast = 1 << 6;
+ brightness = 0;
+ sh_scale = 1 << 7;
+ sh_sin = SIN_0 * sh_scale;
+ sh_cos = COS_0 * sh_scale;
+ }
+
+	/* FIXME these registers are single buffered :( */
+ intel_de_write_fw(dev_priv, SPCLRC0(pipe, plane_id),
+ SP_CONTRAST(contrast) | SP_BRIGHTNESS(brightness));
+ intel_de_write_fw(dev_priv, SPCLRC1(pipe, plane_id),
+ SP_SH_SIN(sh_sin) | SP_SH_COS(sh_cos));
+}
+
+static void
+vlv_plane_ratio(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ unsigned int *num, unsigned int *den)
+{
+ u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int cpp = fb->format->cpp[0];
+
+ /*
+ * VLV bspec only considers cases where all three planes are
+ * enabled, and cases where the primary and one sprite is enabled.
+ * Let's assume the case with just two sprites enabled also
+ * maps to the latter case.
+ */
+ if (hweight8(active_planes) == 3) {
+ switch (cpp) {
+ case 8:
+ *num = 11;
+ *den = 8;
+ break;
+ case 4:
+ *num = 18;
+ *den = 16;
+ break;
+ default:
+ *num = 1;
+ *den = 1;
+ break;
+ }
+ } else if (hweight8(active_planes) == 2) {
+ switch (cpp) {
+ case 8:
+ *num = 10;
+ *den = 8;
+ break;
+ case 4:
+ *num = 17;
+ *den = 16;
+ break;
+ default:
+ *num = 1;
+ *den = 1;
+ break;
+ }
+ } else {
+ switch (cpp) {
+ case 8:
+ *num = 10;
+ *den = 8;
+ break;
+ default:
+ *num = 1;
+ *den = 1;
+ break;
+ }
+ }
+}
+
+int vlv_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ unsigned int pixel_rate;
+ unsigned int num, den;
+
+ /*
+ * Note that crtc_state->pixel_rate accounts for both
+ * horizontal and vertical panel fitter downscaling factors.
+ * Pre-HSW bspec tells us to only consider the horizontal
+ * downscaling factor here. We ignore that and just consider
+ * both for simplicity.
+ */
+ pixel_rate = crtc_state->pixel_rate;
+
+ vlv_plane_ratio(crtc_state, plane_state, &num, &den);
+
+ return DIV_ROUND_UP(pixel_rate * num, den);
+}
+
+static u32 vlv_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state)
+{
+ u32 sprctl = 0;
+
+ if (crtc_state->gamma_enable)
+ sprctl |= SP_PIPE_GAMMA_ENABLE;
+
+ return sprctl;
+}
+
+static u32 vlv_sprite_ctl(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int rotation = plane_state->hw.rotation;
+ const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
+ u32 sprctl;
+
+ sprctl = SP_ENABLE;
+
+ switch (fb->format->format) {
+ case DRM_FORMAT_YUYV:
+ sprctl |= SP_FORMAT_YUV422 | SP_YUV_ORDER_YUYV;
+ break;
+ case DRM_FORMAT_YVYU:
+ sprctl |= SP_FORMAT_YUV422 | SP_YUV_ORDER_YVYU;
+ break;
+ case DRM_FORMAT_UYVY:
+ sprctl |= SP_FORMAT_YUV422 | SP_YUV_ORDER_UYVY;
+ break;
+ case DRM_FORMAT_VYUY:
+ sprctl |= SP_FORMAT_YUV422 | SP_YUV_ORDER_VYUY;
+ break;
+ case DRM_FORMAT_C8:
+ sprctl |= SP_FORMAT_8BPP;
+ break;
+ case DRM_FORMAT_RGB565:
+ sprctl |= SP_FORMAT_BGR565;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ sprctl |= SP_FORMAT_BGRX8888;
+ break;
+ case DRM_FORMAT_ARGB8888:
+ sprctl |= SP_FORMAT_BGRA8888;
+ break;
+ case DRM_FORMAT_XBGR2101010:
+ sprctl |= SP_FORMAT_RGBX1010102;
+ break;
+ case DRM_FORMAT_ABGR2101010:
+ sprctl |= SP_FORMAT_RGBA1010102;
+ break;
+ case DRM_FORMAT_XRGB2101010:
+ sprctl |= SP_FORMAT_BGRX1010102;
+ break;
+ case DRM_FORMAT_ARGB2101010:
+ sprctl |= SP_FORMAT_BGRA1010102;
+ break;
+ case DRM_FORMAT_XBGR8888:
+ sprctl |= SP_FORMAT_RGBX8888;
+ break;
+ case DRM_FORMAT_ABGR8888:
+ sprctl |= SP_FORMAT_RGBA8888;
+ break;
+ default:
+ MISSING_CASE(fb->format->format);
+ return 0;
+ }
+
+ if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709)
+ sprctl |= SP_YUV_FORMAT_BT709;
+
+ if (fb->modifier == I915_FORMAT_MOD_X_TILED)
+ sprctl |= SP_TILED;
+
+ if (rotation & DRM_MODE_ROTATE_180)
+ sprctl |= SP_ROTATE_180;
+
+ if (rotation & DRM_MODE_REFLECT_X)
+ sprctl |= SP_MIRROR;
+
+ if (key->flags & I915_SET_COLORKEY_SOURCE)
+ sprctl |= SP_SOURCE_KEY;
+
+ return sprctl;
+}
+
+static void vlv_sprite_update_gamma(const struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ enum pipe pipe = plane->pipe;
+ enum plane_id plane_id = plane->id;
+ u16 gamma[8];
+ int i;
+
+	/* RGB data always seems to bypass the gamma */
+ if (!fb->format->is_yuv)
+ return;
+
+ i9xx_plane_linear_gamma(gamma);
+
+	/* FIXME these registers are single buffered :( */
+ /* The two end points are implicit (0.0 and 1.0) */
+ for (i = 1; i < 8 - 1; i++)
+ intel_de_write_fw(dev_priv, SPGAMC(pipe, plane_id, i - 1),
+ gamma[i] << 16 | gamma[i] << 8 | gamma[i]);
+}
+
+static void
+vlv_sprite_update_noarm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+ enum plane_id plane_id = plane->id;
+ int crtc_x = plane_state->uapi.dst.x1;
+ int crtc_y = plane_state->uapi.dst.y1;
+ u32 crtc_w = drm_rect_width(&plane_state->uapi.dst);
+ u32 crtc_h = drm_rect_height(&plane_state->uapi.dst);
+
+ intel_de_write_fw(dev_priv, SPSTRIDE(pipe, plane_id),
+ plane_state->view.color_plane[0].mapping_stride);
+ intel_de_write_fw(dev_priv, SPPOS(pipe, plane_id),
+ SP_POS_Y(crtc_y) | SP_POS_X(crtc_x));
+ intel_de_write_fw(dev_priv, SPSIZE(pipe, plane_id),
+ SP_HEIGHT(crtc_h - 1) | SP_WIDTH(crtc_w - 1));
+}
+
+static void
+vlv_sprite_update_arm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+ enum plane_id plane_id = plane->id;
+ const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
+ u32 sprsurf_offset = plane_state->view.color_plane[0].offset;
+ u32 x = plane_state->view.color_plane[0].x;
+ u32 y = plane_state->view.color_plane[0].y;
+ u32 sprctl, linear_offset;
+
+ sprctl = plane_state->ctl | vlv_sprite_ctl_crtc(crtc_state);
+
+ linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
+
+ if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B)
+ chv_sprite_update_csc(plane_state);
+
+ if (key->flags) {
+ intel_de_write_fw(dev_priv, SPKEYMINVAL(pipe, plane_id),
+ key->min_value);
+ intel_de_write_fw(dev_priv, SPKEYMSK(pipe, plane_id),
+ key->channel_mask);
+ intel_de_write_fw(dev_priv, SPKEYMAXVAL(pipe, plane_id),
+ key->max_value);
+ }
+
+ intel_de_write_fw(dev_priv, SPCONSTALPHA(pipe, plane_id), 0);
+
+ intel_de_write_fw(dev_priv, SPLINOFF(pipe, plane_id), linear_offset);
+ intel_de_write_fw(dev_priv, SPTILEOFF(pipe, plane_id),
+ SP_OFFSET_Y(y) | SP_OFFSET_X(x));
+
+ /*
+ * The control register self-arms if the plane was previously
+ * disabled. Try to make the plane enable atomic by writing
+ * the control register just before the surface register.
+ */
+ intel_de_write_fw(dev_priv, SPCNTR(pipe, plane_id), sprctl);
+ intel_de_write_fw(dev_priv, SPSURF(pipe, plane_id),
+ intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
+
+ vlv_sprite_update_clrc(plane_state);
+ vlv_sprite_update_gamma(plane_state);
+}
+
+static void
+vlv_sprite_disable_arm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+ enum plane_id plane_id = plane->id;
+
+ intel_de_write_fw(dev_priv, SPCNTR(pipe, plane_id), 0);
+ intel_de_write_fw(dev_priv, SPSURF(pipe, plane_id), 0);
+}
+
+static bool
+vlv_sprite_get_hw_state(struct intel_plane *plane,
+ enum pipe *pipe)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum intel_display_power_domain power_domain;
+ enum plane_id plane_id = plane->id;
+ intel_wakeref_t wakeref;
+ bool ret;
+
+ power_domain = POWER_DOMAIN_PIPE(plane->pipe);
+ wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ if (!wakeref)
+ return false;
+
+ ret = intel_de_read(dev_priv, SPCNTR(plane->pipe, plane_id)) & SP_ENABLE;
+
+ *pipe = plane->pipe;
+
+ intel_display_power_put(dev_priv, power_domain, wakeref);
+
+ return ret;
+}
+
+static void ivb_plane_ratio(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ unsigned int *num, unsigned int *den)
+{
+ u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int cpp = fb->format->cpp[0];
+
+ if (hweight8(active_planes) == 2) {
+ switch (cpp) {
+ case 8:
+ *num = 10;
+ *den = 8;
+ break;
+ case 4:
+ *num = 17;
+ *den = 16;
+ break;
+ default:
+ *num = 1;
+ *den = 1;
+ break;
+ }
+ } else {
+ switch (cpp) {
+ case 8:
+ *num = 9;
+ *den = 8;
+ break;
+ default:
+ *num = 1;
+ *den = 1;
+ break;
+ }
+ }
+}
+
+static void ivb_plane_ratio_scaling(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ unsigned int *num, unsigned int *den)
+{
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int cpp = fb->format->cpp[0];
+
+ switch (cpp) {
+ case 8:
+ *num = 12;
+ *den = 8;
+ break;
+ case 4:
+ *num = 19;
+ *den = 16;
+ break;
+ case 2:
+ *num = 33;
+ *den = 32;
+ break;
+ default:
+ *num = 1;
+ *den = 1;
+ break;
+ }
+}
+
+int ivb_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ unsigned int pixel_rate;
+ unsigned int num, den;
+
+ /*
+ * Note that crtc_state->pixel_rate accounts for both
+ * horizontal and vertical panel fitter downscaling factors.
+ * Pre-HSW bspec tells us to only consider the horizontal
+ * downscaling factor here. We ignore that and just consider
+ * both for simplicity.
+ */
+ pixel_rate = crtc_state->pixel_rate;
+
+ ivb_plane_ratio(crtc_state, plane_state, &num, &den);
+
+ return DIV_ROUND_UP(pixel_rate * num, den);
+}
+
+static int ivb_sprite_min_cdclk(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ unsigned int src_w, dst_w, pixel_rate;
+ unsigned int num, den;
+
+ /*
+ * Note that crtc_state->pixel_rate accounts for both
+ * horizontal and vertical panel fitter downscaling factors.
+ * Pre-HSW bspec tells us to only consider the horizontal
+ * downscaling factor here. We ignore that and just consider
+ * both for simplicity.
+ */
+ pixel_rate = crtc_state->pixel_rate;
+
+ src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+ dst_w = drm_rect_width(&plane_state->uapi.dst);
+
+ if (src_w != dst_w)
+ ivb_plane_ratio_scaling(crtc_state, plane_state, &num, &den);
+ else
+ ivb_plane_ratio(crtc_state, plane_state, &num, &den);
+
+ /* Horizontal downscaling limits the maximum pixel rate */
+ dst_w = min(src_w, dst_w);
+
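+	/*
+	 * E.g. a 2x horizontal downscale of a 4 byte/pixel plane uses
+	 * the 19/16 scaling ratio with src_w/dst_w = 2, so the minimum
+	 * cdclk is 19 * 2 / 16 = 2.375x the pixel rate.
+	 */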
+ return DIV_ROUND_UP_ULL(mul_u32_u32(pixel_rate, num * src_w),
+ den * dst_w);
+}
+
+static void hsw_plane_ratio(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ unsigned int *num, unsigned int *den)
+{
+ u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int cpp = fb->format->cpp[0];
+
+ if (hweight8(active_planes) == 2) {
+ switch (cpp) {
+ case 8:
+ *num = 10;
+ *den = 8;
+ break;
+ default:
+ *num = 1;
+ *den = 1;
+ break;
+ }
+ } else {
+ switch (cpp) {
+ case 8:
+ *num = 9;
+ *den = 8;
+ break;
+ default:
+ *num = 1;
+ *den = 1;
+ break;
+ }
+ }
+}
+
+int hsw_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ unsigned int pixel_rate = crtc_state->pixel_rate;
+ unsigned int num, den;
+
+ hsw_plane_ratio(crtc_state, plane_state, &num, &den);
+
+ return DIV_ROUND_UP(pixel_rate * num, den);
+}
+
+static u32 ivb_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state)
+{
+ u32 sprctl = 0;
+
+ if (crtc_state->gamma_enable)
+ sprctl |= SPRITE_PIPE_GAMMA_ENABLE;
+
+ if (crtc_state->csc_enable)
+ sprctl |= SPRITE_PIPE_CSC_ENABLE;
+
+ return sprctl;
+}
+
+static bool ivb_need_sprite_gamma(const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(plane_state->uapi.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+
+ return fb->format->cpp[0] == 8 &&
+ (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv));
+}
+
+static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(plane_state->uapi.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int rotation = plane_state->hw.rotation;
+ const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
+ u32 sprctl;
+
+ sprctl = SPRITE_ENABLE;
+
+ if (IS_IVYBRIDGE(dev_priv))
+ sprctl |= SPRITE_TRICKLE_FEED_DISABLE;
+
+ switch (fb->format->format) {
+ case DRM_FORMAT_XBGR8888:
+ sprctl |= SPRITE_FORMAT_RGBX888 | SPRITE_RGB_ORDER_RGBX;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ sprctl |= SPRITE_FORMAT_RGBX888;
+ break;
+ case DRM_FORMAT_XBGR2101010:
+ sprctl |= SPRITE_FORMAT_RGBX101010 | SPRITE_RGB_ORDER_RGBX;
+ break;
+ case DRM_FORMAT_XRGB2101010:
+ sprctl |= SPRITE_FORMAT_RGBX101010;
+ break;
+ case DRM_FORMAT_XBGR16161616F:
+ sprctl |= SPRITE_FORMAT_RGBX161616 | SPRITE_RGB_ORDER_RGBX;
+ break;
+ case DRM_FORMAT_XRGB16161616F:
+ sprctl |= SPRITE_FORMAT_RGBX161616;
+ break;
+ case DRM_FORMAT_YUYV:
+ sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YUYV;
+ break;
+ case DRM_FORMAT_YVYU:
+ sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YVYU;
+ break;
+ case DRM_FORMAT_UYVY:
+ sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_UYVY;
+ break;
+ case DRM_FORMAT_VYUY:
+ sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_VYUY;
+ break;
+ default:
+ MISSING_CASE(fb->format->format);
+ return 0;
+ }
+
+ if (!ivb_need_sprite_gamma(plane_state))
+ sprctl |= SPRITE_PLANE_GAMMA_DISABLE;
+
+ if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709)
+ sprctl |= SPRITE_YUV_TO_RGB_CSC_FORMAT_BT709;
+
+ if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
+ sprctl |= SPRITE_YUV_RANGE_CORRECTION_DISABLE;
+
+ if (fb->modifier == I915_FORMAT_MOD_X_TILED)
+ sprctl |= SPRITE_TILED;
+
+ if (rotation & DRM_MODE_ROTATE_180)
+ sprctl |= SPRITE_ROTATE_180;
+
+ if (key->flags & I915_SET_COLORKEY_DESTINATION)
+ sprctl |= SPRITE_DEST_KEY;
+ else if (key->flags & I915_SET_COLORKEY_SOURCE)
+ sprctl |= SPRITE_SOURCE_KEY;
+
+ return sprctl;
+}
+
+static void ivb_sprite_linear_gamma(const struct intel_plane_state *plane_state,
+ u16 gamma[18])
+{
+ int scale, i;
+
+ /*
+ * WaFP16GammaEnabling:ivb,hsw
+ * "Workaround : When using the 64-bit format, the sprite output
+ * on each color channel has one quarter amplitude. It can be
+ * brought up to full amplitude by using sprite internal gamma
+ * correction, pipe gamma correction, or pipe color space
+ * conversion to multiply the sprite output by four."
+ */
+ scale = 4;
+
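+	/*
+	 * With scale = 4 this is a slope-4 ramp: gamma[i] = min(i * 256,
+	 * 1023), so entries 0-3 are 0/256/512/768 and everything from
+	 * entry 4 onwards clamps at full scale, i.e. the output is
+	 * multiplied by four until it saturates.
+	 */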
+ for (i = 0; i < 16; i++)
+ gamma[i] = min((scale * i << 10) / 16, (1 << 10) - 1);
+
+ gamma[i] = min((scale * i << 10) / 16, 1 << 10);
+ i++;
+
+ gamma[i] = 3 << 10;
+ i++;
+}
+
+static void ivb_sprite_update_gamma(const struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+ u16 gamma[18];
+ int i;
+
+ if (!ivb_need_sprite_gamma(plane_state))
+ return;
+
+ ivb_sprite_linear_gamma(plane_state, gamma);
+
+	/* FIXME these registers are single buffered :( */
+ for (i = 0; i < 16; i++)
+ intel_de_write_fw(dev_priv, SPRGAMC(pipe, i),
+ gamma[i] << 20 | gamma[i] << 10 | gamma[i]);
+
+ intel_de_write_fw(dev_priv, SPRGAMC16(pipe, 0), gamma[i]);
+ intel_de_write_fw(dev_priv, SPRGAMC16(pipe, 1), gamma[i]);
+ intel_de_write_fw(dev_priv, SPRGAMC16(pipe, 2), gamma[i]);
+ i++;
+
+ intel_de_write_fw(dev_priv, SPRGAMC17(pipe, 0), gamma[i]);
+ intel_de_write_fw(dev_priv, SPRGAMC17(pipe, 1), gamma[i]);
+ intel_de_write_fw(dev_priv, SPRGAMC17(pipe, 2), gamma[i]);
+ i++;
+}
+
+static void
+ivb_sprite_update_noarm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+ int crtc_x = plane_state->uapi.dst.x1;
+ int crtc_y = plane_state->uapi.dst.y1;
+ u32 crtc_w = drm_rect_width(&plane_state->uapi.dst);
+ u32 crtc_h = drm_rect_height(&plane_state->uapi.dst);
+ u32 src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+ u32 src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
+ u32 sprscale = 0;
+
+ if (crtc_w != src_w || crtc_h != src_h)
+ sprscale = SPRITE_SCALE_ENABLE |
+ SPRITE_SRC_WIDTH(src_w - 1) |
+ SPRITE_SRC_HEIGHT(src_h - 1);
+
+ intel_de_write_fw(dev_priv, SPRSTRIDE(pipe),
+ plane_state->view.color_plane[0].mapping_stride);
+ intel_de_write_fw(dev_priv, SPRPOS(pipe),
+ SPRITE_POS_Y(crtc_y) | SPRITE_POS_X(crtc_x));
+ intel_de_write_fw(dev_priv, SPRSIZE(pipe),
+ SPRITE_HEIGHT(crtc_h - 1) | SPRITE_WIDTH(crtc_w - 1));
+ if (IS_IVYBRIDGE(dev_priv))
+ intel_de_write_fw(dev_priv, SPRSCALE(pipe), sprscale);
+}
+
+static void
+ivb_sprite_update_arm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+ const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
+ u32 sprsurf_offset = plane_state->view.color_plane[0].offset;
+ u32 x = plane_state->view.color_plane[0].x;
+ u32 y = plane_state->view.color_plane[0].y;
+ u32 sprctl, linear_offset;
+
+ sprctl = plane_state->ctl | ivb_sprite_ctl_crtc(crtc_state);
+
+ linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
+
+ if (key->flags) {
+ intel_de_write_fw(dev_priv, SPRKEYVAL(pipe), key->min_value);
+ intel_de_write_fw(dev_priv, SPRKEYMSK(pipe),
+ key->channel_mask);
+ intel_de_write_fw(dev_priv, SPRKEYMAX(pipe), key->max_value);
+ }
+
+	/*
+	 * HSW consolidates SPRTILEOFF and SPRLINOFF into a single
+	 * SPROFFSET register.
+	 */
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
+ intel_de_write_fw(dev_priv, SPROFFSET(pipe),
+ SPRITE_OFFSET_Y(y) | SPRITE_OFFSET_X(x));
+ } else {
+ intel_de_write_fw(dev_priv, SPRLINOFF(pipe), linear_offset);
+ intel_de_write_fw(dev_priv, SPRTILEOFF(pipe),
+ SPRITE_OFFSET_Y(y) | SPRITE_OFFSET_X(x));
+ }
+
+ /*
+ * The control register self-arms if the plane was previously
+ * disabled. Try to make the plane enable atomic by writing
+ * the control register just before the surface register.
+ */
+ intel_de_write_fw(dev_priv, SPRCTL(pipe), sprctl);
+ intel_de_write_fw(dev_priv, SPRSURF(pipe),
+ intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
+
+ ivb_sprite_update_gamma(plane_state);
+}
+
+static void
+ivb_sprite_disable_arm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+
+ intel_de_write_fw(dev_priv, SPRCTL(pipe), 0);
+ /* Disable the scaler */
+ if (IS_IVYBRIDGE(dev_priv))
+ intel_de_write_fw(dev_priv, SPRSCALE(pipe), 0);
+ intel_de_write_fw(dev_priv, SPRSURF(pipe), 0);
+}
+
+static bool
+ivb_sprite_get_hw_state(struct intel_plane *plane,
+ enum pipe *pipe)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum intel_display_power_domain power_domain;
+ intel_wakeref_t wakeref;
+ bool ret;
+
+ power_domain = POWER_DOMAIN_PIPE(plane->pipe);
+ wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ if (!wakeref)
+ return false;
+
+ ret = intel_de_read(dev_priv, SPRCTL(plane->pipe)) & SPRITE_ENABLE;
+
+ *pipe = plane->pipe;
+
+ intel_display_power_put(dev_priv, power_domain, wakeref);
+
+ return ret;
+}
+
+static int g4x_sprite_min_cdclk(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int hscale, pixel_rate;
+ unsigned int limit, decimate;
+
+ /*
+ * Note that crtc_state->pixel_rate accounts for both
+ * horizontal and vertical panel fitter downscaling factors.
+ * Pre-HSW bspec tells us to only consider the horizontal
+ * downscaling factor here. We ignore that and just consider
+ * both for simplicity.
+ */
+ pixel_rate = crtc_state->pixel_rate;
+
+ /* Horizontal downscaling limits the maximum pixel rate */
+ hscale = drm_rect_calc_hscale(&plane_state->uapi.src,
+ &plane_state->uapi.dst,
+ 0, INT_MAX);
+ hscale = max(hscale, 0x10000u);
+
+ /* Decimation steps at 2x,4x,8x,16x */
+ decimate = ilog2(hscale >> 16);
+ hscale >>= decimate;
+
+ /* Starting limit is 90% of cdclk */
+ limit = 9;
+
+ /* -10% per decimation step */
+ limit -= decimate;
+
+ /* -10% for RGB */
+ if (!fb->format->is_yuv)
+ limit--;
+
+ /*
+ * We should also do -10% if sprite scaling is enabled
+ * on the other pipe, but we can't really check for that,
+ * so we ignore it.
+ */
+
+ return DIV_ROUND_UP_ULL(mul_u32_u32(pixel_rate, 10 * hscale),
+ limit << 16);
+}
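+
+/*
+ * Worked example (illustrative numbers, not from bspec): a 2.5x horizontal
+ * downscale gives hscale = 0x28000, so decimate = ilog2(2) = 1 and hscale
+ * becomes 0x14000 (1.25x). For a YUV fb the limit is then 9 - 1 = 8 (80%
+ * of cdclk), so the function returns pixel_rate * 10 * 1.25 / 8, i.e.
+ * pixel_rate * 1.5625.
+ */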
+
+static unsigned int
+g4x_sprite_max_stride(struct intel_plane *plane,
+ u32 pixel_format, u64 modifier,
+ unsigned int rotation)
+{
+ const struct drm_format_info *info = drm_format_info(pixel_format);
+ int cpp = info->cpp[0];
+
+ /* Limit to 4k pixels to guarantee TILEOFF.x doesn't get too big. */
+ if (modifier == I915_FORMAT_MOD_X_TILED)
+ return min(4096 * cpp, 16 * 1024);
+ else
+ return 16 * 1024;
+}
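+
+/*
+ * E.g. (illustrative): an X-tiled XRGB8888 fb (cpp == 4) gets
+ * min(4096 * 4, 16384) == 16384 bytes, i.e. exactly 4096 pixels, while
+ * X-tiled YUYV (cpp == 2) gets 8192 bytes. Linear fbs always get the
+ * full 16 KiB.
+ */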
+
+static unsigned int
+hsw_sprite_max_stride(struct intel_plane *plane,
+ u32 pixel_format, u64 modifier,
+ unsigned int rotation)
+{
+ const struct drm_format_info *info = drm_format_info(pixel_format);
+ int cpp = info->cpp[0];
+
+ /* Limit to 8k pixels to guarantee OFFSET.x doesn't get too big. */
+ return min(8192 * cpp, 16 * 1024);
+}
+
+static u32 g4x_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state)
+{
+ u32 dvscntr = 0;
+
+ if (crtc_state->gamma_enable)
+ dvscntr |= DVS_PIPE_GAMMA_ENABLE;
+
+ if (crtc_state->csc_enable)
+ dvscntr |= DVS_PIPE_CSC_ENABLE;
+
+ return dvscntr;
+}
+
+static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(plane_state->uapi.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int rotation = plane_state->hw.rotation;
+ const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
+ u32 dvscntr;
+
+ dvscntr = DVS_ENABLE;
+
+ if (IS_SANDYBRIDGE(dev_priv))
+ dvscntr |= DVS_TRICKLE_FEED_DISABLE;
+
+ switch (fb->format->format) {
+ case DRM_FORMAT_XBGR8888:
+ dvscntr |= DVS_FORMAT_RGBX888 | DVS_RGB_ORDER_XBGR;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ dvscntr |= DVS_FORMAT_RGBX888;
+ break;
+ case DRM_FORMAT_XBGR2101010:
+ dvscntr |= DVS_FORMAT_RGBX101010 | DVS_RGB_ORDER_XBGR;
+ break;
+ case DRM_FORMAT_XRGB2101010:
+ dvscntr |= DVS_FORMAT_RGBX101010;
+ break;
+ case DRM_FORMAT_XBGR16161616F:
+ dvscntr |= DVS_FORMAT_RGBX161616 | DVS_RGB_ORDER_XBGR;
+ break;
+ case DRM_FORMAT_XRGB16161616F:
+ dvscntr |= DVS_FORMAT_RGBX161616;
+ break;
+ case DRM_FORMAT_YUYV:
+ dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YUYV;
+ break;
+ case DRM_FORMAT_YVYU:
+ dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YVYU;
+ break;
+ case DRM_FORMAT_UYVY:
+ dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_UYVY;
+ break;
+ case DRM_FORMAT_VYUY:
+ dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_VYUY;
+ break;
+ default:
+ MISSING_CASE(fb->format->format);
+ return 0;
+ }
+
+ if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709)
+ dvscntr |= DVS_YUV_FORMAT_BT709;
+
+ if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
+ dvscntr |= DVS_YUV_RANGE_CORRECTION_DISABLE;
+
+ if (fb->modifier == I915_FORMAT_MOD_X_TILED)
+ dvscntr |= DVS_TILED;
+
+ if (rotation & DRM_MODE_ROTATE_180)
+ dvscntr |= DVS_ROTATE_180;
+
+ if (key->flags & I915_SET_COLORKEY_DESTINATION)
+ dvscntr |= DVS_DEST_KEY;
+ else if (key->flags & I915_SET_COLORKEY_SOURCE)
+ dvscntr |= DVS_SOURCE_KEY;
+
+ return dvscntr;
+}
+
+static void g4x_sprite_update_gamma(const struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ enum pipe pipe = plane->pipe;
+ u16 gamma[8];
+ int i;
+
+ /* RGB data seems to always bypass the gamma */
+ if (!fb->format->is_yuv)
+ return;
+
+ i9xx_plane_linear_gamma(gamma);
+
+ /* FIXME these registers are single buffered :( */
+ /* The two end points are implicit (0.0 and 1.0) */
+ for (i = 1; i < 8 - 1; i++)
+ intel_de_write_fw(dev_priv, DVSGAMC_G4X(pipe, i - 1),
+ gamma[i] << 16 | gamma[i] << 8 | gamma[i]);
+}
+
+static void ilk_sprite_linear_gamma(u16 gamma[17])
+{
+ int i;
+
+ for (i = 0; i < 17; i++)
+ gamma[i] = (i << 10) / 16;
+}
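+
+/*
+ * The ramp above covers 0..1024 (where 1024 represents 1.0) in 17 evenly
+ * spaced steps of 64, e.g. gamma[8] = (8 << 10) / 16 = 512, i.e. 0.5.
+ */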
+
+static void ilk_sprite_update_gamma(const struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ enum pipe pipe = plane->pipe;
+ u16 gamma[17];
+ int i;
+
+ /* RGB data seems to always bypass the gamma */
+ if (!fb->format->is_yuv)
+ return;
+
+ ilk_sprite_linear_gamma(gamma);
+
+ /* FIXME these registers are single buffered :( */
+ for (i = 0; i < 16; i++)
+ intel_de_write_fw(dev_priv, DVSGAMC_ILK(pipe, i),
+ gamma[i] << 20 | gamma[i] << 10 | gamma[i]);
+
+ intel_de_write_fw(dev_priv, DVSGAMCMAX_ILK(pipe, 0), gamma[i]);
+ intel_de_write_fw(dev_priv, DVSGAMCMAX_ILK(pipe, 1), gamma[i]);
+ intel_de_write_fw(dev_priv, DVSGAMCMAX_ILK(pipe, 2), gamma[i]);
+}
+
+static void
+g4x_sprite_update_noarm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+ int crtc_x = plane_state->uapi.dst.x1;
+ int crtc_y = plane_state->uapi.dst.y1;
+ u32 crtc_w = drm_rect_width(&plane_state->uapi.dst);
+ u32 crtc_h = drm_rect_height(&plane_state->uapi.dst);
+ u32 src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+ u32 src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
+ u32 dvsscale = 0;
+
+ if (crtc_w != src_w || crtc_h != src_h)
+ dvsscale = DVS_SCALE_ENABLE |
+ DVS_SRC_WIDTH(src_w - 1) |
+ DVS_SRC_HEIGHT(src_h - 1);
+
+ intel_de_write_fw(dev_priv, DVSSTRIDE(pipe),
+ plane_state->view.color_plane[0].mapping_stride);
+ intel_de_write_fw(dev_priv, DVSPOS(pipe),
+ DVS_POS_Y(crtc_y) | DVS_POS_X(crtc_x));
+ intel_de_write_fw(dev_priv, DVSSIZE(pipe),
+ DVS_HEIGHT(crtc_h - 1) | DVS_WIDTH(crtc_w - 1));
+ intel_de_write_fw(dev_priv, DVSSCALE(pipe), dvsscale);
+}
+
+static void
+g4x_sprite_update_arm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+ const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
+ u32 dvssurf_offset = plane_state->view.color_plane[0].offset;
+ u32 x = plane_state->view.color_plane[0].x;
+ u32 y = plane_state->view.color_plane[0].y;
+ u32 dvscntr, linear_offset;
+
+ dvscntr = plane_state->ctl | g4x_sprite_ctl_crtc(crtc_state);
+
+ linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
+
+ if (key->flags) {
+ intel_de_write_fw(dev_priv, DVSKEYVAL(pipe), key->min_value);
+ intel_de_write_fw(dev_priv, DVSKEYMSK(pipe),
+ key->channel_mask);
+ intel_de_write_fw(dev_priv, DVSKEYMAX(pipe), key->max_value);
+ }
+
+ intel_de_write_fw(dev_priv, DVSLINOFF(pipe), linear_offset);
+ intel_de_write_fw(dev_priv, DVSTILEOFF(pipe), (y << 16) | x);
+
+ /*
+ * The control register self-arms if the plane was previously
+ * disabled. Try to make the plane enable atomic by writing
+ * the control register just before the surface register.
+ */
+ intel_de_write_fw(dev_priv, DVSCNTR(pipe), dvscntr);
+ intel_de_write_fw(dev_priv, DVSSURF(pipe),
+ intel_plane_ggtt_offset(plane_state) + dvssurf_offset);
+
+ if (IS_G4X(dev_priv))
+ g4x_sprite_update_gamma(plane_state);
+ else
+ ilk_sprite_update_gamma(plane_state);
+}
+
+static void
+g4x_sprite_disable_arm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+
+ intel_de_write_fw(dev_priv, DVSCNTR(pipe), 0);
+ /* Disable the scaler */
+ intel_de_write_fw(dev_priv, DVSSCALE(pipe), 0);
+ intel_de_write_fw(dev_priv, DVSSURF(pipe), 0);
+}
+
+static bool
+g4x_sprite_get_hw_state(struct intel_plane *plane,
+ enum pipe *pipe)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum intel_display_power_domain power_domain;
+ intel_wakeref_t wakeref;
+ bool ret;
+
+ power_domain = POWER_DOMAIN_PIPE(plane->pipe);
+ wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ if (!wakeref)
+ return false;
+
+ ret = intel_de_read(dev_priv, DVSCNTR(plane->pipe)) & DVS_ENABLE;
+
+ *pipe = plane->pipe;
+
+ intel_display_power_put(dev_priv, power_domain, wakeref);
+
+ return ret;
+}
+
+static bool g4x_fb_scalable(const struct drm_framebuffer *fb)
+{
+ if (!fb)
+ return false;
+
+ switch (fb->format->format) {
+ case DRM_FORMAT_C8:
+ case DRM_FORMAT_XRGB16161616F:
+ case DRM_FORMAT_ARGB16161616F:
+ case DRM_FORMAT_XBGR16161616F:
+ case DRM_FORMAT_ABGR16161616F:
+ return false;
+ default:
+ return true;
+ }
+}
+
+static int
+g4x_sprite_check_scaling(struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ const struct drm_rect *src = &plane_state->uapi.src;
+ const struct drm_rect *dst = &plane_state->uapi.dst;
+ int src_x, src_w, src_h, crtc_w, crtc_h;
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+ unsigned int stride = plane_state->view.color_plane[0].mapping_stride;
+ unsigned int cpp = fb->format->cpp[0];
+ unsigned int width_bytes;
+ int min_width, min_height;
+
+ crtc_w = drm_rect_width(dst);
+ crtc_h = drm_rect_height(dst);
+
+ src_x = src->x1 >> 16;
+ src_w = drm_rect_width(src) >> 16;
+ src_h = drm_rect_height(src) >> 16;
+
+ if (src_w == crtc_w && src_h == crtc_h)
+ return 0;
+
+ min_width = 3;
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ if (src_h & 1) {
+ drm_dbg_kms(&i915->drm, "Source height must be even with interlaced modes\n");
+ return -EINVAL;
+ }
+ min_height = 6;
+ } else {
+ min_height = 3;
+ }
+
+ width_bytes = ((src_x * cpp) & 63) + src_w * cpp;
+
+ if (src_w < min_width || src_h < min_height ||
+ src_w > 2048 || src_h > 2048) {
+ drm_dbg_kms(&i915->drm, "Source dimensions (%dx%d) exceed hardware limits (%dx%d - %dx%d)\n",
+ src_w, src_h, min_width, min_height, 2048, 2048);
+ return -EINVAL;
+ }
+
+ if (width_bytes > 4096) {
+ drm_dbg_kms(&i915->drm, "Fetch width (%d) exceeds hardware max with scaling (%u)\n",
+ width_bytes, 4096);
+ return -EINVAL;
+ }
+
+ if (stride > 4096) {
+ drm_dbg_kms(&i915->drm, "Stride (%u) exceeds hardware max with scaling (%u)\n",
+ stride, 4096);
+ return -EINVAL;
+ }
+
+ return 0;
+}
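+
+/*
+ * Note that the width_bytes check above depends on the source x offset, not
+ * just the width: e.g. (illustrative) src_x = 10 with cpp = 4 contributes
+ * (40 & 63) = 40 bytes, so a 1024 pixel wide source (4096 bytes) is
+ * rejected with 4136 > 4096, even though the same width at src_x = 0 passes.
+ */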
+
+static int
+g4x_sprite_check(struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ int min_scale = DRM_PLANE_NO_SCALING;
+ int max_scale = DRM_PLANE_NO_SCALING;
+ int ret;
+
+ if (g4x_fb_scalable(plane_state->hw.fb)) {
+ if (DISPLAY_VER(dev_priv) < 7) {
+ min_scale = 1;
+ max_scale = 16 << 16;
+ } else if (IS_IVYBRIDGE(dev_priv)) {
+ min_scale = 1;
+ max_scale = 2 << 16;
+ }
+ }
+
+ ret = intel_atomic_plane_check_clipping(plane_state, crtc_state,
+ min_scale, max_scale, true);
+ if (ret)
+ return ret;
+
+ ret = i9xx_check_plane_surface(plane_state);
+ if (ret)
+ return ret;
+
+ if (!plane_state->uapi.visible)
+ return 0;
+
+ ret = intel_plane_check_src_coordinates(plane_state);
+ if (ret)
+ return ret;
+
+ ret = g4x_sprite_check_scaling(crtc_state, plane_state);
+ if (ret)
+ return ret;
+
+ if (DISPLAY_VER(dev_priv) >= 7)
+ plane_state->ctl = ivb_sprite_ctl(crtc_state, plane_state);
+ else
+ plane_state->ctl = g4x_sprite_ctl(crtc_state, plane_state);
+
+ return 0;
+}
+
+int chv_plane_check_rotation(const struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ unsigned int rotation = plane_state->hw.rotation;
+
+ /* CHV ignores the mirror bit when the rotate bit is set :( */
+ if (IS_CHERRYVIEW(dev_priv) &&
+ rotation & DRM_MODE_ROTATE_180 &&
+ rotation & DRM_MODE_REFLECT_X) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Cannot rotate and reflect at the same time\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+vlv_sprite_check(struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state)
+{
+ int ret;
+
+ ret = chv_plane_check_rotation(plane_state);
+ if (ret)
+ return ret;
+
+ ret = intel_atomic_plane_check_clipping(plane_state, crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ true);
+ if (ret)
+ return ret;
+
+ ret = i9xx_check_plane_surface(plane_state);
+ if (ret)
+ return ret;
+
+ if (!plane_state->uapi.visible)
+ return 0;
+
+ ret = intel_plane_check_src_coordinates(plane_state);
+ if (ret)
+ return ret;
+
+ plane_state->ctl = vlv_sprite_ctl(crtc_state, plane_state);
+
+ return 0;
+}
+
+static bool has_dst_key_in_primary_plane(struct drm_i915_private *dev_priv)
+{
+ return DISPLAY_VER(dev_priv) >= 9;
+}
+
+static void intel_plane_set_ckey(struct intel_plane_state *plane_state,
+ const struct drm_intel_sprite_colorkey *set)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
+
+ *key = *set;
+
+ /*
+ * We want src key enabled on the
+ * sprite and not on the primary.
+ */
+ if (plane->id == PLANE_PRIMARY &&
+ set->flags & I915_SET_COLORKEY_SOURCE)
+ key->flags = 0;
+
+ /*
+ * On SKL+ we want dst key enabled on
+ * the primary and not on the sprite.
+ */
+ if (DISPLAY_VER(dev_priv) >= 9 && plane->id != PLANE_PRIMARY &&
+ set->flags & I915_SET_COLORKEY_DESTINATION)
+ key->flags = 0;
+}
+
+int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_intel_sprite_colorkey *set = data;
+ struct drm_plane *plane;
+ struct drm_plane_state *plane_state;
+ struct drm_atomic_state *state;
+ struct drm_modeset_acquire_ctx ctx;
+ int ret = 0;
+
+ /* ignore the pointless "none" flag */
+ set->flags &= ~I915_SET_COLORKEY_NONE;
+
+ if (set->flags & ~(I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
+ return -EINVAL;
+
+ /* Make sure we don't try to enable both src & dest simultaneously */
+ if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
+ return -EINVAL;
+
+ if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ set->flags & I915_SET_COLORKEY_DESTINATION)
+ return -EINVAL;
+
+ plane = drm_plane_find(dev, file_priv, set->plane_id);
+ if (!plane || plane->type != DRM_PLANE_TYPE_OVERLAY)
+ return -ENOENT;
+
+ /*
+ * On SKL+ only plane 2 can do destination keying against plane 1.
+ * Also, multiple planes can't do destination keying on the same
+ * pipe simultaneously.
+ */
+ if (DISPLAY_VER(dev_priv) >= 9 &&
+ to_intel_plane(plane)->id >= PLANE_SPRITE1 &&
+ set->flags & I915_SET_COLORKEY_DESTINATION)
+ return -EINVAL;
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+ state = drm_atomic_state_alloc(plane->dev);
+ if (!state) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ state->acquire_ctx = &ctx;
+
+ while (1) {
+ plane_state = drm_atomic_get_plane_state(state, plane);
+ ret = PTR_ERR_OR_ZERO(plane_state);
+ if (!ret)
+ intel_plane_set_ckey(to_intel_plane_state(plane_state), set);
+
+ /*
+ * On some platforms we have to configure
+ * the dst colorkey on the primary plane.
+ */
+ if (!ret && has_dst_key_in_primary_plane(dev_priv)) {
+ struct intel_crtc *crtc =
+ intel_crtc_for_pipe(dev_priv,
+ to_intel_plane(plane)->pipe);
+
+ plane_state = drm_atomic_get_plane_state(state,
+ crtc->base.primary);
+ ret = PTR_ERR_OR_ZERO(plane_state);
+ if (!ret)
+ intel_plane_set_ckey(to_intel_plane_state(plane_state), set);
+ }
+
+ if (!ret)
+ ret = drm_atomic_commit(state);
+
+ if (ret != -EDEADLK)
+ break;
+
+ drm_atomic_state_clear(state);
+ drm_modeset_backoff(&ctx);
+ }
+
+ drm_atomic_state_put(state);
+out:
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+ return ret;
+}
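+
+/*
+ * Illustrative userspace usage of the ioctl above (a sketch; drm_fd and
+ * overlay_plane_id are assumed to come from the usual DRM setup):
+ *
+ *	struct drm_intel_sprite_colorkey set = {
+ *		.plane_id = overlay_plane_id,
+ *		.min_value = 0x000000,
+ *		.max_value = 0x0000ff,
+ *		.channel_mask = 0xffffff,
+ *		.flags = I915_SET_COLORKEY_SOURCE,
+ *	};
+ *
+ *	ioctl(drm_fd, DRM_IOCTL_I915_SET_SPRITE_COLORKEY, &set);
+ */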
+
+static const u32 g4x_sprite_formats[] = {
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_VYUY,
+};
+
+static const u32 snb_sprite_formats[] = {
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_XRGB16161616F,
+ DRM_FORMAT_XBGR16161616F,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_VYUY,
+};
+
+static const u32 vlv_sprite_formats[] = {
+ DRM_FORMAT_C8,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_ABGR2101010,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_VYUY,
+};
+
+static const u32 chv_pipe_b_sprite_formats[] = {
+ DRM_FORMAT_C8,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_ARGB2101010,
+ DRM_FORMAT_ABGR2101010,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_VYUY,
+};
+
+static bool g4x_sprite_format_mod_supported(struct drm_plane *_plane,
+ u32 format, u64 modifier)
+{
+ if (!intel_fb_plane_supports_modifier(to_intel_plane(_plane), modifier))
+ return false;
+
+ switch (format) {
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ if (modifier == DRM_FORMAT_MOD_LINEAR ||
+ modifier == I915_FORMAT_MOD_X_TILED)
+ return true;
+ fallthrough;
+ default:
+ return false;
+ }
+}
+
+static bool snb_sprite_format_mod_supported(struct drm_plane *_plane,
+ u32 format, u64 modifier)
+{
+ if (!intel_fb_plane_supports_modifier(to_intel_plane(_plane), modifier))
+ return false;
+
+ switch (format) {
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_XRGB16161616F:
+ case DRM_FORMAT_XBGR16161616F:
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ if (modifier == DRM_FORMAT_MOD_LINEAR ||
+ modifier == I915_FORMAT_MOD_X_TILED)
+ return true;
+ fallthrough;
+ default:
+ return false;
+ }
+}
+
+static bool vlv_sprite_format_mod_supported(struct drm_plane *_plane,
+ u32 format, u64 modifier)
+{
+ if (!intel_fb_plane_supports_modifier(to_intel_plane(_plane), modifier))
+ return false;
+
+ switch (format) {
+ case DRM_FORMAT_C8:
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_ABGR2101010:
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_ARGB2101010:
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ if (modifier == DRM_FORMAT_MOD_LINEAR ||
+ modifier == I915_FORMAT_MOD_X_TILED)
+ return true;
+ fallthrough;
+ default:
+ return false;
+ }
+}
+
+static const struct drm_plane_funcs g4x_sprite_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = intel_plane_destroy,
+ .atomic_duplicate_state = intel_plane_duplicate_state,
+ .atomic_destroy_state = intel_plane_destroy_state,
+ .format_mod_supported = g4x_sprite_format_mod_supported,
+};
+
+static const struct drm_plane_funcs snb_sprite_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = intel_plane_destroy,
+ .atomic_duplicate_state = intel_plane_duplicate_state,
+ .atomic_destroy_state = intel_plane_destroy_state,
+ .format_mod_supported = snb_sprite_format_mod_supported,
+};
+
+static const struct drm_plane_funcs vlv_sprite_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = intel_plane_destroy,
+ .atomic_duplicate_state = intel_plane_duplicate_state,
+ .atomic_destroy_state = intel_plane_destroy_state,
+ .format_mod_supported = vlv_sprite_format_mod_supported,
+};
+
+struct intel_plane *
+intel_sprite_plane_create(struct drm_i915_private *dev_priv,
+ enum pipe pipe, int sprite)
+{
+ struct intel_plane *plane;
+ const struct drm_plane_funcs *plane_funcs;
+ unsigned int supported_rotations;
+ const u64 *modifiers;
+ const u32 *formats;
+ int num_formats;
+ int ret, zpos;
+
+ plane = intel_plane_alloc();
+ if (IS_ERR(plane))
+ return plane;
+
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ plane->update_noarm = vlv_sprite_update_noarm;
+ plane->update_arm = vlv_sprite_update_arm;
+ plane->disable_arm = vlv_sprite_disable_arm;
+ plane->get_hw_state = vlv_sprite_get_hw_state;
+ plane->check_plane = vlv_sprite_check;
+ plane->max_stride = i965_plane_max_stride;
+ plane->min_cdclk = vlv_plane_min_cdclk;
+
+ if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
+ formats = chv_pipe_b_sprite_formats;
+ num_formats = ARRAY_SIZE(chv_pipe_b_sprite_formats);
+ } else {
+ formats = vlv_sprite_formats;
+ num_formats = ARRAY_SIZE(vlv_sprite_formats);
+ }
+
+ plane_funcs = &vlv_sprite_funcs;
+ } else if (DISPLAY_VER(dev_priv) >= 7) {
+ plane->update_noarm = ivb_sprite_update_noarm;
+ plane->update_arm = ivb_sprite_update_arm;
+ plane->disable_arm = ivb_sprite_disable_arm;
+ plane->get_hw_state = ivb_sprite_get_hw_state;
+ plane->check_plane = g4x_sprite_check;
+
+ if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
+ plane->max_stride = hsw_sprite_max_stride;
+ plane->min_cdclk = hsw_plane_min_cdclk;
+ } else {
+ plane->max_stride = g4x_sprite_max_stride;
+ plane->min_cdclk = ivb_sprite_min_cdclk;
+ }
+
+ formats = snb_sprite_formats;
+ num_formats = ARRAY_SIZE(snb_sprite_formats);
+
+ plane_funcs = &snb_sprite_funcs;
+ } else {
+ plane->update_noarm = g4x_sprite_update_noarm;
+ plane->update_arm = g4x_sprite_update_arm;
+ plane->disable_arm = g4x_sprite_disable_arm;
+ plane->get_hw_state = g4x_sprite_get_hw_state;
+ plane->check_plane = g4x_sprite_check;
+ plane->max_stride = g4x_sprite_max_stride;
+ plane->min_cdclk = g4x_sprite_min_cdclk;
+
+ if (IS_SANDYBRIDGE(dev_priv)) {
+ formats = snb_sprite_formats;
+ num_formats = ARRAY_SIZE(snb_sprite_formats);
+
+ plane_funcs = &snb_sprite_funcs;
+ } else {
+ formats = g4x_sprite_formats;
+ num_formats = ARRAY_SIZE(g4x_sprite_formats);
+
+ plane_funcs = &g4x_sprite_funcs;
+ }
+ }
+
+ if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
+ supported_rotations =
+ DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
+ DRM_MODE_REFLECT_X;
+ } else {
+ supported_rotations =
+ DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
+ }
+
+ plane->pipe = pipe;
+ plane->id = PLANE_SPRITE0 + sprite;
+ plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);
+
+ modifiers = intel_fb_plane_get_modifiers(dev_priv, INTEL_PLANE_CAP_TILING_X);
+
+ ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
+ 0, plane_funcs,
+ formats, num_formats, modifiers,
+ DRM_PLANE_TYPE_OVERLAY,
+ "sprite %c", sprite_name(pipe, sprite));
+ kfree(modifiers);
+
+ if (ret)
+ goto fail;
+
+ drm_plane_create_rotation_property(&plane->base,
+ DRM_MODE_ROTATE_0,
+ supported_rotations);
+
+ drm_plane_create_color_properties(&plane->base,
+ BIT(DRM_COLOR_YCBCR_BT601) |
+ BIT(DRM_COLOR_YCBCR_BT709),
+ BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
+ BIT(DRM_COLOR_YCBCR_FULL_RANGE),
+ DRM_COLOR_YCBCR_BT709,
+ DRM_COLOR_YCBCR_LIMITED_RANGE);
+
+ zpos = sprite + 1;
+ drm_plane_create_zpos_immutable_property(&plane->base, zpos);
+
+ intel_plane_helper_add(plane);
+
+ return plane;
+
+fail:
+ intel_plane_free(plane);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.h b/drivers/gpu/drm/i915/display/intel_sprite.h
new file mode 100644
index 000000000..4f63e4967
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_sprite.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_SPRITE_H__
+#define __INTEL_SPRITE_H__
+
+#include <linux/types.h>
+
+#include "intel_display.h"
+
+struct drm_device;
+struct drm_display_mode;
+struct drm_file;
+struct drm_i915_private;
+struct intel_crtc_state;
+struct intel_plane_state;
+
+/*
+ * FIXME: We should only take the spinlocks once for the entire update,
+ * instead of once per mmio.
+ */
+#if IS_ENABLED(CONFIG_PROVE_LOCKING)
+#define VBLANK_EVASION_TIME_US 250
+#else
+#define VBLANK_EVASION_TIME_US 100
+#endif
+
+struct intel_plane *intel_sprite_plane_create(struct drm_i915_private *dev_priv,
+ enum pipe pipe, int plane);
+int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state);
+int chv_plane_check_rotation(const struct intel_plane_state *plane_state);
+
+static inline u8 icl_hdr_plane_mask(void)
+{
+ return BIT(PLANE_PRIMARY) |
+ BIT(PLANE_SPRITE0) | BIT(PLANE_SPRITE1);
+}
+
+int ivb_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state);
+int hsw_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state);
+int vlv_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state);
+
+#endif /* __INTEL_SPRITE_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
new file mode 100644
index 000000000..bda77828d
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_tc.c
@@ -0,0 +1,1007 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "i915_reg.h"
+#include "intel_de.h"
+#include "intel_display.h"
+#include "intel_display_power_map.h"
+#include "intel_display_types.h"
+#include "intel_dp_mst.h"
+#include "intel_tc.h"
+#include "intel_tc_phy_regs.h"
+
+static const char *tc_port_mode_name(enum tc_port_mode mode)
+{
+ static const char * const names[] = {
+ [TC_PORT_DISCONNECTED] = "disconnected",
+ [TC_PORT_TBT_ALT] = "tbt-alt",
+ [TC_PORT_DP_ALT] = "dp-alt",
+ [TC_PORT_LEGACY] = "legacy",
+ };
+
+ if (WARN_ON(mode >= ARRAY_SIZE(names)))
+ mode = TC_PORT_DISCONNECTED;
+
+ return names[mode];
+}
+
+static bool intel_tc_port_in_mode(struct intel_digital_port *dig_port,
+ enum tc_port_mode mode)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
+
+ return intel_phy_is_tc(i915, phy) && dig_port->tc_mode == mode;
+}
+
+bool intel_tc_port_in_tbt_alt_mode(struct intel_digital_port *dig_port)
+{
+ return intel_tc_port_in_mode(dig_port, TC_PORT_TBT_ALT);
+}
+
+bool intel_tc_port_in_dp_alt_mode(struct intel_digital_port *dig_port)
+{
+ return intel_tc_port_in_mode(dig_port, TC_PORT_DP_ALT);
+}
+
+bool intel_tc_port_in_legacy_mode(struct intel_digital_port *dig_port)
+{
+ return intel_tc_port_in_mode(dig_port, TC_PORT_LEGACY);
+}
+
+bool intel_tc_cold_requires_aux_pw(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+
+ return (DISPLAY_VER(i915) == 11 && dig_port->tc_legacy_port) ||
+ IS_ALDERLAKE_P(i915);
+}
+
+static enum intel_display_power_domain
+tc_cold_get_power_domain(struct intel_digital_port *dig_port, enum tc_port_mode mode)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+
+ if (mode == TC_PORT_TBT_ALT || !intel_tc_cold_requires_aux_pw(dig_port))
+ return POWER_DOMAIN_TC_COLD_OFF;
+
+ return intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch);
+}
+
+static intel_wakeref_t
+tc_cold_block_in_mode(struct intel_digital_port *dig_port, enum tc_port_mode mode,
+ enum intel_display_power_domain *domain)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+
+ *domain = tc_cold_get_power_domain(dig_port, mode);
+
+ return intel_display_power_get(i915, *domain);
+}
+
+static intel_wakeref_t
+tc_cold_block(struct intel_digital_port *dig_port, enum intel_display_power_domain *domain)
+{
+ return tc_cold_block_in_mode(dig_port, dig_port->tc_mode, domain);
+}
+
+static void
+tc_cold_unblock(struct intel_digital_port *dig_port, enum intel_display_power_domain domain,
+ intel_wakeref_t wakeref)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+
+ /*
+ * wakeref == -1 means an error happened while saving save_depot_stack,
+ * but power should still be put down. 0 is an invalid save_depot_stack
+ * id, so it can be used to skip the put for non-legacy TC ports.
+ */
+ if (wakeref == 0)
+ return;
+
+ intel_display_power_put(i915, domain, wakeref);
+}
+
+static void
+assert_tc_cold_blocked(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ bool enabled;
+
+ enabled = intel_display_power_is_enabled(i915,
+ tc_cold_get_power_domain(dig_port,
+ dig_port->tc_mode));
+ drm_WARN_ON(&i915->drm, !enabled);
+}
+
+static enum intel_display_power_domain
+tc_port_power_domain(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
+
+ return POWER_DOMAIN_PORT_DDI_LANES_TC1 + tc_port - TC_PORT_1;
+}
+
+static void
+assert_tc_port_power_enabled(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+
+ drm_WARN_ON(&i915->drm,
+ !intel_display_power_is_enabled(i915, tc_port_power_domain(dig_port)));
+}
+
+u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_uncore *uncore = &i915->uncore;
+ u32 lane_mask;
+
+ lane_mask = intel_uncore_read(uncore,
+ PORT_TX_DFLEXDPSP(dig_port->tc_phy_fia));
+
+ drm_WARN_ON(&i915->drm, lane_mask == 0xffffffff);
+ assert_tc_cold_blocked(dig_port);
+
+ lane_mask &= DP_LANE_ASSIGNMENT_MASK(dig_port->tc_phy_fia_idx);
+ return lane_mask >> DP_LANE_ASSIGNMENT_SHIFT(dig_port->tc_phy_fia_idx);
+}
+
+u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_uncore *uncore = &i915->uncore;
+ u32 pin_mask;
+
+ pin_mask = intel_uncore_read(uncore,
+ PORT_TX_DFLEXPA1(dig_port->tc_phy_fia));
+
+ drm_WARN_ON(&i915->drm, pin_mask == 0xffffffff);
+ assert_tc_cold_blocked(dig_port);
+
+ return (pin_mask & DP_PIN_ASSIGNMENT_MASK(dig_port->tc_phy_fia_idx)) >>
+ DP_PIN_ASSIGNMENT_SHIFT(dig_port->tc_phy_fia_idx);
+}
+
+int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ intel_wakeref_t wakeref;
+ u32 lane_mask;
+
+ if (dig_port->tc_mode != TC_PORT_DP_ALT)
+ return 4;
+
+ assert_tc_cold_blocked(dig_port);
+
+ lane_mask = 0;
+ with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref)
+ lane_mask = intel_tc_port_get_lane_mask(dig_port);
+
+ switch (lane_mask) {
+ default:
+ MISSING_CASE(lane_mask);
+ fallthrough;
+ case 0x1:
+ case 0x2:
+ case 0x4:
+ case 0x8:
+ return 1;
+ case 0x3:
+ case 0xc:
+ return 2;
+ case 0xf:
+ return 4;
+ }
+}
+
+void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
+ int required_lanes)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
+ struct intel_uncore *uncore = &i915->uncore;
+ u32 val;
+
+ drm_WARN_ON(&i915->drm,
+ lane_reversal && dig_port->tc_mode != TC_PORT_LEGACY);
+
+ assert_tc_cold_blocked(dig_port);
+
+ val = intel_uncore_read(uncore,
+ PORT_TX_DFLEXDPMLE1(dig_port->tc_phy_fia));
+ val &= ~DFLEXDPMLE1_DPMLETC_MASK(dig_port->tc_phy_fia_idx);
+
+ switch (required_lanes) {
+ case 1:
+ val |= lane_reversal ?
+ DFLEXDPMLE1_DPMLETC_ML3(dig_port->tc_phy_fia_idx) :
+ DFLEXDPMLE1_DPMLETC_ML0(dig_port->tc_phy_fia_idx);
+ break;
+ case 2:
+ val |= lane_reversal ?
+ DFLEXDPMLE1_DPMLETC_ML3_2(dig_port->tc_phy_fia_idx) :
+ DFLEXDPMLE1_DPMLETC_ML1_0(dig_port->tc_phy_fia_idx);
+ break;
+ case 4:
+ val |= DFLEXDPMLE1_DPMLETC_ML3_0(dig_port->tc_phy_fia_idx);
+ break;
+ default:
+ MISSING_CASE(required_lanes);
+ }
+
+ intel_uncore_write(uncore,
+ PORT_TX_DFLEXDPMLE1(dig_port->tc_phy_fia), val);
+}
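+
+/*
+ * E.g. (illustrative): a single-lane link normally uses ML0, but with lane
+ * reversal (legacy ports only, per the WARN above) ML3 is selected instead,
+ * mirroring the lane assignment across the connector.
+ */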
+
+static void tc_port_fixup_legacy_flag(struct intel_digital_port *dig_port,
+ u32 live_status_mask)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ u32 valid_hpd_mask;
+
+ if (dig_port->tc_legacy_port)
+ valid_hpd_mask = BIT(TC_PORT_LEGACY);
+ else
+ valid_hpd_mask = BIT(TC_PORT_DP_ALT) |
+ BIT(TC_PORT_TBT_ALT);
+
+ if (!(live_status_mask & ~valid_hpd_mask))
+ return;
+
+ /* If live status mismatches the VBT flag, trust the live status. */
+ drm_dbg_kms(&i915->drm,
+ "Port %s: live status %08x mismatches the legacy port flag %08x, fixing flag\n",
+ dig_port->tc_port_name, live_status_mask, valid_hpd_mask);
+
+ dig_port->tc_legacy_port = !dig_port->tc_legacy_port;
+}
+
+static u32 icl_tc_port_live_status_mask(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_uncore *uncore = &i915->uncore;
+ u32 isr_bit = i915->display.hotplug.pch_hpd[dig_port->base.hpd_pin];
+ u32 mask = 0;
+ u32 val;
+
+ val = intel_uncore_read(uncore,
+ PORT_TX_DFLEXDPSP(dig_port->tc_phy_fia));
+
+ if (val == 0xffffffff) {
+ drm_dbg_kms(&i915->drm,
+ "Port %s: PHY in TCCOLD, nothing connected\n",
+ dig_port->tc_port_name);
+ return mask;
+ }
+
+ if (val & TC_LIVE_STATE_TBT(dig_port->tc_phy_fia_idx))
+ mask |= BIT(TC_PORT_TBT_ALT);
+ if (val & TC_LIVE_STATE_TC(dig_port->tc_phy_fia_idx))
+ mask |= BIT(TC_PORT_DP_ALT);
+
+ if (intel_uncore_read(uncore, SDEISR) & isr_bit)
+ mask |= BIT(TC_PORT_LEGACY);
+
+ /* The sink can be connected only in a single mode. */
+ if (!drm_WARN_ON_ONCE(&i915->drm, hweight32(mask) > 1))
+ tc_port_fixup_legacy_flag(dig_port, mask);
+
+ return mask;
+}
+
+static u32 adl_tc_port_live_status_mask(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
+ u32 isr_bit = i915->display.hotplug.pch_hpd[dig_port->base.hpd_pin];
+ struct intel_uncore *uncore = &i915->uncore;
+ u32 val, mask = 0;
+
+ /*
+ * On ADL-P the HW/FW will wake from TCCOLD to complete the read access
+ * to registers in the IOM. Note that this doesn't apply to PHY and FIA
+ * registers.
+ */
+ val = intel_uncore_read(uncore, TCSS_DDI_STATUS(tc_port));
+ if (val & TCSS_DDI_STATUS_HPD_LIVE_STATUS_ALT)
+ mask |= BIT(TC_PORT_DP_ALT);
+ if (val & TCSS_DDI_STATUS_HPD_LIVE_STATUS_TBT)
+ mask |= BIT(TC_PORT_TBT_ALT);
+
+ if (intel_uncore_read(uncore, SDEISR) & isr_bit)
+ mask |= BIT(TC_PORT_LEGACY);
+
+ /* The sink can be connected only in a single mode. */
+ if (!drm_WARN_ON(&i915->drm, hweight32(mask) > 1))
+ tc_port_fixup_legacy_flag(dig_port, mask);
+
+ return mask;
+}
+
+static u32 tc_port_live_status_mask(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+
+ if (IS_ALDERLAKE_P(i915))
+ return adl_tc_port_live_status_mask(dig_port);
+
+ return icl_tc_port_live_status_mask(dig_port);
+}
+
+/*
+ * Return the PHY status complete flag indicating that display can acquire the
+ * PHY ownership. The IOM firmware sets this flag when a DP-alt or legacy sink
+ * is connected and it's ready to switch the ownership to display. The flag
+ * will be left cleared when a TBT-alt sink is connected, where the PHY is
+ * owned by the TBT subsystem and so switching the ownership to display is not
+ * required.
+ */
+static bool icl_tc_phy_status_complete(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_uncore *uncore = &i915->uncore;
+ u32 val;
+
+ val = intel_uncore_read(uncore,
+ PORT_TX_DFLEXDPPMS(dig_port->tc_phy_fia));
+ if (val == 0xffffffff) {
+ drm_dbg_kms(&i915->drm,
+ "Port %s: PHY in TCCOLD, assuming not complete\n",
+ dig_port->tc_port_name);
+ return false;
+ }
+
+ return val & DP_PHY_MODE_STATUS_COMPLETED(dig_port->tc_phy_fia_idx);
+}
+
+/*
+ * Return the PHY status complete flag indicating that display can acquire the
+ * PHY ownership. The IOM firmware sets this flag when it's ready to switch
+ * the ownership to display, regardless of what sink is connected (TBT-alt,
+ * DP-alt, legacy or nothing). For TBT-alt sinks the PHY is owned by the TBT
+ * subsystem and so switching the ownership to display is not required.
+ */
+static bool adl_tc_phy_status_complete(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
+ struct intel_uncore *uncore = &i915->uncore;
+ u32 val;
+
+ val = intel_uncore_read(uncore, TCSS_DDI_STATUS(tc_port));
+ if (val == 0xffffffff) {
+ drm_dbg_kms(&i915->drm,
+ "Port %s: PHY in TCCOLD, assuming not complete\n",
+ dig_port->tc_port_name);
+ return false;
+ }
+
+ return val & TCSS_DDI_STATUS_READY;
+}
+
+static bool tc_phy_status_complete(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+
+ if (IS_ALDERLAKE_P(i915))
+ return adl_tc_phy_status_complete(dig_port);
+
+ return icl_tc_phy_status_complete(dig_port);
+}
+
+static bool icl_tc_phy_take_ownership(struct intel_digital_port *dig_port,
+ bool take)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_uncore *uncore = &i915->uncore;
+ u32 val;
+
+ val = intel_uncore_read(uncore,
+ PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia));
+ if (val == 0xffffffff) {
+ drm_dbg_kms(&i915->drm,
+ "Port %s: PHY in TCCOLD, can't %s ownership\n",
+ dig_port->tc_port_name, take ? "take" : "release");
+
+ return false;
+ }
+
+ val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx);
+ if (take)
+ val |= DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx);
+
+ intel_uncore_write(uncore,
+ PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia), val);
+
+ return true;
+}
+
+static bool adl_tc_phy_take_ownership(struct intel_digital_port *dig_port,
+ bool take)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_uncore *uncore = &i915->uncore;
+ enum port port = dig_port->base.port;
+ u32 val;
+
+ val = intel_uncore_read(uncore, DDI_BUF_CTL(port));
+ if (take)
+ val |= DDI_BUF_CTL_TC_PHY_OWNERSHIP;
+ else
+ val &= ~DDI_BUF_CTL_TC_PHY_OWNERSHIP;
+ intel_uncore_write(uncore, DDI_BUF_CTL(port), val);
+
+ return true;
+}
+
+static bool tc_phy_take_ownership(struct intel_digital_port *dig_port, bool take)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+
+ if (IS_ALDERLAKE_P(i915))
+ return adl_tc_phy_take_ownership(dig_port, take);
+
+ return icl_tc_phy_take_ownership(dig_port, take);
+}
+
+static bool icl_tc_phy_is_owned(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_uncore *uncore = &i915->uncore;
+ u32 val;
+
+ val = intel_uncore_read(uncore,
+ PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia));
+ if (val == 0xffffffff) {
+ drm_dbg_kms(&i915->drm,
+ "Port %s: PHY in TCCOLD, assume not owned\n",
+ dig_port->tc_port_name);
+ return false;
+ }
+
+ return val & DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx);
+}
+
+static bool adl_tc_phy_is_owned(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_uncore *uncore = &i915->uncore;
+ enum port port = dig_port->base.port;
+ u32 val;
+
+ val = intel_uncore_read(uncore, DDI_BUF_CTL(port));
+ return val & DDI_BUF_CTL_TC_PHY_OWNERSHIP;
+}
+
+static bool tc_phy_is_owned(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+
+ if (IS_ALDERLAKE_P(i915))
+ return adl_tc_phy_is_owned(dig_port);
+
+ return icl_tc_phy_is_owned(dig_port);
+}
+
+/*
+ * This function implements the first part of the Connect Flow described by our
+ * specification, Gen11 TypeC Programming chapter. The rest of the flow (reading
+ * lanes, EDID, etc) is done as needed in the typical places.
+ *
+ * Unlike the other ports, type-C ports are not available to use as soon as we
+ * get a hotplug. The type-C PHYs can be shared between multiple controllers:
+ * display, USB, etc. As a result, handshaking through FIA is required around
+ * connect and disconnect to cleanly transfer ownership with the controller and
+ * set the type-C power state.
+ */
+static void icl_tc_phy_connect(struct intel_digital_port *dig_port,
+ int required_lanes)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ u32 live_status_mask;
+ int max_lanes;
+
+ if (!tc_phy_status_complete(dig_port)) {
+ drm_dbg_kms(&i915->drm, "Port %s: PHY not ready\n",
+ dig_port->tc_port_name);
+ goto out_set_tbt_alt_mode;
+ }
+
+ live_status_mask = tc_port_live_status_mask(dig_port);
+ if (!(live_status_mask & (BIT(TC_PORT_DP_ALT) | BIT(TC_PORT_LEGACY))) &&
+ !dig_port->tc_legacy_port) {
+ drm_dbg_kms(&i915->drm, "Port %s: PHY ownership not required (live status %02x)\n",
+ dig_port->tc_port_name, live_status_mask);
+ goto out_set_tbt_alt_mode;
+ }
+
+ if (!tc_phy_take_ownership(dig_port, true) &&
+ !drm_WARN_ON(&i915->drm, dig_port->tc_legacy_port))
+ goto out_set_tbt_alt_mode;
+
+ max_lanes = intel_tc_port_fia_max_lane_count(dig_port);
+ if (dig_port->tc_legacy_port) {
+ drm_WARN_ON(&i915->drm, max_lanes != 4);
+ dig_port->tc_mode = TC_PORT_LEGACY;
+
+ return;
+ }
+
+ /*
+ * Now we have to re-check the live state, in case the port recently
+ * became disconnected. Not necessary for legacy mode.
+ */
+ if (!(tc_port_live_status_mask(dig_port) & BIT(TC_PORT_DP_ALT))) {
+ drm_dbg_kms(&i915->drm, "Port %s: PHY sudden disconnect\n",
+ dig_port->tc_port_name);
+ goto out_release_phy;
+ }
+
+ if (max_lanes < required_lanes) {
+ drm_dbg_kms(&i915->drm,
+ "Port %s: PHY max lanes %d < required lanes %d\n",
+ dig_port->tc_port_name,
+ max_lanes, required_lanes);
+ goto out_release_phy;
+ }
+
+ dig_port->tc_mode = TC_PORT_DP_ALT;
+
+ return;
+
+out_release_phy:
+ tc_phy_take_ownership(dig_port, false);
+out_set_tbt_alt_mode:
+ dig_port->tc_mode = TC_PORT_TBT_ALT;
+}
+
+/*
+ * See the comment at the connect function. This implements the Disconnect
+ * Flow.
+ */
+static void icl_tc_phy_disconnect(struct intel_digital_port *dig_port)
+{
+ switch (dig_port->tc_mode) {
+ case TC_PORT_LEGACY:
+ case TC_PORT_DP_ALT:
+ tc_phy_take_ownership(dig_port, false);
+ fallthrough;
+ case TC_PORT_TBT_ALT:
+ dig_port->tc_mode = TC_PORT_DISCONNECTED;
+ fallthrough;
+ case TC_PORT_DISCONNECTED:
+ break;
+ default:
+ MISSING_CASE(dig_port->tc_mode);
+ }
+}
+
+static bool icl_tc_phy_is_connected(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+
+ if (!tc_phy_status_complete(dig_port)) {
+ drm_dbg_kms(&i915->drm, "Port %s: PHY status not complete\n",
+ dig_port->tc_port_name);
+ return dig_port->tc_mode == TC_PORT_TBT_ALT;
+ }
+
+ /* On ADL-P the PHY complete flag is set in TBT mode as well. */
+ if (IS_ALDERLAKE_P(i915) && dig_port->tc_mode == TC_PORT_TBT_ALT)
+ return true;
+
+ if (!tc_phy_is_owned(dig_port)) {
+ drm_dbg_kms(&i915->drm, "Port %s: PHY not owned\n",
+ dig_port->tc_port_name);
+
+ return false;
+ }
+
+ return dig_port->tc_mode == TC_PORT_DP_ALT ||
+ dig_port->tc_mode == TC_PORT_LEGACY;
+}
+
+static enum tc_port_mode
+intel_tc_port_get_current_mode(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ u32 live_status_mask = tc_port_live_status_mask(dig_port);
+ enum tc_port_mode mode;
+
+ if (!tc_phy_is_owned(dig_port) ||
+ drm_WARN_ON(&i915->drm, !tc_phy_status_complete(dig_port)))
+ return TC_PORT_TBT_ALT;
+
+ mode = dig_port->tc_legacy_port ? TC_PORT_LEGACY : TC_PORT_DP_ALT;
+ if (live_status_mask) {
+ enum tc_port_mode live_mode = fls(live_status_mask) - 1;
+
+ if (!drm_WARN_ON(&i915->drm, live_mode == TC_PORT_TBT_ALT))
+ mode = live_mode;
+ }
+
+ return mode;
+}
+
+static enum tc_port_mode
+intel_tc_port_get_target_mode(struct intel_digital_port *dig_port)
+{
+ u32 live_status_mask = tc_port_live_status_mask(dig_port);
+
+ if (live_status_mask)
+ return fls(live_status_mask) - 1;
+
+ return TC_PORT_TBT_ALT;
+}
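+
+/*
+ * fls() above picks the highest-priority mode when more than one live
+ * status bit is set, e.g. (illustrative) a mask with both TC_PORT_TBT_ALT
+ * and TC_PORT_LEGACY set resolves to TC_PORT_LEGACY.
+ */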
+
+static void intel_tc_port_reset_mode(struct intel_digital_port *dig_port,
+ int required_lanes, bool force_disconnect)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ enum tc_port_mode old_tc_mode = dig_port->tc_mode;
+
+ intel_display_power_flush_work(i915);
+ if (!intel_tc_cold_requires_aux_pw(dig_port)) {
+ enum intel_display_power_domain aux_domain;
+ bool aux_powered;
+
+ aux_domain = intel_aux_power_domain(dig_port);
+ aux_powered = intel_display_power_is_enabled(i915, aux_domain);
+ drm_WARN_ON(&i915->drm, aux_powered);
+ }
+
+ icl_tc_phy_disconnect(dig_port);
+ if (!force_disconnect)
+ icl_tc_phy_connect(dig_port, required_lanes);
+
+ drm_dbg_kms(&i915->drm, "Port %s: TC port mode reset (%s -> %s)\n",
+ dig_port->tc_port_name,
+ tc_port_mode_name(old_tc_mode),
+ tc_port_mode_name(dig_port->tc_mode));
+}
+
+static bool intel_tc_port_needs_reset(struct intel_digital_port *dig_port)
+{
+ return intel_tc_port_get_target_mode(dig_port) != dig_port->tc_mode;
+}
+
+static void intel_tc_port_update_mode(struct intel_digital_port *dig_port,
+ int required_lanes, bool force_disconnect)
+{
+ enum intel_display_power_domain domain;
+ intel_wakeref_t wref;
+ bool needs_reset = force_disconnect;
+
+ if (!needs_reset) {
+ /* Get power domain required to check the hotplug live status. */
+ wref = tc_cold_block(dig_port, &domain);
+ needs_reset = intel_tc_port_needs_reset(dig_port);
+ tc_cold_unblock(dig_port, domain, wref);
+ }
+
+ if (!needs_reset)
+ return;
+
+ /* Get power domain required for resetting the mode. */
+ wref = tc_cold_block_in_mode(dig_port, TC_PORT_DISCONNECTED, &domain);
+
+ intel_tc_port_reset_mode(dig_port, required_lanes, force_disconnect);
+
+ /* Get power domain matching the new mode after reset. */
+ tc_cold_unblock(dig_port, dig_port->tc_lock_power_domain,
+ fetch_and_zero(&dig_port->tc_lock_wakeref));
+ if (dig_port->tc_mode != TC_PORT_DISCONNECTED)
+ dig_port->tc_lock_wakeref = tc_cold_block(dig_port,
+ &dig_port->tc_lock_power_domain);
+
+ tc_cold_unblock(dig_port, domain, wref);
+}
+
+static void __intel_tc_port_get_link(struct intel_digital_port *dig_port)
+{
+ dig_port->tc_link_refcount++;
+}
+
+static void __intel_tc_port_put_link(struct intel_digital_port *dig_port)
+{
+ dig_port->tc_link_refcount--;
+}
+
+static bool tc_port_is_enabled(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+
+ assert_tc_port_power_enabled(dig_port);
+
+ return intel_de_read(i915, DDI_BUF_CTL(dig_port->base.port)) &
+ DDI_BUF_CTL_ENABLE;
+}
+
+/**
+ * intel_tc_port_init_mode: Read out HW state and init the given port's TypeC mode
+ * @dig_port: digital port
+ *
+ * Read out the HW state and initialize the TypeC mode of @dig_port. The mode
+ * will be locked until intel_tc_port_sanitize_mode() is called.
+ */
+void intel_tc_port_init_mode(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ intel_wakeref_t tc_cold_wref;
+ enum intel_display_power_domain domain;
+
+ mutex_lock(&dig_port->tc_lock);
+
+ drm_WARN_ON(&i915->drm, dig_port->tc_mode != TC_PORT_DISCONNECTED);
+ drm_WARN_ON(&i915->drm, dig_port->tc_lock_wakeref);
+ drm_WARN_ON(&i915->drm, dig_port->tc_link_refcount);
+
+ tc_cold_wref = tc_cold_block(dig_port, &domain);
+
+ dig_port->tc_mode = intel_tc_port_get_current_mode(dig_port);
+ /*
+ * Save the initial mode for the state check in
+ * intel_tc_port_sanitize_mode().
+ */
+ dig_port->tc_init_mode = dig_port->tc_mode;
+ dig_port->tc_lock_wakeref = tc_cold_block(dig_port, &dig_port->tc_lock_power_domain);
+
+ /*
+ * The PHY needs to be connected for AUX to work during HW readout and
+ * MST topology resume, but the PHY mode can only be changed if the
+ * port is disabled.
+ */
+ if (!tc_port_is_enabled(dig_port))
+ intel_tc_port_update_mode(dig_port, 1, false);
+
+ /* Prevent changing dig_port->tc_mode until intel_tc_port_sanitize_mode() is called. */
+ __intel_tc_port_get_link(dig_port);
+
+ tc_cold_unblock(dig_port, domain, tc_cold_wref);
+
+ drm_dbg_kms(&i915->drm, "Port %s: init mode (%s)\n",
+ dig_port->tc_port_name,
+ tc_port_mode_name(dig_port->tc_mode));
+
+ mutex_unlock(&dig_port->tc_lock);
+}
+
+/**
+ * intel_tc_port_sanitize_mode: Sanitize the given port's TypeC mode
+ * @dig_port: digital port
+ *
+ * Sanitize @dig_port's TypeC mode wrt. the encoder's state right after driver
+ * loading and system resume:
+ * If the encoder is enabled, keep the TypeC mode/PHY connected state locked
+ * until the encoder is disabled.
+ * If the encoder is disabled, make sure the PHY is disconnected.
+ */
+void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_encoder *encoder = &dig_port->base;
+ int active_links = 0;
+
+ mutex_lock(&dig_port->tc_lock);
+
+ if (dig_port->dp.is_mst)
+ active_links = intel_dp_mst_encoder_active_links(dig_port);
+ else if (encoder->base.crtc)
+ active_links = to_intel_crtc(encoder->base.crtc)->active;
+
+ drm_WARN_ON(&i915->drm, dig_port->tc_link_refcount != 1);
+ if (active_links) {
+ if (!icl_tc_phy_is_connected(dig_port))
+ drm_dbg_kms(&i915->drm,
+ "Port %s: PHY disconnected with %d active link(s)\n",
+ dig_port->tc_port_name, active_links);
+ } else {
+ /*
+ * TBT-alt is the default mode whenever the PHY ownership is not
+ * held (regardless of the sink's connected live state), so
+ * we'll just switch from it to disconnected mode here without
+ * a note.
+ */
+ if (dig_port->tc_init_mode != TC_PORT_TBT_ALT)
+ drm_dbg_kms(&i915->drm,
+ "Port %s: PHY left in %s mode on disabled port, disconnecting it\n",
+ dig_port->tc_port_name,
+ tc_port_mode_name(dig_port->tc_init_mode));
+ icl_tc_phy_disconnect(dig_port);
+ __intel_tc_port_put_link(dig_port);
+
+ tc_cold_unblock(dig_port, dig_port->tc_lock_power_domain,
+ fetch_and_zero(&dig_port->tc_lock_wakeref));
+ }
+
+ drm_dbg_kms(&i915->drm, "Port %s: sanitize mode (%s)\n",
+ dig_port->tc_port_name,
+ tc_port_mode_name(dig_port->tc_mode));
+
+ mutex_unlock(&dig_port->tc_lock);
+}
+
+/*
+ * The type-C ports are different because even when they are connected, they
+ * may not be available/usable by the graphics driver: see the comment on
+ * icl_tc_phy_connect(). So in our driver, instead of adding the additional
+ * concept of "usable" and making everything check for "connected and usable",
+ * we define a port as "connected" when it is not only connected, but also
+ * usable by the rest of the driver. That maintains the old assumption that
+ * connected ports are usable, and avoids exposing to the users objects they
+ * can't really use.
+ */
+bool intel_tc_port_connected(struct intel_encoder *encoder)
+{
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ bool is_connected;
+
+ intel_tc_port_lock(dig_port);
+
+ is_connected = tc_port_live_status_mask(dig_port) &
+ BIT(dig_port->tc_mode);
+
+ intel_tc_port_unlock(dig_port);
+
+ return is_connected;
+}
+
+static void __intel_tc_port_lock(struct intel_digital_port *dig_port,
+ int required_lanes)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+
+ mutex_lock(&dig_port->tc_lock);
+
+ cancel_delayed_work(&dig_port->tc_disconnect_phy_work);
+
+ if (!dig_port->tc_link_refcount)
+ intel_tc_port_update_mode(dig_port, required_lanes,
+ false);
+
+ drm_WARN_ON(&i915->drm, dig_port->tc_mode == TC_PORT_DISCONNECTED);
+ drm_WARN_ON(&i915->drm, dig_port->tc_mode != TC_PORT_TBT_ALT &&
+ !tc_phy_is_owned(dig_port));
+}
+
+void intel_tc_port_lock(struct intel_digital_port *dig_port)
+{
+ __intel_tc_port_lock(dig_port, 1);
+}
+
+/**
+ * intel_tc_port_disconnect_phy_work: disconnect TypeC PHY from display port
+ * @work: the tc_disconnect_phy_work member of a digital port
+ *
+ * Disconnect the given digital port from its TypeC PHY (handing back the
+ * control of the PHY to the TypeC subsystem). This happens in a delayed
+ * manner, after each AUX transaction and modeset disable.
+ */
+static void intel_tc_port_disconnect_phy_work(struct work_struct *work)
+{
+ struct intel_digital_port *dig_port =
+ container_of(work, struct intel_digital_port, tc_disconnect_phy_work.work);
+
+ mutex_lock(&dig_port->tc_lock);
+
+ if (!dig_port->tc_link_refcount)
+ intel_tc_port_update_mode(dig_port, 1, true);
+
+ mutex_unlock(&dig_port->tc_lock);
+}
+
+/**
+ * intel_tc_port_flush_work: flush the work disconnecting the PHY
+ * @dig_port: digital port
+ *
+ * Flush the delayed work disconnecting an idle PHY.
+ */
+void intel_tc_port_flush_work(struct intel_digital_port *dig_port)
+{
+ flush_delayed_work(&dig_port->tc_disconnect_phy_work);
+}
+
+void intel_tc_port_unlock(struct intel_digital_port *dig_port)
+{
+ if (!dig_port->tc_link_refcount && dig_port->tc_mode != TC_PORT_DISCONNECTED)
+ queue_delayed_work(system_unbound_wq, &dig_port->tc_disconnect_phy_work,
+ msecs_to_jiffies(1000));
+
+ mutex_unlock(&dig_port->tc_lock);
+}
+
+bool intel_tc_port_ref_held(struct intel_digital_port *dig_port)
+{
+ return mutex_is_locked(&dig_port->tc_lock) ||
+ dig_port->tc_link_refcount;
+}
+
+void intel_tc_port_get_link(struct intel_digital_port *dig_port,
+ int required_lanes)
+{
+ __intel_tc_port_lock(dig_port, required_lanes);
+ __intel_tc_port_get_link(dig_port);
+ intel_tc_port_unlock(dig_port);
+}
+
+void intel_tc_port_put_link(struct intel_digital_port *dig_port)
+{
+ intel_tc_port_lock(dig_port);
+ __intel_tc_port_put_link(dig_port);
+ intel_tc_port_unlock(dig_port);
+
+ /*
+ * Disconnecting the PHY after the PHY's PLL gets disabled may
+ * hang the system on ADL-P, so disconnect the PHY here synchronously.
+ * TODO: remove this once the root cause of the ordering requirement
+ * is found/fixed.
+ */
+ intel_tc_port_flush_work(dig_port);
+}
+
+static bool
+tc_has_modular_fia(struct drm_i915_private *i915, struct intel_digital_port *dig_port)
+{
+ enum intel_display_power_domain domain;
+ intel_wakeref_t wakeref;
+ u32 val;
+
+ if (!INTEL_INFO(i915)->display.has_modular_fia)
+ return false;
+
+ mutex_lock(&dig_port->tc_lock);
+ wakeref = tc_cold_block(dig_port, &domain);
+ val = intel_uncore_read(&i915->uncore, PORT_TX_DFLEXDPSP(FIA1));
+ tc_cold_unblock(dig_port, domain, wakeref);
+ mutex_unlock(&dig_port->tc_lock);
+
+ drm_WARN_ON(&i915->drm, val == 0xffffffff);
+
+ return val & MODULAR_FIA_MASK;
+}
+
+static void
+tc_port_load_fia_params(struct drm_i915_private *i915, struct intel_digital_port *dig_port)
+{
+ enum port port = dig_port->base.port;
+ enum tc_port tc_port = intel_port_to_tc(i915, port);
+
+ /*
+ * Each Modular FIA instance houses 2 TC ports. In an SoC that has
+ * more than two TC ports, there are multiple instances of Modular FIA.
+ */
+ if (tc_has_modular_fia(i915, dig_port)) {
+ dig_port->tc_phy_fia = tc_port / 2;
+ dig_port->tc_phy_fia_idx = tc_port % 2;
+ } else {
+ dig_port->tc_phy_fia = FIA1;
+ dig_port->tc_phy_fia_idx = tc_port;
+ }
+}
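+
+/*
+ * E.g. (illustrative): with a modular FIA, TC ports 1/2 map to FIA1 and TC
+ * ports 3/4 to FIA2, each at index 0/1 within its FIA; without one, all TC
+ * ports share FIA1, indexed by their own TC port number.
+ */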
+
+void intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ enum port port = dig_port->base.port;
+ enum tc_port tc_port = intel_port_to_tc(i915, port);
+
+ if (drm_WARN_ON(&i915->drm, tc_port == TC_PORT_NONE))
+ return;
+
+ snprintf(dig_port->tc_port_name, sizeof(dig_port->tc_port_name),
+ "%c/TC#%d", port_name(port), tc_port + 1);
+
+ mutex_init(&dig_port->tc_lock);
+ INIT_DELAYED_WORK(&dig_port->tc_disconnect_phy_work, intel_tc_port_disconnect_phy_work);
+ dig_port->tc_legacy_port = is_legacy;
+ dig_port->tc_mode = TC_PORT_DISCONNECTED;
+ dig_port->tc_link_refcount = 0;
+ tc_port_load_fia_params(i915, dig_port);
+
+ intel_tc_port_init_mode(dig_port);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_tc.h b/drivers/gpu/drm/i915/display/intel_tc.h
new file mode 100644
index 000000000..d54082e2d
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_tc.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_TC_H__
+#define __INTEL_TC_H__
+
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+struct intel_digital_port;
+struct intel_encoder;
+
+bool intel_tc_port_in_tbt_alt_mode(struct intel_digital_port *dig_port);
+bool intel_tc_port_in_dp_alt_mode(struct intel_digital_port *dig_port);
+bool intel_tc_port_in_legacy_mode(struct intel_digital_port *dig_port);
+
+bool intel_tc_port_connected(struct intel_encoder *encoder);
+
+u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port);
+u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port);
+int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port);
+void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
+ int required_lanes);
+
+void intel_tc_port_init_mode(struct intel_digital_port *dig_port);
+void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port);
+void intel_tc_port_lock(struct intel_digital_port *dig_port);
+void intel_tc_port_unlock(struct intel_digital_port *dig_port);
+void intel_tc_port_flush_work(struct intel_digital_port *dig_port);
+void intel_tc_port_get_link(struct intel_digital_port *dig_port,
+ int required_lanes);
+void intel_tc_port_put_link(struct intel_digital_port *dig_port);
+bool intel_tc_port_ref_held(struct intel_digital_port *dig_port);
+
+void intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy);
+
+bool intel_tc_cold_requires_aux_pw(struct intel_digital_port *dig_port);
+
+#endif /* __INTEL_TC_H__ */
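+
+/*
+ * Hedged sketch (editorial, not header content): the intended short-access
+ * pattern for the API above is to take the TC lock, query PHY state and
+ * release it without pinning the link. The function name is illustrative.
+ */
+static int example_read_max_lanes(struct intel_digital_port *dig_port)
+{
+	int max_lanes;
+
+	intel_tc_port_lock(dig_port);
+	max_lanes = intel_tc_port_fia_max_lane_count(dig_port);
+	intel_tc_port_unlock(dig_port);
+
+	return max_lanes;
+}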
diff --git a/drivers/gpu/drm/i915/display/intel_tc_phy_regs.h b/drivers/gpu/drm/i915/display/intel_tc_phy_regs.h
new file mode 100644
index 000000000..5a545086f
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_tc_phy_regs.h
@@ -0,0 +1,280 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __INTEL_TC_PHY_REGS__
+#define __INTEL_TC_PHY_REGS__
+
+#include "i915_reg_defs.h"
+
+#define MG_PHY_PORT_LN(ln, tc_port, ln0p1, ln0p2, ln1p1) \
+ _MMIO(_PORT(tc_port, ln0p1, ln0p2) + (ln) * ((ln1p1) - (ln0p1)))
+
+#define MG_TX_LINK_PARAMS_TX1LN0_PORT1 0x16812C
+#define MG_TX_LINK_PARAMS_TX1LN1_PORT1 0x16852C
+#define MG_TX_LINK_PARAMS_TX1LN0_PORT2 0x16912C
+#define MG_TX_LINK_PARAMS_TX1LN1_PORT2 0x16952C
+#define MG_TX1_LINK_PARAMS(ln, tc_port) \
+ MG_PHY_PORT_LN(ln, tc_port, MG_TX_LINK_PARAMS_TX1LN0_PORT1, \
+ MG_TX_LINK_PARAMS_TX1LN0_PORT2, \
+ MG_TX_LINK_PARAMS_TX1LN1_PORT1)
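+
+/*
+ * Worked example of the MG_PHY_PORT_LN() arithmetic for the block above:
+ *   MG_TX1_LINK_PARAMS(1, 0) = 0x16812C + 1 * (0x16852C - 0x16812C)
+ *                            = 0x16852C
+ * i.e. within an MG PHY the two lanes of a TX group are 0x400 apart and
+ * consecutive TC ports are 0x1000 apart.
+ */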
+
+#define MG_TX_LINK_PARAMS_TX2LN0_PORT1 0x1680AC
+#define MG_TX_LINK_PARAMS_TX2LN1_PORT1 0x1684AC
+#define MG_TX_LINK_PARAMS_TX2LN0_PORT2 0x1690AC
+#define MG_TX_LINK_PARAMS_TX2LN1_PORT2 0x1694AC
+#define MG_TX2_LINK_PARAMS(ln, tc_port) \
+ MG_PHY_PORT_LN(ln, tc_port, MG_TX_LINK_PARAMS_TX2LN0_PORT1, \
+ MG_TX_LINK_PARAMS_TX2LN0_PORT2, \
+ MG_TX_LINK_PARAMS_TX2LN1_PORT1)
+#define CRI_USE_FS32 (1 << 5)
+
+#define MG_TX_PISO_READLOAD_TX1LN0_PORT1 0x16814C
+#define MG_TX_PISO_READLOAD_TX1LN1_PORT1 0x16854C
+#define MG_TX_PISO_READLOAD_TX1LN0_PORT2 0x16914C
+#define MG_TX_PISO_READLOAD_TX1LN1_PORT2 0x16954C
+#define MG_TX1_PISO_READLOAD(ln, tc_port) \
+ MG_PHY_PORT_LN(ln, tc_port, MG_TX_PISO_READLOAD_TX1LN0_PORT1, \
+ MG_TX_PISO_READLOAD_TX1LN0_PORT2, \
+ MG_TX_PISO_READLOAD_TX1LN1_PORT1)
+
+#define MG_TX_PISO_READLOAD_TX2LN0_PORT1 0x1680CC
+#define MG_TX_PISO_READLOAD_TX2LN1_PORT1 0x1684CC
+#define MG_TX_PISO_READLOAD_TX2LN0_PORT2 0x1690CC
+#define MG_TX_PISO_READLOAD_TX2LN1_PORT2 0x1694CC
+#define MG_TX2_PISO_READLOAD(ln, tc_port) \
+ MG_PHY_PORT_LN(ln, tc_port, MG_TX_PISO_READLOAD_TX2LN0_PORT1, \
+ MG_TX_PISO_READLOAD_TX2LN0_PORT2, \
+ MG_TX_PISO_READLOAD_TX2LN1_PORT1)
+#define CRI_CALCINIT (1 << 1)
+
+#define MG_TX_SWINGCTRL_TX1LN0_PORT1 0x168148
+#define MG_TX_SWINGCTRL_TX1LN1_PORT1 0x168548
+#define MG_TX_SWINGCTRL_TX1LN0_PORT2 0x169148
+#define MG_TX_SWINGCTRL_TX1LN1_PORT2 0x169548
+#define MG_TX1_SWINGCTRL(ln, tc_port) \
+ MG_PHY_PORT_LN(ln, tc_port, MG_TX_SWINGCTRL_TX1LN0_PORT1, \
+ MG_TX_SWINGCTRL_TX1LN0_PORT2, \
+ MG_TX_SWINGCTRL_TX1LN1_PORT1)
+
+#define MG_TX_SWINGCTRL_TX2LN0_PORT1 0x1680C8
+#define MG_TX_SWINGCTRL_TX2LN1_PORT1 0x1684C8
+#define MG_TX_SWINGCTRL_TX2LN0_PORT2 0x1690C8
+#define MG_TX_SWINGCTRL_TX2LN1_PORT2 0x1694C8
+#define MG_TX2_SWINGCTRL(ln, tc_port) \
+ MG_PHY_PORT_LN(ln, tc_port, MG_TX_SWINGCTRL_TX2LN0_PORT1, \
+ MG_TX_SWINGCTRL_TX2LN0_PORT2, \
+ MG_TX_SWINGCTRL_TX2LN1_PORT1)
+#define CRI_TXDEEMPH_OVERRIDE_17_12(x) ((x) << 0)
+#define CRI_TXDEEMPH_OVERRIDE_17_12_MASK (0x3F << 0)
+
+#define MG_TX_DRVCTRL_TX1LN0_TXPORT1 0x168144
+#define MG_TX_DRVCTRL_TX1LN1_TXPORT1 0x168544
+#define MG_TX_DRVCTRL_TX1LN0_TXPORT2 0x169144
+#define MG_TX_DRVCTRL_TX1LN1_TXPORT2 0x169544
+#define MG_TX_DRVCTRL_TX1LN0_TXPORT3 0x16A144
+#define MG_TX_DRVCTRL_TX1LN1_TXPORT3 0x16A544
+#define MG_TX_DRVCTRL_TX1LN0_TXPORT4 0x16B144
+#define MG_TX_DRVCTRL_TX1LN1_TXPORT4 0x16B544
+#define MG_TX1_DRVCTRL(ln, tc_port) \
+ MG_PHY_PORT_LN(ln, tc_port, MG_TX_DRVCTRL_TX1LN0_TXPORT1, \
+ MG_TX_DRVCTRL_TX1LN0_TXPORT2, \
+ MG_TX_DRVCTRL_TX1LN1_TXPORT1)
+
+#define MG_TX_DRVCTRL_TX2LN0_PORT1 0x1680C4
+#define MG_TX_DRVCTRL_TX2LN1_PORT1 0x1684C4
+#define MG_TX_DRVCTRL_TX2LN0_PORT2 0x1690C4
+#define MG_TX_DRVCTRL_TX2LN1_PORT2 0x1694C4
+#define MG_TX2_DRVCTRL(ln, tc_port) \
+ MG_PHY_PORT_LN(ln, tc_port, MG_TX_DRVCTRL_TX2LN0_PORT1, \
+ MG_TX_DRVCTRL_TX2LN0_PORT2, \
+ MG_TX_DRVCTRL_TX2LN1_PORT1)
+#define CRI_TXDEEMPH_OVERRIDE_11_6(x) ((x) << 24)
+#define CRI_TXDEEMPH_OVERRIDE_11_6_MASK (0x3F << 24)
+#define CRI_TXDEEMPH_OVERRIDE_EN (1 << 22)
+#define CRI_TXDEEMPH_OVERRIDE_5_0(x) ((x) << 16)
+#define CRI_TXDEEMPH_OVERRIDE_5_0_MASK (0x3F << 16)
+#define CRI_LOADGEN_SEL(x) ((x) << 12)
+#define CRI_LOADGEN_SEL_MASK (0x3 << 12)
+
+#define MG_CLKHUB_LN0_PORT1 0x16839C
+#define MG_CLKHUB_LN1_PORT1 0x16879C
+#define MG_CLKHUB_LN0_PORT2 0x16939C
+#define MG_CLKHUB_LN1_PORT2 0x16979C
+#define MG_CLKHUB(ln, tc_port) \
+ MG_PHY_PORT_LN(ln, tc_port, MG_CLKHUB_LN0_PORT1, \
+ MG_CLKHUB_LN0_PORT2, \
+ MG_CLKHUB_LN1_PORT1)
+#define CFG_LOW_RATE_LKREN_EN (1 << 11)
+
+#define MG_TX_DCC_TX1LN0_PORT1 0x168110
+#define MG_TX_DCC_TX1LN1_PORT1 0x168510
+#define MG_TX_DCC_TX1LN0_PORT2 0x169110
+#define MG_TX_DCC_TX1LN1_PORT2 0x169510
+#define MG_TX1_DCC(ln, tc_port) \
+ MG_PHY_PORT_LN(ln, tc_port, MG_TX_DCC_TX1LN0_PORT1, \
+ MG_TX_DCC_TX1LN0_PORT2, \
+ MG_TX_DCC_TX1LN1_PORT1)
+#define MG_TX_DCC_TX2LN0_PORT1 0x168090
+#define MG_TX_DCC_TX2LN1_PORT1 0x168490
+#define MG_TX_DCC_TX2LN0_PORT2 0x169090
+#define MG_TX_DCC_TX2LN1_PORT2 0x169490
+#define MG_TX2_DCC(ln, tc_port) \
+ MG_PHY_PORT_LN(ln, tc_port, MG_TX_DCC_TX2LN0_PORT1, \
+ MG_TX_DCC_TX2LN0_PORT2, \
+ MG_TX_DCC_TX2LN1_PORT1)
+#define CFG_AMI_CK_DIV_OVERRIDE_VAL(x) ((x) << 25)
+#define CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK (0x3 << 25)
+#define CFG_AMI_CK_DIV_OVERRIDE_EN (1 << 24)
+
+#define MG_DP_MODE_LN0_ACU_PORT1 0x1683A0
+#define MG_DP_MODE_LN1_ACU_PORT1 0x1687A0
+#define MG_DP_MODE_LN0_ACU_PORT2 0x1693A0
+#define MG_DP_MODE_LN1_ACU_PORT2 0x1697A0
+#define MG_DP_MODE(ln, tc_port) \
+ MG_PHY_PORT_LN(ln, tc_port, MG_DP_MODE_LN0_ACU_PORT1, \
+ MG_DP_MODE_LN0_ACU_PORT2, \
+ MG_DP_MODE_LN1_ACU_PORT1)
+#define MG_DP_MODE_CFG_DP_X2_MODE (1 << 7)
+#define MG_DP_MODE_CFG_DP_X1_MODE (1 << 6)
+
+#define FIA1_BASE 0x163000
+#define FIA2_BASE 0x16E000
+#define FIA3_BASE 0x16F000
+#define _FIA(fia) _PICK((fia), FIA1_BASE, FIA2_BASE, FIA3_BASE)
+#define _MMIO_FIA(fia, off) _MMIO(_FIA(fia) + (off))
+
+/* ICL PHY DFLEX registers */
+#define PORT_TX_DFLEXDPMLE1(fia) _MMIO_FIA((fia), 0x008C0)
+#define DFLEXDPMLE1_DPMLETC_MASK(idx) (0xf << (4 * (idx)))
+#define DFLEXDPMLE1_DPMLETC_ML0(idx) (1 << (4 * (idx)))
+#define DFLEXDPMLE1_DPMLETC_ML1_0(idx) (3 << (4 * (idx)))
+#define DFLEXDPMLE1_DPMLETC_ML3(idx) (8 << (4 * (idx)))
+#define DFLEXDPMLE1_DPMLETC_ML3_2(idx) (12 << (4 * (idx)))
+#define DFLEXDPMLE1_DPMLETC_ML3_0(idx) (15 << (4 * (idx)))
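+
+/*
+ * Example of the per-port field packing above: each FIA instance carries
+ * one 4-bit main-lane-enable field per TC port, indexed by tc_phy_fia_idx,
+ * so
+ *   DFLEXDPMLE1_DPMLETC_ML3_0(1) = 15 << 4 = 0xf0
+ * enables all four lanes (ML3..ML0) of the second port behind the FIA.
+ */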
+
+#define _MG_REFCLKIN_CTL_PORT1 0x16892C
+#define _MG_REFCLKIN_CTL_PORT2 0x16992C
+#define MG_REFCLKIN_CTL_OD_2_MUX(x) ((x) << 8)
+#define MG_REFCLKIN_CTL_OD_2_MUX_MASK (0x7 << 8)
+#define MG_REFCLKIN_CTL(tc_port) _MMIO_PORT((tc_port), \
+ _MG_REFCLKIN_CTL_PORT1, \
+ _MG_REFCLKIN_CTL_PORT2)
+
+#define _MG_CLKTOP2_CORECLKCTL1_PORT1 0x1688D8
+#define _MG_CLKTOP2_CORECLKCTL1_PORT2 0x1698D8
+#define MG_CLKTOP2_CORECLKCTL1_B_DIVRATIO(x) ((x) << 16)
+#define MG_CLKTOP2_CORECLKCTL1_B_DIVRATIO_MASK (0xff << 16)
+#define MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(x) ((x) << 8)
+#define MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK (0xff << 8)
+#define MG_CLKTOP2_CORECLKCTL1(tc_port) _MMIO_PORT((tc_port), \
+ _MG_CLKTOP2_CORECLKCTL1_PORT1, \
+ _MG_CLKTOP2_CORECLKCTL1_PORT2)
+
+#define _MG_CLKTOP2_HSCLKCTL_PORT1 0x1688D4
+#define _MG_CLKTOP2_HSCLKCTL_PORT2 0x1698D4
+#define MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(x) ((x) << 16)
+#define MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK (0x1 << 16)
+#define MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(x) ((x) << 14)
+#define MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK (0x3 << 14)
+#define MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK (0x3 << 12)
+#define MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2 (0 << 12)
+#define MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3 (1 << 12)
+#define MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5 (2 << 12)
+#define MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7 (3 << 12)
+#define MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(x) ((x) << 8)
+#define MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT 8
+#define MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK (0xf << 8)
+#define MG_CLKTOP2_HSCLKCTL(tc_port) _MMIO_PORT((tc_port), \
+ _MG_CLKTOP2_HSCLKCTL_PORT1, \
+ _MG_CLKTOP2_HSCLKCTL_PORT2)
+
+#define _MG_PLL_DIV0_PORT1 0x168A00
+#define _MG_PLL_DIV0_PORT2 0x169A00
+#define MG_PLL_DIV0_FRACNEN_H (1 << 30)
+#define MG_PLL_DIV0_FBDIV_FRAC_MASK (0x3fffff << 8)
+#define MG_PLL_DIV0_FBDIV_FRAC_SHIFT 8
+#define MG_PLL_DIV0_FBDIV_FRAC(x) ((x) << 8)
+#define MG_PLL_DIV0_FBDIV_INT_MASK (0xff << 0)
+#define MG_PLL_DIV0_FBDIV_INT(x) ((x) << 0)
+#define MG_PLL_DIV0(tc_port) _MMIO_PORT((tc_port), _MG_PLL_DIV0_PORT1, \
+ _MG_PLL_DIV0_PORT2)
+
+#define _MG_PLL_DIV1_PORT1 0x168A04
+#define _MG_PLL_DIV1_PORT2 0x169A04
+#define MG_PLL_DIV1_IREF_NDIVRATIO(x) ((x) << 16)
+#define MG_PLL_DIV1_DITHER_DIV_1 (0 << 12)
+#define MG_PLL_DIV1_DITHER_DIV_2 (1 << 12)
+#define MG_PLL_DIV1_DITHER_DIV_4 (2 << 12)
+#define MG_PLL_DIV1_DITHER_DIV_8 (3 << 12)
+#define MG_PLL_DIV1_NDIVRATIO(x) ((x) << 4)
+#define MG_PLL_DIV1_FBPREDIV_MASK (0xf << 0)
+#define MG_PLL_DIV1_FBPREDIV(x) ((x) << 0)
+#define MG_PLL_DIV1(tc_port) _MMIO_PORT((tc_port), _MG_PLL_DIV1_PORT1, \
+ _MG_PLL_DIV1_PORT2)
+
+#define _MG_PLL_LF_PORT1 0x168A08
+#define _MG_PLL_LF_PORT2 0x169A08
+#define MG_PLL_LF_TDCTARGETCNT(x) ((x) << 24)
+#define MG_PLL_LF_AFCCNTSEL_256 (0 << 20)
+#define MG_PLL_LF_AFCCNTSEL_512 (1 << 20)
+#define MG_PLL_LF_GAINCTRL(x) ((x) << 16)
+#define MG_PLL_LF_INT_COEFF(x) ((x) << 8)
+#define MG_PLL_LF_PROP_COEFF(x) ((x) << 0)
+#define MG_PLL_LF(tc_port) _MMIO_PORT((tc_port), _MG_PLL_LF_PORT1, \
+ _MG_PLL_LF_PORT2)
+
+#define _MG_PLL_FRAC_LOCK_PORT1 0x168A0C
+#define _MG_PLL_FRAC_LOCK_PORT2 0x169A0C
+#define MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 (1 << 18)
+#define MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 (1 << 16)
+#define MG_PLL_FRAC_LOCK_LOCKTHRESH(x) ((x) << 11)
+#define MG_PLL_FRAC_LOCK_DCODITHEREN (1 << 10)
+#define MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN (1 << 8)
+#define MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(x) ((x) << 0)
+#define MG_PLL_FRAC_LOCK(tc_port) _MMIO_PORT((tc_port), \
+ _MG_PLL_FRAC_LOCK_PORT1, \
+ _MG_PLL_FRAC_LOCK_PORT2)
+
+#define _MG_PLL_SSC_PORT1 0x168A10
+#define _MG_PLL_SSC_PORT2 0x169A10
+#define MG_PLL_SSC_EN (1 << 28)
+#define MG_PLL_SSC_TYPE(x) ((x) << 26)
+#define MG_PLL_SSC_STEPLENGTH(x) ((x) << 16)
+#define MG_PLL_SSC_STEPNUM(x) ((x) << 10)
+#define MG_PLL_SSC_FLLEN (1 << 9)
+#define MG_PLL_SSC_STEPSIZE(x) ((x) << 0)
+#define MG_PLL_SSC(tc_port) _MMIO_PORT((tc_port), _MG_PLL_SSC_PORT1, \
+ _MG_PLL_SSC_PORT2)
+
+#define _MG_PLL_BIAS_PORT1 0x168A14
+#define _MG_PLL_BIAS_PORT2 0x169A14
+#define MG_PLL_BIAS_BIAS_GB_SEL(x) ((x) << 30)
+#define MG_PLL_BIAS_BIAS_GB_SEL_MASK (0x3 << 30)
+#define MG_PLL_BIAS_INIT_DCOAMP(x) ((x) << 24)
+#define MG_PLL_BIAS_INIT_DCOAMP_MASK (0x3f << 24)
+#define MG_PLL_BIAS_BIAS_BONUS(x) ((x) << 16)
+#define MG_PLL_BIAS_BIAS_BONUS_MASK (0xff << 16)
+#define MG_PLL_BIAS_BIASCAL_EN (1 << 15)
+#define MG_PLL_BIAS_CTRIM(x) ((x) << 8)
+#define MG_PLL_BIAS_CTRIM_MASK (0x1f << 8)
+#define MG_PLL_BIAS_VREF_RDAC(x) ((x) << 5)
+#define MG_PLL_BIAS_VREF_RDAC_MASK (0x7 << 5)
+#define MG_PLL_BIAS_IREFTRIM(x) ((x) << 0)
+#define MG_PLL_BIAS_IREFTRIM_MASK (0x1f << 0)
+#define MG_PLL_BIAS(tc_port) _MMIO_PORT((tc_port), _MG_PLL_BIAS_PORT1, \
+ _MG_PLL_BIAS_PORT2)
+
+#define _MG_PLL_TDC_COLDST_BIAS_PORT1 0x168A18
+#define _MG_PLL_TDC_COLDST_BIAS_PORT2 0x169A18
+#define MG_PLL_TDC_COLDST_IREFINT_EN (1 << 27)
+#define MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(x) ((x) << 17)
+#define MG_PLL_TDC_COLDST_COLDSTART (1 << 16)
+#define MG_PLL_TDC_TDCOVCCORR_EN (1 << 2)
+#define MG_PLL_TDC_TDCSEL(x) ((x) << 0)
+#define MG_PLL_TDC_COLDST_BIAS(tc_port) _MMIO_PORT((tc_port), \
+ _MG_PLL_TDC_COLDST_BIAS_PORT1, \
+ _MG_PLL_TDC_COLDST_BIAS_PORT2)
+
+#endif /* __INTEL_TC_PHY_REGS__ */
diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c
new file mode 100644
index 000000000..fb25be800
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_tv.c
@@ -0,0 +1,2020 @@
+/*
+ * Copyright © 2006-2008 Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+/** @file
+ * Integrated TV-out support for the 915GM and 945GM.
+ */
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+
+#include "i915_drv.h"
+#include "intel_connector.h"
+#include "intel_crtc.h"
+#include "intel_de.h"
+#include "intel_display_types.h"
+#include "intel_dpll.h"
+#include "intel_hotplug.h"
+#include "intel_tv.h"
+
+enum tv_margin {
+ TV_MARGIN_LEFT, TV_MARGIN_TOP,
+ TV_MARGIN_RIGHT, TV_MARGIN_BOTTOM
+};
+
+struct intel_tv {
+ struct intel_encoder base;
+
+ int type;
+};
+
+struct video_levels {
+ u16 blank, black;
+ u8 burst;
+};
+
+struct color_conversion {
+ u16 ry, gy, by, ay;
+ u16 ru, gu, bu, au;
+ u16 rv, gv, bv, av;
+};
+
+static const u32 filter_table[] = {
+ 0xB1403000, 0x2E203500, 0x35002E20, 0x3000B140,
+ 0x35A0B160, 0x2DC02E80, 0xB1403480, 0xB1603000,
+ 0x2EA03640, 0x34002D80, 0x3000B120, 0x36E0B160,
+ 0x2D202EF0, 0xB1203380, 0xB1603000, 0x2F303780,
+ 0x33002CC0, 0x3000B100, 0x3820B160, 0x2C802F50,
+ 0xB10032A0, 0xB1603000, 0x2F9038C0, 0x32202C20,
+ 0x3000B0E0, 0x3980B160, 0x2BC02FC0, 0xB0E031C0,
+ 0xB1603000, 0x2FF03A20, 0x31602B60, 0xB020B0C0,
+ 0x3AE0B160, 0x2B001810, 0xB0C03120, 0xB140B020,
+ 0x18283BA0, 0x30C02A80, 0xB020B0A0, 0x3C60B140,
+ 0x2A201838, 0xB0A03080, 0xB120B020, 0x18383D20,
+ 0x304029C0, 0xB040B080, 0x3DE0B100, 0x29601848,
+ 0xB0803000, 0xB100B040, 0x18483EC0, 0xB0402900,
+ 0xB040B060, 0x3F80B0C0, 0x28801858, 0xB060B080,
+ 0xB0A0B060, 0x18602820, 0xB0A02820, 0x0000B060,
+ 0xB1403000, 0x2E203500, 0x35002E20, 0x3000B140,
+ 0x35A0B160, 0x2DC02E80, 0xB1403480, 0xB1603000,
+ 0x2EA03640, 0x34002D80, 0x3000B120, 0x36E0B160,
+ 0x2D202EF0, 0xB1203380, 0xB1603000, 0x2F303780,
+ 0x33002CC0, 0x3000B100, 0x3820B160, 0x2C802F50,
+ 0xB10032A0, 0xB1603000, 0x2F9038C0, 0x32202C20,
+ 0x3000B0E0, 0x3980B160, 0x2BC02FC0, 0xB0E031C0,
+ 0xB1603000, 0x2FF03A20, 0x31602B60, 0xB020B0C0,
+ 0x3AE0B160, 0x2B001810, 0xB0C03120, 0xB140B020,
+ 0x18283BA0, 0x30C02A80, 0xB020B0A0, 0x3C60B140,
+ 0x2A201838, 0xB0A03080, 0xB120B020, 0x18383D20,
+ 0x304029C0, 0xB040B080, 0x3DE0B100, 0x29601848,
+ 0xB0803000, 0xB100B040, 0x18483EC0, 0xB0402900,
+ 0xB040B060, 0x3F80B0C0, 0x28801858, 0xB060B080,
+ 0xB0A0B060, 0x18602820, 0xB0A02820, 0x0000B060,
+ 0x36403000, 0x2D002CC0, 0x30003640, 0x2D0036C0,
+ 0x35C02CC0, 0x37403000, 0x2C802D40, 0x30003540,
+ 0x2D8037C0, 0x34C02C40, 0x38403000, 0x2BC02E00,
+ 0x30003440, 0x2E2038C0, 0x34002B80, 0x39803000,
+ 0x2B402E40, 0x30003380, 0x2E603A00, 0x33402B00,
+ 0x3A803040, 0x2A802EA0, 0x30403300, 0x2EC03B40,
+ 0x32802A40, 0x3C003040, 0x2A002EC0, 0x30803240,
+ 0x2EC03C80, 0x320029C0, 0x3D403080, 0x29402F00,
+ 0x308031C0, 0x2F203DC0, 0x31802900, 0x3E8030C0,
+ 0x28802F40, 0x30C03140, 0x2F203F40, 0x31402840,
+ 0x28003100, 0x28002F00, 0x00003100, 0x36403000,
+ 0x2D002CC0, 0x30003640, 0x2D0036C0,
+ 0x35C02CC0, 0x37403000, 0x2C802D40, 0x30003540,
+ 0x2D8037C0, 0x34C02C40, 0x38403000, 0x2BC02E00,
+ 0x30003440, 0x2E2038C0, 0x34002B80, 0x39803000,
+ 0x2B402E40, 0x30003380, 0x2E603A00, 0x33402B00,
+ 0x3A803040, 0x2A802EA0, 0x30403300, 0x2EC03B40,
+ 0x32802A40, 0x3C003040, 0x2A002EC0, 0x30803240,
+ 0x2EC03C80, 0x320029C0, 0x3D403080, 0x29402F00,
+ 0x308031C0, 0x2F203DC0, 0x31802900, 0x3E8030C0,
+ 0x28802F40, 0x30C03140, 0x2F203F40, 0x31402840,
+ 0x28003100, 0x28002F00, 0x00003100,
+};
+
+/*
+ * Color conversion values have 3 separate fixed point formats:
+ *
+ * 10 bit fields (ay, au)
+ * 1.9 fixed point (b.bbbbbbbbb)
+ * 11 bit fields (ry, by, ru, gu, gv)
+ * exp.mantissa (ee.mmmmmmmmm)
+ * ee = 00 = 2^-1 (0.mmmmmmmmm)
+ * ee = 01 = 2^-2 (0.0mmmmmmmmm)
+ * ee = 10 = 2^-3 (0.00mmmmmmmmm)
+ * ee = 11 = 2^-4 (0.000mmmmmmmmm)
+ * 12 bit fields (gy, rv, bu)
+ * exp.mantissa (eee.mmmmmmmmm)
+ * eee = 000 = 2^-1 (0.mmmmmmmmm)
+ * eee = 001 = 2^-2 (0.0mmmmmmmmm)
+ * eee = 010 = 2^-3 (0.00mmmmmmmmm)
+ * eee = 011 = 2^-4 (0.000mmmmmmmmm)
+ * eee = 100 = reserved
+ * eee = 101 = reserved
+ * eee = 110 = reserved
+ * eee = 111 = 2^0 (m.mmmmmmmm) (only usable for 1.0 representation)
+ *
+ * Saturation and contrast are 8 bits, with their own representation:
+ * 8 bit field (saturation, contrast)
+ * exp.mantissa (ee.mmmmmm)
+ * ee = 00 = 2^-1 (0.mmmmmm)
+ * ee = 01 = 2^0 (m.mmmmm)
+ * ee = 10 = 2^1 (mm.mmmm)
+ * ee = 11 = 2^2 (mmm.mmm)
+ *
+ * Simple conversion function:
+ *
+ * static u32
+ * float_to_csc_11(float f)
+ * {
+ * u32 exp;
+ * u32 mant;
+ * u32 ret;
+ *
+ * if (f < 0)
+ * f = -f;
+ *
+ * if (f >= 1) {
+ * exp = 0x7;
+ * mant = 1 << 8;
+ * } else {
+ * for (exp = 0; exp < 3 && f < 0.5; exp++)
+ * f *= 2.0;
+ * mant = (f * (1 << 9) + 0.5);
+ * if (mant >= (1 << 9))
+ * mant = (1 << 9) - 1;
+ * }
+ * ret = (exp << 9) | mant;
+ * return ret;
+ * }
+ */
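+
+/*
+ * Hedged sketch (editorial, not in the original code): the same 11-bit
+ * exp.mantissa encoding with integer math only, since kernel code cannot
+ * use floats. Input is an unsigned fraction in 1/65536ths; min_t() is
+ * from <linux/minmax.h>. Purely illustrative, mirroring float_to_csc_11().
+ */
+static inline u32 example_frac16_to_csc_11(u32 frac16)
+{
+	u32 exp, mant;
+
+	if (frac16 >= 0x10000)	/* >= 1.0: only 1.0 itself is representable */
+		return (0x7 << 9) | (1 << 8);
+
+	/* Normalize towards [0.5, 1.0), at most 3 doublings as above. */
+	for (exp = 0; exp < 3 && frac16 < 0x8000; exp++)
+		frac16 <<= 1;
+
+	/* Round to 9 mantissa bits and saturate, as the float version does. */
+	mant = min_t(u32, ((frac16 << 9) + 0x8000) >> 16, (1 << 9) - 1);
+
+	return (exp << 9) | mant;
+}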
+
+/*
+ * Behold, magic numbers! If we plant them they might grow a big
+ * s-video cable to the sky... or something.
+ *
+ * Pre-converted to appropriate hex value.
+ */
+
+/*
+ * PAL & NTSC values for composite & s-video connections
+ */
+static const struct color_conversion ntsc_m_csc_composite = {
+ .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104,
+ .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0200,
+ .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0200,
+};
+
+static const struct video_levels ntsc_m_levels_composite = {
+ .blank = 225, .black = 267, .burst = 113,
+};
+
+static const struct color_conversion ntsc_m_csc_svideo = {
+ .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0133,
+ .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0200,
+ .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0200,
+};
+
+static const struct video_levels ntsc_m_levels_svideo = {
+ .blank = 266, .black = 316, .burst = 133,
+};
+
+static const struct color_conversion ntsc_j_csc_composite = {
+ .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0119,
+ .ru = 0x074c, .gu = 0x0546, .bu = 0x05ec, .au = 0x0200,
+ .rv = 0x035a, .gv = 0x0322, .bv = 0x06e1, .av = 0x0200,
+};
+
+static const struct video_levels ntsc_j_levels_composite = {
+ .blank = 225, .black = 225, .burst = 113,
+};
+
+static const struct color_conversion ntsc_j_csc_svideo = {
+ .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x014c,
+ .ru = 0x0788, .gu = 0x0581, .bu = 0x0322, .au = 0x0200,
+ .rv = 0x0399, .gv = 0x0356, .bv = 0x070a, .av = 0x0200,
+};
+
+static const struct video_levels ntsc_j_levels_svideo = {
+ .blank = 266, .black = 266, .burst = 133,
+};
+
+static const struct color_conversion pal_csc_composite = {
+ .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0113,
+ .ru = 0x0745, .gu = 0x053f, .bu = 0x05e1, .au = 0x0200,
+ .rv = 0x0353, .gv = 0x031c, .bv = 0x06dc, .av = 0x0200,
+};
+
+static const struct video_levels pal_levels_composite = {
+ .blank = 237, .black = 237, .burst = 118,
+};
+
+static const struct color_conversion pal_csc_svideo = {
+ .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0145,
+ .ru = 0x0780, .gu = 0x0579, .bu = 0x031c, .au = 0x0200,
+ .rv = 0x0390, .gv = 0x034f, .bv = 0x0705, .av = 0x0200,
+};
+
+static const struct video_levels pal_levels_svideo = {
+ .blank = 280, .black = 280, .burst = 139,
+};
+
+static const struct color_conversion pal_m_csc_composite = {
+ .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104,
+ .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0200,
+ .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0200,
+};
+
+static const struct video_levels pal_m_levels_composite = {
+ .blank = 225, .black = 267, .burst = 113,
+};
+
+static const struct color_conversion pal_m_csc_svideo = {
+ .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0133,
+ .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0200,
+ .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0200,
+};
+
+static const struct video_levels pal_m_levels_svideo = {
+ .blank = 266, .black = 316, .burst = 133,
+};
+
+static const struct color_conversion pal_n_csc_composite = {
+ .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104,
+ .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0200,
+ .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0200,
+};
+
+static const struct video_levels pal_n_levels_composite = {
+ .blank = 225, .black = 267, .burst = 118,
+};
+
+static const struct color_conversion pal_n_csc_svideo = {
+ .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0133,
+ .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0200,
+ .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0200,
+};
+
+static const struct video_levels pal_n_levels_svideo = {
+ .blank = 266, .black = 316, .burst = 139,
+};
+
+/*
+ * Component connections
+ */
+static const struct color_conversion sdtv_csc_yprpb = {
+ .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0145,
+ .ru = 0x0559, .gu = 0x0353, .bu = 0x0100, .au = 0x0200,
+ .rv = 0x0100, .gv = 0x03ad, .bv = 0x074d, .av = 0x0200,
+};
+
+static const struct color_conversion hdtv_csc_yprpb = {
+ .ry = 0x05b3, .gy = 0x016e, .by = 0x0728, .ay = 0x0145,
+ .ru = 0x07d5, .gu = 0x038b, .bu = 0x0100, .au = 0x0200,
+ .rv = 0x0100, .gv = 0x03d1, .bv = 0x06bc, .av = 0x0200,
+};
+
+static const struct video_levels component_levels = {
+ .blank = 279, .black = 279, .burst = 0,
+};
+
+
+struct tv_mode {
+ const char *name;
+
+ u32 clock;
+ u16 refresh; /* in millihertz (for precision) */
+ u8 oversample;
+ u8 hsync_end;
+ u16 hblank_start, hblank_end, htotal;
+ bool progressive : 1, trilevel_sync : 1, component_only : 1;
+ u8 vsync_start_f1, vsync_start_f2, vsync_len;
+ bool veq_ena : 1;
+ u8 veq_start_f1, veq_start_f2, veq_len;
+ u8 vi_end_f1, vi_end_f2;
+ u16 nbr_end;
+ bool burst_ena : 1;
+ u8 hburst_start, hburst_len;
+ u8 vburst_start_f1;
+ u16 vburst_end_f1;
+ u8 vburst_start_f2;
+ u16 vburst_end_f2;
+ u8 vburst_start_f3;
+ u16 vburst_end_f3;
+ u8 vburst_start_f4;
+ u16 vburst_end_f4;
+ /*
+ * subcarrier programming
+ */
+ u16 dda2_size, dda3_size;
+ u8 dda1_inc;
+ u16 dda2_inc, dda3_inc;
+ u32 sc_reset;
+ bool pal_burst : 1;
+ /*
+ * blank/black levels
+ */
+ const struct video_levels *composite_levels, *svideo_levels;
+ const struct color_conversion *composite_color, *svideo_color;
+ const u32 *filter_table;
+};
+
+
+/*
+ * Sub carrier DDA
+ *
+ * I think this works as follows:
+ *
+ * subcarrier freq = pixel_clock * (dda1_inc + dda2_inc / dda2_size) / 4096
+ *
+ * Presumably, when dda3 is added in, it gets to adjust the dda2_inc value
+ *
+ * So,
+ * dda1_ideal = subcarrier/pixel * 4096
+ * dda1_inc = floor (dda1_ideal)
+ * dda2 = dda1_ideal - dda1_inc
+ *
+ * then pick a ratio for dda2 that gives the closest approximation. If
+ * you can't get close enough, you can play with dda3 as well. This
+ * seems likely to happen when dda2 is small as the jumps would be larger
+ *
+ * To invert this,
+ *
+ * pixel_clock = subcarrier * 4096 / (dda1_inc + dda2_inc / dda2_size)
+ *
+ * The constants below were all computed using a 107.520 MHz clock
+ */
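+
+/*
+ * Hedged sketch (editorial, not in the original code) of the recipe above
+ * with integer math; div_u64() is from <linux/math64.h>. It produces the
+ * ideal dda1 split only and ignores the "-1" adjustments that the mode
+ * table below bakes into its register values.
+ */
+static void example_subcarrier_dda(u32 subcarrier_hz, u32 pixel_clock_hz,
+				   u32 *dda1_inc, u32 *dda2_frac16)
+{
+	u64 ideal = div_u64((u64)subcarrier_hz * 4096 * 65536, pixel_clock_hz);
+
+	*dda1_inc = ideal >> 16;	/* floor(dda1_ideal) */
+	*dda2_frac16 = ideal & 0xffff;	/* remainder, in 1/65536ths */
+}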
+
+/*
+ * Register programming values for TV modes.
+ *
+ * These values account for -1s required.
+ */
+static const struct tv_mode tv_modes[] = {
+ {
+ .name = "NTSC-M",
+ .clock = 108000,
+ .refresh = 59940,
+ .oversample = 8,
+ .component_only = false,
+ /* 525 Lines, 60 Fields, 15.734 kHz line, Sub-Carrier 3.580 MHz */
+
+ .hsync_end = 64, .hblank_end = 124,
+ .hblank_start = 836, .htotal = 857,
+
+ .progressive = false, .trilevel_sync = false,
+
+ .vsync_start_f1 = 6, .vsync_start_f2 = 7,
+ .vsync_len = 6,
+
+ .veq_ena = true, .veq_start_f1 = 0,
+ .veq_start_f2 = 1, .veq_len = 18,
+
+ .vi_end_f1 = 20, .vi_end_f2 = 21,
+ .nbr_end = 240,
+
+ .burst_ena = true,
+ .hburst_start = 72, .hburst_len = 34,
+ .vburst_start_f1 = 9, .vburst_end_f1 = 240,
+ .vburst_start_f2 = 10, .vburst_end_f2 = 240,
+ .vburst_start_f3 = 9, .vburst_end_f3 = 240,
+ .vburst_start_f4 = 10, .vburst_end_f4 = 240,
+
+ /* desired 3.5800000 actual 3.5800000 clock 107.52 */
+ .dda1_inc = 135,
+ .dda2_inc = 20800, .dda2_size = 27456,
+ .dda3_inc = 0, .dda3_size = 0,
+ .sc_reset = TV_SC_RESET_EVERY_4,
+ .pal_burst = false,
+
+ .composite_levels = &ntsc_m_levels_composite,
+ .composite_color = &ntsc_m_csc_composite,
+ .svideo_levels = &ntsc_m_levels_svideo,
+ .svideo_color = &ntsc_m_csc_svideo,
+
+ .filter_table = filter_table,
+ },
+ {
+ .name = "NTSC-443",
+ .clock = 108000,
+ .refresh = 59940,
+ .oversample = 8,
+ .component_only = false,
+ /* 525 Lines, 60 Fields, 15.734 kHz line, Sub-Carrier 4.43 MHz */
+ .hsync_end = 64, .hblank_end = 124,
+ .hblank_start = 836, .htotal = 857,
+
+ .progressive = false, .trilevel_sync = false,
+
+ .vsync_start_f1 = 6, .vsync_start_f2 = 7,
+ .vsync_len = 6,
+
+ .veq_ena = true, .veq_start_f1 = 0,
+ .veq_start_f2 = 1, .veq_len = 18,
+
+ .vi_end_f1 = 20, .vi_end_f2 = 21,
+ .nbr_end = 240,
+
+ .burst_ena = true,
+ .hburst_start = 72, .hburst_len = 34,
+ .vburst_start_f1 = 9, .vburst_end_f1 = 240,
+ .vburst_start_f2 = 10, .vburst_end_f2 = 240,
+ .vburst_start_f3 = 9, .vburst_end_f3 = 240,
+ .vburst_start_f4 = 10, .vburst_end_f4 = 240,
+
+ /* desired 4.4336180 actual 4.4336180 clock 107.52 */
+ .dda1_inc = 168,
+ .dda2_inc = 4093, .dda2_size = 27456,
+ .dda3_inc = 310, .dda3_size = 525,
+ .sc_reset = TV_SC_RESET_NEVER,
+ .pal_burst = false,
+
+ .composite_levels = &ntsc_m_levels_composite,
+ .composite_color = &ntsc_m_csc_composite,
+ .svideo_levels = &ntsc_m_levels_svideo,
+ .svideo_color = &ntsc_m_csc_svideo,
+
+ .filter_table = filter_table,
+ },
+ {
+ .name = "NTSC-J",
+ .clock = 108000,
+ .refresh = 59940,
+ .oversample = 8,
+ .component_only = false,
+
+ /* 525 Lines, 60 Fields, 15.734 kHz line, Sub-Carrier 3.580 MHz */
+ .hsync_end = 64, .hblank_end = 124,
+ .hblank_start = 836, .htotal = 857,
+
+ .progressive = false, .trilevel_sync = false,
+
+ .vsync_start_f1 = 6, .vsync_start_f2 = 7,
+ .vsync_len = 6,
+
+ .veq_ena = true, .veq_start_f1 = 0,
+ .veq_start_f2 = 1, .veq_len = 18,
+
+ .vi_end_f1 = 20, .vi_end_f2 = 21,
+ .nbr_end = 240,
+
+ .burst_ena = true,
+ .hburst_start = 72, .hburst_len = 34,
+ .vburst_start_f1 = 9, .vburst_end_f1 = 240,
+ .vburst_start_f2 = 10, .vburst_end_f2 = 240,
+ .vburst_start_f3 = 9, .vburst_end_f3 = 240,
+ .vburst_start_f4 = 10, .vburst_end_f4 = 240,
+
+ /* desired 3.5800000 actual 3.5800000 clock 107.52 */
+ .dda1_inc = 135,
+ .dda2_inc = 20800, .dda2_size = 27456,
+ .dda3_inc = 0, .dda3_size = 0,
+ .sc_reset = TV_SC_RESET_EVERY_4,
+ .pal_burst = false,
+
+ .composite_levels = &ntsc_j_levels_composite,
+ .composite_color = &ntsc_j_csc_composite,
+ .svideo_levels = &ntsc_j_levels_svideo,
+ .svideo_color = &ntsc_j_csc_svideo,
+
+ .filter_table = filter_table,
+ },
+ {
+ .name = "PAL-M",
+ .clock = 108000,
+ .refresh = 59940,
+ .oversample = 8,
+ .component_only = false,
+
+ /* 525 Lines, 60 Fields, 15.734 kHz line, Sub-Carrier 3.580 MHz */
+ .hsync_end = 64, .hblank_end = 124,
+ .hblank_start = 836, .htotal = 857,
+
+ .progressive = false, .trilevel_sync = false,
+
+ .vsync_start_f1 = 6, .vsync_start_f2 = 7,
+ .vsync_len = 6,
+
+ .veq_ena = true, .veq_start_f1 = 0,
+ .veq_start_f2 = 1, .veq_len = 18,
+
+ .vi_end_f1 = 20, .vi_end_f2 = 21,
+ .nbr_end = 240,
+
+ .burst_ena = true,
+ .hburst_start = 72, .hburst_len = 34,
+ .vburst_start_f1 = 9, .vburst_end_f1 = 240,
+ .vburst_start_f2 = 10, .vburst_end_f2 = 240,
+ .vburst_start_f3 = 9, .vburst_end_f3 = 240,
+ .vburst_start_f4 = 10, .vburst_end_f4 = 240,
+
+ /* desired 3.5800000 actual 3.5800000 clock 107.52 */
+ .dda1_inc = 135,
+ .dda2_inc = 16704, .dda2_size = 27456,
+ .dda3_inc = 0, .dda3_size = 0,
+ .sc_reset = TV_SC_RESET_EVERY_8,
+ .pal_burst = true,
+
+ .composite_levels = &pal_m_levels_composite,
+ .composite_color = &pal_m_csc_composite,
+ .svideo_levels = &pal_m_levels_svideo,
+ .svideo_color = &pal_m_csc_svideo,
+
+ .filter_table = filter_table,
+ },
+ {
+ /* 625 Lines, 50 Fields, 15.625 kHz line, Sub-Carrier 4.434 MHz */
+ .name = "PAL-N",
+ .clock = 108000,
+ .refresh = 50000,
+ .oversample = 8,
+ .component_only = false,
+
+ .hsync_end = 64, .hblank_end = 128,
+ .hblank_start = 844, .htotal = 863,
+
+ .progressive = false, .trilevel_sync = false,
+
+
+ .vsync_start_f1 = 6, .vsync_start_f2 = 7,
+ .vsync_len = 6,
+
+ .veq_ena = true, .veq_start_f1 = 0,
+ .veq_start_f2 = 1, .veq_len = 18,
+
+ .vi_end_f1 = 24, .vi_end_f2 = 25,
+ .nbr_end = 286,
+
+ .burst_ena = true,
+ .hburst_start = 73, .hburst_len = 34,
+ .vburst_start_f1 = 8, .vburst_end_f1 = 285,
+ .vburst_start_f2 = 8, .vburst_end_f2 = 286,
+ .vburst_start_f3 = 9, .vburst_end_f3 = 286,
+ .vburst_start_f4 = 9, .vburst_end_f4 = 285,
+
+
+ /* desired 4.4336180 actual 4.4336180 clock 107.52 */
+ .dda1_inc = 135,
+ .dda2_inc = 23578, .dda2_size = 27648,
+ .dda3_inc = 134, .dda3_size = 625,
+ .sc_reset = TV_SC_RESET_EVERY_8,
+ .pal_burst = true,
+
+ .composite_levels = &pal_n_levels_composite,
+ .composite_color = &pal_n_csc_composite,
+ .svideo_levels = &pal_n_levels_svideo,
+ .svideo_color = &pal_n_csc_svideo,
+
+ .filter_table = filter_table,
+ },
+ {
+ /* 625 Lines, 50 Fields, 15.625 kHz line, Sub-Carrier 4.434 MHz */
+ .name = "PAL",
+ .clock = 108000,
+ .refresh = 50000,
+ .oversample = 8,
+ .component_only = false,
+
+ .hsync_end = 64, .hblank_end = 142,
+ .hblank_start = 844, .htotal = 863,
+
+ .progressive = false, .trilevel_sync = false,
+
+ .vsync_start_f1 = 5, .vsync_start_f2 = 6,
+ .vsync_len = 5,
+
+ .veq_ena = true, .veq_start_f1 = 0,
+ .veq_start_f2 = 1, .veq_len = 15,
+
+ .vi_end_f1 = 24, .vi_end_f2 = 25,
+ .nbr_end = 286,
+
+ .burst_ena = true,
+ .hburst_start = 73, .hburst_len = 32,
+ .vburst_start_f1 = 8, .vburst_end_f1 = 285,
+ .vburst_start_f2 = 8, .vburst_end_f2 = 286,
+ .vburst_start_f3 = 9, .vburst_end_f3 = 286,
+ .vburst_start_f4 = 9, .vburst_end_f4 = 285,
+
+ /* desired 4.4336180 actual 4.4336180 clock 107.52 */
+ .dda1_inc = 168,
+ .dda2_inc = 4122, .dda2_size = 27648,
+ .dda3_inc = 67, .dda3_size = 625,
+ .sc_reset = TV_SC_RESET_EVERY_8,
+ .pal_burst = true,
+
+ .composite_levels = &pal_levels_composite,
+ .composite_color = &pal_csc_composite,
+ .svideo_levels = &pal_levels_svideo,
+ .svideo_color = &pal_csc_svideo,
+
+ .filter_table = filter_table,
+ },
+ {
+ .name = "480p",
+ .clock = 108000,
+ .refresh = 59940,
+ .oversample = 4,
+ .component_only = true,
+
+ .hsync_end = 64, .hblank_end = 122,
+ .hblank_start = 842, .htotal = 857,
+
+ .progressive = true, .trilevel_sync = false,
+
+ .vsync_start_f1 = 12, .vsync_start_f2 = 12,
+ .vsync_len = 12,
+
+ .veq_ena = false,
+
+ .vi_end_f1 = 44, .vi_end_f2 = 44,
+ .nbr_end = 479,
+
+ .burst_ena = false,
+
+ .filter_table = filter_table,
+ },
+ {
+ .name = "576p",
+ .clock = 108000,
+ .refresh = 50000,
+ .oversample = 4,
+ .component_only = true,
+
+ .hsync_end = 64, .hblank_end = 139,
+ .hblank_start = 859, .htotal = 863,
+
+ .progressive = true, .trilevel_sync = false,
+
+ .vsync_start_f1 = 10, .vsync_start_f2 = 10,
+ .vsync_len = 10,
+
+ .veq_ena = false,
+
+ .vi_end_f1 = 48, .vi_end_f2 = 48,
+ .nbr_end = 575,
+
+ .burst_ena = false,
+
+ .filter_table = filter_table,
+ },
+ {
+ .name = "720p@60Hz",
+ .clock = 148500,
+ .refresh = 60000,
+ .oversample = 2,
+ .component_only = true,
+
+ .hsync_end = 80, .hblank_end = 300,
+ .hblank_start = 1580, .htotal = 1649,
+
+ .progressive = true, .trilevel_sync = true,
+
+ .vsync_start_f1 = 10, .vsync_start_f2 = 10,
+ .vsync_len = 10,
+
+ .veq_ena = false,
+
+ .vi_end_f1 = 29, .vi_end_f2 = 29,
+ .nbr_end = 719,
+
+ .burst_ena = false,
+
+ .filter_table = filter_table,
+ },
+ {
+ .name = "720p@50Hz",
+ .clock = 148500,
+ .refresh = 50000,
+ .oversample = 2,
+ .component_only = true,
+
+ .hsync_end = 80, .hblank_end = 300,
+ .hblank_start = 1580, .htotal = 1979,
+
+ .progressive = true, .trilevel_sync = true,
+
+ .vsync_start_f1 = 10, .vsync_start_f2 = 10,
+ .vsync_len = 10,
+
+ .veq_ena = false,
+
+ .vi_end_f1 = 29, .vi_end_f2 = 29,
+ .nbr_end = 719,
+
+ .burst_ena = false,
+
+ .filter_table = filter_table,
+ },
+ {
+ .name = "1080i@50Hz",
+ .clock = 148500,
+ .refresh = 50000,
+ .oversample = 2,
+ .component_only = true,
+
+ .hsync_end = 88, .hblank_end = 235,
+ .hblank_start = 2155, .htotal = 2639,
+
+ .progressive = false, .trilevel_sync = true,
+
+ .vsync_start_f1 = 4, .vsync_start_f2 = 5,
+ .vsync_len = 10,
+
+ .veq_ena = true, .veq_start_f1 = 4,
+ .veq_start_f2 = 4, .veq_len = 10,
+
+
+ .vi_end_f1 = 21, .vi_end_f2 = 22,
+ .nbr_end = 539,
+
+ .burst_ena = false,
+
+ .filter_table = filter_table,
+ },
+ {
+ .name = "1080i@60Hz",
+ .clock = 148500,
+ .refresh = 60000,
+ .oversample = 2,
+ .component_only = true,
+
+ .hsync_end = 88, .hblank_end = 235,
+ .hblank_start = 2155, .htotal = 2199,
+
+ .progressive = false, .trilevel_sync = true,
+
+ .vsync_start_f1 = 4, .vsync_start_f2 = 5,
+ .vsync_len = 10,
+
+ .veq_ena = true, .veq_start_f1 = 4,
+ .veq_start_f2 = 4, .veq_len = 10,
+
+
+ .vi_end_f1 = 21, .vi_end_f2 = 22,
+ .nbr_end = 539,
+
+ .burst_ena = false,
+
+ .filter_table = filter_table,
+ },
+
+ {
+ .name = "1080p@30Hz",
+ .clock = 148500,
+ .refresh = 30000,
+ .oversample = 2,
+ .component_only = true,
+
+ .hsync_end = 88, .hblank_end = 235,
+ .hblank_start = 2155, .htotal = 2199,
+
+ .progressive = true, .trilevel_sync = true,
+
+ .vsync_start_f1 = 8, .vsync_start_f2 = 8,
+ .vsync_len = 10,
+
+ .veq_ena = false, .veq_start_f1 = 0,
+ .veq_start_f2 = 0, .veq_len = 0,
+
+ .vi_end_f1 = 44, .vi_end_f2 = 44,
+ .nbr_end = 1079,
+
+ .burst_ena = false,
+
+ .filter_table = filter_table,
+ },
+
+ {
+ .name = "1080p@50Hz",
+ .clock = 148500,
+ .refresh = 50000,
+ .oversample = 1,
+ .component_only = true,
+
+ .hsync_end = 88, .hblank_end = 235,
+ .hblank_start = 2155, .htotal = 2639,
+
+ .progressive = true, .trilevel_sync = true,
+
+ .vsync_start_f1 = 8, .vsync_start_f2 = 8,
+ .vsync_len = 10,
+
+ .veq_ena = false, .veq_start_f1 = 0,
+ .veq_start_f2 = 0, .veq_len = 0,
+
+ .vi_end_f1 = 44, .vi_end_f2 = 44,
+ .nbr_end = 1079,
+
+ .burst_ena = false,
+
+ .filter_table = filter_table,
+ },
+
+ {
+ .name = "1080p@60Hz",
+ .clock = 148500,
+ .refresh = 60000,
+ .oversample = 1,
+ .component_only = true,
+
+ .hsync_end = 88, .hblank_end = 235,
+ .hblank_start = 2155, .htotal = 2199,
+
+ .progressive = true, .trilevel_sync = true,
+
+ .vsync_start_f1 = 8, .vsync_start_f2 = 8,
+ .vsync_len = 10,
+
+ .veq_ena = false, .veq_start_f1 = 0,
+ .veq_start_f2 = 0, .veq_len = 0,
+
+ .vi_end_f1 = 44, .vi_end_f2 = 44,
+ .nbr_end = 1079,
+
+ .burst_ena = false,
+
+ .filter_table = filter_table,
+ },
+};
+
+struct intel_tv_connector_state {
+ struct drm_connector_state base;
+
+ /*
+ * May need to override the user margins for
+ * gen3 >1024 wide source vertical centering.
+ */
+ struct {
+ u16 top, bottom;
+ } margins;
+
+ bool bypass_vfilter;
+};
+
+#define to_intel_tv_connector_state(x) container_of(x, struct intel_tv_connector_state, base)
+
+static struct drm_connector_state *
+intel_tv_connector_duplicate_state(struct drm_connector *connector)
+{
+ struct intel_tv_connector_state *state;
+
+ state = kmemdup(connector->state, sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
+
+ __drm_atomic_helper_connector_duplicate_state(connector, &state->base);
+ return &state->base;
+}
+
+static struct intel_tv *enc_to_tv(struct intel_encoder *encoder)
+{
+ return container_of(encoder, struct intel_tv, base);
+}
+
+static struct intel_tv *intel_attached_tv(struct intel_connector *connector)
+{
+ return enc_to_tv(intel_attached_encoder(connector));
+}
+
+static bool
+intel_tv_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ u32 tmp = intel_de_read(dev_priv, TV_CTL);
+
+ *pipe = (tmp & TV_ENC_PIPE_SEL_MASK) >> TV_ENC_PIPE_SEL_SHIFT;
+
+ return tmp & TV_ENC_ENABLE;
+}
+
+static void
+intel_enable_tv(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+
+ /* Prevents vblank waits from timing out in intel_tv_detect_type() */
+ intel_crtc_wait_for_next_vblank(to_intel_crtc(pipe_config->uapi.crtc));
+
+ intel_de_write(dev_priv, TV_CTL,
+ intel_de_read(dev_priv, TV_CTL) | TV_ENC_ENABLE);
+}
+
+static void
+intel_disable_tv(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+
+ intel_de_write(dev_priv, TV_CTL,
+ intel_de_read(dev_priv, TV_CTL) & ~TV_ENC_ENABLE);
+}
+
+static const struct tv_mode *intel_tv_mode_find(const struct drm_connector_state *conn_state)
+{
+ int format = conn_state->tv.mode;
+
+ return &tv_modes[format];
+}
+
+static enum drm_mode_status
+intel_tv_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct drm_i915_private *i915 = to_i915(connector->dev);
+ const struct tv_mode *tv_mode = intel_tv_mode_find(connector->state);
+ int max_dotclk = i915->max_dotclk_freq;
+ enum drm_mode_status status;
+
+ status = intel_cpu_transcoder_mode_valid(i915, mode);
+ if (status != MODE_OK)
+ return status;
+
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
+ if (mode->clock > max_dotclk)
+ return MODE_CLOCK_HIGH;
+
+ /* Ensure TV refresh is close to desired refresh */
+ if (abs(tv_mode->refresh - drm_mode_vrefresh(mode) * 1000) >= 1000)
+ return MODE_CLOCK_RANGE;
+
+ return MODE_OK;
+}
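+
+/*
+ * Example of the refresh check above: NTSC-M advertises .refresh = 59940
+ * (millihertz). A user mode that drm_mode_vrefresh() rounds to 60 Hz gives
+ * |59940 - 60000| = 60 < 1000 and is accepted, while a 50 Hz mode gives
+ * |59940 - 50000| = 9940 and is rejected with MODE_CLOCK_RANGE.
+ */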
+
+static int
+intel_tv_mode_vdisplay(const struct tv_mode *tv_mode)
+{
+ if (tv_mode->progressive)
+ return tv_mode->nbr_end + 1;
+ else
+ return 2 * (tv_mode->nbr_end + 1);
+}
+
+static void
+intel_tv_mode_to_mode(struct drm_display_mode *mode,
+ const struct tv_mode *tv_mode,
+ int clock)
+{
+ mode->clock = clock / (tv_mode->oversample >> !tv_mode->progressive);
+
+ /*
+ * tv_mode horizontal timings:
+ *
+ * hsync_end
+ * | hblank_end
+ * | | hblank_start
+ * | | | htotal
+ * | _______ |
+ * ____/ \___
+ * \__/ \
+ */
+ mode->hdisplay =
+ tv_mode->hblank_start - tv_mode->hblank_end;
+ mode->hsync_start = mode->hdisplay +
+ tv_mode->htotal - tv_mode->hblank_start;
+ mode->hsync_end = mode->hsync_start +
+ tv_mode->hsync_end;
+ mode->htotal = tv_mode->htotal + 1;
+
+ /*
+ * tv_mode vertical timings:
+ *
+ * vsync_start
+ * | vsync_end
+ * | | vi_end nbr_end
+ * | | | |
+ * | | _______
+ * \__ ____/ \
+ * \__/
+ */
+ mode->vdisplay = intel_tv_mode_vdisplay(tv_mode);
+ if (tv_mode->progressive) {
+ mode->vsync_start = mode->vdisplay +
+ tv_mode->vsync_start_f1 + 1;
+ mode->vsync_end = mode->vsync_start +
+ tv_mode->vsync_len;
+ mode->vtotal = mode->vdisplay +
+ tv_mode->vi_end_f1 + 1;
+ } else {
+ mode->vsync_start = mode->vdisplay +
+ tv_mode->vsync_start_f1 + 1 +
+ tv_mode->vsync_start_f2 + 1;
+ mode->vsync_end = mode->vsync_start +
+ 2 * tv_mode->vsync_len;
+ mode->vtotal = mode->vdisplay +
+ tv_mode->vi_end_f1 + 1 +
+ tv_mode->vi_end_f2 + 1;
+ }
+
+ /* TV has its own notion of sync and other mode flags, so clear them. */
+ mode->flags = 0;
+
+ snprintf(mode->name, sizeof(mode->name),
+ "%dx%d%c (%s)",
+ mode->hdisplay, mode->vdisplay,
+ tv_mode->progressive ? 'p' : 'i',
+ tv_mode->name);
+}
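+
+/*
+ * Worked example of the conversion above for the NTSC-M tv_modes[] entry:
+ *   clock    = 108000 / (8 >> 1) = 27000 kHz (interlace halves the
+ *                                  effective oversample divisor)
+ *   hdisplay = 836 - 124         = 712
+ *   vdisplay = 2 * (240 + 1)     = 482
+ * yielding a mode named "712x482i (NTSC-M)".
+ */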
+
+static void intel_tv_scale_mode_horiz(struct drm_display_mode *mode,
+ int hdisplay, int left_margin,
+ int right_margin)
+{
+ int hsync_start = mode->hsync_start - mode->hdisplay + right_margin;
+ int hsync_end = mode->hsync_end - mode->hdisplay + right_margin;
+ int new_htotal = mode->htotal * hdisplay /
+ (mode->hdisplay - left_margin - right_margin);
+
+ mode->clock = mode->clock * new_htotal / mode->htotal;
+
+ mode->hdisplay = hdisplay;
+ mode->hsync_start = hdisplay + hsync_start * new_htotal / mode->htotal;
+ mode->hsync_end = hdisplay + hsync_end * new_htotal / mode->htotal;
+ mode->htotal = new_htotal;
+}
+
+static void intel_tv_scale_mode_vert(struct drm_display_mode *mode,
+ int vdisplay, int top_margin,
+ int bottom_margin)
+{
+ int vsync_start = mode->vsync_start - mode->vdisplay + bottom_margin;
+ int vsync_end = mode->vsync_end - mode->vdisplay + bottom_margin;
+ int new_vtotal = mode->vtotal * vdisplay /
+ (mode->vdisplay - top_margin - bottom_margin);
+
+ mode->clock = mode->clock * new_vtotal / mode->vtotal;
+
+ mode->vdisplay = vdisplay;
+ mode->vsync_start = vdisplay + vsync_start * new_vtotal / mode->vtotal;
+ mode->vsync_end = vdisplay + vsync_end * new_vtotal / mode->vtotal;
+ mode->vtotal = new_vtotal;
+}
+
+static void
+intel_tv_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct drm_display_mode *adjusted_mode =
+ &pipe_config->hw.adjusted_mode;
+ struct drm_display_mode mode = {};
+ u32 tv_ctl, hctl1, hctl3, vctl1, vctl2, tmp;
+ struct tv_mode tv_mode = {};
+ int hdisplay = adjusted_mode->crtc_hdisplay;
+ int vdisplay = adjusted_mode->crtc_vdisplay;
+ int xsize, ysize, xpos, ypos;
+
+ pipe_config->output_types |= BIT(INTEL_OUTPUT_TVOUT);
+
+ tv_ctl = intel_de_read(dev_priv, TV_CTL);
+ hctl1 = intel_de_read(dev_priv, TV_H_CTL_1);
+ hctl3 = intel_de_read(dev_priv, TV_H_CTL_3);
+ vctl1 = intel_de_read(dev_priv, TV_V_CTL_1);
+ vctl2 = intel_de_read(dev_priv, TV_V_CTL_2);
+
+ tv_mode.htotal = (hctl1 & TV_HTOTAL_MASK) >> TV_HTOTAL_SHIFT;
+ tv_mode.hsync_end = (hctl1 & TV_HSYNC_END_MASK) >> TV_HSYNC_END_SHIFT;
+
+ tv_mode.hblank_start = (hctl3 & TV_HBLANK_START_MASK) >> TV_HBLANK_START_SHIFT;
+ tv_mode.hblank_end = (hctl3 & TV_HBLANK_END_MASK) >> TV_HBLANK_END_SHIFT;
+
+ tv_mode.nbr_end = (vctl1 & TV_NBR_END_MASK) >> TV_NBR_END_SHIFT;
+ tv_mode.vi_end_f1 = (vctl1 & TV_VI_END_F1_MASK) >> TV_VI_END_F1_SHIFT;
+ tv_mode.vi_end_f2 = (vctl1 & TV_VI_END_F2_MASK) >> TV_VI_END_F2_SHIFT;
+
+ tv_mode.vsync_len = (vctl2 & TV_VSYNC_LEN_MASK) >> TV_VSYNC_LEN_SHIFT;
+ tv_mode.vsync_start_f1 = (vctl2 & TV_VSYNC_START_F1_MASK) >> TV_VSYNC_START_F1_SHIFT;
+ tv_mode.vsync_start_f2 = (vctl2 & TV_VSYNC_START_F2_MASK) >> TV_VSYNC_START_F2_SHIFT;
+
+ tv_mode.clock = pipe_config->port_clock;
+
+ tv_mode.progressive = tv_ctl & TV_PROGRESSIVE;
+
+ switch (tv_ctl & TV_OVERSAMPLE_MASK) {
+ case TV_OVERSAMPLE_8X:
+ tv_mode.oversample = 8;
+ break;
+ case TV_OVERSAMPLE_4X:
+ tv_mode.oversample = 4;
+ break;
+ case TV_OVERSAMPLE_2X:
+ tv_mode.oversample = 2;
+ break;
+ default:
+ tv_mode.oversample = 1;
+ break;
+ }
+
+ tmp = intel_de_read(dev_priv, TV_WIN_POS);
+ xpos = tmp >> 16;
+ ypos = tmp & 0xffff;
+
+ tmp = intel_de_read(dev_priv, TV_WIN_SIZE);
+ xsize = tmp >> 16;
+ ysize = tmp & 0xffff;
+
+ intel_tv_mode_to_mode(&mode, &tv_mode, pipe_config->port_clock);
+
+ drm_dbg_kms(&dev_priv->drm, "TV mode: " DRM_MODE_FMT "\n",
+ DRM_MODE_ARG(&mode));
+
+ intel_tv_scale_mode_horiz(&mode, hdisplay,
+ xpos, mode.hdisplay - xsize - xpos);
+ intel_tv_scale_mode_vert(&mode, vdisplay,
+ ypos, mode.vdisplay - ysize - ypos);
+
+ adjusted_mode->crtc_clock = mode.clock;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
+ adjusted_mode->crtc_clock /= 2;
+
+ /* pixel counter doesn't work on i965gm TV output */
+ if (IS_I965GM(dev_priv))
+ pipe_config->mode_flags |=
+ I915_MODE_FLAG_USE_SCANLINE_COUNTER;
+}
+
+static bool intel_tv_source_too_wide(struct drm_i915_private *dev_priv,
+ int hdisplay)
+{
+ return DISPLAY_VER(dev_priv) == 3 && hdisplay > 1024;
+}
+
+static bool intel_tv_vert_scaling(const struct drm_display_mode *tv_mode,
+ const struct drm_connector_state *conn_state,
+ int vdisplay)
+{
+ return tv_mode->crtc_vdisplay -
+ conn_state->tv.margins.top -
+ conn_state->tv.margins.bottom !=
+ vdisplay;
+}
+
+static int
+intel_tv_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
+{
+ struct intel_atomic_state *state =
+ to_intel_atomic_state(pipe_config->uapi.state);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_tv_connector_state *tv_conn_state =
+ to_intel_tv_connector_state(conn_state);
+ const struct tv_mode *tv_mode = intel_tv_mode_find(conn_state);
+ struct drm_display_mode *adjusted_mode =
+ &pipe_config->hw.adjusted_mode;
+ int hdisplay = adjusted_mode->crtc_hdisplay;
+ int vdisplay = adjusted_mode->crtc_vdisplay;
+ int ret;
+
+ if (!tv_mode)
+ return -EINVAL;
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return -EINVAL;
+
+ pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
+
+ drm_dbg_kms(&dev_priv->drm, "forcing bpc to 8 for TV\n");
+ pipe_config->pipe_bpp = 8*3;
+
+ pipe_config->port_clock = tv_mode->clock;
+
+ ret = intel_dpll_crtc_compute_clock(state, crtc);
+ if (ret)
+ return ret;
+
+ pipe_config->clock_set = true;
+
+ intel_tv_mode_to_mode(adjusted_mode, tv_mode, pipe_config->port_clock);
+ drm_mode_set_crtcinfo(adjusted_mode, 0);
+
+ if (intel_tv_source_too_wide(dev_priv, hdisplay) ||
+ !intel_tv_vert_scaling(adjusted_mode, conn_state, vdisplay)) {
+ int extra, top, bottom;
+
+ extra = adjusted_mode->crtc_vdisplay - vdisplay;
+
+ if (extra < 0) {
+ drm_dbg_kms(&dev_priv->drm,
+ "No vertical scaling for >1024 pixel wide modes\n");
+ return -EINVAL;
+ }
+
+ /* Need to turn off the vertical filter and center the image */
+
+ /* Attempt to maintain the relative sizes of the margins */
+ top = conn_state->tv.margins.top;
+ bottom = conn_state->tv.margins.bottom;
+
+ if (top + bottom)
+ top = extra * top / (top + bottom);
+ else
+ top = extra / 2;
+ bottom = extra - top;
+
+ tv_conn_state->margins.top = top;
+ tv_conn_state->margins.bottom = bottom;
+
+ tv_conn_state->bypass_vfilter = true;
+
+ if (!tv_mode->progressive) {
+ adjusted_mode->clock /= 2;
+ adjusted_mode->crtc_clock /= 2;
+ adjusted_mode->flags |= DRM_MODE_FLAG_INTERLACE;
+ }
+ } else {
+ tv_conn_state->margins.top = conn_state->tv.margins.top;
+ tv_conn_state->margins.bottom = conn_state->tv.margins.bottom;
+
+ tv_conn_state->bypass_vfilter = false;
+ }
+
+ drm_dbg_kms(&dev_priv->drm, "TV mode: " DRM_MODE_FMT "\n",
+ DRM_MODE_ARG(adjusted_mode));
+
+ /*
+ * The pipe scanline counter behaviour looks as follows when
+ * using the TV encoder:
+ *
+ * time ->
+ *
+ * dsl=vtotal-1 | |
+ * || ||
+ * ___| | ___| |
+ * / | / |
+ * / | / |
+ * dsl=0 ___/ |_____/ |
+ * | | | | | |
+ * ^ ^ ^ ^ ^
+ * | | | | pipe vblank/first part of tv vblank
+ * | | | bottom margin
+ * | | active
+ * | top margin
+ * remainder of tv vblank
+ *
+ * When the TV encoder is used the pipe wants to run faster
+ * than the expected rate. During the active portion the TV
+ * encoder stalls the pipe every few lines to keep it in
+ * check. When the TV encoder reaches the bottom margin the
+ * pipe simply stops. Once we reach the TV vblank the pipe is
+ * no longer stalled and it runs at the max rate (apparently
+ * oversample clock on gen3, cdclk on gen4). Once the pipe
+ * reaches the pipe vtotal the pipe stops for the remainder
+ * of the TV vblank/top margin. The pipe starts up again when
+ * the TV encoder exits the top margin.
+ *
+ * To avoid huge hassles for vblank timestamping we scale
+ * the pipe timings as if the pipe always runs at the average
+ * rate it maintains during the active period. This also
+ * gives us a reasonable guesstimate as to the pixel rate.
+ * Due to the variation in the actual pipe speed the scanline
+ * counter will give us slightly erroneous results during the
+ * TV vblank/margins. But since vtotal was selected such that
+ * it matches the average rate of the pipe during the active
+ * portion the error shouldn't cause any serious grief to
+ * vblank timestamps.
+ *
+ * For posterity here is the empirically derived formula
+ * that gives us the maximum length of the pipe vblank
+ * we can use without causing display corruption. Following
+ * this would allow us to have a ticking scanline counter
+ * everywhere except during the bottom margin (there the
+ * pipe always stops). Ie. this would eliminate the second
+ * flat portion of the above graph. However this would also
+ * complicate vblank timestamping as the pipe vtotal would
+ * no longer match the average rate the pipe runs at during
+ * the active portion. Hence following this formula seems
+ * more trouble than it's worth.
+ *
+ * if (GRAPHICS_VER(dev_priv) == 4) {
+ * num = cdclk * (tv_mode->oversample >> !tv_mode->progressive);
+ * den = tv_mode->clock;
+ * } else {
+ * num = tv_mode->oversample >> !tv_mode->progressive;
+ * den = 1;
+ * }
+ * max_pipe_vblank_len ~=
+ * (num * tv_htotal * (tv_vblank_len + top_margin)) /
+ * (den * pipe_htotal);
+ */
+ intel_tv_scale_mode_horiz(adjusted_mode, hdisplay,
+ conn_state->tv.margins.left,
+ conn_state->tv.margins.right);
+ intel_tv_scale_mode_vert(adjusted_mode, vdisplay,
+ tv_conn_state->margins.top,
+ tv_conn_state->margins.bottom);
+ drm_mode_set_crtcinfo(adjusted_mode, 0);
+ adjusted_mode->name[0] = '\0';
+
+ /* pixel counter doesn't work on i965gm TV output */
+ if (IS_I965GM(dev_priv))
+ pipe_config->mode_flags |=
+ I915_MODE_FLAG_USE_SCANLINE_COUNTER;
+
+ return 0;
+}
+
+static void
+set_tv_mode_timings(struct drm_i915_private *dev_priv,
+ const struct tv_mode *tv_mode,
+ bool burst_ena)
+{
+ u32 hctl1, hctl2, hctl3;
+ u32 vctl1, vctl2, vctl3, vctl4, vctl5, vctl6, vctl7;
+
+ hctl1 = (tv_mode->hsync_end << TV_HSYNC_END_SHIFT) |
+ (tv_mode->htotal << TV_HTOTAL_SHIFT);
+
+ hctl2 = (tv_mode->hburst_start << 16) |
+ (tv_mode->hburst_len << TV_HBURST_LEN_SHIFT);
+
+ if (burst_ena)
+ hctl2 |= TV_BURST_ENA;
+
+ hctl3 = (tv_mode->hblank_start << TV_HBLANK_START_SHIFT) |
+ (tv_mode->hblank_end << TV_HBLANK_END_SHIFT);
+
+ vctl1 = (tv_mode->nbr_end << TV_NBR_END_SHIFT) |
+ (tv_mode->vi_end_f1 << TV_VI_END_F1_SHIFT) |
+ (tv_mode->vi_end_f2 << TV_VI_END_F2_SHIFT);
+
+ vctl2 = (tv_mode->vsync_len << TV_VSYNC_LEN_SHIFT) |
+ (tv_mode->vsync_start_f1 << TV_VSYNC_START_F1_SHIFT) |
+ (tv_mode->vsync_start_f2 << TV_VSYNC_START_F2_SHIFT);
+
+ vctl3 = (tv_mode->veq_len << TV_VEQ_LEN_SHIFT) |
+ (tv_mode->veq_start_f1 << TV_VEQ_START_F1_SHIFT) |
+ (tv_mode->veq_start_f2 << TV_VEQ_START_F2_SHIFT);
+
+ if (tv_mode->veq_ena)
+ vctl3 |= TV_EQUAL_ENA;
+
+ vctl4 = (tv_mode->vburst_start_f1 << TV_VBURST_START_F1_SHIFT) |
+ (tv_mode->vburst_end_f1 << TV_VBURST_END_F1_SHIFT);
+
+ vctl5 = (tv_mode->vburst_start_f2 << TV_VBURST_START_F2_SHIFT) |
+ (tv_mode->vburst_end_f2 << TV_VBURST_END_F2_SHIFT);
+
+ vctl6 = (tv_mode->vburst_start_f3 << TV_VBURST_START_F3_SHIFT) |
+ (tv_mode->vburst_end_f3 << TV_VBURST_END_F3_SHIFT);
+
+ vctl7 = (tv_mode->vburst_start_f4 << TV_VBURST_START_F4_SHIFT) |
+ (tv_mode->vburst_end_f4 << TV_VBURST_END_F4_SHIFT);
+
+ intel_de_write(dev_priv, TV_H_CTL_1, hctl1);
+ intel_de_write(dev_priv, TV_H_CTL_2, hctl2);
+ intel_de_write(dev_priv, TV_H_CTL_3, hctl3);
+ intel_de_write(dev_priv, TV_V_CTL_1, vctl1);
+ intel_de_write(dev_priv, TV_V_CTL_2, vctl2);
+ intel_de_write(dev_priv, TV_V_CTL_3, vctl3);
+ intel_de_write(dev_priv, TV_V_CTL_4, vctl4);
+ intel_de_write(dev_priv, TV_V_CTL_5, vctl5);
+ intel_de_write(dev_priv, TV_V_CTL_6, vctl6);
+ intel_de_write(dev_priv, TV_V_CTL_7, vctl7);
+}
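+
+/*
+ * Example of the hctl1 packing above for NTSC-M, assuming the usual field
+ * layout (TV_HSYNC_END_SHIFT == 16, TV_HTOTAL_SHIFT == 0; the shifts are
+ * defined outside this file):
+ *   hctl1 = (64 << 16) | 857 = 0x00400359
+ */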
+
+static void set_color_conversion(struct drm_i915_private *dev_priv,
+ const struct color_conversion *color_conversion)
+{
+ if (!color_conversion)
+ return;
+
+ intel_de_write(dev_priv, TV_CSC_Y,
+ (color_conversion->ry << 16) | color_conversion->gy);
+ intel_de_write(dev_priv, TV_CSC_Y2,
+ (color_conversion->by << 16) | color_conversion->ay);
+ intel_de_write(dev_priv, TV_CSC_U,
+ (color_conversion->ru << 16) | color_conversion->gu);
+ intel_de_write(dev_priv, TV_CSC_U2,
+ (color_conversion->bu << 16) | color_conversion->au);
+ intel_de_write(dev_priv, TV_CSC_V,
+ (color_conversion->rv << 16) | color_conversion->gv);
+ intel_de_write(dev_priv, TV_CSC_V2,
+ (color_conversion->bv << 16) | color_conversion->av);
+}
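+
+/*
+ * Example of the register packing above for ntsc_m_csc_composite:
+ *   TV_CSC_Y  = (0x0332 << 16) | 0x012d = 0x0332012d  (ry | gy)
+ *   TV_CSC_Y2 = (0x07d3 << 16) | 0x0104 = 0x07d30104  (by | ay)
+ */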
+
+static void intel_tv_pre_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+ struct intel_tv *intel_tv = enc_to_tv(encoder);
+ const struct intel_tv_connector_state *tv_conn_state =
+ to_intel_tv_connector_state(conn_state);
+ const struct tv_mode *tv_mode = intel_tv_mode_find(conn_state);
+ u32 tv_ctl, tv_filter_ctl;
+ u32 scctl1, scctl2, scctl3;
+ int i, j;
+ const struct video_levels *video_levels;
+ const struct color_conversion *color_conversion;
+ bool burst_ena;
+ int xpos, ypos;
+ unsigned int xsize, ysize;
+
+ if (!tv_mode)
+ return; /* can't happen (mode_prepare prevents this) */
+
+ tv_ctl = intel_de_read(dev_priv, TV_CTL);
+ tv_ctl &= TV_CTL_SAVE;
+
+ switch (intel_tv->type) {
+ default:
+ case DRM_MODE_CONNECTOR_Unknown:
+ case DRM_MODE_CONNECTOR_Composite:
+ tv_ctl |= TV_ENC_OUTPUT_COMPOSITE;
+ video_levels = tv_mode->composite_levels;
+ color_conversion = tv_mode->composite_color;
+ burst_ena = tv_mode->burst_ena;
+ break;
+ case DRM_MODE_CONNECTOR_Component:
+ tv_ctl |= TV_ENC_OUTPUT_COMPONENT;
+ video_levels = &component_levels;
+ if (tv_mode->burst_ena)
+ color_conversion = &sdtv_csc_yprpb;
+ else
+ color_conversion = &hdtv_csc_yprpb;
+ burst_ena = false;
+ break;
+ case DRM_MODE_CONNECTOR_SVIDEO:
+ tv_ctl |= TV_ENC_OUTPUT_SVIDEO;
+ video_levels = tv_mode->svideo_levels;
+ color_conversion = tv_mode->svideo_color;
+ burst_ena = tv_mode->burst_ena;
+ break;
+ }
+
+ tv_ctl |= TV_ENC_PIPE_SEL(crtc->pipe);
+
+ switch (tv_mode->oversample) {
+ case 8:
+ tv_ctl |= TV_OVERSAMPLE_8X;
+ break;
+ case 4:
+ tv_ctl |= TV_OVERSAMPLE_4X;
+ break;
+ case 2:
+ tv_ctl |= TV_OVERSAMPLE_2X;
+ break;
+ default:
+ tv_ctl |= TV_OVERSAMPLE_NONE;
+ break;
+ }
+
+ if (tv_mode->progressive)
+ tv_ctl |= TV_PROGRESSIVE;
+ if (tv_mode->trilevel_sync)
+ tv_ctl |= TV_TRILEVEL_SYNC;
+ if (tv_mode->pal_burst)
+ tv_ctl |= TV_PAL_BURST;
+
+ scctl1 = 0;
+ if (tv_mode->dda1_inc)
+ scctl1 |= TV_SC_DDA1_EN;
+ if (tv_mode->dda2_inc)
+ scctl1 |= TV_SC_DDA2_EN;
+ if (tv_mode->dda3_inc)
+ scctl1 |= TV_SC_DDA3_EN;
+ scctl1 |= tv_mode->sc_reset;
+ if (video_levels)
+ scctl1 |= video_levels->burst << TV_BURST_LEVEL_SHIFT;
+ scctl1 |= tv_mode->dda1_inc << TV_SCDDA1_INC_SHIFT;
+
+ scctl2 = tv_mode->dda2_size << TV_SCDDA2_SIZE_SHIFT |
+ tv_mode->dda2_inc << TV_SCDDA2_INC_SHIFT;
+
+ scctl3 = tv_mode->dda3_size << TV_SCDDA3_SIZE_SHIFT |
+ tv_mode->dda3_inc << TV_SCDDA3_INC_SHIFT;
+
+ /* Enable two fixes for the chips that need them. */
+ if (IS_I915GM(dev_priv))
+ tv_ctl |= TV_ENC_C0_FIX | TV_ENC_SDP_FIX;
+
+ set_tv_mode_timings(dev_priv, tv_mode, burst_ena);
+
+ intel_de_write(dev_priv, TV_SC_CTL_1, scctl1);
+ intel_de_write(dev_priv, TV_SC_CTL_2, scctl2);
+ intel_de_write(dev_priv, TV_SC_CTL_3, scctl3);
+
+ set_color_conversion(dev_priv, color_conversion);
+
+ if (DISPLAY_VER(dev_priv) >= 4)
+ intel_de_write(dev_priv, TV_CLR_KNOBS, 0x00404000);
+ else
+ intel_de_write(dev_priv, TV_CLR_KNOBS, 0x00606000);
+
+ if (video_levels)
+ intel_de_write(dev_priv, TV_CLR_LEVEL,
+ ((video_levels->black << TV_BLACK_LEVEL_SHIFT) | (video_levels->blank << TV_BLANK_LEVEL_SHIFT)));
+
+ assert_transcoder_disabled(dev_priv, pipe_config->cpu_transcoder);
+
+ /* Filter ctl must be set before TV_WIN_SIZE */
+ tv_filter_ctl = TV_AUTO_SCALE;
+ if (tv_conn_state->bypass_vfilter)
+ tv_filter_ctl |= TV_V_FILTER_BYPASS;
+ intel_de_write(dev_priv, TV_FILTER_CTL_1, tv_filter_ctl);
+
+ xsize = tv_mode->hblank_start - tv_mode->hblank_end;
+ ysize = intel_tv_mode_vdisplay(tv_mode);
+
+ xpos = conn_state->tv.margins.left;
+ ypos = tv_conn_state->margins.top;
+ xsize -= (conn_state->tv.margins.left +
+ conn_state->tv.margins.right);
+ ysize -= (tv_conn_state->margins.top +
+ tv_conn_state->margins.bottom);
+ intel_de_write(dev_priv, TV_WIN_POS, (xpos << 16) | ypos);
+ intel_de_write(dev_priv, TV_WIN_SIZE, (xsize << 16) | ysize);
+
+ j = 0;
+ for (i = 0; i < 60; i++)
+ intel_de_write(dev_priv, TV_H_LUMA(i),
+ tv_mode->filter_table[j++]);
+ for (i = 0; i < 60; i++)
+ intel_de_write(dev_priv, TV_H_CHROMA(i),
+ tv_mode->filter_table[j++]);
+ for (i = 0; i < 43; i++)
+ intel_de_write(dev_priv, TV_V_LUMA(i),
+ tv_mode->filter_table[j++]);
+ for (i = 0; i < 43; i++)
+ intel_de_write(dev_priv, TV_V_CHROMA(i),
+ tv_mode->filter_table[j++]);
+ intel_de_write(dev_priv, TV_DAC,
+ intel_de_read(dev_priv, TV_DAC) & TV_DAC_SAVE);
+ intel_de_write(dev_priv, TV_CTL, tv_ctl);
+}
+
+static int
+intel_tv_detect_type(struct intel_tv *intel_tv,
+ struct drm_connector *connector)
+{
+ struct intel_crtc *crtc = to_intel_crtc(connector->state->crtc);
+ struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ u32 tv_ctl, save_tv_ctl;
+ u32 tv_dac, save_tv_dac;
+ int type;
+
+ /* Disable TV interrupts around load detect or we'll recurse */
+ if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
+ spin_lock_irq(&dev_priv->irq_lock);
+ i915_disable_pipestat(dev_priv, 0,
+ PIPE_HOTPLUG_INTERRUPT_STATUS |
+ PIPE_HOTPLUG_TV_INTERRUPT_STATUS);
+ spin_unlock_irq(&dev_priv->irq_lock);
+ }
+
+ save_tv_dac = tv_dac = intel_de_read(dev_priv, TV_DAC);
+ save_tv_ctl = tv_ctl = intel_de_read(dev_priv, TV_CTL);
+
+ /* Poll for TV detection */
+ tv_ctl &= ~(TV_ENC_ENABLE | TV_ENC_PIPE_SEL_MASK | TV_TEST_MODE_MASK);
+ tv_ctl |= TV_TEST_MODE_MONITOR_DETECT;
+ tv_ctl |= TV_ENC_PIPE_SEL(crtc->pipe);
+
+ tv_dac &= ~(TVDAC_SENSE_MASK | DAC_A_MASK | DAC_B_MASK | DAC_C_MASK);
+ tv_dac |= (TVDAC_STATE_CHG_EN |
+ TVDAC_A_SENSE_CTL |
+ TVDAC_B_SENSE_CTL |
+ TVDAC_C_SENSE_CTL |
+ DAC_CTL_OVERRIDE |
+ DAC_A_0_7_V |
+ DAC_B_0_7_V |
+ DAC_C_0_7_V);
+
+ /*
+	 * The TV sense state should be cleared to zero on the Cantiga
+	 * platform, otherwise the TV is misdetected. This is a hardware
+	 * requirement.
+ */
+ if (IS_GM45(dev_priv))
+ tv_dac &= ~(TVDAC_STATE_CHG_EN | TVDAC_A_SENSE_CTL |
+ TVDAC_B_SENSE_CTL | TVDAC_C_SENSE_CTL);
+
+ intel_de_write(dev_priv, TV_CTL, tv_ctl);
+ intel_de_write(dev_priv, TV_DAC, tv_dac);
+ intel_de_posting_read(dev_priv, TV_DAC);
+
+ intel_crtc_wait_for_next_vblank(crtc);
+
+ type = -1;
+ tv_dac = intel_de_read(dev_priv, TV_DAC);
+ drm_dbg_kms(&dev_priv->drm, "TV detected: %x, %x\n", tv_ctl, tv_dac);
+ /*
+ * A B C
+ * 0 1 1 Composite
+ * 1 0 X svideo
+ * 0 0 0 Component
+ */
+ if ((tv_dac & TVDAC_SENSE_MASK) == (TVDAC_B_SENSE | TVDAC_C_SENSE)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Detected Composite TV connection\n");
+ type = DRM_MODE_CONNECTOR_Composite;
+ } else if ((tv_dac & (TVDAC_A_SENSE|TVDAC_B_SENSE)) == TVDAC_A_SENSE) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Detected S-Video TV connection\n");
+ type = DRM_MODE_CONNECTOR_SVIDEO;
+ } else if ((tv_dac & TVDAC_SENSE_MASK) == 0) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Detected Component TV connection\n");
+ type = DRM_MODE_CONNECTOR_Component;
+ } else {
+ drm_dbg_kms(&dev_priv->drm, "Unrecognised TV connection\n");
+ type = -1;
+ }
+
+ intel_de_write(dev_priv, TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN);
+ intel_de_write(dev_priv, TV_CTL, save_tv_ctl);
+ intel_de_posting_read(dev_priv, TV_CTL);
+
+ /* For unknown reasons the hw barfs if we don't do this vblank wait. */
+ intel_crtc_wait_for_next_vblank(crtc);
+
+ /* Restore interrupt config */
+ if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
+ spin_lock_irq(&dev_priv->irq_lock);
+ i915_enable_pipestat(dev_priv, 0,
+ PIPE_HOTPLUG_INTERRUPT_STATUS |
+ PIPE_HOTPLUG_TV_INTERRUPT_STATUS);
+ spin_unlock_irq(&dev_priv->irq_lock);
+ }
+
+ return type;
+}
+
+/*
+ * Here we set an accurate TV format according to the connector type,
+ * i.e. a Component TV should not be assigned an NTSC or PAL mode.
+ */
+static void intel_tv_find_better_format(struct drm_connector *connector)
+{
+ struct intel_tv *intel_tv = intel_attached_tv(to_intel_connector(connector));
+ const struct tv_mode *tv_mode = intel_tv_mode_find(connector->state);
+ int i;
+
+ /* Component supports everything so we can keep the current mode */
+ if (intel_tv->type == DRM_MODE_CONNECTOR_Component)
+ return;
+
+ /* If the current mode is fine don't change it */
+ if (!tv_mode->component_only)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(tv_modes); i++) {
+ tv_mode = &tv_modes[i];
+
+ if (!tv_mode->component_only)
+ break;
+ }
+
+ connector->state->tv.mode = i;
+}
+
+static int
+intel_tv_detect(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx,
+ bool force)
+{
+ struct drm_i915_private *i915 = to_i915(connector->dev);
+ struct intel_tv *intel_tv = intel_attached_tv(to_intel_connector(connector));
+ enum drm_connector_status status;
+ int type;
+
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] force=%d\n",
+ connector->base.id, connector->name, force);
+
+ if (!INTEL_DISPLAY_ENABLED(i915))
+ return connector_status_disconnected;
+
+ if (force) {
+ struct intel_load_detect_pipe tmp;
+ int ret;
+
+ ret = intel_get_load_detect_pipe(connector, &tmp, ctx);
+ if (ret < 0)
+ return ret;
+
+ if (ret > 0) {
+ type = intel_tv_detect_type(intel_tv, connector);
+ intel_release_load_detect_pipe(connector, &tmp, ctx);
+ status = type < 0 ?
+ connector_status_disconnected :
+ connector_status_connected;
+ } else
+ status = connector_status_unknown;
+
+ if (status == connector_status_connected) {
+ intel_tv->type = type;
+ intel_tv_find_better_format(connector);
+ }
+
+ return status;
+ } else
+ return connector->status;
+}
+
+static const struct input_res {
+ u16 w, h;
+} input_res_table[] = {
+ { 640, 480 },
+ { 800, 600 },
+ { 1024, 768 },
+ { 1280, 1024 },
+ { 848, 480 },
+ { 1280, 720 },
+ { 1920, 1080 },
+};
+
+/* Choose the preferred mode according to the line count of the TV format */
+static bool
+intel_tv_is_preferred_mode(const struct drm_display_mode *mode,
+ const struct tv_mode *tv_mode)
+{
+ int vdisplay = intel_tv_mode_vdisplay(tv_mode);
+
+ /* prefer 480 line modes for all SD TV modes */
+ if (vdisplay <= 576)
+ vdisplay = 480;
+
+ return vdisplay == mode->vdisplay;
+}
+
+static void
+intel_tv_set_mode_type(struct drm_display_mode *mode,
+ const struct tv_mode *tv_mode)
+{
+ mode->type = DRM_MODE_TYPE_DRIVER;
+
+ if (intel_tv_is_preferred_mode(mode, tv_mode))
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+}
+
+static int
+intel_tv_get_modes(struct drm_connector *connector)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
+ const struct tv_mode *tv_mode = intel_tv_mode_find(connector->state);
+ int i, count = 0;
+
+ for (i = 0; i < ARRAY_SIZE(input_res_table); i++) {
+ const struct input_res *input = &input_res_table[i];
+ struct drm_display_mode *mode;
+
+ if (input->w > 1024 &&
+ !tv_mode->progressive &&
+ !tv_mode->component_only)
+ continue;
+
+ /* no vertical scaling with wide sources on gen3 */
+ if (DISPLAY_VER(dev_priv) == 3 && input->w > 1024 &&
+ input->h > intel_tv_mode_vdisplay(tv_mode))
+ continue;
+
+ mode = drm_mode_create(connector->dev);
+ if (!mode)
+ continue;
+
+ /*
+ * We take the TV mode and scale it to look
+ * like it had the expected h/vdisplay. This
+ * provides the most information to userspace
+ * about the actual timings of the mode. We
+ * do ignore the margins though.
+ */
+ intel_tv_mode_to_mode(mode, tv_mode, tv_mode->clock);
+ if (count == 0) {
+ drm_dbg_kms(&dev_priv->drm, "TV mode: " DRM_MODE_FMT "\n",
+ DRM_MODE_ARG(mode));
+ }
+ intel_tv_scale_mode_horiz(mode, input->w, 0, 0);
+ intel_tv_scale_mode_vert(mode, input->h, 0, 0);
+ intel_tv_set_mode_type(mode, tv_mode);
+
+ drm_mode_set_name(mode);
+
+ drm_mode_probed_add(connector, mode);
+ count++;
+ }
+
+ return count;
+}
+
+static const struct drm_connector_funcs intel_tv_connector_funcs = {
+ .late_register = intel_connector_register,
+ .early_unregister = intel_connector_unregister,
+ .destroy = intel_connector_destroy,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_duplicate_state = intel_tv_connector_duplicate_state,
+};
+
+static int intel_tv_atomic_check(struct drm_connector *connector,
+ struct drm_atomic_state *state)
+{
+ struct drm_connector_state *new_state;
+ struct drm_crtc_state *new_crtc_state;
+ struct drm_connector_state *old_state;
+
+ new_state = drm_atomic_get_new_connector_state(state, connector);
+ if (!new_state->crtc)
+ return 0;
+
+ old_state = drm_atomic_get_old_connector_state(state, connector);
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, new_state->crtc);
+
+ if (old_state->tv.mode != new_state->tv.mode ||
+ old_state->tv.margins.left != new_state->tv.margins.left ||
+ old_state->tv.margins.right != new_state->tv.margins.right ||
+ old_state->tv.margins.top != new_state->tv.margins.top ||
+ old_state->tv.margins.bottom != new_state->tv.margins.bottom) {
+ /* Force a modeset. */
+
+ new_crtc_state->connectors_changed = true;
+ }
+
+ return 0;
+}
+
+static const struct drm_connector_helper_funcs intel_tv_connector_helper_funcs = {
+ .detect_ctx = intel_tv_detect,
+ .mode_valid = intel_tv_mode_valid,
+ .get_modes = intel_tv_get_modes,
+ .atomic_check = intel_tv_atomic_check,
+};
+
+static const struct drm_encoder_funcs intel_tv_enc_funcs = {
+ .destroy = intel_encoder_destroy,
+};
+
+void
+intel_tv_init(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = &dev_priv->drm;
+ struct drm_connector *connector;
+ struct intel_tv *intel_tv;
+ struct intel_encoder *intel_encoder;
+ struct intel_connector *intel_connector;
+ u32 tv_dac_on, tv_dac_off, save_tv_dac;
+ const char *tv_format_names[ARRAY_SIZE(tv_modes)];
+ int i, initial_mode = 0;
+ struct drm_connector_state *state;
+
+ if ((intel_de_read(dev_priv, TV_CTL) & TV_FUSE_STATE_MASK) == TV_FUSE_STATE_DISABLED)
+ return;
+
+ if (!intel_bios_is_tv_present(dev_priv)) {
+ drm_dbg_kms(&dev_priv->drm, "Integrated TV is not present.\n");
+ return;
+ }
+
+ /*
+	 * Sanity check the TV output by checking whether the
+	 * DAC register holds a value.
+ */
+ save_tv_dac = intel_de_read(dev_priv, TV_DAC);
+
+ intel_de_write(dev_priv, TV_DAC, save_tv_dac | TVDAC_STATE_CHG_EN);
+ tv_dac_on = intel_de_read(dev_priv, TV_DAC);
+
+ intel_de_write(dev_priv, TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN);
+ tv_dac_off = intel_de_read(dev_priv, TV_DAC);
+
+ intel_de_write(dev_priv, TV_DAC, save_tv_dac);
+
+ /*
+ * If the register does not hold the state change enable
+	 * bit (either as a 0 or a 1), assume it doesn't really
+	 * exist.
+ */
+ if ((tv_dac_on & TVDAC_STATE_CHG_EN) == 0 ||
+ (tv_dac_off & TVDAC_STATE_CHG_EN) != 0)
+ return;
+
+ intel_tv = kzalloc(sizeof(*intel_tv), GFP_KERNEL);
+	if (!intel_tv)
+		return;
+
+ intel_connector = intel_connector_alloc();
+ if (!intel_connector) {
+ kfree(intel_tv);
+ return;
+ }
+
+ intel_encoder = &intel_tv->base;
+ connector = &intel_connector->base;
+ state = connector->state;
+
+ /*
+	 * The documentation, for the older chipsets at least, recommends
+	 * using a polling method rather than hotplug detection for TVs.
+	 * This is because in order to perform hotplug detection, the PLLs
+	 * for the TV must be kept alive, increasing power drain and starving
+	 * bandwidth from other encoders. Notably, for instance, it causes
+	 * pipe underruns on Crestline when this encoder is supposedly idle.
+ *
+ * More recent chipsets favour HDMI rather than integrated S-Video.
+ */
+ intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+
+ drm_connector_init(dev, connector, &intel_tv_connector_funcs,
+ DRM_MODE_CONNECTOR_SVIDEO);
+
+ drm_encoder_init(dev, &intel_encoder->base, &intel_tv_enc_funcs,
+ DRM_MODE_ENCODER_TVDAC, "TV");
+
+ intel_encoder->compute_config = intel_tv_compute_config;
+ intel_encoder->get_config = intel_tv_get_config;
+ intel_encoder->pre_enable = intel_tv_pre_enable;
+ intel_encoder->enable = intel_enable_tv;
+ intel_encoder->disable = intel_disable_tv;
+ intel_encoder->get_hw_state = intel_tv_get_hw_state;
+ intel_connector->get_hw_state = intel_connector_get_hw_state;
+
+ intel_connector_attach_encoder(intel_connector, intel_encoder);
+
+ intel_encoder->type = INTEL_OUTPUT_TVOUT;
+ intel_encoder->power_domain = POWER_DOMAIN_PORT_OTHER;
+ intel_encoder->port = PORT_NONE;
+ intel_encoder->pipe_mask = ~0;
+ intel_encoder->cloneable = 0;
+ intel_tv->type = DRM_MODE_CONNECTOR_Unknown;
+
+ /* BIOS margin values */
+ state->tv.margins.left = 54;
+ state->tv.margins.top = 36;
+ state->tv.margins.right = 46;
+ state->tv.margins.bottom = 37;
+
+ state->tv.mode = initial_mode;
+
+ drm_connector_helper_add(connector, &intel_tv_connector_helper_funcs);
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
+
+ /* Create TV properties then attach current values */
+ for (i = 0; i < ARRAY_SIZE(tv_modes); i++) {
+ /* 1080p50/1080p60 not supported on gen3 */
+ if (DISPLAY_VER(dev_priv) == 3 &&
+ tv_modes[i].oversample == 1)
+ break;
+
+ tv_format_names[i] = tv_modes[i].name;
+ }
+ drm_mode_create_tv_properties(dev, i, tv_format_names);
+
+ drm_object_attach_property(&connector->base, dev->mode_config.tv_mode_property,
+ state->tv.mode);
+ drm_object_attach_property(&connector->base,
+ dev->mode_config.tv_left_margin_property,
+ state->tv.margins.left);
+ drm_object_attach_property(&connector->base,
+ dev->mode_config.tv_top_margin_property,
+ state->tv.margins.top);
+ drm_object_attach_property(&connector->base,
+ dev->mode_config.tv_right_margin_property,
+ state->tv.margins.right);
+ drm_object_attach_property(&connector->base,
+ dev->mode_config.tv_bottom_margin_property,
+ state->tv.margins.bottom);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_tv.h b/drivers/gpu/drm/i915/display/intel_tv.h
new file mode 100644
index 000000000..44518575e
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_tv.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_TV_H__
+#define __INTEL_TV_H__
+
+struct drm_i915_private;
+
+void intel_tv_init(struct drm_i915_private *dev_priv);
+
+#endif /* __INTEL_TV_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
new file mode 100644
index 000000000..a9f44abfc
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
@@ -0,0 +1,1067 @@
+/*
+ * Copyright © 2006-2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+/*
+ * This information is private to VBT parsing in intel_bios.c.
+ *
+ * Please do NOT include anywhere else.
+ */
+#ifndef _INTEL_BIOS_PRIVATE
+#error "intel_vbt_defs.h is private to intel_bios.c"
+#endif
+
+#ifndef _INTEL_VBT_DEFS_H_
+#define _INTEL_VBT_DEFS_H_
+
+#include "intel_bios.h"
+
+/**
+ * struct vbt_header - VBT Header structure
+ * @signature: VBT signature, always starts with "$VBT"
+ * @version: Version of this structure
+ * @header_size: Size of this structure
+ * @vbt_size: Size of VBT (VBT Header, BDB Header and data blocks)
+ * @vbt_checksum: Checksum
+ * @reserved0: Reserved
+ * @bdb_offset: Offset of &struct bdb_header from beginning of VBT
+ * @aim_offset: Offsets of add-in data blocks from beginning of VBT
+ */
+struct vbt_header {
+ u8 signature[20];
+ u16 version;
+ u16 header_size;
+ u16 vbt_size;
+ u8 vbt_checksum;
+ u8 reserved0;
+ u32 bdb_offset;
+ u32 aim_offset[4];
+} __packed;
+
+/**
+ * struct bdb_header - BDB Header structure
+ * @signature: BDB signature "BIOS_DATA_BLOCK"
+ * @version: Version of the data block definitions
+ * @header_size: Size of this structure
+ * @bdb_size: Size of BDB (BDB Header and data blocks)
+ */
+struct bdb_header {
+ u8 signature[16];
+ u16 version;
+ u16 header_size;
+ u16 bdb_size;
+} __packed;
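+
+/*
+ * A minimal sketch, not part of the VBT parser proper, of how the BDB
+ * header is located from a raw VBT blob using @bdb_offset above;
+ * "vbt_to_bdb" is an illustrative name and assumes @vbt has already
+ * been size- and signature-validated.
+ */
+static inline const struct bdb_header *
+vbt_to_bdb(const struct vbt_header *vbt)
+{
+	return (const void *)vbt + vbt->bdb_offset;
+}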
+
+/*
+ * BDB version number dependencies are documented as:
+ *
+ * <start>+
+ * indicates the field was introduced in version <start>
+ * and is still valid
+ *
+ * <start>-<end>
+ * indicates the field was introduced in version <start>
+ * and obsoleted in version <end>+1.
+ *
+ * ??? indicates the specific version number is unknown
+ */
+
+/*
+ * There are several types of BIOS data blocks (BDBs), each block has
+ * an ID and size in the first 3 bytes (ID in first, size in next 2).
+ * Known types are listed below.
+ */
+enum bdb_block_id {
+ BDB_GENERAL_FEATURES = 1,
+ BDB_GENERAL_DEFINITIONS = 2,
+ BDB_OLD_TOGGLE_LIST = 3,
+ BDB_MODE_SUPPORT_LIST = 4,
+ BDB_GENERIC_MODE_TABLE = 5,
+ BDB_EXT_MMIO_REGS = 6,
+ BDB_SWF_IO = 7,
+ BDB_SWF_MMIO = 8,
+ BDB_PSR = 9,
+ BDB_MODE_REMOVAL_TABLE = 10,
+ BDB_CHILD_DEVICE_TABLE = 11,
+ BDB_DRIVER_FEATURES = 12,
+ BDB_DRIVER_PERSISTENCE = 13,
+ BDB_EXT_TABLE_PTRS = 14,
+ BDB_DOT_CLOCK_OVERRIDE = 15,
+ BDB_DISPLAY_SELECT = 16,
+ BDB_DRIVER_ROTATION = 18,
+ BDB_DISPLAY_REMOVE = 19,
+ BDB_OEM_CUSTOM = 20,
+ BDB_EFP_LIST = 21, /* workarounds for VGA hsync/vsync */
+ BDB_SDVO_LVDS_OPTIONS = 22,
+ BDB_SDVO_PANEL_DTDS = 23,
+ BDB_SDVO_LVDS_PNP_IDS = 24,
+ BDB_SDVO_LVDS_POWER_SEQ = 25,
+ BDB_TV_OPTIONS = 26,
+ BDB_EDP = 27,
+ BDB_LVDS_OPTIONS = 40,
+ BDB_LVDS_LFP_DATA_PTRS = 41,
+ BDB_LVDS_LFP_DATA = 42,
+ BDB_LVDS_BACKLIGHT = 43,
+ BDB_LFP_POWER = 44,
+ BDB_MIPI_CONFIG = 52,
+ BDB_MIPI_SEQUENCE = 53,
+ BDB_COMPRESSION_PARAMETERS = 56,
+ BDB_GENERIC_DTD = 58,
+ BDB_SKIP = 254, /* VBIOS private block, ignore */
+};
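+
+/*
+ * Illustrative only: the 3-byte block framing described above (one ID
+ * byte followed by a little-endian 16-bit size) can be walked like
+ * this. "find_bdb_block" and its bounds handling are a sketch, not the
+ * intel_bios.c parser.
+ */
+static inline const u8 *find_bdb_block(const u8 *data, size_t len,
+					enum bdb_block_id id)
+{
+	size_t i = 0;
+
+	while (i + 3 <= len) {
+		u16 block_size = data[i + 1] | (data[i + 2] << 8);
+
+		if (data[i] == id)
+			return &data[i + 3]; /* payload follows the header */
+
+		i += 3 + block_size;
+	}
+
+	return NULL;
+}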
+
+/*
+ * Block 1 - General Bit Definitions
+ */
+
+struct bdb_general_features {
+ /* bits 1 */
+ u8 panel_fitting:2;
+ u8 flexaim:1;
+ u8 msg_enable:1;
+ u8 clear_screen:3;
+ u8 color_flip:1;
+
+ /* bits 2 */
+ u8 download_ext_vbt:1;
+ u8 enable_ssc:1;
+ u8 ssc_freq:1;
+ u8 enable_lfp_on_override:1;
+ u8 disable_ssc_ddt:1;
+ u8 underscan_vga_timings:1;
+ u8 display_clock_mode:1;
+ u8 vbios_hotplug_support:1;
+
+ /* bits 3 */
+ u8 disable_smooth_vision:1;
+ u8 single_dvi:1;
+ u8 rotate_180:1; /* 181+ */
+ u8 fdi_rx_polarity_inverted:1;
+ u8 vbios_extended_mode:1; /* 160+ */
+ u8 copy_ilfp_dtd_to_sdvo_lvds_dtd:1; /* 160+ */
+ u8 panel_best_fit_timing:1; /* 160+ */
+ u8 ignore_strap_state:1; /* 160+ */
+
+ /* bits 4 */
+ u8 legacy_monitor_detect;
+
+ /* bits 5 */
+ u8 int_crt_support:1;
+ u8 int_tv_support:1;
+ u8 int_efp_support:1;
+ u8 dp_ssc_enable:1; /* PCH attached eDP supports SSC */
+ u8 dp_ssc_freq:1; /* SSC freq for PCH attached eDP */
+ u8 dp_ssc_dongle_supported:1;
+ u8 rsvd11:2; /* finish byte */
+
+ /* bits 6 */
+ u8 tc_hpd_retry_timeout:7; /* 242+ */
+ u8 rsvd12:1;
+
+ /* bits 7 */
+ u8 afc_startup_config:2; /* 249+ */
+ u8 rsvd13:6;
+} __packed;
+
+/*
+ * Block 2 - General Bytes Definition
+ */
+
+/* pre-915 */
+#define GPIO_PIN_DVI_LVDS 0x03 /* "DVI/LVDS DDC GPIO pins" */
+#define GPIO_PIN_ADD_I2C 0x05 /* "ADDCARD I2C GPIO pins" */
+#define GPIO_PIN_ADD_DDC 0x04 /* "ADDCARD DDC GPIO pins" */
+#define GPIO_PIN_ADD_DDC_I2C 0x06 /* "ADDCARD DDC/I2C GPIO pins" */
+
+/* Device handle */
+#define DEVICE_HANDLE_CRT 0x0001
+#define DEVICE_HANDLE_EFP1 0x0004
+#define DEVICE_HANDLE_EFP2 0x0040
+#define DEVICE_HANDLE_EFP3 0x0020
+#define DEVICE_HANDLE_EFP4 0x0010 /* 194+ */
+#define DEVICE_HANDLE_EFP5 0x0002 /* 215+ */
+#define DEVICE_HANDLE_EFP6 0x0001 /* 217+ */
+#define DEVICE_HANDLE_EFP7 0x0100 /* 217+ */
+#define DEVICE_HANDLE_EFP8 0x0200 /* 217+ */
+#define DEVICE_HANDLE_LFP1 0x0008
+#define DEVICE_HANDLE_LFP2 0x0080
+
+/* Pre 915 */
+#define DEVICE_TYPE_NONE 0x00
+#define DEVICE_TYPE_CRT 0x01
+#define DEVICE_TYPE_TV 0x09
+#define DEVICE_TYPE_EFP 0x12
+#define DEVICE_TYPE_LFP 0x22
+/* On 915+ */
+#define DEVICE_TYPE_CRT_DPMS 0x6001
+#define DEVICE_TYPE_CRT_DPMS_HOTPLUG 0x4001
+#define DEVICE_TYPE_TV_COMPOSITE 0x0209
+#define DEVICE_TYPE_TV_MACROVISION 0x0289
+#define DEVICE_TYPE_TV_RF_COMPOSITE 0x020c
+#define DEVICE_TYPE_TV_SVIDEO_COMPOSITE 0x0609
+#define DEVICE_TYPE_TV_SCART 0x0209
+#define DEVICE_TYPE_TV_CODEC_HOTPLUG_PWR 0x6009
+#define DEVICE_TYPE_EFP_HOTPLUG_PWR 0x6012
+#define DEVICE_TYPE_EFP_DVI_HOTPLUG_PWR 0x6052
+#define DEVICE_TYPE_EFP_DVI_I 0x6053
+#define DEVICE_TYPE_EFP_DVI_D_DUAL 0x6152
+#define DEVICE_TYPE_EFP_DVI_D_HDCP 0x60d2
+#define DEVICE_TYPE_OPENLDI_HOTPLUG_PWR 0x6062
+#define DEVICE_TYPE_OPENLDI_DUALPIX 0x6162
+#define DEVICE_TYPE_LFP_PANELLINK 0x5012
+#define DEVICE_TYPE_LFP_CMOS_PWR 0x5042
+#define DEVICE_TYPE_LFP_LVDS_PWR 0x5062
+#define DEVICE_TYPE_LFP_LVDS_DUAL 0x5162
+#define DEVICE_TYPE_LFP_LVDS_DUAL_HDCP 0x51e2
+
+/* Add the device class for LFP, TV, HDMI */
+#define DEVICE_TYPE_INT_LFP 0x1022
+#define DEVICE_TYPE_INT_TV 0x1009
+#define DEVICE_TYPE_HDMI 0x60D2
+#define DEVICE_TYPE_DP 0x68C6
+#define DEVICE_TYPE_DP_DUAL_MODE 0x60D6
+#define DEVICE_TYPE_eDP 0x78C6
+
+#define DEVICE_TYPE_CLASS_EXTENSION (1 << 15)
+#define DEVICE_TYPE_POWER_MANAGEMENT (1 << 14)
+#define DEVICE_TYPE_HOTPLUG_SIGNALING (1 << 13)
+#define DEVICE_TYPE_INTERNAL_CONNECTOR (1 << 12)
+#define DEVICE_TYPE_NOT_HDMI_OUTPUT (1 << 11)
+#define DEVICE_TYPE_MIPI_OUTPUT (1 << 10)
+#define DEVICE_TYPE_COMPOSITE_OUTPUT (1 << 9)
+#define DEVICE_TYPE_DUAL_CHANNEL (1 << 8)
+#define DEVICE_TYPE_HIGH_SPEED_LINK (1 << 6)
+#define DEVICE_TYPE_LVDS_SIGNALING (1 << 5)
+#define DEVICE_TYPE_TMDS_DVI_SIGNALING (1 << 4)
+#define DEVICE_TYPE_VIDEO_SIGNALING (1 << 3)
+#define DEVICE_TYPE_DISPLAYPORT_OUTPUT (1 << 2)
+#define DEVICE_TYPE_DIGITAL_OUTPUT (1 << 1)
+#define DEVICE_TYPE_ANALOG_OUTPUT (1 << 0)
+
+#define DEVICE_CFG_NONE 0x00
+#define DEVICE_CFG_12BIT_DVOB 0x01
+#define DEVICE_CFG_12BIT_DVOC 0x02
+#define DEVICE_CFG_24BIT_DVOBC 0x09
+#define DEVICE_CFG_24BIT_DVOCB 0x0a
+#define DEVICE_CFG_DUAL_DVOB 0x11
+#define DEVICE_CFG_DUAL_DVOC 0x12
+#define DEVICE_CFG_DUAL_DVOBC 0x13
+#define DEVICE_CFG_DUAL_LINK_DVOBC 0x19
+#define DEVICE_CFG_DUAL_LINK_DVOCB 0x1a
+
+#define DEVICE_WIRE_NONE 0x00
+#define DEVICE_WIRE_DVOB 0x01
+#define DEVICE_WIRE_DVOC 0x02
+#define DEVICE_WIRE_DVOBC 0x03
+#define DEVICE_WIRE_DVOBB 0x05
+#define DEVICE_WIRE_DVOCC 0x06
+#define DEVICE_WIRE_DVOB_MASTER 0x0d
+#define DEVICE_WIRE_DVOC_MASTER 0x0e
+
+/* dvo_port pre BDB 155 */
+#define DEVICE_PORT_DVOA 0x00 /* none on 845+ */
+#define DEVICE_PORT_DVOB 0x01
+#define DEVICE_PORT_DVOC 0x02
+
+/* dvo_port BDB 155+ */
+#define DVO_PORT_HDMIA 0
+#define DVO_PORT_HDMIB 1
+#define DVO_PORT_HDMIC 2
+#define DVO_PORT_HDMID 3
+#define DVO_PORT_LVDS 4
+#define DVO_PORT_TV 5
+#define DVO_PORT_CRT 6
+#define DVO_PORT_DPB 7
+#define DVO_PORT_DPC 8
+#define DVO_PORT_DPD 9
+#define DVO_PORT_DPA 10
+#define DVO_PORT_DPE 11 /* 193+ */
+#define DVO_PORT_HDMIE 12 /* 193+ */
+#define DVO_PORT_DPF 13 /* N/A */
+#define DVO_PORT_HDMIF 14 /* N/A */
+#define DVO_PORT_DPG 15 /* 217+ */
+#define DVO_PORT_HDMIG 16 /* 217+ */
+#define DVO_PORT_DPH 17 /* 217+ */
+#define DVO_PORT_HDMIH 18 /* 217+ */
+#define DVO_PORT_DPI 19 /* 217+ */
+#define DVO_PORT_HDMII 20 /* 217+ */
+#define DVO_PORT_MIPIA 21 /* 171+ */
+#define DVO_PORT_MIPIB 22 /* 171+ */
+#define DVO_PORT_MIPIC 23 /* 171+ */
+#define DVO_PORT_MIPID 24 /* 171+ */
+
+#define HDMI_MAX_DATA_RATE_PLATFORM 0 /* 204+ */
+#define HDMI_MAX_DATA_RATE_297 1 /* 204+ */
+#define HDMI_MAX_DATA_RATE_165 2 /* 204+ */
+#define HDMI_MAX_DATA_RATE_594 3 /* 249+ */
+#define HDMI_MAX_DATA_RATE_340 4 /* 249+ */
+#define HDMI_MAX_DATA_RATE_300 5 /* 249+ */
+
+#define LEGACY_CHILD_DEVICE_CONFIG_SIZE 33
+
+/* DDC Bus DDI Type 155+ */
+enum vbt_gmbus_ddi {
+ DDC_BUS_DDI_B = 0x1,
+ DDC_BUS_DDI_C,
+ DDC_BUS_DDI_D,
+ DDC_BUS_DDI_F,
+ ICL_DDC_BUS_DDI_A = 0x1,
+ ICL_DDC_BUS_DDI_B,
+ TGL_DDC_BUS_DDI_C,
+ RKL_DDC_BUS_DDI_D = 0x3,
+ RKL_DDC_BUS_DDI_E,
+ ICL_DDC_BUS_PORT_1 = 0x4,
+ ICL_DDC_BUS_PORT_2,
+ ICL_DDC_BUS_PORT_3,
+ ICL_DDC_BUS_PORT_4,
+ TGL_DDC_BUS_PORT_5,
+ TGL_DDC_BUS_PORT_6,
+ ADLS_DDC_BUS_PORT_TC1 = 0x2,
+ ADLS_DDC_BUS_PORT_TC2,
+ ADLS_DDC_BUS_PORT_TC3,
+ ADLS_DDC_BUS_PORT_TC4,
+ ADLP_DDC_BUS_PORT_TC1 = 0x3,
+ ADLP_DDC_BUS_PORT_TC2,
+ ADLP_DDC_BUS_PORT_TC3,
+ ADLP_DDC_BUS_PORT_TC4
+};
+
+#define DP_AUX_A 0x40
+#define DP_AUX_B 0x10
+#define DP_AUX_C 0x20
+#define DP_AUX_D 0x30
+#define DP_AUX_E 0x50
+#define DP_AUX_F 0x60
+#define DP_AUX_G 0x70
+#define DP_AUX_H 0x80
+#define DP_AUX_I 0x90
+
+/* DP max link rate 216+ */
+#define BDB_216_VBT_DP_MAX_LINK_RATE_HBR3 0
+#define BDB_216_VBT_DP_MAX_LINK_RATE_HBR2 1
+#define BDB_216_VBT_DP_MAX_LINK_RATE_HBR 2
+#define BDB_216_VBT_DP_MAX_LINK_RATE_LBR 3
+
+/* DP max link rate 230+ */
+#define BDB_230_VBT_DP_MAX_LINK_RATE_DEF 0
+#define BDB_230_VBT_DP_MAX_LINK_RATE_LBR 1
+#define BDB_230_VBT_DP_MAX_LINK_RATE_HBR 2
+#define BDB_230_VBT_DP_MAX_LINK_RATE_HBR2 3
+#define BDB_230_VBT_DP_MAX_LINK_RATE_HBR3 4
+#define BDB_230_VBT_DP_MAX_LINK_RATE_UHBR10 5
+#define BDB_230_VBT_DP_MAX_LINK_RATE_UHBR13P5 6
+#define BDB_230_VBT_DP_MAX_LINK_RATE_UHBR20 7
+
+/*
+ * The child device config, aka the display device data structure, provides a
+ * description of a port and its configuration on the platform.
+ *
+ * The child device config size has been increased, and fields have been added
+ * and their meaning has changed over time. Care must be taken when accessing
+ * basically any of the fields to ensure the correct interpretation for the BDB
+ * version in question.
+ *
+ * When we copy the child device configs to dev_priv->display.vbt.child_dev, we
+ * reserve space for the full structure below, and initialize the tail not
+ * actually present in VBT to zeros. Accessing those fields is fine, as long as
+ * the default zero is taken into account, again according to the BDB version.
+ *
+ * BDB versions 155 and below are considered legacy, and version 155 seems to be
+ * a baseline for some of the VBT documentation. When adding new fields, please
+ * include the BDB version when the field was added, if it's above that.
+ */
+struct child_device_config {
+ u16 handle;
+ u16 device_type; /* See DEVICE_TYPE_* above */
+
+ union {
+ u8 device_id[10]; /* ascii string */
+ struct {
+ u8 i2c_speed;
+ u8 dp_onboard_redriver_preemph:3; /* 158+ */
+ u8 dp_onboard_redriver_vswing:3; /* 158+ */
+ u8 dp_onboard_redriver_present:1; /* 158+ */
+ u8 reserved0:1;
+ u8 dp_ondock_redriver_preemph:3; /* 158+ */
+ u8 dp_ondock_redriver_vswing:3; /* 158+ */
+ u8 dp_ondock_redriver_present:1; /* 158+ */
+ u8 reserved1:1;
+ u8 hdmi_level_shifter_value:5; /* 158+ */
+ u8 hdmi_max_data_rate:3; /* 204+ */
+ u16 dtd_buf_ptr; /* 161+ */
+ u8 edidless_efp:1; /* 161+ */
+ u8 compression_enable:1; /* 198+ */
+ u8 compression_method_cps:1; /* 198+ */
+ u8 ganged_edp:1; /* 202+ */
+ u8 lttpr_non_transparent:1; /* 235+ */
+ u8 disable_compression_for_ext_disp:1; /* 251+ */
+ u8 reserved2:2;
+ u8 compression_structure_index:4; /* 198+ */
+ u8 reserved3:4;
+ u8 hdmi_max_frl_rate:4; /* 237+ */
+ u8 hdmi_max_frl_rate_valid:1; /* 237+ */
+ u8 reserved4:3; /* 237+ */
+ u8 reserved5;
+ } __packed;
+ } __packed;
+
+ u16 addin_offset;
+ u8 dvo_port; /* See DEVICE_PORT_* and DVO_PORT_* above */
+ u8 i2c_pin;
+ u8 slave_addr;
+ u8 ddc_pin;
+ u16 edid_ptr;
+ u8 dvo_cfg; /* See DEVICE_CFG_* above */
+
+ union {
+ struct {
+ u8 dvo2_port;
+ u8 i2c2_pin;
+ u8 slave2_addr;
+ u8 ddc2_pin;
+ } __packed;
+ struct {
+ u8 efp_routed:1; /* 158+ */
+ u8 lane_reversal:1; /* 184+ */
+ u8 lspcon:1; /* 192+ */
+ u8 iboost:1; /* 196+ */
+ u8 hpd_invert:1; /* 196+ */
+ u8 use_vbt_vswing:1; /* 218+ */
+ u8 dp_max_lane_count:2; /* 244+ */
+ u8 hdmi_support:1; /* 158+ */
+ u8 dp_support:1; /* 158+ */
+ u8 tmds_support:1; /* 158+ */
+ u8 support_reserved:5;
+ u8 aux_channel;
+ u8 dongle_detect;
+ } __packed;
+ } __packed;
+
+ u8 pipe_cap:2;
+ u8 sdvo_stall:1; /* 158+ */
+ u8 hpd_status:2;
+ u8 integrated_encoder:1;
+ u8 capabilities_reserved:2;
+ u8 dvo_wiring; /* See DEVICE_WIRE_* above */
+
+ union {
+ u8 dvo2_wiring;
+ u8 mipi_bridge_type; /* 171+ */
+ } __packed;
+
+ u16 extended_type;
+ u8 dvo_function;
+ u8 dp_usb_type_c:1; /* 195+ */
+ u8 tbt:1; /* 209+ */
+ u8 flags2_reserved:2; /* 195+ */
+ u8 dp_port_trace_length:4; /* 209+ */
+ u8 dp_gpio_index; /* 195+ */
+ u16 dp_gpio_pin_num; /* 195+ */
+ u8 dp_iboost_level:4; /* 196+ */
+ u8 hdmi_iboost_level:4; /* 196+ */
+ u8 dp_max_link_rate:3; /* 216+ */
+ u8 dp_max_link_rate_reserved:5; /* 216+ */
+} __packed;
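+
+/*
+ * A hedged sketch (not the intel_bios.c implementation) of the
+ * copy-and-zero-fill approach described above the structure: the
+ * destination is zeroed first so tail fields absent from an older VBT
+ * read back as their documented zero defaults; "vbt_child_size" is an
+ * assumed parameter name.
+ */
+static inline void
+sketch_copy_child_config(struct child_device_config *dst,
+			 const void *vbt_child, size_t vbt_child_size)
+{
+	memset(dst, 0, sizeof(*dst));
+	memcpy(dst, vbt_child, min(sizeof(*dst), vbt_child_size));
+}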
+
+struct bdb_general_definitions {
+ /* DDC GPIO */
+ u8 crt_ddc_gmbus_pin;
+
+ /* DPMS bits */
+ u8 dpms_non_acpi:1;
+ u8 skip_boot_crt_detect:1;
+ u8 dpms_aim:1;
+ u8 rsvd1:5; /* finish byte */
+
+ /* boot device bits */
+ u8 boot_display[2];
+ u8 child_dev_size;
+
+ /*
+ * Device info:
+ * If TV is present, it'll be at devices[0].
+ * LVDS will be next, either devices[0] or [1], if present.
+	 * On some platforms the number of devices is 6, but it could be as
+	 * few as 4 if both TV and LVDS are missing.
+	 * The number of devices is derived from the size of the general
+	 * definitions block using the following formula:
+ * number = (block_size - sizeof(bdb_general_definitions))/
+ * defs->child_dev_size;
+ */
+ u8 devices[];
+} __packed;
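+
+/*
+ * Illustrative only: the child device count formula from the comment
+ * above, expressed as code ("block_size" is assumed to be the size of
+ * the general definitions block payload).
+ */
+static inline int
+bdb_child_device_num(size_t block_size,
+		     const struct bdb_general_definitions *defs)
+{
+	return (block_size - sizeof(*defs)) / defs->child_dev_size;
+}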
+
+/*
+ * Block 9 - SRD Feature Block
+ */
+
+struct psr_table {
+ /* Feature bits */
+ u8 full_link:1; /* 165+ */
+ u8 require_aux_to_wakeup:1; /* 165+ */
+ u8 feature_bits_rsvd:6;
+
+ /* Wait times */
+ u8 idle_frames:4; /* 165+ */
+ u8 lines_to_wait:3; /* 165+ */
+ u8 wait_times_rsvd:1;
+
+ /* TP wake up time in multiple of 100 */
+ u16 tp1_wakeup_time; /* 165+ */
+ u16 tp2_tp3_wakeup_time; /* 165+ */
+} __packed;
+
+struct bdb_psr {
+ struct psr_table psr_table[16];
+
+ /* PSR2 TP2/TP3 wakeup time for 16 panels */
+ u32 psr2_tp2_tp3_wakeup_time; /* 226+ */
+} __packed;
+
+/*
+ * Block 12 - Driver Features Data Block
+ */
+
+#define BDB_DRIVER_FEATURE_NO_LVDS 0
+#define BDB_DRIVER_FEATURE_INT_LVDS 1
+#define BDB_DRIVER_FEATURE_SDVO_LVDS 2
+#define BDB_DRIVER_FEATURE_INT_SDVO_LVDS 3
+
+struct bdb_driver_features {
+ /* Driver bits */
+ u8 boot_dev_algorithm:1;
+ u8 allow_display_switch_dvd:1;
+ u8 allow_display_switch_dos:1;
+ u8 hotplug_dvo:1;
+ u8 dual_view_zoom:1;
+ u8 int15h_hook:1;
+ u8 sprite_in_clone:1;
+ u8 primary_lfp_id:1;
+
+ u16 boot_mode_x;
+ u16 boot_mode_y;
+ u8 boot_mode_bpp;
+ u8 boot_mode_refresh;
+
+ /* Extended Driver Bits 1 */
+ u16 enable_lfp_primary:1;
+ u16 selective_mode_pruning:1;
+ u16 dual_frequency:1;
+ u16 render_clock_freq:1; /* 0: high freq; 1: low freq */
+ u16 nt_clone_support:1;
+ u16 power_scheme_ui:1; /* 0: CUI; 1: 3rd party */
+ u16 sprite_display_assign:1; /* 0: secondary; 1: primary */
+ u16 cui_aspect_scaling:1;
+ u16 preserve_aspect_ratio:1;
+ u16 sdvo_device_power_down:1;
+ u16 crt_hotplug:1;
+ u16 lvds_config:2;
+ u16 tv_hotplug:1;
+ u16 hdmi_config:2;
+
+ /* Driver Flags 1 */
+ u8 static_display:1; /* 163+ */
+ u8 embedded_platform:1; /* 163+ */
+ u8 display_subsystem_enable:1; /* 163+ */
+ u8 reserved0:5;
+
+ u16 legacy_crt_max_x;
+ u16 legacy_crt_max_y;
+ u8 legacy_crt_max_refresh;
+
+ /* Extended Driver Bits 2 */
+ u8 hdmi_termination:1;
+ u8 cea861d_hdmi_support:1;
+ u8 self_refresh_enable:1;
+ u8 reserved1:5;
+
+ u8 custom_vbt_version; /* 155+ */
+
+ /* Driver Feature Flags */
+ u16 rmpm_enabled:1; /* 165+ */
+ u16 s2ddt_enabled:1; /* 165+ */
+ u16 dpst_enabled:1; /* 165-227 */
+ u16 bltclt_enabled:1; /* 165+ */
+ u16 adb_enabled:1; /* 165-227 */
+ u16 drrs_enabled:1; /* 165-227 */
+ u16 grs_enabled:1; /* 165+ */
+ u16 gpmt_enabled:1; /* 165+ */
+ u16 tbt_enabled:1; /* 165+ */
+ u16 psr_enabled:1; /* 165-227 */
+ u16 ips_enabled:1; /* 165+ */
+ u16 dpfs_enabled:1; /* 165+ */
+ u16 dmrrs_enabled:1; /* 174-227 */
+ u16 adt_enabled:1; /* ???-228 */
+ u16 hpd_wake:1; /* 201-240 */
+ u16 pc_feature_valid:1;
+} __packed;
+
+/*
+ * Block 22 - SDVO LVDS General Options
+ */
+
+struct bdb_sdvo_lvds_options {
+ u8 panel_backlight;
+ u8 h40_set_panel_type;
+ u8 panel_type;
+ u8 ssc_clk_freq;
+ u16 als_low_trip;
+ u16 als_high_trip;
+ u8 sclalarcoeff_tab_row_num;
+ u8 sclalarcoeff_tab_row_size;
+ u8 coefficient[8];
+ u8 panel_misc_bits_1;
+ u8 panel_misc_bits_2;
+ u8 panel_misc_bits_3;
+ u8 panel_misc_bits_4;
+} __packed;
+
+/*
+ * Block 23 - SDVO LVDS Panel DTDs
+ */
+
+struct lvds_dvo_timing {
+ u16 clock; /**< In 10khz */
+ u8 hactive_lo;
+ u8 hblank_lo;
+ u8 hblank_hi:4;
+ u8 hactive_hi:4;
+ u8 vactive_lo;
+ u8 vblank_lo;
+ u8 vblank_hi:4;
+ u8 vactive_hi:4;
+ u8 hsync_off_lo;
+ u8 hsync_pulse_width_lo;
+ u8 vsync_pulse_width_lo:4;
+ u8 vsync_off_lo:4;
+ u8 vsync_pulse_width_hi:2;
+ u8 vsync_off_hi:2;
+ u8 hsync_pulse_width_hi:2;
+ u8 hsync_off_hi:2;
+ u8 himage_lo;
+ u8 vimage_lo;
+ u8 vimage_hi:4;
+ u8 himage_hi:4;
+ u8 h_border;
+ u8 v_border;
+ u8 rsvd1:3;
+ u8 digital:2;
+ u8 vsync_positive:1;
+ u8 hsync_positive:1;
+ u8 non_interlaced:1;
+} __packed;
+
+struct bdb_sdvo_panel_dtds {
+ struct lvds_dvo_timing dtds[4];
+} __packed;
+
+/*
+ * Block 27 - eDP VBT Block
+ */
+
+#define EDP_18BPP 0
+#define EDP_24BPP 1
+#define EDP_30BPP 2
+#define EDP_RATE_1_62 0
+#define EDP_RATE_2_7 1
+#define EDP_RATE_5_4 2
+#define EDP_LANE_1 0
+#define EDP_LANE_2 1
+#define EDP_LANE_4 3
+#define EDP_PREEMPHASIS_NONE 0
+#define EDP_PREEMPHASIS_3_5dB 1
+#define EDP_PREEMPHASIS_6dB 2
+#define EDP_PREEMPHASIS_9_5dB 3
+#define EDP_VSWING_0_4V 0
+#define EDP_VSWING_0_6V 1
+#define EDP_VSWING_0_8V 2
+#define EDP_VSWING_1_2V 3
+
+struct edp_fast_link_params {
+ u8 rate:4; /* ???-223 */
+ u8 lanes:4;
+ u8 preemphasis:4;
+ u8 vswing:4;
+} __packed;
+
+struct edp_pwm_delays {
+ u16 pwm_on_to_backlight_enable;
+ u16 backlight_disable_to_pwm_off;
+} __packed;
+
+struct edp_full_link_params {
+ u8 preemphasis:4;
+ u8 vswing:4;
+} __packed;
+
+struct edp_apical_params {
+ u32 panel_oui;
+ u32 dpcd_base_address;
+ u32 dpcd_idridix_control_0;
+ u32 dpcd_option_select;
+ u32 dpcd_backlight;
+ u32 ambient_light;
+ u32 backlight_scale;
+} __packed;
+
+struct bdb_edp {
+ struct edp_power_seq power_seqs[16];
+ u32 color_depth;
+ struct edp_fast_link_params fast_link_params[16];
+ u32 sdrrs_msa_timing_delay;
+
+ /* ith bit indicates enabled/disabled for (i+1)th panel */
+ u16 edp_s3d_feature; /* 162+ */
+ u16 edp_t3_optimization; /* 165+ */
+ u64 edp_vswing_preemph; /* 173+ */
+ u16 fast_link_training; /* 182+ */
+ u16 dpcd_600h_write_required; /* 185+ */
+ struct edp_pwm_delays pwm_delays[16]; /* 186+ */
+ u16 full_link_params_provided; /* 199+ */
+ struct edp_full_link_params full_link_params[16]; /* 199+ */
+ u16 apical_enable; /* 203+ */
+ struct edp_apical_params apical_params[16]; /* 203+ */
+ u16 edp_fast_link_training_rate[16]; /* 224+ */
+ u16 edp_max_port_link_rate[16]; /* 244+ */
+} __packed;
+
+/*
+ * Block 40 - LFP Data Block
+ */
+
+struct bdb_lvds_options {
+ u8 panel_type;
+ u8 panel_type2; /* 212+ */
+ /* LVDS capabilities, stored in a dword */
+ u8 pfit_mode:2;
+ u8 pfit_text_mode_enhanced:1;
+ u8 pfit_gfx_mode_enhanced:1;
+ u8 pfit_ratio_auto:1;
+ u8 pixel_dither:1;
+ u8 lvds_edid:1; /* ???-240 */
+ u8 rsvd2:1;
+ u8 rsvd4;
+ /* LVDS Panel channel bits stored here */
+ u32 lvds_panel_channel_bits;
+ /* LVDS SSC (Spread Spectrum Clock) bits stored here. */
+ u16 ssc_bits;
+ u16 ssc_freq;
+ u16 ssc_ddt;
+ /* Panel color depth defined here */
+ u16 panel_color_depth;
+ /* LVDS panel type bits stored here */
+ u32 dps_panel_type_bits;
+ /* LVDS backlight control type bits stored here */
+ u32 blt_control_type_bits; /* ???-240 */
+
+ u16 lcdvcc_s0_enable; /* 200+ */
+ u32 rotation; /* 228+ */
+ u32 position; /* 240+ */
+} __packed;
+
+/*
+ * Block 41 - LFP Data Table Pointers
+ */
+struct lvds_lfp_data_ptr_table {
+ u16 offset; /* offsets are from start of bdb */
+ u8 table_size;
+} __packed;
+
+/* LFP pointer table contains entries to the struct below */
+struct lvds_lfp_data_ptr {
+ struct lvds_lfp_data_ptr_table fp_timing;
+ struct lvds_lfp_data_ptr_table dvo_timing;
+ struct lvds_lfp_data_ptr_table panel_pnp_id;
+} __packed;
+
+struct bdb_lvds_lfp_data_ptrs {
+ u8 lvds_entries;
+ struct lvds_lfp_data_ptr ptr[16];
+ struct lvds_lfp_data_ptr_table panel_name; /* (156-163?)+ */
+} __packed;
+
+/*
+ * Block 42 - LFP Data Tables
+ */
+
+/* LFP data has 3 blocks per entry */
+struct lvds_fp_timing {
+ u16 x_res;
+ u16 y_res;
+ u32 lvds_reg;
+ u32 lvds_reg_val;
+ u32 pp_on_reg;
+ u32 pp_on_reg_val;
+ u32 pp_off_reg;
+ u32 pp_off_reg_val;
+ u32 pp_cycle_reg;
+ u32 pp_cycle_reg_val;
+ u32 pfit_reg;
+ u32 pfit_reg_val;
+ u16 terminator;
+} __packed;
+
+struct lvds_pnp_id {
+ u16 mfg_name;
+ u16 product_code;
+ u32 serial;
+ u8 mfg_week;
+ u8 mfg_year;
+} __packed;
+
+/*
+ * For reference only. fp_timing has variable size so
+ * the data must be accessed using the data table pointers.
+ * Do not use this directly!
+ */
+struct lvds_lfp_data_entry {
+ struct lvds_fp_timing fp_timing;
+ struct lvds_dvo_timing dvo_timing;
+ struct lvds_pnp_id pnp_id;
+} __packed;
+
+struct bdb_lvds_lfp_data {
+ struct lvds_lfp_data_entry data[16];
+} __packed;
+
+struct lvds_lfp_panel_name {
+ u8 name[13];
+} __packed;
+
+struct lvds_lfp_black_border {
+ u8 top; /* 227+ */
+ u8 bottom; /* 227+ */
+ u8 left; /* 238+ */
+ u8 right; /* 238+ */
+} __packed;
+
+struct bdb_lvds_lfp_data_tail {
+ struct lvds_lfp_panel_name panel_name[16]; /* (156-163?)+ */
+ u16 scaling_enable; /* 187+ */
+ u8 seamless_drrs_min_refresh_rate[16]; /* 188+ */
+ u8 pixel_overlap_count[16]; /* 208+ */
+ struct lvds_lfp_black_border black_border[16]; /* 227+ */
+ u16 dual_lfp_port_sync_enable; /* 231+ */
+ u16 gpu_dithering_for_banding_artifacts; /* 245+ */
+} __packed;
+
+/*
+ * Block 43 - LFP Backlight Control Data Block
+ */
+
+#define BDB_BACKLIGHT_TYPE_NONE 0
+#define BDB_BACKLIGHT_TYPE_PWM 2
+
+struct lfp_backlight_data_entry {
+ u8 type:2;
+ u8 active_low_pwm:1;
+ u8 obsolete1:5;
+ u16 pwm_freq_hz;
+ u8 min_brightness; /* ???-233 */
+ u8 obsolete2;
+ u8 obsolete3;
+} __packed;
+
+struct lfp_backlight_control_method {
+ u8 type:4;
+ u8 controller:4;
+} __packed;
+
+struct lfp_brightness_level {
+ u16 level;
+ u16 reserved;
+} __packed;
+
+#define EXP_BDB_LFP_BL_DATA_SIZE_REV_191 \
+ offsetof(struct bdb_lfp_backlight_data, brightness_level)
+#define EXP_BDB_LFP_BL_DATA_SIZE_REV_234 \
+ offsetof(struct bdb_lfp_backlight_data, brightness_precision_bits)
+
+struct bdb_lfp_backlight_data {
+ u8 entry_size;
+ struct lfp_backlight_data_entry data[16];
+ u8 level[16]; /* ???-233 */
+ struct lfp_backlight_control_method backlight_control[16];
+ struct lfp_brightness_level brightness_level[16]; /* 234+ */
+ struct lfp_brightness_level brightness_min_level[16]; /* 234+ */
+ u8 brightness_precision_bits[16]; /* 236+ */
+ u16 hdr_dpcd_refresh_timeout[16]; /* 239+ */
+} __packed;
+
+/*
+ * Block 44 - LFP Power Conservation Features Block
+ */
+struct lfp_power_features {
+ u8 reserved1:1;
+ u8 power_conservation_pref:3;
+ u8 reserved2:1;
+ u8 lace_enabled_status:1; /* 210+ */
+ u8 lace_support:1; /* 210+ */
+ u8 als_enable:1;
+} __packed;
+
+struct als_data_entry {
+ u16 backlight_adjust;
+ u16 lux;
+} __packed;
+
+struct aggressiveness_profile_entry {
+ u8 dpst_aggressiveness : 4;
+ u8 lace_aggressiveness : 4;
+} __packed;
+
+struct aggressiveness_profile2_entry {
+ u8 opst_aggressiveness : 4;
+ u8 elp_aggressiveness : 4;
+} __packed;
+
+struct bdb_lfp_power {
+ struct lfp_power_features features; /* ???-227 */
+ struct als_data_entry als[5];
+ u8 lace_aggressiveness_profile:3; /* 210-227 */
+ u8 reserved1:5;
+ u16 dpst; /* 228+ */
+ u16 psr; /* 228+ */
+ u16 drrs; /* 228+ */
+ u16 lace_support; /* 228+ */
+ u16 adt; /* 228+ */
+ u16 dmrrs; /* 228+ */
+ u16 adb; /* 228+ */
+ u16 lace_enabled_status; /* 228+ */
+ struct aggressiveness_profile_entry aggressiveness[16]; /* 228+ */
+ u16 hobl; /* 232+ */
+ u16 vrr_feature_enabled; /* 233+ */
+ u16 elp; /* 247+ */
+ u16 opst; /* 247+ */
+ struct aggressiveness_profile2_entry aggressiveness2[16]; /* 247+ */
+} __packed;
+
+/*
+ * Block 52 - MIPI Configuration Block
+ */
+
+#define MAX_MIPI_CONFIGURATIONS 6
+
+struct bdb_mipi_config {
+ struct mipi_config config[MAX_MIPI_CONFIGURATIONS]; /* 175+ */
+ struct mipi_pps_data pps[MAX_MIPI_CONFIGURATIONS]; /* 177+ */
+ struct edp_pwm_delays pwm_delays[MAX_MIPI_CONFIGURATIONS]; /* 186+ */
+ u8 pmic_i2c_bus_number[MAX_MIPI_CONFIGURATIONS]; /* 190+ */
+} __packed;
+
+/*
+ * Block 53 - MIPI Sequence Block
+ */
+
+struct bdb_mipi_sequence {
+ u8 version;
+ u8 data[]; /* up to 6 variable length blocks */
+} __packed;
+
+/*
+ * Block 56 - Compression Parameters
+ */
+
+#define VBT_RC_BUFFER_BLOCK_SIZE_1KB 0
+#define VBT_RC_BUFFER_BLOCK_SIZE_4KB 1
+#define VBT_RC_BUFFER_BLOCK_SIZE_16KB 2
+#define VBT_RC_BUFFER_BLOCK_SIZE_64KB 3
+
+#define VBT_DSC_LINE_BUFFER_DEPTH(vbt_value) ((vbt_value) + 8) /* bits */
+#define VBT_DSC_MAX_BPP(vbt_value) (6 + (vbt_value) * 2)
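+
+/*
+ * Example mappings from the macros above: VBT_DSC_LINE_BUFFER_DEPTH(1)
+ * yields a 9-bit line buffer depth and VBT_DSC_MAX_BPP(3) yields 12 bpp.
+ */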
+
+struct dsc_compression_parameters_entry {
+ u8 version_major:4;
+ u8 version_minor:4;
+
+ u8 rc_buffer_block_size:2;
+ u8 reserved1:6;
+
+ /*
+ * Buffer size in bytes:
+ *
+ * 4 ^ rc_buffer_block_size * 1024 * (rc_buffer_size + 1) bytes
+ */
+ u8 rc_buffer_size;
+ u32 slices_per_line;
+
+ u8 line_buffer_depth:4;
+ u8 reserved2:4;
+
+ /* Flag Bits 1 */
+ u8 block_prediction_enable:1;
+ u8 reserved3:7;
+
+ u8 max_bpp; /* mapping */
+
+ /* Color depth capabilities */
+ u8 reserved4:1;
+ u8 support_8bpc:1;
+ u8 support_10bpc:1;
+ u8 support_12bpc:1;
+ u8 reserved5:4;
+
+ u16 slice_height;
+} __packed;
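+
+/*
+ * Sketch only: decoding the RC buffer size formula documented inside
+ * the structure above; 4 ^ rc_buffer_block_size is computed as a
+ * shift by twice the 2-bit field value.
+ */
+static inline u32
+vbt_rc_buffer_bytes(const struct dsc_compression_parameters_entry *e)
+{
+	return (1u << (2 * e->rc_buffer_block_size)) * 1024 *
+	       (e->rc_buffer_size + 1);
+}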
+
+struct bdb_compression_parameters {
+ u16 entry_size;
+ struct dsc_compression_parameters_entry data[16];
+} __packed;
+
+/*
+ * Block 58 - Generic DTD Block
+ */
+
+struct generic_dtd_entry {
+ u32 pixel_clock;
+ u16 hactive;
+ u16 hblank;
+ u16 hfront_porch;
+ u16 hsync;
+ u16 vactive;
+ u16 vblank;
+ u16 vfront_porch;
+ u16 vsync;
+ u16 width_mm;
+ u16 height_mm;
+
+ /* Flags */
+ u8 rsvd_flags:6;
+ u8 vsync_positive_polarity:1;
+ u8 hsync_positive_polarity:1;
+
+ u8 rsvd[3];
+} __packed;
+
+struct bdb_generic_dtd {
+ u16 gdtd_size;
+ struct generic_dtd_entry dtd[]; /* up to 24 DTD's */
+} __packed;
+
+#endif /* _INTEL_VBT_DEFS_H_ */
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
new file mode 100644
index 000000000..269f97923
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
@@ -0,0 +1,1218 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2018 Intel Corporation
+ *
+ * Author: Gaurav K Singh <gaurav.k.singh@intel.com>
+ * Manasi Navare <manasi.d.navare@intel.com>
+ */
+#include <linux/limits.h>
+
+#include <drm/display/drm_dsc_helper.h>
+
+#include "i915_drv.h"
+#include "intel_crtc.h"
+#include "intel_de.h"
+#include "intel_display_types.h"
+#include "intel_dsi.h"
+#include "intel_qp_tables.h"
+#include "intel_vdsc.h"
+
+enum ROW_INDEX_BPP {
+ ROW_INDEX_6BPP = 0,
+ ROW_INDEX_8BPP,
+ ROW_INDEX_10BPP,
+ ROW_INDEX_12BPP,
+ ROW_INDEX_15BPP,
+ MAX_ROW_INDEX
+};
+
+enum COLUMN_INDEX_BPC {
+ COLUMN_INDEX_8BPC = 0,
+ COLUMN_INDEX_10BPC,
+ COLUMN_INDEX_12BPC,
+ COLUMN_INDEX_14BPC,
+ COLUMN_INDEX_16BPC,
+ MAX_COLUMN_INDEX
+};
+
+/* From the DSC_v1.11 spec: rc_parameter_set syntax elements that are typically constant */
+static const u16 rc_buf_thresh[] = {
+ 896, 1792, 2688, 3584, 4480, 5376, 6272, 6720, 7168, 7616,
+ 7744, 7872, 8000, 8064
+};
+
+struct rc_parameters {
+ u16 initial_xmit_delay;
+ u8 first_line_bpg_offset;
+ u16 initial_offset;
+ u8 flatness_min_qp;
+ u8 flatness_max_qp;
+ u8 rc_quant_incr_limit0;
+ u8 rc_quant_incr_limit1;
+ struct drm_dsc_rc_range_parameters rc_range_params[DSC_NUM_BUF_RANGES];
+};
+
+/*
+ * Selected Rate Control Related Parameter Recommended Values
+ * from DSC_v1.11 spec & C Model release: DSC_model_20161212
+ */
+static const struct rc_parameters rc_parameters[][MAX_COLUMN_INDEX] = {
+{
+ /* 6BPP/8BPC */
+ { 768, 15, 6144, 3, 13, 11, 11, {
+ { 0, 4, 0 }, { 1, 6, -2 }, { 3, 8, -2 }, { 4, 8, -4 },
+ { 5, 9, -6 }, { 5, 9, -6 }, { 6, 9, -6 }, { 6, 10, -8 },
+ { 7, 11, -8 }, { 8, 12, -10 }, { 9, 12, -10 }, { 10, 12, -12 },
+ { 10, 12, -12 }, { 11, 12, -12 }, { 13, 14, -12 }
+ }
+ },
+ /* 6BPP/10BPC */
+ { 768, 15, 6144, 7, 17, 15, 15, {
+ { 0, 8, 0 }, { 3, 10, -2 }, { 7, 12, -2 }, { 8, 12, -4 },
+ { 9, 13, -6 }, { 9, 13, -6 }, { 10, 13, -6 }, { 10, 14, -8 },
+ { 11, 15, -8 }, { 12, 16, -10 }, { 13, 16, -10 },
+ { 14, 16, -12 }, { 14, 16, -12 }, { 15, 16, -12 },
+ { 17, 18, -12 }
+ }
+ },
+ /* 6BPP/12BPC */
+ { 768, 15, 6144, 11, 21, 19, 19, {
+ { 0, 12, 0 }, { 5, 14, -2 }, { 11, 16, -2 }, { 12, 16, -4 },
+ { 13, 17, -6 }, { 13, 17, -6 }, { 14, 17, -6 }, { 14, 18, -8 },
+ { 15, 19, -8 }, { 16, 20, -10 }, { 17, 20, -10 },
+ { 18, 20, -12 }, { 18, 20, -12 }, { 19, 20, -12 },
+ { 21, 22, -12 }
+ }
+ },
+ /* 6BPP/14BPC */
+ { 768, 15, 6144, 15, 25, 23, 27, {
+ { 0, 16, 0 }, { 7, 18, -2 }, { 15, 20, -2 }, { 16, 20, -4 },
+ { 17, 21, -6 }, { 17, 21, -6 }, { 18, 21, -6 }, { 18, 22, -8 },
+ { 19, 23, -8 }, { 20, 24, -10 }, { 21, 24, -10 },
+ { 22, 24, -12 }, { 22, 24, -12 }, { 23, 24, -12 },
+ { 25, 26, -12 }
+ }
+ },
+ /* 6BPP/16BPC */
+ { 768, 15, 6144, 19, 29, 27, 27, {
+ { 0, 20, 0 }, { 9, 22, -2 }, { 19, 24, -2 }, { 20, 24, -4 },
+ { 21, 25, -6 }, { 21, 25, -6 }, { 22, 25, -6 }, { 22, 26, -8 },
+ { 23, 27, -8 }, { 24, 28, -10 }, { 25, 28, -10 },
+ { 26, 28, -12 }, { 26, 28, -12 }, { 27, 28, -12 },
+ { 29, 30, -12 }
+ }
+ },
+},
+{
+ /* 8BPP/8BPC */
+ { 512, 12, 6144, 3, 12, 11, 11, {
+ { 0, 4, 2 }, { 0, 4, 0 }, { 1, 5, 0 }, { 1, 6, -2 },
+ { 3, 7, -4 }, { 3, 7, -6 }, { 3, 7, -8 }, { 3, 8, -8 },
+ { 3, 9, -8 }, { 3, 10, -10 }, { 5, 11, -10 }, { 5, 12, -12 },
+ { 5, 13, -12 }, { 7, 13, -12 }, { 13, 15, -12 }
+ }
+ },
+ /* 8BPP/10BPC */
+ { 512, 12, 6144, 7, 16, 15, 15, {
+ { 0, 4, 2 }, { 4, 8, 0 }, { 5, 9, 0 }, { 5, 10, -2 },
+ { 7, 11, -4 }, { 7, 11, -6 }, { 7, 11, -8 }, { 7, 12, -8 },
+ { 7, 13, -8 }, { 7, 14, -10 }, { 9, 15, -10 }, { 9, 16, -12 },
+ { 9, 17, -12 }, { 11, 17, -12 }, { 17, 19, -12 }
+ }
+ },
+ /* 8BPP/12BPC */
+ { 512, 12, 6144, 11, 20, 19, 19, {
+ { 0, 12, 2 }, { 4, 12, 0 }, { 9, 13, 0 }, { 9, 14, -2 },
+ { 11, 15, -4 }, { 11, 15, -6 }, { 11, 15, -8 }, { 11, 16, -8 },
+ { 11, 17, -8 }, { 11, 18, -10 }, { 13, 19, -10 },
+ { 13, 20, -12 }, { 13, 21, -12 }, { 15, 21, -12 },
+ { 21, 23, -12 }
+ }
+ },
+ /* 8BPP/14BPC */
+ { 512, 12, 6144, 15, 24, 23, 23, {
+ { 0, 12, 0 }, { 5, 13, 0 }, { 11, 15, 0 }, { 12, 17, -2 },
+ { 15, 19, -4 }, { 15, 19, -6 }, { 15, 19, -8 }, { 15, 20, -8 },
+ { 15, 21, -8 }, { 15, 22, -10 }, { 17, 22, -10 },
+ { 17, 23, -12 }, { 17, 23, -12 }, { 21, 24, -12 },
+ { 24, 25, -12 }
+ }
+ },
+ /* 8BPP/16BPC */
+ { 512, 12, 6144, 19, 28, 27, 27, {
+ { 0, 12, 2 }, { 6, 14, 0 }, { 13, 17, 0 }, { 15, 20, -2 },
+ { 19, 23, -4 }, { 19, 23, -6 }, { 19, 23, -8 }, { 19, 24, -8 },
+ { 19, 25, -8 }, { 19, 26, -10 }, { 21, 26, -10 },
+ { 21, 27, -12 }, { 21, 27, -12 }, { 25, 28, -12 },
+ { 28, 29, -12 }
+ }
+ },
+},
+{
+ /* 10BPP/8BPC */
+ { 410, 15, 5632, 3, 12, 11, 11, {
+ { 0, 3, 2 }, { 0, 4, 0 }, { 1, 5, 0 }, { 2, 6, -2 },
+ { 3, 7, -4 }, { 3, 7, -6 }, { 3, 7, -8 }, { 3, 8, -8 },
+ { 3, 9, -8 }, { 3, 9, -10 }, { 5, 10, -10 }, { 5, 10, -10 },
+ { 5, 11, -12 }, { 7, 11, -12 }, { 11, 12, -12 }
+ }
+ },
+ /* 10BPP/10BPC */
+ { 410, 15, 5632, 7, 16, 15, 15, {
+ { 0, 7, 2 }, { 4, 8, 0 }, { 5, 9, 0 }, { 6, 10, -2 },
+ { 7, 11, -4 }, { 7, 11, -6 }, { 7, 11, -8 }, { 7, 12, -8 },
+ { 7, 13, -8 }, { 7, 13, -10 }, { 9, 14, -10 }, { 9, 14, -10 },
+ { 9, 15, -12 }, { 11, 15, -12 }, { 15, 16, -12 }
+ }
+ },
+ /* 10BPP/12BPC */
+ { 410, 15, 5632, 11, 20, 19, 19, {
+ { 0, 11, 2 }, { 4, 12, 0 }, { 9, 13, 0 }, { 10, 14, -2 },
+ { 11, 15, -4 }, { 11, 15, -6 }, { 11, 15, -8 }, { 11, 16, -8 },
+ { 11, 17, -8 }, { 11, 17, -10 }, { 13, 18, -10 },
+ { 13, 18, -10 }, { 13, 19, -12 }, { 15, 19, -12 },
+ { 19, 20, -12 }
+ }
+ },
+ /* 10BPP/14BPC */
+ { 410, 15, 5632, 15, 24, 23, 23, {
+ { 0, 11, 2 }, { 5, 13, 0 }, { 11, 15, 0 }, { 13, 18, -2 },
+ { 15, 19, -4 }, { 15, 19, -6 }, { 15, 19, -8 }, { 15, 20, -8 },
+ { 15, 21, -8 }, { 15, 21, -10 }, { 17, 22, -10 },
+ { 17, 22, -10 }, { 17, 23, -12 }, { 19, 23, -12 },
+ { 23, 24, -12 }
+ }
+ },
+ /* 10BPP/16BPC */
+ { 410, 15, 5632, 19, 28, 27, 27, {
+ { 0, 11, 2 }, { 6, 14, 0 }, { 13, 17, 0 }, { 16, 20, -2 },
+ { 19, 23, -4 }, { 19, 23, -6 }, { 19, 23, -8 }, { 19, 24, -8 },
+ { 19, 25, -8 }, { 19, 25, -10 }, { 21, 26, -10 },
+ { 21, 26, -10 }, { 21, 27, -12 }, { 23, 27, -12 },
+ { 27, 28, -12 }
+ }
+ },
+},
+{
+ /* 12BPP/8BPC */
+ { 341, 15, 2048, 3, 12, 11, 11, {
+ { 0, 2, 2 }, { 0, 4, 0 }, { 1, 5, 0 }, { 1, 6, -2 },
+ { 3, 7, -4 }, { 3, 7, -6 }, { 3, 7, -8 }, { 3, 8, -8 },
+ { 3, 9, -8 }, { 3, 10, -10 }, { 5, 11, -10 },
+ { 5, 12, -12 }, { 5, 13, -12 }, { 7, 13, -12 }, { 13, 15, -12 }
+ }
+ },
+ /* 12BPP/10BPC */
+ { 341, 15, 2048, 7, 16, 15, 15, {
+ { 0, 2, 2 }, { 2, 5, 0 }, { 3, 7, 0 }, { 4, 8, -2 },
+ { 6, 9, -4 }, { 7, 10, -6 }, { 7, 11, -8 }, { 7, 12, -8 },
+ { 7, 13, -8 }, { 7, 14, -10 }, { 9, 15, -10 }, { 9, 16, -12 },
+ { 9, 17, -12 }, { 11, 17, -12 }, { 17, 19, -12 }
+ }
+ },
+ /* 12BPP/12BPC */
+ { 341, 15, 2048, 11, 20, 19, 19, {
+ { 0, 6, 2 }, { 4, 9, 0 }, { 7, 11, 0 }, { 8, 12, -2 },
+ { 10, 13, -4 }, { 11, 14, -6 }, { 11, 15, -8 }, { 11, 16, -8 },
+ { 11, 17, -8 }, { 11, 18, -10 }, { 13, 19, -10 },
+ { 13, 20, -12 }, { 13, 21, -12 }, { 15, 21, -12 },
+ { 21, 23, -12 }
+ }
+ },
+ /* 12BPP/14BPC */
+ { 341, 15, 2048, 15, 24, 23, 23, {
+ { 0, 6, 2 }, { 7, 10, 0 }, { 9, 13, 0 }, { 11, 16, -2 },
+ { 14, 17, -4 }, { 15, 18, -6 }, { 15, 19, -8 }, { 15, 20, -8 },
+ { 15, 20, -8 }, { 15, 21, -10 }, { 17, 21, -10 },
+ { 17, 21, -12 }, { 17, 21, -12 }, { 19, 22, -12 },
+ { 22, 23, -12 }
+ }
+ },
+ /* 12BPP/16BPC */
+ { 341, 15, 2048, 19, 28, 27, 27, {
+ { 0, 6, 2 }, { 6, 11, 0 }, { 11, 15, 0 }, { 14, 18, -2 },
+ { 18, 21, -4 }, { 19, 22, -6 }, { 19, 23, -8 }, { 19, 24, -8 },
+ { 19, 24, -8 }, { 19, 25, -10 }, { 21, 25, -10 },
+ { 21, 25, -12 }, { 21, 25, -12 }, { 23, 26, -12 },
+ { 26, 27, -12 }
+ }
+ },
+},
+{
+ /* 15BPP/8BPC */
+ { 273, 15, 2048, 3, 12, 11, 11, {
+ { 0, 0, 10 }, { 0, 1, 8 }, { 0, 1, 6 }, { 0, 2, 4 },
+ { 1, 2, 2 }, { 1, 3, 0 }, { 1, 3, -2 }, { 2, 4, -4 },
+ { 2, 5, -6 }, { 3, 5, -8 }, { 4, 6, -10 }, { 4, 7, -10 },
+ { 5, 7, -12 }, { 7, 8, -12 }, { 8, 9, -12 }
+ }
+ },
+ /* 15BPP/10BPC */
+ { 273, 15, 2048, 7, 16, 15, 15, {
+ { 0, 2, 10 }, { 2, 5, 8 }, { 3, 5, 6 }, { 4, 6, 4 },
+ { 5, 6, 2 }, { 5, 7, 0 }, { 5, 7, -2 }, { 6, 8, -4 },
+ { 6, 9, -6 }, { 7, 9, -8 }, { 8, 10, -10 }, { 8, 11, -10 },
+ { 9, 11, -12 }, { 11, 12, -12 }, { 12, 13, -12 }
+ }
+ },
+ /* 15BPP/12BPC */
+ { 273, 15, 2048, 11, 20, 19, 19, {
+ { 0, 4, 10 }, { 2, 7, 8 }, { 4, 9, 6 }, { 6, 11, 4 },
+ { 9, 11, 2 }, { 9, 11, 0 }, { 9, 12, -2 }, { 10, 12, -4 },
+ { 11, 13, -6 }, { 11, 13, -8 }, { 12, 14, -10 },
+ { 13, 15, -10 }, { 13, 15, -12 }, { 15, 16, -12 },
+ { 16, 17, -12 }
+ }
+ },
+ /* 15BPP/14BPC */
+ { 273, 15, 2048, 15, 24, 23, 23, {
+ { 0, 4, 10 }, { 3, 8, 8 }, { 6, 11, 6 }, { 9, 14, 4 },
+ { 13, 15, 2 }, { 13, 15, 0 }, { 13, 16, -2 }, { 14, 16, -4 },
+ { 15, 17, -6 }, { 15, 17, -8 }, { 16, 18, -10 },
+ { 17, 19, -10 }, { 17, 19, -12 }, { 19, 20, -12 },
+ { 20, 21, -12 }
+ }
+ },
+ /* 15BPP/16BPC */
+ { 273, 15, 2048, 19, 28, 27, 27, {
+ { 0, 4, 10 }, { 4, 9, 8 }, { 8, 13, 6 }, { 12, 17, 4 },
+ { 17, 19, 2 }, { 17, 20, 0 }, { 17, 20, -2 }, { 18, 20, -4 },
+ { 19, 21, -6 }, { 19, 21, -8 }, { 20, 22, -10 },
+ { 21, 23, -10 }, { 21, 23, -12 }, { 23, 24, -12 },
+ { 24, 25, -12 }
+ }
+ }
+}
+};
+
+static int get_row_index_for_rc_params(u16 compressed_bpp)
+{
+ switch (compressed_bpp) {
+ case 6:
+ return ROW_INDEX_6BPP;
+ case 8:
+ return ROW_INDEX_8BPP;
+ case 10:
+ return ROW_INDEX_10BPP;
+ case 12:
+ return ROW_INDEX_12BPP;
+ case 15:
+ return ROW_INDEX_15BPP;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int get_column_index_for_rc_params(u8 bits_per_component)
+{
+ switch (bits_per_component) {
+ case 8:
+ return COLUMN_INDEX_8BPC;
+ case 10:
+ return COLUMN_INDEX_10BPC;
+ case 12:
+ return COLUMN_INDEX_12BPC;
+ case 14:
+ return COLUMN_INDEX_14BPC;
+ case 16:
+ return COLUMN_INDEX_16BPC;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct rc_parameters *get_rc_params(u16 compressed_bpp,
+ u8 bits_per_component)
+{
+ int row_index, column_index;
+
+ row_index = get_row_index_for_rc_params(compressed_bpp);
+ if (row_index < 0)
+ return NULL;
+
+ column_index = get_column_index_for_rc_params(bits_per_component);
+ if (column_index < 0)
+ return NULL;
+
+ return &rc_parameters[row_index][column_index];
+}
+
+bool intel_dsc_source_support(const struct intel_crtc_state *crtc_state)
+{
+ const struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+
+ if (!RUNTIME_INFO(i915)->has_dsc)
+ return false;
+
+ if (DISPLAY_VER(i915) >= 12)
+ return true;
+
+ if (DISPLAY_VER(i915) >= 11 && cpu_transcoder != TRANSCODER_A)
+ return true;
+
+ return false;
+}
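+
+/*
+ * Example of the checks above: on display version 11 (ICL) the source
+ * support check passes for every transcoder except TRANSCODER_A, while
+ * from version 12 onwards it passes wherever the platform reports DSC.
+ */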
+
+static bool is_pipe_dsc(struct intel_crtc *crtc, enum transcoder cpu_transcoder)
+{
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+
+ if (DISPLAY_VER(i915) >= 12)
+ return true;
+
+ if (cpu_transcoder == TRANSCODER_EDP ||
+ cpu_transcoder == TRANSCODER_DSI_0 ||
+ cpu_transcoder == TRANSCODER_DSI_1)
+ return false;
+
+ /* There's no pipe A DSC engine on ICL */
+ drm_WARN_ON(&i915->drm, crtc->pipe == PIPE_A);
+
+ return true;
+}
+
+static void
+calculate_rc_params(struct rc_parameters *rc,
+ struct drm_dsc_config *vdsc_cfg)
+{
+ int bpc = vdsc_cfg->bits_per_component;
+ int bpp = vdsc_cfg->bits_per_pixel >> 4;
+ static const s8 ofs_und6[] = {
+ 0, -2, -2, -4, -6, -6, -8, -8, -8, -10, -10, -12, -12, -12, -12
+ };
+ static const s8 ofs_und8[] = {
+ 2, 0, 0, -2, -4, -6, -8, -8, -8, -10, -10, -10, -12, -12, -12
+ };
+ static const s8 ofs_und12[] = {
+ 2, 0, 0, -2, -4, -6, -8, -8, -8, -10, -10, -10, -12, -12, -12
+ };
+ static const s8 ofs_und15[] = {
+ 10, 8, 6, 4, 2, 0, -2, -4, -6, -8, -10, -10, -12, -12, -12
+ };
+ int qp_bpc_modifier = (bpc - 8) * 2;
+ u32 res, buf_i, bpp_i;
+
+ if (vdsc_cfg->slice_height >= 8)
+ rc->first_line_bpg_offset =
+ 12 + DIV_ROUND_UP((9 * min(34, vdsc_cfg->slice_height - 8)), 100);
+ else
+ rc->first_line_bpg_offset = 2 * (vdsc_cfg->slice_height - 1);
+
+	/* Our HW supports only 4:4:4 modes as of today */
+ if (bpp >= 12)
+ rc->initial_offset = 2048;
+ else if (bpp >= 10)
+ rc->initial_offset = 5632 - DIV_ROUND_UP(((bpp - 10) * 3584), 2);
+ else if (bpp >= 8)
+ rc->initial_offset = 6144 - DIV_ROUND_UP(((bpp - 8) * 512), 2);
+ else
+ rc->initial_offset = 6144;
+
+ /* initial_xmit_delay = rc_model_size/2/compression_bpp */
+ rc->initial_xmit_delay = DIV_ROUND_UP(DSC_RC_MODEL_SIZE_CONST, 2 * bpp);
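+ /*
+ * E.g. with the 8192 rc_model_size constant and 8 bpp this is
+ * DIV_ROUND_UP(8192, 16) = 512 (illustrative bpp).
+ */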
+
+ rc->flatness_min_qp = 3 + qp_bpc_modifier;
+ rc->flatness_max_qp = 12 + qp_bpc_modifier;
+
+ rc->rc_quant_incr_limit0 = 11 + qp_bpc_modifier;
+ rc->rc_quant_incr_limit1 = 11 + qp_bpc_modifier;
+
+ bpp_i = (2 * (bpp - 6));
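+ /*
+ * bpp_i indexes the qp LUTs in 0.5 bpp steps starting at 6 bpp;
+ * e.g. bpp = 8 yields bpp_i = 4.
+ */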
+ for (buf_i = 0; buf_i < DSC_NUM_BUF_RANGES; buf_i++) {
+ /* Read range_min_qp and range_max_qp from the qp tables */
+ rc->rc_range_params[buf_i].range_min_qp =
+ intel_lookup_range_min_qp(bpc, buf_i, bpp_i);
+ rc->rc_range_params[buf_i].range_max_qp =
+ intel_lookup_range_max_qp(bpc, buf_i, bpp_i);
+
+ /* Calculate range_bpg_offset */
+ if (bpp <= 6) {
+ rc->rc_range_params[buf_i].range_bpg_offset = ofs_und6[buf_i];
+ } else if (bpp <= 8) {
+ res = DIV_ROUND_UP(((bpp - 6) * (ofs_und8[buf_i] - ofs_und6[buf_i])), 2);
+ rc->rc_range_params[buf_i].range_bpg_offset =
+ ofs_und6[buf_i] + res;
+ } else if (bpp <= 12) {
+ rc->rc_range_params[buf_i].range_bpg_offset =
+ ofs_und8[buf_i];
+ } else if (bpp <= 15) {
+ res = DIV_ROUND_UP(((bpp - 12) * (ofs_und15[buf_i] - ofs_und12[buf_i])), 3);
+ rc->rc_range_params[buf_i].range_bpg_offset =
+ ofs_und12[buf_i] + res;
+ } else {
+ rc->rc_range_params[buf_i].range_bpg_offset =
+ ofs_und15[buf_i];
+ }
+ }
+}
+
+int intel_dsc_compute_params(struct intel_crtc_state *pipe_config)
+{
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct drm_dsc_config *vdsc_cfg = &pipe_config->dsc.config;
+ u16 compressed_bpp = pipe_config->dsc.compressed_bpp;
+ const struct rc_parameters *rc_params;
+ struct rc_parameters *rc = NULL;
+ u8 i = 0;
+
+ vdsc_cfg->pic_width = pipe_config->hw.adjusted_mode.crtc_hdisplay;
+ vdsc_cfg->slice_width = DIV_ROUND_UP(vdsc_cfg->pic_width,
+ pipe_config->dsc.slice_count);
+
+ /* Gen 11 does not support YCbCr */
+ vdsc_cfg->simple_422 = false;
+ /* Gen 11 does not support VBR */
+ vdsc_cfg->vbr_enable = false;
+
+ /* Gen 11 only supports integral values of bpp */
+ vdsc_cfg->bits_per_pixel = compressed_bpp << 4;
+ vdsc_cfg->bits_per_component = pipe_config->pipe_bpp / 3;
+
+ for (i = 0; i < DSC_NUM_BUF_RANGES - 1; i++) {
+ /*
+ * Six 0s are appended to the LSB of each threshold value
+ * internally in h/w. Only 8 bits are allowed for programming
+ * RcBufThreshold.
+ */
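+ /* E.g. the first C-model threshold of 896 is programmed as 896 >> 6 = 14 */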
+ vdsc_cfg->rc_buf_thresh[i] = rc_buf_thresh[i] >> 6;
+ }
+
+ /*
+ * For 6bpp, RC buffer thresholds 12 and 13 need different values
+ * as per the C model
+ */
+ if (compressed_bpp == 6) {
+ vdsc_cfg->rc_buf_thresh[12] = 0x7C;
+ vdsc_cfg->rc_buf_thresh[13] = 0x7D;
+ }
+
+ /*
+ * From XE_LPD onwards we support compressed bpp in steps of 1
+ * up to uncompressed bpp-1, hence calculate all the rc
+ * parameters
+ */
+ if (DISPLAY_VER(dev_priv) >= 13) {
+ rc = kmalloc(sizeof(*rc), GFP_KERNEL);
+ if (!rc)
+ return -ENOMEM;
+
+ calculate_rc_params(rc, vdsc_cfg);
+ rc_params = rc;
+ } else {
+ rc_params = get_rc_params(compressed_bpp,
+ vdsc_cfg->bits_per_component);
+ if (!rc_params)
+ return -EINVAL;
+ }
+
+ vdsc_cfg->first_line_bpg_offset = rc_params->first_line_bpg_offset;
+ vdsc_cfg->initial_xmit_delay = rc_params->initial_xmit_delay;
+ vdsc_cfg->initial_offset = rc_params->initial_offset;
+ vdsc_cfg->flatness_min_qp = rc_params->flatness_min_qp;
+ vdsc_cfg->flatness_max_qp = rc_params->flatness_max_qp;
+ vdsc_cfg->rc_quant_incr_limit0 = rc_params->rc_quant_incr_limit0;
+ vdsc_cfg->rc_quant_incr_limit1 = rc_params->rc_quant_incr_limit1;
+
+ for (i = 0; i < DSC_NUM_BUF_RANGES; i++) {
+ vdsc_cfg->rc_range_params[i].range_min_qp =
+ rc_params->rc_range_params[i].range_min_qp;
+ vdsc_cfg->rc_range_params[i].range_max_qp =
+ rc_params->rc_range_params[i].range_max_qp;
+ /*
+ * Range BPG Offset uses 2's complement and is only 6 bits wide.
+ * So mask it to keep only those 6 bits.
+ */
+ vdsc_cfg->rc_range_params[i].range_bpg_offset =
+ rc_params->rc_range_params[i].range_bpg_offset &
+ DSC_RANGE_BPG_OFFSET_MASK;
+ }
+
+ /*
+ * BitsPerComponent value determines mux_word_size:
+ * When BitsPerComponent is 10bpc or less, muxWordSize will be
+ * 48 bits, otherwise 64
+ */
+ if (vdsc_cfg->bits_per_component <= 10)
+ vdsc_cfg->mux_word_size = DSC_MUX_WORD_SIZE_8_10_BPC;
+ else
+ vdsc_cfg->mux_word_size = DSC_MUX_WORD_SIZE_12_BPC;
+
+ /* InitialScaleValue is a 6 bit value with 3 fractional bits (U3.3) */
+ vdsc_cfg->initial_scale_value = (vdsc_cfg->rc_model_size << 3) /
+ (vdsc_cfg->rc_model_size - vdsc_cfg->initial_offset);
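+ /*
+ * E.g. rc_model_size = 8192 and initial_offset = 6144 give
+ * (8192 << 3) / 2048 = 32, i.e. 4.0 in U3.3 (illustrative values).
+ */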
+
+ kfree(rc);
+
+ return 0;
+}
+
+enum intel_display_power_domain
+intel_dsc_power_domain(struct intel_crtc *crtc, enum transcoder cpu_transcoder)
+{
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+
+ /*
+ * VDSC/joining uses a separate power well, PW2, and requires
+ * POWER_DOMAIN_TRANSCODER_VDSC_PW2 power domain in two cases:
+ *
+ * - ICL eDP/DSI transcoder
+ * - Display version 12 (except RKL) pipe A
+ *
+ * For any other pipe, VDSC/joining uses the power well associated with
+ * the pipe in use. Hence another reference on the pipe power domain
+ * will suffice. (Except no VDSC/joining on ICL pipe A.)
+ */
+ if (DISPLAY_VER(i915) == 12 && !IS_ROCKETLAKE(i915) && pipe == PIPE_A)
+ return POWER_DOMAIN_TRANSCODER_VDSC_PW2;
+ else if (is_pipe_dsc(crtc, cpu_transcoder))
+ return POWER_DOMAIN_PIPE(pipe);
+ else
+ return POWER_DOMAIN_TRANSCODER_VDSC_PW2;
+}
+
+static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ enum pipe pipe = crtc->pipe;
+ u32 pps_val = 0;
+ u32 rc_buf_thresh_dword[4];
+ u32 rc_range_params_dword[8];
+ u8 num_vdsc_instances = (crtc_state->dsc.dsc_split) ? 2 : 1;
+ int i = 0;
+
+ if (crtc_state->bigjoiner_pipes)
+ num_vdsc_instances *= 2;
+
+ /* Populate PICTURE_PARAMETER_SET_0 registers */
+ pps_val = DSC_VER_MAJ | vdsc_cfg->dsc_version_minor <<
+ DSC_VER_MIN_SHIFT |
+ vdsc_cfg->bits_per_component << DSC_BPC_SHIFT |
+ vdsc_cfg->line_buf_depth << DSC_LINE_BUF_DEPTH_SHIFT;
+ if (vdsc_cfg->dsc_version_minor == 2)
+ pps_val |= DSC_ALT_ICH_SEL;
+ if (vdsc_cfg->block_pred_enable)
+ pps_val |= DSC_BLOCK_PREDICTION;
+ if (vdsc_cfg->convert_rgb)
+ pps_val |= DSC_COLOR_SPACE_CONVERSION;
+ if (vdsc_cfg->simple_422)
+ pps_val |= DSC_422_ENABLE;
+ if (vdsc_cfg->vbr_enable)
+ pps_val |= DSC_VBR_ENABLE;
+ drm_dbg_kms(&dev_priv->drm, "PPS0 = 0x%08x\n", pps_val);
+ if (!is_pipe_dsc(crtc, cpu_transcoder)) {
+ intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_0,
+ pps_val);
+ /*
+ * If 2 VDSC instances are needed, configure PPS for second
+ * VDSC
+ */
+ if (crtc_state->dsc.dsc_split)
+ intel_de_write(dev_priv, DSCC_PICTURE_PARAMETER_SET_0,
+ pps_val);
+ } else {
+ intel_de_write(dev_priv,
+ ICL_DSC0_PICTURE_PARAMETER_SET_0(pipe),
+ pps_val);
+ if (crtc_state->dsc.dsc_split)
+ intel_de_write(dev_priv,
+ ICL_DSC1_PICTURE_PARAMETER_SET_0(pipe),
+ pps_val);
+ }
+
+ /* Populate PICTURE_PARAMETER_SET_1 registers */
+ pps_val = 0;
+ pps_val |= DSC_BPP(vdsc_cfg->bits_per_pixel);
+ drm_dbg_kms(&dev_priv->drm, "PPS1 = 0x%08x\n", pps_val);
+ if (!is_pipe_dsc(crtc, cpu_transcoder)) {
+ intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_1,
+ pps_val);
+ /*
+ * If 2 VDSC instances are needed, configure PPS for second
+ * VDSC
+ */
+ if (crtc_state->dsc.dsc_split)
+ intel_de_write(dev_priv, DSCC_PICTURE_PARAMETER_SET_1,
+ pps_val);
+ } else {
+ intel_de_write(dev_priv,
+ ICL_DSC0_PICTURE_PARAMETER_SET_1(pipe),
+ pps_val);
+ if (crtc_state->dsc.dsc_split)
+ intel_de_write(dev_priv,
+ ICL_DSC1_PICTURE_PARAMETER_SET_1(pipe),
+ pps_val);
+ }
+
+ /* Populate PICTURE_PARAMETER_SET_2 registers */
+ pps_val = 0;
+ pps_val |= DSC_PIC_HEIGHT(vdsc_cfg->pic_height) |
+ DSC_PIC_WIDTH(vdsc_cfg->pic_width / num_vdsc_instances);
+ drm_dbg_kms(&dev_priv->drm, "PPS2 = 0x%08x\n", pps_val);
+ if (!is_pipe_dsc(crtc, cpu_transcoder)) {
+ intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_2,
+ pps_val);
+ /*
+ * If 2 VDSC instances are needed, configure PPS for second
+ * VDSC
+ */
+ if (crtc_state->dsc.dsc_split)
+ intel_de_write(dev_priv, DSCC_PICTURE_PARAMETER_SET_2,
+ pps_val);
+ } else {
+ intel_de_write(dev_priv,
+ ICL_DSC0_PICTURE_PARAMETER_SET_2(pipe),
+ pps_val);
+ if (crtc_state->dsc.dsc_split)
+ intel_de_write(dev_priv,
+ ICL_DSC1_PICTURE_PARAMETER_SET_2(pipe),
+ pps_val);
+ }
+
+ /* Populate PICTURE_PARAMETER_SET_3 registers */
+ pps_val = 0;
+ pps_val |= DSC_SLICE_HEIGHT(vdsc_cfg->slice_height) |
+ DSC_SLICE_WIDTH(vdsc_cfg->slice_width);
+ drm_dbg_kms(&dev_priv->drm, "PPS3 = 0x%08x\n", pps_val);
+ if (!is_pipe_dsc(crtc, cpu_transcoder)) {
+ intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_3,
+ pps_val);
+ /*
+ * If 2 VDSC instances are needed, configure PPS for second
+ * VDSC
+ */
+ if (crtc_state->dsc.dsc_split)
+ intel_de_write(dev_priv, DSCC_PICTURE_PARAMETER_SET_3,
+ pps_val);
+ } else {
+ intel_de_write(dev_priv,
+ ICL_DSC0_PICTURE_PARAMETER_SET_3(pipe),
+ pps_val);
+ if (crtc_state->dsc.dsc_split)
+ intel_de_write(dev_priv,
+ ICL_DSC1_PICTURE_PARAMETER_SET_3(pipe),
+ pps_val);
+ }
+
+ /* Populate PICTURE_PARAMETER_SET_4 registers */
+ pps_val = 0;
+ pps_val |= DSC_INITIAL_XMIT_DELAY(vdsc_cfg->initial_xmit_delay) |
+ DSC_INITIAL_DEC_DELAY(vdsc_cfg->initial_dec_delay);
+ drm_dbg_kms(&dev_priv->drm, "PPS4 = 0x%08x\n", pps_val);
+ if (!is_pipe_dsc(crtc, cpu_transcoder)) {
+ intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_4,
+ pps_val);
+ /*
+ * If 2 VDSC instances are needed, configure PPS for second
+ * VDSC
+ */
+ if (crtc_state->dsc.dsc_split)
+ intel_de_write(dev_priv, DSCC_PICTURE_PARAMETER_SET_4,
+ pps_val);
+ } else {
+ intel_de_write(dev_priv,
+ ICL_DSC0_PICTURE_PARAMETER_SET_4(pipe),
+ pps_val);
+ if (crtc_state->dsc.dsc_split)
+ intel_de_write(dev_priv,
+ ICL_DSC1_PICTURE_PARAMETER_SET_4(pipe),
+ pps_val);
+ }
+
+ /* Populate PICTURE_PARAMETER_SET_5 registers */
+ pps_val = 0;
+ pps_val |= DSC_SCALE_INC_INT(vdsc_cfg->scale_increment_interval) |
+ DSC_SCALE_DEC_INT(vdsc_cfg->scale_decrement_interval);
+ drm_dbg_kms(&dev_priv->drm, "PPS5 = 0x%08x\n", pps_val);
+ if (!is_pipe_dsc(crtc, cpu_transcoder)) {
+ intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_5,
+ pps_val);
+ /*
+ * If 2 VDSC instances are needed, configure PPS for second
+ * VDSC
+ */
+ if (crtc_state->dsc.dsc_split)
+ intel_de_write(dev_priv, DSCC_PICTURE_PARAMETER_SET_5,
+ pps_val);
+ } else {
+ intel_de_write(dev_priv,
+ ICL_DSC0_PICTURE_PARAMETER_SET_5(pipe),
+ pps_val);
+ if (crtc_state->dsc.dsc_split)
+ intel_de_write(dev_priv,
+ ICL_DSC1_PICTURE_PARAMETER_SET_5(pipe),
+ pps_val);
+ }
+
+ /* Populate PICTURE_PARAMETER_SET_6 registers */
+ pps_val = 0;
+ pps_val |= DSC_INITIAL_SCALE_VALUE(vdsc_cfg->initial_scale_value) |
+ DSC_FIRST_LINE_BPG_OFFSET(vdsc_cfg->first_line_bpg_offset) |
+ DSC_FLATNESS_MIN_QP(vdsc_cfg->flatness_min_qp) |
+ DSC_FLATNESS_MAX_QP(vdsc_cfg->flatness_max_qp);
+ drm_dbg_kms(&dev_priv->drm, "PPS6 = 0x%08x\n", pps_val);
+ if (!is_pipe_dsc(crtc, cpu_transcoder)) {
+ intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_6,
+ pps_val);
+ /*
+ * If 2 VDSC instances are needed, configure PPS for second
+ * VDSC
+ */
+ if (crtc_state->dsc.dsc_split)
+ intel_de_write(dev_priv, DSCC_PICTURE_PARAMETER_SET_6,
+ pps_val);
+ } else {
+ intel_de_write(dev_priv,
+ ICL_DSC0_PICTURE_PARAMETER_SET_6(pipe),
+ pps_val);
+ if (crtc_state->dsc.dsc_split)
+ intel_de_write(dev_priv,
+ ICL_DSC1_PICTURE_PARAMETER_SET_6(pipe),
+ pps_val);
+ }
+
+ /* Populate PICTURE_PARAMETER_SET_7 registers */
+ pps_val = 0;
+ pps_val |= DSC_SLICE_BPG_OFFSET(vdsc_cfg->slice_bpg_offset) |
+ DSC_NFL_BPG_OFFSET(vdsc_cfg->nfl_bpg_offset);
+ drm_dbg_kms(&dev_priv->drm, "PPS7 = 0x%08x\n", pps_val);
+ if (!is_pipe_dsc(crtc, cpu_transcoder)) {
+ intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_7,
+ pps_val);
+ /*
+ * If 2 VDSC instances are needed, configure PPS for second
+ * VDSC
+ */
+ if (crtc_state->dsc.dsc_split)
+ intel_de_write(dev_priv, DSCC_PICTURE_PARAMETER_SET_7,
+ pps_val);
+ } else {
+ intel_de_write(dev_priv,
+ ICL_DSC0_PICTURE_PARAMETER_SET_7(pipe),
+ pps_val);
+ if (crtc_state->dsc.dsc_split)
+ intel_de_write(dev_priv,
+ ICL_DSC1_PICTURE_PARAMETER_SET_7(pipe),
+ pps_val);
+ }
+
+ /* Populate PICTURE_PARAMETER_SET_8 registers */
+ pps_val = 0;
+ pps_val |= DSC_FINAL_OFFSET(vdsc_cfg->final_offset) |
+ DSC_INITIAL_OFFSET(vdsc_cfg->initial_offset);
+ drm_dbg_kms(&dev_priv->drm, "PPS8 = 0x%08x\n", pps_val);
+ if (!is_pipe_dsc(crtc, cpu_transcoder)) {
+ intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_8,
+ pps_val);
+ /*
+ * If 2 VDSC instances are needed, configure PPS for second
+ * VDSC
+ */
+ if (crtc_state->dsc.dsc_split)
+ intel_de_write(dev_priv, DSCC_PICTURE_PARAMETER_SET_8,
+ pps_val);
+ } else {
+ intel_de_write(dev_priv,
+ ICL_DSC0_PICTURE_PARAMETER_SET_8(pipe),
+ pps_val);
+ if (crtc_state->dsc.dsc_split)
+ intel_de_write(dev_priv,
+ ICL_DSC1_PICTURE_PARAMETER_SET_8(pipe),
+ pps_val);
+ }
+
+ /* Populate PICTURE_PARAMETER_SET_9 registers */
+ pps_val = 0;
+ pps_val |= DSC_RC_MODEL_SIZE(vdsc_cfg->rc_model_size) |
+ DSC_RC_EDGE_FACTOR(DSC_RC_EDGE_FACTOR_CONST);
+ drm_dbg_kms(&dev_priv->drm, "PPS9 = 0x%08x\n", pps_val);
+ if (!is_pipe_dsc(crtc, cpu_transcoder)) {
+ intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_9,
+ pps_val);
+ /*
+ * If 2 VDSC instances are needed, configure PPS for second
+ * VDSC
+ */
+ if (crtc_state->dsc.dsc_split)
+ intel_de_write(dev_priv, DSCC_PICTURE_PARAMETER_SET_9,
+ pps_val);
+ } else {
+ intel_de_write(dev_priv,
+ ICL_DSC0_PICTURE_PARAMETER_SET_9(pipe),
+ pps_val);
+ if (crtc_state->dsc.dsc_split)
+ intel_de_write(dev_priv,
+ ICL_DSC1_PICTURE_PARAMETER_SET_9(pipe),
+ pps_val);
+ }
+
+ /* Populate PICTURE_PARAMETER_SET_10 registers */
+ pps_val = 0;
+ pps_val |= DSC_RC_QUANT_INC_LIMIT0(vdsc_cfg->rc_quant_incr_limit0) |
+ DSC_RC_QUANT_INC_LIMIT1(vdsc_cfg->rc_quant_incr_limit1) |
+ DSC_RC_TARGET_OFF_HIGH(DSC_RC_TGT_OFFSET_HI_CONST) |
+ DSC_RC_TARGET_OFF_LOW(DSC_RC_TGT_OFFSET_LO_CONST);
+ drm_dbg_kms(&dev_priv->drm, "PPS10 = 0x%08x\n", pps_val);
+ if (!is_pipe_dsc(crtc, cpu_transcoder)) {
+ intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_10,
+ pps_val);
+ /*
+ * If 2 VDSC instances are needed, configure PPS for second
+ * VDSC
+ */
+ if (crtc_state->dsc.dsc_split)
+ intel_de_write(dev_priv,
+ DSCC_PICTURE_PARAMETER_SET_10, pps_val);
+ } else {
+ intel_de_write(dev_priv,
+ ICL_DSC0_PICTURE_PARAMETER_SET_10(pipe),
+ pps_val);
+ if (crtc_state->dsc.dsc_split)
+ intel_de_write(dev_priv,
+ ICL_DSC1_PICTURE_PARAMETER_SET_10(pipe),
+ pps_val);
+ }
+
+ /* Populate Picture parameter set 16 */
+ pps_val = 0;
+ pps_val |= DSC_SLICE_CHUNK_SIZE(vdsc_cfg->slice_chunk_size) |
+ DSC_SLICE_PER_LINE((vdsc_cfg->pic_width / num_vdsc_instances) /
+ vdsc_cfg->slice_width) |
+ DSC_SLICE_ROW_PER_FRAME(vdsc_cfg->pic_height /
+ vdsc_cfg->slice_height);
+ drm_dbg_kms(&dev_priv->drm, "PPS16 = 0x%08x\n", pps_val);
+ if (!is_pipe_dsc(crtc, cpu_transcoder)) {
+ intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_16,
+ pps_val);
+ /*
+ * If 2 VDSC instances are needed, configure PPS for second
+ * VDSC
+ */
+ if (crtc_state->dsc.dsc_split)
+ intel_de_write(dev_priv,
+ DSCC_PICTURE_PARAMETER_SET_16, pps_val);
+ } else {
+ intel_de_write(dev_priv,
+ ICL_DSC0_PICTURE_PARAMETER_SET_16(pipe),
+ pps_val);
+ if (crtc_state->dsc.dsc_split)
+ intel_de_write(dev_priv,
+ ICL_DSC1_PICTURE_PARAMETER_SET_16(pipe),
+ pps_val);
+ }
+
+ /* Populate the RC_BUF_THRESH registers */
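+ /*
+ * Each of the 14 8-bit thresholds packs four to a dword:
+ * thresh[0] in bits 7:0, thresh[1] in bits 15:8, and so on.
+ */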
+ memset(rc_buf_thresh_dword, 0, sizeof(rc_buf_thresh_dword));
+ for (i = 0; i < DSC_NUM_BUF_RANGES - 1; i++) {
+ rc_buf_thresh_dword[i / 4] |=
+ (u32)(vdsc_cfg->rc_buf_thresh[i] <<
+ BITS_PER_BYTE * (i % 4));
+ drm_dbg_kms(&dev_priv->drm, "RC_BUF_THRESH_%d = 0x%08x\n", i,
+ rc_buf_thresh_dword[i / 4]);
+ }
+ if (!is_pipe_dsc(crtc, cpu_transcoder)) {
+ intel_de_write(dev_priv, DSCA_RC_BUF_THRESH_0,
+ rc_buf_thresh_dword[0]);
+ intel_de_write(dev_priv, DSCA_RC_BUF_THRESH_0_UDW,
+ rc_buf_thresh_dword[1]);
+ intel_de_write(dev_priv, DSCA_RC_BUF_THRESH_1,
+ rc_buf_thresh_dword[2]);
+ intel_de_write(dev_priv, DSCA_RC_BUF_THRESH_1_UDW,
+ rc_buf_thresh_dword[3]);
+ if (crtc_state->dsc.dsc_split) {
+ intel_de_write(dev_priv, DSCC_RC_BUF_THRESH_0,
+ rc_buf_thresh_dword[0]);
+ intel_de_write(dev_priv, DSCC_RC_BUF_THRESH_0_UDW,
+ rc_buf_thresh_dword[1]);
+ intel_de_write(dev_priv, DSCC_RC_BUF_THRESH_1,
+ rc_buf_thresh_dword[2]);
+ intel_de_write(dev_priv, DSCC_RC_BUF_THRESH_1_UDW,
+ rc_buf_thresh_dword[3]);
+ }
+ } else {
+ intel_de_write(dev_priv, ICL_DSC0_RC_BUF_THRESH_0(pipe),
+ rc_buf_thresh_dword[0]);
+ intel_de_write(dev_priv, ICL_DSC0_RC_BUF_THRESH_0_UDW(pipe),
+ rc_buf_thresh_dword[1]);
+ intel_de_write(dev_priv, ICL_DSC0_RC_BUF_THRESH_1(pipe),
+ rc_buf_thresh_dword[2]);
+ intel_de_write(dev_priv, ICL_DSC0_RC_BUF_THRESH_1_UDW(pipe),
+ rc_buf_thresh_dword[3]);
+ if (crtc_state->dsc.dsc_split) {
+ intel_de_write(dev_priv,
+ ICL_DSC1_RC_BUF_THRESH_0(pipe),
+ rc_buf_thresh_dword[0]);
+ intel_de_write(dev_priv,
+ ICL_DSC1_RC_BUF_THRESH_0_UDW(pipe),
+ rc_buf_thresh_dword[1]);
+ intel_de_write(dev_priv,
+ ICL_DSC1_RC_BUF_THRESH_1(pipe),
+ rc_buf_thresh_dword[2]);
+ intel_de_write(dev_priv,
+ ICL_DSC1_RC_BUF_THRESH_1_UDW(pipe),
+ rc_buf_thresh_dword[3]);
+ }
+ }
+
+ /* Populate the RC_RANGE_PARAMETERS registers */
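+ /*
+ * Each range parameter packs min_qp, max_qp and the 6-bit bpg_offset
+ * into 16 bits, so two ranges fit per dword.
+ */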
+ memset(rc_range_params_dword, 0, sizeof(rc_range_params_dword));
+ for (i = 0; i < DSC_NUM_BUF_RANGES; i++) {
+ rc_range_params_dword[i / 2] |=
+ (u32)(((vdsc_cfg->rc_range_params[i].range_bpg_offset <<
+ RC_BPG_OFFSET_SHIFT) |
+ (vdsc_cfg->rc_range_params[i].range_max_qp <<
+ RC_MAX_QP_SHIFT) |
+ (vdsc_cfg->rc_range_params[i].range_min_qp <<
+ RC_MIN_QP_SHIFT)) << 16 * (i % 2));
+ drm_dbg_kms(&dev_priv->drm, "RC_RANGE_PARAM_%d = 0x%08x\n", i,
+ rc_range_params_dword[i / 2]);
+ }
+ if (!is_pipe_dsc(crtc, cpu_transcoder)) {
+ intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_0,
+ rc_range_params_dword[0]);
+ intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_0_UDW,
+ rc_range_params_dword[1]);
+ intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_1,
+ rc_range_params_dword[2]);
+ intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_1_UDW,
+ rc_range_params_dword[3]);
+ intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_2,
+ rc_range_params_dword[4]);
+ intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_2_UDW,
+ rc_range_params_dword[5]);
+ intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_3,
+ rc_range_params_dword[6]);
+ intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_3_UDW,
+ rc_range_params_dword[7]);
+ if (crtc_state->dsc.dsc_split) {
+ intel_de_write(dev_priv, DSCC_RC_RANGE_PARAMETERS_0,
+ rc_range_params_dword[0]);
+ intel_de_write(dev_priv,
+ DSCC_RC_RANGE_PARAMETERS_0_UDW,
+ rc_range_params_dword[1]);
+ intel_de_write(dev_priv, DSCC_RC_RANGE_PARAMETERS_1,
+ rc_range_params_dword[2]);
+ intel_de_write(dev_priv,
+ DSCC_RC_RANGE_PARAMETERS_1_UDW,
+ rc_range_params_dword[3]);
+ intel_de_write(dev_priv, DSCC_RC_RANGE_PARAMETERS_2,
+ rc_range_params_dword[4]);
+ intel_de_write(dev_priv,
+ DSCC_RC_RANGE_PARAMETERS_2_UDW,
+ rc_range_params_dword[5]);
+ intel_de_write(dev_priv, DSCC_RC_RANGE_PARAMETERS_3,
+ rc_range_params_dword[6]);
+ intel_de_write(dev_priv,
+ DSCC_RC_RANGE_PARAMETERS_3_UDW,
+ rc_range_params_dword[7]);
+ }
+ } else {
+ intel_de_write(dev_priv, ICL_DSC0_RC_RANGE_PARAMETERS_0(pipe),
+ rc_range_params_dword[0]);
+ intel_de_write(dev_priv,
+ ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW(pipe),
+ rc_range_params_dword[1]);
+ intel_de_write(dev_priv, ICL_DSC0_RC_RANGE_PARAMETERS_1(pipe),
+ rc_range_params_dword[2]);
+ intel_de_write(dev_priv,
+ ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW(pipe),
+ rc_range_params_dword[3]);
+ intel_de_write(dev_priv, ICL_DSC0_RC_RANGE_PARAMETERS_2(pipe),
+ rc_range_params_dword[4]);
+ intel_de_write(dev_priv,
+ ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW(pipe),
+ rc_range_params_dword[5]);
+ intel_de_write(dev_priv, ICL_DSC0_RC_RANGE_PARAMETERS_3(pipe),
+ rc_range_params_dword[6]);
+ intel_de_write(dev_priv,
+ ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW(pipe),
+ rc_range_params_dword[7]);
+ if (crtc_state->dsc.dsc_split) {
+ intel_de_write(dev_priv,
+ ICL_DSC1_RC_RANGE_PARAMETERS_0(pipe),
+ rc_range_params_dword[0]);
+ intel_de_write(dev_priv,
+ ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW(pipe),
+ rc_range_params_dword[1]);
+ intel_de_write(dev_priv,
+ ICL_DSC1_RC_RANGE_PARAMETERS_1(pipe),
+ rc_range_params_dword[2]);
+ intel_de_write(dev_priv,
+ ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW(pipe),
+ rc_range_params_dword[3]);
+ intel_de_write(dev_priv,
+ ICL_DSC1_RC_RANGE_PARAMETERS_2(pipe),
+ rc_range_params_dword[4]);
+ intel_de_write(dev_priv,
+ ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW(pipe),
+ rc_range_params_dword[5]);
+ intel_de_write(dev_priv,
+ ICL_DSC1_RC_RANGE_PARAMETERS_3(pipe),
+ rc_range_params_dword[6]);
+ intel_de_write(dev_priv,
+ ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW(pipe),
+ rc_range_params_dword[7]);
+ }
+ }
+}
+
+void intel_dsc_dsi_pps_write(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ struct mipi_dsi_device *dsi;
+ struct drm_dsc_picture_parameter_set pps;
+ enum port port;
+
+ if (!crtc_state->dsc.compression_enable)
+ return;
+
+ drm_dsc_pps_payload_pack(&pps, vdsc_cfg);
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi = intel_dsi->dsi_hosts[port]->device;
+
+ mipi_dsi_picture_parameter_set(dsi, &pps);
+ mipi_dsi_compression_mode(dsi, true);
+ }
+}
+
+void intel_dsc_dp_pps_write(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
+ struct drm_dsc_pps_infoframe dp_dsc_pps_sdp;
+
+ if (!crtc_state->dsc.compression_enable)
+ return;
+
+ /* Prepare DP SDP PPS header as per DP 1.4 spec, Table 2-123 */
+ drm_dsc_dp_pps_header_init(&dp_dsc_pps_sdp.pps_header);
+
+ /* Fill the PPS payload bytes as per DSC spec 1.2 Table 4-1 */
+ drm_dsc_pps_payload_pack(&dp_dsc_pps_sdp.pps_payload, vdsc_cfg);
+
+ dig_port->write_infoframe(encoder, crtc_state,
+ DP_SDP_PPS, &dp_dsc_pps_sdp,
+ sizeof(dp_dsc_pps_sdp));
+}
+
+static i915_reg_t dss_ctl1_reg(struct intel_crtc *crtc, enum transcoder cpu_transcoder)
+{
+ return is_pipe_dsc(crtc, cpu_transcoder) ?
+ ICL_PIPE_DSS_CTL1(crtc->pipe) : DSS_CTL1;
+}
+
+static i915_reg_t dss_ctl2_reg(struct intel_crtc *crtc, enum transcoder cpu_transcoder)
+{
+ return is_pipe_dsc(crtc, cpu_transcoder) ?
+ ICL_PIPE_DSS_CTL2(crtc->pipe) : DSS_CTL2;
+}
+
+void intel_uncompressed_joiner_enable(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 dss_ctl1_val = 0;
+
+ if (crtc_state->bigjoiner_pipes && !crtc_state->dsc.compression_enable) {
+ if (intel_crtc_is_bigjoiner_slave(crtc_state))
+ dss_ctl1_val |= UNCOMPRESSED_JOINER_SLAVE;
+ else
+ dss_ctl1_val |= UNCOMPRESSED_JOINER_MASTER;
+
+ intel_de_write(dev_priv, dss_ctl1_reg(crtc, crtc_state->cpu_transcoder), dss_ctl1_val);
+ }
+}
+
+void intel_dsc_enable(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 dss_ctl1_val = 0;
+ u32 dss_ctl2_val = 0;
+
+ if (!crtc_state->dsc.compression_enable)
+ return;
+
+ intel_dsc_pps_configure(crtc_state);
+
+ dss_ctl2_val |= LEFT_BRANCH_VDSC_ENABLE;
+ if (crtc_state->dsc.dsc_split) {
+ dss_ctl2_val |= RIGHT_BRANCH_VDSC_ENABLE;
+ dss_ctl1_val |= JOINER_ENABLE;
+ }
+ if (crtc_state->bigjoiner_pipes) {
+ dss_ctl1_val |= BIG_JOINER_ENABLE;
+ if (!intel_crtc_is_bigjoiner_slave(crtc_state))
+ dss_ctl1_val |= MASTER_BIG_JOINER_ENABLE;
+ }
+ intel_de_write(dev_priv, dss_ctl1_reg(crtc, crtc_state->cpu_transcoder), dss_ctl1_val);
+ intel_de_write(dev_priv, dss_ctl2_reg(crtc, crtc_state->cpu_transcoder), dss_ctl2_val);
+}
+
+void intel_dsc_disable(const struct intel_crtc_state *old_crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+ /* Disable only if either of them is enabled */
+ if (old_crtc_state->dsc.compression_enable ||
+ old_crtc_state->bigjoiner_pipes) {
+ intel_de_write(dev_priv, dss_ctl1_reg(crtc, old_crtc_state->cpu_transcoder), 0);
+ intel_de_write(dev_priv, dss_ctl2_reg(crtc, old_crtc_state->cpu_transcoder), 0);
+ }
+}
+
+void intel_dsc_get_config(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ enum pipe pipe = crtc->pipe;
+ enum intel_display_power_domain power_domain;
+ intel_wakeref_t wakeref;
+ u32 dss_ctl1, dss_ctl2, val;
+
+ if (!intel_dsc_source_support(crtc_state))
+ return;
+
+ power_domain = intel_dsc_power_domain(crtc, cpu_transcoder);
+
+ wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ if (!wakeref)
+ return;
+
+ dss_ctl1 = intel_de_read(dev_priv, dss_ctl1_reg(crtc, cpu_transcoder));
+ dss_ctl2 = intel_de_read(dev_priv, dss_ctl2_reg(crtc, cpu_transcoder));
+
+ crtc_state->dsc.compression_enable = dss_ctl2 & LEFT_BRANCH_VDSC_ENABLE;
+ if (!crtc_state->dsc.compression_enable)
+ goto out;
+
+ crtc_state->dsc.dsc_split = (dss_ctl2 & RIGHT_BRANCH_VDSC_ENABLE) &&
+ (dss_ctl1 & JOINER_ENABLE);
+
+ /* FIXME: add more state readout as needed */
+
+ /* PPS1 */
+ if (!is_pipe_dsc(crtc, cpu_transcoder))
+ val = intel_de_read(dev_priv, DSCA_PICTURE_PARAMETER_SET_1);
+ else
+ val = intel_de_read(dev_priv,
+ ICL_DSC0_PICTURE_PARAMETER_SET_1(pipe));
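+ /* PPS1 holds bits_per_pixel in U6.4; e.g. a readout of 0x80 means 8 bpp */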
+ vdsc_cfg->bits_per_pixel = val;
+ crtc_state->dsc.compressed_bpp = vdsc_cfg->bits_per_pixel >> 4;
+out:
+ intel_display_power_put(dev_priv, power_domain, wakeref);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.h b/drivers/gpu/drm/i915/display/intel_vdsc.h
new file mode 100644
index 000000000..8763f00fa
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_VDSC_H__
+#define __INTEL_VDSC_H__
+
+#include <linux/types.h>
+
+enum transcoder;
+struct intel_crtc;
+struct intel_crtc_state;
+struct intel_encoder;
+
+bool intel_dsc_source_support(const struct intel_crtc_state *crtc_state);
+void intel_uncompressed_joiner_enable(const struct intel_crtc_state *crtc_state);
+void intel_dsc_enable(const struct intel_crtc_state *crtc_state);
+void intel_dsc_disable(const struct intel_crtc_state *crtc_state);
+int intel_dsc_compute_params(struct intel_crtc_state *pipe_config);
+void intel_dsc_get_config(struct intel_crtc_state *crtc_state);
+enum intel_display_power_domain
+intel_dsc_power_domain(struct intel_crtc *crtc, enum transcoder cpu_transcoder);
+struct intel_crtc *intel_dsc_get_bigjoiner_secondary(const struct intel_crtc *primary_crtc);
+void intel_dsc_dsi_pps_write(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+void intel_dsc_dp_pps_write(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+
+#endif /* __INTEL_VDSC_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_vga.c b/drivers/gpu/drm/i915/display/intel_vga.c
new file mode 100644
index 000000000..b5d058404
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_vga.c
@@ -0,0 +1,167 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/pci.h>
+#include <linux/vgaarb.h>
+
+#include <drm/i915_drm.h>
+#include <video/vga.h>
+
+#include "i915_drv.h"
+#include "intel_de.h"
+#include "intel_vga.h"
+
+static i915_reg_t intel_vga_cntrl_reg(struct drm_i915_private *i915)
+{
+ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+ return VLV_VGACNTRL;
+ else if (DISPLAY_VER(i915) >= 5)
+ return CPU_VGACNTRL;
+ else
+ return VGACNTRL;
+}
+
+/* Disable the VGA plane that we never use */
+void intel_vga_disable(struct drm_i915_private *dev_priv)
+{
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
+ i915_reg_t vga_reg = intel_vga_cntrl_reg(dev_priv);
+ u8 sr1;
+
+ if (intel_de_read(dev_priv, vga_reg) & VGA_DISP_DISABLE)
+ return;
+
+ /* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
+ vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
+ outb(0x01, VGA_SEQ_I);
+ sr1 = inb(VGA_SEQ_D);
+ outb(sr1 | VGA_SR01_SCREEN_OFF, VGA_SEQ_D);
+ vga_put(pdev, VGA_RSRC_LEGACY_IO);
+ udelay(300);
+
+ intel_de_write(dev_priv, vga_reg, VGA_DISP_DISABLE);
+ intel_de_posting_read(dev_priv, vga_reg);
+}
+
+void intel_vga_redisable_power_on(struct drm_i915_private *dev_priv)
+{
+ i915_reg_t vga_reg = intel_vga_cntrl_reg(dev_priv);
+
+ if (!(intel_de_read(dev_priv, vga_reg) & VGA_DISP_DISABLE)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Something enabled VGA plane, disabling it\n");
+ intel_vga_disable(dev_priv);
+ }
+}
+
+void intel_vga_redisable(struct drm_i915_private *i915)
+{
+ intel_wakeref_t wakeref;
+
+ /*
+ * This function can be called either from intel_modeset_setup_hw_state or
+ * at a very early point in our resume sequence, where the power well
+ * structures are not yet restored. Since this function is at a very
+ * paranoid "someone might have enabled VGA while we were not looking"
+ * level, just check if the power well is enabled instead of trying to
+ * follow the "don't touch the power well if we don't need it" policy
+ * the rest of the driver uses.
+ */
+ wakeref = intel_display_power_get_if_enabled(i915, POWER_DOMAIN_VGA);
+ if (!wakeref)
+ return;
+
+ intel_vga_redisable_power_on(i915);
+
+ intel_display_power_put(i915, POWER_DOMAIN_VGA, wakeref);
+}
+
+void intel_vga_reset_io_mem(struct drm_i915_private *i915)
+{
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
+
+ /*
+ * After we re-enable the power well, if we touch VGA register 0x3d5
+ * we'll get unclaimed register interrupts. This stops after we write
+ * anything to the VGA MSR register. The vgacon module uses this
+ * register all the time, so if we unbind our driver and, as a
+ * consequence, bind vgacon, we'll get stuck in an infinite loop at
+ * console_unlock(). So make sure we touch the VGA MSR register here,
+ * ensuring vgacon can keep working normally without triggering
+ * interrupts and error messages.
+ */
+ vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
+ outb(inb(VGA_MIS_R), VGA_MIS_W);
+ vga_put(pdev, VGA_RSRC_LEGACY_IO);
+}
+
+static int
+intel_vga_set_state(struct drm_i915_private *i915, bool enable_decode)
+{
+ unsigned int reg = DISPLAY_VER(i915) >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
+ u16 gmch_ctrl;
+
+ if (pci_read_config_word(i915->bridge_dev, reg, &gmch_ctrl)) {
+ drm_err(&i915->drm, "failed to read control word\n");
+ return -EIO;
+ }
+
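+ /* Nothing to do if VGA decode is already in the requested state */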
+ if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !enable_decode)
+ return 0;
+
+ if (enable_decode)
+ gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
+ else
+ gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
+
+ if (pci_write_config_word(i915->bridge_dev, reg, gmch_ctrl)) {
+ drm_err(&i915->drm, "failed to write control word\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static unsigned int
+intel_vga_set_decode(struct pci_dev *pdev, bool enable_decode)
+{
+ struct drm_i915_private *i915 = pdev_to_i915(pdev);
+
+ intel_vga_set_state(i915, enable_decode);
+
+ if (enable_decode)
+ return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
+ VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
+ else
+ return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
+}
+
+int intel_vga_register(struct drm_i915_private *i915)
+{
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
+ int ret;
+
+ /*
+ * If we have more than one VGA card, then we need to arbitrate access
+ * to the common VGA resources.
+ *
+ * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
+ * then we do not take part in VGA arbitration and
+ * vga_client_register() fails with -ENODEV.
+ */
+ ret = vga_client_register(pdev, intel_vga_set_decode);
+ if (ret && ret != -ENODEV)
+ return ret;
+
+ return 0;
+}
+
+void intel_vga_unregister(struct drm_i915_private *i915)
+{
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
+
+ vga_client_unregister(pdev);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_vga.h b/drivers/gpu/drm/i915/display/intel_vga.h
new file mode 100644
index 000000000..ba5b55b91
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_vga.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_VGA_H__
+#define __INTEL_VGA_H__
+
+struct drm_i915_private;
+
+void intel_vga_reset_io_mem(struct drm_i915_private *i915);
+void intel_vga_disable(struct drm_i915_private *i915);
+void intel_vga_redisable(struct drm_i915_private *i915);
+void intel_vga_redisable_power_on(struct drm_i915_private *i915);
+int intel_vga_register(struct drm_i915_private *i915);
+void intel_vga_unregister(struct drm_i915_private *i915);
+
+#endif /* __INTEL_VGA_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_vrr.c b/drivers/gpu/drm/i915/display/intel_vrr.c
new file mode 100644
index 000000000..5eac99021
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_vrr.c
@@ -0,0 +1,262 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ *
+ */
+
+#include "i915_drv.h"
+#include "intel_de.h"
+#include "intel_display_types.h"
+#include "intel_vrr.h"
+
+bool intel_vrr_is_capable(struct intel_connector *connector)
+{
+ const struct drm_display_info *info = &connector->base.display_info;
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_dp *intel_dp;
+
+ /*
+ * A DP sink is capable of VRR video timings if the
+ * Ignore MSA bit is set in DPCD.
+ * The EDID monitor range should also be at least 10 for a reasonable
+ * Adaptive Sync or Variable Refresh Rate end user experience.
+ */
+ switch (connector->base.connector_type) {
+ case DRM_MODE_CONNECTOR_eDP:
+ if (!connector->panel.vbt.vrr)
+ return false;
+ fallthrough;
+ case DRM_MODE_CONNECTOR_DisplayPort:
+ intel_dp = intel_attached_dp(connector);
+
+ if (!drm_dp_sink_can_do_video_without_timing_msa(intel_dp->dpcd))
+ return false;
+
+ break;
+ default:
+ return false;
+ }
+
+ return HAS_VRR(i915) &&
+ info->monitor_range.max_vfreq - info->monitor_range.min_vfreq > 10;
+}
+
+void
+intel_vrr_check_modeset(struct intel_atomic_state *state)
+{
+ int i;
+ struct intel_crtc_state *old_crtc_state, *new_crtc_state;
+ struct intel_crtc *crtc;
+
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ if (new_crtc_state->uapi.vrr_enabled !=
+ old_crtc_state->uapi.vrr_enabled)
+ new_crtc_state->uapi.mode_changed = true;
+ }
+}
+
+/*
+ * Without VRR, registers get latched at:
+ *  vblank_start
+ *
+ * With VRR, the earliest point registers can get latched is:
+ *  intel_vrr_vmin_vblank_start(), which if we want to maintain
+ *  the correct min vtotal is >= vblank_start + 1
+ *
+ * The latest point registers can get latched is the vmax decision boundary:
+ *  intel_vrr_vmax_vblank_start()
+ *
+ * Between those two points the vblank exit starts (and hence registers get
+ * latched) ASAP after a push is sent.
+ *
+ * framestart_delay is programmable 1-4.
+ */
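+/*
+ * Worked example (illustrative): on display version 13+ with a guardband of
+ * 10 and framestart_delay of 1, the vblank exit length is 10 + 1 + 1 = 12
+ * scanlines.
+ */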
+static int intel_vrr_vblank_exit_length(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+
+ /* The hw imposes the extra scanline before frame start */
+ if (DISPLAY_VER(i915) >= 13)
+ return crtc_state->vrr.guardband + crtc_state->framestart_delay + 1;
+ else
+ return crtc_state->vrr.pipeline_full + crtc_state->framestart_delay + 1;
+}
+
+int intel_vrr_vmin_vblank_start(const struct intel_crtc_state *crtc_state)
+{
+ /* Min vblank actually determined by flipline that is always >=vmin+1 */
+ return crtc_state->vrr.vmin + 1 - intel_vrr_vblank_exit_length(crtc_state);
+}
+
+int intel_vrr_vmax_vblank_start(const struct intel_crtc_state *crtc_state)
+{
+ return crtc_state->vrr.vmax - intel_vrr_vblank_exit_length(crtc_state);
+}
+
+void
+intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_connector *connector =
+ to_intel_connector(conn_state->connector);
+ struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
+ const struct drm_display_info *info = &connector->base.display_info;
+ int vmin, vmax;
+
+ if (!intel_vrr_is_capable(connector))
+ return;
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
+ return;
+
+ if (!crtc_state->uapi.vrr_enabled)
+ return;
+
+ vmin = DIV_ROUND_UP(adjusted_mode->crtc_clock * 1000,
+ adjusted_mode->crtc_htotal * info->monitor_range.max_vfreq);
+ vmax = adjusted_mode->crtc_clock * 1000 /
+ (adjusted_mode->crtc_htotal * info->monitor_range.min_vfreq);
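+ /*
+ * Illustrative numbers: crtc_clock = 148500 kHz, htotal = 2200 and a
+ * 48-60 Hz monitor range give vmin = 1125 and vmax = 1406.
+ */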
+
+ vmin = max_t(int, vmin, adjusted_mode->crtc_vtotal);
+ vmax = max_t(int, vmax, adjusted_mode->crtc_vtotal);
+
+ if (vmin >= vmax)
+ return;
+
+ /*
+ * flipline determines the min vblank length the hardware will
+ * generate, and flipline>=vmin+1, hence we reduce vmin by one
+ * to make sure we can get the actual min vblank length.
+ */
+ crtc_state->vrr.vmin = vmin - 1;
+ crtc_state->vrr.vmax = vmax;
+ crtc_state->vrr.enable = true;
+
+ crtc_state->vrr.flipline = crtc_state->vrr.vmin + 1;
+
+ /*
+ * For XE_LPD+, we use guardband and pipeline override
+ * is deprecated.
+ */
+ if (DISPLAY_VER(i915) >= 13) {
+ /*
+ * FIXME: Subtract Window2 delay from below value.
+ *
+ * Window2 specifies time required to program DSB (Window2) in
+ * number of scan lines. Assuming 0 for no DSB.
+ */
+ crtc_state->vrr.guardband =
+ crtc_state->vrr.vmin - adjusted_mode->crtc_vdisplay;
+ } else {
+ /*
+ * FIXME: s/4/framestart_delay/ to get consistent
+ * earliest/latest points for register latching regardless
+ * of the framestart_delay used?
+ *
+ * FIXME: this really needs the extra scanline to provide consistent
+ * behaviour for all framestart_delay values. Otherwise with
+ * framestart_delay==4 we will end up extending the min vblank by
+ * one extra line.
+ */
+ crtc_state->vrr.pipeline_full =
+ min(255, crtc_state->vrr.vmin - adjusted_mode->crtc_vdisplay - 4 - 1);
+ }
+
+ crtc_state->mode_flags |= I915_MODE_FLAG_VRR;
+}
+
+void intel_vrr_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ u32 trans_vrr_ctl;
+
+ if (!crtc_state->vrr.enable)
+ return;
+
+ if (DISPLAY_VER(dev_priv) >= 13)
+ trans_vrr_ctl = VRR_CTL_VRR_ENABLE |
+ VRR_CTL_IGN_MAX_SHIFT | VRR_CTL_FLIP_LINE_EN |
+ XELPD_VRR_CTL_VRR_GUARDBAND(crtc_state->vrr.guardband);
+ else
+ trans_vrr_ctl = VRR_CTL_VRR_ENABLE |
+ VRR_CTL_IGN_MAX_SHIFT | VRR_CTL_FLIP_LINE_EN |
+ VRR_CTL_PIPELINE_FULL(crtc_state->vrr.pipeline_full) |
+ VRR_CTL_PIPELINE_FULL_OVERRIDE;
+
+ intel_de_write(dev_priv, TRANS_VRR_VMIN(cpu_transcoder), crtc_state->vrr.vmin - 1);
+ intel_de_write(dev_priv, TRANS_VRR_VMAX(cpu_transcoder), crtc_state->vrr.vmax - 1);
+ intel_de_write(dev_priv, TRANS_VRR_CTL(cpu_transcoder), trans_vrr_ctl);
+ intel_de_write(dev_priv, TRANS_VRR_FLIPLINE(cpu_transcoder), crtc_state->vrr.flipline - 1);
+ intel_de_write(dev_priv, TRANS_PUSH(cpu_transcoder), TRANS_PUSH_EN);
+}
+
+void intel_vrr_send_push(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+
+ if (!crtc_state->vrr.enable)
+ return;
+
+ intel_de_write(dev_priv, TRANS_PUSH(cpu_transcoder),
+ TRANS_PUSH_EN | TRANS_PUSH_SEND);
+}
+
+bool intel_vrr_is_push_sent(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+
+ if (!crtc_state->vrr.enable)
+ return false;
+
+ return intel_de_read(dev_priv, TRANS_PUSH(cpu_transcoder)) & TRANS_PUSH_SEND;
+}
+
+void intel_vrr_disable(const struct intel_crtc_state *old_crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
+
+ if (!old_crtc_state->vrr.enable)
+ return;
+
+ intel_de_write(dev_priv, TRANS_VRR_CTL(cpu_transcoder), 0);
+ intel_de_write(dev_priv, TRANS_PUSH(cpu_transcoder), 0);
+}
+
+void intel_vrr_get_config(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ u32 trans_vrr_ctl;
+
+ trans_vrr_ctl = intel_de_read(dev_priv, TRANS_VRR_CTL(cpu_transcoder));
+ crtc_state->vrr.enable = trans_vrr_ctl & VRR_CTL_VRR_ENABLE;
+ if (!crtc_state->vrr.enable)
+ return;
+
+ if (DISPLAY_VER(dev_priv) >= 13)
+ crtc_state->vrr.guardband =
+ REG_FIELD_GET(XELPD_VRR_CTL_VRR_GUARDBAND_MASK, trans_vrr_ctl);
+ else
+ if (trans_vrr_ctl & VRR_CTL_PIPELINE_FULL_OVERRIDE)
+ crtc_state->vrr.pipeline_full =
+ REG_FIELD_GET(VRR_CTL_PIPELINE_FULL_MASK, trans_vrr_ctl);
+ if (trans_vrr_ctl & VRR_CTL_FLIP_LINE_EN)
+ crtc_state->vrr.flipline = intel_de_read(dev_priv, TRANS_VRR_FLIPLINE(cpu_transcoder)) + 1;
+ crtc_state->vrr.vmax = intel_de_read(dev_priv, TRANS_VRR_VMAX(cpu_transcoder)) + 1;
+ crtc_state->vrr.vmin = intel_de_read(dev_priv, TRANS_VRR_VMIN(cpu_transcoder)) + 1;
+
+ crtc_state->mode_flags |= I915_MODE_FLAG_VRR;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_vrr.h b/drivers/gpu/drm/i915/display/intel_vrr.h
new file mode 100644
index 000000000..9fda1135b
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_vrr.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_VRR_H__
+#define __INTEL_VRR_H__
+
+#include <linux/types.h>
+
+struct drm_connector_state;
+struct intel_atomic_state;
+struct intel_connector;
+struct intel_crtc;
+struct intel_crtc_state;
+struct intel_dp;
+struct intel_encoder;
+
+bool intel_vrr_is_capable(struct intel_connector *connector);
+void intel_vrr_check_modeset(struct intel_atomic_state *state);
+void intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state);
+void intel_vrr_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+void intel_vrr_send_push(const struct intel_crtc_state *crtc_state);
+bool intel_vrr_is_push_sent(const struct intel_crtc_state *crtc_state);
+void intel_vrr_disable(const struct intel_crtc_state *old_crtc_state);
+void intel_vrr_get_config(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state);
+int intel_vrr_vmax_vblank_start(const struct intel_crtc_state *crtc_state);
+int intel_vrr_vmin_vblank_start(const struct intel_crtc_state *crtc_state);
+
+#endif /* __INTEL_VRR_H__ */
diff --git a/drivers/gpu/drm/i915/display/skl_scaler.c b/drivers/gpu/drm/i915/display/skl_scaler.c
new file mode 100644
index 000000000..0b74f91e8
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/skl_scaler.c
@@ -0,0 +1,847 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+#include "intel_de.h"
+#include "intel_display_types.h"
+#include "intel_fb.h"
+#include "skl_scaler.h"
+#include "skl_universal_plane.h"
+
+/*
+ * The hardware phase 0.0 refers to the center of the pixel.
+ * We want to start from the top/left edge which is phase
+ * -0.5. That matches how the hardware calculates the scaling
+ * factors (from top-left of the first pixel to bottom-right
+ * of the last pixel, as opposed to the pixel centers).
+ *
+ * For 4:2:0 subsampled chroma planes we obviously have to
+ * adjust that so that the chroma sample position lands in
+ * the right spot.
+ *
+ * Note that for packed YCbCr 4:2:2 formats there is no way to
+ * control chroma siting. The hardware simply replicates the
+ * chroma samples for both of the luma samples, and thus we don't
+ * actually get the expected MPEG2 chroma siting convention :(
+ * The same behaviour is observed on pre-SKL platforms as well.
+ *
+ * Theory behind the formula (note that we ignore sub-pixel
+ * source coordinates):
+ * s = source sample position
+ * d = destination sample position
+ *
+ * Downscaling 4:1:
+ * -0.5
+ * | 0.0
+ * | | 1.5 (initial phase)
+ * | | |
+ * v v v
+ * | s | s | s | s |
+ * | d |
+ *
+ * Upscaling 1:4:
+ * -0.5
+ * | -0.375 (initial phase)
+ * | | 0.0
+ * | | |
+ * v v v
+ * | s |
+ * | d | d | d | d |
+ */
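+/*
+ * Worked example (illustrative): 1:1 scaling of a non-cosited luma plane
+ * (sub = 1, scale = 0x10000) gives phase = -0x8000 + 0x8000 = 0, so only
+ * the PS_PHASE_TRIP bit ends up set in the register value.
+ */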
+static u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited)
+{
+ int phase = -0x8000;
+ u16 trip = 0;
+
+ if (chroma_cosited)
+ phase += (sub - 1) * 0x8000 / sub;
+
+ phase += scale / (2 * sub);
+
+ /*
+ * Hardware initial phase limited to [-0.5:1.5].
+ * Since the max hardware scale factor is 3.0, we
+ * should never actually exceed 1.0 here.
+ */
+ WARN_ON(phase < -0x8000 || phase > 0x18000);
+
+ if (phase < 0)
+ phase = 0x10000 + phase;
+ else
+ trip = PS_PHASE_TRIP;
+
+ return ((phase >> 2) & PS_PHASE_MASK) | trip;
+}
+
+#define SKL_MIN_SRC_W 8
+#define SKL_MAX_SRC_W 4096
+#define SKL_MIN_SRC_H 8
+#define SKL_MAX_SRC_H 4096
+#define SKL_MIN_DST_W 8
+#define SKL_MAX_DST_W 4096
+#define SKL_MIN_DST_H 8
+#define SKL_MAX_DST_H 4096
+#define ICL_MAX_SRC_W 5120
+#define ICL_MAX_SRC_H 4096
+#define ICL_MAX_DST_W 5120
+#define ICL_MAX_DST_H 4096
+#define MTL_MAX_SRC_W 4096
+#define MTL_MAX_SRC_H 8192
+#define MTL_MAX_DST_W 8192
+#define MTL_MAX_DST_H 8192
+#define SKL_MIN_YUV_420_SRC_W 16
+#define SKL_MIN_YUV_420_SRC_H 16
+
+static int
+skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
+ unsigned int scaler_user, int *scaler_id,
+ int src_w, int src_h, int dst_w, int dst_h,
+ const struct drm_format_info *format,
+ u64 modifier, bool need_scaler)
+{
+ struct intel_crtc_scaler_state *scaler_state =
+ &crtc_state->scaler_state;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+ int pipe_src_w = drm_rect_width(&crtc_state->pipe_src);
+ int pipe_src_h = drm_rect_height(&crtc_state->pipe_src);
+ int min_src_w, min_src_h, min_dst_w, min_dst_h;
+ int max_src_w, max_src_h, max_dst_w, max_dst_h;
+
+ /*
+ * Src coordinates are already rotated by 270 degrees for
+ * the 90/270 degree plane rotation cases (to match the
+ * GTT mapping), hence no need to account for rotation here.
+ */
+ if (src_w != dst_w || src_h != dst_h)
+ need_scaler = true;
+
+ /*
+ * Scaling/fitting not supported in IF-ID mode in GEN9+
+ * TODO: Interlace fetch mode doesn't support YUV420 planar formats.
+ * Once NV12 is enabled, handle it here while allocating scaler
+ * for NV12.
+ */
+ if (DISPLAY_VER(dev_priv) >= 9 && crtc_state->hw.enable &&
+ need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Pipe/Plane scaling not supported with IF-ID mode\n");
+ return -EINVAL;
+ }
+
+ /*
+ * If the plane is being disabled, the scaler is no longer required,
+ * or a detach is forced:
+ * - free the scaler bound to this plane/crtc
+ * - in order to do this, update crtc->scaler_usage
+ *
+ * Here the scaler state in crtc_state is set free so that
+ * the scaler can be assigned to another user. The actual register
+ * update to free the scaler is done in plane/panel-fit programming.
+ * For this purpose crtc/plane_state->scaler_id isn't reset here.
+ */
+ if (force_detach || !need_scaler) {
+ if (*scaler_id >= 0) {
+ scaler_state->scaler_users &= ~(1 << scaler_user);
+ scaler_state->scalers[*scaler_id].in_use = 0;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "scaler_user index %u.%u: "
+ "Staged freeing scaler id %d scaler_users = 0x%x\n",
+ crtc->pipe, scaler_user, *scaler_id,
+ scaler_state->scaler_users);
+ *scaler_id = -1;
+ }
+ return 0;
+ }
+
+ if (format && intel_format_info_is_yuv_semiplanar(format, modifier) &&
+ (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Planar YUV: src dimensions not met\n");
+ return -EINVAL;
+ }
+
+ min_src_w = SKL_MIN_SRC_W;
+ min_src_h = SKL_MIN_SRC_H;
+ min_dst_w = SKL_MIN_DST_W;
+ min_dst_h = SKL_MIN_DST_H;
+
+ if (DISPLAY_VER(dev_priv) < 11) {
+ max_src_w = SKL_MAX_SRC_W;
+ max_src_h = SKL_MAX_SRC_H;
+ max_dst_w = SKL_MAX_DST_W;
+ max_dst_h = SKL_MAX_DST_H;
+ } else if (DISPLAY_VER(dev_priv) < 14) {
+ max_src_w = ICL_MAX_SRC_W;
+ max_src_h = ICL_MAX_SRC_H;
+ max_dst_w = ICL_MAX_DST_W;
+ max_dst_h = ICL_MAX_DST_H;
+ } else {
+ max_src_w = MTL_MAX_SRC_W;
+ max_src_h = MTL_MAX_SRC_H;
+ max_dst_w = MTL_MAX_DST_W;
+ max_dst_h = MTL_MAX_DST_H;
+ }
+
+ /* range checks */
+ if (src_w < min_src_w || src_h < min_src_h ||
+ dst_w < min_dst_w || dst_h < min_dst_h ||
+ src_w > max_src_w || src_h > max_src_h ||
+ dst_w > max_dst_w || dst_h > max_dst_h) {
+ drm_dbg_kms(&dev_priv->drm,
+ "scaler_user index %u.%u: src %ux%u dst %ux%u "
+ "size is out of scaler range\n",
+ crtc->pipe, scaler_user, src_w, src_h,
+ dst_w, dst_h);
+ return -EINVAL;
+ }
+
+ /*
+ * The pipe scaler does not use all the bits of PIPESRC, at least
+ * on the earlier platforms. So even when we're scaling a plane
+ * the *pipe* source size must not be too large. For simplicity
+ * we assume the limits match the scaler source size limits. Might
+ * not be 100% accurate on all platforms, but good enough for now.
+ */
+ if (pipe_src_w > max_src_w || pipe_src_h > max_src_h) {
+ drm_dbg_kms(&dev_priv->drm,
+ "scaler_user index %u.%u: pipe src size %ux%u "
+ "is out of scaler range\n",
+ crtc->pipe, scaler_user, pipe_src_w, pipe_src_h);
+ return -EINVAL;
+ }
+
+ /* mark this plane as a scaler user in crtc_state */
+ scaler_state->scaler_users |= (1 << scaler_user);
+ drm_dbg_kms(&dev_priv->drm, "scaler_user index %u.%u: "
+ "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
+ crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
+ scaler_state->scaler_users);
+
+ return 0;
+}
+
+int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state)
+{
+ const struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
+ int width, height;
+
+ if (crtc_state->pch_pfit.enabled) {
+ width = drm_rect_width(&crtc_state->pch_pfit.dst);
+ height = drm_rect_height(&crtc_state->pch_pfit.dst);
+ } else {
+ width = pipe_mode->crtc_hdisplay;
+ height = pipe_mode->crtc_vdisplay;
+ }
+ return skl_update_scaler(crtc_state, !crtc_state->hw.active,
+ SKL_CRTC_INDEX,
+ &crtc_state->scaler_state.scaler_id,
+ drm_rect_width(&crtc_state->pipe_src),
+ drm_rect_height(&crtc_state->pipe_src),
+ width, height, NULL, 0,
+ crtc_state->pch_pfit.enabled);
+}
+
+/**
+ * skl_update_scaler_plane - Stages update to scaler state for a given plane.
+ * @crtc_state: crtc's scaler state
+ * @plane_state: atomic plane state to update
+ *
+ * Return
+ * 0 - scaler_usage updated successfully
+ * error - requested scaling cannot be supported or other error condition
+ */
+int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state)
+{
+ struct intel_plane *intel_plane =
+ to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
+ struct drm_framebuffer *fb = plane_state->hw.fb;
+ int ret;
+ bool force_detach = !fb || !plane_state->uapi.visible;
+ bool need_scaler = false;
+
+ /* Pre-gen11 and SDR planes always need a scaler for planar formats. */
+ if (!icl_is_hdr_plane(dev_priv, intel_plane->id) &&
+ fb && intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
+ need_scaler = true;
+
+ ret = skl_update_scaler(crtc_state, force_detach,
+ drm_plane_index(&intel_plane->base),
+ &plane_state->scaler_id,
+ drm_rect_width(&plane_state->uapi.src) >> 16,
+ drm_rect_height(&plane_state->uapi.src) >> 16,
+ drm_rect_width(&plane_state->uapi.dst),
+ drm_rect_height(&plane_state->uapi.dst),
+ fb ? fb->format : NULL,
+ fb ? fb->modifier : 0,
+ need_scaler);
+
+ if (ret || plane_state->scaler_id < 0)
+ return ret;
+
+ /* check colorkey */
+ if (plane_state->ckey.flags) {
+ drm_dbg_kms(&dev_priv->drm,
+ "[PLANE:%d:%s] scaling with color key not allowed",
+ intel_plane->base.base.id,
+ intel_plane->base.name);
+ return -EINVAL;
+ }
+
+ /* Check src format */
+ switch (fb->format->format) {
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_ARGB2101010:
+ case DRM_FORMAT_ABGR2101010:
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_XYUV8888:
+ case DRM_FORMAT_P010:
+ case DRM_FORMAT_P012:
+ case DRM_FORMAT_P016:
+ case DRM_FORMAT_Y210:
+ case DRM_FORMAT_Y212:
+ case DRM_FORMAT_Y216:
+ case DRM_FORMAT_XVYU2101010:
+ case DRM_FORMAT_XVYU12_16161616:
+ case DRM_FORMAT_XVYU16161616:
+ break;
+ case DRM_FORMAT_XBGR16161616F:
+ case DRM_FORMAT_ABGR16161616F:
+ case DRM_FORMAT_XRGB16161616F:
+ case DRM_FORMAT_ARGB16161616F:
+ if (DISPLAY_VER(dev_priv) >= 11)
+ break;
+ fallthrough;
+ default:
+ drm_dbg_kms(&dev_priv->drm,
+ "[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
+ intel_plane->base.base.id, intel_plane->base.name,
+ fb->base.id, fb->format->format);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_state,
+ int num_scalers_need, struct intel_crtc *intel_crtc,
+ const char *name, int idx,
+ struct intel_plane_state *plane_state,
+ int *scaler_id)
+{
+ struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
+ int j;
+ u32 mode;
+
+ if (*scaler_id < 0) {
+ /* find a free scaler */
+ for (j = 0; j < intel_crtc->num_scalers; j++) {
+ if (scaler_state->scalers[j].in_use)
+ continue;
+
+ *scaler_id = j;
+ scaler_state->scalers[*scaler_id].in_use = 1;
+ break;
+ }
+ }
+
+ if (drm_WARN(&dev_priv->drm, *scaler_id < 0,
+ "Cannot find scaler for %s:%d\n", name, idx))
+ return -EINVAL;
+
+ /* set scaler mode */
+ if (plane_state && plane_state->hw.fb &&
+ plane_state->hw.fb->format->is_yuv &&
+ plane_state->hw.fb->format->num_planes > 1) {
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+
+ if (DISPLAY_VER(dev_priv) == 9) {
+ mode = SKL_PS_SCALER_MODE_NV12;
+ } else if (icl_is_hdr_plane(dev_priv, plane->id)) {
+ /*
+ * On gen11+'s HDR planes we only use the scaler for
+ * scaling. They have a dedicated chroma upsampler, so
+ * we don't need the scaler to upsample the UV plane.
+ */
+ mode = PS_SCALER_MODE_NORMAL;
+ } else {
+ struct intel_plane *linked =
+ plane_state->planar_linked_plane;
+
+ mode = PS_SCALER_MODE_PLANAR;
+
+ if (linked)
+ mode |= PS_PLANE_Y_SEL(linked->id);
+ }
+ } else if (DISPLAY_VER(dev_priv) >= 10) {
+ mode = PS_SCALER_MODE_NORMAL;
+ } else if (num_scalers_need == 1 && intel_crtc->num_scalers > 1) {
+ /*
+ * When only 1 scaler is in use on a pipe with 2 scalers,
+ * scaler 0 operates in high quality (HQ) mode. In this case
+ * use scaler 0 to take advantage of HQ mode.
+ */
+ scaler_state->scalers[*scaler_id].in_use = 0;
+ *scaler_id = 0;
+ scaler_state->scalers[0].in_use = 1;
+ mode = SKL_PS_SCALER_MODE_HQ;
+ } else {
+ mode = SKL_PS_SCALER_MODE_DYN;
+ }
+
+ /*
+ * FIXME: we should also check the scaler factors for pfit, so
+ * this shouldn't be tied directly to planes.
+ */
+ if (plane_state && plane_state->hw.fb) {
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ const struct drm_rect *src = &plane_state->uapi.src;
+ const struct drm_rect *dst = &plane_state->uapi.dst;
+ int hscale, vscale, max_vscale, max_hscale;
+
+ /*
+ * FIXME: When two scalers are needed, but only one of
+ * them needs to downscale, we should make sure that
+ * the one that needs downscaling support is assigned
+ * as the first scaler, so we don't reject downscaling
+ * unnecessarily.
+ */
+
+ if (DISPLAY_VER(dev_priv) >= 14) {
+ /*
+ * On versions 14 and up, only the first
+ * scaler supports a vertical scaling factor
+ * of more than 1.0, while a horizontal
+ * scaling factor of 3.0 is supported.
+ */
+ max_hscale = 0x30000 - 1;
+ if (*scaler_id == 0)
+ max_vscale = 0x30000 - 1;
+ else
+ max_vscale = 0x10000;
+
+ } else if (DISPLAY_VER(dev_priv) >= 10 ||
+ !intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier)) {
+ max_hscale = 0x30000 - 1;
+ max_vscale = 0x30000 - 1;
+ } else {
+ max_hscale = 0x20000 - 1;
+ max_vscale = 0x20000 - 1;
+ }
+
+ /*
+ * FIXME: We should change the if-else block above to
+ * support HQ vs dynamic scaler properly.
+ */
+
+ /* Check if required scaling is within limits */
+ hscale = drm_rect_calc_hscale(src, dst, 1, max_hscale);
+ vscale = drm_rect_calc_vscale(src, dst, 1, max_vscale);
+
+ if (hscale < 0 || vscale < 0) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Scaler %d doesn't support required plane scaling\n",
+ *scaler_id);
+ drm_rect_debug_print("src: ", src, true);
+ drm_rect_debug_print("dst: ", dst, false);
+
+ return -EINVAL;
+ }
+ }
+
+ drm_dbg_kms(&dev_priv->drm, "Attached scaler id %u.%u to %s:%d\n",
+ intel_crtc->pipe, *scaler_id, name, idx);
+ scaler_state->scalers[*scaler_id].mode = mode;
+
+ return 0;
+}
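+
+/*
+ * Illustrative sketch (not part of the driver): the max_hscale/max_vscale
+ * limits above are 16.16 fixed point, so 0x10000 represents 1.0 and
+ * 0x30000 - 1 sits just below 3.0. A downscale factor computed the way
+ * drm_rect_calc_hscale() computes it would look like:
+ *
+ *	int src_w = 3840 << 16;		// src coordinates are 16.16 fixed point
+ *	int dst_w = 1920;		// dst is in whole pixels
+ *	int hscale = src_w / dst_w;	// 0x20000 == 2.0
+ *
+ * which passes the 0x30000 - 1 limit but would be rejected against the
+ * pre-gen10 semiplanar limit of 0x20000 - 1.
+ */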
+
+/**
+ * intel_atomic_setup_scalers() - setup scalers for crtc per staged requests
+ * @dev_priv: i915 device
+ * @intel_crtc: intel crtc
+ * @crtc_state: incoming crtc_state to validate and setup scalers
+ *
+ * This function sets up scalers based on the staged scaling requests for
+ * @intel_crtc and its planes. It is called from the crtc level check path.
+ * If the requests are supportable, it attaches scalers to the requested
+ * planes and crtc.
+ *
+ * This function also takes into account any scaler(s) currently in use by
+ * planes that are not part of this atomic state.
+ *
+ * Returns:
+ * 0 - scalers were setup successfully
+ * error code - otherwise
+ */
+int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
+ struct intel_crtc *intel_crtc,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_plane *plane = NULL;
+ struct intel_plane *intel_plane;
+ struct intel_crtc_scaler_state *scaler_state =
+ &crtc_state->scaler_state;
+ struct drm_atomic_state *drm_state = crtc_state->uapi.state;
+ struct intel_atomic_state *intel_state = to_intel_atomic_state(drm_state);
+ int num_scalers_need;
+ int i;
+
+ num_scalers_need = hweight32(scaler_state->scaler_users);
+
+ /*
+ * High level flow:
+ * - staged scaler requests are already in scaler_state->scaler_users
+ * - check whether staged scaling requests can be supported
+ * - add planes using scalers that aren't in current transaction
+ * - assign scalers to requested users
+ * - as part of plane commit, scalers will be committed
+ * (i.e., either attached or detached) to respective planes in hw
+ * - as part of crtc_commit, scaler will be either attached or detached
+ * to crtc in hw
+ */
+
+ /* fail if required scalers > available scalers */
+ if (num_scalers_need > intel_crtc->num_scalers) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Too many scaling requests %d > %d\n",
+ num_scalers_need, intel_crtc->num_scalers);
+ return -EINVAL;
+ }
+
+ /* walk through scaler_users bits and start assigning scalers */
+ for (i = 0; i < sizeof(scaler_state->scaler_users) * 8; i++) {
+ struct intel_plane_state *plane_state = NULL;
+ int *scaler_id;
+ const char *name;
+ int idx, ret;
+
+ /* skip if scaler not required */
+ if (!(scaler_state->scaler_users & (1 << i)))
+ continue;
+
+ if (i == SKL_CRTC_INDEX) {
+ name = "CRTC";
+ idx = intel_crtc->base.base.id;
+
+ /* panel fitter case: assign as a crtc scaler */
+ scaler_id = &scaler_state->scaler_id;
+ } else {
+ name = "PLANE";
+
+ /* plane scaler case: assign as a plane scaler */
+ /* find the plane that set the bit as scaler_user */
+ plane = drm_state->planes[i].ptr;
+
+ /*
+ * To enable/disable HQ mode, add the planes that are
+ * using a scaler to this transaction.
+ */
+ if (!plane) {
+ struct drm_plane_state *state;
+
+ /*
+ * GLK+ scalers don't have an HQ mode, so it
+ * isn't necessary to switch between HQ and dynamic
+ * mode on those platforms.
+ */
+ if (DISPLAY_VER(dev_priv) >= 10)
+ continue;
+
+ plane = drm_plane_from_index(&dev_priv->drm, i);
+ state = drm_atomic_get_plane_state(drm_state, plane);
+ if (IS_ERR(state)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Failed to add [PLANE:%d] to drm_state\n",
+ plane->base.id);
+ return PTR_ERR(state);
+ }
+ }
+
+ intel_plane = to_intel_plane(plane);
+ idx = plane->base.id;
+
+ /* a plane on a different crtc cannot be a scaler user of this crtc */
+ if (drm_WARN_ON(&dev_priv->drm,
+ intel_plane->pipe != intel_crtc->pipe))
+ continue;
+
+ plane_state = intel_atomic_get_new_plane_state(intel_state,
+ intel_plane);
+ scaler_id = &plane_state->scaler_id;
+ }
+
+ ret = intel_atomic_setup_scaler(scaler_state, num_scalers_need,
+ intel_crtc, name, idx,
+ plane_state, scaler_id);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
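+
+/*
+ * Illustrative sketch (not part of the driver): scaler_users is a bitmask
+ * with one bit per drm_plane index plus a dedicated bit (SKL_CRTC_INDEX,
+ * defined elsewhere in the driver) for the crtc's own panel fitter.
+ * Staging a request and consuming it in the loop above boil down to:
+ *
+ *	scaler_state->scaler_users |= 1 << plane_index;	// stage a request
+ *	if (scaler_state->scaler_users & (1 << i))	// assign a scaler
+ *		...
+ *
+ * which is why hweight32(scaler_users) counts the scalers needed.
+ */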
+
+static int glk_coef_tap(int i)
+{
+ return i % 7;
+}
+
+static u16 glk_nearest_filter_coef(int t)
+{
+ return t == 3 ? 0x0800 : 0x3000;
+}
+
+/*
+ * Theory behind setting nearest-neighbor integer scaling:
+ *
+ * 17 phases of 7 taps require 119 coefficients in 60 dwords per set.
+ * The letter represents the filter tap (D is the center tap) and the number
+ * represents the coefficient set for a phase (0-16).
+ *
+ * +------------+--------------------------+--------------------------+
+ * |Index value | Data value coefficient 1 | Data value coefficient 2 |
+ * +------------+--------------------------+--------------------------+
+ * |    00h     |            B0            |            A0            |
+ * +------------+--------------------------+--------------------------+
+ * |    01h     |            D0            |            C0            |
+ * +------------+--------------------------+--------------------------+
+ * |    02h     |            F0            |            E0            |
+ * +------------+--------------------------+--------------------------+
+ * |    03h     |            A1            |            G0            |
+ * +------------+--------------------------+--------------------------+
+ * |    04h     |            C1            |            B1            |
+ * +------------+--------------------------+--------------------------+
+ * |    ...     |           ...            |           ...            |
+ * +------------+--------------------------+--------------------------+
+ * |    38h     |           B16            |           A16            |
+ * +------------+--------------------------+--------------------------+
+ * |    39h     |           D16            |           C16            |
+ * +------------+--------------------------+--------------------------+
+ * |    3Ah     |           F16            |           E16            |
+ * +------------+--------------------------+--------------------------+
+ * |    3Bh     |         Reserved         |           G16            |
+ * +------------+--------------------------+--------------------------+
+ *
+ * To enable nearest-neighbor scaling: program scaler coefficients with
+ * the center tap (Dxx) values set to 1 and all other values set to 0 as per
+ * SCALER_COEFFICIENT_FORMAT.
+ */
+
+static void glk_program_nearest_filter_coefs(struct drm_i915_private *dev_priv,
+ enum pipe pipe, int id, int set)
+{
+ int i;
+
+ intel_de_write_fw(dev_priv, GLK_PS_COEF_INDEX_SET(pipe, id, set),
+ PS_COEE_INDEX_AUTO_INC);
+
+ for (i = 0; i < 17 * 7; i += 2) {
+ u32 tmp;
+ int t;
+
+ t = glk_coef_tap(i);
+ tmp = glk_nearest_filter_coef(t);
+
+ t = glk_coef_tap(i + 1);
+ tmp |= glk_nearest_filter_coef(t) << 16;
+
+ intel_de_write_fw(dev_priv, GLK_PS_COEF_DATA_SET(pipe, id, set),
+ tmp);
+ }
+
+ intel_de_write_fw(dev_priv, GLK_PS_COEF_INDEX_SET(pipe, id, set), 0);
+}
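+
+/*
+ * Illustrative sketch (not part of the driver): 17 phases x 7 taps gives
+ * 119 coefficients, packed two 16-bit values per dword, i.e. the 60 writes
+ * performed by the loop above (i = 0, 2, ..., 118). With
+ * glk_coef_tap(i) == i % 7 the center tap D is tap 3, so per the theory
+ * comment above the filter programs 1 (0x0800 in the hardware coefficient
+ * format) for tap 3 and 0 (0x3000) for every other tap.
+ */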
+
+static u32 skl_scaler_get_filter_select(enum drm_scaling_filter filter, int set)
+{
+ if (filter == DRM_SCALING_FILTER_NEAREST_NEIGHBOR) {
+ return (PS_FILTER_PROGRAMMED |
+ PS_Y_VERT_FILTER_SELECT(set) |
+ PS_Y_HORZ_FILTER_SELECT(set) |
+ PS_UV_VERT_FILTER_SELECT(set) |
+ PS_UV_HORZ_FILTER_SELECT(set));
+ }
+
+ return PS_FILTER_MEDIUM;
+}
+
+static void skl_scaler_setup_filter(struct drm_i915_private *dev_priv, enum pipe pipe,
+ int id, int set, enum drm_scaling_filter filter)
+{
+ switch (filter) {
+ case DRM_SCALING_FILTER_DEFAULT:
+ break;
+ case DRM_SCALING_FILTER_NEAREST_NEIGHBOR:
+ glk_program_nearest_filter_coefs(dev_priv, pipe, id, set);
+ break;
+ default:
+ MISSING_CASE(filter);
+ }
+}
+
+void skl_pfit_enable(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct intel_crtc_scaler_state *scaler_state =
+ &crtc_state->scaler_state;
+ const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
+ u16 uv_rgb_hphase, uv_rgb_vphase;
+ enum pipe pipe = crtc->pipe;
+ int width = drm_rect_width(dst);
+ int height = drm_rect_height(dst);
+ int x = dst->x1;
+ int y = dst->y1;
+ int hscale, vscale;
+ struct drm_rect src;
+ int id;
+ u32 ps_ctrl;
+
+ if (!crtc_state->pch_pfit.enabled)
+ return;
+
+ if (drm_WARN_ON(&dev_priv->drm,
+ crtc_state->scaler_state.scaler_id < 0))
+ return;
+
+ drm_rect_init(&src, 0, 0,
+ drm_rect_width(&crtc_state->pipe_src) << 16,
+ drm_rect_height(&crtc_state->pipe_src) << 16);
+
+ hscale = drm_rect_calc_hscale(&src, dst, 0, INT_MAX);
+ vscale = drm_rect_calc_vscale(&src, dst, 0, INT_MAX);
+
+ uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
+ uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);
+
+ id = scaler_state->scaler_id;
+
+ ps_ctrl = skl_scaler_get_filter_select(crtc_state->hw.scaling_filter, 0);
+ ps_ctrl |= PS_SCALER_EN | scaler_state->scalers[id].mode;
+
+ skl_scaler_setup_filter(dev_priv, pipe, id, 0,
+ crtc_state->hw.scaling_filter);
+
+ intel_de_write_fw(dev_priv, SKL_PS_CTRL(pipe, id), ps_ctrl);
+
+ intel_de_write_fw(dev_priv, SKL_PS_VPHASE(pipe, id),
+ PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
+ intel_de_write_fw(dev_priv, SKL_PS_HPHASE(pipe, id),
+ PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
+ intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(pipe, id),
+ x << 16 | y);
+ intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(pipe, id),
+ width << 16 | height);
+}
+
+void
+skl_program_plane_scaler(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ enum pipe pipe = plane->pipe;
+ int scaler_id = plane_state->scaler_id;
+ const struct intel_scaler *scaler =
+ &crtc_state->scaler_state.scalers[scaler_id];
+ int crtc_x = plane_state->uapi.dst.x1;
+ int crtc_y = plane_state->uapi.dst.y1;
+ u32 crtc_w = drm_rect_width(&plane_state->uapi.dst);
+ u32 crtc_h = drm_rect_height(&plane_state->uapi.dst);
+ u16 y_hphase, uv_rgb_hphase;
+ u16 y_vphase, uv_rgb_vphase;
+ int hscale, vscale;
+ u32 ps_ctrl;
+
+ hscale = drm_rect_calc_hscale(&plane_state->uapi.src,
+ &plane_state->uapi.dst,
+ 0, INT_MAX);
+ vscale = drm_rect_calc_vscale(&plane_state->uapi.src,
+ &plane_state->uapi.dst,
+ 0, INT_MAX);
+
+ /* TODO: handle sub-pixel coordinates */
+ if (intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) &&
+ !icl_is_hdr_plane(dev_priv, plane->id)) {
+ y_hphase = skl_scaler_calc_phase(1, hscale, false);
+ y_vphase = skl_scaler_calc_phase(1, vscale, false);
+
+ /* MPEG2 chroma siting convention */
+ uv_rgb_hphase = skl_scaler_calc_phase(2, hscale, true);
+ uv_rgb_vphase = skl_scaler_calc_phase(2, vscale, false);
+ } else {
+ /* not used */
+ y_hphase = 0;
+ y_vphase = 0;
+
+ uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
+ uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);
+ }
+
+ ps_ctrl = skl_scaler_get_filter_select(plane_state->hw.scaling_filter, 0);
+ ps_ctrl |= PS_SCALER_EN | PS_PLANE_SEL(plane->id) | scaler->mode;
+
+ skl_scaler_setup_filter(dev_priv, pipe, scaler_id, 0,
+ plane_state->hw.scaling_filter);
+
+ intel_de_write_fw(dev_priv, SKL_PS_CTRL(pipe, scaler_id), ps_ctrl);
+ intel_de_write_fw(dev_priv, SKL_PS_VPHASE(pipe, scaler_id),
+ PS_Y_PHASE(y_vphase) | PS_UV_RGB_PHASE(uv_rgb_vphase));
+ intel_de_write_fw(dev_priv, SKL_PS_HPHASE(pipe, scaler_id),
+ PS_Y_PHASE(y_hphase) | PS_UV_RGB_PHASE(uv_rgb_hphase));
+ intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(pipe, scaler_id),
+ (crtc_x << 16) | crtc_y);
+ intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(pipe, scaler_id),
+ (crtc_w << 16) | crtc_h);
+}
+
+static void skl_detach_scaler(struct intel_crtc *crtc, int id)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+
+ intel_de_write_fw(dev_priv, SKL_PS_CTRL(crtc->pipe, id), 0);
+ intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(crtc->pipe, id), 0);
+ intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(crtc->pipe, id), 0);
+}
+
+/*
+ * This function detaches (i.e. unbinds) unused scalers in hardware
+ */
+void skl_detach_scalers(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ const struct intel_crtc_scaler_state *scaler_state =
+ &crtc_state->scaler_state;
+ int i;
+
+ /* loop through and disable scalers that aren't in use */
+ for (i = 0; i < crtc->num_scalers; i++) {
+ if (!scaler_state->scalers[i].in_use)
+ skl_detach_scaler(crtc, i);
+ }
+}
+
+void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
+ int i;
+
+ for (i = 0; i < crtc->num_scalers; i++)
+ skl_detach_scaler(crtc, i);
+}
diff --git a/drivers/gpu/drm/i915/display/skl_scaler.h b/drivers/gpu/drm/i915/display/skl_scaler.h
new file mode 100644
index 000000000..f040f6ac0
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/skl_scaler.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+#ifndef INTEL_SCALER_H
+#define INTEL_SCALER_H
+
+#include <linux/types.h>
+
+enum drm_scaling_filter;
+enum pipe;
+struct drm_i915_private;
+struct intel_crtc;
+struct intel_crtc_state;
+struct intel_plane;
+struct intel_plane_state;
+
+int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
+
+int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state);
+
+int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
+ struct intel_crtc *intel_crtc,
+ struct intel_crtc_state *crtc_state);
+
+void skl_pfit_enable(const struct intel_crtc_state *crtc_state);
+
+void skl_program_plane_scaler(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state);
+void skl_detach_scalers(const struct intel_crtc_state *crtc_state);
+void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state);
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c
new file mode 100644
index 000000000..bc523a3d1
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c
@@ -0,0 +1,2525 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_blend.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_fourcc.h>
+
+#include "i915_drv.h"
+#include "intel_atomic_plane.h"
+#include "intel_de.h"
+#include "intel_display_types.h"
+#include "intel_fb.h"
+#include "intel_fbc.h"
+#include "intel_psr.h"
+#include "intel_sprite.h"
+#include "skl_scaler.h"
+#include "skl_universal_plane.h"
+#include "skl_watermark.h"
+#include "pxp/intel_pxp.h"
+
+static const u32 skl_plane_formats[] = {
+ DRM_FORMAT_C8,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_XRGB16161616F,
+ DRM_FORMAT_XBGR16161616F,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_VYUY,
+ DRM_FORMAT_XYUV8888,
+};
+
+static const u32 skl_planar_formats[] = {
+ DRM_FORMAT_C8,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_XRGB16161616F,
+ DRM_FORMAT_XBGR16161616F,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_VYUY,
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_XYUV8888,
+};
+
+static const u32 glk_planar_formats[] = {
+ DRM_FORMAT_C8,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_XRGB16161616F,
+ DRM_FORMAT_XBGR16161616F,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_VYUY,
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_XYUV8888,
+ DRM_FORMAT_P010,
+ DRM_FORMAT_P012,
+ DRM_FORMAT_P016,
+};
+
+static const u32 icl_sdr_y_plane_formats[] = {
+ DRM_FORMAT_C8,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_ARGB2101010,
+ DRM_FORMAT_ABGR2101010,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_VYUY,
+ DRM_FORMAT_Y210,
+ DRM_FORMAT_Y212,
+ DRM_FORMAT_Y216,
+ DRM_FORMAT_XYUV8888,
+ DRM_FORMAT_XVYU2101010,
+ DRM_FORMAT_XVYU12_16161616,
+ DRM_FORMAT_XVYU16161616,
+};
+
+static const u32 icl_sdr_uv_plane_formats[] = {
+ DRM_FORMAT_C8,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_ARGB2101010,
+ DRM_FORMAT_ABGR2101010,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_VYUY,
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_P010,
+ DRM_FORMAT_P012,
+ DRM_FORMAT_P016,
+ DRM_FORMAT_Y210,
+ DRM_FORMAT_Y212,
+ DRM_FORMAT_Y216,
+ DRM_FORMAT_XYUV8888,
+ DRM_FORMAT_XVYU2101010,
+ DRM_FORMAT_XVYU12_16161616,
+ DRM_FORMAT_XVYU16161616,
+};
+
+static const u32 icl_hdr_plane_formats[] = {
+ DRM_FORMAT_C8,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_ARGB2101010,
+ DRM_FORMAT_ABGR2101010,
+ DRM_FORMAT_XRGB16161616F,
+ DRM_FORMAT_XBGR16161616F,
+ DRM_FORMAT_ARGB16161616F,
+ DRM_FORMAT_ABGR16161616F,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_VYUY,
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_P010,
+ DRM_FORMAT_P012,
+ DRM_FORMAT_P016,
+ DRM_FORMAT_Y210,
+ DRM_FORMAT_Y212,
+ DRM_FORMAT_Y216,
+ DRM_FORMAT_XYUV8888,
+ DRM_FORMAT_XVYU2101010,
+ DRM_FORMAT_XVYU12_16161616,
+ DRM_FORMAT_XVYU16161616,
+};
+
+int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
+{
+ switch (format) {
+ case PLANE_CTL_FORMAT_RGB_565:
+ return DRM_FORMAT_RGB565;
+ case PLANE_CTL_FORMAT_NV12:
+ return DRM_FORMAT_NV12;
+ case PLANE_CTL_FORMAT_XYUV:
+ return DRM_FORMAT_XYUV8888;
+ case PLANE_CTL_FORMAT_P010:
+ return DRM_FORMAT_P010;
+ case PLANE_CTL_FORMAT_P012:
+ return DRM_FORMAT_P012;
+ case PLANE_CTL_FORMAT_P016:
+ return DRM_FORMAT_P016;
+ case PLANE_CTL_FORMAT_Y210:
+ return DRM_FORMAT_Y210;
+ case PLANE_CTL_FORMAT_Y212:
+ return DRM_FORMAT_Y212;
+ case PLANE_CTL_FORMAT_Y216:
+ return DRM_FORMAT_Y216;
+ case PLANE_CTL_FORMAT_Y410:
+ return DRM_FORMAT_XVYU2101010;
+ case PLANE_CTL_FORMAT_Y412:
+ return DRM_FORMAT_XVYU12_16161616;
+ case PLANE_CTL_FORMAT_Y416:
+ return DRM_FORMAT_XVYU16161616;
+ default:
+ case PLANE_CTL_FORMAT_XRGB_8888:
+ if (rgb_order) {
+ if (alpha)
+ return DRM_FORMAT_ABGR8888;
+ else
+ return DRM_FORMAT_XBGR8888;
+ } else {
+ if (alpha)
+ return DRM_FORMAT_ARGB8888;
+ else
+ return DRM_FORMAT_XRGB8888;
+ }
+ case PLANE_CTL_FORMAT_XRGB_2101010:
+ if (rgb_order) {
+ if (alpha)
+ return DRM_FORMAT_ABGR2101010;
+ else
+ return DRM_FORMAT_XBGR2101010;
+ } else {
+ if (alpha)
+ return DRM_FORMAT_ARGB2101010;
+ else
+ return DRM_FORMAT_XRGB2101010;
+ }
+ case PLANE_CTL_FORMAT_XRGB_16161616F:
+ if (rgb_order) {
+ if (alpha)
+ return DRM_FORMAT_ABGR16161616F;
+ else
+ return DRM_FORMAT_XBGR16161616F;
+ } else {
+ if (alpha)
+ return DRM_FORMAT_ARGB16161616F;
+ else
+ return DRM_FORMAT_XRGB16161616F;
+ }
+ }
+}
+
+static u8 icl_nv12_y_plane_mask(struct drm_i915_private *i915)
+{
+ if (DISPLAY_VER(i915) >= 13 || HAS_D12_PLANE_MINIMIZATION(i915))
+ return BIT(PLANE_SPRITE2) | BIT(PLANE_SPRITE3);
+ else
+ return BIT(PLANE_SPRITE4) | BIT(PLANE_SPRITE5);
+}
+
+bool icl_is_nv12_y_plane(struct drm_i915_private *dev_priv,
+ enum plane_id plane_id)
+{
+ return DISPLAY_VER(dev_priv) >= 11 &&
+ icl_nv12_y_plane_mask(dev_priv) & BIT(plane_id);
+}
+
+bool icl_is_hdr_plane(struct drm_i915_private *dev_priv, enum plane_id plane_id)
+{
+ return DISPLAY_VER(dev_priv) >= 11 &&
+ icl_hdr_plane_mask() & BIT(plane_id);
+}
+
+static int icl_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ unsigned int pixel_rate = intel_plane_pixel_rate(crtc_state, plane_state);
+
+ /* two pixels per clock */
+ return DIV_ROUND_UP(pixel_rate, 2);
+}
+
+static void
+glk_plane_ratio(const struct intel_plane_state *plane_state,
+ unsigned int *num, unsigned int *den)
+{
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+
+ if (fb->format->cpp[0] == 8) {
+ *num = 10;
+ *den = 8;
+ } else {
+ *num = 1;
+ *den = 1;
+ }
+}
+
+static int glk_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ unsigned int pixel_rate = intel_plane_pixel_rate(crtc_state, plane_state);
+ unsigned int num, den;
+
+ glk_plane_ratio(plane_state, &num, &den);
+
+ /* two pixels per clock */
+ return DIV_ROUND_UP(pixel_rate * num, 2 * den);
+}
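+
+/*
+ * Illustrative sketch (not part of the driver): for a 64bpp fb on GLK the
+ * ratio above is 10/8 and the plane processes two pixels per clock, so a
+ * pixel rate of, say, 594000 kHz needs
+ *
+ *	DIV_ROUND_UP(594000 * 10, 2 * 8) == 371250 kHz
+ *
+ * of cdclk, versus 297000 kHz for formats with a 1/1 ratio.
+ */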
+
+static void
+skl_plane_ratio(const struct intel_plane_state *plane_state,
+ unsigned int *num, unsigned int *den)
+{
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+
+ if (fb->format->cpp[0] == 8) {
+ *num = 9;
+ *den = 8;
+ } else {
+ *num = 1;
+ *den = 1;
+ }
+}
+
+static int skl_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ unsigned int pixel_rate = intel_plane_pixel_rate(crtc_state, plane_state);
+ unsigned int num, den;
+
+ skl_plane_ratio(plane_state, &num, &den);
+
+ return DIV_ROUND_UP(pixel_rate * num, den);
+}
+
+static int skl_plane_max_width(const struct drm_framebuffer *fb,
+ int color_plane,
+ unsigned int rotation)
+{
+ int cpp = fb->format->cpp[color_plane];
+
+ switch (fb->modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ case I915_FORMAT_MOD_X_TILED:
+ /*
+ * Validated limit is 4k, but 5k should
+ * work apart from the following features:
+ * - Ytile (already limited to 4k)
+ * - FP16 (already limited to 4k)
+ * - render compression (already limited to 4k)
+ * - KVMR sprite and cursor (don't care)
+ * - horizontal panning (TODO verify this)
+ * - pipe and plane scaling (TODO verify this)
+ */
+ if (cpp == 8)
+ return 4096;
+ else
+ return 5120;
+ case I915_FORMAT_MOD_Y_TILED_CCS:
+ case I915_FORMAT_MOD_Yf_TILED_CCS:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
+ /* FIXME AUX plane? */
+ case I915_FORMAT_MOD_Y_TILED:
+ case I915_FORMAT_MOD_Yf_TILED:
+ if (cpp == 8)
+ return 2048;
+ else
+ return 4096;
+ default:
+ MISSING_CASE(fb->modifier);
+ return 2048;
+ }
+}
+
+static int glk_plane_max_width(const struct drm_framebuffer *fb,
+ int color_plane,
+ unsigned int rotation)
+{
+ int cpp = fb->format->cpp[color_plane];
+
+ switch (fb->modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ case I915_FORMAT_MOD_X_TILED:
+ if (cpp == 8)
+ return 4096;
+ else
+ return 5120;
+ case I915_FORMAT_MOD_Y_TILED_CCS:
+ case I915_FORMAT_MOD_Yf_TILED_CCS:
+ /* FIXME AUX plane? */
+ case I915_FORMAT_MOD_Y_TILED:
+ case I915_FORMAT_MOD_Yf_TILED:
+ if (cpp == 8)
+ return 2048;
+ else
+ return 5120;
+ default:
+ MISSING_CASE(fb->modifier);
+ return 2048;
+ }
+}
+
+static int icl_plane_min_width(const struct drm_framebuffer *fb,
+ int color_plane,
+ unsigned int rotation)
+{
+ /* Wa_14011264657, Wa_14011050563: gen11+ */
+ switch (fb->format->format) {
+ case DRM_FORMAT_C8:
+ return 18;
+ case DRM_FORMAT_RGB565:
+ return 10;
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_ARGB2101010:
+ case DRM_FORMAT_ABGR2101010:
+ case DRM_FORMAT_XVYU2101010:
+ case DRM_FORMAT_Y212:
+ case DRM_FORMAT_Y216:
+ return 6;
+ case DRM_FORMAT_NV12:
+ return 20;
+ case DRM_FORMAT_P010:
+ case DRM_FORMAT_P012:
+ case DRM_FORMAT_P016:
+ return 12;
+ case DRM_FORMAT_XRGB16161616F:
+ case DRM_FORMAT_XBGR16161616F:
+ case DRM_FORMAT_ARGB16161616F:
+ case DRM_FORMAT_ABGR16161616F:
+ case DRM_FORMAT_XVYU12_16161616:
+ case DRM_FORMAT_XVYU16161616:
+ return 4;
+ default:
+ return 1;
+ }
+}
+
+static int icl_hdr_plane_max_width(const struct drm_framebuffer *fb,
+ int color_plane,
+ unsigned int rotation)
+{
+ if (intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
+ return 4096;
+ else
+ return 5120;
+}
+
+static int icl_sdr_plane_max_width(const struct drm_framebuffer *fb,
+ int color_plane,
+ unsigned int rotation)
+{
+ return 5120;
+}
+
+static int skl_plane_max_height(const struct drm_framebuffer *fb,
+ int color_plane,
+ unsigned int rotation)
+{
+ return 4096;
+}
+
+static int icl_plane_max_height(const struct drm_framebuffer *fb,
+ int color_plane,
+ unsigned int rotation)
+{
+ return 4320;
+}
+
+static unsigned int
+skl_plane_max_stride(struct intel_plane *plane,
+ u32 pixel_format, u64 modifier,
+ unsigned int rotation)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ const struct drm_format_info *info = drm_format_info(pixel_format);
+ int cpp = info->cpp[0];
+ int max_horizontal_pixels = 8192;
+ int max_stride_bytes;
+
+ if (DISPLAY_VER(i915) >= 13) {
+ /*
+ * The stride in bytes must not exceed 128K bytes.
+ * For 64bpp pixel formats that allows for a 16K
+ * pixel wide surface.
+ */
+ max_stride_bytes = 131072;
+ if (cpp == 8)
+ max_horizontal_pixels = 16384;
+ else
+ max_horizontal_pixels = 65536;
+ } else {
+ /*
+ * "The stride in bytes must not exceed the size
+ * of 8K pixels and 32K bytes."
+ */
+ max_stride_bytes = 32768;
+ }
+
+ if (drm_rotation_90_or_270(rotation))
+ return min(max_horizontal_pixels, max_stride_bytes / cpp);
+ else
+ return min(max_horizontal_pixels * cpp, max_stride_bytes);
+}
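+
+/*
+ * Illustrative sketch (not part of the driver): on display version 13+
+ * with a 64bpp format (cpp == 8) the limits above work out to
+ *
+ *	min(16384 * 8, 131072) == 131072 bytes	(no rotation)
+ *	min(16384, 131072 / 8) == 16384 pixels	(90/270 rotation)
+ *
+ * i.e. the 128K byte stride cap corresponds exactly to a 16K pixel wide
+ * surface at 64bpp.
+ */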
+
+/* Preoffset values for YUV to RGB Conversion */
+#define PREOFF_YUV_TO_RGB_HI 0x1800
+#define PREOFF_YUV_TO_RGB_ME 0x0000
+#define PREOFF_YUV_TO_RGB_LO 0x1800
+
+#define ROFF(x) (((x) & 0xffff) << 16)
+#define GOFF(x) (((x) & 0xffff) << 0)
+#define BOFF(x) (((x) & 0xffff) << 16)
+
+/*
+ * Programs the input color space conversion stage for ICL HDR planes.
+ * Note that it is assumed that this stage always happens after YUV
+ * range correction. Thus, the input to this stage is assumed to be
+ * in full-range YCbCr.
+ */
+static void
+icl_program_input_csc(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+ enum plane_id plane_id = plane->id;
+
+ static const u16 input_csc_matrix[][9] = {
+ /*
+ * BT.601 full range YCbCr -> full range RGB
+ * The matrix required is:
+ * [1.000, 0.000, 1.371,
+ * 1.000, -0.336, -0.698,
+ * 1.000, 1.732, 0.0000]
+ */
+ [DRM_COLOR_YCBCR_BT601] = {
+ 0x7AF8, 0x7800, 0x0,
+ 0x8B28, 0x7800, 0x9AC0,
+ 0x0, 0x7800, 0x7DD8,
+ },
+ /*
+ * BT.709 full range YCbCr -> full range RGB
+ * The matrix required is:
+ * [1.000, 0.000, 1.574,
+ * 1.000, -0.187, -0.468,
+ * 1.000, 1.855, 0.0000]
+ */
+ [DRM_COLOR_YCBCR_BT709] = {
+ 0x7C98, 0x7800, 0x0,
+ 0x9EF8, 0x7800, 0xAC00,
+ 0x0, 0x7800, 0x7ED8,
+ },
+ /*
+ * BT.2020 full range YCbCr -> full range RGB
+ * The matrix required is:
+ * [1.000, 0.000, 1.474,
+ * 1.000, -0.1645, -0.5713,
+ * 1.000, 1.8814, 0.0000]
+ */
+ [DRM_COLOR_YCBCR_BT2020] = {
+ 0x7BC8, 0x7800, 0x0,
+ 0x8928, 0x7800, 0xAA88,
+ 0x0, 0x7800, 0x7F10,
+ },
+ };
+ const u16 *csc = input_csc_matrix[plane_state->hw.color_encoding];
+
+ intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 0),
+ ROFF(csc[0]) | GOFF(csc[1]));
+ intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 1),
+ BOFF(csc[2]));
+ intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 2),
+ ROFF(csc[3]) | GOFF(csc[4]));
+ intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 3),
+ BOFF(csc[5]));
+ intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 4),
+ ROFF(csc[6]) | GOFF(csc[7]));
+ intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 5),
+ BOFF(csc[8]));
+
+ intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 0),
+ PREOFF_YUV_TO_RGB_HI);
+ intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1),
+ PREOFF_YUV_TO_RGB_ME);
+ intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 2),
+ PREOFF_YUV_TO_RGB_LO);
+ intel_de_write_fw(dev_priv,
+ PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 0), 0x0);
+ intel_de_write_fw(dev_priv,
+ PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 1), 0x0);
+ intel_de_write_fw(dev_priv,
+ PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 2), 0x0);
+}
+
+static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb,
+ int color_plane, unsigned int rotation)
+{
+ /*
+ * The stride is expressed either as a number of 64 byte chunks
+ * for linear buffers, or as a number of tiles for tiled buffers.
+ */
+ if (is_surface_linear(fb, color_plane))
+ return 64;
+ else if (drm_rotation_90_or_270(rotation))
+ return intel_tile_height(fb, color_plane);
+ else
+ return intel_tile_width_bytes(fb, color_plane);
+}
+
+static u32 skl_plane_stride(const struct intel_plane_state *plane_state,
+ int color_plane)
+{
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int rotation = plane_state->hw.rotation;
+ u32 stride = plane_state->view.color_plane[color_plane].scanout_stride;
+
+ if (color_plane >= fb->format->num_planes)
+ return 0;
+
+ return stride / skl_plane_stride_mult(fb, color_plane, rotation);
+}
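+
+/*
+ * Illustrative sketch (not part of the driver): for a linear fb with a
+ * 5120 byte scanout stride the value programmed into the stride register
+ * is 5120 / 64 == 80 chunks; for the same fb laid out X-tiled (X tiles
+ * are 512 bytes wide) it would be 5120 / 512 == 10 tiles.
+ */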
+
+static void
+skl_plane_disable_arm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum plane_id plane_id = plane->id;
+ enum pipe pipe = plane->pipe;
+
+ skl_write_plane_wm(plane, crtc_state);
+
+ intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), 0);
+ intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), 0);
+}
+
+static void
+icl_plane_disable_arm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum plane_id plane_id = plane->id;
+ enum pipe pipe = plane->pipe;
+
+ if (icl_is_hdr_plane(dev_priv, plane_id))
+ intel_de_write_fw(dev_priv, PLANE_CUS_CTL(pipe, plane_id), 0);
+
+ skl_write_plane_wm(plane, crtc_state);
+
+ intel_psr2_disable_plane_sel_fetch(plane, crtc_state);
+ intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), 0);
+ intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), 0);
+}
+
+static bool
+skl_plane_get_hw_state(struct intel_plane *plane,
+ enum pipe *pipe)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum intel_display_power_domain power_domain;
+ enum plane_id plane_id = plane->id;
+ intel_wakeref_t wakeref;
+ bool ret;
+
+ power_domain = POWER_DOMAIN_PIPE(plane->pipe);
+ wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ if (!wakeref)
+ return false;
+
+ ret = intel_de_read(dev_priv, PLANE_CTL(plane->pipe, plane_id)) & PLANE_CTL_ENABLE;
+
+ *pipe = plane->pipe;
+
+ intel_display_power_put(dev_priv, power_domain, wakeref);
+
+ return ret;
+}
+
+static u32 skl_plane_ctl_format(u32 pixel_format)
+{
+ switch (pixel_format) {
+ case DRM_FORMAT_C8:
+ return PLANE_CTL_FORMAT_INDEXED;
+ case DRM_FORMAT_RGB565:
+ return PLANE_CTL_FORMAT_RGB_565;
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
+ return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
+ return PLANE_CTL_FORMAT_XRGB_8888;
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_ABGR2101010:
+ return PLANE_CTL_FORMAT_XRGB_2101010 | PLANE_CTL_ORDER_RGBX;
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_ARGB2101010:
+ return PLANE_CTL_FORMAT_XRGB_2101010;
+ case DRM_FORMAT_XBGR16161616F:
+ case DRM_FORMAT_ABGR16161616F:
+ return PLANE_CTL_FORMAT_XRGB_16161616F | PLANE_CTL_ORDER_RGBX;
+ case DRM_FORMAT_XRGB16161616F:
+ case DRM_FORMAT_ARGB16161616F:
+ return PLANE_CTL_FORMAT_XRGB_16161616F;
+ case DRM_FORMAT_XYUV8888:
+ return PLANE_CTL_FORMAT_XYUV;
+ case DRM_FORMAT_YUYV:
+ return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_ORDER_YUYV;
+ case DRM_FORMAT_YVYU:
+ return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_ORDER_YVYU;
+ case DRM_FORMAT_UYVY:
+ return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_ORDER_UYVY;
+ case DRM_FORMAT_VYUY:
+ return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_ORDER_VYUY;
+ case DRM_FORMAT_NV12:
+ return PLANE_CTL_FORMAT_NV12;
+ case DRM_FORMAT_P010:
+ return PLANE_CTL_FORMAT_P010;
+ case DRM_FORMAT_P012:
+ return PLANE_CTL_FORMAT_P012;
+ case DRM_FORMAT_P016:
+ return PLANE_CTL_FORMAT_P016;
+ case DRM_FORMAT_Y210:
+ return PLANE_CTL_FORMAT_Y210;
+ case DRM_FORMAT_Y212:
+ return PLANE_CTL_FORMAT_Y212;
+ case DRM_FORMAT_Y216:
+ return PLANE_CTL_FORMAT_Y216;
+ case DRM_FORMAT_XVYU2101010:
+ return PLANE_CTL_FORMAT_Y410;
+ case DRM_FORMAT_XVYU12_16161616:
+ return PLANE_CTL_FORMAT_Y412;
+ case DRM_FORMAT_XVYU16161616:
+ return PLANE_CTL_FORMAT_Y416;
+ default:
+ MISSING_CASE(pixel_format);
+ }
+
+ return 0;
+}
+
+static u32 skl_plane_ctl_alpha(const struct intel_plane_state *plane_state)
+{
+ if (!plane_state->hw.fb->format->has_alpha)
+ return PLANE_CTL_ALPHA_DISABLE;
+
+ switch (plane_state->hw.pixel_blend_mode) {
+ case DRM_MODE_BLEND_PIXEL_NONE:
+ return PLANE_CTL_ALPHA_DISABLE;
+ case DRM_MODE_BLEND_PREMULTI:
+ return PLANE_CTL_ALPHA_SW_PREMULTIPLY;
+ case DRM_MODE_BLEND_COVERAGE:
+ return PLANE_CTL_ALPHA_HW_PREMULTIPLY;
+ default:
+ MISSING_CASE(plane_state->hw.pixel_blend_mode);
+ return PLANE_CTL_ALPHA_DISABLE;
+ }
+}
+
+static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state)
+{
+ if (!plane_state->hw.fb->format->has_alpha)
+ return PLANE_COLOR_ALPHA_DISABLE;
+
+ switch (plane_state->hw.pixel_blend_mode) {
+ case DRM_MODE_BLEND_PIXEL_NONE:
+ return PLANE_COLOR_ALPHA_DISABLE;
+ case DRM_MODE_BLEND_PREMULTI:
+ return PLANE_COLOR_ALPHA_SW_PREMULTIPLY;
+ case DRM_MODE_BLEND_COVERAGE:
+ return PLANE_COLOR_ALPHA_HW_PREMULTIPLY;
+ default:
+ MISSING_CASE(plane_state->hw.pixel_blend_mode);
+ return PLANE_COLOR_ALPHA_DISABLE;
+ }
+}
+
+static u32 skl_plane_ctl_tiling(u64 fb_modifier)
+{
+ switch (fb_modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ break;
+ case I915_FORMAT_MOD_X_TILED:
+ return PLANE_CTL_TILED_X;
+ case I915_FORMAT_MOD_Y_TILED:
+ return PLANE_CTL_TILED_Y;
+ case I915_FORMAT_MOD_4_TILED:
+ return PLANE_CTL_TILED_4;
+ case I915_FORMAT_MOD_4_TILED_DG2_RC_CCS:
+ return PLANE_CTL_TILED_4 |
+ PLANE_CTL_RENDER_DECOMPRESSION_ENABLE |
+ PLANE_CTL_CLEAR_COLOR_DISABLE;
+ case I915_FORMAT_MOD_4_TILED_DG2_MC_CCS:
+ return PLANE_CTL_TILED_4 |
+ PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE |
+ PLANE_CTL_CLEAR_COLOR_DISABLE;
+ case I915_FORMAT_MOD_4_TILED_DG2_RC_CCS_CC:
+ return PLANE_CTL_TILED_4 | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
+ case I915_FORMAT_MOD_Y_TILED_CCS:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
+ return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
+ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
+ return PLANE_CTL_TILED_Y |
+ PLANE_CTL_RENDER_DECOMPRESSION_ENABLE |
+ PLANE_CTL_CLEAR_COLOR_DISABLE;
+ case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
+ return PLANE_CTL_TILED_Y | PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE;
+ case I915_FORMAT_MOD_Yf_TILED:
+ return PLANE_CTL_TILED_YF;
+ case I915_FORMAT_MOD_Yf_TILED_CCS:
+ return PLANE_CTL_TILED_YF | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
+ default:
+ MISSING_CASE(fb_modifier);
+ }
+
+ return 0;
+}
+
+static u32 skl_plane_ctl_rotate(unsigned int rotate)
+{
+ switch (rotate) {
+ case DRM_MODE_ROTATE_0:
+ break;
+ /*
+ * DRM_MODE_ROTATE_* is counter-clockwise to stay compatible with
+ * Xrandr, while i915 HW rotation is clockwise; that's why the 90
+ * and 270 degree cases are swapped here.
+ */
+ case DRM_MODE_ROTATE_90:
+ return PLANE_CTL_ROTATE_270;
+ case DRM_MODE_ROTATE_180:
+ return PLANE_CTL_ROTATE_180;
+ case DRM_MODE_ROTATE_270:
+ return PLANE_CTL_ROTATE_90;
+ default:
+ MISSING_CASE(rotate);
+ }
+
+ return 0;
+}
+
+static u32 icl_plane_ctl_flip(unsigned int reflect)
+{
+ switch (reflect) {
+ case 0:
+ break;
+ case DRM_MODE_REFLECT_X:
+ return PLANE_CTL_FLIP_HORIZONTAL;
+ case DRM_MODE_REFLECT_Y:
+ default:
+ MISSING_CASE(reflect);
+ }
+
+ return 0;
+}
+
+static u32 adlp_plane_ctl_arb_slots(const struct intel_plane_state *plane_state)
+{
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+
+ if (intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier)) {
+ switch (fb->format->cpp[0]) {
+ case 2:
+ return PLANE_CTL_ARB_SLOTS(1);
+ default:
+ return PLANE_CTL_ARB_SLOTS(0);
+ }
+ } else {
+ switch (fb->format->cpp[0]) {
+ case 8:
+ return PLANE_CTL_ARB_SLOTS(3);
+ case 4:
+ return PLANE_CTL_ARB_SLOTS(1);
+ default:
+ return PLANE_CTL_ARB_SLOTS(0);
+ }
+ }
+}
+
+static u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ u32 plane_ctl = 0;
+
+ if (DISPLAY_VER(dev_priv) >= 10)
+ return plane_ctl;
+
+ if (crtc_state->gamma_enable)
+ plane_ctl |= PLANE_CTL_PIPE_GAMMA_ENABLE;
+
+ if (crtc_state->csc_enable)
+ plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE;
+
+ return plane_ctl;
+}
+
+static u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(plane_state->uapi.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int rotation = plane_state->hw.rotation;
+ const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
+ u32 plane_ctl;
+
+ plane_ctl = PLANE_CTL_ENABLE;
+
+ if (DISPLAY_VER(dev_priv) < 10) {
+ plane_ctl |= skl_plane_ctl_alpha(plane_state);
+ plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
+
+ if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709)
+ plane_ctl |= PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709;
+
+ if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
+ plane_ctl |= PLANE_CTL_YUV_RANGE_CORRECTION_DISABLE;
+ }
+
+ plane_ctl |= skl_plane_ctl_format(fb->format->format);
+ plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
+ plane_ctl |= skl_plane_ctl_rotate(rotation & DRM_MODE_ROTATE_MASK);
+
+ if (DISPLAY_VER(dev_priv) >= 11)
+ plane_ctl |= icl_plane_ctl_flip(rotation &
+ DRM_MODE_REFLECT_MASK);
+
+ if (key->flags & I915_SET_COLORKEY_DESTINATION)
+ plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
+ else if (key->flags & I915_SET_COLORKEY_SOURCE)
+ plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;
+
+ /* Wa_22012358565:adl-p */
+ if (DISPLAY_VER(dev_priv) == 13)
+ plane_ctl |= adlp_plane_ctl_arb_slots(plane_state);
+
+ return plane_ctl;
+}
+
+static u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ u32 plane_color_ctl = 0;
+
+ if (DISPLAY_VER(dev_priv) >= 11)
+ return plane_color_ctl;
+
+ if (crtc_state->gamma_enable)
+ plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE;
+
+ if (crtc_state->csc_enable)
+ plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;
+
+ return plane_color_ctl;
+}
+
+static u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(plane_state->uapi.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ u32 plane_color_ctl = 0;
+
+ plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
+ plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state);
+
+ if (fb->format->is_yuv && !icl_is_hdr_plane(dev_priv, plane->id)) {
+ switch (plane_state->hw.color_encoding) {
+ case DRM_COLOR_YCBCR_BT709:
+ plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
+ break;
+ case DRM_COLOR_YCBCR_BT2020:
+ plane_color_ctl |=
+ PLANE_COLOR_CSC_MODE_YUV2020_TO_RGB2020;
+ break;
+ default:
+ plane_color_ctl |=
+ PLANE_COLOR_CSC_MODE_YUV601_TO_RGB601;
+ }
+ if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
+ plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
+ } else if (fb->format->is_yuv) {
+ plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE;
+ if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
+ plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
+ }
+
+ if (plane_state->force_black)
+ plane_color_ctl |= PLANE_COLOR_PLANE_CSC_ENABLE;
+
+ return plane_color_ctl;
+}
+
+static u32 skl_surf_address(const struct intel_plane_state *plane_state,
+ int color_plane)
+{
+ struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ u32 offset = plane_state->view.color_plane[color_plane].offset;
+
+ if (intel_fb_uses_dpt(fb)) {
+ /*
+ * The DPT object contains only one vma, so the VMA's offset
+ * within the DPT is always 0.
+ */
+ drm_WARN_ON(&i915->drm, plane_state->dpt_vma->node.start);
+ drm_WARN_ON(&i915->drm, offset & 0x1fffff);
+ return offset >> 9;
+ } else {
+ drm_WARN_ON(&i915->drm, offset & 0xfff);
+ return offset;
+ }
+}
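+
+/*
+ * Illustrative sketch (not part of the driver): with DPT the surface
+ * offset is programmed in 512 byte units and the drm_WARN_ON() above
+ * enforces 2MiB alignment (0x1fffff == 2MiB - 1), e.g.
+ *
+ *	offset = 0x400000;	// 4MiB into the DPT
+ *	reg = offset >> 9;	// 0x2000
+ *
+ * whereas without DPT the offset is programmed in bytes and only needs
+ * 4KiB alignment (the 0xfff check).
+ */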
+
+static u32 skl_plane_surf(const struct intel_plane_state *plane_state,
+ int color_plane)
+{
+ u32 plane_surf;
+
+ plane_surf = intel_plane_ggtt_offset(plane_state) +
+ skl_surf_address(plane_state, color_plane);
+
+ if (plane_state->decrypt)
+ plane_surf |= PLANE_SURF_DECRYPT;
+
+ return plane_surf;
+}
+
+static u32 skl_plane_aux_dist(const struct intel_plane_state *plane_state,
+ int color_plane)
+{
+ struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ int aux_plane = skl_main_to_aux_plane(fb, color_plane);
+ u32 aux_dist;
+
+ if (!aux_plane)
+ return 0;
+
+ aux_dist = skl_surf_address(plane_state, aux_plane) -
+ skl_surf_address(plane_state, color_plane);
+
+ if (DISPLAY_VER(i915) < 12)
+ aux_dist |= PLANE_AUX_STRIDE(skl_plane_stride(plane_state, aux_plane));
+
+ return aux_dist;
+}
+
+static u32 skl_plane_keyval(const struct intel_plane_state *plane_state)
+{
+ const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
+
+ return key->min_value;
+}
+
+static u32 skl_plane_keymax(const struct intel_plane_state *plane_state)
+{
+ const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
+ u8 alpha = plane_state->hw.alpha >> 8;
+
+ return (key->max_value & 0xffffff) | PLANE_KEYMAX_ALPHA(alpha);
+}
+
+static u32 skl_plane_keymsk(const struct intel_plane_state *plane_state)
+{
+ const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
+ u8 alpha = plane_state->hw.alpha >> 8;
+ u32 keymsk;
+
+ keymsk = key->channel_mask & 0x7ffffff;
+ if (alpha < 0xff)
+ keymsk |= PLANE_KEYMSK_ALPHA_ENABLE;
+
+ return keymsk;
+}
+
+static void icl_plane_csc_load_black(struct intel_plane *plane)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ enum plane_id plane_id = plane->id;
+ enum pipe pipe = plane->pipe;
+
+ intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 0), 0);
+ intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 1), 0);
+
+ intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 2), 0);
+ intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 3), 0);
+
+ intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 4), 0);
+ intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 5), 0);
+
+ intel_de_write_fw(i915, PLANE_CSC_PREOFF(pipe, plane_id, 0), 0);
+ intel_de_write_fw(i915, PLANE_CSC_PREOFF(pipe, plane_id, 1), 0);
+ intel_de_write_fw(i915, PLANE_CSC_PREOFF(pipe, plane_id, 2), 0);
+
+ intel_de_write_fw(i915, PLANE_CSC_POSTOFF(pipe, plane_id, 0), 0);
+ intel_de_write_fw(i915, PLANE_CSC_POSTOFF(pipe, plane_id, 1), 0);
+ intel_de_write_fw(i915, PLANE_CSC_POSTOFF(pipe, plane_id, 2), 0);
+}
+
+static int icl_plane_color_plane(const struct intel_plane_state *plane_state)
+{
+ /* Program the UV plane on planar master */
+ if (plane_state->planar_linked_plane && !plane_state->planar_slave)
+ return 1;
+ else
+ return 0;
+}
+
+static void
+skl_plane_update_noarm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum plane_id plane_id = plane->id;
+ enum pipe pipe = plane->pipe;
+ u32 stride = skl_plane_stride(plane_state, 0);
+ int crtc_x = plane_state->uapi.dst.x1;
+ int crtc_y = plane_state->uapi.dst.y1;
+ u32 src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+ u32 src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
+
+ /* The scaler will handle the output position */
+ if (plane_state->scaler_id >= 0) {
+ crtc_x = 0;
+ crtc_y = 0;
+ }
+
+ intel_de_write_fw(dev_priv, PLANE_STRIDE(pipe, plane_id),
+ PLANE_STRIDE_(stride));
+ intel_de_write_fw(dev_priv, PLANE_POS(pipe, plane_id),
+ PLANE_POS_Y(crtc_y) | PLANE_POS_X(crtc_x));
+ intel_de_write_fw(dev_priv, PLANE_SIZE(pipe, plane_id),
+ PLANE_HEIGHT(src_h - 1) | PLANE_WIDTH(src_w - 1));
+
+ skl_write_plane_wm(plane, crtc_state);
+}
+
+static void
+skl_plane_update_arm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum plane_id plane_id = plane->id;
+ enum pipe pipe = plane->pipe;
+ u32 x = plane_state->view.color_plane[0].x;
+ u32 y = plane_state->view.color_plane[0].y;
+ u32 plane_ctl, plane_color_ctl = 0;
+
+ plane_ctl = plane_state->ctl |
+ skl_plane_ctl_crtc(crtc_state);
+
+ if (DISPLAY_VER(dev_priv) >= 10)
+ plane_color_ctl = plane_state->color_ctl |
+ glk_plane_color_ctl_crtc(crtc_state);
+
+ intel_de_write_fw(dev_priv, PLANE_KEYVAL(pipe, plane_id), skl_plane_keyval(plane_state));
+ intel_de_write_fw(dev_priv, PLANE_KEYMSK(pipe, plane_id), skl_plane_keymsk(plane_state));
+ intel_de_write_fw(dev_priv, PLANE_KEYMAX(pipe, plane_id), skl_plane_keymax(plane_state));
+
+ intel_de_write_fw(dev_priv, PLANE_OFFSET(pipe, plane_id),
+ PLANE_OFFSET_Y(y) | PLANE_OFFSET_X(x));
+
+ intel_de_write_fw(dev_priv, PLANE_AUX_DIST(pipe, plane_id),
+ skl_plane_aux_dist(plane_state, 0));
+
+ intel_de_write_fw(dev_priv, PLANE_AUX_OFFSET(pipe, plane_id),
+ PLANE_OFFSET_Y(plane_state->view.color_plane[1].y) |
+ PLANE_OFFSET_X(plane_state->view.color_plane[1].x));
+
+ if (DISPLAY_VER(dev_priv) >= 10)
+ intel_de_write_fw(dev_priv, PLANE_COLOR_CTL(pipe, plane_id), plane_color_ctl);
+
+ /*
+ * Enable the scaler before the plane so that we don't
+ * get a catastrophic underrun even if the two operations
+ * end up happening in two different frames.
+ *
+ * TODO: split into noarm+arm pair
+ */
+ if (plane_state->scaler_id >= 0)
+ skl_program_plane_scaler(plane, crtc_state, plane_state);
+
+ /*
+ * The control register self-arms if the plane was previously
+ * disabled. Try to make the plane enable atomic by writing
+ * the control register just before the surface register.
+ */
+ intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), plane_ctl);
+ intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id),
+ skl_plane_surf(plane_state, 0));
+}
+
+static void
+icl_plane_update_noarm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum plane_id plane_id = plane->id;
+ enum pipe pipe = plane->pipe;
+ int color_plane = icl_plane_color_plane(plane_state);
+ u32 stride = skl_plane_stride(plane_state, color_plane);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ int crtc_x = plane_state->uapi.dst.x1;
+ int crtc_y = plane_state->uapi.dst.y1;
+ int x = plane_state->view.color_plane[color_plane].x;
+ int y = plane_state->view.color_plane[color_plane].y;
+ int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+ int src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
+ u32 plane_color_ctl;
+
+ plane_color_ctl = plane_state->color_ctl |
+ glk_plane_color_ctl_crtc(crtc_state);
+
+ /* The scaler will handle the output position */
+ if (plane_state->scaler_id >= 0) {
+ crtc_x = 0;
+ crtc_y = 0;
+ }
+
+ intel_de_write_fw(dev_priv, PLANE_STRIDE(pipe, plane_id),
+ PLANE_STRIDE_(stride));
+ intel_de_write_fw(dev_priv, PLANE_POS(pipe, plane_id),
+ PLANE_POS_Y(crtc_y) | PLANE_POS_X(crtc_x));
+ intel_de_write_fw(dev_priv, PLANE_SIZE(pipe, plane_id),
+ PLANE_HEIGHT(src_h - 1) | PLANE_WIDTH(src_w - 1));
+
+ intel_de_write_fw(dev_priv, PLANE_KEYVAL(pipe, plane_id), skl_plane_keyval(plane_state));
+ intel_de_write_fw(dev_priv, PLANE_KEYMSK(pipe, plane_id), skl_plane_keymsk(plane_state));
+ intel_de_write_fw(dev_priv, PLANE_KEYMAX(pipe, plane_id), skl_plane_keymax(plane_state));
+
+ intel_de_write_fw(dev_priv, PLANE_OFFSET(pipe, plane_id),
+ PLANE_OFFSET_Y(y) | PLANE_OFFSET_X(x));
+
+ if (intel_fb_is_rc_ccs_cc_modifier(fb->modifier)) {
+ intel_de_write_fw(dev_priv, PLANE_CC_VAL(pipe, plane_id, 0),
+ lower_32_bits(plane_state->ccval));
+ intel_de_write_fw(dev_priv, PLANE_CC_VAL(pipe, plane_id, 1),
+ upper_32_bits(plane_state->ccval));
+ }
+
+ /* FLAT CCS doesn't need to program AUX_DIST */
+ if (!HAS_FLAT_CCS(dev_priv))
+ intel_de_write_fw(dev_priv, PLANE_AUX_DIST(pipe, plane_id),
+ skl_plane_aux_dist(plane_state, color_plane));
+
+ if (icl_is_hdr_plane(dev_priv, plane_id))
+ intel_de_write_fw(dev_priv, PLANE_CUS_CTL(pipe, plane_id),
+ plane_state->cus_ctl);
+
+ intel_de_write_fw(dev_priv, PLANE_COLOR_CTL(pipe, plane_id), plane_color_ctl);
+
+ if (fb->format->is_yuv && icl_is_hdr_plane(dev_priv, plane_id))
+ icl_program_input_csc(plane, crtc_state, plane_state);
+
+ skl_write_plane_wm(plane, crtc_state);
+
+ /*
+ * FIXME: a pxp session invalidation can hit at any time, even at
+ * commit time or after it, in which case the display content will
+ * be garbage.
+ */
+ if (plane_state->force_black)
+ icl_plane_csc_load_black(plane);
+
+ intel_psr2_program_plane_sel_fetch(plane, crtc_state, plane_state, color_plane);
+}
+
+static void
+icl_plane_update_arm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum plane_id plane_id = plane->id;
+ enum pipe pipe = plane->pipe;
+ int color_plane = icl_plane_color_plane(plane_state);
+ u32 plane_ctl;
+
+ plane_ctl = plane_state->ctl |
+ skl_plane_ctl_crtc(crtc_state);
+
+ /*
+ * Enable the scaler before the plane so that we don't
+ * get a catastrophic underrun even if the two operations
+ * end up happening in two different frames.
+ *
+ * TODO: split into noarm+arm pair
+ */
+ if (plane_state->scaler_id >= 0)
+ skl_program_plane_scaler(plane, crtc_state, plane_state);
+
+ /*
+ * The control register self-arms if the plane was previously
+ * disabled. Try to make the plane enable atomic by writing
+ * the control register just before the surface register.
+ */
+ intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), plane_ctl);
+ intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id),
+ skl_plane_surf(plane_state, color_plane));
+}
+
+static void
+skl_plane_async_flip(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ bool async_flip)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum plane_id plane_id = plane->id;
+ enum pipe pipe = plane->pipe;
+ u32 plane_ctl = plane_state->ctl;
+
+ plane_ctl |= skl_plane_ctl_crtc(crtc_state);
+
+ if (async_flip)
+ plane_ctl |= PLANE_CTL_ASYNC_FLIP;
+
+ intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), plane_ctl);
+ intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id),
+ skl_plane_surf(plane_state, 0));
+}
+
+static bool intel_format_is_p01x(u32 format)
+{
+ switch (format) {
+ case DRM_FORMAT_P010:
+ case DRM_FORMAT_P012:
+ case DRM_FORMAT_P016:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int rotation = plane_state->hw.rotation;
+
+ if (!fb)
+ return 0;
+
+ if (rotation & ~(DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180) &&
+ intel_fb_is_ccs_modifier(fb->modifier)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "RC support only with 0/180 degree rotation (%x)\n",
+ rotation);
+ return -EINVAL;
+ }
+
+ if (rotation & DRM_MODE_REFLECT_X &&
+ fb->modifier == DRM_FORMAT_MOD_LINEAR) {
+ drm_dbg_kms(&dev_priv->drm,
+ "horizontal flip is not supported with linear surface formats\n");
+ return -EINVAL;
+ }
+
+ if (drm_rotation_90_or_270(rotation)) {
+ if (!intel_fb_supports_90_270_rotation(to_intel_framebuffer(fb))) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Y/Yf tiling required for 90/270!\n");
+ return -EINVAL;
+ }
+
+ /*
+ * 90/270 is not allowed with RGB64 16:16:16:16 and
+ * Indexed 8-bit. RGB 16-bit 5:6:5 is allowed from gen11 onwards.
+ */
+ switch (fb->format->format) {
+ case DRM_FORMAT_RGB565:
+ if (DISPLAY_VER(dev_priv) >= 11)
+ break;
+ fallthrough;
+ case DRM_FORMAT_C8:
+ case DRM_FORMAT_XRGB16161616F:
+ case DRM_FORMAT_XBGR16161616F:
+ case DRM_FORMAT_ARGB16161616F:
+ case DRM_FORMAT_ABGR16161616F:
+ case DRM_FORMAT_Y210:
+ case DRM_FORMAT_Y212:
+ case DRM_FORMAT_Y216:
+ case DRM_FORMAT_XVYU12_16161616:
+ case DRM_FORMAT_XVYU16161616:
+ drm_dbg_kms(&dev_priv->drm,
+ "Unsupported pixel format %p4cc for 90/270!\n",
+ &fb->format->format);
+ return -EINVAL;
+ default:
+ break;
+ }
+ }
+
+ /* Y-tiling is not supported in IF-ID Interlace mode */
+ if (crtc_state->hw.enable &&
+ crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE &&
+ fb->modifier != DRM_FORMAT_MOD_LINEAR &&
+ fb->modifier != I915_FORMAT_MOD_X_TILED) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Y/Yf tiling not supported in IF-ID mode\n");
+ return -EINVAL;
+ }
+
+ /* Wa_1606054188:tgl,adl-s */
+ if ((IS_ALDERLAKE_S(dev_priv) || IS_TIGERLAKE(dev_priv)) &&
+ plane_state->ckey.flags & I915_SET_COLORKEY_SOURCE &&
+ intel_format_is_p01x(fb->format->format)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Source color keying not supported with P01x formats\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int skl_plane_check_dst_coordinates(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(plane_state->uapi.plane->dev);
+ int crtc_x = plane_state->uapi.dst.x1;
+ int crtc_w = drm_rect_width(&plane_state->uapi.dst);
+ int pipe_src_w = drm_rect_width(&crtc_state->pipe_src);
+
+ /*
+ * Display WA #1175: glk
+ * Planes other than the cursor may cause FIFO underflow and display
+ * corruption if starting less than 4 pixels from the right edge of
+ * the screen.
+ * Besides the above WA, also fix the similar problem where planes
+ * other than the cursor that end less than 4 pixels from the left
+ * edge of the screen may cause FIFO underflow and display corruption.
+ */
+ if (DISPLAY_VER(dev_priv) == 10 &&
+ (crtc_x + crtc_w < 4 || crtc_x > pipe_src_w - 4)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "requested plane X %s position %d invalid (valid range %d-%d)\n",
+ crtc_x + crtc_w < 4 ? "end" : "start",
+ crtc_x + crtc_w < 4 ? crtc_x + crtc_w : crtc_x,
+ 4, pipe_src_w - 4);
+ return -ERANGE;
+ }
+
+ return 0;
+}
+
+static int skl_plane_check_nv12_rotation(const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int rotation = plane_state->hw.rotation;
+ int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+
+ /* Display WA #1106 */
+ if (intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) &&
+ src_w & 3 &&
+ (rotation == DRM_MODE_ROTATE_270 ||
+ rotation == (DRM_MODE_REFLECT_X | DRM_MODE_ROTATE_90))) {
+ drm_dbg_kms(&i915->drm, "src width must be multiple of 4 for rotated planar YUV\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int skl_plane_max_scale(struct drm_i915_private *dev_priv,
+ const struct drm_framebuffer *fb)
+{
+ /*
+ * We don't yet know the final source width nor
+ * whether we can use the HQ scaler mode. Assume
+ * the best case.
+ * FIXME need to properly check this later.
+ */
+ if (DISPLAY_VER(dev_priv) >= 10 ||
+ !intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
+ return 0x30000 - 1;
+ else
+ return 0x20000 - 1;
+}
+
+static int intel_plane_min_width(struct intel_plane *plane,
+ const struct drm_framebuffer *fb,
+ int color_plane,
+ unsigned int rotation)
+{
+ if (plane->min_width)
+ return plane->min_width(fb, color_plane, rotation);
+ else
+ return 1;
+}
+
+static int intel_plane_max_width(struct intel_plane *plane,
+ const struct drm_framebuffer *fb,
+ int color_plane,
+ unsigned int rotation)
+{
+ if (plane->max_width)
+ return plane->max_width(fb, color_plane, rotation);
+ else
+ return INT_MAX;
+}
+
+static int intel_plane_max_height(struct intel_plane *plane,
+ const struct drm_framebuffer *fb,
+ int color_plane,
+ unsigned int rotation)
+{
+ if (plane->max_height)
+ return plane->max_height(fb, color_plane, rotation);
+ else
+ return INT_MAX;
+}
+
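+/*
+ * Step the AUX (CCS) surface offset backwards, one alignment unit at a
+ * time, until its x/y coordinates line up with the main surface, or we
+ * run out of offsets to try.
+ */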
+static bool
+skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
+ int main_x, int main_y, u32 main_offset,
+ int ccs_plane)
+{
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ int aux_x = plane_state->view.color_plane[ccs_plane].x;
+ int aux_y = plane_state->view.color_plane[ccs_plane].y;
+ u32 aux_offset = plane_state->view.color_plane[ccs_plane].offset;
+ u32 alignment = intel_surf_alignment(fb, ccs_plane);
+ int hsub;
+ int vsub;
+
+ intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane);
+ while (aux_offset >= main_offset && aux_y <= main_y) {
+ int x, y;
+
+ if (aux_x == main_x && aux_y == main_y)
+ break;
+
+ if (aux_offset == 0)
+ break;
+
+ x = aux_x / hsub;
+ y = aux_y / vsub;
+ aux_offset = intel_plane_adjust_aligned_offset(&x, &y,
+ plane_state,
+ ccs_plane,
+ aux_offset,
+ aux_offset -
+ alignment);
+ aux_x = x * hsub + aux_x % hsub;
+ aux_y = y * vsub + aux_y % vsub;
+ }
+
+ if (aux_x != main_x || aux_y != main_y)
+ return false;
+
+ plane_state->view.color_plane[ccs_plane].offset = aux_offset;
+ plane_state->view.color_plane[ccs_plane].x = aux_x;
+ plane_state->view.color_plane[ccs_plane].y = aux_y;
+
+ return true;
+}
+
+int skl_calc_main_surface_offset(const struct intel_plane_state *plane_state,
+ int *x, int *y, u32 *offset)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ const int aux_plane = skl_main_to_aux_plane(fb, 0);
+ const u32 aux_offset = plane_state->view.color_plane[aux_plane].offset;
+ const u32 alignment = intel_surf_alignment(fb, 0);
+ const int w = drm_rect_width(&plane_state->uapi.src) >> 16;
+
+ intel_add_fb_offsets(x, y, plane_state, 0);
+ *offset = intel_plane_compute_aligned_offset(x, y, plane_state, 0);
+ if (drm_WARN_ON(&dev_priv->drm, alignment && !is_power_of_2(alignment)))
+ return -EINVAL;
+
+ /*
+ * AUX surface offset is specified as the distance from the
+ * main surface offset, and it must be non-negative. Make
+ * sure that is what we will get.
+ */
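+	/* aux_offset & ~(alignment - 1) rounds the AUX offset down to alignment */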
+ if (aux_plane && *offset > aux_offset)
+ *offset = intel_plane_adjust_aligned_offset(x, y, plane_state, 0,
+ *offset,
+ aux_offset & ~(alignment - 1));
+
+ /*
+ * When using an X-tiled surface, the plane blows up
+	 * if the x offset + width exceeds the stride.
+	 *
+	 * TODO: linear and Y-tiled seem fine, Yf untested.
+ */
+ if (fb->modifier == I915_FORMAT_MOD_X_TILED) {
+ int cpp = fb->format->cpp[0];
+
+ while ((*x + w) * cpp > plane_state->view.color_plane[0].mapping_stride) {
+ if (*offset == 0) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Unable to find suitable display surface offset due to X-tiling\n");
+ return -EINVAL;
+ }
+
+ *offset = intel_plane_adjust_aligned_offset(x, y, plane_state, 0,
+ *offset,
+ *offset - alignment);
+ }
+ }
+
+ return 0;
+}
+
+static int skl_check_main_surface(struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ const unsigned int rotation = plane_state->hw.rotation;
+ int x = plane_state->uapi.src.x1 >> 16;
+ int y = plane_state->uapi.src.y1 >> 16;
+ const int w = drm_rect_width(&plane_state->uapi.src) >> 16;
+ const int h = drm_rect_height(&plane_state->uapi.src) >> 16;
+ const int min_width = intel_plane_min_width(plane, fb, 0, rotation);
+ const int max_width = intel_plane_max_width(plane, fb, 0, rotation);
+ const int max_height = intel_plane_max_height(plane, fb, 0, rotation);
+ const int aux_plane = skl_main_to_aux_plane(fb, 0);
+ const u32 alignment = intel_surf_alignment(fb, 0);
+ u32 offset;
+ int ret;
+
+ if (w > max_width || w < min_width || h > max_height || h < 1) {
+ drm_dbg_kms(&dev_priv->drm,
+ "requested Y/RGB source size %dx%d outside limits (min: %dx1 max: %dx%d)\n",
+ w, h, min_width, max_width, max_height);
+ return -EINVAL;
+ }
+
+ ret = skl_calc_main_surface_offset(plane_state, &x, &y, &offset);
+ if (ret)
+ return ret;
+
+ /*
+	 * The CCS AUX surface doesn't have its own x/y offsets, so we must
+	 * make sure they match the main surface x/y offsets. On DG2 the fb
+	 * has no AUX plane, so skip this check.
+ */
+ if (intel_fb_is_ccs_modifier(fb->modifier) && aux_plane) {
+ while (!skl_check_main_ccs_coordinates(plane_state, x, y,
+ offset, aux_plane)) {
+ if (offset == 0)
+ break;
+
+ offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
+ offset, offset - alignment);
+ }
+
+ if (x != plane_state->view.color_plane[aux_plane].x ||
+ y != plane_state->view.color_plane[aux_plane].y) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Unable to find suitable display surface offset due to CCS\n");
+ return -EINVAL;
+ }
+ }
+
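+	/* x/y offsets are limited to 13 bits before display ver 13, 16 bits after */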
+ if (DISPLAY_VER(dev_priv) >= 13)
+ drm_WARN_ON(&dev_priv->drm, x > 65535 || y > 65535);
+ else
+ drm_WARN_ON(&dev_priv->drm, x > 8191 || y > 8191);
+
+ plane_state->view.color_plane[0].offset = offset;
+ plane_state->view.color_plane[0].x = x;
+ plane_state->view.color_plane[0].y = y;
+
+ /*
+ * Put the final coordinates back so that the src
+ * coordinate checks will see the right values.
+ */
+ drm_rect_translate_to(&plane_state->uapi.src,
+ x << 16, y << 16);
+
+ return 0;
+}
+
+static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int rotation = plane_state->hw.rotation;
+ int uv_plane = 1;
+ int ccs_plane = intel_fb_is_ccs_modifier(fb->modifier) ?
+ skl_main_to_aux_plane(fb, uv_plane) : 0;
+ int max_width = intel_plane_max_width(plane, fb, uv_plane, rotation);
+ int max_height = intel_plane_max_height(plane, fb, uv_plane, rotation);
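+	/* >> 17: drop the 16.16 fractional bits and halve for 4:2:0 chroma subsampling */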
+ int x = plane_state->uapi.src.x1 >> 17;
+ int y = plane_state->uapi.src.y1 >> 17;
+ int w = drm_rect_width(&plane_state->uapi.src) >> 17;
+ int h = drm_rect_height(&plane_state->uapi.src) >> 17;
+ u32 offset;
+
+ /* FIXME not quite sure how/if these apply to the chroma plane */
+ if (w > max_width || h > max_height) {
+ drm_dbg_kms(&i915->drm,
+ "CbCr source size %dx%d too big (limit %dx%d)\n",
+ w, h, max_width, max_height);
+ return -EINVAL;
+ }
+
+ intel_add_fb_offsets(&x, &y, plane_state, uv_plane);
+ offset = intel_plane_compute_aligned_offset(&x, &y,
+ plane_state, uv_plane);
+
+ if (ccs_plane) {
+ u32 aux_offset = plane_state->view.color_plane[ccs_plane].offset;
+ u32 alignment = intel_surf_alignment(fb, uv_plane);
+
+ if (offset > aux_offset)
+ offset = intel_plane_adjust_aligned_offset(&x, &y,
+ plane_state,
+ uv_plane,
+ offset,
+ aux_offset & ~(alignment - 1));
+
+ while (!skl_check_main_ccs_coordinates(plane_state, x, y,
+ offset, ccs_plane)) {
+ if (offset == 0)
+ break;
+
+ offset = intel_plane_adjust_aligned_offset(&x, &y,
+ plane_state,
+ uv_plane,
+ offset, offset - alignment);
+ }
+
+ if (x != plane_state->view.color_plane[ccs_plane].x ||
+ y != plane_state->view.color_plane[ccs_plane].y) {
+ drm_dbg_kms(&i915->drm,
+ "Unable to find suitable display surface offset due to CCS\n");
+ return -EINVAL;
+ }
+ }
+
+ if (DISPLAY_VER(i915) >= 13)
+ drm_WARN_ON(&i915->drm, x > 65535 || y > 65535);
+ else
+ drm_WARN_ON(&i915->drm, x > 8191 || y > 8191);
+
+ plane_state->view.color_plane[uv_plane].offset = offset;
+ plane_state->view.color_plane[uv_plane].x = x;
+ plane_state->view.color_plane[uv_plane].y = y;
+
+ return 0;
+}
+
+static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
+{
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ int src_x = plane_state->uapi.src.x1 >> 16;
+ int src_y = plane_state->uapi.src.y1 >> 16;
+ u32 offset;
+ int ccs_plane;
+
+ for (ccs_plane = 0; ccs_plane < fb->format->num_planes; ccs_plane++) {
+ int main_hsub, main_vsub;
+ int hsub, vsub;
+ int x, y;
+
+ if (!intel_fb_is_ccs_aux_plane(fb, ccs_plane))
+ continue;
+
+ intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, fb,
+ skl_ccs_to_main_plane(fb, ccs_plane));
+ intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane);
+
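+		/*
+		 * The CCS plane's subsampling is specified relative to its
+		 * main plane, so the total subsampling vs. the fb is the
+		 * product of the two.
+		 */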
+ hsub *= main_hsub;
+ vsub *= main_vsub;
+ x = src_x / hsub;
+ y = src_y / vsub;
+
+ intel_add_fb_offsets(&x, &y, plane_state, ccs_plane);
+
+ offset = intel_plane_compute_aligned_offset(&x, &y,
+ plane_state,
+ ccs_plane);
+
+ plane_state->view.color_plane[ccs_plane].offset = offset;
+ plane_state->view.color_plane[ccs_plane].x = (x * hsub + src_x % hsub) / main_hsub;
+ plane_state->view.color_plane[ccs_plane].y = (y * vsub + src_y % vsub) / main_vsub;
+ }
+
+ return 0;
+}
+
+static int skl_check_plane_surface(struct intel_plane_state *plane_state)
+{
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ int ret;
+
+ ret = intel_plane_compute_gtt(plane_state);
+ if (ret)
+ return ret;
+
+ if (!plane_state->uapi.visible)
+ return 0;
+
+ /*
+ * Handle the AUX surface first since the main surface setup depends on
+ * it.
+ */
+ if (intel_fb_is_ccs_modifier(fb->modifier)) {
+ ret = skl_check_ccs_aux_surface(plane_state);
+ if (ret)
+ return ret;
+ }
+
+ if (intel_format_info_is_yuv_semiplanar(fb->format,
+ fb->modifier)) {
+ ret = skl_check_nv12_aux_surface(plane_state);
+ if (ret)
+ return ret;
+ }
+
+ ret = skl_check_main_surface(plane_state);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static bool skl_fb_scalable(const struct drm_framebuffer *fb)
+{
+ if (!fb)
+ return false;
+
+ switch (fb->format->format) {
+ case DRM_FORMAT_C8:
+ return false;
+ case DRM_FORMAT_XRGB16161616F:
+ case DRM_FORMAT_ARGB16161616F:
+ case DRM_FORMAT_XBGR16161616F:
+ case DRM_FORMAT_ABGR16161616F:
+ return DISPLAY_VER(to_i915(fb->dev)) >= 11;
+ default:
+ return true;
+ }
+}
+
+static bool bo_has_valid_encryption(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+
+ return intel_pxp_key_check(&to_gt(i915)->pxp, obj, false) == 0;
+}
+
+static bool pxp_is_borked(struct drm_i915_gem_object *obj)
+{
+ return i915_gem_object_is_protected(obj) && !bo_has_valid_encryption(obj);
+}
+
+static int skl_plane_check(struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ int min_scale = DRM_PLANE_NO_SCALING;
+ int max_scale = DRM_PLANE_NO_SCALING;
+ int ret;
+
+ ret = skl_plane_check_fb(crtc_state, plane_state);
+ if (ret)
+ return ret;
+
+ /* use scaler when colorkey is not required */
+ if (!plane_state->ckey.flags && skl_fb_scalable(fb)) {
+ min_scale = 1;
+ max_scale = skl_plane_max_scale(dev_priv, fb);
+ }
+
+ ret = intel_atomic_plane_check_clipping(plane_state, crtc_state,
+ min_scale, max_scale, true);
+ if (ret)
+ return ret;
+
+ ret = skl_check_plane_surface(plane_state);
+ if (ret)
+ return ret;
+
+ if (!plane_state->uapi.visible)
+ return 0;
+
+ ret = skl_plane_check_dst_coordinates(crtc_state, plane_state);
+ if (ret)
+ return ret;
+
+ ret = intel_plane_check_src_coordinates(plane_state);
+ if (ret)
+ return ret;
+
+ ret = skl_plane_check_nv12_rotation(plane_state);
+ if (ret)
+ return ret;
+
+ if (DISPLAY_VER(dev_priv) >= 11) {
+ plane_state->decrypt = bo_has_valid_encryption(intel_fb_obj(fb));
+ plane_state->force_black = pxp_is_borked(intel_fb_obj(fb));
+ }
+
+	/* HW only has 8 bits of alpha precision, so disable the plane if it's effectively invisible */
+ if (!(plane_state->hw.alpha >> 8))
+ plane_state->uapi.visible = false;
+
+ plane_state->ctl = skl_plane_ctl(crtc_state, plane_state);
+
+ if (DISPLAY_VER(dev_priv) >= 10)
+ plane_state->color_ctl = glk_plane_color_ctl(crtc_state,
+ plane_state);
+
+ if (intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) &&
+ icl_is_hdr_plane(dev_priv, plane->id))
+ /* Enable and use MPEG-2 chroma siting */
+ plane_state->cus_ctl = PLANE_CUS_ENABLE |
+ PLANE_CUS_HPHASE_0 |
+ PLANE_CUS_VPHASE_SIGN_NEGATIVE | PLANE_CUS_VPHASE_0_25;
+ else
+ plane_state->cus_ctl = 0;
+
+ return 0;
+}
+
+static enum intel_fbc_id skl_fbc_id_for_pipe(enum pipe pipe)
+{
+ return pipe - PIPE_A + INTEL_FBC_A;
+}
+
+static bool skl_plane_has_fbc(struct drm_i915_private *dev_priv,
+ enum intel_fbc_id fbc_id, enum plane_id plane_id)
+{
+ if ((RUNTIME_INFO(dev_priv)->fbc_mask & BIT(fbc_id)) == 0)
+ return false;
+
+ return plane_id == PLANE_PRIMARY;
+}
+
+static struct intel_fbc *skl_plane_fbc(struct drm_i915_private *dev_priv,
+ enum pipe pipe, enum plane_id plane_id)
+{
+ enum intel_fbc_id fbc_id = skl_fbc_id_for_pipe(pipe);
+
+ if (skl_plane_has_fbc(dev_priv, fbc_id, plane_id))
+ return dev_priv->display.fbc[fbc_id];
+ else
+ return NULL;
+}
+
+static bool skl_plane_has_planar(struct drm_i915_private *dev_priv,
+ enum pipe pipe, enum plane_id plane_id)
+{
+ /* Display WA #0870: skl, bxt */
+ if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
+ return false;
+
+ if (DISPLAY_VER(dev_priv) == 9 && pipe == PIPE_C)
+ return false;
+
+ if (plane_id != PLANE_PRIMARY && plane_id != PLANE_SPRITE0)
+ return false;
+
+ return true;
+}
+
+static const u32 *skl_get_plane_formats(struct drm_i915_private *dev_priv,
+ enum pipe pipe, enum plane_id plane_id,
+ int *num_formats)
+{
+ if (skl_plane_has_planar(dev_priv, pipe, plane_id)) {
+ *num_formats = ARRAY_SIZE(skl_planar_formats);
+ return skl_planar_formats;
+ } else {
+ *num_formats = ARRAY_SIZE(skl_plane_formats);
+ return skl_plane_formats;
+ }
+}
+
+static const u32 *glk_get_plane_formats(struct drm_i915_private *dev_priv,
+ enum pipe pipe, enum plane_id plane_id,
+ int *num_formats)
+{
+ if (skl_plane_has_planar(dev_priv, pipe, plane_id)) {
+ *num_formats = ARRAY_SIZE(glk_planar_formats);
+ return glk_planar_formats;
+ } else {
+ *num_formats = ARRAY_SIZE(skl_plane_formats);
+ return skl_plane_formats;
+ }
+}
+
+static const u32 *icl_get_plane_formats(struct drm_i915_private *dev_priv,
+ enum pipe pipe, enum plane_id plane_id,
+ int *num_formats)
+{
+ if (icl_is_hdr_plane(dev_priv, plane_id)) {
+ *num_formats = ARRAY_SIZE(icl_hdr_plane_formats);
+ return icl_hdr_plane_formats;
+ } else if (icl_is_nv12_y_plane(dev_priv, plane_id)) {
+ *num_formats = ARRAY_SIZE(icl_sdr_y_plane_formats);
+ return icl_sdr_y_plane_formats;
+ } else {
+ *num_formats = ARRAY_SIZE(icl_sdr_uv_plane_formats);
+ return icl_sdr_uv_plane_formats;
+ }
+}
+
+static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
+ u32 format, u64 modifier)
+{
+ struct intel_plane *plane = to_intel_plane(_plane);
+
+ if (!intel_fb_plane_supports_modifier(plane, modifier))
+ return false;
+
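+	/*
+	 * The case blocks cascade: each format group supports the modifiers
+	 * checked in its own block plus those of every block that follows it.
+	 */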
+ switch (format) {
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_ABGR8888:
+ if (intel_fb_is_ccs_modifier(modifier))
+ return true;
+ fallthrough;
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_ARGB2101010:
+ case DRM_FORMAT_ABGR2101010:
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_XYUV8888:
+ case DRM_FORMAT_P010:
+ case DRM_FORMAT_P012:
+ case DRM_FORMAT_P016:
+ case DRM_FORMAT_XVYU2101010:
+ if (modifier == I915_FORMAT_MOD_Yf_TILED)
+ return true;
+ fallthrough;
+ case DRM_FORMAT_C8:
+ case DRM_FORMAT_XBGR16161616F:
+ case DRM_FORMAT_ABGR16161616F:
+ case DRM_FORMAT_XRGB16161616F:
+ case DRM_FORMAT_ARGB16161616F:
+ case DRM_FORMAT_Y210:
+ case DRM_FORMAT_Y212:
+ case DRM_FORMAT_Y216:
+ case DRM_FORMAT_XVYU12_16161616:
+ case DRM_FORMAT_XVYU16161616:
+ if (modifier == DRM_FORMAT_MOD_LINEAR ||
+ modifier == I915_FORMAT_MOD_X_TILED ||
+ modifier == I915_FORMAT_MOD_Y_TILED)
+ return true;
+ fallthrough;
+ default:
+ return false;
+ }
+}
+
+static bool gen12_plane_format_mod_supported(struct drm_plane *_plane,
+ u32 format, u64 modifier)
+{
+ struct intel_plane *plane = to_intel_plane(_plane);
+
+ if (!intel_fb_plane_supports_modifier(plane, modifier))
+ return false;
+
+ switch (format) {
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_ABGR8888:
+ if (intel_fb_is_ccs_modifier(modifier))
+ return true;
+ fallthrough;
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_XYUV8888:
+ case DRM_FORMAT_P010:
+ case DRM_FORMAT_P012:
+ case DRM_FORMAT_P016:
+ if (intel_fb_is_mc_ccs_modifier(modifier))
+ return true;
+ fallthrough;
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_ARGB2101010:
+ case DRM_FORMAT_ABGR2101010:
+ case DRM_FORMAT_XVYU2101010:
+ case DRM_FORMAT_C8:
+ case DRM_FORMAT_XBGR16161616F:
+ case DRM_FORMAT_ABGR16161616F:
+ case DRM_FORMAT_XRGB16161616F:
+ case DRM_FORMAT_ARGB16161616F:
+ case DRM_FORMAT_Y210:
+ case DRM_FORMAT_Y212:
+ case DRM_FORMAT_Y216:
+ case DRM_FORMAT_XVYU12_16161616:
+ case DRM_FORMAT_XVYU16161616:
+ if (!intel_fb_is_ccs_modifier(modifier))
+ return true;
+ fallthrough;
+ default:
+ return false;
+ }
+}
+
+static const struct drm_plane_funcs skl_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = intel_plane_destroy,
+ .atomic_duplicate_state = intel_plane_duplicate_state,
+ .atomic_destroy_state = intel_plane_destroy_state,
+ .format_mod_supported = skl_plane_format_mod_supported,
+};
+
+static const struct drm_plane_funcs gen12_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = intel_plane_destroy,
+ .atomic_duplicate_state = intel_plane_duplicate_state,
+ .atomic_destroy_state = intel_plane_destroy_state,
+ .format_mod_supported = gen12_plane_format_mod_supported,
+};
+
+static void
+skl_plane_enable_flip_done(struct intel_plane *plane)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+
+ spin_lock_irq(&i915->irq_lock);
+ bdw_enable_pipe_irq(i915, pipe, GEN9_PIPE_PLANE_FLIP_DONE(plane->id));
+ spin_unlock_irq(&i915->irq_lock);
+}
+
+static void
+skl_plane_disable_flip_done(struct intel_plane *plane)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+
+ spin_lock_irq(&i915->irq_lock);
+ bdw_disable_pipe_irq(i915, pipe, GEN9_PIPE_PLANE_FLIP_DONE(plane->id));
+ spin_unlock_irq(&i915->irq_lock);
+}
+
+static bool skl_plane_has_rc_ccs(struct drm_i915_private *i915,
+ enum pipe pipe, enum plane_id plane_id)
+{
+ /* Wa_22011186057 */
+ if (IS_ADLP_DISPLAY_STEP(i915, STEP_A0, STEP_B0))
+ return false;
+
+ if (DISPLAY_VER(i915) >= 11)
+ return true;
+
+ if (IS_GEMINILAKE(i915))
+ return pipe != PIPE_C;
+
+ return pipe != PIPE_C &&
+ (plane_id == PLANE_PRIMARY ||
+ plane_id == PLANE_SPRITE0);
+}
+
+static bool gen12_plane_has_mc_ccs(struct drm_i915_private *i915,
+ enum plane_id plane_id)
+{
+ if (DISPLAY_VER(i915) < 12)
+ return false;
+
+ /* Wa_14010477008:tgl[a0..c0],rkl[all],dg1[all] */
+ if (IS_DG1(i915) || IS_ROCKETLAKE(i915) ||
+ IS_TGL_DISPLAY_STEP(i915, STEP_A0, STEP_D0))
+ return false;
+
+ /* Wa_22011186057 */
+ if (IS_ADLP_DISPLAY_STEP(i915, STEP_A0, STEP_B0))
+ return false;
+
+ /* Wa_14013215631 */
+ if (IS_DG2_DISPLAY_STEP(i915, STEP_A0, STEP_C0))
+ return false;
+
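+	/* media compression is limited to the first five planes */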
+ return plane_id < PLANE_SPRITE4;
+}
+
+static u8 skl_get_plane_caps(struct drm_i915_private *i915,
+ enum pipe pipe, enum plane_id plane_id)
+{
+ u8 caps = INTEL_PLANE_CAP_TILING_X;
+
+ if (DISPLAY_VER(i915) < 13 || IS_ALDERLAKE_P(i915))
+ caps |= INTEL_PLANE_CAP_TILING_Y;
+ if (DISPLAY_VER(i915) < 12)
+ caps |= INTEL_PLANE_CAP_TILING_Yf;
+ if (HAS_4TILE(i915))
+ caps |= INTEL_PLANE_CAP_TILING_4;
+
+ if (skl_plane_has_rc_ccs(i915, pipe, plane_id)) {
+ caps |= INTEL_PLANE_CAP_CCS_RC;
+ if (DISPLAY_VER(i915) >= 12)
+ caps |= INTEL_PLANE_CAP_CCS_RC_CC;
+ }
+
+ if (gen12_plane_has_mc_ccs(i915, plane_id))
+ caps |= INTEL_PLANE_CAP_CCS_MC;
+
+ return caps;
+}
+
+struct intel_plane *
+skl_universal_plane_create(struct drm_i915_private *dev_priv,
+ enum pipe pipe, enum plane_id plane_id)
+{
+ const struct drm_plane_funcs *plane_funcs;
+ struct intel_plane *plane;
+ enum drm_plane_type plane_type;
+ unsigned int supported_rotations;
+ unsigned int supported_csc;
+ const u64 *modifiers;
+ const u32 *formats;
+ int num_formats;
+ int ret;
+
+ plane = intel_plane_alloc();
+ if (IS_ERR(plane))
+ return plane;
+
+ plane->pipe = pipe;
+ plane->id = plane_id;
+ plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane_id);
+
+ intel_fbc_add_plane(skl_plane_fbc(dev_priv, pipe, plane_id), plane);
+
+ if (DISPLAY_VER(dev_priv) >= 11) {
+ plane->min_width = icl_plane_min_width;
+ if (icl_is_hdr_plane(dev_priv, plane_id))
+ plane->max_width = icl_hdr_plane_max_width;
+ else
+ plane->max_width = icl_sdr_plane_max_width;
+ plane->max_height = icl_plane_max_height;
+ plane->min_cdclk = icl_plane_min_cdclk;
+ } else if (DISPLAY_VER(dev_priv) >= 10) {
+ plane->max_width = glk_plane_max_width;
+ plane->max_height = skl_plane_max_height;
+ plane->min_cdclk = glk_plane_min_cdclk;
+ } else {
+ plane->max_width = skl_plane_max_width;
+ plane->max_height = skl_plane_max_height;
+ plane->min_cdclk = skl_plane_min_cdclk;
+ }
+
+ plane->max_stride = skl_plane_max_stride;
+ if (DISPLAY_VER(dev_priv) >= 11) {
+ plane->update_noarm = icl_plane_update_noarm;
+ plane->update_arm = icl_plane_update_arm;
+ plane->disable_arm = icl_plane_disable_arm;
+ } else {
+ plane->update_noarm = skl_plane_update_noarm;
+ plane->update_arm = skl_plane_update_arm;
+ plane->disable_arm = skl_plane_disable_arm;
+ }
+ plane->get_hw_state = skl_plane_get_hw_state;
+ plane->check_plane = skl_plane_check;
+
+ if (plane_id == PLANE_PRIMARY) {
+ plane->need_async_flip_disable_wa = IS_DISPLAY_VER(dev_priv,
+ 9, 10);
+ plane->async_flip = skl_plane_async_flip;
+ plane->enable_flip_done = skl_plane_enable_flip_done;
+ plane->disable_flip_done = skl_plane_disable_flip_done;
+ }
+
+ if (DISPLAY_VER(dev_priv) >= 11)
+ formats = icl_get_plane_formats(dev_priv, pipe,
+ plane_id, &num_formats);
+ else if (DISPLAY_VER(dev_priv) >= 10)
+ formats = glk_get_plane_formats(dev_priv, pipe,
+ plane_id, &num_formats);
+ else
+ formats = skl_get_plane_formats(dev_priv, pipe,
+ plane_id, &num_formats);
+
+ if (DISPLAY_VER(dev_priv) >= 12)
+ plane_funcs = &gen12_plane_funcs;
+ else
+ plane_funcs = &skl_plane_funcs;
+
+ if (plane_id == PLANE_PRIMARY)
+ plane_type = DRM_PLANE_TYPE_PRIMARY;
+ else
+ plane_type = DRM_PLANE_TYPE_OVERLAY;
+
+ modifiers = intel_fb_plane_get_modifiers(dev_priv,
+ skl_get_plane_caps(dev_priv, pipe, plane_id));
+
+ ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
+ 0, plane_funcs,
+ formats, num_formats, modifiers,
+ plane_type,
+ "plane %d%c", plane_id + 1,
+ pipe_name(pipe));
+
+ kfree(modifiers);
+
+ if (ret)
+ goto fail;
+
+ if (DISPLAY_VER(dev_priv) >= 13)
+ supported_rotations = DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
+ else
+ supported_rotations =
+ DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
+ DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
+
+ if (DISPLAY_VER(dev_priv) >= 11)
+ supported_rotations |= DRM_MODE_REFLECT_X;
+
+ drm_plane_create_rotation_property(&plane->base,
+ DRM_MODE_ROTATE_0,
+ supported_rotations);
+
+ supported_csc = BIT(DRM_COLOR_YCBCR_BT601) | BIT(DRM_COLOR_YCBCR_BT709);
+
+ if (DISPLAY_VER(dev_priv) >= 10)
+ supported_csc |= BIT(DRM_COLOR_YCBCR_BT2020);
+
+ drm_plane_create_color_properties(&plane->base,
+ supported_csc,
+ BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
+ BIT(DRM_COLOR_YCBCR_FULL_RANGE),
+ DRM_COLOR_YCBCR_BT709,
+ DRM_COLOR_YCBCR_LIMITED_RANGE);
+
+ drm_plane_create_alpha_property(&plane->base);
+ drm_plane_create_blend_mode_property(&plane->base,
+ BIT(DRM_MODE_BLEND_PIXEL_NONE) |
+ BIT(DRM_MODE_BLEND_PREMULTI) |
+ BIT(DRM_MODE_BLEND_COVERAGE));
+
+ drm_plane_create_zpos_immutable_property(&plane->base, plane_id);
+
+ if (DISPLAY_VER(dev_priv) >= 12)
+ drm_plane_enable_fb_damage_clips(&plane->base);
+
+ if (DISPLAY_VER(dev_priv) >= 11)
+ drm_plane_create_scaling_filter_property(&plane->base,
+ BIT(DRM_SCALING_FILTER_DEFAULT) |
+ BIT(DRM_SCALING_FILTER_NEAREST_NEIGHBOR));
+
+ intel_plane_helper_add(plane);
+
+ return plane;
+
+fail:
+ intel_plane_free(plane);
+
+ return ERR_PTR(ret);
+}
+
+void
+skl_get_initial_plane_config(struct intel_crtc *crtc,
+ struct intel_initial_plane_config *plane_config)
+{
+ struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_plane *plane = to_intel_plane(crtc->base.primary);
+ enum plane_id plane_id = plane->id;
+ enum pipe pipe;
+ u32 val, base, offset, stride_mult, tiling, alpha;
+ int fourcc, pixel_format;
+ unsigned int aligned_height;
+ struct drm_framebuffer *fb;
+ struct intel_framebuffer *intel_fb;
+ static_assert(PLANE_CTL_TILED_YF == PLANE_CTL_TILED_4);
+
+ if (!plane->get_hw_state(plane, &pipe))
+ return;
+
+ drm_WARN_ON(dev, pipe != crtc->pipe);
+
+ if (crtc_state->bigjoiner_pipes) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Unsupported bigjoiner configuration for initial FB\n");
+ return;
+ }
+
+ intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
+ if (!intel_fb) {
+ drm_dbg_kms(&dev_priv->drm, "failed to alloc fb\n");
+ return;
+ }
+
+ fb = &intel_fb->base;
+
+ fb->dev = dev;
+
+ val = intel_de_read(dev_priv, PLANE_CTL(pipe, plane_id));
+
+ if (DISPLAY_VER(dev_priv) >= 11)
+ pixel_format = val & PLANE_CTL_FORMAT_MASK_ICL;
+ else
+ pixel_format = val & PLANE_CTL_FORMAT_MASK_SKL;
+
+ if (DISPLAY_VER(dev_priv) >= 10) {
+ u32 color_ctl;
+
+ color_ctl = intel_de_read(dev_priv, PLANE_COLOR_CTL(pipe, plane_id));
+ alpha = REG_FIELD_GET(PLANE_COLOR_ALPHA_MASK, color_ctl);
+ } else {
+ alpha = REG_FIELD_GET(PLANE_CTL_ALPHA_MASK, val);
+ }
+
+ fourcc = skl_format_to_fourcc(pixel_format,
+ val & PLANE_CTL_ORDER_RGBX, alpha);
+ fb->format = drm_format_info(fourcc);
+
+ tiling = val & PLANE_CTL_TILED_MASK;
+ switch (tiling) {
+ case PLANE_CTL_TILED_LINEAR:
+ fb->modifier = DRM_FORMAT_MOD_LINEAR;
+ break;
+ case PLANE_CTL_TILED_X:
+ plane_config->tiling = I915_TILING_X;
+ fb->modifier = I915_FORMAT_MOD_X_TILED;
+ break;
+ case PLANE_CTL_TILED_Y:
+ plane_config->tiling = I915_TILING_Y;
+ if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
+ if (DISPLAY_VER(dev_priv) >= 12)
+ fb->modifier = I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS;
+ else
+ fb->modifier = I915_FORMAT_MOD_Y_TILED_CCS;
+ else if (val & PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE)
+ fb->modifier = I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS;
+ else
+ fb->modifier = I915_FORMAT_MOD_Y_TILED;
+ break;
+ case PLANE_CTL_TILED_YF: /* aka PLANE_CTL_TILED_4 on XE_LPD+ */
+ if (HAS_4TILE(dev_priv)) {
+ u32 rc_mask = PLANE_CTL_RENDER_DECOMPRESSION_ENABLE |
+ PLANE_CTL_CLEAR_COLOR_DISABLE;
+
+ if ((val & rc_mask) == rc_mask)
+ fb->modifier = I915_FORMAT_MOD_4_TILED_DG2_RC_CCS;
+ else if (val & PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE)
+ fb->modifier = I915_FORMAT_MOD_4_TILED_DG2_MC_CCS;
+ else if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
+ fb->modifier = I915_FORMAT_MOD_4_TILED_DG2_RC_CCS_CC;
+ else
+ fb->modifier = I915_FORMAT_MOD_4_TILED;
+ } else {
+ if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
+ fb->modifier = I915_FORMAT_MOD_Yf_TILED_CCS;
+ else
+ fb->modifier = I915_FORMAT_MOD_Yf_TILED;
+ }
+ break;
+ default:
+ MISSING_CASE(tiling);
+ goto error;
+ }
+
+ /*
+	 * DRM_MODE_ROTATE_ is counter-clockwise to stay compatible with Xrandr,
+	 * while i915 HW rotation is clockwise; that's why we swap the values.
+ */
+ switch (val & PLANE_CTL_ROTATE_MASK) {
+ case PLANE_CTL_ROTATE_0:
+ plane_config->rotation = DRM_MODE_ROTATE_0;
+ break;
+ case PLANE_CTL_ROTATE_90:
+ plane_config->rotation = DRM_MODE_ROTATE_270;
+ break;
+ case PLANE_CTL_ROTATE_180:
+ plane_config->rotation = DRM_MODE_ROTATE_180;
+ break;
+ case PLANE_CTL_ROTATE_270:
+ plane_config->rotation = DRM_MODE_ROTATE_90;
+ break;
+ }
+
+ if (DISPLAY_VER(dev_priv) >= 11 && val & PLANE_CTL_FLIP_HORIZONTAL)
+ plane_config->rotation |= DRM_MODE_REFLECT_X;
+
+ /* 90/270 degree rotation would require extra work */
+ if (drm_rotation_90_or_270(plane_config->rotation))
+ goto error;
+
+ base = intel_de_read(dev_priv, PLANE_SURF(pipe, plane_id)) & PLANE_SURF_ADDR_MASK;
+ plane_config->base = base;
+
+ offset = intel_de_read(dev_priv, PLANE_OFFSET(pipe, plane_id));
+
+ val = intel_de_read(dev_priv, PLANE_SIZE(pipe, plane_id));
+ fb->height = REG_FIELD_GET(PLANE_HEIGHT_MASK, val) + 1;
+ fb->width = REG_FIELD_GET(PLANE_WIDTH_MASK, val) + 1;
+
+ val = intel_de_read(dev_priv, PLANE_STRIDE(pipe, plane_id));
+ stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0);
+
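+	/* convert the HW stride units into a pitch in bytes */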
+ fb->pitches[0] = REG_FIELD_GET(PLANE_STRIDE__MASK, val) * stride_mult;
+
+ aligned_height = intel_fb_align_height(fb, 0, fb->height);
+
+ plane_config->size = fb->pitches[0] * aligned_height;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
+ crtc->base.name, plane->base.name, fb->width, fb->height,
+ fb->format->cpp[0] * 8, base, fb->pitches[0],
+ plane_config->size);
+
+ plane_config->fb = intel_fb;
+ return;
+
+error:
+ kfree(intel_fb);
+}
diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.h b/drivers/gpu/drm/i915/display/skl_universal_plane.h
new file mode 100644
index 000000000..351040b64
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/skl_universal_plane.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef _SKL_UNIVERSAL_PLANE_H_
+#define _SKL_UNIVERSAL_PLANE_H_
+
+#include <linux/types.h>
+
+struct drm_i915_private;
+struct intel_crtc;
+struct intel_initial_plane_config;
+struct intel_plane_state;
+
+enum pipe;
+enum plane_id;
+
+struct intel_plane *
+skl_universal_plane_create(struct drm_i915_private *dev_priv,
+ enum pipe pipe, enum plane_id plane_id);
+
+void skl_get_initial_plane_config(struct intel_crtc *crtc,
+ struct intel_initial_plane_config *plane_config);
+
+int skl_format_to_fourcc(int format, bool rgb_order, bool alpha);
+
+int skl_calc_main_surface_offset(const struct intel_plane_state *plane_state,
+ int *x, int *y, u32 *offset);
+
+bool icl_is_nv12_y_plane(struct drm_i915_private *dev_priv,
+ enum plane_id plane_id);
+bool icl_is_hdr_plane(struct drm_i915_private *dev_priv, enum plane_id plane_id);
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c
new file mode 100644
index 000000000..a7adf0247
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/skl_watermark.c
@@ -0,0 +1,3575 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include <drm/drm_blend.h>
+
+#include "intel_atomic.h"
+#include "intel_atomic_plane.h"
+#include "intel_bw.h"
+#include "intel_de.h"
+#include "intel_display.h"
+#include "intel_display_power.h"
+#include "intel_display_types.h"
+#include "intel_fb.h"
+#include "skl_watermark.h"
+
+#include "i915_drv.h"
+#include "i915_fixed.h"
+#include "i915_reg.h"
+#include "intel_pcode.h"
+#include "intel_pm.h"
+
+static void skl_sagv_disable(struct drm_i915_private *i915);
+
+/* Stores plane specific WM parameters */
+struct skl_wm_params {
+ bool x_tiled, y_tiled;
+ bool rc_surface;
+ bool is_planar;
+ u32 width;
+ u8 cpp;
+ u32 plane_pixel_rate;
+ u32 y_min_scanlines;
+ u32 plane_bytes_per_line;
+ uint_fixed_16_16_t plane_blocks_per_line;
+ uint_fixed_16_16_t y_tile_minimum;
+ u32 linetime_us;
+ u32 dbuf_block_size;
+};
+
+u8 intel_enabled_dbuf_slices_mask(struct drm_i915_private *i915)
+{
+ u8 enabled_slices = 0;
+ enum dbuf_slice slice;
+
+ for_each_dbuf_slice(i915, slice) {
+ if (intel_uncore_read(&i915->uncore,
+ DBUF_CTL_S(slice)) & DBUF_POWER_STATE)
+ enabled_slices |= BIT(slice);
+ }
+
+ return enabled_slices;
+}
+
+/*
+ * FIXME: We still don't have the proper code to detect if we need to apply the WA,
+ * so assume we'll always need it in order to avoid underruns.
+ */
+static bool skl_needs_memory_bw_wa(struct drm_i915_private *i915)
+{
+ return DISPLAY_VER(i915) == 9;
+}
+
+static bool
+intel_has_sagv(struct drm_i915_private *i915)
+{
+ return DISPLAY_VER(i915) >= 9 && !IS_LP(i915) &&
+ i915->display.sagv.status != I915_SAGV_NOT_CONTROLLED;
+}
+
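+/* SAGV block time in microseconds */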
+static u32
+intel_sagv_block_time(struct drm_i915_private *i915)
+{
+ if (DISPLAY_VER(i915) >= 14) {
+ u32 val;
+
+ val = intel_uncore_read(&i915->uncore, MTL_LATENCY_SAGV);
+
+ return REG_FIELD_GET(MTL_LATENCY_QCLK_SAGV, val);
+ } else if (DISPLAY_VER(i915) >= 12) {
+ u32 val = 0;
+ int ret;
+
+ ret = snb_pcode_read(&i915->uncore,
+ GEN12_PCODE_READ_SAGV_BLOCK_TIME_US,
+ &val, NULL);
+ if (ret) {
+ drm_dbg_kms(&i915->drm, "Couldn't read SAGV block time!\n");
+ return 0;
+ }
+
+ return val;
+ } else if (DISPLAY_VER(i915) == 11) {
+ return 10;
+ } else if (DISPLAY_VER(i915) == 9 && !IS_LP(i915)) {
+ return 30;
+ } else {
+ return 0;
+ }
+}
+
+static void intel_sagv_init(struct drm_i915_private *i915)
+{
+ if (!intel_has_sagv(i915))
+ i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
+
+ /*
+ * Probe to see if we have working SAGV control.
+ * For icl+ this was already determined by intel_bw_init_hw().
+ */
+ if (DISPLAY_VER(i915) < 11)
+ skl_sagv_disable(i915);
+
+ drm_WARN_ON(&i915->drm, i915->display.sagv.status == I915_SAGV_UNKNOWN);
+
+ i915->display.sagv.block_time_us = intel_sagv_block_time(i915);
+
+ drm_dbg_kms(&i915->drm, "SAGV supported: %s, original SAGV block time: %u us\n",
+ str_yes_no(intel_has_sagv(i915)), i915->display.sagv.block_time_us);
+
+ /* avoid overflow when adding with wm0 latency/etc. */
+ if (drm_WARN(&i915->drm, i915->display.sagv.block_time_us > U16_MAX,
+ "Excessive SAGV block time %u, ignoring\n",
+ i915->display.sagv.block_time_us))
+ i915->display.sagv.block_time_us = 0;
+
+ if (!intel_has_sagv(i915))
+ i915->display.sagv.block_time_us = 0;
+}
+
+/*
+ * SAGV dynamically adjusts the system agent voltage and clock frequencies
+ * depending on power and performance requirements. The display engine access
+ * to system memory is blocked during the adjustment time. Because of the
+ * blocking time, having this enabled can cause full system hangs and/or pipe
+ * underruns if we don't meet all of the following requirements:
+ *
+ * - <= 1 pipe enabled
+ * - All planes can enable watermarks for latencies >= SAGV engine block time
+ * - We're not using an interlaced display configuration
+ */
+static void skl_sagv_enable(struct drm_i915_private *i915)
+{
+ int ret;
+
+ if (!intel_has_sagv(i915))
+ return;
+
+ if (i915->display.sagv.status == I915_SAGV_ENABLED)
+ return;
+
+ drm_dbg_kms(&i915->drm, "Enabling SAGV\n");
+ ret = snb_pcode_write(&i915->uncore, GEN9_PCODE_SAGV_CONTROL,
+ GEN9_SAGV_ENABLE);
+
+ /* We don't need to wait for SAGV when enabling */
+
+ /*
+ * Some skl systems, pre-release machines in particular,
+ * don't actually have SAGV.
+ */
+ if (IS_SKYLAKE(i915) && ret == -ENXIO) {
+ drm_dbg(&i915->drm, "No SAGV found on system, ignoring\n");
+ i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
+ return;
+ } else if (ret < 0) {
+ drm_err(&i915->drm, "Failed to enable SAGV\n");
+ return;
+ }
+
+ i915->display.sagv.status = I915_SAGV_ENABLED;
+}
+
+static void skl_sagv_disable(struct drm_i915_private *i915)
+{
+ int ret;
+
+ if (!intel_has_sagv(i915))
+ return;
+
+ if (i915->display.sagv.status == I915_SAGV_DISABLED)
+ return;
+
+ drm_dbg_kms(&i915->drm, "Disabling SAGV\n");
+ /* bspec says to keep retrying for at least 1 ms */
+ ret = skl_pcode_request(&i915->uncore, GEN9_PCODE_SAGV_CONTROL,
+ GEN9_SAGV_DISABLE,
+ GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED,
+ 1);
+ /*
+ * Some skl systems, pre-release machines in particular,
+ * don't actually have SAGV.
+ */
+ if (IS_SKYLAKE(i915) && ret == -ENXIO) {
+ drm_dbg(&i915->drm, "No SAGV found on system, ignoring\n");
+ i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
+ return;
+ } else if (ret < 0) {
+ drm_err(&i915->drm, "Failed to disable SAGV (%d)\n", ret);
+ return;
+ }
+
+ i915->display.sagv.status = I915_SAGV_DISABLED;
+}
+
+static void skl_sagv_pre_plane_update(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_bw_state *new_bw_state =
+ intel_atomic_get_new_bw_state(state);
+
+ if (!new_bw_state)
+ return;
+
+ if (!intel_can_enable_sagv(i915, new_bw_state))
+ skl_sagv_disable(i915);
+}
+
+static void skl_sagv_post_plane_update(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_bw_state *new_bw_state =
+ intel_atomic_get_new_bw_state(state);
+
+ if (!new_bw_state)
+ return;
+
+ if (intel_can_enable_sagv(i915, new_bw_state))
+ skl_sagv_enable(i915);
+}
+
+static void icl_sagv_pre_plane_update(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_bw_state *old_bw_state =
+ intel_atomic_get_old_bw_state(state);
+ const struct intel_bw_state *new_bw_state =
+ intel_atomic_get_new_bw_state(state);
+ u16 old_mask, new_mask;
+
+ if (!new_bw_state)
+ return;
+
+ old_mask = old_bw_state->qgv_points_mask;
+ new_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask;
+
+ if (old_mask == new_mask)
+ return;
+
+ WARN_ON(!new_bw_state->base.changed);
+
+ drm_dbg_kms(&i915->drm, "Restricting QGV points: 0x%x -> 0x%x\n",
+ old_mask, new_mask);
+
+ /*
+ * Restrict required qgv points before updating the configuration.
+ * According to BSpec we can't mask and unmask qgv points at the same
+ * time. Also masking should be done before updating the configuration
+ * and unmasking afterwards.
+ */
+ icl_pcode_restrict_qgv_points(i915, new_mask);
+}
+
+static void icl_sagv_post_plane_update(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_bw_state *old_bw_state =
+ intel_atomic_get_old_bw_state(state);
+ const struct intel_bw_state *new_bw_state =
+ intel_atomic_get_new_bw_state(state);
+ u16 old_mask, new_mask;
+
+ if (!new_bw_state)
+ return;
+
+ old_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask;
+ new_mask = new_bw_state->qgv_points_mask;
+
+ if (old_mask == new_mask)
+ return;
+
+ WARN_ON(!new_bw_state->base.changed);
+
+ drm_dbg_kms(&i915->drm, "Relaxing QGV points: 0x%x -> 0x%x\n",
+ old_mask, new_mask);
+
+ /*
+ * Allow required qgv points after updating the configuration.
+ * According to BSpec we can't mask and unmask qgv points at the same
+ * time. Also masking should be done before updating the configuration
+ * and unmasking afterwards.
+ */
+ icl_pcode_restrict_qgv_points(i915, new_mask);
+}
+
+void intel_sagv_pre_plane_update(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+
+ /*
+ * Just return if we can't control SAGV or don't have it.
+	 * This differs from the case where we have SAGV but can't afford it
+	 * due to DBuf limitations: if SAGV is completely disabled in the
+	 * BIOS, we are not even allowed to send a PCode request, as it would
+	 * throw an error. Hence the check here.
+ */
+ if (!intel_has_sagv(i915))
+ return;
+
+ if (DISPLAY_VER(i915) >= 11)
+ icl_sagv_pre_plane_update(state);
+ else
+ skl_sagv_pre_plane_update(state);
+}
+
+void intel_sagv_post_plane_update(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+
+ /*
+ * Just return if we can't control SAGV or don't have it.
+	 * This differs from the case where we have SAGV but can't afford it
+	 * due to DBuf limitations: if SAGV is completely disabled in the
+	 * BIOS, we are not even allowed to send a PCode request, as it would
+	 * throw an error. Hence the check here.
+ */
+ if (!intel_has_sagv(i915))
+ return;
+
+ if (DISPLAY_VER(i915) >= 11)
+ icl_sagv_post_plane_update(state);
+ else
+ skl_sagv_post_plane_update(state);
+}
+
+static bool skl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ enum plane_id plane_id;
+ int max_level = INT_MAX;
+
+ if (!intel_has_sagv(i915))
+ return false;
+
+ if (!crtc_state->hw.active)
+ return true;
+
+ if (crtc_state->hw.pipe_mode.flags & DRM_MODE_FLAG_INTERLACE)
+ return false;
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ const struct skl_plane_wm *wm =
+ &crtc_state->wm.skl.optimal.planes[plane_id];
+ int level;
+
+ /* Skip this plane if it's not enabled */
+ if (!wm->wm[0].enable)
+ continue;
+
+ /* Find the highest enabled wm level for this plane */
+ for (level = ilk_wm_max_level(i915);
+ !wm->wm[level].enable; --level)
+ { }
+
+ /* Highest common enabled wm level for all planes */
+ max_level = min(level, max_level);
+ }
+
+ /* No enabled planes? */
+ if (max_level == INT_MAX)
+ return true;
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ const struct skl_plane_wm *wm =
+ &crtc_state->wm.skl.optimal.planes[plane_id];
+
+ /*
+ * All enabled planes must have enabled a common wm level that
+ * can tolerate memory latencies higher than sagv_block_time_us
+ */
+ if (wm->wm[0].enable && !wm->wm[max_level].can_sagv)
+ return false;
+ }
+
+ return true;
+}
+
+static bool tgl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ enum plane_id plane_id;
+
+ if (!crtc_state->hw.active)
+ return true;
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ const struct skl_plane_wm *wm =
+ &crtc_state->wm.skl.optimal.planes[plane_id];
+
+ if (wm->wm[0].enable && !wm->sagv.wm0.enable)
+ return false;
+ }
+
+ return true;
+}
+
+static bool intel_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+
+ if (DISPLAY_VER(i915) >= 12)
+ return tgl_crtc_can_enable_sagv(crtc_state);
+ else
+ return skl_crtc_can_enable_sagv(crtc_state);
+}
+
+bool intel_can_enable_sagv(struct drm_i915_private *i915,
+ const struct intel_bw_state *bw_state)
+{
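+	/* pre-ICL, SAGV can only be enabled when at most one pipe is active */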
+ if (DISPLAY_VER(i915) < 11 &&
+ bw_state->active_pipes && !is_power_of_2(bw_state->active_pipes))
+ return false;
+
+ return bw_state->pipe_sagv_reject == 0;
+}
+
+static int intel_compute_sagv_mask(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ int ret;
+ struct intel_crtc *crtc;
+ struct intel_crtc_state *new_crtc_state;
+ struct intel_bw_state *new_bw_state = NULL;
+ const struct intel_bw_state *old_bw_state = NULL;
+ int i;
+
+ for_each_new_intel_crtc_in_state(state, crtc,
+ new_crtc_state, i) {
+ new_bw_state = intel_atomic_get_bw_state(state);
+ if (IS_ERR(new_bw_state))
+ return PTR_ERR(new_bw_state);
+
+ old_bw_state = intel_atomic_get_old_bw_state(state);
+
+ if (intel_crtc_can_enable_sagv(new_crtc_state))
+ new_bw_state->pipe_sagv_reject &= ~BIT(crtc->pipe);
+ else
+ new_bw_state->pipe_sagv_reject |= BIT(crtc->pipe);
+ }
+
+ if (!new_bw_state)
+ return 0;
+
+ new_bw_state->active_pipes =
+ intel_calc_active_pipes(state, old_bw_state->active_pipes);
+
+ if (new_bw_state->active_pipes != old_bw_state->active_pipes) {
+ ret = intel_atomic_lock_global_state(&new_bw_state->base);
+ if (ret)
+ return ret;
+ }
+
+ if (intel_can_enable_sagv(i915, new_bw_state) !=
+ intel_can_enable_sagv(i915, old_bw_state)) {
+ ret = intel_atomic_serialize_global_state(&new_bw_state->base);
+ if (ret)
+ return ret;
+ } else if (new_bw_state->pipe_sagv_reject != old_bw_state->pipe_sagv_reject) {
+ ret = intel_atomic_lock_global_state(&new_bw_state->base);
+ if (ret)
+ return ret;
+ }
+
+ for_each_new_intel_crtc_in_state(state, crtc,
+ new_crtc_state, i) {
+ struct skl_pipe_wm *pipe_wm = &new_crtc_state->wm.skl.optimal;
+
+ /*
+ * We store use_sagv_wm in the crtc state rather than relying on
+ * that bw state since we have no convenient way to get at the
+ * latter from the plane commit hooks (especially in the legacy
+ * cursor case)
+ */
+ pipe_wm->use_sagv_wm = !HAS_HW_SAGV_WM(i915) &&
+ DISPLAY_VER(i915) >= 12 &&
+ intel_can_enable_sagv(i915, new_bw_state);
+ }
+
+ return 0;
+}
+
+static u16 skl_ddb_entry_init(struct skl_ddb_entry *entry,
+ u16 start, u16 end)
+{
+ entry->start = start;
+ entry->end = end;
+
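+	/* return the end so the caller can chain consecutive entries */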
+ return end;
+}
+
+static int intel_dbuf_slice_size(struct drm_i915_private *i915)
+{
+ return INTEL_INFO(i915)->display.dbuf.size /
+ hweight8(INTEL_INFO(i915)->display.dbuf.slice_mask);
+}
+
+static void
+skl_ddb_entry_for_slices(struct drm_i915_private *i915, u8 slice_mask,
+ struct skl_ddb_entry *ddb)
+{
+ int slice_size = intel_dbuf_slice_size(i915);
+
+ if (!slice_mask) {
+ ddb->start = 0;
+ ddb->end = 0;
+ return;
+ }
+
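+	/* ffs()/fls() are 1-based: span from the first to the last set slice */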
+ ddb->start = (ffs(slice_mask) - 1) * slice_size;
+ ddb->end = fls(slice_mask) * slice_size;
+
+ WARN_ON(ddb->start >= ddb->end);
+ WARN_ON(ddb->end > INTEL_INFO(i915)->display.dbuf.size);
+}
+
+static unsigned int mbus_ddb_offset(struct drm_i915_private *i915, u8 slice_mask)
+{
+ struct skl_ddb_entry ddb;
+
+ if (slice_mask & (BIT(DBUF_S1) | BIT(DBUF_S2)))
+ slice_mask = BIT(DBUF_S1);
+ else if (slice_mask & (BIT(DBUF_S3) | BIT(DBUF_S4)))
+ slice_mask = BIT(DBUF_S3);
+
+ skl_ddb_entry_for_slices(i915, slice_mask, &ddb);
+
+ return ddb.start;
+}
+
+u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *i915,
+ const struct skl_ddb_entry *entry)
+{
+ int slice_size = intel_dbuf_slice_size(i915);
+ enum dbuf_slice start_slice, end_slice;
+ u8 slice_mask = 0;
+
+ if (!skl_ddb_entry_size(entry))
+ return 0;
+
+ start_slice = entry->start / slice_size;
+ end_slice = (entry->end - 1) / slice_size;
+
+ /*
+	 * A per-plane DDB entry can, in the worst case, span multiple slices,
+	 * but a single entry is always contiguous.
+ */
+ while (start_slice <= end_slice) {
+ slice_mask |= BIT(start_slice);
+ start_slice++;
+ }
+
+ return slice_mask;
+}
+
+static unsigned int intel_crtc_ddb_weight(const struct intel_crtc_state *crtc_state)
+{
+ const struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
+ int hdisplay, vdisplay;
+
+ if (!crtc_state->hw.active)
+ return 0;
+
+ /*
+	 * The watermark/DDB requirement depends heavily on the width of the
+	 * framebuffer, so instead of allocating DDB equally among pipes,
+	 * distribute it based on the resolution/width of the display.
+ */
+ drm_mode_get_hv_timing(pipe_mode, &hdisplay, &vdisplay);
+
+ return hdisplay;
+}
+
+static void intel_crtc_dbuf_weights(const struct intel_dbuf_state *dbuf_state,
+ enum pipe for_pipe,
+ unsigned int *weight_start,
+ unsigned int *weight_end,
+ unsigned int *weight_total)
+{
+ struct drm_i915_private *i915 =
+ to_i915(dbuf_state->base.state->base.dev);
+ enum pipe pipe;
+
+ *weight_start = 0;
+ *weight_end = 0;
+ *weight_total = 0;
+
+ for_each_pipe(i915, pipe) {
+ int weight = dbuf_state->weight[pipe];
+
+		 * Do not account for pipes using other slice sets. Luckily,
+		 * as of the current BSpec, slice sets never partially
+		 * intersect (pipes share either the same single slice or the
+		 * same slice set, never a partial overlap), so checking for
+		 * equality is enough for now.
+ * equality for now.
+ */
+ if (dbuf_state->slices[pipe] != dbuf_state->slices[for_pipe])
+ continue;
+
+ *weight_total += weight;
+ if (pipe < for_pipe) {
+ *weight_start += weight;
+ *weight_end += weight;
+ } else if (pipe == for_pipe) {
+ *weight_end += weight;
+ }
+ }
+}
+
+static int
+skl_crtc_allocate_ddb(struct intel_atomic_state *state, struct intel_crtc *crtc)
+{
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ unsigned int weight_total, weight_start, weight_end;
+ const struct intel_dbuf_state *old_dbuf_state =
+ intel_atomic_get_old_dbuf_state(state);
+ struct intel_dbuf_state *new_dbuf_state =
+ intel_atomic_get_new_dbuf_state(state);
+ struct intel_crtc_state *crtc_state;
+ struct skl_ddb_entry ddb_slices;
+ enum pipe pipe = crtc->pipe;
+ unsigned int mbus_offset = 0;
+ u32 ddb_range_size;
+ u32 dbuf_slice_mask;
+ u32 start, end;
+ int ret;
+
+ if (new_dbuf_state->weight[pipe] == 0) {
+ skl_ddb_entry_init(&new_dbuf_state->ddb[pipe], 0, 0);
+ goto out;
+ }
+
+ dbuf_slice_mask = new_dbuf_state->slices[pipe];
+
+ skl_ddb_entry_for_slices(i915, dbuf_slice_mask, &ddb_slices);
+ mbus_offset = mbus_ddb_offset(i915, dbuf_slice_mask);
+ ddb_range_size = skl_ddb_entry_size(&ddb_slices);
+
+ intel_crtc_dbuf_weights(new_dbuf_state, pipe,
+ &weight_start, &weight_end, &weight_total);
+
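+	/* this pipe's share of the DDB range, proportional to its weight */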
+ start = ddb_range_size * weight_start / weight_total;
+ end = ddb_range_size * weight_end / weight_total;
+
+ skl_ddb_entry_init(&new_dbuf_state->ddb[pipe],
+ ddb_slices.start - mbus_offset + start,
+ ddb_slices.start - mbus_offset + end);
+
+out:
+ if (old_dbuf_state->slices[pipe] == new_dbuf_state->slices[pipe] &&
+ skl_ddb_entry_equal(&old_dbuf_state->ddb[pipe],
+ &new_dbuf_state->ddb[pipe]))
+ return 0;
+
+ ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
+ if (ret)
+ return ret;
+
+ crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ /*
+ * Used for checking overlaps, so we need absolute
+ * offsets instead of MBUS relative offsets.
+ */
+ crtc_state->wm.skl.ddb.start = mbus_offset + new_dbuf_state->ddb[pipe].start;
+ crtc_state->wm.skl.ddb.end = mbus_offset + new_dbuf_state->ddb[pipe].end;
+
+ drm_dbg_kms(&i915->drm,
+ "[CRTC:%d:%s] dbuf slices 0x%x -> 0x%x, ddb (%d - %d) -> (%d - %d), active pipes 0x%x -> 0x%x\n",
+ crtc->base.base.id, crtc->base.name,
+ old_dbuf_state->slices[pipe], new_dbuf_state->slices[pipe],
+ old_dbuf_state->ddb[pipe].start, old_dbuf_state->ddb[pipe].end,
+ new_dbuf_state->ddb[pipe].start, new_dbuf_state->ddb[pipe].end,
+ old_dbuf_state->active_pipes, new_dbuf_state->active_pipes);
+
+ return 0;
+}
+
+static int skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
+ int width, const struct drm_format_info *format,
+ u64 modifier, unsigned int rotation,
+ u32 plane_pixel_rate, struct skl_wm_params *wp,
+ int color_plane);
+
+static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
+ struct intel_plane *plane,
+ int level,
+ unsigned int latency,
+ const struct skl_wm_params *wp,
+ const struct skl_wm_level *result_prev,
+ struct skl_wm_level *result /* out */);
+
+static unsigned int
+skl_cursor_allocation(const struct intel_crtc_state *crtc_state,
+ int num_active)
+{
+ struct intel_plane *plane = to_intel_plane(crtc_state->uapi.crtc->cursor);
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ int level, max_level = ilk_wm_max_level(i915);
+ struct skl_wm_level wm = {};
+ int ret, min_ddb_alloc = 0;
+ struct skl_wm_params wp;
+
+ ret = skl_compute_wm_params(crtc_state, 256,
+ drm_format_info(DRM_FORMAT_ARGB8888),
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_MODE_ROTATE_0,
+ crtc_state->pixel_rate, &wp, 0);
+ drm_WARN_ON(&i915->drm, ret);
+
+ for (level = 0; level <= max_level; level++) {
+ unsigned int latency = i915->display.wm.skl_latency[level];
+
+ skl_compute_plane_wm(crtc_state, plane, level, latency, &wp, &wm, &wm);
+ if (wm.min_ddb_alloc == U16_MAX)
+ break;
+
+ min_ddb_alloc = wm.min_ddb_alloc;
+ }
+
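+	/* minimum cursor allocation: 32 blocks with a single pipe, 8 otherwise */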
+ return max(num_active == 1 ? 32 : 8, min_ddb_alloc);
+}
+
+static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg)
+{
+ skl_ddb_entry_init(entry,
+ REG_FIELD_GET(PLANE_BUF_START_MASK, reg),
+ REG_FIELD_GET(PLANE_BUF_END_MASK, reg));
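+	/* the HW end field is inclusive; convert to our exclusive convention */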
+ if (entry->end)
+ entry->end++;
+}
+
+static void
+skl_ddb_get_hw_plane_state(struct drm_i915_private *i915,
+ const enum pipe pipe,
+ const enum plane_id plane_id,
+ struct skl_ddb_entry *ddb,
+ struct skl_ddb_entry *ddb_y)
+{
+ u32 val;
+
+ /* Cursor doesn't support NV12/planar, so no extra calculation needed */
+ if (plane_id == PLANE_CURSOR) {
+ val = intel_uncore_read(&i915->uncore, CUR_BUF_CFG(pipe));
+ skl_ddb_entry_init_from_hw(ddb, val);
+ return;
+ }
+
+ val = intel_uncore_read(&i915->uncore, PLANE_BUF_CFG(pipe, plane_id));
+ skl_ddb_entry_init_from_hw(ddb, val);
+
+ if (DISPLAY_VER(i915) >= 11)
+ return;
+
+ val = intel_uncore_read(&i915->uncore, PLANE_NV12_BUF_CFG(pipe, plane_id));
+ skl_ddb_entry_init_from_hw(ddb_y, val);
+}
+
+static void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
+ struct skl_ddb_entry *ddb,
+ struct skl_ddb_entry *ddb_y)
+{
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ enum intel_display_power_domain power_domain;
+ enum pipe pipe = crtc->pipe;
+ intel_wakeref_t wakeref;
+ enum plane_id plane_id;
+
+ power_domain = POWER_DOMAIN_PIPE(pipe);
+ wakeref = intel_display_power_get_if_enabled(i915, power_domain);
+ if (!wakeref)
+ return;
+
+ for_each_plane_id_on_crtc(crtc, plane_id)
+ skl_ddb_get_hw_plane_state(i915, pipe,
+ plane_id,
+ &ddb[plane_id],
+ &ddb_y[plane_id]);
+
+ intel_display_power_put(i915, power_domain, wakeref);
+}
+
+struct dbuf_slice_conf_entry {
+ u8 active_pipes;
+ u8 dbuf_mask[I915_MAX_PIPES];
+ bool join_mbus;
+};
+
+/*
+ * Table taken from BSpec 12716.
+ * Pipes do have some preferred DBuf slice affinity,
+ * plus there are some hardcoded requirements on how
+ * those should be distributed for multipipe scenarios.
+ * With more DBuf slices the algorithm would get even messier
+ * and less readable, so we use a table taken almost verbatim
+ * from the BSpec itself - that way it is at least easier
+ * to compare, change and check.
+ */
+static const struct dbuf_slice_conf_entry icl_allowed_dbufs[] =
+/* Autogenerated with igt/tools/intel_dbuf_map tool: */
+{
+ {
+ .active_pipes = BIT(PIPE_A),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_B),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S1),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ [PIPE_B] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_C] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ [PIPE_C] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S1),
+ [PIPE_C] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ [PIPE_B] = BIT(DBUF_S1),
+ [PIPE_C] = BIT(DBUF_S2),
+ },
+ },
+ {}
+};
+
+/*
+ * Table taken from BSpec 49255.
+ * Pipes do have some preferred DBuf slice affinity,
+ * plus there are some hardcoded requirements on how
+ * those should be distributed for multipipe scenarios.
+ * With more DBuf slices the algorithm would get even messier
+ * and less readable, so we use a table taken almost verbatim
+ * from the BSpec itself - that way it is at least easier
+ * to compare, change and check.
+ */
+static const struct dbuf_slice_conf_entry tgl_allowed_dbufs[] =
+/* Autogenerated with igt/tools/intel_dbuf_map tool: */
+{
+ {
+ .active_pipes = BIT(PIPE_A),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_B),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S2),
+ [PIPE_B] = BIT(DBUF_S1),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_C] = BIT(DBUF_S2) | BIT(DBUF_S1),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ [PIPE_C] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S1),
+ [PIPE_C] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ [PIPE_B] = BIT(DBUF_S1),
+ [PIPE_C] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_D] = BIT(DBUF_S2) | BIT(DBUF_S1),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ [PIPE_D] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_B) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S1),
+ [PIPE_D] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ [PIPE_B] = BIT(DBUF_S1),
+ [PIPE_D] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_C) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_C] = BIT(DBUF_S1),
+ [PIPE_D] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ [PIPE_C] = BIT(DBUF_S2),
+ [PIPE_D] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S1),
+ [PIPE_C] = BIT(DBUF_S2),
+ [PIPE_D] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ [PIPE_B] = BIT(DBUF_S1),
+ [PIPE_C] = BIT(DBUF_S2),
+ [PIPE_D] = BIT(DBUF_S2),
+ },
+ },
+ {}
+};
+
+static const struct dbuf_slice_conf_entry dg2_allowed_dbufs[] = {
+ {
+ .active_pipes = BIT(PIPE_A),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_B),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ [PIPE_B] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ [PIPE_B] = BIT(DBUF_S2),
+ [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ [PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_B) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ [PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ [PIPE_B] = BIT(DBUF_S2),
+ [PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_C) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_C] = BIT(DBUF_S3),
+ [PIPE_D] = BIT(DBUF_S4),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ [PIPE_C] = BIT(DBUF_S3),
+ [PIPE_D] = BIT(DBUF_S4),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ [PIPE_C] = BIT(DBUF_S3),
+ [PIPE_D] = BIT(DBUF_S4),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ [PIPE_B] = BIT(DBUF_S2),
+ [PIPE_C] = BIT(DBUF_S3),
+ [PIPE_D] = BIT(DBUF_S4),
+ },
+ },
+ {}
+};
+
+static const struct dbuf_slice_conf_entry adlp_allowed_dbufs[] = {
+ /*
+ * Keep the join_mbus cases first so check_mbus_joined()
+ * will prefer them over the !join_mbus cases.
+ */
+ {
+ .active_pipes = BIT(PIPE_A),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2) | BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ .join_mbus = true,
+ },
+ {
+ .active_pipes = BIT(PIPE_B),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2) | BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ .join_mbus = true,
+ },
+ {
+ .active_pipes = BIT(PIPE_A),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
+ .join_mbus = false,
+ },
+ {
+ .active_pipes = BIT(PIPE_B),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ .join_mbus = false,
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_B) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_C) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
+ },
+	{}
+};
+
+static bool check_mbus_joined(u8 active_pipes,
+ const struct dbuf_slice_conf_entry *dbuf_slices)
+{
+ int i;
+
+ for (i = 0; dbuf_slices[i].active_pipes != 0; i++) {
+ if (dbuf_slices[i].active_pipes == active_pipes)
+ return dbuf_slices[i].join_mbus;
+ }
+ return false;
+}
+
+static bool adlp_check_mbus_joined(u8 active_pipes)
+{
+ return check_mbus_joined(active_pipes, adlp_allowed_dbufs);
+}
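+
+/*
+ * Illustrative example (editor's note, not from Bspec): with only pipe A
+ * active on ADL-P, the first adlp_allowed_dbufs entry matches and MBUS
+ * joining is used, handing all four slices to pipe A:
+ *
+ *   adlp_check_mbus_joined(BIT(PIPE_A));                 // true
+ *   adlp_check_mbus_joined(BIT(PIPE_A) | BIT(PIPE_B));   // false
+ */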
+
+static u8 compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus,
+ const struct dbuf_slice_conf_entry *dbuf_slices)
+{
+ int i;
+
+ for (i = 0; dbuf_slices[i].active_pipes != 0; i++) {
+ if (dbuf_slices[i].active_pipes == active_pipes &&
+ dbuf_slices[i].join_mbus == join_mbus)
+ return dbuf_slices[i].dbuf_mask[pipe];
+ }
+ return 0;
+}
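+
+/*
+ * Illustrative example (editor's note): on TGL with pipes A and B active,
+ * the A|B entry of tgl_allowed_dbufs applies, so
+ *
+ *   compute_dbuf_slices(PIPE_A, BIT(PIPE_A) | BIT(PIPE_B), false,
+ *                       tgl_allowed_dbufs);   // BIT(DBUF_S2)
+ *   compute_dbuf_slices(PIPE_B, BIT(PIPE_A) | BIT(PIPE_B), false,
+ *                       tgl_allowed_dbufs);   // BIT(DBUF_S1)
+ *
+ * A pipe combination not listed in the table yields 0 (no slices).
+ */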
+
+/*
+ * This function finds an entry with the same enabled pipe configuration
+ * and returns the corresponding DBuf slice mask as stated in BSpec for
+ * the particular platform.
+ */
+static u8 icl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
+{
+ /*
+	 * FIXME: For ICL this is still a bit unclear as a previous BSpec
+	 * revision required calculating a "pipe ratio" in order to determine
+	 * if one or two slices can be used for single pipe configurations,
+	 * as an additional constraint to the existing table.
+	 * However, based on recent info, it should not be "pipe ratio"
+	 * but rather a ratio between pixel_rate and cdclk with additional
+	 * constants, so for now we use only the table until this is
+	 * clarified. This is also why we may need the crtc state here
+	 * again once those additional constraints pop up.
+ */
+ return compute_dbuf_slices(pipe, active_pipes, join_mbus,
+ icl_allowed_dbufs);
+}
+
+static u8 tgl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
+{
+ return compute_dbuf_slices(pipe, active_pipes, join_mbus,
+ tgl_allowed_dbufs);
+}
+
+static u8 adlp_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
+{
+ return compute_dbuf_slices(pipe, active_pipes, join_mbus,
+ adlp_allowed_dbufs);
+}
+
+static u8 dg2_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
+{
+ return compute_dbuf_slices(pipe, active_pipes, join_mbus,
+ dg2_allowed_dbufs);
+}
+
+static u8 skl_compute_dbuf_slices(struct intel_crtc *crtc, u8 active_pipes, bool join_mbus)
+{
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+
+ if (IS_DG2(i915))
+ return dg2_compute_dbuf_slices(pipe, active_pipes, join_mbus);
+ else if (DISPLAY_VER(i915) >= 13)
+ return adlp_compute_dbuf_slices(pipe, active_pipes, join_mbus);
+ else if (DISPLAY_VER(i915) == 12)
+ return tgl_compute_dbuf_slices(pipe, active_pipes, join_mbus);
+ else if (DISPLAY_VER(i915) == 11)
+ return icl_compute_dbuf_slices(pipe, active_pipes, join_mbus);
+ /*
+	 * For anything else just return one slice for now.
+ * Should be extended for other platforms.
+ */
+ return active_pipes & BIT(pipe) ? BIT(DBUF_S1) : 0;
+}
+
+static bool
+use_minimal_wm0_only(const struct intel_crtc_state *crtc_state,
+ struct intel_plane *plane)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+
+ return DISPLAY_VER(i915) >= 13 &&
+ crtc_state->uapi.async_flip &&
+ plane->async_flip;
+}
+
+static u64
+skl_total_relative_data_rate(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ enum plane_id plane_id;
+ u64 data_rate = 0;
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ if (plane_id == PLANE_CURSOR)
+ continue;
+
+ data_rate += crtc_state->rel_data_rate[plane_id];
+
+ if (DISPLAY_VER(i915) < 11)
+ data_rate += crtc_state->rel_data_rate_y[plane_id];
+ }
+
+ return data_rate;
+}
+
+static const struct skl_wm_level *
+skl_plane_wm_level(const struct skl_pipe_wm *pipe_wm,
+ enum plane_id plane_id,
+ int level)
+{
+ const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
+
+ if (level == 0 && pipe_wm->use_sagv_wm)
+ return &wm->sagv.wm0;
+
+ return &wm->wm[level];
+}
+
+static const struct skl_wm_level *
+skl_plane_trans_wm(const struct skl_pipe_wm *pipe_wm,
+ enum plane_id plane_id)
+{
+ const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
+
+ if (pipe_wm->use_sagv_wm)
+ return &wm->sagv.trans_wm;
+
+ return &wm->trans_wm;
+}
+
+/*
+ * We only disable the watermarks for each plane if
+ * they exceed the ddb allocation of said plane. This
+ * is done so that we don't end up touching cursor
+ * watermarks needlessly when some other plane reduces
+ * our max possible watermark level.
+ *
+ * Bspec has this to say about the PLANE_WM enable bit:
+ * "All the watermarks at this level for all enabled
+ * planes must be enabled before the level will be used."
+ * So this is actually safe to do.
+ */
+static void
+skl_check_wm_level(struct skl_wm_level *wm, const struct skl_ddb_entry *ddb)
+{
+ if (wm->min_ddb_alloc > skl_ddb_entry_size(ddb))
+ memset(wm, 0, sizeof(*wm));
+}
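+
+/*
+ * Example (editor's illustration with made-up numbers): if a level
+ * computed min_ddb_alloc = 200 blocks but the plane's ddb entry only
+ * spans 150 blocks, the whole level is zeroed, i.e. disabled.
+ */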
+
+static void
+skl_check_nv12_wm_level(struct skl_wm_level *wm, struct skl_wm_level *uv_wm,
+ const struct skl_ddb_entry *ddb_y, const struct skl_ddb_entry *ddb)
+{
+ if (wm->min_ddb_alloc > skl_ddb_entry_size(ddb_y) ||
+ uv_wm->min_ddb_alloc > skl_ddb_entry_size(ddb)) {
+ memset(wm, 0, sizeof(*wm));
+ memset(uv_wm, 0, sizeof(*uv_wm));
+ }
+}
+
+static bool icl_need_wm1_wa(struct drm_i915_private *i915,
+ enum plane_id plane_id)
+{
+ /*
+ * Wa_1408961008:icl, ehl
+ * Wa_14012656716:tgl, adl
+ * Underruns with WM1+ disabled
+ */
+ return DISPLAY_VER(i915) == 11 ||
+ (IS_DISPLAY_VER(i915, 12, 13) && plane_id == PLANE_CURSOR);
+}
+
+struct skl_plane_ddb_iter {
+ u64 data_rate;
+ u16 start, size;
+};
+
+static void
+skl_allocate_plane_ddb(struct skl_plane_ddb_iter *iter,
+ struct skl_ddb_entry *ddb,
+ const struct skl_wm_level *wm,
+ u64 data_rate)
+{
+ u16 size, extra = 0;
+
+ if (data_rate) {
+ extra = min_t(u16, iter->size,
+ DIV64_U64_ROUND_UP(iter->size * data_rate,
+ iter->data_rate));
+ iter->size -= extra;
+ iter->data_rate -= data_rate;
+ }
+
+ /*
+ * Keep ddb entry of all disabled planes explicitly zeroed
+ * to avoid skl_ddb_add_affected_planes() adding them to
+ * the state when other planes change their allocations.
+ */
+ size = wm->min_ddb_alloc + extra;
+ if (size)
+ iter->start = skl_ddb_entry_init(ddb, iter->start,
+ iter->start + size);
+}
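+
+/*
+ * Worked example (editor's illustration with made-up numbers): say
+ * iter->size = 100 leftover blocks, iter->data_rate = 1000 and this
+ * plane's data_rate = 250. Then extra = ceil(100 * 250 / 1000) = 25,
+ * the plane's ddb entry spans wm->min_ddb_alloc + 25 blocks starting
+ * at iter->start, and the iterator is left with size = 75 and
+ * data_rate = 750 for the remaining planes.
+ */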
+
+static int
+skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct intel_dbuf_state *dbuf_state =
+ intel_atomic_get_new_dbuf_state(state);
+ const struct skl_ddb_entry *alloc = &dbuf_state->ddb[crtc->pipe];
+ int num_active = hweight8(dbuf_state->active_pipes);
+ struct skl_plane_ddb_iter iter;
+ enum plane_id plane_id;
+ u16 cursor_size;
+ u32 blocks;
+ int level;
+
+ /* Clear the partitioning for disabled planes. */
+ memset(crtc_state->wm.skl.plane_ddb, 0, sizeof(crtc_state->wm.skl.plane_ddb));
+ memset(crtc_state->wm.skl.plane_ddb_y, 0, sizeof(crtc_state->wm.skl.plane_ddb_y));
+
+ if (!crtc_state->hw.active)
+ return 0;
+
+ iter.start = alloc->start;
+ iter.size = skl_ddb_entry_size(alloc);
+ if (iter.size == 0)
+ return 0;
+
+ /* Allocate fixed number of blocks for cursor. */
+ cursor_size = skl_cursor_allocation(crtc_state, num_active);
+ iter.size -= cursor_size;
+ skl_ddb_entry_init(&crtc_state->wm.skl.plane_ddb[PLANE_CURSOR],
+ alloc->end - cursor_size, alloc->end);
+
+ iter.data_rate = skl_total_relative_data_rate(crtc_state);
+
+ /*
+ * Find the highest watermark level for which we can satisfy the block
+ * requirement of active planes.
+ */
+ for (level = ilk_wm_max_level(i915); level >= 0; level--) {
+ blocks = 0;
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ const struct skl_plane_wm *wm =
+ &crtc_state->wm.skl.optimal.planes[plane_id];
+
+ if (plane_id == PLANE_CURSOR) {
+ const struct skl_ddb_entry *ddb =
+ &crtc_state->wm.skl.plane_ddb[plane_id];
+
+ if (wm->wm[level].min_ddb_alloc > skl_ddb_entry_size(ddb)) {
+ drm_WARN_ON(&i915->drm,
+ wm->wm[level].min_ddb_alloc != U16_MAX);
+ blocks = U32_MAX;
+ break;
+ }
+ continue;
+ }
+
+ blocks += wm->wm[level].min_ddb_alloc;
+ blocks += wm->uv_wm[level].min_ddb_alloc;
+ }
+
+ if (blocks <= iter.size) {
+ iter.size -= blocks;
+ break;
+ }
+ }
+
+ if (level < 0) {
+ drm_dbg_kms(&i915->drm,
+ "Requested display configuration exceeds system DDB limitations");
+ drm_dbg_kms(&i915->drm, "minimum required %d/%d\n",
+ blocks, iter.size);
+ return -EINVAL;
+ }
+
+ /* avoid the WARN later when we don't allocate any extra DDB */
+ if (iter.data_rate == 0)
+ iter.size = 0;
+
+ /*
+ * Grant each plane the blocks it requires at the highest achievable
+ * watermark level, plus an extra share of the leftover blocks
+ * proportional to its relative data rate.
+ */
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ struct skl_ddb_entry *ddb =
+ &crtc_state->wm.skl.plane_ddb[plane_id];
+ struct skl_ddb_entry *ddb_y =
+ &crtc_state->wm.skl.plane_ddb_y[plane_id];
+ const struct skl_plane_wm *wm =
+ &crtc_state->wm.skl.optimal.planes[plane_id];
+
+ if (plane_id == PLANE_CURSOR)
+ continue;
+
+ if (DISPLAY_VER(i915) < 11 &&
+ crtc_state->nv12_planes & BIT(plane_id)) {
+ skl_allocate_plane_ddb(&iter, ddb_y, &wm->wm[level],
+ crtc_state->rel_data_rate_y[plane_id]);
+ skl_allocate_plane_ddb(&iter, ddb, &wm->uv_wm[level],
+ crtc_state->rel_data_rate[plane_id]);
+ } else {
+ skl_allocate_plane_ddb(&iter, ddb, &wm->wm[level],
+ crtc_state->rel_data_rate[plane_id]);
+ }
+ }
+ drm_WARN_ON(&i915->drm, iter.size != 0 || iter.data_rate != 0);
+
+ /*
+ * When we calculated watermark values we didn't know how high
+ * of a level we'd actually be able to hit, so we just marked
+ * all levels as "enabled." Go back now and disable the ones
+ * that aren't actually possible.
+ */
+ for (level++; level <= ilk_wm_max_level(i915); level++) {
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ const struct skl_ddb_entry *ddb =
+ &crtc_state->wm.skl.plane_ddb[plane_id];
+ const struct skl_ddb_entry *ddb_y =
+ &crtc_state->wm.skl.plane_ddb_y[plane_id];
+ struct skl_plane_wm *wm =
+ &crtc_state->wm.skl.optimal.planes[plane_id];
+
+ if (DISPLAY_VER(i915) < 11 &&
+ crtc_state->nv12_planes & BIT(plane_id))
+ skl_check_nv12_wm_level(&wm->wm[level],
+ &wm->uv_wm[level],
+ ddb_y, ddb);
+ else
+ skl_check_wm_level(&wm->wm[level], ddb);
+
+ if (icl_need_wm1_wa(i915, plane_id) &&
+ level == 1 && !wm->wm[level].enable &&
+ wm->wm[0].enable) {
+ wm->wm[level].blocks = wm->wm[0].blocks;
+ wm->wm[level].lines = wm->wm[0].lines;
+ wm->wm[level].ignore_lines = wm->wm[0].ignore_lines;
+ }
+ }
+ }
+
+ /*
+ * Go back and disable the transition and SAGV watermarks
+ * if it turns out we don't have enough DDB blocks for them.
+ */
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ const struct skl_ddb_entry *ddb =
+ &crtc_state->wm.skl.plane_ddb[plane_id];
+ const struct skl_ddb_entry *ddb_y =
+ &crtc_state->wm.skl.plane_ddb_y[plane_id];
+ struct skl_plane_wm *wm =
+ &crtc_state->wm.skl.optimal.planes[plane_id];
+
+ if (DISPLAY_VER(i915) < 11 &&
+ crtc_state->nv12_planes & BIT(plane_id)) {
+ skl_check_wm_level(&wm->trans_wm, ddb_y);
+ } else {
+ WARN_ON(skl_ddb_entry_size(ddb_y));
+
+ skl_check_wm_level(&wm->trans_wm, ddb);
+ }
+
+ skl_check_wm_level(&wm->sagv.wm0, ddb);
+ skl_check_wm_level(&wm->sagv.trans_wm, ddb);
+ }
+
+ return 0;
+}
+
+/*
+ * The max latency should be 257 (the max the punit can encode is 255 and we
+ * add 2us for the read latency) and cpp should always be <= 8, which
+ * should allow pixel_rate up to ~2 GHz. That seems sufficient since the max
+ * 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
+ */
+static uint_fixed_16_16_t
+skl_wm_method1(const struct drm_i915_private *i915, u32 pixel_rate,
+ u8 cpp, u32 latency, u32 dbuf_block_size)
+{
+ u32 wm_intermediate_val;
+ uint_fixed_16_16_t ret;
+
+ if (latency == 0)
+ return FP_16_16_MAX;
+
+ wm_intermediate_val = latency * pixel_rate * cpp;
+ ret = div_fixed16(wm_intermediate_val, 1000 * dbuf_block_size);
+
+ if (DISPLAY_VER(i915) >= 10)
+ ret = add_fixed16_u32(ret, 1);
+
+ return ret;
+}
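+
+/*
+ * Worked example (editor's illustration): latency is in us, pixel rate
+ * in kHz. With latency = 5 us, pixel_rate = 594000 kHz (4k60), cpp = 4
+ * and a 512 byte block size, method1 yields
+ * 5 * 594000 * 4 / (1000 * 512) ~= 23.2 blocks (plus 1 on display
+ * version 10+), i.e. the bytes fetched during the latency window
+ * divided by the DBuf block size.
+ */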
+
+static uint_fixed_16_16_t
+skl_wm_method2(u32 pixel_rate, u32 pipe_htotal, u32 latency,
+ uint_fixed_16_16_t plane_blocks_per_line)
+{
+ u32 wm_intermediate_val;
+ uint_fixed_16_16_t ret;
+
+ if (latency == 0)
+ return FP_16_16_MAX;
+
+ wm_intermediate_val = latency * pixel_rate;
+ wm_intermediate_val = DIV_ROUND_UP(wm_intermediate_val,
+ pipe_htotal * 1000);
+ ret = mul_u32_fixed16(wm_intermediate_val, plane_blocks_per_line);
+ return ret;
+}
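+
+/*
+ * Worked example (editor's illustration): method2 is lines worth of
+ * blocks. With latency = 5 us, pixel_rate = 594000 kHz and
+ * pipe_htotal = 4400, ceil(5 * 594000 / (4400 * 1000)) = 1 line is
+ * scanned out during the latency window, which is then multiplied by
+ * plane_blocks_per_line.
+ */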
+
+static uint_fixed_16_16_t
+intel_get_linetime_us(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ u32 pixel_rate;
+ u32 crtc_htotal;
+ uint_fixed_16_16_t linetime_us;
+
+ if (!crtc_state->hw.active)
+ return u32_to_fixed16(0);
+
+ pixel_rate = crtc_state->pixel_rate;
+
+ if (drm_WARN_ON(&i915->drm, pixel_rate == 0))
+ return u32_to_fixed16(0);
+
+ crtc_htotal = crtc_state->hw.pipe_mode.crtc_htotal;
+ linetime_us = div_fixed16(crtc_htotal * 1000, pixel_rate);
+
+ return linetime_us;
+}
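+
+/*
+ * Example (editor's illustration): with crtc_htotal = 4400 and
+ * pixel_rate = 594000 kHz, linetime_us = 4400 * 1000 / 594000
+ * ~= 7.4 us per scanned-out line.
+ */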
+
+static int
+skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
+ int width, const struct drm_format_info *format,
+ u64 modifier, unsigned int rotation,
+ u32 plane_pixel_rate, struct skl_wm_params *wp,
+ int color_plane)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ u32 interm_pbpl;
+
+	/* only planar formats have two planes */
+ if (color_plane == 1 &&
+ !intel_format_info_is_yuv_semiplanar(format, modifier)) {
+ drm_dbg_kms(&i915->drm,
+ "Non planar format have single plane\n");
+ return -EINVAL;
+ }
+
+ wp->y_tiled = modifier == I915_FORMAT_MOD_Y_TILED ||
+ modifier == I915_FORMAT_MOD_4_TILED ||
+ modifier == I915_FORMAT_MOD_Yf_TILED ||
+ modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
+ modifier == I915_FORMAT_MOD_Yf_TILED_CCS ||
+ modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
+ modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS ||
+ modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC ||
+ modifier == I915_FORMAT_MOD_4_TILED_DG2_RC_CCS ||
+ modifier == I915_FORMAT_MOD_4_TILED_DG2_MC_CCS ||
+ modifier == I915_FORMAT_MOD_4_TILED_DG2_RC_CCS_CC;
+ wp->x_tiled = modifier == I915_FORMAT_MOD_X_TILED;
+ wp->rc_surface = modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
+ modifier == I915_FORMAT_MOD_Yf_TILED_CCS ||
+ modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
+ modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS ||
+ modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC ||
+ modifier == I915_FORMAT_MOD_4_TILED_DG2_RC_CCS ||
+ modifier == I915_FORMAT_MOD_4_TILED_DG2_MC_CCS ||
+ modifier == I915_FORMAT_MOD_4_TILED_DG2_RC_CCS_CC;
+ wp->is_planar = intel_format_info_is_yuv_semiplanar(format, modifier);
+
+ wp->width = width;
+ if (color_plane == 1 && wp->is_planar)
+ wp->width /= 2;
+
+ wp->cpp = format->cpp[color_plane];
+ wp->plane_pixel_rate = plane_pixel_rate;
+
+ if (DISPLAY_VER(i915) >= 11 &&
+ modifier == I915_FORMAT_MOD_Yf_TILED && wp->cpp == 1)
+ wp->dbuf_block_size = 256;
+ else
+ wp->dbuf_block_size = 512;
+
+ if (drm_rotation_90_or_270(rotation)) {
+ switch (wp->cpp) {
+ case 1:
+ wp->y_min_scanlines = 16;
+ break;
+ case 2:
+ wp->y_min_scanlines = 8;
+ break;
+ case 4:
+ wp->y_min_scanlines = 4;
+ break;
+ default:
+ MISSING_CASE(wp->cpp);
+ return -EINVAL;
+ }
+ } else {
+ wp->y_min_scanlines = 4;
+ }
+
+ if (skl_needs_memory_bw_wa(i915))
+ wp->y_min_scanlines *= 2;
+
+ wp->plane_bytes_per_line = wp->width * wp->cpp;
+ if (wp->y_tiled) {
+ interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line *
+ wp->y_min_scanlines,
+ wp->dbuf_block_size);
+
+ if (DISPLAY_VER(i915) >= 10)
+ interm_pbpl++;
+
+ wp->plane_blocks_per_line = div_fixed16(interm_pbpl,
+ wp->y_min_scanlines);
+ } else {
+ interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line,
+ wp->dbuf_block_size);
+
+ if (!wp->x_tiled || DISPLAY_VER(i915) >= 10)
+ interm_pbpl++;
+
+ wp->plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
+ }
+
+ wp->y_tile_minimum = mul_u32_fixed16(wp->y_min_scanlines,
+ wp->plane_blocks_per_line);
+
+ wp->linetime_us = fixed16_to_u32_round_up(intel_get_linetime_us(crtc_state));
+
+ return 0;
+}
+
+static int
+skl_compute_plane_wm_params(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ struct skl_wm_params *wp, int color_plane)
+{
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ int width;
+
+ /*
+ * Src coordinates are already rotated by 270 degrees for
+ * the 90/270 degree plane rotation cases (to match the
+ * GTT mapping), hence no need to account for rotation here.
+ */
+ width = drm_rect_width(&plane_state->uapi.src) >> 16;
+
+ return skl_compute_wm_params(crtc_state, width,
+ fb->format, fb->modifier,
+ plane_state->hw.rotation,
+ intel_plane_pixel_rate(crtc_state, plane_state),
+ wp, color_plane);
+}
+
+static bool skl_wm_has_lines(struct drm_i915_private *i915, int level)
+{
+ if (DISPLAY_VER(i915) >= 10)
+ return true;
+
+	/* The number of lines is ignored for the level 0 watermark. */
+ return level > 0;
+}
+
+static int skl_wm_max_lines(struct drm_i915_private *i915)
+{
+ if (DISPLAY_VER(i915) >= 13)
+ return 255;
+ else
+ return 31;
+}
+
+static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
+ struct intel_plane *plane,
+ int level,
+ unsigned int latency,
+ const struct skl_wm_params *wp,
+ const struct skl_wm_level *result_prev,
+ struct skl_wm_level *result /* out */)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ uint_fixed_16_16_t method1, method2;
+ uint_fixed_16_16_t selected_result;
+ u32 blocks, lines, min_ddb_alloc = 0;
+
+ if (latency == 0 ||
+ (use_minimal_wm0_only(crtc_state, plane) && level > 0)) {
+ /* reject it */
+ result->min_ddb_alloc = U16_MAX;
+ return;
+ }
+
+ /*
+ * WaIncreaseLatencyIPCEnabled: kbl,cfl
+ * Display WA #1141: kbl,cfl
+ */
+ if ((IS_KABYLAKE(i915) || IS_COFFEELAKE(i915) || IS_COMETLAKE(i915)) &&
+ skl_watermark_ipc_enabled(i915))
+ latency += 4;
+
+ if (skl_needs_memory_bw_wa(i915) && wp->x_tiled)
+ latency += 15;
+
+ method1 = skl_wm_method1(i915, wp->plane_pixel_rate,
+ wp->cpp, latency, wp->dbuf_block_size);
+ method2 = skl_wm_method2(wp->plane_pixel_rate,
+ crtc_state->hw.pipe_mode.crtc_htotal,
+ latency,
+ wp->plane_blocks_per_line);
+
+ if (wp->y_tiled) {
+ selected_result = max_fixed16(method2, wp->y_tile_minimum);
+ } else {
+ if ((wp->cpp * crtc_state->hw.pipe_mode.crtc_htotal /
+ wp->dbuf_block_size < 1) &&
+ (wp->plane_bytes_per_line / wp->dbuf_block_size < 1)) {
+ selected_result = method2;
+ } else if (latency >= wp->linetime_us) {
+ if (DISPLAY_VER(i915) == 9)
+ selected_result = min_fixed16(method1, method2);
+ else
+ selected_result = method2;
+ } else {
+ selected_result = method1;
+ }
+ }
+
+ blocks = fixed16_to_u32_round_up(selected_result) + 1;
+ /*
+	 * Let's make blocks at minimum equivalent to plane_blocks_per_line,
+	 * as there will be at minimum one line for the lines configuration.
+	 * This is a workaround for FIFO underruns observed with resolutions
+	 * like 4k 60 Hz in single channel DRAM configurations.
+	 *
+	 * As per Bspec 49325, if the ddb allocation can hold at least
+	 * one plane_blocks_per_line, we should have selected method2 in
+	 * the above logic. Assuming that modern versions have enough dbuf
+	 * and that method2 guarantees blocks equivalent to at least 1 line,
+	 * select the blocks as plane_blocks_per_line.
+ *
+ * TODO: Revisit the logic when we have better understanding on DRAM
+ * channels' impact on the level 0 memory latency and the relevant
+ * wm calculations.
+ */
+ if (skl_wm_has_lines(i915, level))
+ blocks = max(blocks,
+ fixed16_to_u32_round_up(wp->plane_blocks_per_line));
+ lines = div_round_up_fixed16(selected_result,
+ wp->plane_blocks_per_line);
+
+ if (DISPLAY_VER(i915) == 9) {
+ /* Display WA #1125: skl,bxt,kbl */
+ if (level == 0 && wp->rc_surface)
+ blocks += fixed16_to_u32_round_up(wp->y_tile_minimum);
+
+ /* Display WA #1126: skl,bxt,kbl */
+ if (level >= 1 && level <= 7) {
+ if (wp->y_tiled) {
+ blocks += fixed16_to_u32_round_up(wp->y_tile_minimum);
+ lines += wp->y_min_scanlines;
+ } else {
+ blocks++;
+ }
+
+ /*
+			 * Make sure result blocks for higher latency levels are
+			 * at least as high as the level below the current level.
+			 * The DDB allocation algorithm's optimization for special
+			 * cases assumes this. Also covers Display WA #1125 for RC.
+ */
+ if (result_prev->blocks > blocks)
+ blocks = result_prev->blocks;
+ }
+ }
+
+ if (DISPLAY_VER(i915) >= 11) {
+ if (wp->y_tiled) {
+ int extra_lines;
+
+ if (lines % wp->y_min_scanlines == 0)
+ extra_lines = wp->y_min_scanlines;
+ else
+ extra_lines = wp->y_min_scanlines * 2 -
+ lines % wp->y_min_scanlines;
+
+ min_ddb_alloc = mul_round_up_u32_fixed16(lines + extra_lines,
+ wp->plane_blocks_per_line);
+ } else {
+ min_ddb_alloc = blocks + DIV_ROUND_UP(blocks, 10);
+ }
+ }
+
+ if (!skl_wm_has_lines(i915, level))
+ lines = 0;
+
+ if (lines > skl_wm_max_lines(i915)) {
+ /* reject it */
+ result->min_ddb_alloc = U16_MAX;
+ return;
+ }
+
+ /*
+ * If lines is valid, assume we can use this watermark level
+ * for now. We'll come back and disable it after we calculate the
+ * DDB allocation if it turns out we don't actually have enough
+ * blocks to satisfy it.
+ */
+ result->blocks = blocks;
+ result->lines = lines;
+ /* Bspec says: value >= plane ddb allocation -> invalid, hence the +1 here */
+ result->min_ddb_alloc = max(min_ddb_alloc, blocks) + 1;
+ result->enable = true;
+
+ if (DISPLAY_VER(i915) < 12 && i915->display.sagv.block_time_us)
+ result->can_sagv = latency >= i915->display.sagv.block_time_us;
+}
+
+static void
+skl_compute_wm_levels(const struct intel_crtc_state *crtc_state,
+ struct intel_plane *plane,
+ const struct skl_wm_params *wm_params,
+ struct skl_wm_level *levels)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ int level, max_level = ilk_wm_max_level(i915);
+ struct skl_wm_level *result_prev = &levels[0];
+
+ for (level = 0; level <= max_level; level++) {
+ struct skl_wm_level *result = &levels[level];
+ unsigned int latency = i915->display.wm.skl_latency[level];
+
+ skl_compute_plane_wm(crtc_state, plane, level, latency,
+ wm_params, result_prev, result);
+
+ result_prev = result;
+ }
+}
+
+static void tgl_compute_sagv_wm(const struct intel_crtc_state *crtc_state,
+ struct intel_plane *plane,
+ const struct skl_wm_params *wm_params,
+ struct skl_plane_wm *plane_wm)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct skl_wm_level *sagv_wm = &plane_wm->sagv.wm0;
+ struct skl_wm_level *levels = plane_wm->wm;
+ unsigned int latency = 0;
+
+ if (i915->display.sagv.block_time_us)
+ latency = i915->display.sagv.block_time_us + i915->display.wm.skl_latency[0];
+
+ skl_compute_plane_wm(crtc_state, plane, 0, latency,
+ wm_params, &levels[0],
+ sagv_wm);
+}
+
+static void skl_compute_transition_wm(struct drm_i915_private *i915,
+ struct skl_wm_level *trans_wm,
+ const struct skl_wm_level *wm0,
+ const struct skl_wm_params *wp)
+{
+ u16 trans_min, trans_amount, trans_y_tile_min;
+ u16 wm0_blocks, trans_offset, blocks;
+
+	/* Transition WMs don't make any sense if IPC is disabled */
+ if (!skl_watermark_ipc_enabled(i915))
+ return;
+
+ /*
+ * WaDisableTWM:skl,kbl,cfl,bxt
+	 * Transition WMs are not recommended by the HW team for GEN9
+ */
+ if (DISPLAY_VER(i915) == 9)
+ return;
+
+ if (DISPLAY_VER(i915) >= 11)
+ trans_min = 4;
+ else
+ trans_min = 14;
+
+ /* Display WA #1140: glk,cnl */
+ if (DISPLAY_VER(i915) == 10)
+ trans_amount = 0;
+ else
+ trans_amount = 10; /* This is configurable amount */
+
+ trans_offset = trans_min + trans_amount;
+
+ /*
+ * The spec asks for Selected Result Blocks for wm0 (the real value),
+ * not Result Blocks (the integer value). Pay attention to the capital
+ * letters. The value wm_l0->blocks is actually Result Blocks, but
+ * since Result Blocks is the ceiling of Selected Result Blocks plus 1,
+ * and since we later will have to get the ceiling of the sum in the
+ * transition watermarks calculation, we can just pretend Selected
+ * Result Blocks is Result Blocks minus 1 and it should work for the
+ * current platforms.
+ */
+ wm0_blocks = wm0->blocks - 1;
+
+ if (wp->y_tiled) {
+ trans_y_tile_min =
+ (u16)mul_round_up_u32_fixed16(2, wp->y_tile_minimum);
+ blocks = max(wm0_blocks, trans_y_tile_min) + trans_offset;
+ } else {
+ blocks = wm0_blocks + trans_offset;
+ }
+ blocks++;
+
+ /*
+ * Just assume we can enable the transition watermark. After
+ * computing the DDB we'll come back and disable it if that
+ * assumption turns out to be false.
+ */
+ trans_wm->blocks = blocks;
+ trans_wm->min_ddb_alloc = max_t(u16, wm0->min_ddb_alloc, blocks + 1);
+ trans_wm->enable = true;
+}
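+
+/*
+ * Worked example (editor's illustration): on a display version 11+
+ * platform with a non Y-tiled plane, trans_offset = 4 + 10 = 14, so
+ * blocks = (wm0->blocks - 1) + 14 + 1 = wm0->blocks + 14, and
+ * min_ddb_alloc = max(wm0->min_ddb_alloc, blocks + 1).
+ */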
+
+static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ struct intel_plane *plane, int color_plane)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane->id];
+ struct skl_wm_params wm_params;
+ int ret;
+
+ ret = skl_compute_plane_wm_params(crtc_state, plane_state,
+ &wm_params, color_plane);
+ if (ret)
+ return ret;
+
+ skl_compute_wm_levels(crtc_state, plane, &wm_params, wm->wm);
+
+ skl_compute_transition_wm(i915, &wm->trans_wm,
+ &wm->wm[0], &wm_params);
+
+ if (DISPLAY_VER(i915) >= 12) {
+ tgl_compute_sagv_wm(crtc_state, plane, &wm_params, wm);
+
+ skl_compute_transition_wm(i915, &wm->sagv.trans_wm,
+ &wm->sagv.wm0, &wm_params);
+ }
+
+ return 0;
+}
+
+static int skl_build_plane_wm_uv(struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ struct intel_plane *plane)
+{
+ struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane->id];
+ struct skl_wm_params wm_params;
+ int ret;
+
+ wm->is_planar = true;
+
+ /* uv plane watermarks must also be validated for NV12/Planar */
+ ret = skl_compute_plane_wm_params(crtc_state, plane_state,
+ &wm_params, 1);
+ if (ret)
+ return ret;
+
+ skl_compute_wm_levels(crtc_state, plane, &wm_params, wm->uv_wm);
+
+ return 0;
+}
+
+static int skl_build_plane_wm(struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ enum plane_id plane_id = plane->id;
+ struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane_id];
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ int ret;
+
+ memset(wm, 0, sizeof(*wm));
+
+ if (!intel_wm_plane_visible(crtc_state, plane_state))
+ return 0;
+
+ ret = skl_build_plane_wm_single(crtc_state, plane_state,
+ plane, 0);
+ if (ret)
+ return ret;
+
+ if (fb->format->is_yuv && fb->format->num_planes > 1) {
+ ret = skl_build_plane_wm_uv(crtc_state, plane_state,
+ plane);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ enum plane_id plane_id = plane->id;
+ struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane_id];
+ int ret;
+
+ /* Watermarks calculated in master */
+ if (plane_state->planar_slave)
+ return 0;
+
+ memset(wm, 0, sizeof(*wm));
+
+ if (plane_state->planar_linked_plane) {
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+
+ drm_WARN_ON(&i915->drm,
+ !intel_wm_plane_visible(crtc_state, plane_state));
+ drm_WARN_ON(&i915->drm, !fb->format->is_yuv ||
+ fb->format->num_planes == 1);
+
+ ret = skl_build_plane_wm_single(crtc_state, plane_state,
+ plane_state->planar_linked_plane, 0);
+ if (ret)
+ return ret;
+
+ ret = skl_build_plane_wm_single(crtc_state, plane_state,
+ plane, 1);
+ if (ret)
+ return ret;
+ } else if (intel_wm_plane_visible(crtc_state, plane_state)) {
+ ret = skl_build_plane_wm_single(crtc_state, plane_state,
+ plane, 0);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int skl_build_pipe_wm(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct intel_plane_state *plane_state;
+ struct intel_plane *plane;
+ int ret, i;
+
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+ /*
+		 * FIXME: should perhaps check {old,new}_plane_crtc->hw.crtc
+		 * instead, but we don't populate that correctly for NV12 Y
+		 * planes, so for now hack around it here.
+ */
+ if (plane->pipe != crtc->pipe)
+ continue;
+
+ if (DISPLAY_VER(i915) >= 11)
+ ret = icl_build_plane_wm(crtc_state, plane_state);
+ else
+ ret = skl_build_plane_wm(crtc_state, plane_state);
+ if (ret)
+ return ret;
+ }
+
+ crtc_state->wm.skl.optimal = crtc_state->wm.skl.raw;
+
+ return 0;
+}
+
+static void skl_ddb_entry_write(struct drm_i915_private *i915,
+ i915_reg_t reg,
+ const struct skl_ddb_entry *entry)
+{
+ if (entry->end)
+ intel_de_write_fw(i915, reg,
+ PLANE_BUF_END(entry->end - 1) |
+ PLANE_BUF_START(entry->start));
+ else
+ intel_de_write_fw(i915, reg, 0);
+}
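+
+/*
+ * Example (editor's illustration): an entry with start = 0, end = 512
+ * is programmed as PLANE_BUF_START(0) | PLANE_BUF_END(511); the
+ * register holds an inclusive end while the software entry uses an
+ * exclusive one. An empty entry (end == 0) writes 0 to disable the
+ * allocation.
+ */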
+
+static void skl_write_wm_level(struct drm_i915_private *i915,
+ i915_reg_t reg,
+ const struct skl_wm_level *level)
+{
+ u32 val = 0;
+
+ if (level->enable)
+ val |= PLANE_WM_EN;
+ if (level->ignore_lines)
+ val |= PLANE_WM_IGNORE_LINES;
+ val |= REG_FIELD_PREP(PLANE_WM_BLOCKS_MASK, level->blocks);
+ val |= REG_FIELD_PREP(PLANE_WM_LINES_MASK, level->lines);
+
+ intel_de_write_fw(i915, reg, val);
+}
+
+void skl_write_plane_wm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ int level, max_level = ilk_wm_max_level(i915);
+ enum plane_id plane_id = plane->id;
+ enum pipe pipe = plane->pipe;
+ const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
+ const struct skl_ddb_entry *ddb =
+ &crtc_state->wm.skl.plane_ddb[plane_id];
+ const struct skl_ddb_entry *ddb_y =
+ &crtc_state->wm.skl.plane_ddb_y[plane_id];
+
+ for (level = 0; level <= max_level; level++)
+ skl_write_wm_level(i915, PLANE_WM(pipe, plane_id, level),
+ skl_plane_wm_level(pipe_wm, plane_id, level));
+
+ skl_write_wm_level(i915, PLANE_WM_TRANS(pipe, plane_id),
+ skl_plane_trans_wm(pipe_wm, plane_id));
+
+ if (HAS_HW_SAGV_WM(i915)) {
+ const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
+
+ skl_write_wm_level(i915, PLANE_WM_SAGV(pipe, plane_id),
+ &wm->sagv.wm0);
+ skl_write_wm_level(i915, PLANE_WM_SAGV_TRANS(pipe, plane_id),
+ &wm->sagv.trans_wm);
+ }
+
+ skl_ddb_entry_write(i915,
+ PLANE_BUF_CFG(pipe, plane_id), ddb);
+
+ if (DISPLAY_VER(i915) < 11)
+ skl_ddb_entry_write(i915,
+ PLANE_NV12_BUF_CFG(pipe, plane_id), ddb_y);
+}
+
+void skl_write_cursor_wm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ int level, max_level = ilk_wm_max_level(i915);
+ enum plane_id plane_id = plane->id;
+ enum pipe pipe = plane->pipe;
+ const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
+ const struct skl_ddb_entry *ddb =
+ &crtc_state->wm.skl.plane_ddb[plane_id];
+
+ for (level = 0; level <= max_level; level++)
+ skl_write_wm_level(i915, CUR_WM(pipe, level),
+ skl_plane_wm_level(pipe_wm, plane_id, level));
+
+ skl_write_wm_level(i915, CUR_WM_TRANS(pipe),
+ skl_plane_trans_wm(pipe_wm, plane_id));
+
+ if (HAS_HW_SAGV_WM(i915)) {
+ const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
+
+ skl_write_wm_level(i915, CUR_WM_SAGV(pipe),
+ &wm->sagv.wm0);
+ skl_write_wm_level(i915, CUR_WM_SAGV_TRANS(pipe),
+ &wm->sagv.trans_wm);
+ }
+
+ skl_ddb_entry_write(i915, CUR_BUF_CFG(pipe), ddb);
+}
+
+static bool skl_wm_level_equals(const struct skl_wm_level *l1,
+ const struct skl_wm_level *l2)
+{
+ return l1->enable == l2->enable &&
+ l1->ignore_lines == l2->ignore_lines &&
+ l1->lines == l2->lines &&
+ l1->blocks == l2->blocks;
+}
+
+static bool skl_plane_wm_equals(struct drm_i915_private *i915,
+ const struct skl_plane_wm *wm1,
+ const struct skl_plane_wm *wm2)
+{
+ int level, max_level = ilk_wm_max_level(i915);
+
+ for (level = 0; level <= max_level; level++) {
+ /*
+ * We don't check uv_wm as the hardware doesn't actually
+ * use it. It only gets used for calculating the required
+ * ddb allocation.
+ */
+ if (!skl_wm_level_equals(&wm1->wm[level], &wm2->wm[level]))
+ return false;
+ }
+
+ return skl_wm_level_equals(&wm1->trans_wm, &wm2->trans_wm) &&
+ skl_wm_level_equals(&wm1->sagv.wm0, &wm2->sagv.wm0) &&
+ skl_wm_level_equals(&wm1->sagv.trans_wm, &wm2->sagv.trans_wm);
+}
+
+static bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
+ const struct skl_ddb_entry *b)
+{
+ return a->start < b->end && b->start < a->end;
+}
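+
+/*
+ * Example (editor's illustration): entries behave as half-open
+ * [start, end) ranges, so (0, 512) and (256, 768) overlap, while
+ * (0, 512) and (512, 1024) merely touch and do not.
+ */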
+
+static void skl_ddb_entry_union(struct skl_ddb_entry *a,
+ const struct skl_ddb_entry *b)
+{
+ if (a->end && b->end) {
+ a->start = min(a->start, b->start);
+ a->end = max(a->end, b->end);
+ } else if (b->end) {
+ a->start = b->start;
+ a->end = b->end;
+ }
+}
+
+bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
+ const struct skl_ddb_entry *entries,
+ int num_entries, int ignore_idx)
+{
+ int i;
+
+ for (i = 0; i < num_entries; i++) {
+ if (i != ignore_idx &&
+ skl_ddb_entries_overlap(ddb, &entries[i]))
+ return true;
+ }
+
+ return false;
+}
+
+static int
+skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state,
+ struct intel_crtc_state *new_crtc_state)
+{
+ struct intel_atomic_state *state = to_intel_atomic_state(new_crtc_state->uapi.state);
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_plane *plane;
+
+ for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
+ struct intel_plane_state *plane_state;
+ enum plane_id plane_id = plane->id;
+
+ if (skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb[plane_id],
+ &new_crtc_state->wm.skl.plane_ddb[plane_id]) &&
+ skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_y[plane_id],
+ &new_crtc_state->wm.skl.plane_ddb_y[plane_id]))
+ continue;
+
+ plane_state = intel_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state))
+ return PTR_ERR(plane_state);
+
+ new_crtc_state->update_planes |= BIT(plane_id);
+ }
+
+ return 0;
+}
+
+static u8 intel_dbuf_enabled_slices(const struct intel_dbuf_state *dbuf_state)
+{
+ struct drm_i915_private *i915 = to_i915(dbuf_state->base.state->base.dev);
+ u8 enabled_slices;
+ enum pipe pipe;
+
+ /*
+ * FIXME: For now we always enable slice S1 as per
+ * the Bspec display initialization sequence.
+ */
+ enabled_slices = BIT(DBUF_S1);
+
+ for_each_pipe(i915, pipe)
+ enabled_slices |= dbuf_state->slices[pipe];
+
+ return enabled_slices;
+}
+
+static int
+skl_compute_ddb(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_dbuf_state *old_dbuf_state;
+ struct intel_dbuf_state *new_dbuf_state = NULL;
+ const struct intel_crtc_state *old_crtc_state;
+ struct intel_crtc_state *new_crtc_state;
+ struct intel_crtc *crtc;
+ int ret, i;
+
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ new_dbuf_state = intel_atomic_get_dbuf_state(state);
+ if (IS_ERR(new_dbuf_state))
+ return PTR_ERR(new_dbuf_state);
+
+ old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
+ break;
+ }
+
+ if (!new_dbuf_state)
+ return 0;
+
+ new_dbuf_state->active_pipes =
+ intel_calc_active_pipes(state, old_dbuf_state->active_pipes);
+
+ if (old_dbuf_state->active_pipes != new_dbuf_state->active_pipes) {
+ ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
+ if (ret)
+ return ret;
+ }
+
+ if (HAS_MBUS_JOINING(i915))
+ new_dbuf_state->joined_mbus =
+ adlp_check_mbus_joined(new_dbuf_state->active_pipes);
+
+ for_each_intel_crtc(&i915->drm, crtc) {
+ enum pipe pipe = crtc->pipe;
+
+ new_dbuf_state->slices[pipe] =
+ skl_compute_dbuf_slices(crtc, new_dbuf_state->active_pipes,
+ new_dbuf_state->joined_mbus);
+
+ if (old_dbuf_state->slices[pipe] == new_dbuf_state->slices[pipe])
+ continue;
+
+ ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
+ if (ret)
+ return ret;
+ }
+
+ new_dbuf_state->enabled_slices = intel_dbuf_enabled_slices(new_dbuf_state);
+
+ if (old_dbuf_state->enabled_slices != new_dbuf_state->enabled_slices ||
+ old_dbuf_state->joined_mbus != new_dbuf_state->joined_mbus) {
+ ret = intel_atomic_serialize_global_state(&new_dbuf_state->base);
+ if (ret)
+ return ret;
+
+ if (old_dbuf_state->joined_mbus != new_dbuf_state->joined_mbus) {
+ /* TODO: Implement vblank synchronized MBUS joining changes */
+ ret = intel_modeset_all_pipes(state);
+ if (ret)
+ return ret;
+ }
+
+ drm_dbg_kms(&i915->drm,
+ "Enabled dbuf slices 0x%x -> 0x%x (total dbuf slices 0x%x), mbus joined? %s->%s\n",
+ old_dbuf_state->enabled_slices,
+ new_dbuf_state->enabled_slices,
+ INTEL_INFO(i915)->display.dbuf.slice_mask,
+ str_yes_no(old_dbuf_state->joined_mbus),
+ str_yes_no(new_dbuf_state->joined_mbus));
+ }
+
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ enum pipe pipe = crtc->pipe;
+
+ new_dbuf_state->weight[pipe] = intel_crtc_ddb_weight(new_crtc_state);
+
+ if (old_dbuf_state->weight[pipe] == new_dbuf_state->weight[pipe])
+ continue;
+
+ ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
+ if (ret)
+ return ret;
+ }
+
+ for_each_intel_crtc(&i915->drm, crtc) {
+ ret = skl_crtc_allocate_ddb(state, crtc);
+ if (ret)
+ return ret;
+ }
+
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ ret = skl_crtc_allocate_plane_ddb(state, crtc);
+ if (ret)
+ return ret;
+
+ ret = skl_ddb_add_affected_planes(old_crtc_state,
+ new_crtc_state);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static char enast(bool enable)
+{
+ return enable ? '*' : ' ';
+}
+
+static void
+skl_print_wm_changes(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_crtc_state *old_crtc_state;
+ const struct intel_crtc_state *new_crtc_state;
+ struct intel_plane *plane;
+ struct intel_crtc *crtc;
+ int i;
+
+ if (!drm_debug_enabled(DRM_UT_KMS))
+ return;
+
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ const struct skl_pipe_wm *old_pipe_wm, *new_pipe_wm;
+
+ old_pipe_wm = &old_crtc_state->wm.skl.optimal;
+ new_pipe_wm = &new_crtc_state->wm.skl.optimal;
+
+ for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
+ enum plane_id plane_id = plane->id;
+ const struct skl_ddb_entry *old, *new;
+
+ old = &old_crtc_state->wm.skl.plane_ddb[plane_id];
+ new = &new_crtc_state->wm.skl.plane_ddb[plane_id];
+
+ if (skl_ddb_entry_equal(old, new))
+ continue;
+
+ drm_dbg_kms(&i915->drm,
+ "[PLANE:%d:%s] ddb (%4d - %4d) -> (%4d - %4d), size %4d -> %4d\n",
+ plane->base.base.id, plane->base.name,
+ old->start, old->end, new->start, new->end,
+ skl_ddb_entry_size(old), skl_ddb_entry_size(new));
+ }
+
+ for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
+ enum plane_id plane_id = plane->id;
+ const struct skl_plane_wm *old_wm, *new_wm;
+
+ old_wm = &old_pipe_wm->planes[plane_id];
+ new_wm = &new_pipe_wm->planes[plane_id];
+
+ if (skl_plane_wm_equals(i915, old_wm, new_wm))
+ continue;
+
+ drm_dbg_kms(&i915->drm,
+ "[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm"
+ " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm\n",
+ plane->base.base.id, plane->base.name,
+ enast(old_wm->wm[0].enable), enast(old_wm->wm[1].enable),
+ enast(old_wm->wm[2].enable), enast(old_wm->wm[3].enable),
+ enast(old_wm->wm[4].enable), enast(old_wm->wm[5].enable),
+ enast(old_wm->wm[6].enable), enast(old_wm->wm[7].enable),
+ enast(old_wm->trans_wm.enable),
+ enast(old_wm->sagv.wm0.enable),
+ enast(old_wm->sagv.trans_wm.enable),
+ enast(new_wm->wm[0].enable), enast(new_wm->wm[1].enable),
+ enast(new_wm->wm[2].enable), enast(new_wm->wm[3].enable),
+ enast(new_wm->wm[4].enable), enast(new_wm->wm[5].enable),
+ enast(new_wm->wm[6].enable), enast(new_wm->wm[7].enable),
+ enast(new_wm->trans_wm.enable),
+ enast(new_wm->sagv.wm0.enable),
+ enast(new_wm->sagv.trans_wm.enable));
+
+ drm_dbg_kms(&i915->drm,
+ "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d"
+ " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d\n",
+ plane->base.base.id, plane->base.name,
+ enast(old_wm->wm[0].ignore_lines), old_wm->wm[0].lines,
+ enast(old_wm->wm[1].ignore_lines), old_wm->wm[1].lines,
+ enast(old_wm->wm[2].ignore_lines), old_wm->wm[2].lines,
+ enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].lines,
+ enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].lines,
+ enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].lines,
+ enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].lines,
+ enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].lines,
+ enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm.lines,
+ enast(old_wm->sagv.wm0.ignore_lines), old_wm->sagv.wm0.lines,
+ enast(old_wm->sagv.trans_wm.ignore_lines), old_wm->sagv.trans_wm.lines,
+ enast(new_wm->wm[0].ignore_lines), new_wm->wm[0].lines,
+ enast(new_wm->wm[1].ignore_lines), new_wm->wm[1].lines,
+ enast(new_wm->wm[2].ignore_lines), new_wm->wm[2].lines,
+ enast(new_wm->wm[3].ignore_lines), new_wm->wm[3].lines,
+ enast(new_wm->wm[4].ignore_lines), new_wm->wm[4].lines,
+ enast(new_wm->wm[5].ignore_lines), new_wm->wm[5].lines,
+ enast(new_wm->wm[6].ignore_lines), new_wm->wm[6].lines,
+ enast(new_wm->wm[7].ignore_lines), new_wm->wm[7].lines,
+ enast(new_wm->trans_wm.ignore_lines), new_wm->trans_wm.lines,
+ enast(new_wm->sagv.wm0.ignore_lines), new_wm->sagv.wm0.lines,
+ enast(new_wm->sagv.trans_wm.ignore_lines), new_wm->sagv.trans_wm.lines);
+
+ drm_dbg_kms(&i915->drm,
+ "[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d"
+ " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n",
+ plane->base.base.id, plane->base.name,
+ old_wm->wm[0].blocks, old_wm->wm[1].blocks,
+ old_wm->wm[2].blocks, old_wm->wm[3].blocks,
+ old_wm->wm[4].blocks, old_wm->wm[5].blocks,
+ old_wm->wm[6].blocks, old_wm->wm[7].blocks,
+ old_wm->trans_wm.blocks,
+ old_wm->sagv.wm0.blocks,
+ old_wm->sagv.trans_wm.blocks,
+ new_wm->wm[0].blocks, new_wm->wm[1].blocks,
+ new_wm->wm[2].blocks, new_wm->wm[3].blocks,
+ new_wm->wm[4].blocks, new_wm->wm[5].blocks,
+ new_wm->wm[6].blocks, new_wm->wm[7].blocks,
+ new_wm->trans_wm.blocks,
+ new_wm->sagv.wm0.blocks,
+ new_wm->sagv.trans_wm.blocks);
+
+ drm_dbg_kms(&i915->drm,
+ "[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d"
+ " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n",
+ plane->base.base.id, plane->base.name,
+ old_wm->wm[0].min_ddb_alloc, old_wm->wm[1].min_ddb_alloc,
+ old_wm->wm[2].min_ddb_alloc, old_wm->wm[3].min_ddb_alloc,
+ old_wm->wm[4].min_ddb_alloc, old_wm->wm[5].min_ddb_alloc,
+ old_wm->wm[6].min_ddb_alloc, old_wm->wm[7].min_ddb_alloc,
+ old_wm->trans_wm.min_ddb_alloc,
+ old_wm->sagv.wm0.min_ddb_alloc,
+ old_wm->sagv.trans_wm.min_ddb_alloc,
+ new_wm->wm[0].min_ddb_alloc, new_wm->wm[1].min_ddb_alloc,
+ new_wm->wm[2].min_ddb_alloc, new_wm->wm[3].min_ddb_alloc,
+ new_wm->wm[4].min_ddb_alloc, new_wm->wm[5].min_ddb_alloc,
+ new_wm->wm[6].min_ddb_alloc, new_wm->wm[7].min_ddb_alloc,
+ new_wm->trans_wm.min_ddb_alloc,
+ new_wm->sagv.wm0.min_ddb_alloc,
+ new_wm->sagv.trans_wm.min_ddb_alloc);
+ }
+ }
+}
+
+static bool skl_plane_selected_wm_equals(struct intel_plane *plane,
+ const struct skl_pipe_wm *old_pipe_wm,
+ const struct skl_pipe_wm *new_pipe_wm)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ int level, max_level = ilk_wm_max_level(i915);
+
+ for (level = 0; level <= max_level; level++) {
+ /*
+ * We don't check uv_wm as the hardware doesn't actually
+ * use it. It only gets used for calculating the required
+ * ddb allocation.
+ */
+ if (!skl_wm_level_equals(skl_plane_wm_level(old_pipe_wm, plane->id, level),
+ skl_plane_wm_level(new_pipe_wm, plane->id, level)))
+ return false;
+ }
+
+ if (HAS_HW_SAGV_WM(i915)) {
+ const struct skl_plane_wm *old_wm = &old_pipe_wm->planes[plane->id];
+ const struct skl_plane_wm *new_wm = &new_pipe_wm->planes[plane->id];
+
+ if (!skl_wm_level_equals(&old_wm->sagv.wm0, &new_wm->sagv.wm0) ||
+ !skl_wm_level_equals(&old_wm->sagv.trans_wm, &new_wm->sagv.trans_wm))
+ return false;
+ }
+
+ return skl_wm_level_equals(skl_plane_trans_wm(old_pipe_wm, plane->id),
+ skl_plane_trans_wm(new_pipe_wm, plane->id));
+}
+
+/*
+ * To make sure the cursor watermark registers are always consistent
+ * with our computed state the following scenario needs special
+ * treatment:
+ *
+ * 1. enable cursor
+ * 2. move cursor entirely offscreen
+ * 3. disable cursor
+ *
+ * Step 2. does call .disable_plane() but does not zero the watermarks
+ * (since we consider an offscreen cursor still active for the purposes
+ * of watermarks). Step 3. would not normally call .disable_plane()
+ * because the actual plane visibility isn't changing, and we don't
+ * deallocate the cursor ddb until the pipe gets disabled. So we must
+ * force step 3. to call .disable_plane() to update the watermark
+ * registers properly.
+ *
+ * Other planes do not suffer from this issue as their watermarks are
+ * calculated based on the actual plane visibility. The only time this
+ * can trigger for the other planes is during the initial readout as the
+ * default value of the watermarks registers is not zero.
+ */
+static int skl_wm_add_affected_planes(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct intel_plane *plane;
+
+ for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
+ struct intel_plane_state *plane_state;
+ enum plane_id plane_id = plane->id;
+
+ /*
+ * Force a full wm update for every plane on modeset.
+ * Required because the reset value of the wm registers
+ * is non-zero, whereas we want all disabled planes to
+ * have zero watermarks. So if we turn off the relevant
+ * power well the hardware state will go out of sync
+ * with the software state.
+ */
+ if (!drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi) &&
+ skl_plane_selected_wm_equals(plane,
+ &old_crtc_state->wm.skl.optimal,
+ &new_crtc_state->wm.skl.optimal))
+ continue;
+
+ plane_state = intel_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state))
+ return PTR_ERR(plane_state);
+
+ new_crtc_state->update_planes |= BIT(plane_id);
+ }
+
+ return 0;
+}
+
+static int
+skl_compute_wm(struct intel_atomic_state *state)
+{
+ struct intel_crtc *crtc;
+ struct intel_crtc_state *new_crtc_state;
+ int ret, i;
+
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ ret = skl_build_pipe_wm(state, crtc);
+ if (ret)
+ return ret;
+ }
+
+ ret = skl_compute_ddb(state);
+ if (ret)
+ return ret;
+
+ ret = intel_compute_sagv_mask(state);
+ if (ret)
+ return ret;
+
+ /*
+ * skl_compute_ddb() will have adjusted the final watermarks
+ * based on how much ddb is available. Now we can actually
+ * check if the final watermarks changed.
+ */
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ ret = skl_wm_add_affected_planes(state, crtc);
+ if (ret)
+ return ret;
+ }
+
+ skl_print_wm_changes(state);
+
+ return 0;
+}
+
+static void skl_wm_level_from_reg_val(u32 val, struct skl_wm_level *level)
+{
+ level->enable = val & PLANE_WM_EN;
+ level->ignore_lines = val & PLANE_WM_IGNORE_LINES;
+ level->blocks = REG_FIELD_GET(PLANE_WM_BLOCKS_MASK, val);
+ level->lines = REG_FIELD_GET(PLANE_WM_LINES_MASK, val);
+}
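+
+/*
+ * Example (editor's illustration): this is the inverse of
+ * skl_write_wm_level() above; a value of
+ * PLANE_WM_EN | REG_FIELD_PREP(PLANE_WM_BLOCKS_MASK, 23) |
+ * REG_FIELD_PREP(PLANE_WM_LINES_MASK, 2)
+ * reads back as enable = true, blocks = 23, lines = 2.
+ */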
+
+static void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
+ struct skl_pipe_wm *out)
+{
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ int level, max_level;
+ enum plane_id plane_id;
+ u32 val;
+
+ max_level = ilk_wm_max_level(i915);
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ struct skl_plane_wm *wm = &out->planes[plane_id];
+
+ for (level = 0; level <= max_level; level++) {
+ if (plane_id != PLANE_CURSOR)
+ val = intel_uncore_read(&i915->uncore, PLANE_WM(pipe, plane_id, level));
+ else
+ val = intel_uncore_read(&i915->uncore, CUR_WM(pipe, level));
+
+ skl_wm_level_from_reg_val(val, &wm->wm[level]);
+ }
+
+ if (plane_id != PLANE_CURSOR)
+ val = intel_uncore_read(&i915->uncore, PLANE_WM_TRANS(pipe, plane_id));
+ else
+ val = intel_uncore_read(&i915->uncore, CUR_WM_TRANS(pipe));
+
+ skl_wm_level_from_reg_val(val, &wm->trans_wm);
+
+ if (HAS_HW_SAGV_WM(i915)) {
+ if (plane_id != PLANE_CURSOR)
+ val = intel_uncore_read(&i915->uncore,
+ PLANE_WM_SAGV(pipe, plane_id));
+ else
+ val = intel_uncore_read(&i915->uncore,
+ CUR_WM_SAGV(pipe));
+
+ skl_wm_level_from_reg_val(val, &wm->sagv.wm0);
+
+ if (plane_id != PLANE_CURSOR)
+ val = intel_uncore_read(&i915->uncore,
+ PLANE_WM_SAGV_TRANS(pipe, plane_id));
+ else
+ val = intel_uncore_read(&i915->uncore,
+ CUR_WM_SAGV_TRANS(pipe));
+
+ skl_wm_level_from_reg_val(val, &wm->sagv.trans_wm);
+ } else if (DISPLAY_VER(i915) >= 12) {
+ wm->sagv.wm0 = wm->wm[0];
+ wm->sagv.trans_wm = wm->trans_wm;
+ }
+ }
+}
+
+void skl_wm_get_hw_state(struct drm_i915_private *i915)
+{
+ struct intel_dbuf_state *dbuf_state =
+ to_intel_dbuf_state(i915->display.dbuf.obj.state);
+ struct intel_crtc *crtc;
+
+ if (HAS_MBUS_JOINING(i915))
+ dbuf_state->joined_mbus = intel_de_read(i915, MBUS_CTL) & MBUS_JOIN;
+
+ for_each_intel_crtc(&i915->drm, crtc) {
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ enum pipe pipe = crtc->pipe;
+ unsigned int mbus_offset;
+ enum plane_id plane_id;
+ u8 slices;
+
+ memset(&crtc_state->wm.skl.optimal, 0,
+ sizeof(crtc_state->wm.skl.optimal));
+ if (crtc_state->hw.active)
+ skl_pipe_wm_get_hw_state(crtc, &crtc_state->wm.skl.optimal);
+ crtc_state->wm.skl.raw = crtc_state->wm.skl.optimal;
+
+ memset(&dbuf_state->ddb[pipe], 0, sizeof(dbuf_state->ddb[pipe]));
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ struct skl_ddb_entry *ddb =
+ &crtc_state->wm.skl.plane_ddb[plane_id];
+ struct skl_ddb_entry *ddb_y =
+ &crtc_state->wm.skl.plane_ddb_y[plane_id];
+
+ if (!crtc_state->hw.active)
+ continue;
+
+ skl_ddb_get_hw_plane_state(i915, crtc->pipe,
+ plane_id, ddb, ddb_y);
+
+ skl_ddb_entry_union(&dbuf_state->ddb[pipe], ddb);
+ skl_ddb_entry_union(&dbuf_state->ddb[pipe], ddb_y);
+ }
+
+ dbuf_state->weight[pipe] = intel_crtc_ddb_weight(crtc_state);
+
+ /*
+ * Used for checking overlaps, so we need absolute
+ * offsets instead of MBUS relative offsets.
+ */
+ slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes,
+ dbuf_state->joined_mbus);
+ mbus_offset = mbus_ddb_offset(i915, slices);
+ crtc_state->wm.skl.ddb.start = mbus_offset + dbuf_state->ddb[pipe].start;
+ crtc_state->wm.skl.ddb.end = mbus_offset + dbuf_state->ddb[pipe].end;
+
+ /* The slices actually used by the planes on the pipe */
+ dbuf_state->slices[pipe] =
+ skl_ddb_dbuf_slice_mask(i915, &crtc_state->wm.skl.ddb);
+
+ drm_dbg_kms(&i915->drm,
+ "[CRTC:%d:%s] dbuf slices 0x%x, ddb (%d - %d), active pipes 0x%x, mbus joined: %s\n",
+ crtc->base.base.id, crtc->base.name,
+ dbuf_state->slices[pipe], dbuf_state->ddb[pipe].start,
+ dbuf_state->ddb[pipe].end, dbuf_state->active_pipes,
+ str_yes_no(dbuf_state->joined_mbus));
+ }
+
+ dbuf_state->enabled_slices = i915->display.dbuf.enabled_slices;
+}
+
+static bool skl_dbuf_is_misconfigured(struct drm_i915_private *i915)
+{
+ const struct intel_dbuf_state *dbuf_state =
+ to_intel_dbuf_state(i915->display.dbuf.obj.state);
+ struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
+ struct intel_crtc *crtc;
+
+ for_each_intel_crtc(&i915->drm, crtc) {
+ const struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+
+ entries[crtc->pipe] = crtc_state->wm.skl.ddb;
+ }
+
+ for_each_intel_crtc(&i915->drm, crtc) {
+ const struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ u8 slices;
+
+ slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes,
+ dbuf_state->joined_mbus);
+ if (dbuf_state->slices[crtc->pipe] & ~slices)
+ return true;
+
+ if (skl_ddb_allocation_overlaps(&crtc_state->wm.skl.ddb, entries,
+ I915_MAX_PIPES, crtc->pipe))
+ return true;
+ }
+
+ return false;
+}
+
+void skl_wm_sanitize(struct drm_i915_private *i915)
+{
+ struct intel_crtc *crtc;
+
+ /*
+ * On TGL/RKL (at least) the BIOS likes to assign the planes
+ * to the wrong DBUF slices. This will cause an infinite loop
+ * in skl_commit_modeset_enables() as it can't find a way to
+	 * transition from the old bogus DBUF layout to the new
+ * proper DBUF layout without DBUF allocation overlaps between
+ * the planes (which cannot be allowed or else the hardware
+ * may hang). If we detect a bogus DBUF layout just turn off
+ * all the planes so that skl_commit_modeset_enables() can
+ * simply ignore them.
+ */
+ if (!skl_dbuf_is_misconfigured(i915))
+ return;
+
+ drm_dbg_kms(&i915->drm, "BIOS has misprogrammed the DBUF, disabling all planes\n");
+
+ for_each_intel_crtc(&i915->drm, crtc) {
+ struct intel_plane *plane = to_intel_plane(crtc->base.primary);
+ const struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+
+ if (plane_state->uapi.visible)
+ intel_plane_disable_noatomic(crtc, plane);
+
+ drm_WARN_ON(&i915->drm, crtc_state->active_planes != 0);
+
+ memset(&crtc_state->wm.skl.ddb, 0, sizeof(crtc_state->wm.skl.ddb));
+ }
+}
+
+void intel_wm_state_verify(struct intel_crtc *crtc,
+ struct intel_crtc_state *new_crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct skl_hw_state {
+ struct skl_ddb_entry ddb[I915_MAX_PLANES];
+ struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
+ struct skl_pipe_wm wm;
+ } *hw;
+ const struct skl_pipe_wm *sw_wm = &new_crtc_state->wm.skl.optimal;
+ int level, max_level = ilk_wm_max_level(i915);
+ struct intel_plane *plane;
+ u8 hw_enabled_slices;
+
+ if (DISPLAY_VER(i915) < 9 || !new_crtc_state->hw.active)
+ return;
+
+ hw = kzalloc(sizeof(*hw), GFP_KERNEL);
+ if (!hw)
+ return;
+
+ skl_pipe_wm_get_hw_state(crtc, &hw->wm);
+
+ skl_pipe_ddb_get_hw_state(crtc, hw->ddb, hw->ddb_y);
+
+ hw_enabled_slices = intel_enabled_dbuf_slices_mask(i915);
+
+ if (DISPLAY_VER(i915) >= 11 &&
+ hw_enabled_slices != i915->display.dbuf.enabled_slices)
+ drm_err(&i915->drm,
+ "mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n",
+ i915->display.dbuf.enabled_slices,
+ hw_enabled_slices);
+
+ for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
+ const struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
+ const struct skl_wm_level *hw_wm_level, *sw_wm_level;
+
+ /* Watermarks */
+ for (level = 0; level <= max_level; level++) {
+ hw_wm_level = &hw->wm.planes[plane->id].wm[level];
+ sw_wm_level = skl_plane_wm_level(sw_wm, plane->id, level);
+
+ if (skl_wm_level_equals(hw_wm_level, sw_wm_level))
+ continue;
+
+ drm_err(&i915->drm,
+ "[PLANE:%d:%s] mismatch in WM%d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+ plane->base.base.id, plane->base.name, level,
+ sw_wm_level->enable,
+ sw_wm_level->blocks,
+ sw_wm_level->lines,
+ hw_wm_level->enable,
+ hw_wm_level->blocks,
+ hw_wm_level->lines);
+ }
+
+ hw_wm_level = &hw->wm.planes[plane->id].trans_wm;
+ sw_wm_level = skl_plane_trans_wm(sw_wm, plane->id);
+
+ if (!skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
+ drm_err(&i915->drm,
+ "[PLANE:%d:%s] mismatch in trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+ plane->base.base.id, plane->base.name,
+ sw_wm_level->enable,
+ sw_wm_level->blocks,
+ sw_wm_level->lines,
+ hw_wm_level->enable,
+ hw_wm_level->blocks,
+ hw_wm_level->lines);
+ }
+
+ hw_wm_level = &hw->wm.planes[plane->id].sagv.wm0;
+ sw_wm_level = &sw_wm->planes[plane->id].sagv.wm0;
+
+ if (HAS_HW_SAGV_WM(i915) &&
+ !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
+ drm_err(&i915->drm,
+ "[PLANE:%d:%s] mismatch in SAGV WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+ plane->base.base.id, plane->base.name,
+ sw_wm_level->enable,
+ sw_wm_level->blocks,
+ sw_wm_level->lines,
+ hw_wm_level->enable,
+ hw_wm_level->blocks,
+ hw_wm_level->lines);
+ }
+
+ hw_wm_level = &hw->wm.planes[plane->id].sagv.trans_wm;
+ sw_wm_level = &sw_wm->planes[plane->id].sagv.trans_wm;
+
+ if (HAS_HW_SAGV_WM(i915) &&
+ !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
+ drm_err(&i915->drm,
+ "[PLANE:%d:%s] mismatch in SAGV trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+ plane->base.base.id, plane->base.name,
+ sw_wm_level->enable,
+ sw_wm_level->blocks,
+ sw_wm_level->lines,
+ hw_wm_level->enable,
+ hw_wm_level->blocks,
+ hw_wm_level->lines);
+ }
+
+ /* DDB */
+ hw_ddb_entry = &hw->ddb[PLANE_CURSOR];
+ sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb[PLANE_CURSOR];
+
+ if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
+ drm_err(&i915->drm,
+ "[PLANE:%d:%s] mismatch in DDB (expected (%u,%u), found (%u,%u))\n",
+ plane->base.base.id, plane->base.name,
+ sw_ddb_entry->start, sw_ddb_entry->end,
+ hw_ddb_entry->start, hw_ddb_entry->end);
+ }
+ }
+
+ kfree(hw);
+}
+
+bool skl_watermark_ipc_enabled(struct drm_i915_private *i915)
+{
+ return i915->display.wm.ipc_enabled;
+}
+
+void skl_watermark_ipc_update(struct drm_i915_private *i915)
+{
+ if (!HAS_IPC(i915))
+ return;
+
+ intel_uncore_rmw(&i915->uncore, DISP_ARB_CTL2, DISP_IPC_ENABLE,
+ skl_watermark_ipc_enabled(i915) ? DISP_IPC_ENABLE : 0);
+}
+
+static bool skl_watermark_ipc_can_enable(struct drm_i915_private *i915)
+{
+ /* Display WA #0477 WaDisableIPC: skl */
+ if (IS_SKYLAKE(i915))
+ return false;
+
+ /* Display WA #1141: SKL:all KBL:all CFL */
+ if (IS_KABYLAKE(i915) ||
+ IS_COFFEELAKE(i915) ||
+ IS_COMETLAKE(i915))
+ return i915->dram_info.symmetric_memory;
+
+ return true;
+}
+
+void skl_watermark_ipc_init(struct drm_i915_private *i915)
+{
+ if (!HAS_IPC(i915))
+ return;
+
+ i915->display.wm.ipc_enabled = skl_watermark_ipc_can_enable(i915);
+
+ skl_watermark_ipc_update(i915);
+}
+
+static void
+adjust_wm_latency(struct drm_i915_private *i915,
+ u16 wm[], int max_level, int read_latency)
+{
+ bool wm_lv_0_adjust_needed = i915->dram_info.wm_lv_0_adjust_needed;
+ int i, level;
+
+ /*
+	 * If a level n (n >= 1) has a 0us latency, all levels m (m >= n)
+ * need to be disabled. We make sure to sanitize the values out
+ * of the punit to satisfy this requirement.
+ */
+ for (level = 1; level <= max_level; level++) {
+ if (wm[level] == 0) {
+ for (i = level + 1; i <= max_level; i++)
+ wm[i] = 0;
+
+ max_level = level - 1;
+ break;
+ }
+ }
+
+ /*
+ * WaWmMemoryReadLatency
+ *
+	 * The punit doesn't take the read latency into account, so when
+	 * the level 0 response data is 0us we need to add the proper
+	 * adjustment to each valid level we retrieve from the punit.
+ */
+ if (wm[0] == 0) {
+ for (level = 0; level <= max_level; level++)
+ wm[level] += read_latency;
+ }
+
+ /*
+ * WA Level-0 adjustment for 16GB DIMMs: SKL+
+	 * If we could not get DIMM info, assume 16GB DIMMs and apply this
+	 * WA to prevent underruns.
+ */
+ if (wm_lv_0_adjust_needed)
+ wm[0] += 1;
+}
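+
+/*
+ * Worked example with hypothetical punit values: wm = {0, 5, 9, 0, 30} and
+ * read_latency = 2. Level 3 reports 0us, so levels 3+ are zeroed and
+ * max_level drops to 2; wm[0] == 0, so read_latency is added to levels
+ * 0..2, giving {2, 7, 11, 0, 0}; the 16GB DIMM WA may then bump wm[0] to 3.
+ */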
+
+static void mtl_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
+{
+ struct intel_uncore *uncore = &i915->uncore;
+ int max_level = ilk_wm_max_level(i915);
+ u32 val;
+
+ val = intel_uncore_read(uncore, MTL_LATENCY_LP0_LP1);
+ wm[0] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val);
+ wm[1] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val);
+
+ val = intel_uncore_read(uncore, MTL_LATENCY_LP2_LP3);
+ wm[2] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val);
+ wm[3] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val);
+
+ val = intel_uncore_read(uncore, MTL_LATENCY_LP4_LP5);
+ wm[4] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val);
+ wm[5] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val);
+
+ adjust_wm_latency(i915, wm, max_level, 6);
+}
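+
+/*
+ * Each MTL_LATENCY_LP* register packs two latency levels, decoded via the
+ * MTL_LATENCY_LEVEL_{EVEN,ODD}_MASK fields above: the even level in one
+ * half of the register and the odd level in the other.
+ */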
+
+static void skl_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
+{
+ int max_level = ilk_wm_max_level(i915);
+ int read_latency = DISPLAY_VER(i915) >= 12 ? 3 : 2;
+ int mult = IS_DG2(i915) ? 2 : 1;
+ u32 val;
+ int ret;
+
+ /* read the first set of memory latencies[0:3] */
+ val = 0; /* data0 to be programmed to 0 for first set */
+ ret = snb_pcode_read(&i915->uncore, GEN9_PCODE_READ_MEM_LATENCY, &val, NULL);
+ if (ret) {
+ drm_err(&i915->drm, "SKL Mailbox read error = %d\n", ret);
+ return;
+ }
+
+ wm[0] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_0_4_MASK, val) * mult;
+ wm[1] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_1_5_MASK, val) * mult;
+ wm[2] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_2_6_MASK, val) * mult;
+ wm[3] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_3_7_MASK, val) * mult;
+
+ /* read the second set of memory latencies[4:7] */
+ val = 1; /* data0 to be programmed to 1 for second set */
+ ret = snb_pcode_read(&i915->uncore, GEN9_PCODE_READ_MEM_LATENCY, &val, NULL);
+ if (ret) {
+ drm_err(&i915->drm, "SKL Mailbox read error = %d\n", ret);
+ return;
+ }
+
+ wm[4] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_0_4_MASK, val) * mult;
+ wm[5] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_1_5_MASK, val) * mult;
+ wm[6] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_2_6_MASK, val) * mult;
+ wm[7] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_3_7_MASK, val) * mult;
+
+ adjust_wm_latency(i915, wm, max_level, read_latency);
+}
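+
+/*
+ * Each pcode read above returns four latency levels packed into a single
+ * u32 (the GEN9_MEM_LATENCY_LEVEL_*_MASK fields): data0 = 0 selects
+ * levels 0-3 and data0 = 1 selects levels 4-7. On DG2 the reported values
+ * are doubled via mult.
+ */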
+
+static void skl_setup_wm_latency(struct drm_i915_private *i915)
+{
+ if (DISPLAY_VER(i915) >= 14)
+ mtl_read_wm_latency(i915, i915->display.wm.skl_latency);
+ else
+ skl_read_wm_latency(i915, i915->display.wm.skl_latency);
+
+ intel_print_wm_latency(i915, "Gen9 Plane", i915->display.wm.skl_latency);
+}
+
+static const struct intel_wm_funcs skl_wm_funcs = {
+ .compute_global_watermarks = skl_compute_wm,
+};
+
+void skl_wm_init(struct drm_i915_private *i915)
+{
+ intel_sagv_init(i915);
+
+ skl_setup_wm_latency(i915);
+
+ i915->display.funcs.wm = &skl_wm_funcs;
+}
+
+static struct intel_global_state *intel_dbuf_duplicate_state(struct intel_global_obj *obj)
+{
+ struct intel_dbuf_state *dbuf_state;
+
+ dbuf_state = kmemdup(obj->state, sizeof(*dbuf_state), GFP_KERNEL);
+ if (!dbuf_state)
+ return NULL;
+
+ return &dbuf_state->base;
+}
+
+static void intel_dbuf_destroy_state(struct intel_global_obj *obj,
+ struct intel_global_state *state)
+{
+ kfree(state);
+}
+
+static const struct intel_global_state_funcs intel_dbuf_funcs = {
+ .atomic_duplicate_state = intel_dbuf_duplicate_state,
+ .atomic_destroy_state = intel_dbuf_destroy_state,
+};
+
+struct intel_dbuf_state *
+intel_atomic_get_dbuf_state(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_global_state *dbuf_state;
+
+ dbuf_state = intel_atomic_get_global_obj_state(state, &i915->display.dbuf.obj);
+ if (IS_ERR(dbuf_state))
+ return ERR_CAST(dbuf_state);
+
+ return to_intel_dbuf_state(dbuf_state);
+}
+
+int intel_dbuf_init(struct drm_i915_private *i915)
+{
+ struct intel_dbuf_state *dbuf_state;
+
+ dbuf_state = kzalloc(sizeof(*dbuf_state), GFP_KERNEL);
+ if (!dbuf_state)
+ return -ENOMEM;
+
+ intel_atomic_global_obj_init(i915, &i915->display.dbuf.obj,
+ &dbuf_state->base, &intel_dbuf_funcs);
+
+ return 0;
+}
+
+/*
+ * Configure MBUS_CTL and the DBUF_CTL_S register of each slice to the
+ * join_mbus state before updating the requested state of all DBUF slices.
+ */
+static void update_mbus_pre_enable(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ u32 mbus_ctl, dbuf_min_tracker_val;
+ enum dbuf_slice slice;
+ const struct intel_dbuf_state *dbuf_state =
+ intel_atomic_get_new_dbuf_state(state);
+
+ if (!HAS_MBUS_JOINING(i915))
+ return;
+
+ /*
+ * TODO: Implement vblank synchronized MBUS joining changes.
+ * Must be properly coordinated with dbuf reprogramming.
+ */
+ if (dbuf_state->joined_mbus) {
+ mbus_ctl = MBUS_HASHING_MODE_1x4 | MBUS_JOIN |
+ MBUS_JOIN_PIPE_SELECT_NONE;
+ dbuf_min_tracker_val = DBUF_MIN_TRACKER_STATE_SERVICE(3);
+ } else {
+ mbus_ctl = MBUS_HASHING_MODE_2x2 |
+ MBUS_JOIN_PIPE_SELECT_NONE;
+ dbuf_min_tracker_val = DBUF_MIN_TRACKER_STATE_SERVICE(1);
+ }
+
+ intel_de_rmw(i915, MBUS_CTL,
+ MBUS_HASHING_MODE_MASK | MBUS_JOIN |
+ MBUS_JOIN_PIPE_SELECT_MASK, mbus_ctl);
+
+ for_each_dbuf_slice(i915, slice)
+ intel_de_rmw(i915, DBUF_CTL_S(slice),
+ DBUF_MIN_TRACKER_STATE_SERVICE_MASK,
+ dbuf_min_tracker_val);
+}
+
+void intel_dbuf_pre_plane_update(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_dbuf_state *new_dbuf_state =
+ intel_atomic_get_new_dbuf_state(state);
+ const struct intel_dbuf_state *old_dbuf_state =
+ intel_atomic_get_old_dbuf_state(state);
+
+ if (!new_dbuf_state ||
+ (new_dbuf_state->enabled_slices == old_dbuf_state->enabled_slices &&
+ new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus))
+ return;
+
+ WARN_ON(!new_dbuf_state->base.changed);
+
+ update_mbus_pre_enable(state);
+ gen9_dbuf_slices_update(i915,
+ old_dbuf_state->enabled_slices |
+ new_dbuf_state->enabled_slices);
+}
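+
+/*
+ * Note the two-step dance: the pre-plane-update hook above enables the
+ * union of the old and new slice sets, so that both the old and the new
+ * DBUF layout stay valid while planes are being moved, and the
+ * post-plane-update hook below then trims the enabled set down to the new
+ * state's slices.
+ */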
+
+void intel_dbuf_post_plane_update(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_dbuf_state *new_dbuf_state =
+ intel_atomic_get_new_dbuf_state(state);
+ const struct intel_dbuf_state *old_dbuf_state =
+ intel_atomic_get_old_dbuf_state(state);
+
+ if (!new_dbuf_state ||
+ (new_dbuf_state->enabled_slices == old_dbuf_state->enabled_slices &&
+ new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus))
+ return;
+
+ WARN_ON(!new_dbuf_state->base.changed);
+
+ gen9_dbuf_slices_update(i915,
+ new_dbuf_state->enabled_slices);
+}
+
+static bool xelpdp_is_only_pipe_per_dbuf_bank(enum pipe pipe, u8 active_pipes)
+{
+ switch (pipe) {
+ case PIPE_A:
+ return !(active_pipes & BIT(PIPE_D));
+ case PIPE_D:
+ return !(active_pipes & BIT(PIPE_A));
+ case PIPE_B:
+ return !(active_pipes & BIT(PIPE_C));
+ case PIPE_C:
+ return !(active_pipes & BIT(PIPE_B));
+ default: /* to suppress compiler warning */
+ MISSING_CASE(pipe);
+ break;
+ }
+
+ return false;
+}
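+
+/*
+ * Per the pairing above, on XELPDP pipes A/D share one DBUF bank and pipes
+ * B/C share the other; a pipe is the only one on its bank when its partner
+ * pipe is inactive, and is then given the larger BW credit budget below.
+ */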
+
+void intel_mbus_dbox_update(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_dbuf_state *new_dbuf_state, *old_dbuf_state;
+ const struct intel_crtc_state *new_crtc_state;
+ const struct intel_crtc *crtc;
+ u32 val = 0;
+ int i;
+
+ if (DISPLAY_VER(i915) < 11)
+ return;
+
+ new_dbuf_state = intel_atomic_get_new_dbuf_state(state);
+ old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
+ if (!new_dbuf_state ||
+ (new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus &&
+ new_dbuf_state->active_pipes == old_dbuf_state->active_pipes))
+ return;
+
+ if (DISPLAY_VER(i915) >= 14)
+ val |= MBUS_DBOX_I_CREDIT(2);
+
+ if (DISPLAY_VER(i915) >= 12) {
+ val |= MBUS_DBOX_B2B_TRANSACTIONS_MAX(16);
+ val |= MBUS_DBOX_B2B_TRANSACTIONS_DELAY(1);
+ val |= MBUS_DBOX_REGULATE_B2B_TRANSACTIONS_EN;
+ }
+
+ if (DISPLAY_VER(i915) >= 14)
+ val |= new_dbuf_state->joined_mbus ? MBUS_DBOX_A_CREDIT(12) :
+ MBUS_DBOX_A_CREDIT(8);
+ else if (IS_ALDERLAKE_P(i915))
+ /* Wa_22010947358:adl-p */
+ val |= new_dbuf_state->joined_mbus ? MBUS_DBOX_A_CREDIT(6) :
+ MBUS_DBOX_A_CREDIT(4);
+ else
+ val |= MBUS_DBOX_A_CREDIT(2);
+
+ if (DISPLAY_VER(i915) >= 14) {
+ val |= MBUS_DBOX_B_CREDIT(0xA);
+ } else if (IS_ALDERLAKE_P(i915)) {
+ val |= MBUS_DBOX_BW_CREDIT(2);
+ val |= MBUS_DBOX_B_CREDIT(8);
+ } else if (DISPLAY_VER(i915) >= 12) {
+ val |= MBUS_DBOX_BW_CREDIT(2);
+ val |= MBUS_DBOX_B_CREDIT(12);
+ } else {
+ val |= MBUS_DBOX_BW_CREDIT(1);
+ val |= MBUS_DBOX_B_CREDIT(8);
+ }
+
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ u32 pipe_val = val;
+
+ if (!new_crtc_state->hw.active)
+ continue;
+
+ if (DISPLAY_VER(i915) >= 14) {
+ if (xelpdp_is_only_pipe_per_dbuf_bank(crtc->pipe,
+ new_dbuf_state->active_pipes))
+ pipe_val |= MBUS_DBOX_BW_8CREDITS_MTL;
+ else
+ pipe_val |= MBUS_DBOX_BW_4CREDITS_MTL;
+ }
+
+ intel_de_write(i915, PIPE_MBUS_DBOX_CTL(crtc->pipe), pipe_val);
+ }
+}
+
+static int skl_watermark_ipc_status_show(struct seq_file *m, void *data)
+{
+ struct drm_i915_private *i915 = m->private;
+
+ seq_printf(m, "Isochronous Priority Control: %s\n",
+ str_yes_no(skl_watermark_ipc_enabled(i915)));
+ return 0;
+}
+
+static int skl_watermark_ipc_status_open(struct inode *inode, struct file *file)
+{
+ struct drm_i915_private *i915 = inode->i_private;
+
+ return single_open(file, skl_watermark_ipc_status_show, i915);
+}
+
+static ssize_t skl_watermark_ipc_status_write(struct file *file,
+ const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct drm_i915_private *i915 = m->private;
+ intel_wakeref_t wakeref;
+ bool enable;
+ int ret;
+
+ ret = kstrtobool_from_user(ubuf, len, &enable);
+ if (ret < 0)
+ return ret;
+
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
+ if (!skl_watermark_ipc_enabled(i915) && enable)
+ drm_info(&i915->drm,
+ "Enabling IPC: WM will be proper only after next commit\n");
+ i915->display.wm.ipc_enabled = enable;
+ skl_watermark_ipc_update(i915);
+ }
+
+ return len;
+}
+
+static const struct file_operations skl_watermark_ipc_status_fops = {
+ .owner = THIS_MODULE,
+ .open = skl_watermark_ipc_status_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = skl_watermark_ipc_status_write
+};
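+
+/*
+ * The resulting debugfs file (registered below as i915_ipc_status) is
+ * exercised from userspace, e.g. (path assumes the default debugfs mount
+ * point and DRM minor 0):
+ *
+ *	echo 0 > /sys/kernel/debug/dri/0/i915_ipc_status
+ */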
+
+void skl_watermark_ipc_debugfs_register(struct drm_i915_private *i915)
+{
+ struct drm_minor *minor = i915->drm.primary;
+
+ if (!HAS_IPC(i915))
+ return;
+
+ debugfs_create_file("i915_ipc_status", 0644, minor->debugfs_root, i915,
+ &skl_watermark_ipc_status_fops);
+}
diff --git a/drivers/gpu/drm/i915/display/skl_watermark.h b/drivers/gpu/drm/i915/display/skl_watermark.h
new file mode 100644
index 000000000..7a5a4e67c
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/skl_watermark.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __SKL_WATERMARK_H__
+#define __SKL_WATERMARK_H__
+
+#include <linux/types.h>
+
+#include "intel_display.h"
+#include "intel_global_state.h"
+#include "intel_pm_types.h"
+
+struct drm_i915_private;
+struct intel_atomic_state;
+struct intel_bw_state;
+struct intel_crtc;
+struct intel_crtc_state;
+struct intel_plane;
+
+u8 intel_enabled_dbuf_slices_mask(struct drm_i915_private *i915);
+
+void intel_sagv_pre_plane_update(struct intel_atomic_state *state);
+void intel_sagv_post_plane_update(struct intel_atomic_state *state);
+bool intel_can_enable_sagv(struct drm_i915_private *i915,
+ const struct intel_bw_state *bw_state);
+
+u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *i915,
+ const struct skl_ddb_entry *entry);
+
+void skl_write_plane_wm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state);
+void skl_write_cursor_wm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state);
+
+bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
+ const struct skl_ddb_entry *entries,
+ int num_entries, int ignore_idx);
+
+void skl_wm_get_hw_state(struct drm_i915_private *i915);
+void skl_wm_sanitize(struct drm_i915_private *i915);
+
+void intel_wm_state_verify(struct intel_crtc *crtc,
+ struct intel_crtc_state *new_crtc_state);
+
+void skl_watermark_ipc_init(struct drm_i915_private *i915);
+void skl_watermark_ipc_update(struct drm_i915_private *i915);
+bool skl_watermark_ipc_enabled(struct drm_i915_private *i915);
+void skl_watermark_ipc_debugfs_register(struct drm_i915_private *i915);
+
+void skl_wm_init(struct drm_i915_private *i915);
+
+struct intel_dbuf_state {
+ struct intel_global_state base;
+
+ struct skl_ddb_entry ddb[I915_MAX_PIPES];
+ unsigned int weight[I915_MAX_PIPES];
+ u8 slices[I915_MAX_PIPES];
+ u8 enabled_slices;
+ u8 active_pipes;
+ bool joined_mbus;
+};
+
+struct intel_dbuf_state *
+intel_atomic_get_dbuf_state(struct intel_atomic_state *state);
+
+#define to_intel_dbuf_state(x) container_of((x), struct intel_dbuf_state, base)
+#define intel_atomic_get_old_dbuf_state(state) \
+ to_intel_dbuf_state(intel_atomic_get_old_global_obj_state(state, &to_i915(state->base.dev)->display.dbuf.obj))
+#define intel_atomic_get_new_dbuf_state(state) \
+ to_intel_dbuf_state(intel_atomic_get_new_global_obj_state(state, &to_i915(state->base.dev)->display.dbuf.obj))
+
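+/*
+ * Minimal usage sketch from an atomic check path (hypothetical caller;
+ * intel_atomic_get_dbuf_state() returns an ERR_PTR on failure):
+ *
+ *	struct intel_dbuf_state *dbuf_state =
+ *		intel_atomic_get_dbuf_state(state);
+ *	if (IS_ERR(dbuf_state))
+ *		return PTR_ERR(dbuf_state);
+ */
+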
+int intel_dbuf_init(struct drm_i915_private *i915);
+void intel_dbuf_pre_plane_update(struct intel_atomic_state *state);
+void intel_dbuf_post_plane_update(struct intel_atomic_state *state);
+void intel_mbus_dbox_update(struct intel_atomic_state *state);
+
+#endif /* __SKL_WATERMARK_H__ */
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
new file mode 100644
index 000000000..114088ca5
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -0,0 +1,2015 @@
+/*
+ * Copyright © 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Jani Nikula <jani.nikula@intel.com>
+ */
+
+#include <linux/slab.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_mipi_dsi.h>
+
+#include "i915_drv.h"
+#include "intel_atomic.h"
+#include "intel_backlight.h"
+#include "intel_connector.h"
+#include "intel_crtc.h"
+#include "intel_de.h"
+#include "intel_display_types.h"
+#include "intel_dsi.h"
+#include "intel_dsi_vbt.h"
+#include "intel_fifo_underrun.h"
+#include "intel_panel.h"
+#include "skl_scaler.h"
+#include "vlv_dsi.h"
+#include "vlv_dsi_pll.h"
+#include "vlv_dsi_regs.h"
+#include "vlv_sideband.h"
+
+/* convert pixels to txbyteclkhs units */
+static u16 txbyteclkhs(u16 pixels, int bpp, int lane_count,
+ u16 burst_mode_ratio)
+{
+ return DIV_ROUND_UP(DIV_ROUND_UP(pixels * bpp * burst_mode_ratio,
+ 8 * 100), lane_count);
+}
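+
+/*
+ * Example: pixels = 100, bpp = 24, lane_count = 4 and a 1:1
+ * burst_mode_ratio of 100 gives DIV_ROUND_UP(100 * 24 * 100, 800) = 300
+ * byte clocks, spread over 4 lanes -> DIV_ROUND_UP(300, 4) = 75.
+ */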
+
+/* convert txbyteclkhs units back to pixels */
+static u16 pixels_from_txbyteclkhs(u16 clk_hs, int bpp, int lane_count,
+ u16 burst_mode_ratio)
+{
+ return DIV_ROUND_UP((clk_hs * lane_count * 8 * 100),
+ (bpp * burst_mode_ratio));
+}
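+
+/*
+ * Inverse of txbyteclkhs(); since both directions round up, a round trip
+ * may overshoot by a pixel or two, which bxt_dsi_get_pipe_config()
+ * compensates for when reading back the hardware state.
+ */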
+
+enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt)
+{
+ /* It just so happens the VBT matches register contents. */
+ switch (fmt) {
+ case VID_MODE_FORMAT_RGB888:
+ return MIPI_DSI_FMT_RGB888;
+ case VID_MODE_FORMAT_RGB666:
+ return MIPI_DSI_FMT_RGB666;
+ case VID_MODE_FORMAT_RGB666_PACKED:
+ return MIPI_DSI_FMT_RGB666_PACKED;
+ case VID_MODE_FORMAT_RGB565:
+ return MIPI_DSI_FMT_RGB565;
+ default:
+ MISSING_CASE(fmt);
+ return MIPI_DSI_FMT_RGB666;
+ }
+}
+
+void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port)
+{
+ struct drm_encoder *encoder = &intel_dsi->base.base;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ u32 mask;
+
+ mask = LP_CTRL_FIFO_EMPTY | HS_CTRL_FIFO_EMPTY |
+ LP_DATA_FIFO_EMPTY | HS_DATA_FIFO_EMPTY;
+
+ if (intel_de_wait_for_set(dev_priv, MIPI_GEN_FIFO_STAT(port),
+ mask, 100))
+ drm_err(&dev_priv->drm, "DPI FIFOs are not empty\n");
+}
+
+static void write_data(struct drm_i915_private *dev_priv,
+ i915_reg_t reg,
+ const u8 *data, u32 len)
+{
+ u32 i, j;
+
+ for (i = 0; i < len; i += 4) {
+ u32 val = 0;
+
+ for (j = 0; j < min_t(u32, len - i, 4); j++)
+ val |= *data++ << 8 * j;
+
+ intel_de_write(dev_priv, reg, val);
+ }
+}
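+
+/*
+ * Payload bytes are packed little-endian into 32-bit register writes, e.g.
+ * data = {0x11, 0x22, 0x33, 0x44, 0x55} results in the two writes
+ * 0x44332211 and 0x00000055.
+ */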
+
+static void read_data(struct drm_i915_private *dev_priv,
+ i915_reg_t reg,
+ u8 *data, u32 len)
+{
+ u32 i, j;
+
+ for (i = 0; i < len; i += 4) {
+ u32 val = intel_de_read(dev_priv, reg);
+
+ for (j = 0; j < min_t(u32, len - i, 4); j++)
+ *data++ = val >> 8 * j;
+ }
+}
+
+static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
+{
+ struct intel_dsi_host *intel_dsi_host = to_intel_dsi_host(host);
+ struct drm_device *dev = intel_dsi_host->intel_dsi->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ enum port port = intel_dsi_host->port;
+ struct mipi_dsi_packet packet;
+ ssize_t ret;
+ const u8 *header, *data;
+ i915_reg_t data_reg, ctrl_reg;
+ u32 data_mask, ctrl_mask;
+
+ ret = mipi_dsi_create_packet(&packet, msg);
+ if (ret < 0)
+ return ret;
+
+ header = packet.header;
+ data = packet.payload;
+
+ if (msg->flags & MIPI_DSI_MSG_USE_LPM) {
+ data_reg = MIPI_LP_GEN_DATA(port);
+ data_mask = LP_DATA_FIFO_FULL;
+ ctrl_reg = MIPI_LP_GEN_CTRL(port);
+ ctrl_mask = LP_CTRL_FIFO_FULL;
+ } else {
+ data_reg = MIPI_HS_GEN_DATA(port);
+ data_mask = HS_DATA_FIFO_FULL;
+ ctrl_reg = MIPI_HS_GEN_CTRL(port);
+ ctrl_mask = HS_CTRL_FIFO_FULL;
+ }
+
+ /* note: this is never true for reads */
+ if (packet.payload_length) {
+ if (intel_de_wait_for_clear(dev_priv, MIPI_GEN_FIFO_STAT(port),
+ data_mask, 50))
+ drm_err(&dev_priv->drm,
+ "Timeout waiting for HS/LP DATA FIFO !full\n");
+
+ write_data(dev_priv, data_reg, packet.payload,
+ packet.payload_length);
+ }
+
+ if (msg->rx_len) {
+ intel_de_write(dev_priv, MIPI_INTR_STAT(port),
+ GEN_READ_DATA_AVAIL);
+ }
+
+ if (intel_de_wait_for_clear(dev_priv, MIPI_GEN_FIFO_STAT(port),
+ ctrl_mask, 50)) {
+ drm_err(&dev_priv->drm,
+ "Timeout waiting for HS/LP CTRL FIFO !full\n");
+ }
+
+ intel_de_write(dev_priv, ctrl_reg,
+ header[2] << 16 | header[1] << 8 | header[0]);
+
+ /* ->rx_len is set only for reads */
+ if (msg->rx_len) {
+ data_mask = GEN_READ_DATA_AVAIL;
+ if (intel_de_wait_for_set(dev_priv, MIPI_INTR_STAT(port),
+ data_mask, 50))
+ drm_err(&dev_priv->drm,
+ "Timeout waiting for read data.\n");
+
+ read_data(dev_priv, data_reg, msg->rx_buf, msg->rx_len);
+ }
+
+ /* XXX: fix for reads and writes */
+ return 4 + packet.payload_length;
+}
+
+static int intel_dsi_host_attach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *dsi)
+{
+ return 0;
+}
+
+static int intel_dsi_host_detach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *dsi)
+{
+ return 0;
+}
+
+static const struct mipi_dsi_host_ops intel_dsi_host_ops = {
+ .attach = intel_dsi_host_attach,
+ .detach = intel_dsi_host_detach,
+ .transfer = intel_dsi_host_transfer,
+};
+
+/*
+ * send a video mode command
+ *
+ * XXX: commands with data in MIPI_DPI_DATA?
+ */
+static int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs,
+ enum port port)
+{
+ struct drm_encoder *encoder = &intel_dsi->base.base;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ u32 mask;
+
+ /* XXX: pipe, hs */
+ if (hs)
+ cmd &= ~DPI_LP_MODE;
+ else
+ cmd |= DPI_LP_MODE;
+
+ /* clear bit */
+ intel_de_write(dev_priv, MIPI_INTR_STAT(port), SPL_PKT_SENT_INTERRUPT);
+
+ /* XXX: old code skips write if control unchanged */
+ if (cmd == intel_de_read(dev_priv, MIPI_DPI_CONTROL(port)))
+ drm_dbg_kms(&dev_priv->drm,
+ "Same special packet %02x twice in a row.\n", cmd);
+
+ intel_de_write(dev_priv, MIPI_DPI_CONTROL(port), cmd);
+
+ mask = SPL_PKT_SENT_INTERRUPT;
+ if (intel_de_wait_for_set(dev_priv, MIPI_INTR_STAT(port), mask, 100))
+ drm_err(&dev_priv->drm,
+ "Video mode command 0x%08x send failed.\n", cmd);
+
+ return 0;
+}
+
+static void band_gap_reset(struct drm_i915_private *dev_priv)
+{
+ vlv_flisdsi_get(dev_priv);
+
+ vlv_flisdsi_write(dev_priv, 0x08, 0x0001);
+ vlv_flisdsi_write(dev_priv, 0x0F, 0x0005);
+ vlv_flisdsi_write(dev_priv, 0x0F, 0x0025);
+ udelay(150);
+ vlv_flisdsi_write(dev_priv, 0x0F, 0x0000);
+ vlv_flisdsi_write(dev_priv, 0x08, 0x0000);
+
+ vlv_flisdsi_put(dev_priv);
+}
+
+static int intel_dsi_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
+ base);
+ struct intel_connector *intel_connector = intel_dsi->attached_connector;
+ struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
+ int ret;
+
+ drm_dbg_kms(&dev_priv->drm, "\n");
+ pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
+
+ ret = intel_panel_compute_config(intel_connector, adjusted_mode);
+ if (ret)
+ return ret;
+
+ ret = intel_panel_fitting(pipe_config, conn_state);
+ if (ret)
+ return ret;
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return -EINVAL;
+
+ /* DSI uses short packets for sync events, so clear mode flags for DSI */
+ adjusted_mode->flags = 0;
+
+ if (intel_dsi->pixel_format == MIPI_DSI_FMT_RGB888)
+ pipe_config->pipe_bpp = 24;
+ else
+ pipe_config->pipe_bpp = 18;
+
+ if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ /* Enable Frame time stamp based scanline reporting */
+ pipe_config->mode_flags |=
+ I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP;
+
+ /* Dual link goes to DSI transcoder A. */
+ if (intel_dsi->ports == BIT(PORT_C))
+ pipe_config->cpu_transcoder = TRANSCODER_DSI_C;
+ else
+ pipe_config->cpu_transcoder = TRANSCODER_DSI_A;
+
+ ret = bxt_dsi_pll_compute(encoder, pipe_config);
+ if (ret)
+ return -EINVAL;
+ } else {
+ ret = vlv_dsi_pll_compute(encoder, pipe_config);
+ if (ret)
+ return -EINVAL;
+ }
+
+ pipe_config->clock_set = true;
+
+ return 0;
+}
+
+static bool glk_dsi_enable_io(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ enum port port;
+ u32 tmp;
+ bool cold_boot = false;
+
+	/*
+	 * Set the MIPI mode.
+	 * If MIPI_Mode is off, writes to the LP_Wake bit have no effect,
+	 * so power on the MIPI IO first and only then write the IO reset
+	 * and LP wake bits.
+	 */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
+ intel_de_write(dev_priv, MIPI_CTRL(port),
+ tmp | GLK_MIPIIO_ENABLE);
+ }
+
+ /* Put the IO into reset */
+ tmp = intel_de_read(dev_priv, MIPI_CTRL(PORT_A));
+ tmp &= ~GLK_MIPIIO_RESET_RELEASED;
+ intel_de_write(dev_priv, MIPI_CTRL(PORT_A), tmp);
+
+ /* Program LP Wake */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
+ if (!(intel_de_read(dev_priv, MIPI_DEVICE_READY(port)) & DEVICE_READY))
+ tmp &= ~GLK_LP_WAKE;
+ else
+ tmp |= GLK_LP_WAKE;
+ intel_de_write(dev_priv, MIPI_CTRL(port), tmp);
+ }
+
+ /* Wait for Pwr ACK */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ if (intel_de_wait_for_set(dev_priv, MIPI_CTRL(port),
+ GLK_MIPIIO_PORT_POWERED, 20))
+ drm_err(&dev_priv->drm, "MIPIO port is powergated\n");
+ }
+
+ /* Check for cold boot scenario */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ cold_boot |=
+ !(intel_de_read(dev_priv, MIPI_DEVICE_READY(port)) & DEVICE_READY);
+ }
+
+ return cold_boot;
+}
+
+static void glk_dsi_device_ready(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ enum port port;
+ u32 val;
+
+ /* Wait for MIPI PHY status bit to set */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ if (intel_de_wait_for_set(dev_priv, MIPI_CTRL(port),
+ GLK_PHY_STATUS_PORT_READY, 20))
+ drm_err(&dev_priv->drm, "PHY is not ON\n");
+ }
+
+ /* Get IO out of reset */
+ val = intel_de_read(dev_priv, MIPI_CTRL(PORT_A));
+ intel_de_write(dev_priv, MIPI_CTRL(PORT_A),
+ val | GLK_MIPIIO_RESET_RELEASED);
+
+	/* Get IO out of Low power state */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ if (!(intel_de_read(dev_priv, MIPI_DEVICE_READY(port)) & DEVICE_READY)) {
+ val = intel_de_read(dev_priv, MIPI_DEVICE_READY(port));
+ val &= ~ULPS_STATE_MASK;
+ val |= DEVICE_READY;
+ intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val);
+ usleep_range(10, 15);
+ } else {
+ /* Enter ULPS */
+ val = intel_de_read(dev_priv, MIPI_DEVICE_READY(port));
+ val &= ~ULPS_STATE_MASK;
+ val |= (ULPS_STATE_ENTER | DEVICE_READY);
+ intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val);
+
+ /* Wait for ULPS active */
+ if (intel_de_wait_for_clear(dev_priv, MIPI_CTRL(port),
+ GLK_ULPS_NOT_ACTIVE, 20))
+ drm_err(&dev_priv->drm, "ULPS not active\n");
+
+ /* Exit ULPS */
+ val = intel_de_read(dev_priv, MIPI_DEVICE_READY(port));
+ val &= ~ULPS_STATE_MASK;
+ val |= (ULPS_STATE_EXIT | DEVICE_READY);
+ intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val);
+
+ /* Enter Normal Mode */
+ val = intel_de_read(dev_priv, MIPI_DEVICE_READY(port));
+ val &= ~ULPS_STATE_MASK;
+ val |= (ULPS_STATE_NORMAL_OPERATION | DEVICE_READY);
+ intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val);
+
+ val = intel_de_read(dev_priv, MIPI_CTRL(port));
+ val &= ~GLK_LP_WAKE;
+ intel_de_write(dev_priv, MIPI_CTRL(port), val);
+ }
+ }
+
+ /* Wait for Stop state */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ if (intel_de_wait_for_set(dev_priv, MIPI_CTRL(port),
+ GLK_DATA_LANE_STOP_STATE, 20))
+ drm_err(&dev_priv->drm,
+				"Data lane not in STOP state\n");
+ }
+
+ /* Wait for AFE LATCH */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ if (intel_de_wait_for_set(dev_priv, BXT_MIPI_PORT_CTRL(port),
+ AFE_LATCHOUT, 20))
+ drm_err(&dev_priv->drm,
+ "D-PHY not entering LP-11 state\n");
+ }
+}
+
+static void bxt_dsi_device_ready(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ enum port port;
+ u32 val;
+
+ drm_dbg_kms(&dev_priv->drm, "\n");
+
+ /* Enable MIPI PHY transparent latch */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ val = intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port));
+ intel_de_write(dev_priv, BXT_MIPI_PORT_CTRL(port),
+ val | LP_OUTPUT_HOLD);
+ usleep_range(2000, 2500);
+ }
+
+ /* Clear ULPS and set device ready */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ val = intel_de_read(dev_priv, MIPI_DEVICE_READY(port));
+ val &= ~ULPS_STATE_MASK;
+ intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val);
+ usleep_range(2000, 2500);
+ val |= DEVICE_READY;
+ intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val);
+ }
+}
+
+static void vlv_dsi_device_ready(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ enum port port;
+ u32 val;
+
+ drm_dbg_kms(&dev_priv->drm, "\n");
+
+ vlv_flisdsi_get(dev_priv);
+	/*
+	 * Program rcomp for compliance: reduce from 50 ohms to 45 ohms.
+	 * Needed every time after power gating.
+	 */
+ vlv_flisdsi_write(dev_priv, 0x04, 0x0004);
+ vlv_flisdsi_put(dev_priv);
+
+	/* a band gap reset is needed every time we power gate */
+ band_gap_reset(dev_priv);
+
+	for_each_dsi_port(port, intel_dsi->ports) {
+ intel_de_write(dev_priv, MIPI_DEVICE_READY(port),
+ ULPS_STATE_ENTER);
+ usleep_range(2500, 3000);
+
+		/*
+		 * Enable the MIPI PHY transparent latch. The bit is common
+		 * to both MIPI Port A and MIPI Port C, but it only exists
+		 * in the Port A register; there is no similar bit in the
+		 * Port C register.
+		 */
+ val = intel_de_read(dev_priv, MIPI_PORT_CTRL(PORT_A));
+ intel_de_write(dev_priv, MIPI_PORT_CTRL(PORT_A),
+ val | LP_OUTPUT_HOLD);
+ usleep_range(1000, 1500);
+
+ intel_de_write(dev_priv, MIPI_DEVICE_READY(port),
+ ULPS_STATE_EXIT);
+ usleep_range(2500, 3000);
+
+ intel_de_write(dev_priv, MIPI_DEVICE_READY(port),
+ DEVICE_READY);
+ usleep_range(2500, 3000);
+ }
+}
+
+static void intel_dsi_device_ready(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
+ if (IS_GEMINILAKE(dev_priv))
+ glk_dsi_device_ready(encoder);
+	else if (IS_BROXTON(dev_priv))
+ bxt_dsi_device_ready(encoder);
+ else
+ vlv_dsi_device_ready(encoder);
+}
+
+static void glk_dsi_enter_low_power_mode(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ enum port port;
+ u32 val;
+
+ /* Enter ULPS */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ val = intel_de_read(dev_priv, MIPI_DEVICE_READY(port));
+ val &= ~ULPS_STATE_MASK;
+ val |= (ULPS_STATE_ENTER | DEVICE_READY);
+ intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val);
+ }
+
+ /* Wait for MIPI PHY status bit to unset */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ if (intel_de_wait_for_clear(dev_priv, MIPI_CTRL(port),
+ GLK_PHY_STATUS_PORT_READY, 20))
+ drm_err(&dev_priv->drm, "PHY is not turning OFF\n");
+ }
+
+ /* Wait for Pwr ACK bit to unset */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ if (intel_de_wait_for_clear(dev_priv, MIPI_CTRL(port),
+ GLK_MIPIIO_PORT_POWERED, 20))
+ drm_err(&dev_priv->drm,
+ "MIPI IO Port is not powergated\n");
+ }
+}
+
+static void glk_dsi_disable_mipi_io(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ enum port port;
+ u32 tmp;
+
+ /* Put the IO into reset */
+ tmp = intel_de_read(dev_priv, MIPI_CTRL(PORT_A));
+ tmp &= ~GLK_MIPIIO_RESET_RELEASED;
+ intel_de_write(dev_priv, MIPI_CTRL(PORT_A), tmp);
+
+ /* Wait for MIPI PHY status bit to unset */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ if (intel_de_wait_for_clear(dev_priv, MIPI_CTRL(port),
+ GLK_PHY_STATUS_PORT_READY, 20))
+ drm_err(&dev_priv->drm, "PHY is not turning OFF\n");
+ }
+
+ /* Clear MIPI mode */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
+ tmp &= ~GLK_MIPIIO_ENABLE;
+ intel_de_write(dev_priv, MIPI_CTRL(port), tmp);
+ }
+}
+
+static void glk_dsi_clear_device_ready(struct intel_encoder *encoder)
+{
+ glk_dsi_enter_low_power_mode(encoder);
+ glk_dsi_disable_mipi_io(encoder);
+}
+
+static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ enum port port;
+
+ drm_dbg_kms(&dev_priv->drm, "\n");
+ for_each_dsi_port(port, intel_dsi->ports) {
+ /* Common bit for both MIPI Port A & MIPI Port C on VLV/CHV */
+ i915_reg_t port_ctrl = IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ?
+ BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(PORT_A);
+ u32 val;
+
+ intel_de_write(dev_priv, MIPI_DEVICE_READY(port),
+ DEVICE_READY | ULPS_STATE_ENTER);
+ usleep_range(2000, 2500);
+
+ intel_de_write(dev_priv, MIPI_DEVICE_READY(port),
+ DEVICE_READY | ULPS_STATE_EXIT);
+ usleep_range(2000, 2500);
+
+ intel_de_write(dev_priv, MIPI_DEVICE_READY(port),
+ DEVICE_READY | ULPS_STATE_ENTER);
+ usleep_range(2000, 2500);
+
+ /*
+		 * Wait till the clock lanes are in the LP-00 state: on
+		 * VLV/CHV only MIPI Port A has the AFE_LATCHOUT bit (MIPI
+		 * Port C has no similar bit for checking), while on
+		 * BXT/GLK each port has its own bit.
+ */
+ if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) || port == PORT_A) &&
+ intel_de_wait_for_clear(dev_priv, port_ctrl,
+ AFE_LATCHOUT, 30))
+ drm_err(&dev_priv->drm, "DSI LP not going Low\n");
+
+ /* Disable MIPI PHY transparent latch */
+ val = intel_de_read(dev_priv, port_ctrl);
+ intel_de_write(dev_priv, port_ctrl, val & ~LP_OUTPUT_HOLD);
+ usleep_range(1000, 1500);
+
+ intel_de_write(dev_priv, MIPI_DEVICE_READY(port), 0x00);
+ usleep_range(2000, 2500);
+ }
+}
+
+static void intel_dsi_port_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ enum port port;
+
+ if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) {
+ u32 temp;
+ if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ for_each_dsi_port(port, intel_dsi->ports) {
+ temp = intel_de_read(dev_priv,
+ MIPI_CTRL(port));
+				temp &= ~BXT_PIXEL_OVERLAP_CNT_MASK;
+				temp |= intel_dsi->pixel_overlap <<
+					BXT_PIXEL_OVERLAP_CNT_SHIFT;
+ intel_de_write(dev_priv, MIPI_CTRL(port),
+ temp);
+ }
+ } else {
+ temp = intel_de_read(dev_priv, VLV_CHICKEN_3);
+			temp &= ~PIXEL_OVERLAP_CNT_MASK;
+			temp |= intel_dsi->pixel_overlap <<
+				PIXEL_OVERLAP_CNT_SHIFT;
+ intel_de_write(dev_priv, VLV_CHICKEN_3, temp);
+ }
+ }
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ i915_reg_t port_ctrl = IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ?
+ BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
+ u32 temp;
+
+ temp = intel_de_read(dev_priv, port_ctrl);
+
+ temp &= ~LANE_CONFIGURATION_MASK;
+ temp &= ~DUAL_LINK_MODE_MASK;
+
+ if (intel_dsi->ports == (BIT(PORT_A) | BIT(PORT_C))) {
+ temp |= (intel_dsi->dual_link - 1)
+ << DUAL_LINK_MODE_SHIFT;
+ if (IS_BROXTON(dev_priv))
+ temp |= LANE_CONFIGURATION_DUAL_LINK_A;
+ else
+ temp |= crtc->pipe ?
+ LANE_CONFIGURATION_DUAL_LINK_B :
+ LANE_CONFIGURATION_DUAL_LINK_A;
+ }
+
+ if (intel_dsi->pixel_format != MIPI_DSI_FMT_RGB888)
+ temp |= DITHERING_ENABLE;
+
+ /* assert ip_tg_enable signal */
+ intel_de_write(dev_priv, port_ctrl, temp | DPI_ENABLE);
+ intel_de_posting_read(dev_priv, port_ctrl);
+ }
+}
+
+static void intel_dsi_port_disable(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ enum port port;
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ i915_reg_t port_ctrl = IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ?
+ BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
+ u32 temp;
+
+ /* de-assert ip_tg_enable signal */
+ temp = intel_de_read(dev_priv, port_ctrl);
+ intel_de_write(dev_priv, port_ctrl, temp & ~DPI_ENABLE);
+ intel_de_posting_read(dev_priv, port_ctrl);
+ }
+}
+
+static void intel_dsi_wait_panel_power_cycle(struct intel_dsi *intel_dsi)
+{
+ ktime_t panel_power_on_time;
+ s64 panel_power_off_duration;
+
+ panel_power_on_time = ktime_get_boottime();
+ panel_power_off_duration = ktime_ms_delta(panel_power_on_time,
+ intel_dsi->panel_power_off_time);
+
+ if (panel_power_off_duration < (s64)intel_dsi->panel_pwr_cycle_delay)
+ msleep(intel_dsi->panel_pwr_cycle_delay - panel_power_off_duration);
+}
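+
+/*
+ * Example: with a panel_pwr_cycle_delay of 500ms and a panel that was
+ * powered off 200ms ago, the wait above sleeps for the remaining 300ms
+ * before the panel may be powered back on.
+ */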
+
+static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
+ const struct intel_crtc_state *pipe_config);
+static void intel_dsi_unprepare(struct intel_encoder *encoder);
+
+/*
+ * Panel enable/disable sequences from the VBT spec.
+ *
+ * Note the spec has AssertReset / DeassertReset swapped from their
+ * usual naming. We use the normal names to avoid confusion (so below
+ * they are swapped compared to the spec).
+ *
+ * Steps starting with MIPI refer to VBT sequences. Note that for v2
+ * VBTs several steps which have a dedicated sequence in v3 are expected
+ * to be handled directly by the driver, for example by driving GPIOs.
+ *
+ * v2 video mode seq v3 video mode seq command mode seq
+ * - power on - MIPIPanelPowerOn - power on
+ * - wait t1+t2 - wait t1+t2
+ * - MIPIDeassertResetPin - MIPIDeassertResetPin - MIPIDeassertResetPin
+ * - io lines to lp-11 - io lines to lp-11 - io lines to lp-11
+ * - MIPISendInitialDcsCmds - MIPISendInitialDcsCmds - MIPISendInitialDcsCmds
+ * - MIPITearOn
+ * - MIPIDisplayOn
+ * - turn on DPI - turn on DPI - set pipe to dsr mode
+ * - MIPIDisplayOn - MIPIDisplayOn
+ * - wait t5 - wait t5
+ * - backlight on - MIPIBacklightOn - backlight on
+ * ... ... ... issue mem cmds ...
+ * - backlight off - MIPIBacklightOff - backlight off
+ * - wait t6 - wait t6
+ * - MIPIDisplayOff
+ * - turn off DPI - turn off DPI - disable pipe dsr mode
+ * - MIPITearOff
+ * - MIPIDisplayOff - MIPIDisplayOff
+ * - io lines to lp-00 - io lines to lp-00 - io lines to lp-00
+ * - MIPIAssertResetPin - MIPIAssertResetPin - MIPIAssertResetPin
+ * - wait t3 - wait t3
+ * - power off - MIPIPanelPowerOff - power off
+ * - wait t4 - wait t4
+ */
+
+/*
+ * DSI port enable has to be done before pipe and plane enable, so we do it in
+ * the pre_enable hook instead of the enable hook.
+ */
+static void intel_dsi_pre_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ enum port port;
+ u32 val;
+ bool glk_cold_boot = false;
+
+ drm_dbg_kms(&dev_priv->drm, "\n");
+
+ intel_dsi_wait_panel_power_cycle(intel_dsi);
+
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
+
+ /*
+ * The BIOS may leave the PLL in a wonky state where it doesn't
+ * lock. It needs to be fully powered down to fix it.
+ */
+ if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ bxt_dsi_pll_disable(encoder);
+ bxt_dsi_pll_enable(encoder, pipe_config);
+ } else {
+ vlv_dsi_pll_disable(encoder);
+ vlv_dsi_pll_enable(encoder, pipe_config);
+ }
+
+ if (IS_BROXTON(dev_priv)) {
+ /* Add MIPI IO reset programming for modeset */
+ val = intel_de_read(dev_priv, BXT_P_CR_GT_DISP_PWRON);
+ intel_de_write(dev_priv, BXT_P_CR_GT_DISP_PWRON,
+ val | MIPIO_RST_CTRL);
+
+ /* Power up DSI regulator */
+ intel_de_write(dev_priv, BXT_P_DSI_REGULATOR_CFG, STAP_SELECT);
+ intel_de_write(dev_priv, BXT_P_DSI_REGULATOR_TX_CTRL, 0);
+ }
+
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ u32 val;
+
+ /* Disable DPOunit clock gating, can stall pipe */
+ val = intel_de_read(dev_priv, DSPCLK_GATE_D(dev_priv));
+ val |= DPOUNIT_CLOCK_GATE_DISABLE;
+ intel_de_write(dev_priv, DSPCLK_GATE_D(dev_priv), val);
+ }
+
+ if (!IS_GEMINILAKE(dev_priv))
+ intel_dsi_prepare(encoder, pipe_config);
+
+ /* Give the panel time to power-on and then deassert its reset */
+ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_ON);
+ msleep(intel_dsi->panel_on_delay);
+ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET);
+
+ if (IS_GEMINILAKE(dev_priv)) {
+ glk_cold_boot = glk_dsi_enable_io(encoder);
+
+ /* Prepare port in cold boot(s3/s4) scenario */
+ if (glk_cold_boot)
+ intel_dsi_prepare(encoder, pipe_config);
+ }
+
+ /* Put device in ready state (LP-11) */
+ intel_dsi_device_ready(encoder);
+
+ /* Prepare port in normal boot scenario */
+ if (IS_GEMINILAKE(dev_priv) && !glk_cold_boot)
+ intel_dsi_prepare(encoder, pipe_config);
+
+ /* Send initialization commands in LP mode */
+ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_INIT_OTP);
+
+ /*
+	 * Enable the port already in the pre-enable phase: per the hw
+	 * team's recommendation, the port should be enabled before the
+	 * plane and pipe.
+ */
+ if (is_cmd_mode(intel_dsi)) {
+ for_each_dsi_port(port, intel_dsi->ports)
+ intel_de_write(dev_priv,
+ MIPI_MAX_RETURN_PKT_SIZE(port), 8 * 4);
+ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_TEAR_ON);
+ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON);
+ } else {
+ msleep(20); /* XXX */
+ for_each_dsi_port(port, intel_dsi->ports)
+ dpi_send_cmd(intel_dsi, TURN_ON, false, port);
+ msleep(100);
+
+ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON);
+
+ intel_dsi_port_enable(encoder, pipe_config);
+ }
+
+ intel_backlight_enable(pipe_config, conn_state);
+ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_ON);
+}
+
+static void bxt_dsi_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ drm_WARN_ON(state->base.dev, crtc_state->has_pch_encoder);
+
+ intel_crtc_vblank_on(crtc_state);
+}
+
+/*
+ * DSI port disable has to be done after pipe and plane disable, so we do it in
+ * the post_disable hook.
+ */
+static void intel_dsi_disable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ enum port port;
+
+ drm_dbg_kms(&i915->drm, "\n");
+
+ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_OFF);
+ intel_backlight_disable(old_conn_state);
+
+ /*
+ * According to the spec we should send SHUTDOWN before
+ * MIPI_SEQ_DISPLAY_OFF only for v3+ VBTs, but field testing
+ * has shown that the v3 sequence works for v2 VBTs too
+ */
+ if (is_vid_mode(intel_dsi)) {
+ /* Send Shutdown command to the panel in LP mode */
+ for_each_dsi_port(port, intel_dsi->ports)
+ dpi_send_cmd(intel_dsi, SHUTDOWN, false, port);
+ msleep(10);
+ }
+}
+
+static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
+ if (IS_GEMINILAKE(dev_priv))
+ glk_dsi_clear_device_ready(encoder);
+ else
+ vlv_dsi_clear_device_ready(encoder);
+}
+
+static void intel_dsi_post_disable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ enum port port;
+ u32 val;
+
+ drm_dbg_kms(&dev_priv->drm, "\n");
+
+ if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ intel_crtc_vblank_off(old_crtc_state);
+
+ skl_scaler_disable(old_crtc_state);
+ }
+
+ if (is_vid_mode(intel_dsi)) {
+ for_each_dsi_port(port, intel_dsi->ports)
+ vlv_dsi_wait_for_fifo_empty(intel_dsi, port);
+
+ intel_dsi_port_disable(encoder);
+ usleep_range(2000, 5000);
+ }
+
+ intel_dsi_unprepare(encoder);
+
+ /*
+	 * If the disable packets are sent before the shutdown packet, a
+	 * "send turn on packet" error is observed during some subsequent
+	 * enable sequence.
+ */
+ if (is_cmd_mode(intel_dsi))
+ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_TEAR_OFF);
+ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_OFF);
+
+ /* Transition to LP-00 */
+ intel_dsi_clear_device_ready(encoder);
+
+ if (IS_BROXTON(dev_priv)) {
+ /* Power down DSI regulator to save power */
+ intel_de_write(dev_priv, BXT_P_DSI_REGULATOR_CFG, STAP_SELECT);
+ intel_de_write(dev_priv, BXT_P_DSI_REGULATOR_TX_CTRL,
+ HS_IO_CTRL_SELECT);
+
+ /* Add MIPI IO reset programming for modeset */
+ val = intel_de_read(dev_priv, BXT_P_CR_GT_DISP_PWRON);
+ intel_de_write(dev_priv, BXT_P_CR_GT_DISP_PWRON,
+ val & ~MIPIO_RST_CTRL);
+ }
+
+ if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ bxt_dsi_pll_disable(encoder);
+ } else {
+ u32 val;
+
+ vlv_dsi_pll_disable(encoder);
+
+ val = intel_de_read(dev_priv, DSPCLK_GATE_D(dev_priv));
+ val &= ~DPOUNIT_CLOCK_GATE_DISABLE;
+ intel_de_write(dev_priv, DSPCLK_GATE_D(dev_priv), val);
+ }
+
+ /* Assert reset */
+ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_ASSERT_RESET);
+
+ msleep(intel_dsi->panel_off_delay);
+ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_OFF);
+
+ intel_dsi->panel_power_off_time = ktime_get_boottime();
+}
+
+static void intel_dsi_shutdown(struct intel_encoder *encoder)
+{
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+
+ intel_dsi_wait_panel_power_cycle(intel_dsi);
+}
+
+static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ intel_wakeref_t wakeref;
+ enum port port;
+ bool active = false;
+
+ drm_dbg_kms(&dev_priv->drm, "\n");
+
+ wakeref = intel_display_power_get_if_enabled(dev_priv,
+ encoder->power_domain);
+ if (!wakeref)
+ return false;
+
+ /*
+ * On Broxton the PLL needs to be enabled with a valid divider
+ * configuration, otherwise accessing DSI registers will hang the
+ * machine. See BSpec North Display Engine registers/MIPI[BXT].
+ */
+ if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
+ !bxt_dsi_pll_is_enabled(dev_priv))
+ goto out_put_power;
+
+ /* XXX: this only works for one DSI output */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ i915_reg_t ctrl_reg = IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ?
+ BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
+ bool enabled = intel_de_read(dev_priv, ctrl_reg) & DPI_ENABLE;
+
+ /*
+ * Due to some hardware limitations on VLV/CHV, the DPI enable
+ * bit in port C control register does not get set. As a
+ * workaround, check pipe B conf instead.
+ */
+ if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ port == PORT_C)
+ enabled = intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
+
+ /* Try command mode if video mode not enabled */
+ if (!enabled) {
+ u32 tmp = intel_de_read(dev_priv,
+ MIPI_DSI_FUNC_PRG(port));
+ enabled = tmp & CMD_MODE_DATA_WIDTH_MASK;
+ }
+
+ if (!enabled)
+ continue;
+
+ if (!(intel_de_read(dev_priv, MIPI_DEVICE_READY(port)) & DEVICE_READY))
+ continue;
+
+ if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ u32 tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
+ tmp &= BXT_PIPE_SELECT_MASK;
+ tmp >>= BXT_PIPE_SELECT_SHIFT;
+
+ if (drm_WARN_ON(&dev_priv->drm, tmp > PIPE_C))
+ continue;
+
+ *pipe = tmp;
+ } else {
+ *pipe = port == PORT_A ? PIPE_A : PIPE_B;
+ }
+
+ active = true;
+ break;
+ }
+
+out_put_power:
+ intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
+
+ return active;
+}
+
+static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_display_mode *adjusted_mode =
+ &pipe_config->hw.adjusted_mode;
+ struct drm_display_mode *adjusted_mode_sw;
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ unsigned int lane_count = intel_dsi->lane_count;
+ unsigned int bpp, fmt;
+ enum port port;
+ u16 hactive, hfp, hsync, hbp, vfp, vsync, vbp;
+ u16 hfp_sw, hsync_sw, hbp_sw;
+ u16 crtc_htotal_sw, crtc_hsync_start_sw, crtc_hsync_end_sw,
+ crtc_hblank_start_sw, crtc_hblank_end_sw;
+
+ /* FIXME: hw readout should not depend on SW state */
+ adjusted_mode_sw = &crtc->config->hw.adjusted_mode;
+
+ /*
+	 * At least one port is active, as encoder->get_config() is called
+	 * only if encoder->get_hw_state() returns true.
+ */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ if (intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port)) & DPI_ENABLE)
+ break;
+ }
+
+ fmt = intel_de_read(dev_priv, MIPI_DSI_FUNC_PRG(port)) & VID_MODE_FORMAT_MASK;
+ bpp = mipi_dsi_pixel_format_to_bpp(
+ pixel_format_from_register_bits(fmt));
+
+ pipe_config->pipe_bpp = bdw_get_pipemisc_bpp(crtc);
+
+	/* Enable frame time stamp based scanline reporting */
+ pipe_config->mode_flags |=
+ I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP;
+
+ /* In terms of pixels */
+ adjusted_mode->crtc_hdisplay =
+ intel_de_read(dev_priv,
+ BXT_MIPI_TRANS_HACTIVE(port));
+ adjusted_mode->crtc_vdisplay =
+ intel_de_read(dev_priv,
+ BXT_MIPI_TRANS_VACTIVE(port));
+ adjusted_mode->crtc_vtotal =
+ intel_de_read(dev_priv,
+ BXT_MIPI_TRANS_VTOTAL(port));
+
+ hactive = adjusted_mode->crtc_hdisplay;
+ hfp = intel_de_read(dev_priv, MIPI_HFP_COUNT(port));
+
+	/*
+	 * Meaningful for video mode non-burst sync pulse mode only; can be
+	 * zero for non-burst sync events and burst modes.
+	 */
+ hsync = intel_de_read(dev_priv, MIPI_HSYNC_PADDING_COUNT(port));
+ hbp = intel_de_read(dev_priv, MIPI_HBP_COUNT(port));
+
+	/* horizontal values are in terms of the high speed byte clock */
+ hfp = pixels_from_txbyteclkhs(hfp, bpp, lane_count,
+ intel_dsi->burst_mode_ratio);
+ hsync = pixels_from_txbyteclkhs(hsync, bpp, lane_count,
+ intel_dsi->burst_mode_ratio);
+ hbp = pixels_from_txbyteclkhs(hbp, bpp, lane_count,
+ intel_dsi->burst_mode_ratio);
+
+ if (intel_dsi->dual_link) {
+ hfp *= 2;
+ hsync *= 2;
+ hbp *= 2;
+ }
+
+ /* vertical values are in terms of lines */
+ vfp = intel_de_read(dev_priv, MIPI_VFP_COUNT(port));
+ vsync = intel_de_read(dev_priv, MIPI_VSYNC_PADDING_COUNT(port));
+ vbp = intel_de_read(dev_priv, MIPI_VBP_COUNT(port));
+
+ adjusted_mode->crtc_htotal = hactive + hfp + hsync + hbp;
+ adjusted_mode->crtc_hsync_start = hfp + adjusted_mode->crtc_hdisplay;
+ adjusted_mode->crtc_hsync_end = hsync + adjusted_mode->crtc_hsync_start;
+ adjusted_mode->crtc_hblank_start = adjusted_mode->crtc_hdisplay;
+ adjusted_mode->crtc_hblank_end = adjusted_mode->crtc_htotal;
+
+ adjusted_mode->crtc_vsync_start = vfp + adjusted_mode->crtc_vdisplay;
+ adjusted_mode->crtc_vsync_end = vsync + adjusted_mode->crtc_vsync_start;
+ adjusted_mode->crtc_vblank_start = adjusted_mode->crtc_vdisplay;
+ adjusted_mode->crtc_vblank_end = adjusted_mode->crtc_vtotal;
+
+	/*
+	 * On BXT some of the horizontal timings are programmed not in
+	 * pixels but in txbyteclkhs, so the readout accumulates round-up
+	 * errors from the pixels <=> txbyteclkhs conversions. To compensate,
+	 * take the SW state's adjusted_mode, compute the value that would
+	 * have been programmed to the port, and convert it back to a
+	 * horizontal timing in pixels. That is the expected readout value,
+	 * including the round-up errors. If it matches the value actually
+	 * retrieved from the port, the (HW state) adjusted_mode's horizontal
+	 * timings are corrected to match the SW state, nullifying the errors.
+	 */
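+	/*
+	 * For illustration, assuming txbyteclkhs() and
+	 * pixels_from_txbyteclkhs() both round up: with 24 bpp, 4 lanes and
+	 * a 1:1 burst ratio, an hfp of 110 pixels is programmed as
+	 * DIV_ROUND_UP(110 * 24, 8 * 4) = 83 byte clocks, which reads back
+	 * as DIV_ROUND_UP(83 * 8 * 4, 24) = 111 pixels - one pixel of
+	 * round-up error that the comparison below nullifies.
+	 */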
+ /* Calculating the value programmed to the Port register */
+ hfp_sw = adjusted_mode_sw->crtc_hsync_start -
+ adjusted_mode_sw->crtc_hdisplay;
+ hsync_sw = adjusted_mode_sw->crtc_hsync_end -
+ adjusted_mode_sw->crtc_hsync_start;
+ hbp_sw = adjusted_mode_sw->crtc_htotal -
+ adjusted_mode_sw->crtc_hsync_end;
+
+ if (intel_dsi->dual_link) {
+ hfp_sw /= 2;
+ hsync_sw /= 2;
+ hbp_sw /= 2;
+ }
+
+ hfp_sw = txbyteclkhs(hfp_sw, bpp, lane_count,
+ intel_dsi->burst_mode_ratio);
+ hsync_sw = txbyteclkhs(hsync_sw, bpp, lane_count,
+ intel_dsi->burst_mode_ratio);
+ hbp_sw = txbyteclkhs(hbp_sw, bpp, lane_count,
+ intel_dsi->burst_mode_ratio);
+
+	/* Reverse calculate the adjusted mode parameters from port reg values */
+ hfp_sw = pixels_from_txbyteclkhs(hfp_sw, bpp, lane_count,
+ intel_dsi->burst_mode_ratio);
+ hsync_sw = pixels_from_txbyteclkhs(hsync_sw, bpp, lane_count,
+ intel_dsi->burst_mode_ratio);
+ hbp_sw = pixels_from_txbyteclkhs(hbp_sw, bpp, lane_count,
+ intel_dsi->burst_mode_ratio);
+
+ if (intel_dsi->dual_link) {
+ hfp_sw *= 2;
+ hsync_sw *= 2;
+ hbp_sw *= 2;
+ }
+
+ crtc_htotal_sw = adjusted_mode_sw->crtc_hdisplay + hfp_sw +
+ hsync_sw + hbp_sw;
+ crtc_hsync_start_sw = hfp_sw + adjusted_mode_sw->crtc_hdisplay;
+ crtc_hsync_end_sw = hsync_sw + crtc_hsync_start_sw;
+ crtc_hblank_start_sw = adjusted_mode_sw->crtc_hdisplay;
+ crtc_hblank_end_sw = crtc_htotal_sw;
+
+ if (adjusted_mode->crtc_htotal == crtc_htotal_sw)
+ adjusted_mode->crtc_htotal = adjusted_mode_sw->crtc_htotal;
+
+ if (adjusted_mode->crtc_hsync_start == crtc_hsync_start_sw)
+ adjusted_mode->crtc_hsync_start =
+ adjusted_mode_sw->crtc_hsync_start;
+
+ if (adjusted_mode->crtc_hsync_end == crtc_hsync_end_sw)
+ adjusted_mode->crtc_hsync_end =
+ adjusted_mode_sw->crtc_hsync_end;
+
+ if (adjusted_mode->crtc_hblank_start == crtc_hblank_start_sw)
+ adjusted_mode->crtc_hblank_start =
+ adjusted_mode_sw->crtc_hblank_start;
+
+ if (adjusted_mode->crtc_hblank_end == crtc_hblank_end_sw)
+ adjusted_mode->crtc_hblank_end =
+ adjusted_mode_sw->crtc_hblank_end;
+}
+
+static void intel_dsi_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ u32 pclk;
+
+ drm_dbg_kms(&dev_priv->drm, "\n");
+
+ pipe_config->output_types |= BIT(INTEL_OUTPUT_DSI);
+
+ if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ bxt_dsi_get_pipe_config(encoder, pipe_config);
+ pclk = bxt_dsi_get_pclk(encoder, pipe_config);
+ } else {
+ pclk = vlv_dsi_get_pclk(encoder, pipe_config);
+ }
+
+ pipe_config->port_clock = pclk;
+
+ /* FIXME definitely not right for burst/cmd mode/pixel overlap */
+ pipe_config->hw.adjusted_mode.crtc_clock = pclk;
+ if (intel_dsi->dual_link)
+ pipe_config->hw.adjusted_mode.crtc_clock *= 2;
+}
+
+/*
+ * Return the number of txclkesc cycles for the given escape clock
+ * divider and a duration given in us.
+ */
+static u16 txclkesc(u32 divider, unsigned int us)
+{
+ switch (divider) {
+ case ESCAPE_CLOCK_DIVIDER_1:
+ default:
+ return 20 * us;
+ case ESCAPE_CLOCK_DIVIDER_2:
+ return 10 * us;
+ case ESCAPE_CLOCK_DIVIDER_4:
+ return 5 * us;
+ }
+}
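+
+/*
+ * For example, with ESCAPE_CLOCK_DIVIDER_1 the escape clock runs at 20 MHz,
+ * i.e. 20 cycles per us, so txclkesc(ESCAPE_CLOCK_DIVIDER_1, 100) returns
+ * 20 * 100 = 2000 cycles for a 100 us duration.
+ */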
+
+static void set_dsi_timings(struct drm_encoder *encoder,
+ const struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(encoder));
+ enum port port;
+ unsigned int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
+ unsigned int lane_count = intel_dsi->lane_count;
+
+ u16 hactive, hfp, hsync, hbp, vfp, vsync, vbp;
+
+ hactive = adjusted_mode->crtc_hdisplay;
+ hfp = adjusted_mode->crtc_hsync_start - adjusted_mode->crtc_hdisplay;
+ hsync = adjusted_mode->crtc_hsync_end - adjusted_mode->crtc_hsync_start;
+ hbp = adjusted_mode->crtc_htotal - adjusted_mode->crtc_hsync_end;
+
+ if (intel_dsi->dual_link) {
+ hactive /= 2;
+ if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK)
+ hactive += intel_dsi->pixel_overlap;
+ hfp /= 2;
+ hsync /= 2;
+ hbp /= 2;
+ }
+
+ vfp = adjusted_mode->crtc_vsync_start - adjusted_mode->crtc_vdisplay;
+ vsync = adjusted_mode->crtc_vsync_end - adjusted_mode->crtc_vsync_start;
+ vbp = adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vsync_end;
+
+ /* horizontal values are in terms of high speed byte clock */
+ hactive = txbyteclkhs(hactive, bpp, lane_count,
+ intel_dsi->burst_mode_ratio);
+ hfp = txbyteclkhs(hfp, bpp, lane_count, intel_dsi->burst_mode_ratio);
+ hsync = txbyteclkhs(hsync, bpp, lane_count,
+ intel_dsi->burst_mode_ratio);
+ hbp = txbyteclkhs(hbp, bpp, lane_count, intel_dsi->burst_mode_ratio);
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+			/*
+			 * Program hdisplay and vdisplay on the MIPI
+			 * transcoder. These differ from the calculated
+			 * hactive and vactive, which are computed on a
+			 * per-channel basis, whereas these values must be
+			 * based on the full resolution.
+			 */
+ intel_de_write(dev_priv, BXT_MIPI_TRANS_HACTIVE(port),
+ adjusted_mode->crtc_hdisplay);
+ intel_de_write(dev_priv, BXT_MIPI_TRANS_VACTIVE(port),
+ adjusted_mode->crtc_vdisplay);
+ intel_de_write(dev_priv, BXT_MIPI_TRANS_VTOTAL(port),
+ adjusted_mode->crtc_vtotal);
+ }
+
+ intel_de_write(dev_priv, MIPI_HACTIVE_AREA_COUNT(port),
+ hactive);
+ intel_de_write(dev_priv, MIPI_HFP_COUNT(port), hfp);
+
+ /* meaningful for video mode non-burst sync pulse mode only,
+ * can be zero for non-burst sync events and burst modes */
+ intel_de_write(dev_priv, MIPI_HSYNC_PADDING_COUNT(port),
+ hsync);
+ intel_de_write(dev_priv, MIPI_HBP_COUNT(port), hbp);
+
+ /* vertical values are in terms of lines */
+ intel_de_write(dev_priv, MIPI_VFP_COUNT(port), vfp);
+ intel_de_write(dev_priv, MIPI_VSYNC_PADDING_COUNT(port),
+ vsync);
+ intel_de_write(dev_priv, MIPI_VBP_COUNT(port), vbp);
+ }
+}
+
+static u32 pixel_format_to_reg(enum mipi_dsi_pixel_format fmt)
+{
+ switch (fmt) {
+ case MIPI_DSI_FMT_RGB888:
+ return VID_MODE_FORMAT_RGB888;
+ case MIPI_DSI_FMT_RGB666:
+ return VID_MODE_FORMAT_RGB666;
+ case MIPI_DSI_FMT_RGB666_PACKED:
+ return VID_MODE_FORMAT_RGB666_PACKED;
+ case MIPI_DSI_FMT_RGB565:
+ return VID_MODE_FORMAT_RGB565;
+ default:
+ MISSING_CASE(fmt);
+ return VID_MODE_FORMAT_RGB666;
+ }
+}
+
+static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
+ const struct intel_crtc_state *pipe_config)
+{
+ struct drm_encoder *encoder = &intel_encoder->base;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(encoder));
+ const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
+ enum port port;
+ unsigned int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
+ u32 val, tmp;
+ u16 mode_hdisplay;
+
+ drm_dbg_kms(&dev_priv->drm, "pipe %c\n", pipe_name(crtc->pipe));
+
+ mode_hdisplay = adjusted_mode->crtc_hdisplay;
+
+ if (intel_dsi->dual_link) {
+ mode_hdisplay /= 2;
+ if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK)
+ mode_hdisplay += intel_dsi->pixel_overlap;
+ }
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+			/*
+			 * Escape clock divider, 20 MHz, shared between ports
+			 * A and C. Device ready must be off when programming
+			 * this! txclkesc?
+			 */
+ tmp = intel_de_read(dev_priv, MIPI_CTRL(PORT_A));
+ tmp &= ~ESCAPE_CLOCK_DIVIDER_MASK;
+ intel_de_write(dev_priv, MIPI_CTRL(PORT_A),
+ tmp | ESCAPE_CLOCK_DIVIDER_1);
+
+ /* read request priority is per pipe */
+ tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
+ tmp &= ~READ_REQUEST_PRIORITY_MASK;
+ intel_de_write(dev_priv, MIPI_CTRL(port),
+ tmp | READ_REQUEST_PRIORITY_HIGH);
+ } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ enum pipe pipe = crtc->pipe;
+
+ tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
+ tmp &= ~BXT_PIPE_SELECT_MASK;
+
+ tmp |= BXT_PIPE_SELECT(pipe);
+ intel_de_write(dev_priv, MIPI_CTRL(port), tmp);
+ }
+
+ /* XXX: why here, why like this? handling in irq handler?! */
+ intel_de_write(dev_priv, MIPI_INTR_STAT(port), 0xffffffff);
+ intel_de_write(dev_priv, MIPI_INTR_EN(port), 0xffffffff);
+
+ intel_de_write(dev_priv, MIPI_DPHY_PARAM(port),
+ intel_dsi->dphy_reg);
+
+ intel_de_write(dev_priv, MIPI_DPI_RESOLUTION(port),
+ adjusted_mode->crtc_vdisplay << VERTICAL_ADDRESS_SHIFT | mode_hdisplay << HORIZONTAL_ADDRESS_SHIFT);
+ }
+
+ set_dsi_timings(encoder, adjusted_mode);
+
+ val = intel_dsi->lane_count << DATA_LANES_PRG_REG_SHIFT;
+ if (is_cmd_mode(intel_dsi)) {
+ val |= intel_dsi->channel << CMD_MODE_CHANNEL_NUMBER_SHIFT;
+ val |= CMD_MODE_DATA_WIDTH_8_BIT; /* XXX */
+ } else {
+ val |= intel_dsi->channel << VID_MODE_CHANNEL_NUMBER_SHIFT;
+ val |= pixel_format_to_reg(intel_dsi->pixel_format);
+ }
+
+ tmp = 0;
+ if (intel_dsi->eotp_pkt == 0)
+ tmp |= EOT_DISABLE;
+ if (intel_dsi->clock_stop)
+ tmp |= CLOCKSTOP;
+
+ if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ tmp |= BXT_DPHY_DEFEATURE_EN;
+ if (!is_cmd_mode(intel_dsi))
+ tmp |= BXT_DEFEATURE_DPI_FIFO_CTR;
+ }
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ intel_de_write(dev_priv, MIPI_DSI_FUNC_PRG(port), val);
+
+		/*
+		 * Timeouts for recovery, one frame IIUC. If the counter
+		 * expires, send EOT and enter stop state.
+		 *
+		 * In burst mode, use a value greater than one DPI line time
+		 * in byte clock (txbyteclkhs); programming 1 + that value is
+		 * recommended for the timer to expire.
+		 *
+		 * In non-burst mode, use a value greater than one DPI frame
+		 * time in byte clock (txbyteclkhs); again, 1 + that value is
+		 * recommended.
+		 *
+		 * In DBI only mode, use a value greater than one DBI frame
+		 * time in byte clock (txbyteclkhs); again, 1 + that value is
+		 * recommended.
+		 */
+
+ if (is_vid_mode(intel_dsi) &&
+ intel_dsi->video_mode == BURST_MODE) {
+ intel_de_write(dev_priv, MIPI_HS_TX_TIMEOUT(port),
+ txbyteclkhs(adjusted_mode->crtc_htotal, bpp, intel_dsi->lane_count, intel_dsi->burst_mode_ratio) + 1);
+ } else {
+ intel_de_write(dev_priv, MIPI_HS_TX_TIMEOUT(port),
+ txbyteclkhs(adjusted_mode->crtc_vtotal * adjusted_mode->crtc_htotal, bpp, intel_dsi->lane_count, intel_dsi->burst_mode_ratio) + 1);
+ }
+ intel_de_write(dev_priv, MIPI_LP_RX_TIMEOUT(port),
+ intel_dsi->lp_rx_timeout);
+ intel_de_write(dev_priv, MIPI_TURN_AROUND_TIMEOUT(port),
+ intel_dsi->turn_arnd_val);
+ intel_de_write(dev_priv, MIPI_DEVICE_RESET_TIMER(port),
+ intel_dsi->rst_timer_val);
+
+ /* dphy stuff */
+
+ /* in terms of low power clock */
+ intel_de_write(dev_priv, MIPI_INIT_COUNT(port),
+ txclkesc(intel_dsi->escape_clk_div, 100));
+
+ if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
+ !intel_dsi->dual_link) {
+ /*
+ * BXT spec says write MIPI_INIT_COUNT for
+ * both the ports, even if only one is
+ * getting used. So write the other port
+ * if not in dual link mode.
+ */
+ intel_de_write(dev_priv,
+ MIPI_INIT_COUNT(port == PORT_A ? PORT_C : PORT_A),
+ intel_dsi->init_count);
+ }
+
+ /* recovery disables */
+ intel_de_write(dev_priv, MIPI_EOT_DISABLE(port), tmp);
+
+ /* in terms of low power clock */
+ intel_de_write(dev_priv, MIPI_INIT_COUNT(port),
+ intel_dsi->init_count);
+
+ /* in terms of txbyteclkhs. actual high to low switch +
+ * MIPI_STOP_STATE_STALL * MIPI_LP_BYTECLK.
+ *
+ * XXX: write MIPI_STOP_STATE_STALL?
+ */
+ intel_de_write(dev_priv, MIPI_HIGH_LOW_SWITCH_COUNT(port),
+ intel_dsi->hs_to_lp_count);
+
+ /* XXX: low power clock equivalence in terms of byte clock.
+ * the number of byte clocks occupied in one low power clock.
+ * based on txbyteclkhs and txclkesc.
+ * txclkesc time / txbyteclk time * (105 + MIPI_STOP_STATE_STALL
+ * ) / 105.???
+ */
+ intel_de_write(dev_priv, MIPI_LP_BYTECLK(port),
+ intel_dsi->lp_byte_clk);
+
+ if (IS_GEMINILAKE(dev_priv)) {
+ intel_de_write(dev_priv, MIPI_TLPX_TIME_COUNT(port),
+ intel_dsi->lp_byte_clk);
+ /* Shadow of DPHY reg */
+ intel_de_write(dev_priv, MIPI_CLK_LANE_TIMING(port),
+ intel_dsi->dphy_reg);
+ }
+
+		/*
+		 * The bandwidth essential for transmitting 16 long packets
+		 * containing 252 bytes meant for the DCS write memory command
+		 * is programmed in this register in terms of byte clocks.
+		 * Based on the DSI transfer rate and the number of lanes
+		 * configured, the time taken to transmit 16 long packets in
+		 * a DSI stream varies.
+		 */
+ intel_de_write(dev_priv, MIPI_DBI_BW_CTRL(port),
+ intel_dsi->bw_timer);
+
+ intel_de_write(dev_priv, MIPI_CLK_LANE_SWITCH_TIME_CNT(port),
+ intel_dsi->clk_lp_to_hs_count << LP_HS_SSW_CNT_SHIFT | intel_dsi->clk_hs_to_lp_count << HS_LP_PWR_SW_CNT_SHIFT);
+
+ if (is_vid_mode(intel_dsi)) {
+ u32 fmt = intel_dsi->video_frmt_cfg_bits | IP_TG_CONFIG;
+
+			/*
+			 * Some panels might have a resolution that is not a
+			 * multiple of 64, like 1366 x 768. Enable RANDOM
+			 * resolution support for such panels by default.
+			 */
+ fmt |= RANDOM_DPI_DISPLAY_RESOLUTION;
+
+ switch (intel_dsi->video_mode) {
+ default:
+ MISSING_CASE(intel_dsi->video_mode);
+ fallthrough;
+ case NON_BURST_SYNC_EVENTS:
+ fmt |= VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS;
+ break;
+ case NON_BURST_SYNC_PULSE:
+ fmt |= VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE;
+ break;
+ case BURST_MODE:
+ fmt |= VIDEO_MODE_BURST;
+ break;
+ }
+
+ intel_de_write(dev_priv, MIPI_VIDEO_MODE_FORMAT(port), fmt);
+ }
+ }
+}
+
+static void intel_dsi_unprepare(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ enum port port;
+ u32 val;
+
+ if (IS_GEMINILAKE(dev_priv))
+ return;
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ /* Panel commands can be sent when clock is in LP11 */
+ intel_de_write(dev_priv, MIPI_DEVICE_READY(port), 0x0);
+
+ if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ bxt_dsi_reset_clocks(encoder, port);
+ else
+ vlv_dsi_reset_clocks(encoder, port);
+ intel_de_write(dev_priv, MIPI_EOT_DISABLE(port), CLOCKSTOP);
+
+ val = intel_de_read(dev_priv, MIPI_DSI_FUNC_PRG(port));
+ val &= ~VID_MODE_FORMAT_MASK;
+ intel_de_write(dev_priv, MIPI_DSI_FUNC_PRG(port), val);
+
+ intel_de_write(dev_priv, MIPI_DEVICE_READY(port), 0x1);
+ }
+}
+
+static void intel_dsi_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(encoder));
+
+ intel_dsi_vbt_gpio_cleanup(intel_dsi);
+ intel_encoder_destroy(encoder);
+}
+
+static const struct drm_encoder_funcs intel_dsi_funcs = {
+ .destroy = intel_dsi_encoder_destroy,
+};
+
+static enum drm_mode_status vlv_dsi_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct drm_i915_private *i915 = to_i915(connector->dev);
+
+ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
+ enum drm_mode_status status;
+
+ status = intel_cpu_transcoder_mode_valid(i915, mode);
+ if (status != MODE_OK)
+ return status;
+ }
+
+ return intel_dsi_mode_valid(connector, mode);
+}
+
+static const struct drm_connector_helper_funcs intel_dsi_connector_helper_funcs = {
+ .get_modes = intel_dsi_get_modes,
+ .mode_valid = vlv_dsi_mode_valid,
+ .atomic_check = intel_digital_connector_atomic_check,
+};
+
+static const struct drm_connector_funcs intel_dsi_connector_funcs = {
+ .detect = intel_panel_detect,
+ .late_register = intel_connector_register,
+ .early_unregister = intel_connector_unregister,
+ .destroy = intel_connector_destroy,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .atomic_get_property = intel_digital_connector_atomic_get_property,
+ .atomic_set_property = intel_digital_connector_atomic_set_property,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_duplicate_state = intel_digital_connector_duplicate_state,
+};
+
+static void vlv_dsi_add_properties(struct intel_connector *connector)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ const struct drm_display_mode *fixed_mode =
+ intel_panel_preferred_fixed_mode(connector);
+ u32 allowed_scalers;
+
+ allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN);
+ if (!HAS_GMCH(dev_priv))
+ allowed_scalers |= BIT(DRM_MODE_SCALE_CENTER);
+
+ drm_connector_attach_scaling_mode_property(&connector->base,
+ allowed_scalers);
+
+ connector->base.state->scaling_mode = DRM_MODE_SCALE_ASPECT;
+
+ drm_connector_set_panel_orientation_with_quirk(&connector->base,
+ intel_dsi_get_panel_orientation(connector),
+ fixed_mode->hdisplay,
+ fixed_mode->vdisplay);
+}
+
+#define NS_KHZ_RATIO 1000000
+
+#define PREPARE_CNT_MAX 0x3F
+#define EXIT_ZERO_CNT_MAX 0x3F
+#define CLK_ZERO_CNT_MAX 0xFF
+#define TRAIL_CNT_MAX 0x1F
+
+static void vlv_dphy_param_init(struct intel_dsi *intel_dsi)
+{
+ struct drm_device *dev = intel_dsi->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_connector *connector = intel_dsi->attached_connector;
+ struct mipi_config *mipi_config = connector->panel.vbt.dsi.config;
+ u32 tlpx_ns, extra_byte_count, tlpx_ui;
+ u32 ui_num, ui_den;
+ u32 prepare_cnt, exit_zero_cnt, clk_zero_cnt, trail_cnt;
+ u32 ths_prepare_ns, tclk_trail_ns;
+ u32 tclk_prepare_clkzero, ths_prepare_hszero;
+ u32 lp_to_hs_switch, hs_to_lp_switch;
+ u32 mul;
+
+ tlpx_ns = intel_dsi_tlpx_ns(intel_dsi);
+
+ switch (intel_dsi->lane_count) {
+ case 1:
+ case 2:
+ extra_byte_count = 2;
+ break;
+ case 3:
+ extra_byte_count = 4;
+ break;
+ case 4:
+ default:
+ extra_byte_count = 3;
+ break;
+ }
+
+ /* in Kbps */
+ ui_num = NS_KHZ_RATIO;
+ ui_den = intel_dsi_bitrate(intel_dsi);
+
+ tclk_prepare_clkzero = mipi_config->tclk_prepare_clkzero;
+ ths_prepare_hszero = mipi_config->ths_prepare_hszero;
+
+	/*
+	 * B060
+	 * LP byte clock = TLPX / (8 * UI)
+	 */
+ intel_dsi->lp_byte_clk = DIV_ROUND_UP(tlpx_ns * ui_den, 8 * ui_num);
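+
+	/*
+	 * E.g. with an assumed 500000 kbps bit rate (UI = 2 ns) and
+	 * TLPX = 50 ns: lp_byte_clk = DIV_ROUND_UP(50 * 500000,
+	 * 8 * 1000000) = 4.
+	 */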
+
+	/*
+	 * DDR clock period = 2 * UI
+	 * UI (sec) = 1 / (bitrate * 10^3) (bitrate is in kHz)
+	 * UI (nsec) = 10^6 / bitrate
+	 * DDR clock period (nsec) = 2 * UI = (2 * 10^6) / bitrate
+	 * DDR clock count = ns_value / DDR clock period
+	 *
+	 * On GEMINILAKE dphy_param_reg is programmed in terms of the HS
+	 * byte clock count; on other platforms it is programmed in HS DDR
+	 * clock counts.
+	 */
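+	/*
+	 * Worked example with assumed numbers: at 500000 kbps, UI =
+	 * 10^6 / 500000 = 2 ns, so the DDR clock period is 4 ns (mul = 2,
+	 * i.e. non-GEMINILAKE); a 40 ns ths_prepare then yields
+	 * prepare_cnt = DIV_ROUND_UP(40 * 500000, 1000000 * 2) = 10 below.
+	 */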
+ mul = IS_GEMINILAKE(dev_priv) ? 8 : 2;
+ ths_prepare_ns = max(mipi_config->ths_prepare,
+ mipi_config->tclk_prepare);
+
+ /* prepare count */
+ prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * ui_den, ui_num * mul);
+
+ if (prepare_cnt > PREPARE_CNT_MAX) {
+ drm_dbg_kms(&dev_priv->drm, "prepare count too high %u\n",
+ prepare_cnt);
+ prepare_cnt = PREPARE_CNT_MAX;
+ }
+
+ /* exit zero count */
+ exit_zero_cnt = DIV_ROUND_UP(
+ (ths_prepare_hszero - ths_prepare_ns) * ui_den,
+ ui_num * mul
+ );
+
+	/*
+	 * Exit zero is a unified value covering ths_zero and ths_exit.
+	 * The minimum value for ths_exit is 110 ns, so:
+	 * min(exit_zero_cnt * 2) = 110 / UI
+	 * exit_zero_cnt = 55 / UI
+	 */
+ if (exit_zero_cnt < (55 * ui_den / ui_num) && (55 * ui_den) % ui_num)
+ exit_zero_cnt += 1;
+
+ if (exit_zero_cnt > EXIT_ZERO_CNT_MAX) {
+ drm_dbg_kms(&dev_priv->drm, "exit zero count too high %u\n",
+ exit_zero_cnt);
+ exit_zero_cnt = EXIT_ZERO_CNT_MAX;
+ }
+
+ /* clk zero count */
+ clk_zero_cnt = DIV_ROUND_UP(
+ (tclk_prepare_clkzero - ths_prepare_ns)
+ * ui_den, ui_num * mul);
+
+ if (clk_zero_cnt > CLK_ZERO_CNT_MAX) {
+ drm_dbg_kms(&dev_priv->drm, "clock zero count too high %u\n",
+ clk_zero_cnt);
+ clk_zero_cnt = CLK_ZERO_CNT_MAX;
+ }
+
+ /* trail count */
+ tclk_trail_ns = max(mipi_config->tclk_trail, mipi_config->ths_trail);
+ trail_cnt = DIV_ROUND_UP(tclk_trail_ns * ui_den, ui_num * mul);
+
+ if (trail_cnt > TRAIL_CNT_MAX) {
+ drm_dbg_kms(&dev_priv->drm, "trail count too high %u\n",
+ trail_cnt);
+ trail_cnt = TRAIL_CNT_MAX;
+ }
+
+ /* B080 */
+ intel_dsi->dphy_reg = exit_zero_cnt << 24 | trail_cnt << 16 |
+ clk_zero_cnt << 8 | prepare_cnt;
+
+	/*
+	 * LP to HS switch count = 4TLPX + PREP_COUNT * mul +
+	 * EXIT_ZERO_COUNT * mul + 10UI + Extra Byte Count
+	 *
+	 * HS to LP switch count = THS-TRAIL + 2TLPX + Extra Byte Count
+	 * Extra Byte Count is calculated according to the number of lanes.
+	 * High Low Switch Count is the max of the LP to HS and HS to LP
+	 * switch counts.
+	 */
+ tlpx_ui = DIV_ROUND_UP(tlpx_ns * ui_den, ui_num);
+
+ /* B044 */
+	/* FIXME: the comment above does not match the code */
+ lp_to_hs_switch = DIV_ROUND_UP(4 * tlpx_ui + prepare_cnt * mul +
+ exit_zero_cnt * mul + 10, 8);
+
+ hs_to_lp_switch = DIV_ROUND_UP(mipi_config->ths_trail + 2 * tlpx_ui, 8);
+
+ intel_dsi->hs_to_lp_count = max(lp_to_hs_switch, hs_to_lp_switch);
+ intel_dsi->hs_to_lp_count += extra_byte_count;
+
+ /* B088 */
+	/* LP -> HS for clock lanes
+	 * LP clk sync + LP11 + LP01 + tclk_prepare + tclk_zero +
+	 * extra byte count
+	 * 2TLPX + 1TLPX + 1TLPX (in ns) + prepare_cnt * 2 + clk_zero_cnt *
+	 * 2 (in UI) + extra byte count
+	 * In byteclks = (4TLPX + prepare_cnt * 2 + clk_zero_cnt * 2 (in UI)) /
+	 * 8 + extra byte count
+	 */
+ intel_dsi->clk_lp_to_hs_count =
+ DIV_ROUND_UP(
+ 4 * tlpx_ui + prepare_cnt * 2 +
+ clk_zero_cnt * 2,
+ 8);
+
+ intel_dsi->clk_lp_to_hs_count += extra_byte_count;
+
+ /* HS->LP for Clock Lanes
+ * Low Power clock synchronisations + 1Tx byteclk + tclk_trail +
+ * Extra byte count
+ * 2TLPX + 8UI + (trail_count*2)(in UI) + Extra byte count
+ * In byteclks = (2*TLpx(in UI) + trail_count*2 +8)(in UI)/8 +
+ * Extra byte count
+ */
+ intel_dsi->clk_hs_to_lp_count =
+ DIV_ROUND_UP(2 * tlpx_ui + trail_cnt * 2 + 8,
+ 8);
+ intel_dsi->clk_hs_to_lp_count += extra_byte_count;
+
+ intel_dsi_log_params(intel_dsi);
+}
+
+void vlv_dsi_init(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = &dev_priv->drm;
+ struct intel_dsi *intel_dsi;
+ struct intel_encoder *intel_encoder;
+ struct drm_encoder *encoder;
+ struct intel_connector *intel_connector;
+ struct drm_connector *connector;
+ struct drm_display_mode *current_mode;
+ enum port port;
+ enum pipe pipe;
+
+ drm_dbg_kms(&dev_priv->drm, "\n");
+
+ /* There is no detection method for MIPI so rely on VBT */
+ if (!intel_bios_is_dsi_present(dev_priv, &port))
+ return;
+
+ if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ dev_priv->display.dsi.mmio_base = BXT_MIPI_BASE;
+ else
+ dev_priv->display.dsi.mmio_base = VLV_MIPI_BASE;
+
+ intel_dsi = kzalloc(sizeof(*intel_dsi), GFP_KERNEL);
+ if (!intel_dsi)
+ return;
+
+ intel_connector = intel_connector_alloc();
+ if (!intel_connector) {
+ kfree(intel_dsi);
+ return;
+ }
+
+ intel_encoder = &intel_dsi->base;
+ encoder = &intel_encoder->base;
+ intel_dsi->attached_connector = intel_connector;
+
+ connector = &intel_connector->base;
+
+ drm_encoder_init(dev, encoder, &intel_dsi_funcs, DRM_MODE_ENCODER_DSI,
+ "DSI %c", port_name(port));
+
+ intel_encoder->compute_config = intel_dsi_compute_config;
+ intel_encoder->pre_enable = intel_dsi_pre_enable;
+ if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ intel_encoder->enable = bxt_dsi_enable;
+ intel_encoder->disable = intel_dsi_disable;
+ intel_encoder->post_disable = intel_dsi_post_disable;
+ intel_encoder->get_hw_state = intel_dsi_get_hw_state;
+ intel_encoder->get_config = intel_dsi_get_config;
+ intel_encoder->update_pipe = intel_backlight_update;
+ intel_encoder->shutdown = intel_dsi_shutdown;
+
+ intel_connector->get_hw_state = intel_connector_get_hw_state;
+
+ intel_encoder->port = port;
+ intel_encoder->type = INTEL_OUTPUT_DSI;
+ intel_encoder->power_domain = POWER_DOMAIN_PORT_DSI;
+ intel_encoder->cloneable = 0;
+
+ /*
+ * On BYT/CHV, pipe A maps to MIPI DSI port A, pipe B maps to MIPI DSI
+ * port C. BXT isn't limited like this.
+ */
+ if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ intel_encoder->pipe_mask = ~0;
+ else if (port == PORT_A)
+ intel_encoder->pipe_mask = BIT(PIPE_A);
+ else
+ intel_encoder->pipe_mask = BIT(PIPE_B);
+
+ intel_dsi->panel_power_off_time = ktime_get_boottime();
+
+ intel_bios_init_panel_late(dev_priv, &intel_connector->panel, NULL, NULL);
+
+ if (intel_connector->panel.vbt.dsi.config->dual_link)
+ intel_dsi->ports = BIT(PORT_A) | BIT(PORT_C);
+ else
+ intel_dsi->ports = BIT(port);
+
+ if (drm_WARN_ON(&dev_priv->drm, intel_connector->panel.vbt.dsi.bl_ports & ~intel_dsi->ports))
+ intel_connector->panel.vbt.dsi.bl_ports &= intel_dsi->ports;
+
+ if (drm_WARN_ON(&dev_priv->drm, intel_connector->panel.vbt.dsi.cabc_ports & ~intel_dsi->ports))
+ intel_connector->panel.vbt.dsi.cabc_ports &= intel_dsi->ports;
+
+ /* Create a DSI host (and a device) for each port. */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ struct intel_dsi_host *host;
+
+ host = intel_dsi_host_init(intel_dsi, &intel_dsi_host_ops,
+ port);
+ if (!host)
+ goto err;
+
+ intel_dsi->dsi_hosts[port] = host;
+ }
+
+ if (!intel_dsi_vbt_init(intel_dsi, MIPI_DSI_GENERIC_PANEL_ID)) {
+ drm_dbg_kms(&dev_priv->drm, "no device found\n");
+ goto err;
+ }
+
+ /* Use clock read-back from current hw-state for fastboot */
+ current_mode = intel_encoder_current_mode(intel_encoder);
+ if (current_mode) {
+ drm_dbg_kms(&dev_priv->drm, "Calculated pclk %d GOP %d\n",
+ intel_dsi->pclk, current_mode->clock);
+ if (intel_fuzzy_clock_check(intel_dsi->pclk,
+ current_mode->clock)) {
+ drm_dbg_kms(&dev_priv->drm, "Using GOP pclk\n");
+ intel_dsi->pclk = current_mode->clock;
+ }
+
+ kfree(current_mode);
+ }
+
+ vlv_dphy_param_init(intel_dsi);
+
+ intel_dsi_vbt_gpio_init(intel_dsi,
+ intel_dsi_get_hw_state(intel_encoder, &pipe));
+
+ drm_connector_init(dev, connector, &intel_dsi_connector_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ drm_connector_helper_add(connector, &intel_dsi_connector_helper_funcs);
+
+ connector->display_info.subpixel_order = SubPixelHorizontalRGB; /*XXX*/
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
+
+ intel_connector_attach_encoder(intel_connector, intel_encoder);
+
+ mutex_lock(&dev->mode_config.mutex);
+ intel_panel_add_vbt_lfp_fixed_mode(intel_connector);
+ mutex_unlock(&dev->mode_config.mutex);
+
+ if (!intel_panel_preferred_fixed_mode(intel_connector)) {
+ drm_dbg_kms(&dev_priv->drm, "no fixed mode\n");
+ goto err_cleanup_connector;
+ }
+
+ intel_panel_init(intel_connector);
+
+ intel_backlight_setup(intel_connector, INVALID_PIPE);
+
+ vlv_dsi_add_properties(intel_connector);
+
+ return;
+
+err_cleanup_connector:
+ drm_connector_cleanup(&intel_connector->base);
+err:
+ drm_encoder_cleanup(&intel_encoder->base);
+ kfree(intel_dsi);
+ kfree(intel_connector);
+}
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.h b/drivers/gpu/drm/i915/display/vlv_dsi.h
new file mode 100644
index 000000000..0c2b279df
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef __VLV_DSI_H__
+#define __VLV_DSI_H__
+
+#include <linux/types.h>
+
+enum port;
+struct drm_i915_private;
+struct intel_dsi;
+
+void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port);
+enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt);
+void vlv_dsi_init(struct drm_i915_private *dev_priv);
+
+#endif /* __VLV_DSI_H__ */
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
new file mode 100644
index 000000000..af7402127
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
@@ -0,0 +1,624 @@
+/*
+ * Copyright © 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Shobhit Kumar <shobhit.kumar@intel.com>
+ * Yogesh Mohan Marimuthu <yogesh.mohan.marimuthu@intel.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/string_helpers.h>
+
+#include "i915_drv.h"
+#include "intel_de.h"
+#include "intel_display_types.h"
+#include "intel_dsi.h"
+#include "vlv_dsi_pll.h"
+#include "vlv_dsi_pll_regs.h"
+#include "vlv_sideband.h"
+
+static const u16 lfsr_converts[] = {
+ 426, 469, 234, 373, 442, 221, 110, 311, 411, /* 62 - 70 */
+ 461, 486, 243, 377, 188, 350, 175, 343, 427, 213, /* 71 - 80 */
+ 106, 53, 282, 397, 454, 227, 113, 56, 284, 142, /* 81 - 90 */
+ 71, 35, 273, 136, 324, 418, 465, 488, 500, 506 /* 91 - 100 */
+};
+
+/* Get DSI clock from pixel clock */
+static u32 dsi_clk_from_pclk(u32 pclk, enum mipi_dsi_pixel_format fmt,
+ int lane_count)
+{
+ u32 dsi_clk_khz;
+ u32 bpp = mipi_dsi_pixel_format_to_bpp(fmt);
+
+	/*
+	 * DSI data rate = pixel clock * bits per pixel / lane count;
+	 * with the pixel clock in kHz, this yields the DSI clock in kHz.
+	 */
+ dsi_clk_khz = DIV_ROUND_CLOSEST(pclk * bpp, lane_count);
+
+ return dsi_clk_khz;
+}
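+
+/*
+ * E.g. an assumed 100000 kHz pixel clock with RGB888 (24 bpp) on 4 lanes
+ * gives a DSI clock of 100000 * 24 / 4 = 600000 kHz.
+ */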
+
+static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
+ struct intel_crtc_state *config,
+ int target_dsi_clk)
+{
+ unsigned int m_min, m_max, p_min = 2, p_max = 6;
+ unsigned int m, n, p;
+ unsigned int calc_m, calc_p;
+ int delta, ref_clk;
+
+ /* target_dsi_clk is expected in kHz */
+ if (target_dsi_clk < 300000 || target_dsi_clk > 1150000) {
+ drm_err(&dev_priv->drm, "DSI CLK Out of Range\n");
+ return -ECHRNG;
+ }
+
+ if (IS_CHERRYVIEW(dev_priv)) {
+ ref_clk = 100000;
+ n = 4;
+ m_min = 70;
+ m_max = 96;
+ } else {
+ ref_clk = 25000;
+ n = 1;
+ m_min = 62;
+ m_max = 92;
+ }
+
+ calc_p = p_min;
+ calc_m = m_min;
+ delta = abs(target_dsi_clk - (m_min * ref_clk) / (p_min * n));
+
+ for (m = m_min; m <= m_max && delta; m++) {
+ for (p = p_min; p <= p_max && delta; p++) {
+ /*
+ * Find the optimal m and p divisors with minimal delta
+ * +/- the required clock
+ */
+ int calc_dsi_clk = (m * ref_clk) / (p * n);
+ int d = abs(target_dsi_clk - calc_dsi_clk);
+ if (d < delta) {
+ delta = d;
+ calc_m = m;
+ calc_p = p;
+ }
+ }
+ }
+
+ /* register has log2(N1), this works fine for powers of two */
+ config->dsi_pll.ctrl = 1 << (DSI_PLL_P1_POST_DIV_SHIFT + calc_p - 2);
+ config->dsi_pll.div =
+ (ffs(n) - 1) << DSI_PLL_N1_DIV_SHIFT |
+ (u32)lfsr_converts[calc_m - 62] << DSI_PLL_M1_DIV_SHIFT;
+
+ return 0;
+}
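+
+/*
+ * E.g. on VLV (ref_clk = 25000 kHz, n = 1) an assumed 600000 kHz target
+ * is met exactly by m = 72, p = 3: 72 * 25000 / (3 * 1) = 600000 kHz,
+ * with both divisors inside the 62..92 and 2..6 search ranges above.
+ */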
+
+static int vlv_dsi_pclk(struct intel_encoder *encoder,
+ struct intel_crtc_state *config)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
+ u32 dsi_clock;
+ u32 pll_ctl, pll_div;
+ u32 m = 0, p = 0, n;
+ int refclk = IS_CHERRYVIEW(dev_priv) ? 100000 : 25000;
+ int i;
+
+ pll_ctl = config->dsi_pll.ctrl;
+ pll_div = config->dsi_pll.div;
+
+ /* mask out other bits and extract the P1 divisor */
+ pll_ctl &= DSI_PLL_P1_POST_DIV_MASK;
+ pll_ctl = pll_ctl >> (DSI_PLL_P1_POST_DIV_SHIFT - 2);
+
+ /* N1 divisor */
+ n = (pll_div & DSI_PLL_N1_DIV_MASK) >> DSI_PLL_N1_DIV_SHIFT;
+ n = 1 << n; /* register has log2(N1) */
+
+ /* mask out the other bits and extract the M1 divisor */
+ pll_div &= DSI_PLL_M1_DIV_MASK;
+ pll_div = pll_div >> DSI_PLL_M1_DIV_SHIFT;
+
+ while (pll_ctl) {
+ pll_ctl = pll_ctl >> 1;
+ p++;
+ }
+ p--;
+
+ if (!p) {
+ drm_err(&dev_priv->drm, "wrong P1 divisor\n");
+ return 0;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(lfsr_converts); i++) {
+ if (lfsr_converts[i] == pll_div)
+ break;
+ }
+
+ if (i == ARRAY_SIZE(lfsr_converts)) {
+ drm_err(&dev_priv->drm, "wrong m_seed programmed\n");
+ return 0;
+ }
+
+ m = i + 62;
+
+ dsi_clock = (m * refclk) / (p * n);
+
+ return DIV_ROUND_CLOSEST(dsi_clock * intel_dsi->lane_count, bpp);
+}
+
+/*
+ * XXX: The muxing and gating are hard-coded for now. Need to add support
+ * for sharing PLLs with two DSI outputs.
+ */
+int vlv_dsi_pll_compute(struct intel_encoder *encoder,
+ struct intel_crtc_state *config)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ int pclk, dsi_clk, ret;
+
+ dsi_clk = dsi_clk_from_pclk(intel_dsi->pclk, intel_dsi->pixel_format,
+ intel_dsi->lane_count);
+
+ ret = dsi_calc_mnp(dev_priv, config, dsi_clk);
+ if (ret) {
+ drm_dbg_kms(&dev_priv->drm, "dsi_calc_mnp failed\n");
+ return ret;
+ }
+
+ if (intel_dsi->ports & (1 << PORT_A))
+ config->dsi_pll.ctrl |= DSI_PLL_CLK_GATE_DSI0_DSIPLL;
+
+ if (intel_dsi->ports & (1 << PORT_C))
+ config->dsi_pll.ctrl |= DSI_PLL_CLK_GATE_DSI1_DSIPLL;
+
+ config->dsi_pll.ctrl |= DSI_PLL_VCO_EN;
+
+ drm_dbg_kms(&dev_priv->drm, "dsi pll div %08x, ctrl %08x\n",
+ config->dsi_pll.div, config->dsi_pll.ctrl);
+
+ pclk = vlv_dsi_pclk(encoder, config);
+ config->port_clock = pclk;
+
+ /* FIXME definitely not right for burst/cmd mode/pixel overlap */
+ config->hw.adjusted_mode.crtc_clock = pclk;
+ if (intel_dsi->dual_link)
+ config->hw.adjusted_mode.crtc_clock *= 2;
+
+ return 0;
+}
+
+void vlv_dsi_pll_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *config)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
+ drm_dbg_kms(&dev_priv->drm, "\n");
+
+ vlv_cck_get(dev_priv);
+
+ vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, 0);
+ vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_DIVIDER, config->dsi_pll.div);
+ vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL,
+ config->dsi_pll.ctrl & ~DSI_PLL_VCO_EN);
+
+	/*
+	 * Wait at least 0.5 us after ungating before enabling the VCO. The
+	 * relaxed range allows the hrtimer subsystem to optimize timer slack.
+	 */
+ usleep_range(10, 50);
+
+ vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, config->dsi_pll.ctrl);
+
+ if (wait_for(vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL) &
+ DSI_PLL_LOCK, 20)) {
+
+ vlv_cck_put(dev_priv);
+ drm_err(&dev_priv->drm, "DSI PLL lock failed\n");
+ return;
+ }
+ vlv_cck_put(dev_priv);
+
+ drm_dbg_kms(&dev_priv->drm, "DSI PLL locked\n");
+}
+
+void vlv_dsi_pll_disable(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ u32 tmp;
+
+ drm_dbg_kms(&dev_priv->drm, "\n");
+
+ vlv_cck_get(dev_priv);
+
+ tmp = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
+ tmp &= ~DSI_PLL_VCO_EN;
+ tmp |= DSI_PLL_LDO_GATE;
+ vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, tmp);
+
+ vlv_cck_put(dev_priv);
+}
+
+bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
+{
+ bool enabled;
+ u32 val;
+ u32 mask;
+
+ mask = BXT_DSI_PLL_DO_ENABLE | BXT_DSI_PLL_LOCKED;
+ val = intel_de_read(dev_priv, BXT_DSI_PLL_ENABLE);
+ enabled = (val & mask) == mask;
+
+ if (!enabled)
+ return false;
+
+	/*
+	 * Dividers must be programmed with valid values. As per BSpec, for
+	 * GEMINILAKE only the PORT A divider values are checked, while for
+	 * BXT both divider values are validated. Check this here for
+	 * paranoia, since the BIOS is known to misconfigure PLLs in this
+	 * way at times, and since accessing DSI registers with invalid
+	 * dividers causes a system hang.
+	 */
+ val = intel_de_read(dev_priv, BXT_DSI_PLL_CTL);
+ if (IS_GEMINILAKE(dev_priv)) {
+ if (!(val & BXT_DSIA_16X_MASK)) {
+ drm_dbg(&dev_priv->drm,
+ "Invalid PLL divider (%08x)\n", val);
+ enabled = false;
+ }
+ } else {
+ if (!(val & BXT_DSIA_16X_MASK) || !(val & BXT_DSIC_16X_MASK)) {
+ drm_dbg(&dev_priv->drm,
+ "Invalid PLL divider (%08x)\n", val);
+ enabled = false;
+ }
+ }
+
+ return enabled;
+}
+
+void bxt_dsi_pll_disable(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ u32 val;
+
+ drm_dbg_kms(&dev_priv->drm, "\n");
+
+ val = intel_de_read(dev_priv, BXT_DSI_PLL_ENABLE);
+ val &= ~BXT_DSI_PLL_DO_ENABLE;
+ intel_de_write(dev_priv, BXT_DSI_PLL_ENABLE, val);
+
+ /*
+ * PLL lock should deassert within 200us.
+ * Wait up to 1ms before timing out.
+ */
+ if (intel_de_wait_for_clear(dev_priv, BXT_DSI_PLL_ENABLE,
+ BXT_DSI_PLL_LOCKED, 1))
+ drm_err(&dev_priv->drm,
+ "Timeout waiting for PLL lock deassertion\n");
+}
+
+u32 vlv_dsi_get_pclk(struct intel_encoder *encoder,
+ struct intel_crtc_state *config)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ u32 pll_ctl, pll_div;
+
+ drm_dbg_kms(&dev_priv->drm, "\n");
+
+ vlv_cck_get(dev_priv);
+ pll_ctl = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
+ pll_div = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_DIVIDER);
+ vlv_cck_put(dev_priv);
+
+ config->dsi_pll.ctrl = pll_ctl & ~DSI_PLL_LOCK;
+ config->dsi_pll.div = pll_div;
+
+ return vlv_dsi_pclk(encoder, config);
+}
+
+static int bxt_dsi_pclk(struct intel_encoder *encoder,
+ const struct intel_crtc_state *config)
+{
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
+ u32 dsi_ratio, dsi_clk;
+
+ dsi_ratio = config->dsi_pll.ctrl & BXT_DSI_PLL_RATIO_MASK;
+ dsi_clk = (dsi_ratio * BXT_REF_CLOCK_KHZ) / 2;
+
+ return DIV_ROUND_CLOSEST(dsi_clk * intel_dsi->lane_count, bpp);
+}
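+
+/*
+ * E.g. an assumed PLL ratio of 63 gives dsi_clk = 63 * 19200 / 2 =
+ * 604800 kHz, and with 24 bpp on 4 lanes pclk = 604800 * 4 / 24 =
+ * 100800 kHz.
+ */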
+
+u32 bxt_dsi_get_pclk(struct intel_encoder *encoder,
+ struct intel_crtc_state *config)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ u32 pclk;
+
+ config->dsi_pll.ctrl = intel_de_read(dev_priv, BXT_DSI_PLL_CTL);
+
+ pclk = bxt_dsi_pclk(encoder, config);
+
+ drm_dbg(&dev_priv->drm, "Calculated pclk=%u\n", pclk);
+ return pclk;
+}
+
+void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
+{
+ u32 temp;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+
+ temp = intel_de_read(dev_priv, MIPI_CTRL(port));
+ temp &= ~ESCAPE_CLOCK_DIVIDER_MASK;
+ intel_de_write(dev_priv, MIPI_CTRL(port),
+ temp | intel_dsi->escape_clk_div << ESCAPE_CLOCK_DIVIDER_SHIFT);
+}
+
+static void glk_dsi_program_esc_clock(struct drm_device *dev,
+ const struct intel_crtc_state *config)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ u32 dsi_rate = 0;
+ u32 pll_ratio = 0;
+ u32 ddr_clk = 0;
+ u32 div1_value = 0;
+ u32 div2_value = 0;
+ u32 txesc1_div = 0;
+ u32 txesc2_div = 0;
+
+ pll_ratio = config->dsi_pll.ctrl & BXT_DSI_PLL_RATIO_MASK;
+
+ dsi_rate = (BXT_REF_CLOCK_KHZ * pll_ratio) / 2;
+
+ ddr_clk = dsi_rate / 2;
+
+ /* Variable divider value */
+ div1_value = DIV_ROUND_CLOSEST(ddr_clk, 20000);
+
+ /* Calculate TXESC1 divider */
+ if (div1_value <= 10)
+ txesc1_div = div1_value;
+ else if ((div1_value > 10) && (div1_value <= 20))
+ txesc1_div = DIV_ROUND_UP(div1_value, 2);
+ else if ((div1_value > 20) && (div1_value <= 30))
+ txesc1_div = DIV_ROUND_UP(div1_value, 4);
+ else if ((div1_value > 30) && (div1_value <= 40))
+ txesc1_div = DIV_ROUND_UP(div1_value, 6);
+ else if ((div1_value > 40) && (div1_value <= 50))
+ txesc1_div = DIV_ROUND_UP(div1_value, 8);
+ else
+ txesc1_div = 10;
+
+ /* Calculate TXESC2 divider */
+ div2_value = DIV_ROUND_UP(div1_value, txesc1_div);
+
+ txesc2_div = min_t(u32, div2_value, 10);
+
+ intel_de_write(dev_priv, MIPIO_TXESC_CLK_DIV1,
+ (1 << (txesc1_div - 1)) & GLK_TX_ESC_CLK_DIV1_MASK);
+ intel_de_write(dev_priv, MIPIO_TXESC_CLK_DIV2,
+ (1 << (txesc2_div - 1)) & GLK_TX_ESC_CLK_DIV2_MASK);
+}
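+
+/*
+ * E.g. with an assumed PLL ratio of 64: dsi_rate = 19200 * 64 / 2 =
+ * 614400 kHz and ddr_clk = 307200 kHz, so div1_value =
+ * DIV_ROUND_CLOSEST(307200, 20000) = 15, txesc1_div =
+ * DIV_ROUND_UP(15, 2) = 8 and txesc2_div = DIV_ROUND_UP(15, 8) = 2.
+ */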
+
+/* Program BXT Mipi clocks and dividers */
+static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port,
+ const struct intel_crtc_state *config)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ u32 tmp;
+ u32 dsi_rate = 0;
+ u32 pll_ratio = 0;
+ u32 rx_div;
+ u32 tx_div;
+ u32 rx_div_upper;
+ u32 rx_div_lower;
+ u32 mipi_8by3_divider;
+
+ /* Clear old configurations */
+ tmp = intel_de_read(dev_priv, BXT_MIPI_CLOCK_CTL);
+ tmp &= ~(BXT_MIPI_TX_ESCLK_FIXDIV_MASK(port));
+ tmp &= ~(BXT_MIPI_RX_ESCLK_UPPER_FIXDIV_MASK(port));
+ tmp &= ~(BXT_MIPI_8X_BY3_DIVIDER_MASK(port));
+ tmp &= ~(BXT_MIPI_RX_ESCLK_LOWER_FIXDIV_MASK(port));
+
+	/* Get the current (actual) DSI rate */
+ pll_ratio = config->dsi_pll.ctrl & BXT_DSI_PLL_RATIO_MASK;
+ dsi_rate = (BXT_REF_CLOCK_KHZ * pll_ratio) / 2;
+
+	/*
+	 * The tx clock should be <= 20 MHz, and 1 must be subtracted from
+	 * the divider value, per bspec.
+	 */
+ tx_div = DIV_ROUND_UP(dsi_rate, 20000) - 1;
+	/*
+	 * The rx clock should be <= 150 MHz, and 1 must be subtracted from
+	 * the divider value, per bspec.
+	 */
+ rx_div = DIV_ROUND_UP(dsi_rate, 150000) - 1;
+
+	/*
+	 * The rx divider value needs to be updated in two different bit
+	 * fields in the register, hence the rx divider value is split
+	 * accordingly.
+	 */
+ rx_div_lower = rx_div & RX_DIVIDER_BIT_1_2;
+ rx_div_upper = (rx_div & RX_DIVIDER_BIT_3_4) >> 2;
+
+ mipi_8by3_divider = 0x2;
+
+ tmp |= BXT_MIPI_8X_BY3_DIVIDER(port, mipi_8by3_divider);
+ tmp |= BXT_MIPI_TX_ESCLK_DIVIDER(port, tx_div);
+ tmp |= BXT_MIPI_RX_ESCLK_LOWER_DIVIDER(port, rx_div_lower);
+ tmp |= BXT_MIPI_RX_ESCLK_UPPER_DIVIDER(port, rx_div_upper);
+
+ intel_de_write(dev_priv, BXT_MIPI_CLOCK_CTL, tmp);
+}
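+
+/*
+ * E.g. at an assumed dsi_rate of 614400 kHz: tx_div =
+ * DIV_ROUND_UP(614400, 20000) - 1 = 30 and rx_div =
+ * DIV_ROUND_UP(614400, 150000) - 1 = 4, which is split into
+ * rx_div_lower = 4 & 0x3 = 0 and rx_div_upper = (4 & 0xC) >> 2 = 1.
+ */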
+
+int bxt_dsi_pll_compute(struct intel_encoder *encoder,
+ struct intel_crtc_state *config)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ u8 dsi_ratio, dsi_ratio_min, dsi_ratio_max;
+ u32 dsi_clk;
+ int pclk;
+
+ dsi_clk = dsi_clk_from_pclk(intel_dsi->pclk, intel_dsi->pixel_format,
+ intel_dsi->lane_count);
+
+	/*
+	 * From the clock diagram: to get the PLL ratio divider, divide
+	 * twice the DSI link rate (i.e. the 2 * 8x = 16x frequency value)
+	 * by the reference clock. Make sure to round the result up.
+	 */
+ dsi_ratio = DIV_ROUND_UP(dsi_clk * 2, BXT_REF_CLOCK_KHZ);
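+	/*
+	 * E.g. an assumed 600000 kHz DSI clock gives dsi_ratio =
+	 * DIV_ROUND_UP(2 * 600000, 19200) = 63, within both the BXT and
+	 * GLK ratio limits checked below.
+	 */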
+
+ if (IS_BROXTON(dev_priv)) {
+ dsi_ratio_min = BXT_DSI_PLL_RATIO_MIN;
+ dsi_ratio_max = BXT_DSI_PLL_RATIO_MAX;
+ } else {
+ dsi_ratio_min = GLK_DSI_PLL_RATIO_MIN;
+ dsi_ratio_max = GLK_DSI_PLL_RATIO_MAX;
+ }
+
+ if (dsi_ratio < dsi_ratio_min || dsi_ratio > dsi_ratio_max) {
+ drm_err(&dev_priv->drm,
+ "Can't get a suitable ratio from DSI PLL ratios\n");
+ return -ECHRNG;
+ } else
+ drm_dbg_kms(&dev_priv->drm, "DSI PLL calculation is Done!!\n");
+
+	/*
+	 * Program the DSI ratio and select the MIPIC and MIPIA PLL outputs
+	 * as 8x. The spec says both have to be programmed, even if only one
+	 * is getting used. The MIPI_CLOCK_CTL dividers are configured
+	 * during modeset.
+	 */
+ config->dsi_pll.ctrl = dsi_ratio | BXT_DSIA_16X_BY2 | BXT_DSIC_16X_BY2;
+
+	/*
+	 * As per the recommendation from the hardware team, program
+	 * PVD ratio = 1 if the DSI ratio is <= 50.
+	 */
+ if (IS_BROXTON(dev_priv) && dsi_ratio <= 50)
+ config->dsi_pll.ctrl |= BXT_DSI_PLL_PVD_RATIO_1;
+
+ pclk = bxt_dsi_pclk(encoder, config);
+ config->port_clock = pclk;
+
+ /* FIXME definitely not right for burst/cmd mode/pixel overlap */
+ config->hw.adjusted_mode.crtc_clock = pclk;
+ if (intel_dsi->dual_link)
+ config->hw.adjusted_mode.crtc_clock *= 2;
+
+ return 0;
+}
+
+void bxt_dsi_pll_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *config)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ enum port port;
+ u32 val;
+
+ drm_dbg_kms(&dev_priv->drm, "\n");
+
+	/* Configure PLL values */
+ intel_de_write(dev_priv, BXT_DSI_PLL_CTL, config->dsi_pll.ctrl);
+ intel_de_posting_read(dev_priv, BXT_DSI_PLL_CTL);
+
+ /* Program TX, RX, Dphy clocks */
+ if (IS_BROXTON(dev_priv)) {
+ for_each_dsi_port(port, intel_dsi->ports)
+ bxt_dsi_program_clocks(encoder->base.dev, port, config);
+ } else {
+ glk_dsi_program_esc_clock(encoder->base.dev, config);
+ }
+
+ /* Enable DSI PLL */
+ val = intel_de_read(dev_priv, BXT_DSI_PLL_ENABLE);
+ val |= BXT_DSI_PLL_DO_ENABLE;
+ intel_de_write(dev_priv, BXT_DSI_PLL_ENABLE, val);
+
+ /* Timeout and fail if PLL not locked */
+ if (intel_de_wait_for_set(dev_priv, BXT_DSI_PLL_ENABLE,
+ BXT_DSI_PLL_LOCKED, 1)) {
+ drm_err(&dev_priv->drm,
+ "Timed out waiting for DSI PLL to lock\n");
+ return;
+ }
+
+ drm_dbg_kms(&dev_priv->drm, "DSI PLL locked\n");
+}
+
+void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
+{
+ u32 tmp;
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+
+ /* Clear old configurations */
+ if (IS_BROXTON(dev_priv)) {
+ tmp = intel_de_read(dev_priv, BXT_MIPI_CLOCK_CTL);
+ tmp &= ~(BXT_MIPI_TX_ESCLK_FIXDIV_MASK(port));
+ tmp &= ~(BXT_MIPI_RX_ESCLK_UPPER_FIXDIV_MASK(port));
+ tmp &= ~(BXT_MIPI_8X_BY3_DIVIDER_MASK(port));
+ tmp &= ~(BXT_MIPI_RX_ESCLK_LOWER_FIXDIV_MASK(port));
+ intel_de_write(dev_priv, BXT_MIPI_CLOCK_CTL, tmp);
+ } else {
+ tmp = intel_de_read(dev_priv, MIPIO_TXESC_CLK_DIV1);
+ tmp &= ~GLK_TX_ESC_CLK_DIV1_MASK;
+ intel_de_write(dev_priv, MIPIO_TXESC_CLK_DIV1, tmp);
+
+ tmp = intel_de_read(dev_priv, MIPIO_TXESC_CLK_DIV2);
+ tmp &= ~GLK_TX_ESC_CLK_DIV2_MASK;
+ intel_de_write(dev_priv, MIPIO_TXESC_CLK_DIV2, tmp);
+ }
+ intel_de_write(dev_priv, MIPI_EOT_DISABLE(port), CLOCKSTOP);
+}
+
+static void assert_dsi_pll(struct drm_i915_private *i915, bool state)
+{
+ bool cur_state;
+
+ vlv_cck_get(i915);
+ cur_state = vlv_cck_read(i915, CCK_REG_DSI_PLL_CONTROL) & DSI_PLL_VCO_EN;
+ vlv_cck_put(i915);
+
+ I915_STATE_WARN(cur_state != state,
+ "DSI PLL state assertion failure (expected %s, current %s)\n",
+ str_on_off(state), str_on_off(cur_state));
+}
+
+void assert_dsi_pll_enabled(struct drm_i915_private *i915)
+{
+ assert_dsi_pll(i915, true);
+}
+
+void assert_dsi_pll_disabled(struct drm_i915_private *i915)
+{
+ assert_dsi_pll(i915, false);
+}
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.h b/drivers/gpu/drm/i915/display/vlv_dsi_pll.h
new file mode 100644
index 000000000..ab9291ad1
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef __VLV_DSI_PLL_H__
+#define __VLV_DSI_PLL_H__
+
+#include <linux/types.h>
+
+enum port;
+struct drm_i915_private;
+struct intel_crtc_state;
+struct intel_encoder;
+
+int vlv_dsi_pll_compute(struct intel_encoder *encoder,
+ struct intel_crtc_state *config);
+void vlv_dsi_pll_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *config);
+void vlv_dsi_pll_disable(struct intel_encoder *encoder);
+u32 vlv_dsi_get_pclk(struct intel_encoder *encoder,
+ struct intel_crtc_state *config);
+void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port);
+
+bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv);
+int bxt_dsi_pll_compute(struct intel_encoder *encoder,
+ struct intel_crtc_state *config);
+void bxt_dsi_pll_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *config);
+void bxt_dsi_pll_disable(struct intel_encoder *encoder);
+u32 bxt_dsi_get_pclk(struct intel_encoder *encoder,
+ struct intel_crtc_state *config);
+void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port);
+
+void assert_dsi_pll_enabled(struct drm_i915_private *i915);
+void assert_dsi_pll_disabled(struct drm_i915_private *i915);
+
+#endif /* __VLV_DSI_PLL_H__ */
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll_regs.h b/drivers/gpu/drm/i915/display/vlv_dsi_pll_regs.h
new file mode 100644
index 000000000..45590e14e
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll_regs.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __VLV_DSI_PLL_REGS_H__
+#define __VLV_DSI_PLL_REGS_H__
+
+#include "vlv_dsi_regs.h"
+
+#define MIPIO_TXESC_CLK_DIV1 _MMIO(0x160004)
+#define GLK_TX_ESC_CLK_DIV1_MASK 0x3FF
+#define MIPIO_TXESC_CLK_DIV2 _MMIO(0x160008)
+#define GLK_TX_ESC_CLK_DIV2_MASK 0x3FF
+
+#define BXT_MAX_VAR_OUTPUT_KHZ 39500
+
+#define BXT_MIPI_CLOCK_CTL _MMIO(0x46090)
+#define BXT_MIPI1_DIV_SHIFT 26
+#define BXT_MIPI2_DIV_SHIFT 10
+#define BXT_MIPI_DIV_SHIFT(port) \
+ _MIPI_PORT(port, BXT_MIPI1_DIV_SHIFT, \
+ BXT_MIPI2_DIV_SHIFT)
+
+/* TX control divider to select actual TX clock output from (8x/var) */
+#define BXT_MIPI1_TX_ESCLK_SHIFT 26
+#define BXT_MIPI2_TX_ESCLK_SHIFT 10
+#define BXT_MIPI_TX_ESCLK_SHIFT(port) \
+ _MIPI_PORT(port, BXT_MIPI1_TX_ESCLK_SHIFT, \
+ BXT_MIPI2_TX_ESCLK_SHIFT)
+#define BXT_MIPI1_TX_ESCLK_FIXDIV_MASK (0x3F << 26)
+#define BXT_MIPI2_TX_ESCLK_FIXDIV_MASK (0x3F << 10)
+#define BXT_MIPI_TX_ESCLK_FIXDIV_MASK(port) \
+ _MIPI_PORT(port, BXT_MIPI1_TX_ESCLK_FIXDIV_MASK, \
+ BXT_MIPI2_TX_ESCLK_FIXDIV_MASK)
+#define BXT_MIPI_TX_ESCLK_DIVIDER(port, val) \
+ (((val) & 0x3F) << BXT_MIPI_TX_ESCLK_SHIFT(port))
+/* RX upper control divider to select actual RX clock output from 8x */
+#define BXT_MIPI1_RX_ESCLK_UPPER_SHIFT 21
+#define BXT_MIPI2_RX_ESCLK_UPPER_SHIFT 5
+#define BXT_MIPI_RX_ESCLK_UPPER_SHIFT(port) \
+ _MIPI_PORT(port, BXT_MIPI1_RX_ESCLK_UPPER_SHIFT, \
+ BXT_MIPI2_RX_ESCLK_UPPER_SHIFT)
+#define BXT_MIPI1_RX_ESCLK_UPPER_FIXDIV_MASK (3 << 21)
+#define BXT_MIPI2_RX_ESCLK_UPPER_FIXDIV_MASK (3 << 5)
+#define BXT_MIPI_RX_ESCLK_UPPER_FIXDIV_MASK(port) \
+ _MIPI_PORT(port, BXT_MIPI1_RX_ESCLK_UPPER_FIXDIV_MASK, \
+ BXT_MIPI2_RX_ESCLK_UPPER_FIXDIV_MASK)
+#define BXT_MIPI_RX_ESCLK_UPPER_DIVIDER(port, val) \
+ (((val) & 3) << BXT_MIPI_RX_ESCLK_UPPER_SHIFT(port))
+/* 8/3X divider to select the actual 8/3X clock output from 8x */
+#define BXT_MIPI1_8X_BY3_SHIFT 19
+#define BXT_MIPI2_8X_BY3_SHIFT 3
+#define BXT_MIPI_8X_BY3_SHIFT(port) \
+ _MIPI_PORT(port, BXT_MIPI1_8X_BY3_SHIFT, \
+ BXT_MIPI2_8X_BY3_SHIFT)
+#define BXT_MIPI1_8X_BY3_DIVIDER_MASK (3 << 19)
+#define BXT_MIPI2_8X_BY3_DIVIDER_MASK (3 << 3)
+#define BXT_MIPI_8X_BY3_DIVIDER_MASK(port) \
+ _MIPI_PORT(port, BXT_MIPI1_8X_BY3_DIVIDER_MASK, \
+ BXT_MIPI2_8X_BY3_DIVIDER_MASK)
+#define BXT_MIPI_8X_BY3_DIVIDER(port, val) \
+ (((val) & 3) << BXT_MIPI_8X_BY3_SHIFT(port))
+/* RX lower control divider to select actual RX clock output from 8x */
+#define BXT_MIPI1_RX_ESCLK_LOWER_SHIFT 16
+#define BXT_MIPI2_RX_ESCLK_LOWER_SHIFT 0
+#define BXT_MIPI_RX_ESCLK_LOWER_SHIFT(port) \
+ _MIPI_PORT(port, BXT_MIPI1_RX_ESCLK_LOWER_SHIFT, \
+ BXT_MIPI2_RX_ESCLK_LOWER_SHIFT)
+#define BXT_MIPI1_RX_ESCLK_LOWER_FIXDIV_MASK (3 << 16)
+#define BXT_MIPI2_RX_ESCLK_LOWER_FIXDIV_MASK (3 << 0)
+#define BXT_MIPI_RX_ESCLK_LOWER_FIXDIV_MASK(port) \
+ _MIPI_PORT(port, BXT_MIPI1_RX_ESCLK_LOWER_FIXDIV_MASK, \
+ BXT_MIPI2_RX_ESCLK_LOWER_FIXDIV_MASK)
+#define BXT_MIPI_RX_ESCLK_LOWER_DIVIDER(port, val) \
+ (((val) & 3) << BXT_MIPI_RX_ESCLK_LOWER_SHIFT(port))
+
+#define RX_DIVIDER_BIT_1_2 0x3
+#define RX_DIVIDER_BIT_3_4 0xC
+
+#define BXT_DSI_PLL_CTL _MMIO(0x161000)
+#define BXT_DSI_PLL_PVD_RATIO_SHIFT 16
+#define BXT_DSI_PLL_PVD_RATIO_MASK (3 << BXT_DSI_PLL_PVD_RATIO_SHIFT)
+#define BXT_DSI_PLL_PVD_RATIO_1 (1 << BXT_DSI_PLL_PVD_RATIO_SHIFT)
+#define BXT_DSIC_16X_BY1 (0 << 10)
+#define BXT_DSIC_16X_BY2 (1 << 10)
+#define BXT_DSIC_16X_BY3 (2 << 10)
+#define BXT_DSIC_16X_BY4 (3 << 10)
+#define BXT_DSIC_16X_MASK (3 << 10)
+#define BXT_DSIA_16X_BY1 (0 << 8)
+#define BXT_DSIA_16X_BY2 (1 << 8)
+#define BXT_DSIA_16X_BY3 (2 << 8)
+#define BXT_DSIA_16X_BY4 (3 << 8)
+#define BXT_DSIA_16X_MASK (3 << 8)
+#define BXT_DSI_FREQ_SEL_SHIFT 8
+#define BXT_DSI_FREQ_SEL_MASK (0xF << BXT_DSI_FREQ_SEL_SHIFT)
+
+#define BXT_DSI_PLL_RATIO_MAX 0x7D
+#define BXT_DSI_PLL_RATIO_MIN 0x22
+#define GLK_DSI_PLL_RATIO_MAX 0x6F
+#define GLK_DSI_PLL_RATIO_MIN 0x22
+#define BXT_DSI_PLL_RATIO_MASK 0xFF
+#define BXT_REF_CLOCK_KHZ 19200
+
+#define BXT_DSI_PLL_ENABLE _MMIO(0x46080)
+#define BXT_DSI_PLL_DO_ENABLE (1 << 31)
+#define BXT_DSI_PLL_LOCKED (1 << 30)
+
+#endif /* __VLV_DSI_PLL_REGS_H__ */
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_regs.h b/drivers/gpu/drm/i915/display/vlv_dsi_regs.h
new file mode 100644
index 000000000..e065b8f2e
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/vlv_dsi_regs.h
@@ -0,0 +1,482 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __VLV_DSI_REGS_H__
+#define __VLV_DSI_REGS_H__
+
+#include "i915_reg_defs.h"
+
+#define VLV_MIPI_BASE VLV_DISPLAY_BASE
+#define BXT_MIPI_BASE 0x60000
+
+#define _MIPI_MMIO_BASE(__i915) ((__i915)->display.dsi.mmio_base)
+
+#define _MIPI_PORT(port, a, c) (((port) == PORT_A) ? a : c) /* ports A and C only */
+#define _MMIO_MIPI(port, a, c) _MMIO(_MIPI_PORT(port, a, c))
+
+/* BXT MIPI mode configure */
+#define _BXT_MIPIA_TRANS_HACTIVE 0x6B0F8
+#define _BXT_MIPIC_TRANS_HACTIVE 0x6B8F8
+#define BXT_MIPI_TRANS_HACTIVE(tc) _MMIO_MIPI(tc, \
+ _BXT_MIPIA_TRANS_HACTIVE, _BXT_MIPIC_TRANS_HACTIVE)
+
+#define _BXT_MIPIA_TRANS_VACTIVE 0x6B0FC
+#define _BXT_MIPIC_TRANS_VACTIVE 0x6B8FC
+#define BXT_MIPI_TRANS_VACTIVE(tc) _MMIO_MIPI(tc, \
+ _BXT_MIPIA_TRANS_VACTIVE, _BXT_MIPIC_TRANS_VACTIVE)
+
+#define _BXT_MIPIA_TRANS_VTOTAL 0x6B100
+#define _BXT_MIPIC_TRANS_VTOTAL 0x6B900
+#define BXT_MIPI_TRANS_VTOTAL(tc) _MMIO_MIPI(tc, \
+ _BXT_MIPIA_TRANS_VTOTAL, _BXT_MIPIC_TRANS_VTOTAL)
+
+#define BXT_P_DSI_REGULATOR_CFG _MMIO(0x160020)
+#define STAP_SELECT (1 << 0)
+
+#define BXT_P_DSI_REGULATOR_TX_CTRL _MMIO(0x160054)
+#define HS_IO_CTRL_SELECT (1 << 0)
+
+#define _MIPIA_PORT_CTRL (VLV_DISPLAY_BASE + 0x61190)
+#define _MIPIC_PORT_CTRL (VLV_DISPLAY_BASE + 0x61700)
+#define MIPI_PORT_CTRL(port) _MMIO_MIPI(port, _MIPIA_PORT_CTRL, _MIPIC_PORT_CTRL)
+
+ /* BXT port control */
+#define _BXT_MIPIA_PORT_CTRL 0x6B0C0
+#define _BXT_MIPIC_PORT_CTRL 0x6B8C0
+#define BXT_MIPI_PORT_CTRL(tc) _MMIO_MIPI(tc, _BXT_MIPIA_PORT_CTRL, _BXT_MIPIC_PORT_CTRL)
+
+#define DPI_ENABLE (1 << 31) /* A + C */
+#define MIPIA_MIPI4DPHY_DELAY_COUNT_SHIFT 27
+#define MIPIA_MIPI4DPHY_DELAY_COUNT_MASK (0xf << 27)
+#define DUAL_LINK_MODE_SHIFT 26
+#define DUAL_LINK_MODE_MASK (1 << 26)
+#define DUAL_LINK_MODE_FRONT_BACK (0 << 26)
+#define DUAL_LINK_MODE_PIXEL_ALTERNATIVE (1 << 26)
+#define DITHERING_ENABLE (1 << 25) /* A + C */
+#define FLOPPED_HSTX (1 << 23)
+#define DE_INVERT (1 << 19) /* XXX */
+#define MIPIA_FLISDSI_DELAY_COUNT_SHIFT 18
+#define MIPIA_FLISDSI_DELAY_COUNT_MASK (0xf << 18)
+#define AFE_LATCHOUT (1 << 17)
+#define LP_OUTPUT_HOLD (1 << 16)
+#define MIPIC_FLISDSI_DELAY_COUNT_HIGH_SHIFT 15
+#define MIPIC_FLISDSI_DELAY_COUNT_HIGH_MASK (1 << 15)
+#define MIPIC_MIPI4DPHY_DELAY_COUNT_SHIFT 11
+#define MIPIC_MIPI4DPHY_DELAY_COUNT_MASK (0xf << 11)
+#define CSB_SHIFT 9
+#define CSB_MASK (3 << 9)
+#define CSB_20MHZ (0 << 9)
+#define CSB_10MHZ (1 << 9)
+#define CSB_40MHZ (2 << 9)
+#define BANDGAP_MASK (1 << 8)
+#define BANDGAP_PNW_CIRCUIT (0 << 8)
+#define BANDGAP_LNC_CIRCUIT (1 << 8)
+#define MIPIC_FLISDSI_DELAY_COUNT_LOW_SHIFT 5
+#define MIPIC_FLISDSI_DELAY_COUNT_LOW_MASK (7 << 5)
+#define TEARING_EFFECT_DELAY (1 << 4) /* A + C */
+#define TEARING_EFFECT_SHIFT 2 /* A + C */
+#define TEARING_EFFECT_MASK (3 << 2)
+#define TEARING_EFFECT_OFF (0 << 2)
+#define TEARING_EFFECT_DSI (1 << 2)
+#define TEARING_EFFECT_GPIO (2 << 2)
+#define LANE_CONFIGURATION_SHIFT 0
+#define LANE_CONFIGURATION_MASK (3 << 0)
+#define LANE_CONFIGURATION_4LANE (0 << 0)
+#define LANE_CONFIGURATION_DUAL_LINK_A (1 << 0)
+#define LANE_CONFIGURATION_DUAL_LINK_B (2 << 0)
+
+#define _MIPIA_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61194)
+#define _MIPIC_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61704)
+#define MIPI_TEARING_CTRL(port) _MMIO_MIPI(port, _MIPIA_TEARING_CTRL, _MIPIC_TEARING_CTRL)
+#define TEARING_EFFECT_DELAY_SHIFT 0
+#define TEARING_EFFECT_DELAY_MASK (0xffff << 0)
+
+/* XXX: all bits reserved */
+#define _MIPIA_AUTOPWG (VLV_DISPLAY_BASE + 0x611a0)
+
+/* MIPI DSI Controller and D-PHY registers */
+
+#define _MIPIA_DEVICE_READY (_MIPI_MMIO_BASE(dev_priv) + 0xb000)
+#define _MIPIC_DEVICE_READY (_MIPI_MMIO_BASE(dev_priv) + 0xb800)
+#define MIPI_DEVICE_READY(port) _MMIO_MIPI(port, _MIPIA_DEVICE_READY, _MIPIC_DEVICE_READY)
+#define BUS_POSSESSION (1 << 3) /* set to give bus to receiver */
+#define ULPS_STATE_MASK (3 << 1)
+#define ULPS_STATE_ENTER (2 << 1)
+#define ULPS_STATE_EXIT (1 << 1)
+#define ULPS_STATE_NORMAL_OPERATION (0 << 1)
+#define DEVICE_READY (1 << 0)
+
+#define _MIPIA_INTR_STAT (_MIPI_MMIO_BASE(dev_priv) + 0xb004)
+#define _MIPIC_INTR_STAT (_MIPI_MMIO_BASE(dev_priv) + 0xb804)
+#define MIPI_INTR_STAT(port) _MMIO_MIPI(port, _MIPIA_INTR_STAT, _MIPIC_INTR_STAT)
+#define _MIPIA_INTR_EN (_MIPI_MMIO_BASE(dev_priv) + 0xb008)
+#define _MIPIC_INTR_EN (_MIPI_MMIO_BASE(dev_priv) + 0xb808)
+#define MIPI_INTR_EN(port) _MMIO_MIPI(port, _MIPIA_INTR_EN, _MIPIC_INTR_EN)
+#define TEARING_EFFECT (1 << 31)
+#define SPL_PKT_SENT_INTERRUPT (1 << 30)
+#define GEN_READ_DATA_AVAIL (1 << 29)
+#define LP_GENERIC_WR_FIFO_FULL (1 << 28)
+#define HS_GENERIC_WR_FIFO_FULL (1 << 27)
+#define RX_PROT_VIOLATION (1 << 26)
+#define RX_INVALID_TX_LENGTH (1 << 25)
+#define ACK_WITH_NO_ERROR (1 << 24)
+#define TURN_AROUND_ACK_TIMEOUT (1 << 23)
+#define LP_RX_TIMEOUT (1 << 22)
+#define HS_TX_TIMEOUT (1 << 21)
+#define DPI_FIFO_UNDERRUN (1 << 20)
+#define LOW_CONTENTION (1 << 19)
+#define HIGH_CONTENTION (1 << 18)
+#define TXDSI_VC_ID_INVALID (1 << 17)
+#define TXDSI_DATA_TYPE_NOT_RECOGNISED (1 << 16)
+#define TXCHECKSUM_ERROR (1 << 15)
+#define TXECC_MULTIBIT_ERROR (1 << 14)
+#define TXECC_SINGLE_BIT_ERROR (1 << 13)
+#define TXFALSE_CONTROL_ERROR (1 << 12)
+#define RXDSI_VC_ID_INVALID (1 << 11)
+#define RXDSI_DATA_TYPE_NOT_RECOGNISED (1 << 10)
+#define RXCHECKSUM_ERROR (1 << 9)
+#define RXECC_MULTIBIT_ERROR (1 << 8)
+#define RXECC_SINGLE_BIT_ERROR (1 << 7)
+#define RXFALSE_CONTROL_ERROR (1 << 6)
+#define RXHS_RECEIVE_TIMEOUT_ERROR (1 << 5)
+#define RX_LP_TX_SYNC_ERROR (1 << 4)
+#define RXESCAPE_MODE_ENTRY_ERROR (1 << 3)
+#define RXEOT_SYNC_ERROR (1 << 2)
+#define RXSOT_SYNC_ERROR (1 << 1)
+#define RXSOT_ERROR (1 << 0)
+
+#define _MIPIA_DSI_FUNC_PRG (_MIPI_MMIO_BASE(dev_priv) + 0xb00c)
+#define _MIPIC_DSI_FUNC_PRG (_MIPI_MMIO_BASE(dev_priv) + 0xb80c)
+#define MIPI_DSI_FUNC_PRG(port) _MMIO_MIPI(port, _MIPIA_DSI_FUNC_PRG, _MIPIC_DSI_FUNC_PRG)
+#define CMD_MODE_DATA_WIDTH_MASK (7 << 13)
+#define CMD_MODE_NOT_SUPPORTED (0 << 13)
+#define CMD_MODE_DATA_WIDTH_16_BIT (1 << 13)
+#define CMD_MODE_DATA_WIDTH_9_BIT (2 << 13)
+#define CMD_MODE_DATA_WIDTH_8_BIT (3 << 13)
+#define CMD_MODE_DATA_WIDTH_OPTION1 (4 << 13)
+#define CMD_MODE_DATA_WIDTH_OPTION2 (5 << 13)
+#define VID_MODE_FORMAT_MASK (0xf << 7)
+#define VID_MODE_NOT_SUPPORTED (0 << 7)
+#define VID_MODE_FORMAT_RGB565 (1 << 7)
+#define VID_MODE_FORMAT_RGB666_PACKED (2 << 7)
+#define VID_MODE_FORMAT_RGB666 (3 << 7)
+#define VID_MODE_FORMAT_RGB888 (4 << 7)
+#define CMD_MODE_CHANNEL_NUMBER_SHIFT 5
+#define CMD_MODE_CHANNEL_NUMBER_MASK (3 << 5)
+#define VID_MODE_CHANNEL_NUMBER_SHIFT 3
+#define VID_MODE_CHANNEL_NUMBER_MASK (3 << 3)
+#define DATA_LANES_PRG_REG_SHIFT 0
+#define DATA_LANES_PRG_REG_MASK (7 << 0)
+
+#define _MIPIA_HS_TX_TIMEOUT (_MIPI_MMIO_BASE(dev_priv) + 0xb010)
+#define _MIPIC_HS_TX_TIMEOUT (_MIPI_MMIO_BASE(dev_priv) + 0xb810)
+#define MIPI_HS_TX_TIMEOUT(port) _MMIO_MIPI(port, _MIPIA_HS_TX_TIMEOUT, _MIPIC_HS_TX_TIMEOUT)
+#define HIGH_SPEED_TX_TIMEOUT_COUNTER_MASK 0xffffff
+
+#define _MIPIA_LP_RX_TIMEOUT (_MIPI_MMIO_BASE(dev_priv) + 0xb014)
+#define _MIPIC_LP_RX_TIMEOUT (_MIPI_MMIO_BASE(dev_priv) + 0xb814)
+#define MIPI_LP_RX_TIMEOUT(port) _MMIO_MIPI(port, _MIPIA_LP_RX_TIMEOUT, _MIPIC_LP_RX_TIMEOUT)
+#define LOW_POWER_RX_TIMEOUT_COUNTER_MASK 0xffffff
+
+#define _MIPIA_TURN_AROUND_TIMEOUT (_MIPI_MMIO_BASE(dev_priv) + 0xb018)
+#define _MIPIC_TURN_AROUND_TIMEOUT (_MIPI_MMIO_BASE(dev_priv) + 0xb818)
+#define MIPI_TURN_AROUND_TIMEOUT(port) _MMIO_MIPI(port, _MIPIA_TURN_AROUND_TIMEOUT, _MIPIC_TURN_AROUND_TIMEOUT)
+#define TURN_AROUND_TIMEOUT_MASK 0x3f
+
+#define _MIPIA_DEVICE_RESET_TIMER (_MIPI_MMIO_BASE(dev_priv) + 0xb01c)
+#define _MIPIC_DEVICE_RESET_TIMER (_MIPI_MMIO_BASE(dev_priv) + 0xb81c)
+#define MIPI_DEVICE_RESET_TIMER(port) _MMIO_MIPI(port, _MIPIA_DEVICE_RESET_TIMER, _MIPIC_DEVICE_RESET_TIMER)
+#define DEVICE_RESET_TIMER_MASK 0xffff
+
+#define _MIPIA_DPI_RESOLUTION (_MIPI_MMIO_BASE(dev_priv) + 0xb020)
+#define _MIPIC_DPI_RESOLUTION (_MIPI_MMIO_BASE(dev_priv) + 0xb820)
+#define MIPI_DPI_RESOLUTION(port) _MMIO_MIPI(port, _MIPIA_DPI_RESOLUTION, _MIPIC_DPI_RESOLUTION)
+#define VERTICAL_ADDRESS_SHIFT 16
+#define VERTICAL_ADDRESS_MASK (0xffff << 16)
+#define HORIZONTAL_ADDRESS_SHIFT 0
+#define HORIZONTAL_ADDRESS_MASK 0xffff
+
+#define _MIPIA_DBI_FIFO_THROTTLE (_MIPI_MMIO_BASE(dev_priv) + 0xb024)
+#define _MIPIC_DBI_FIFO_THROTTLE (_MIPI_MMIO_BASE(dev_priv) + 0xb824)
+#define MIPI_DBI_FIFO_THROTTLE(port) _MMIO_MIPI(port, _MIPIA_DBI_FIFO_THROTTLE, _MIPIC_DBI_FIFO_THROTTLE)
+#define DBI_FIFO_EMPTY_HALF (0 << 0)
+#define DBI_FIFO_EMPTY_QUARTER (1 << 0)
+#define DBI_FIFO_EMPTY_7_LOCATIONS (2 << 0)
+
+/* regs below are bits 15:0 */
+#define _MIPIA_HSYNC_PADDING_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb028)
+#define _MIPIC_HSYNC_PADDING_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb828)
+#define MIPI_HSYNC_PADDING_COUNT(port) _MMIO_MIPI(port, _MIPIA_HSYNC_PADDING_COUNT, _MIPIC_HSYNC_PADDING_COUNT)
+
+#define _MIPIA_HBP_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb02c)
+#define _MIPIC_HBP_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb82c)
+#define MIPI_HBP_COUNT(port) _MMIO_MIPI(port, _MIPIA_HBP_COUNT, _MIPIC_HBP_COUNT)
+
+#define _MIPIA_HFP_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb030)
+#define _MIPIC_HFP_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb830)
+#define MIPI_HFP_COUNT(port) _MMIO_MIPI(port, _MIPIA_HFP_COUNT, _MIPIC_HFP_COUNT)
+
+#define _MIPIA_HACTIVE_AREA_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb034)
+#define _MIPIC_HACTIVE_AREA_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb834)
+#define MIPI_HACTIVE_AREA_COUNT(port) _MMIO_MIPI(port, _MIPIA_HACTIVE_AREA_COUNT, _MIPIC_HACTIVE_AREA_COUNT)
+
+#define _MIPIA_VSYNC_PADDING_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb038)
+#define _MIPIC_VSYNC_PADDING_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb838)
+#define MIPI_VSYNC_PADDING_COUNT(port) _MMIO_MIPI(port, _MIPIA_VSYNC_PADDING_COUNT, _MIPIC_VSYNC_PADDING_COUNT)
+
+#define _MIPIA_VBP_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb03c)
+#define _MIPIC_VBP_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb83c)
+#define MIPI_VBP_COUNT(port) _MMIO_MIPI(port, _MIPIA_VBP_COUNT, _MIPIC_VBP_COUNT)
+
+#define _MIPIA_VFP_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb040)
+#define _MIPIC_VFP_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb840)
+#define MIPI_VFP_COUNT(port) _MMIO_MIPI(port, _MIPIA_VFP_COUNT, _MIPIC_VFP_COUNT)
+
+#define _MIPIA_HIGH_LOW_SWITCH_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb044)
+#define _MIPIC_HIGH_LOW_SWITCH_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb844)
+#define MIPI_HIGH_LOW_SWITCH_COUNT(port) _MMIO_MIPI(port, _MIPIA_HIGH_LOW_SWITCH_COUNT, _MIPIC_HIGH_LOW_SWITCH_COUNT)
+
+#define _MIPIA_DPI_CONTROL (_MIPI_MMIO_BASE(dev_priv) + 0xb048)
+#define _MIPIC_DPI_CONTROL (_MIPI_MMIO_BASE(dev_priv) + 0xb848)
+#define MIPI_DPI_CONTROL(port) _MMIO_MIPI(port, _MIPIA_DPI_CONTROL, _MIPIC_DPI_CONTROL)
+#define DPI_LP_MODE (1 << 6)
+#define BACKLIGHT_OFF (1 << 5)
+#define BACKLIGHT_ON (1 << 4)
+#define COLOR_MODE_OFF (1 << 3)
+#define COLOR_MODE_ON (1 << 2)
+#define TURN_ON (1 << 1)
+#define SHUTDOWN (1 << 0)
+
+#define _MIPIA_DPI_DATA (_MIPI_MMIO_BASE(dev_priv) + 0xb04c)
+#define _MIPIC_DPI_DATA (_MIPI_MMIO_BASE(dev_priv) + 0xb84c)
+#define MIPI_DPI_DATA(port) _MMIO_MIPI(port, _MIPIA_DPI_DATA, _MIPIC_DPI_DATA)
+#define COMMAND_BYTE_SHIFT 0
+#define COMMAND_BYTE_MASK (0x3f << 0)
+
+#define _MIPIA_INIT_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb050)
+#define _MIPIC_INIT_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb850)
+#define MIPI_INIT_COUNT(port) _MMIO_MIPI(port, _MIPIA_INIT_COUNT, _MIPIC_INIT_COUNT)
+#define MASTER_INIT_TIMER_SHIFT 0
+#define MASTER_INIT_TIMER_MASK (0xffff << 0)
+
+#define _MIPIA_MAX_RETURN_PKT_SIZE (_MIPI_MMIO_BASE(dev_priv) + 0xb054)
+#define _MIPIC_MAX_RETURN_PKT_SIZE (_MIPI_MMIO_BASE(dev_priv) + 0xb854)
+#define MIPI_MAX_RETURN_PKT_SIZE(port) _MMIO_MIPI(port, \
+ _MIPIA_MAX_RETURN_PKT_SIZE, _MIPIC_MAX_RETURN_PKT_SIZE)
+#define MAX_RETURN_PKT_SIZE_SHIFT 0
+#define MAX_RETURN_PKT_SIZE_MASK (0x3ff << 0)
+
+#define _MIPIA_VIDEO_MODE_FORMAT (_MIPI_MMIO_BASE(dev_priv) + 0xb058)
+#define _MIPIC_VIDEO_MODE_FORMAT (_MIPI_MMIO_BASE(dev_priv) + 0xb858)
+#define MIPI_VIDEO_MODE_FORMAT(port) _MMIO_MIPI(port, _MIPIA_VIDEO_MODE_FORMAT, _MIPIC_VIDEO_MODE_FORMAT)
+#define RANDOM_DPI_DISPLAY_RESOLUTION (1 << 4)
+#define DISABLE_VIDEO_BTA (1 << 3)
+#define IP_TG_CONFIG (1 << 2)
+#define VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE (1 << 0)
+#define VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS (2 << 0)
+#define VIDEO_MODE_BURST (3 << 0)
+
+#define _MIPIA_EOT_DISABLE (_MIPI_MMIO_BASE(dev_priv) + 0xb05c)
+#define _MIPIC_EOT_DISABLE (_MIPI_MMIO_BASE(dev_priv) + 0xb85c)
+#define MIPI_EOT_DISABLE(port) _MMIO_MIPI(port, _MIPIA_EOT_DISABLE, _MIPIC_EOT_DISABLE)
+#define BXT_DEFEATURE_DPI_FIFO_CTR (1 << 9)
+#define BXT_DPHY_DEFEATURE_EN (1 << 8)
+#define LP_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 7)
+#define HS_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 6)
+#define LOW_CONTENTION_RECOVERY_DISABLE (1 << 5)
+#define HIGH_CONTENTION_RECOVERY_DISABLE (1 << 4)
+#define TXDSI_TYPE_NOT_RECOGNISED_ERROR_RECOVERY_DISABLE (1 << 3)
+#define TXECC_MULTIBIT_ERROR_RECOVERY_DISABLE (1 << 2)
+#define CLOCKSTOP (1 << 1)
+#define EOT_DISABLE (1 << 0)
+
+#define _MIPIA_LP_BYTECLK (_MIPI_MMIO_BASE(dev_priv) + 0xb060)
+#define _MIPIC_LP_BYTECLK (_MIPI_MMIO_BASE(dev_priv) + 0xb860)
+#define MIPI_LP_BYTECLK(port) _MMIO_MIPI(port, _MIPIA_LP_BYTECLK, _MIPIC_LP_BYTECLK)
+#define LP_BYTECLK_SHIFT 0
+#define LP_BYTECLK_MASK (0xffff << 0)
+
+#define _MIPIA_TLPX_TIME_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb0a4)
+#define _MIPIC_TLPX_TIME_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb8a4)
+#define MIPI_TLPX_TIME_COUNT(port) _MMIO_MIPI(port, _MIPIA_TLPX_TIME_COUNT, _MIPIC_TLPX_TIME_COUNT)
+
+#define _MIPIA_CLK_LANE_TIMING (_MIPI_MMIO_BASE(dev_priv) + 0xb098)
+#define _MIPIC_CLK_LANE_TIMING (_MIPI_MMIO_BASE(dev_priv) + 0xb898)
+#define MIPI_CLK_LANE_TIMING(port) _MMIO_MIPI(port, _MIPIA_CLK_LANE_TIMING, _MIPIC_CLK_LANE_TIMING)
+
+/* bits 31:0 */
+#define _MIPIA_LP_GEN_DATA (_MIPI_MMIO_BASE(dev_priv) + 0xb064)
+#define _MIPIC_LP_GEN_DATA (_MIPI_MMIO_BASE(dev_priv) + 0xb864)
+#define MIPI_LP_GEN_DATA(port) _MMIO_MIPI(port, _MIPIA_LP_GEN_DATA, _MIPIC_LP_GEN_DATA)
+
+/* bits 31:0 */
+#define _MIPIA_HS_GEN_DATA (_MIPI_MMIO_BASE(dev_priv) + 0xb068)
+#define _MIPIC_HS_GEN_DATA (_MIPI_MMIO_BASE(dev_priv) + 0xb868)
+#define MIPI_HS_GEN_DATA(port) _MMIO_MIPI(port, _MIPIA_HS_GEN_DATA, _MIPIC_HS_GEN_DATA)
+
+#define _MIPIA_LP_GEN_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb06c)
+#define _MIPIC_LP_GEN_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb86c)
+#define MIPI_LP_GEN_CTRL(port) _MMIO_MIPI(port, _MIPIA_LP_GEN_CTRL, _MIPIC_LP_GEN_CTRL)
+#define _MIPIA_HS_GEN_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb070)
+#define _MIPIC_HS_GEN_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb870)
+#define MIPI_HS_GEN_CTRL(port) _MMIO_MIPI(port, _MIPIA_HS_GEN_CTRL, _MIPIC_HS_GEN_CTRL)
+#define LONG_PACKET_WORD_COUNT_SHIFT 8
+#define LONG_PACKET_WORD_COUNT_MASK (0xffff << 8)
+#define SHORT_PACKET_PARAM_SHIFT 8
+#define SHORT_PACKET_PARAM_MASK (0xffff << 8)
+#define VIRTUAL_CHANNEL_SHIFT 6
+#define VIRTUAL_CHANNEL_MASK (3 << 6)
+#define DATA_TYPE_SHIFT 0
+#define DATA_TYPE_MASK (0x3f << 0)
+/* data type values, see include/video/mipi_display.h */
+
+#define _MIPIA_GEN_FIFO_STAT (_MIPI_MMIO_BASE(dev_priv) + 0xb074)
+#define _MIPIC_GEN_FIFO_STAT (_MIPI_MMIO_BASE(dev_priv) + 0xb874)
+#define MIPI_GEN_FIFO_STAT(port) _MMIO_MIPI(port, _MIPIA_GEN_FIFO_STAT, _MIPIC_GEN_FIFO_STAT)
+#define DPI_FIFO_EMPTY (1 << 28)
+#define DBI_FIFO_EMPTY (1 << 27)
+#define LP_CTRL_FIFO_EMPTY (1 << 26)
+#define LP_CTRL_FIFO_HALF_EMPTY (1 << 25)
+#define LP_CTRL_FIFO_FULL (1 << 24)
+#define HS_CTRL_FIFO_EMPTY (1 << 18)
+#define HS_CTRL_FIFO_HALF_EMPTY (1 << 17)
+#define HS_CTRL_FIFO_FULL (1 << 16)
+#define LP_DATA_FIFO_EMPTY (1 << 10)
+#define LP_DATA_FIFO_HALF_EMPTY (1 << 9)
+#define LP_DATA_FIFO_FULL (1 << 8)
+#define HS_DATA_FIFO_EMPTY (1 << 2)
+#define HS_DATA_FIFO_HALF_EMPTY (1 << 1)
+#define HS_DATA_FIFO_FULL (1 << 0)
+
+#define _MIPIA_HS_LP_DBI_ENABLE (_MIPI_MMIO_BASE(dev_priv) + 0xb078)
+#define _MIPIC_HS_LP_DBI_ENABLE (_MIPI_MMIO_BASE(dev_priv) + 0xb878)
+#define MIPI_HS_LP_DBI_ENABLE(port) _MMIO_MIPI(port, _MIPIA_HS_LP_DBI_ENABLE, _MIPIC_HS_LP_DBI_ENABLE)
+#define DBI_HS_LP_MODE_MASK (1 << 0)
+#define DBI_LP_MODE (1 << 0)
+#define DBI_HS_MODE (0 << 0)
+
+#define _MIPIA_DPHY_PARAM (_MIPI_MMIO_BASE(dev_priv) + 0xb080)
+#define _MIPIC_DPHY_PARAM (_MIPI_MMIO_BASE(dev_priv) + 0xb880)
+#define MIPI_DPHY_PARAM(port) _MMIO_MIPI(port, _MIPIA_DPHY_PARAM, _MIPIC_DPHY_PARAM)
+#define EXIT_ZERO_COUNT_SHIFT 24
+#define EXIT_ZERO_COUNT_MASK (0x3f << 24)
+#define TRAIL_COUNT_SHIFT 16
+#define TRAIL_COUNT_MASK (0x1f << 16)
+#define CLK_ZERO_COUNT_SHIFT 8
+#define CLK_ZERO_COUNT_MASK (0xff << 8)
+#define PREPARE_COUNT_SHIFT 0
+#define PREPARE_COUNT_MASK (0x3f << 0)
+
+#define _MIPIA_DBI_BW_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb084)
+#define _MIPIC_DBI_BW_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb884)
+#define MIPI_DBI_BW_CTRL(port) _MMIO_MIPI(port, _MIPIA_DBI_BW_CTRL, _MIPIC_DBI_BW_CTRL)
+
+#define _MIPIA_CLK_LANE_SWITCH_TIME_CNT (_MIPI_MMIO_BASE(dev_priv) + 0xb088)
+#define _MIPIC_CLK_LANE_SWITCH_TIME_CNT (_MIPI_MMIO_BASE(dev_priv) + 0xb888)
+#define MIPI_CLK_LANE_SWITCH_TIME_CNT(port) _MMIO_MIPI(port, _MIPIA_CLK_LANE_SWITCH_TIME_CNT, _MIPIC_CLK_LANE_SWITCH_TIME_CNT)
+#define LP_HS_SSW_CNT_SHIFT 16
+#define LP_HS_SSW_CNT_MASK (0xffff << 16)
+#define HS_LP_PWR_SW_CNT_SHIFT 0
+#define HS_LP_PWR_SW_CNT_MASK (0xffff << 0)
+
+#define _MIPIA_STOP_STATE_STALL (_MIPI_MMIO_BASE(dev_priv) + 0xb08c)
+#define _MIPIC_STOP_STATE_STALL (_MIPI_MMIO_BASE(dev_priv) + 0xb88c)
+#define MIPI_STOP_STATE_STALL(port) _MMIO_MIPI(port, _MIPIA_STOP_STATE_STALL, _MIPIC_STOP_STATE_STALL)
+#define STOP_STATE_STALL_COUNTER_SHIFT 0
+#define STOP_STATE_STALL_COUNTER_MASK (0xff << 0)
+
+#define _MIPIA_INTR_STAT_REG_1 (_MIPI_MMIO_BASE(dev_priv) + 0xb090)
+#define _MIPIC_INTR_STAT_REG_1 (_MIPI_MMIO_BASE(dev_priv) + 0xb890)
+#define MIPI_INTR_STAT_REG_1(port) _MMIO_MIPI(port, _MIPIA_INTR_STAT_REG_1, _MIPIC_INTR_STAT_REG_1)
+#define _MIPIA_INTR_EN_REG_1 (_MIPI_MMIO_BASE(dev_priv) + 0xb094)
+#define _MIPIC_INTR_EN_REG_1 (_MIPI_MMIO_BASE(dev_priv) + 0xb894)
+#define MIPI_INTR_EN_REG_1(port) _MMIO_MIPI(port, _MIPIA_INTR_EN_REG_1, _MIPIC_INTR_EN_REG_1)
+#define RX_CONTENTION_DETECTED (1 << 0)
+
+/* XXX: only pipe A ?!? */
+#define MIPIA_DBI_TYPEC_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb100)
+#define DBI_TYPEC_ENABLE (1 << 31)
+#define DBI_TYPEC_WIP (1 << 30)
+#define DBI_TYPEC_OPTION_SHIFT 28
+#define DBI_TYPEC_OPTION_MASK (3 << 28)
+#define DBI_TYPEC_FREQ_SHIFT 24
+#define DBI_TYPEC_FREQ_MASK (0xf << 24)
+#define DBI_TYPEC_OVERRIDE (1 << 8)
+#define DBI_TYPEC_OVERRIDE_COUNTER_SHIFT 0
+#define DBI_TYPEC_OVERRIDE_COUNTER_MASK (0xff << 0)
+
+/* MIPI adapter registers */
+
+#define _MIPIA_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb104)
+#define _MIPIC_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb904)
+#define MIPI_CTRL(port) _MMIO_MIPI(port, _MIPIA_CTRL, _MIPIC_CTRL)
+#define ESCAPE_CLOCK_DIVIDER_SHIFT 5 /* A only */
+#define ESCAPE_CLOCK_DIVIDER_MASK (3 << 5)
+#define ESCAPE_CLOCK_DIVIDER_1 (0 << 5)
+#define ESCAPE_CLOCK_DIVIDER_2 (1 << 5)
+#define ESCAPE_CLOCK_DIVIDER_4 (2 << 5)
+#define READ_REQUEST_PRIORITY_SHIFT 3
+#define READ_REQUEST_PRIORITY_MASK (3 << 3)
+#define READ_REQUEST_PRIORITY_LOW (0 << 3)
+#define READ_REQUEST_PRIORITY_HIGH (3 << 3)
+#define RGB_FLIP_TO_BGR (1 << 2)
+
+#define BXT_PIPE_SELECT_SHIFT 7
+#define BXT_PIPE_SELECT_MASK (7 << 7)
+#define BXT_PIPE_SELECT(pipe) ((pipe) << 7)
+#define GLK_PHY_STATUS_PORT_READY (1 << 31) /* RO */
+#define GLK_ULPS_NOT_ACTIVE (1 << 30) /* RO */
+#define GLK_MIPIIO_RESET_RELEASED (1 << 28)
+#define GLK_CLOCK_LANE_STOP_STATE (1 << 27) /* RO */
+#define GLK_DATA_LANE_STOP_STATE (1 << 26) /* RO */
+#define GLK_LP_WAKE (1 << 22)
+#define GLK_LP11_LOW_PWR_MODE (1 << 21)
+#define GLK_LP00_LOW_PWR_MODE (1 << 20)
+#define GLK_FIREWALL_ENABLE (1 << 16)
+#define BXT_PIXEL_OVERLAP_CNT_MASK (0xf << 10)
+#define BXT_PIXEL_OVERLAP_CNT_SHIFT 10
+#define BXT_DSC_ENABLE (1 << 3)
+#define BXT_RGB_FLIP (1 << 2)
+#define GLK_MIPIIO_PORT_POWERED (1 << 1) /* RO */
+#define GLK_MIPIIO_ENABLE (1 << 0)
+
+#define _MIPIA_DATA_ADDRESS (_MIPI_MMIO_BASE(dev_priv) + 0xb108)
+#define _MIPIC_DATA_ADDRESS (_MIPI_MMIO_BASE(dev_priv) + 0xb908)
+#define MIPI_DATA_ADDRESS(port) _MMIO_MIPI(port, _MIPIA_DATA_ADDRESS, _MIPIC_DATA_ADDRESS)
+#define DATA_MEM_ADDRESS_SHIFT 5
+#define DATA_MEM_ADDRESS_MASK (0x7ffffff << 5)
+#define DATA_VALID (1 << 0)
+
+#define _MIPIA_DATA_LENGTH (_MIPI_MMIO_BASE(dev_priv) + 0xb10c)
+#define _MIPIC_DATA_LENGTH (_MIPI_MMIO_BASE(dev_priv) + 0xb90c)
+#define MIPI_DATA_LENGTH(port) _MMIO_MIPI(port, _MIPIA_DATA_LENGTH, _MIPIC_DATA_LENGTH)
+#define DATA_LENGTH_SHIFT 0
+#define DATA_LENGTH_MASK (0xfffff << 0)
+
+#define _MIPIA_COMMAND_ADDRESS (_MIPI_MMIO_BASE(dev_priv) + 0xb110)
+#define _MIPIC_COMMAND_ADDRESS (_MIPI_MMIO_BASE(dev_priv) + 0xb910)
+#define MIPI_COMMAND_ADDRESS(port) _MMIO_MIPI(port, _MIPIA_COMMAND_ADDRESS, _MIPIC_COMMAND_ADDRESS)
+#define COMMAND_MEM_ADDRESS_SHIFT 5
+#define COMMAND_MEM_ADDRESS_MASK (0x7ffffff << 5)
+#define AUTO_PWG_ENABLE (1 << 2)
+#define MEMORY_WRITE_DATA_FROM_PIPE_RENDERING (1 << 1)
+#define COMMAND_VALID (1 << 0)
+
+#define _MIPIA_COMMAND_LENGTH (_MIPI_MMIO_BASE(dev_priv) + 0xb114)
+#define _MIPIC_COMMAND_LENGTH (_MIPI_MMIO_BASE(dev_priv) + 0xb914)
+#define MIPI_COMMAND_LENGTH(port) _MMIO_MIPI(port, _MIPIA_COMMAND_LENGTH, _MIPIC_COMMAND_LENGTH)
+#define COMMAND_LENGTH_SHIFT(n) (8 * (n)) /* n: 0...3 */
+#define COMMAND_LENGTH_MASK(n) (0xff << (8 * (n)))
+
+#define _MIPIA_READ_DATA_RETURN0 (_MIPI_MMIO_BASE(dev_priv) + 0xb118)
+#define _MIPIC_READ_DATA_RETURN0 (_MIPI_MMIO_BASE(dev_priv) + 0xb918)
+#define MIPI_READ_DATA_RETURN(port, n) _MMIO(_MIPI_PORT(port, _MIPIA_READ_DATA_RETURN0, _MIPIC_READ_DATA_RETURN0) + 4 * (n)) /* n: 0...7 */
+
+#define _MIPIA_READ_DATA_VALID (_MIPI_MMIO_BASE(dev_priv) + 0xb138)
+#define _MIPIC_READ_DATA_VALID (_MIPI_MMIO_BASE(dev_priv) + 0xb938)
+#define MIPI_READ_DATA_VALID(port) _MMIO_MIPI(port, _MIPIA_READ_DATA_VALID, _MIPIC_READ_DATA_VALID)
+#define READ_DATA_VALID(n) (1 << (n))
+
+#endif /* __VLV_DSI_REGS_H__ */
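
The read-return registers above pair each of the eight MIPI_READ_DATA_RETURN
slots with a READ_DATA_VALID(n) flag. A minimal sketch of draining one slot,
assuming the intel_de_*() helpers again; read_return_sketch is a hypothetical
name, and the driver's real read path goes through the DSI host transfer code
rather than poking these registers directly:

	static u32 read_return_sketch(struct drm_i915_private *dev_priv,
				      enum port port, int n)
	{
		/* Each slot n = 0..7 has a matching valid bit. */
		if (!(intel_de_read(dev_priv, MIPI_READ_DATA_VALID(port)) &
		      READ_DATA_VALID(n)))
			return 0;	/* sketch only: 0 also means "no data" */

		return intel_de_read(dev_priv, MIPI_READ_DATA_RETURN(port, n));
	}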